Dataset schema:

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 6 – 14.9M |
| ext | string | 1 distinct value |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 6–260 |
| max_stars_repo_name | string | length 6–119 |
| max_stars_repo_head_hexsha | string | length 40–41 |
| max_stars_repo_licenses | sequence | – |
| max_stars_count | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string | length 24–24 |
| max_stars_repo_stars_event_max_datetime | string | length 24–24 |
| max_issues_repo_path | string | length 6–260 |
| max_issues_repo_name | string | length 6–119 |
| max_issues_repo_head_hexsha | string | length 40–41 |
| max_issues_repo_licenses | sequence | – |
| max_issues_count | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string | length 24–24 |
| max_issues_repo_issues_event_max_datetime | string | length 24–24 |
| max_forks_repo_path | string | length 6–260 |
| max_forks_repo_name | string | length 6–119 |
| max_forks_repo_head_hexsha | string | length 40–41 |
| max_forks_repo_licenses | sequence | – |
| max_forks_count | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string | length 24–24 |
| max_forks_repo_forks_event_max_datetime | string | length 24–24 |
| avg_line_length | float64 | 2 – 1.04M |
| max_line_length | int64 | 2 – 11.2M |
| alphanum_fraction | float64 | 0 – 1 |
| cells | sequence | – |
| cell_types | sequence | – |
| cell_type_groups | sequence | – |
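Once a split is exported locally, these columns can be filtered with pandas. The sketch below is illustrative only: the `train.parquet` filename is a placeholder, not something specified by this card.

```python
import pandas as pd

# Hypothetical local export of one split of this dataset; the filename is a
# placeholder, not taken from the card.
df = pd.read_parquet("train.parquet")

# Keep notebooks that look like real code: mostly alphanumeric content, no
# extremely long single lines, and a permissive license on the starred repo.
mask = (
    (df["alphanum_fraction"] > 0.3)
    & (df["max_line_length"] < 1000)
    & df["max_stars_repo_licenses"].apply(lambda ls: ls is not None and "Apache-2.0" in ls)
)
subset = df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]]
print(subset.head())
```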
Example row:

| Field | Value |
|---|---|
| hexsha | e7c1f4bd533a4f8ed015e77c6f0c4b6672d7cd56 |
| size | 641,002 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | Data P/INTERNAL DATA/Data_Preparation/TXT_DATA/txtdata_extraction.ipynb |
| max_stars_repo_name | mohamedba01/EPilepsy_EEG |
| max_stars_repo_head_hexsha | 1ece9810490ad276b258371981ab95d46deb4d32 |
| max_stars_repo_licenses | [ "Apache-2.0" ] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | Data P/INTERNAL DATA/Data_Preparation/TXT_DATA/txtdata_extraction.ipynb |
| max_issues_repo_name | mohamedba01/EPilepsy_EEG |
| max_issues_repo_head_hexsha | 1ece9810490ad276b258371981ab95d46deb4d32 |
| max_issues_repo_licenses | [ "Apache-2.0" ] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | Data P/INTERNAL DATA/Data_Preparation/TXT_DATA/txtdata_extraction.ipynb |
| max_forks_repo_name | mohamedba01/EPilepsy_EEG |
| max_forks_repo_head_hexsha | 1ece9810490ad276b258371981ab95d46deb4d32 |
| max_forks_repo_licenses | [ "Apache-2.0" ] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 49.160365 |
| max_line_length | 293 |
| alphanum_fraction | 0.442835 |
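The numeric text statistics in this row (avg_line_length, max_line_length, alphanum_fraction) are plain functions of the file's text. The card does not define them exactly; the sketch below shows one plausible computation, with `example.ipynb` as a placeholder path.

```python
# A rough sketch of how line-length and alphanumeric statistics like the ones
# above could be computed for a file. The exact definitions used to build the
# dataset are not given on this card, so treat this as an approximation.
def text_stats(path: str) -> dict:
    with open(path, encoding="utf-8", errors="replace") as f:
        text = f.read()
    lines = text.splitlines() or [""]
    lengths = [len(line) for line in lines]
    alnum = sum(ch.isalnum() for ch in text)
    return {
        "size": len(text.encode("utf-8")),
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alnum / max(len(text), 1),
    }

print(text_stats("example.ipynb"))  # "example.ipynb" is a placeholder path
```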
[ [ [ "", "_____no_output_____" ], [ "#L : liste \n#LR : Liste root", "_____no_output_____" ], [ "from google.colab import drive\n\ndrive.mount(\"/content/gdrive\")", "Mounted at /content/gdrive\n" ], [ "!pip install mne", "Collecting mne\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/c2/29/7f38c7c99ca65fe4aac9054239d885c44ab7f9e8b4f65e9f2bfa489b0f38/mne-0.22.1-py3-none-any.whl (6.9MB)\n\u001b[K |████████████████████████████████| 6.9MB 7.9MB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.11.3 in /usr/local/lib/python3.7/dist-packages (from mne) (1.19.5)\nRequirement already satisfied: scipy>=0.17.1 in /usr/local/lib/python3.7/dist-packages (from mne) (1.4.1)\nInstalling collected packages: mne\nSuccessfully installed mne-0.22.1\n" ], [ "import mne\nimport pandas as pd\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "import os\nfile=os.listdir('/content/gdrive/MyDrive/Données_internes/edf')", "_____no_output_____" ], [ "os.chdir(r'/content/gdrive/MyDrive/Données_internes/edf')\nList_files=[]\nfor root, dirs, files in os.walk(\".\", topdown = False):\n for name in files:\n #print(os.path.join(root, name))\n List_files.append(os.path.join(root,name))\n #for name in dirs:\n #print(os.path.join(root, name))\nList_files ", "_____no_output_____" ], [ "####################################################\n# #\n# TXT EPILEPSY #\n# #\n#################################################### ", "_____no_output_____" ], [ "#@title Default title text\n#liste des liens fichiers txt des patients epileptiques\nos.chdir(r'/content/gdrive/MyDrive/Données_internes/edf')\nLR_Txt_epl=[]\nfor root, dirs, files in os.walk(\"./epilepsy\", topdown = False):\n for name in files:\n #print(os.path.join(root, name))\n if name.endswith(\"txt\"):\n LR_Txt_epl.append(os.path.join(root,name))\n #for name in dirs:\n #print(os.path.join(root, name))\nLR_Txt_epl", "_____no_output_____" ], [ "#liste des fichiers txt des patients épileptiques\nL_txt_epl = []\n\nfor root,dirs,files in os.walk(r'/content/gdrive/MyDrive/Données_internes/edf/epilepsy'):\n for filename in files:\n if filename.endswith(\"txt\"):\n L_txt_epl.append(filename)\n\nL_txt_epl\n", "_____no_output_____" ], [ "\n#liste des fichiers txt des patients épileptiques ._\nL_txt_epl = []\n\nfor root,dirs,files in os.walk(r'/content/gdrive/MyDrive/Données_internes/edf/epilepsy'):\n for filename in files:\n if filename.endswith(\"txt\"):\n if filename.startswith(\"._\"):\n L_txt_epl.append(filename)\n\nL_txt_epl\n\n\n", "_____no_output_____" ], [ "#liste des ID des fichiers txt des patients épileptiques\nL_ID_txt_epl=[]\nfor i in range(len(L_txt_epl)):\n FILE = L_txt_epl[i];\n ID = FILE[0:FILE.index('.txt')];\n L_ID_txt_epl.append(ID);\n\nL_ID_txt_epl ", "_____no_output_____" ], [ "#fonction retourne ID de fichier a partir de lien \n#root = './epilepsy/02_tcp_le/003/00000355/s002_2009_12_03'\nL=[]\nfor root,dirs,files in os.walk(r'/content/gdrive/MyDrive/Données_internes/edf/epilepsy/02_tcp_le/003/00000355/s002_2009_12_03'):\n\n\n\n for filename in files:\n if filename.endswith(\"txt\"):\n L.append(filename)\n\n", "_____no_output_____" ], [ "L", "_____no_output_____" ], [ "#liste des liens fichiers txt des patients \nos.chdir(r'/content/gdrive/MyDrive/Données_internes/edf')\nLR_Txt_nepl=[]\nfor root, dirs, files in os.walk(\".\", topdown = False):\n for name in files:\n #print(os.path.join(root, name))\n # if name.endswith('txt.', 14, 18):\n # LR_Txt_nepl.append(os.path.join(root,name))\n #if name.endswith(\"txt\"):\n if 
name.startswith(\"._\"):\n LR_Txt_nepl.append(os.path.join(root,name))\n #for name in dirs:\n #print(os.path.join(root, name))\nLR_Txt_nepl\n\n#drop\n#os.chdir(r'/content/gdrive/MyDrive/Données_internes/edf')\n\n#for root, dirs, files in os.walk(\".\", topdown = False):\n # for name in files:\n #print(os.path.join(root, name))\n # if name.endswith('txt.', 14, 18):\n # os.remove(name)\n #for name in dirs:\n #print(os.path.join(root, name))\n\n", "_____no_output_____" ], [ "os.chdir('/content/gdrive/MyDrive/Données_internes/edf')\nfor l in LR_Txt_nepl :\n os.remove(l)", "_____no_output_____" ], [ "####################################################\n# #\n# TXT NO EPILEPSY #\n# #\n#################################################### ", "_____no_output_____" ], [ "#liste des liens fichiers txt des patients no epileptiques\nos.chdir(r'/content/gdrive/MyDrive/Données_internes/edf')\nLR_Txt_nepl=[]\nfor root, dirs, files in os.walk(\"./no_epilepsy\", topdown = False):\n for name in files:\n #print(os.path.join(root, name))\n if name.endswith(\"txt\"):\n LR_Txt_nepl.append(os.path.join(root,name))\n #for name in dirs:\n #print(os.path.join(root, name))\nLR_Txt_nepl", "_____no_output_____" ], [ "\n#liste des fichiers txt des patients no épileptiques\nL_txt_nepl = []\n\nfor root,dirs,files in os.walk(r'/content/gdrive/MyDrive/Données_internes/edf/no_epilepsy'):\n for filename in files:\n if filename.endswith(\"txt\"):\n L_txt_nepl.append(filename)\n\nL_txt_nepl\n", "_____no_output_____" ], [ "#liste des ID des fichiers txt des patients no épileptiques\nL_ID_txt_nepl=[]\nfor i in range(len(L_txt_nepl)):\n FILE = L_txt_nepl[i];\n ID = FILE[0:FILE.index('.txt')];\n L_ID_txt_nepl.append(ID);\n\nL_ID_txt_nepl ", "_____no_output_____" ], [ "##################################################\n##################################################\n##################################################\n#liste des liens des fichiers txt des patients\nLR_Txt = LR_Txt_epl + LR_Txt_nepl\n#liste des fichiers txt des patients\nL_txt = L_txt_epl + L_txt_nepl\n#liste des ID des fichiers txt des patients\nL_ID_txt = L_ID_txt_epl + L_ID_txt_nepl \n##################################################\n##################################################\n##################################################\nLR_Txt", "_____no_output_____" ], [ "#liste des medicamant \nL_medc = ['phenobarbital','keppra','dilantin','aspirin','lovastatin','carbamazepine','omeprazole','vitamins','tegretol',\n 'lamictal','zoloft','hctz','actonel','zantac','calcium','hydrochlorothiazide','hydroxyurea','rilosec','ativan'\n ,'etomidate','fentanyl','versed','rocurontium','norvasc','coreg','percocet','lovenox','topamax','vimpat'\n ,'topiramate','succinylcholine','insulin','aricept','pravachol','nifedipine','glipizide','cymbalta','plavix','bactrim','lisinopril'\n ,'metformin','hiv','avandia','diovan','amiodipine','lorazepam','heparin','acetazolamide','Ceftriaxone','morphine',\n 'antoprazole','levothyroxine','glyburide','tylenol','celexa','elavil','rocuronium','vecuronium','metoprolol','demerol'\n ,'depakote','midazolam','zonisamide','serax','thiamine','lantus','levetiracetam','sinemet','asa','protonix'\n ,'labetalol','clonazepam''digoxin','hydralazine','lipitor','atenolol','diltiazem','vancomycin', 'zosyn',\n 'midodrine','seroquel','klonopin','albuterol','pantoprazole','atropine','penicillin','famotidine','clindamycin'\n ,'atorvastatin','spiriva','zocor','advair','dopamine','norepinephrine','bicarb','abilify','doxycycline',\n 
'zosyn','protonix','pyridoxine','rifampin','dexamethasone','acyclovir','brinzolamide',\n 'lopressor','pravastatin','dextrose','ciloxan','augmentin','citalopram','lexapro','cipro','ipratropium','pulmicort',\n 'opioids','cannabis','xanax','haldol','benadryl','coumadin','isuprel',\n 'pitavastatin','ambien','fenofibrate','carbonate','indapamide','motrin','antibiotics','clonidine','bentyl',\n 'potassium','prenatal','captopril','baclofen','alprazolam','zolpidem','colace','zanaflex',\n 'vasotec','mycolog','zomig','cozaar','voltaren','folvite','trazodone' ,'dilaudid','oxycodone','carvedilol']", "_____no_output_____" ], [ "# Creating an empty Dataframe with column names only\nimport pandas as pd\n\nData_medic = pd.DataFrame(columns=['ID']+ L_medc )", "_____no_output_____" ], [ "Data_medic['ID']= L_ID_txt ", "_____no_output_____" ], [ "Data_medic", "_____no_output_____" ], [ "Data_medic[L_medc[:]]= 0", "_____no_output_____" ], [ "Data_medic", "_____no_output_____" ], [ "\nData_txt = pd.DataFrame(columns=['ID','age','gender','egg nature','HR'] )\nData_txt['ID'] = L_ID_txt \nData_txt", "_____no_output_____" ], [ "!apt-get -qq install -y libarchive-dev && pip install -U fasttext", "Requirement already up-to-date: fasttext in /usr/local/lib/python3.7/dist-packages (0.9.2)\nRequirement already satisfied, skipping upgrade: pybind11>=2.2 in /usr/local/lib/python3.7/dist-packages (from fasttext) (2.6.2)\nRequirement already satisfied, skipping upgrade: setuptools>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from fasttext) (54.2.0)\nRequirement already satisfied, skipping upgrade: numpy in /usr/local/lib/python3.7/dist-packages (from fasttext) (1.19.5)\n" ], [ "!apt-get -qq install -y libarchive-dev && pip install -U contractions", "Requirement already up-to-date: contractions in /usr/local/lib/python3.7/dist-packages (0.0.48)\nRequirement already satisfied, skipping upgrade: textsearch>=0.0.21 in /usr/local/lib/python3.7/dist-packages (from contractions) (0.0.21)\nRequirement already satisfied, skipping upgrade: anyascii in /usr/local/lib/python3.7/dist-packages (from textsearch>=0.0.21->contractions) (0.1.7)\nRequirement already satisfied, skipping upgrade: pyahocorasick in /usr/local/lib/python3.7/dist-packages (from textsearch>=0.0.21->contractions) (1.4.2)\n" ], [ "import io\n# import library\nimport contractions\n# contracted text\ndef preprocessing1(ID):\n text = io.open(ID, encoding='ISO-8859-1').read()\n# creating an empty list\n expanded_words = [] \n for word in text.split():\n # using contractions.fix to expand the shotened words\n expanded_words.append(contractions.fix(word)) \n \n expanded_text = ' '.join(expanded_words)\n #print('Original text: ' + text)\n #print('Expanded_text: ' + expanded_text)\n\n #Converting all Characters to Lowercase\n# ==> Transforming all words to lowercase\n# Checking for lowercase characters\n\n string = expanded_text.lower()\n #print(string)\n return string\n\n #Removing Punctuations ==> Punctuation is often removed from our corpus since they serve little value once we begin to analyze our data.\n# initializing string\n\n \n# initializing punctuations string \ndef preprocessing2(s):\n punc = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n \n# Removing punctuations in string\n# Using loop + punctuation string\n for ele in s: \n if ele in punc: \n s = s.replace(ele, \" \") \n \n# printing result \n #print(\"The string after punctuation filter : \" + s)\n return s", "_____no_output_____" ], [ "#Removing Stopwords ==> Stopwords are typically useless words and do not add 
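`preprocessing2` rescans the whole string once for every punctuation character it finds. A one-pass equivalent using `str.translate` (a sketch of an alternative, not code from the notebook) keeps the same punctuation set and the same replace-with-space behaviour:

```python
# Equivalent one-pass punctuation stripping with str.translate; a sketch only,
# not the notebook's own implementation.
PUNC = """!()-[]{};:'"\\,<>./?@#$%^&*_~"""
_PUNC_TABLE = str.maketrans({ch: " " for ch in PUNC})

def strip_punctuation(s: str) -> str:
    # replace every punctuation character with a space in a single pass
    return s.translate(_PUNC_TABLE)

print(strip_punctuation("EEG: abnormal, HR-72 (bpm)"))  # "EEG  abnormal  HR 72  bpm "
```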
```python
# %%
# Removing stopwords: stopwords are typically useless words that add little
# meaning to a sentence (common English stopwords: "you, he, she, in, a, has,
# are", ...). First import the NLTK stopwords list and set the language to English.
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
nltk.download('punkt')
nltk.download('stopwords')
# output: punkt and stopwords packages already up-to-date

def stopwordss(tp):
    stop_wordss = set(stopwords.words('english'))
    word_tokenss = word_tokenize(tp)
    filtered_sentence = [w for w in word_tokenss if w not in stop_wordss]
    return filtered_sentence
# %%
def patientGender(fs):
    a = ""
    if "male" in fs:
        print("male")
        a = "male"
    elif "man" in fs:
        print("male")
        a = "male"
    elif "woman" in fs:
        print("female")
        a = "female"
    elif "female" in fs:
        print("female")
        a = "female"
    else:
        a = "Not specified"
    return a
# %%
def eeg(fs):
    a = ""
    if "normal" in fs:
        a = "normal EEG"
    elif "abnormal" in fs:
        a = "abnormal EEG"
    else:
        a = "Not specified"
    return a
# %%
def age(fs):
    # the token preceding "year" is taken as the patient's age
    age = ""
    n = ""
    for a in fs:
        if a == "year":
            age = n
            break
        else:
            n = a
    if age == "":
        age = -1
        print("not specified")
    return age
# %%
# Remove punctuation and stopwords, then take the token that follows
# "hr" or "rate" as the heart rate.
def preprossesshr(s):
    punc = '''!()[]{};:'",<>\?@#$%^&*_~'''
    for ele in s:
        if ele in punc:
            s = s.replace(ele, " ")
    stop_wordss = set(stopwords.words('english'))
    word_tokenss = word_tokenize(s)
    filtered_sentenc = [w for w in word_tokenss if w not in stop_wordss]
    bpm = ""
    t = False
    for a in filtered_sentenc:
        if a == "hr" or a == "rate":
            t = True
            continue
        if t == True:
            bpm = a
            break
    if bpm == "":
        bpm = "not specified"
    return bpm
# %%
# Match the tokens of a report against the medication search list.
def medicament(ID):
    l = stopwordss(preprocessing1(ID))
    searchList = L_medc
    Lmedc = []
    foundList = []
    for index, sList in enumerate(l):
        # store the token if it appears in the search list
        if sList in searchList:
            foundList.append(l[index])
    # keep the search-list order, dropping duplicate matches
    for val in searchList:
        if val in foundList:
            Lmedc.append(val)
    print(Lmedc)
    return Lmedc
# %%
medicament
# %%
LR_Txt
# %%
# Extract gender, age, EEG nature, heart rate and medications from every report.
ageCol = []
eeg_nature = []
hr_col = []
gender_col = []

i = 0
for file in LR_Txt:
    tp = preprocessing2(preprocessing1(file))
    g = patientGender(stopwordss(tp))
    gender_col.append(g)
    a = age(stopwordss(tp))
    ageCol.append(a)
    eg = eeg(stopwordss(tp))
    eeg_nature.append(eg)
    hr = preprossesshr(preprocessing1(file))
    hr_col.append(hr)
    medicaments = medicament(file)
    print(medicaments)
    Data_medic.loc[i, medicaments] = 1
    print("fichier :", i, " de id : ", L_ID_txt[i], "\n")
    i += 1

Data_txt['age'] = ageCol
Data_txt['egg nature'] = eeg_nature
Data_txt['HR'] = hr_col
Data_txt['gender'] = gender_col
```
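`medicament()` checks every token against the full medication list with a linear scan, and the duplicated 'zosyn'/'protonix' entries in `L_medc` are why those two names appear twice in its matches. A set-based variant (a sketch, not the notebook's code, assuming `L_medc`, `stopwordss` and `preprocessing1` from above are in scope) returns the same list in the same order:

```python
# Set-based variant of medicament(); a sketch only, relying on L_medc and the
# preprocessing helpers defined in the notebook cells above.
def medicament_fast(ID):
    tokens = set(stopwordss(preprocessing1(ID)))  # O(1) membership tests
    # iterate the search list so the result keeps its order (and its duplicates)
    return [med for med in L_medc if med in tokens]
```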
The final cell prints, for every report file, the detected gender (when found), any "not specified" notes from the age extraction, the matched medication list (twice: once inside `medicament()` and once in the loop), and a closing `fichier : <i> de id : <ID>` line. An excerpt of the output:

```
female
['ativan', 'zocor']
['ativan', 'zocor']
fichier : 0 de id : 00006514_s011

female
['phenobarbital', 'hctz', 'lovenox', 'plavix', 'diovan', 'levothyroxine', 'metoprolol', 'thiamine', 'asa', 'protonix', 'zosyn', 'zocor', 'zosyn', 'protonix']
['phenobarbital', 'hctz', 'lovenox', 'plavix', 'diovan', 'levothyroxine', 'metoprolol', 'thiamine', 'asa', 'protonix', 'zosyn', 'zocor', 'zosyn', 'protonix']
fichier : 1 de id : 00006514_s010

female
['ativan', 'zocor']
['ativan', 'zocor']
fichier : 2 de id : 00006514_s009
...
```

The log continues in the same format for the remaining reports, up to at least fichier : 368.
369 de id : 00009158_s010 \n\nfemale\n['keppra', 'dilantin', 'lamictal', 'depakote', 'xanax']\n['keppra', 'dilantin', 'lamictal', 'depakote', 'xanax']\nfichier : 370 de id : 00009158_s006 \n\nfemale\n[]\n[]\nfichier : 371 de id : 00009158_s007 \n\nfemale\n[]\n[]\nfichier : 372 de id : 00009158_s009 \n\nfemale\n[]\n[]\nfichier : 373 de id : 00009158_s008 \n\nfemale\n['keppra']\n['keppra']\nfichier : 374 de id : 00009151_s001 \n\nfemale\n['keppra', 'lorazepam', 'xanax']\n['keppra', 'lorazepam', 'xanax']\nfichier : 375 de id : 00009151_s002 \n\nmale\n['lisinopril', 'hydralazine']\n['lisinopril', 'hydralazine']\nfichier : 376 de id : 00009105_s001 \n\nfemale\n['ativan', 'versed', 'depakote']\n['ativan', 'versed', 'depakote']\nfichier : 377 de id : 00009736_s001 \n\nmale\n['clonidine']\n['clonidine']\nfichier : 378 de id : 00009848_s001 \n\nmale\n['zonisamide']\n['zonisamide']\nfichier : 379 de id : 00009842_s002 \n\nmale\n['zonisamide']\n['zonisamide']\nfichier : 380 de id : 00009842_s001 \n\nnot specified\n['zonisamide']\n['zonisamide']\nfichier : 381 de id : 00009842_s003 \n\nfemale\n['fentanyl', 'demerol']\n['fentanyl', 'demerol']\nfichier : 382 de id : 00009840_s001 \n\nfemale\n['versed', 'depakote']\n['versed', 'depakote']\nfichier : 383 de id : 00009840_s008 \n\nfemale\n['fentanyl', 'demerol']\n['fentanyl', 'demerol']\nfichier : 384 de id : 00009840_s002 \n\nfemale\n['aspirin', 'demerol', 'depakote', 'midazolam']\n['aspirin', 'demerol', 'depakote', 'midazolam']\nfichier : 385 de id : 00009840_s006 \n\nfemale\n['versed', 'depakote']\n['versed', 'depakote']\nfichier : 386 de id : 00009840_s004 \n\nnot specified\n['depakote', 'midazolam']\n['depakote', 'midazolam']\nfichier : 387 de id : 00009840_s007 \n\nfemale\n['versed', 'depakote']\n['versed', 'depakote']\nfichier : 388 de id : 00009840_s005 \n\nfemale\n['versed', 'depakote']\n['versed', 'depakote']\nfichier : 389 de id : 00009840_s003 \n\nmale\n['keppra', 'midazolam', 'famotidine', 'dilaudid']\n['keppra', 'midazolam', 'famotidine', 'dilaudid']\nfichier : 390 de id : 00009992_s001 \n\nmale\n['keppra', 'tylenol', 'oxycodone']\n['keppra', 'tylenol', 'oxycodone']\nfichier : 391 de id : 00009992_s002 \n\nmale\n[]\n[]\nfichier : 392 de id : 00009969_s001 \n\nfemale\n[]\n[]\nfichier : 393 de id : 00009622_s001 \n\nfemale\n['keppra', 'coreg', 'topamax', 'lexapro']\n['keppra', 'coreg', 'topamax', 'lexapro']\nfichier : 394 de id : 00009622_s003 \n\nfemale\n['coreg', 'topamax', 'plavix', 'lorazepam', 'midazolam', 'coumadin', 'ambien']\n['coreg', 'topamax', 'plavix', 'lorazepam', 'midazolam', 'coumadin', 'ambien']\nfichier : 395 de id : 00009622_s002 \n\nnot specified\n['keppra', 'dilantin', 'ativan', 'serax']\n['keppra', 'dilantin', 'ativan', 'serax']\nfichier : 396 de id : 00010022_s004 \n\nnot specified\n['keppra', 'dilantin', 'ativan', 'serax']\n['keppra', 'dilantin', 'ativan', 'serax']\nfichier : 397 de id : 00010022_s001 \n\nnot specified\n['keppra', 'dilantin', 'ativan', 'serax']\n['keppra', 'dilantin', 'ativan', 'serax']\nfichier : 398 de id : 00010022_s003 \n\nmale\n['keppra', 'dilantin', 'vimpat']\n['keppra', 'dilantin', 'vimpat']\nfichier : 399 de id : 00010022_s005 \n\nnot specified\n['keppra', 'dilantin', 'ativan', 'serax']\n['keppra', 'dilantin', 'ativan', 'serax']\nfichier : 400 de id : 00010022_s002 \n\nmale\n['keppra', 'dilantin', 'vimpat']\n['keppra', 'dilantin', 'vimpat']\nfichier : 401 de id : 00010022_s006 \n\nfemale\n['keppra', 'percocet', 'levothyroxine', 'xanax', 'baclofen']\n['keppra', 'percocet', 'levothyroxine', 'xanax', 
'baclofen']\nfichier : 402 de id : 00010052_s003 \n\nfemale\n['keppra', 'percocet', 'levothyroxine', 'xanax', 'baclofen']\n['keppra', 'percocet', 'levothyroxine', 'xanax', 'baclofen']\nfichier : 403 de id : 00010052_s006 \n\nfemale\n['keppra']\n['keppra']\nfichier : 404 de id : 00010052_s001 \n\nfemale\n['keppra', 'percocet', 'levothyroxine', 'xanax', 'baclofen']\n['keppra', 'percocet', 'levothyroxine', 'xanax', 'baclofen']\nfichier : 405 de id : 00010052_s007 \n\nfemale\n['keppra', 'xanax', 'baclofen']\n['keppra', 'xanax', 'baclofen']\nfichier : 406 de id : 00010052_s005 \n\nfemale\n['keppra']\n['keppra']\nfichier : 407 de id : 00010052_s002 \n\nfemale\n['keppra', 'percocet', 'levothyroxine', 'xanax', 'baclofen']\n['keppra', 'percocet', 'levothyroxine', 'xanax', 'baclofen']\nfichier : 408 de id : 00010052_s004 \n\nfemale\n['metoprolol']\n['metoprolol']\nfichier : 409 de id : 00010138_s001 \n\nfemale\n['keppra', 'xanax']\n['keppra', 'xanax']\nfichier : 410 de id : 00010590_s002 \n\nfemale\n['xanax', 'oxycodone']\n['xanax', 'oxycodone']\nfichier : 411 de id : 00010590_s001 \n\nfemale\n['celexa']\n['celexa']\nfichier : 412 de id : 00010412_s002 \n\nfemale\n['celexa']\n['celexa']\nfichier : 413 de id : 00010412_s003 \n\nfemale\n['celexa']\n['celexa']\nfichier : 414 de id : 00010412_s001 \n\nfemale\n['celexa']\n['celexa']\nfichier : 415 de id : 00010412_s004 \n\nfemale\n['celexa', 'elavil']\n['celexa', 'elavil']\nfichier : 416 de id : 00010412_s007 \n\nfemale\n['celexa']\n['celexa']\nfichier : 417 de id : 00010412_s005 \n\nfemale\n['celexa', 'elavil']\n['celexa', 'elavil']\nfichier : 418 de id : 00010412_s006 \n\nmale\n['keppra', 'percocet', 'glipizide']\n['keppra', 'percocet', 'glipizide']\nfichier : 419 de id : 00010715_s001 \n\nmale\nnot specified\n['aspirin', 'lisinopril', 'hydralazine']\n['aspirin', 'lisinopril', 'hydralazine']\nfichier : 420 de id : 00010709_s001 \n\nfemale\n['dilantin', 'aspirin', 'ativan', 'lovenox', 'protonix', 'protonix']\n['dilantin', 'aspirin', 'ativan', 'lovenox', 'protonix', 'protonix']\nfichier : 421 de id : 00010270_s001 \n\nfemale\n['dilantin', 'lorazepam', 'morphine']\n['dilantin', 'lorazepam', 'morphine']\nfichier : 422 de id : 00010270_s003 \n\nfemale\n['dilantin', 'aspirin', 'ativan', 'lovenox', 'protonix', 'protonix']\n['dilantin', 'aspirin', 'ativan', 'lovenox', 'protonix', 'protonix']\nfichier : 423 de id : 00010270_s002 \n\nfemale\n['dilantin', 'midazolam', 'coumadin']\n['dilantin', 'midazolam', 'coumadin']\nfichier : 424 de id : 00010270_s004 \n\nmale\n['dilantin', 'aspirin', 'diovan', 'heparin', 'protonix', 'seroquel', 'protonix', 'carvedilol']\n['dilantin', 'aspirin', 'diovan', 'heparin', 'protonix', 'seroquel', 'protonix', 'carvedilol']\nfichier : 425 de id : 00010300_s001 \n\nmale\n['albuterol']\n['albuterol']\nfichier : 426 de id : 00010347_s001 \n\nmale\n['ativan', 'etomidate', 'rocuronium', 'vecuronium']\n['ativan', 'etomidate', 'rocuronium', 'vecuronium']\nfichier : 427 de id : 00010387_s001 \n\nfemale\n['plavix', 'metformin', 'avandia', 'motrin']\n['plavix', 'metformin', 'avandia', 'motrin']\nfichier : 428 de id : 00006493_s001 \n\nmale\n['fentanyl', 'insulin', 'heparin', 'asa', 'labetalol', 'lipitor', 'vancomycin', 'pantoprazole']\n['fentanyl', 'insulin', 'heparin', 'asa', 'labetalol', 'lipitor', 'vancomycin', 'pantoprazole']\nfichier : 429 de id : 00006980_s001 \n\nmale\n['fentanyl', 'insulin', 'heparin', 'asa', 'labetalol', 'lipitor', 'vancomycin', 'pantoprazole']\n['fentanyl', 'insulin', 'heparin', 'asa', 'labetalol', 'lipitor', 
'vancomycin', 'pantoprazole']\nfichier : 430 de id : 00006980_s002 \n\nmale\n['insulin', 'plavix', 'heparin', 'asa', 'pantoprazole', 'zocor', 'lopressor']\n['insulin', 'plavix', 'heparin', 'asa', 'pantoprazole', 'zocor', 'lopressor']\nfichier : 431 de id : 00006917_s001 \n\nfemale\n['albuterol', 'rifampin']\n['albuterol', 'rifampin']\nfichier : 432 de id : 00006815_s001 \n\nfemale\n['insulin', 'amiodipine', 'midazolam', 'protonix', 'hydralazine', 'zocor', 'protonix']\n['insulin', 'amiodipine', 'midazolam', 'protonix', 'hydralazine', 'zocor', 'protonix']\nfichier : 433 de id : 00007073_s001 \n\nmale\n['vancomycin', 'zosyn', 'zosyn']\n['vancomycin', 'zosyn', 'zosyn']\nfichier : 434 de id : 00007834_s001 \n\nmale\n['vancomycin', 'zosyn', 'zosyn']\n['vancomycin', 'zosyn', 'zosyn']\nfichier : 435 de id : 00007834_s002 \n\nfemale\n['insulin', 'nifedipine', 'asa', 'zocor']\n['insulin', 'nifedipine', 'asa', 'zocor']\nfichier : 436 de id : 00007848_s001 \n\nfemale\n['norvasc', 'lisinopril', 'xanax']\n['norvasc', 'lisinopril', 'xanax']\nfichier : 437 de id : 00007862_s001 \n\nfemale\n['aspirin', 'lisinopril', 'lipitor', 'diltiazem']\n['aspirin', 'lisinopril', 'lipitor', 'diltiazem']\nfichier : 438 de id : 00002744_s001 \n\nmale\n['hiv']\n['hiv']\nfichier : 439 de id : 00006567_s001 \n\nfemale\n[]\n[]\nfichier : 440 de id : 00006533_s002 \n\nfemale\n['ativan', 'fentanyl', 'versed', 'heparin', 'asa', 'vancomycin', 'zosyn', 'zosyn']\n['ativan', 'fentanyl', 'versed', 'heparin', 'asa', 'vancomycin', 'zosyn', 'zosyn']\nfichier : 441 de id : 00006533_s001 \n\n['coreg', 'asa', 'baclofen']\n['coreg', 'asa', 'baclofen']\nfichier : 442 de id : 00008037_s001 \n\nfemale\n[]\n[]\nfichier : 443 de id : 00008066_s001 \n\n['aspirin', 'metoprolol', 'protonix', 'protonix']\n['aspirin', 'metoprolol', 'protonix', 'protonix']\nfichier : 444 de id : 00008090_s001 \n\nfemale\n['lovenox', 'nifedipine', 'asa']\n['lovenox', 'nifedipine', 'asa']\nfichier : 445 de id : 00005573_s001 \n\nfemale\n['calcium']\n['calcium']\nfichier : 446 de id : 00005573_s002 \n\nmale\n['aspirin', 'norvasc', 'hydralazine']\n['aspirin', 'norvasc', 'hydralazine']\nfichier : 447 de id : 00003612_s001 \n\nmale\n['ativan', 'norepinephrine']\n['ativan', 'norepinephrine']\nfichier : 448 de id : 00008240_s001 \n\n['hiv', 'morphine', 'alprazolam']\n['hiv', 'morphine', 'alprazolam']\nfichier : 449 de id : 00008138_s001 \n\n['plavix', 'lorazepam', 'metoprolol', 'demerol', 'asa', 'famotidine']\n['plavix', 'lorazepam', 'metoprolol', 'demerol', 'asa', 'famotidine']\nfichier : 450 de id : 00008130_s001 \n\nmale\n['norvasc', 'insulin']\n['norvasc', 'insulin']\nfichier : 451 de id : 00008134_s001 \n\nmale\n['metoprolol']\n['metoprolol']\nfichier : 452 de id : 00008455_s001 \n\nfemale\n[]\n[]\nfichier : 453 de id : 00007671_s002 \n\nfemale\n['hctz', 'asa', 'lipitor', 'atenolol', 'diltiazem']\n['hctz', 'asa', 'lipitor', 'atenolol', 'diltiazem']\nfichier : 454 de id : 00002744_s002 \n\nfemale\n[]\n[]\nfichier : 455 de id : 00002744_s003 \n\nmale\n[]\n[]\nfichier : 456 de id : 00006906_s001 \n\nfemale\n[]\n[]\nfichier : 457 de id : 00006249_s003 \n\nfemale\n['aspirin', 'omeprazole', 'metoprolol', 'pravastatin']\n['aspirin', 'omeprazole', 'metoprolol', 'pravastatin']\nfichier : 458 de id : 00006249_s004 \n\nfemale\n['zoloft', 'celexa', 'midodrine', 'seroquel', 'klonopin', 'albuterol']\n['zoloft', 'celexa', 'midodrine', 'seroquel', 'klonopin', 'albuterol']\nfichier : 459 de id : 00006253_s003 \n\nmale\n['ativan', 'lovenox', 'lorazepam', 'hydralazine', 'atropine', 
'penicillin', 'famotidine', 'clindamycin']\n['ativan', 'lovenox', 'lorazepam', 'hydralazine', 'atropine', 'penicillin', 'famotidine', 'clindamycin']\nfichier : 460 de id : 00006694_s001 \n\nmale\n['ativan', 'fentanyl', 'lorazepam', 'tylenol', 'haldol', 'motrin']\n['ativan', 'fentanyl', 'lorazepam', 'tylenol', 'haldol', 'motrin']\nfichier : 461 de id : 00006687_s001 \n\nmale\n['protonix', 'vancomycin', 'protonix', 'xanax']\n['protonix', 'vancomycin', 'protonix', 'xanax']\nfichier : 462 de id : 00007021_s001 \n\nfemale\n['metoprolol', 'spiriva', 'zocor', 'advair']\n['metoprolol', 'spiriva', 'zocor', 'advair']\nfichier : 463 de id : 00007026_s001 \n\nfemale\n['lisinopril', 'metoprolol', 'asa', 'pantoprazole', 'atorvastatin']\n['lisinopril', 'metoprolol', 'asa', 'pantoprazole', 'atorvastatin']\nfichier : 464 de id : 00007026_s002 \n\nfemale\n['fentanyl', 'plavix', 'asa', 'lipitor', 'vancomycin', 'zosyn', 'zosyn']\n['fentanyl', 'plavix', 'asa', 'lipitor', 'vancomycin', 'zosyn', 'zosyn']\nfichier : 465 de id : 00005459_s001 \n\nmale\n['cymbalta', 'seroquel', 'advair']\n['cymbalta', 'seroquel', 'advair']\nfichier : 466 de id : 00006538_s001 \n\nfemale\n['aspirin', 'lovenox', 'insulin', 'pantoprazole']\n['aspirin', 'lovenox', 'insulin', 'pantoprazole']\nfichier : 467 de id : 00006473_s001 \n\nmale\n['lisinopril', 'zocor', 'coumadin']\n['lisinopril', 'zocor', 'coumadin']\nfichier : 468 de id : 00003612_s002 \n\nfemale\n['lovenox', 'insulin', 'metoprolol', 'asa', 'protonix', 'hydralazine', 'lipitor', 'protonix', 'oxycodone']\n['lovenox', 'insulin', 'metoprolol', 'asa', 'protonix', 'hydralazine', 'lipitor', 'protonix', 'oxycodone']\nfichier : 469 de id : 00005573_s003 \n\nfemale\n[]\n[]\nfichier : 470 de id : 00006801_s001 \n\nfemale\n[]\n[]\nfichier : 471 de id : 00007671_s001 \n\nfemale\n['lisinopril', 'zocor']\n['lisinopril', 'zocor']\nfichier : 472 de id : 00007906_s001 \n\nmale\n['fentanyl', 'morphine', 'rocuronium']\n['fentanyl', 'morphine', 'rocuronium']\nfichier : 473 de id : 00008644_s002 \n\nmale\n['heparin']\n['heparin']\nfichier : 474 de id : 00008644_s001 \n\n['insulin', 'heparin', 'asa', 'albuterol', 'pantoprazole', 'rifampin']\n['insulin', 'heparin', 'asa', 'albuterol', 'pantoprazole', 'rifampin']\nfichier : 475 de id : 00008695_s001 \n\nfemale\n['percocet']\n['percocet']\nfichier : 476 de id : 00008684_s001 \n\nfemale\n['pantoprazole', 'dextrose']\n['pantoprazole', 'dextrose']\nfichier : 477 de id : 00008684_s002 \n\nmale\n['zoloft', 'abilify', 'doxycycline']\n['zoloft', 'abilify', 'doxycycline']\nfichier : 478 de id : 00008249_s001 \n\nmale\n['aspirin', 'metoprolol', 'zocor', 'coumadin']\n['aspirin', 'metoprolol', 'zocor', 'coumadin']\nfichier : 479 de id : 00007732_s001 \n\nfemale\n['metformin', 'diovan', 'asa']\n['metformin', 'diovan', 'asa']\nfichier : 480 de id : 00007161_s001 \n\n['hiv']\n['hiv']\nfichier : 481 de id : 00008317_s001 \n\nmale\n['vancomycin']\n['vancomycin']\nfichier : 482 de id : 00008576_s001 \n\nmale\n['aspirin', 'heparin', 'protonix', 'hydralazine', 'protonix']\n['aspirin', 'heparin', 'protonix', 'hydralazine', 'protonix']\nfichier : 483 de id : 00008437_s001 \n\nmale\n['omeprazole', 'hctz', 'levothyroxine', 'asa', 'lopressor', 'pravastatin']\n['omeprazole', 'hctz', 'levothyroxine', 'asa', 'lopressor', 'pravastatin']\nfichier : 484 de id : 00008408_s001 \n\nmale\n['protonix', 'vancomycin', 'zosyn', 'dopamine', 'zosyn', 'protonix', 'pyridoxine', 'rifampin', 'dexamethasone', 'acyclovir', 'brinzolamide']\n['protonix', 'vancomycin', 'zosyn', 'dopamine', 'zosyn', 
'protonix', 'pyridoxine', 'rifampin', 'dexamethasone', 'acyclovir', 'brinzolamide']\nfichier : 485 de id : 00008443_s001 \n\nfemale\n[]\n[]\nfichier : 486 de id : 00008446_s001 \n\nfemale\n['protonix', 'dopamine', 'norepinephrine', 'bicarb', 'protonix']\n['protonix', 'dopamine', 'norepinephrine', 'bicarb', 'protonix']\nfichier : 487 de id : 00007331_s001 \n\nfemale\n['metoprolol', 'ciloxan', 'augmentin', 'citalopram', 'lexapro']\n['metoprolol', 'ciloxan', 'augmentin', 'citalopram', 'lexapro']\nfichier : 488 de id : 00008867_s001 \n\nfemale\n['fentanyl', 'vecuronium', 'midazolam']\n['fentanyl', 'vecuronium', 'midazolam']\nfichier : 489 de id : 00008867_s002 \n\nmale\n['lisinopril', 'asa', 'lipitor', 'baclofen', 'oxycodone']\n['lisinopril', 'asa', 'lipitor', 'baclofen', 'oxycodone']\nfichier : 490 de id : 00008840_s002 \n\nmale\n['aspirin', 'ativan', 'lorazepam', 'hydralazine']\n['aspirin', 'ativan', 'lorazepam', 'hydralazine']\nfichier : 491 de id : 00008840_s001 \n\nmale\n['vancomycin', 'antibiotics', 'clonidine']\n['vancomycin', 'antibiotics', 'clonidine']\nfichier : 492 de id : 00009488_s001 \n\nfemale\n['serax', 'thiamine', 'famotidine', 'haldol']\n['serax', 'thiamine', 'famotidine', 'haldol']\nfichier : 493 de id : 00009666_s001 \n\nmale\n['vancomycin']\n['vancomycin']\nfichier : 494 de id : 00009604_s001 \n\nmale\n['heparin', 'morphine']\n['heparin', 'morphine']\nfichier : 495 de id : 00009676_s002 \n\nmale\n['fentanyl', 'heparin', 'labetalol']\n['fentanyl', 'heparin', 'labetalol']\nfichier : 496 de id : 00009676_s003 \n\nmale\n['heparin', 'morphine']\n['heparin', 'morphine']\nfichier : 497 de id : 00009676_s001 \n\nmale\n['coumadin']\n['coumadin']\nfichier : 498 de id : 00009002_s001 \n\nmale\n[]\n[]\nfichier : 499 de id : 00009002_s002 \n\nfemale\n['acyclovir', 'cipro', 'ipratropium', 'pulmicort']\n['acyclovir', 'cipro', 'ipratropium', 'pulmicort']\nfichier : 500 de id : 00009072_s002 \n\nfemale\n['benadryl']\n['benadryl']\nfichier : 501 de id : 00009072_s001 \n\nfemale\n['ativan', 'tylenol', 'opioids', 'cannabis', 'xanax', 'haldol']\n['ativan', 'tylenol', 'opioids', 'cannabis', 'xanax', 'haldol']\nfichier : 502 de id : 00009072_s003 \n\nmale\n['fentanyl', 'insulin', 'heparin', 'carvedilol']\n['fentanyl', 'insulin', 'heparin', 'carvedilol']\nfichier : 503 de id : 00009074_s001 \n\n['oxycodone']\n['oxycodone']\nfichier : 504 de id : 00009074_s002 \n\nmale\n['fentanyl', 'heparin', 'midazolam', 'vancomycin', 'norepinephrine', 'potassium']\n['fentanyl', 'heparin', 'midazolam', 'vancomycin', 'norepinephrine', 'potassium']\nfichier : 505 de id : 00009051_s002 \n\nmale\n['lisinopril', 'albuterol', 'motrin']\n['lisinopril', 'albuterol', 'motrin']\nfichier : 506 de id : 00009051_s001 \n\nfemale\n['aspirin', 'lorazepam', 'tylenol']\n['aspirin', 'lorazepam', 'tylenol']\nfichier : 507 de id : 00009539_s001 \n\nfemale\n[]\n[]\nfichier : 508 de id : 00009560_s001 \n\n[]\n[]\nfichier : 509 de id : 00009583_s001 \n\nfemale\n['fentanyl', 'midazolam', 'coumadin']\n['fentanyl', 'midazolam', 'coumadin']\nfichier : 510 de id : 00009853_s001 \n\nmale\n['fentanyl', 'heparin', 'vecuronium']\n['fentanyl', 'heparin', 'vecuronium']\nfichier : 511 de id : 00008950_s002 \n\n['plavix', 'diovan', 'asa']\n['plavix', 'diovan', 'asa']\nfichier : 512 de id : 00008950_s001 \n\nfemale\n['metoprolol']\n['metoprolol']\nfichier : 513 de id : 00009725_s001 \n\nfemale\n['hydralazine', 'lopressor', 'bentyl']\n['hydralazine', 'lopressor', 'bentyl']\nfichier : 514 de id : 00009725_s002 \n\nnot specified\n[]\n[]\nfichier : 515 
de id : 00009710_s001 \n\nfemale\n['lisinopril', 'atorvastatin']\n['lisinopril', 'atorvastatin']\nfichier : 516 de id : 00009375_s001 \n\nmale\n['motrin']\n['motrin']\nfichier : 517 de id : 00009358_s001 \n\nfemale\n['insulin', 'bactrim', 'heparin', 'morphine', 'asa', 'famotidine', 'ipratropium']\n['insulin', 'bactrim', 'heparin', 'morphine', 'asa', 'famotidine', 'ipratropium']\nfichier : 518 de id : 00009333_s001 \n\nmale\n['lovenox', 'atropine', 'isuprel']\n['lovenox', 'atropine', 'isuprel']\nfichier : 519 de id : 00009165_s001 \n\nfemale\n['calcium', 'plavix', 'lisinopril', 'asa', 'spiriva', 'pitavastatin', 'ambien', 'fenofibrate', 'carbonate', 'indapamide']\n['calcium', 'plavix', 'lisinopril', 'asa', 'spiriva', 'pitavastatin', 'ambien', 'fenofibrate', 'carbonate', 'indapamide']\nfichier : 520 de id : 00009147_s001 \n\nfemale\n['fentanyl', 'heparin', 'vecuronium']\n['fentanyl', 'heparin', 'vecuronium']\nfichier : 521 de id : 00009147_s002 \n\nfemale\n[]\n[]\nfichier : 522 de id : 00010155_s001 \n\nfemale\n['fentanyl', 'coreg', 'lisinopril', 'vancomycin', 'zocor']\n['fentanyl', 'coreg', 'lisinopril', 'vancomycin', 'zocor']\nfichier : 523 de id : 00010107_s001 \n\nmale\n['insulin', 'plavix', 'bactrim', 'hiv', 'heparin', 'hydralazine', 'atorvastatin']\n['insulin', 'plavix', 'bactrim', 'hiv', 'heparin', 'hydralazine', 'atorvastatin']\nfichier : 524 de id : 00010101_s001 \n\nfemale\n[]\n[]\nfichier : 525 de id : 00009910_s001 \n\nfemale\n['vitamins', 'tylenol', 'benadryl', 'potassium', 'prenatal']\n['vitamins', 'tylenol', 'benadryl', 'potassium', 'prenatal']\nfichier : 526 de id : 00009945_s001 \n\nmale\n[]\n[]\nfichier : 527 de id : 00009977_s001 \n\nmale\n['aspirin', 'norvasc', 'lisinopril', 'metoprolol', 'lipitor']\n['aspirin', 'norvasc', 'lisinopril', 'metoprolol', 'lipitor']\nfichier : 528 de id : 00009977_s002 \n\nfemale\n[]\n[]\nfichier : 529 de id : 00009966_s001 \n\nmale\n['fentanyl', 'versed', 'heparin', 'vecuronium']\n['fentanyl', 'versed', 'heparin', 'vecuronium']\nfichier : 530 de id : 00010539_s002 \n\nmale\n['aricept']\n['aricept']\nfichier : 531 de id : 00010539_s001 \n\nmale\n['versed', 'protonix', 'vancomycin', 'protonix']\n['versed', 'protonix', 'vancomycin', 'protonix']\nfichier : 532 de id : 00010501_s001 \n\nmale\n['heparin', 'vancomycin']\n['heparin', 'vancomycin']\nfichier : 533 de id : 00010501_s002 \n\nmale\n['ativan']\n['ativan']\nfichier : 534 de id : 00010586_s001 \n\nmale\nnot specified\n['albuterol']\n['albuterol']\nfichier : 535 de id : 00010544_s002 \n\nnot specified\n['albuterol']\n['albuterol']\nfichier : 536 de id : 00010544_s001 \n\nfemale\n[]\n[]\nfichier : 537 de id : 00010662_s001 \n\nfemale\n['lipitor']\n['lipitor']\nfichier : 538 de id : 00010624_s001 \n\nmale\n['lovastatin', 'metoprolol']\n['lovastatin', 'metoprolol']\nfichier : 539 de id : 00010674_s001 \n\nfemale\n['tylenol', 'vancomycin', 'famotidine', 'norepinephrine']\n['tylenol', 'vancomycin', 'famotidine', 'norepinephrine']\nfichier : 540 de id : 00010617_s001 \n\nfemale\n['levothyroxine']\n['levothyroxine']\nfichier : 541 de id : 00010620_s002 \n\nfemale\n['coreg']\n['coreg']\nfichier : 542 de id : 00010620_s001 \n\nfemale\nnot specified\n['lovenox', 'hydralazine', 'atorvastatin', 'carvedilol']\n['lovenox', 'hydralazine', 'atorvastatin', 'carvedilol']\nfichier : 543 de id : 00010292_s001 \n\nfemale\nnot specified\n['elavil', 'asa', 'zocor', 'zanaflex', 'vasotec', 'zomig', 'cozaar', 'voltaren', 'folvite']\n['elavil', 'asa', 'zocor', 'zanaflex', 'vasotec', 'zomig', 'cozaar', 'voltaren', 
'folvite']\nfichier : 544 de id : 00010736_s001 \n\nmale\n[]\n[]\nfichier : 545 de id : 00010311_s001 \n\n['ativan', 'colace']\n['ativan', 'colace']\nfichier : 546 de id : 00010838_s001 \n\nmale\n['hiv']\n['hiv']\nfichier : 547 de id : 00010838_s002 \n\nmale\n[]\n[]\nfichier : 548 de id : 00010460_s001 \n\nfemale\n['heparin', 'trazodone', 'dilaudid', 'oxycodone']\n['heparin', 'trazodone', 'dilaudid', 'oxycodone']\nfichier : 549 de id : 00010472_s001 \n\nfemale\n['heparin', 'trazodone', 'dilaudid', 'oxycodone']\n['heparin', 'trazodone', 'dilaudid', 'oxycodone']\nfichier : 550 de id : 00010472_s002 \n\nnot specified\n[]\n[]\nfichier : 551 de id : 00010444_s001 \n\nfemale\n['motrin']\n['motrin']\nfichier : 552 de id : 00010429_s001 \n\nmale\n['aspirin', 'heparin', 'metoprolol']\n['aspirin', 'heparin', 'metoprolol']\nfichier : 553 de id : 00010419_s001 \n\nmale\n['aspirin', 'plavix', 'heparin', 'protonix', 'diltiazem', 'protonix', 'trazodone']\n['aspirin', 'plavix', 'heparin', 'protonix', 'diltiazem', 'protonix', 'trazodone']\nfichier : 554 de id : 00010422_s001 \n\nmale\n[]\n[]\nfichier : 555 de id : 00010442_s001 \n\nfemale\n['morphine', 'protonix', 'protonix', 'captopril']\n['morphine', 'protonix', 'protonix', 'captopril']\nfichier : 556 de id : 00010017_s001 \n\nfemale\n['baclofen', 'alprazolam', 'zolpidem']\n['baclofen', 'alprazolam', 'zolpidem']\nfichier : 557 de id : 00010095_s001 \n\nmale\n['vancomycin']\n['vancomycin']\nfichier : 558 de id : 00010075_s002 \n\nmale\n['vancomycin']\n['vancomycin']\nfichier : 559 de id : 00010075_s001 \n\nfemale\n['aspirin']\n['aspirin']\nfichier : 560 de id : 00010085_s001 \n\n" ], [ "Data_txt", "_____no_output_____" ], [ "Data_medic ", "_____no_output_____" ], [ "Data_medic = Data_medic.drop('ID',1)\nData_medic", "_____no_output_____" ], [ "# Merge two Dataframes on index of both the dataframes\nmergedDf = Data_txt.merge(Data_medic, left_index=True, right_index=True)", "_____no_output_____" ], [ "mergedDf", "_____no_output_____" ], [ "mergedDf.to_csv('/content/gdrive/MyDrive/DS PROJECT/DATA/data_txt001.csv')", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c1fabf2cc0892d2ffc568ba55f64e6706ddfd9
14,456
ipynb
Jupyter Notebook
Policy Based/CONCEPTS.ipynb
abhishekdabas31/Reinforcement-Learning-World
92bb96e4e8f40df39b25d29b8b09ea3b6b0742f0
[ "MIT" ]
null
null
null
Policy Based/CONCEPTS.ipynb
abhishekdabas31/Reinforcement-Learning-World
92bb96e4e8f40df39b25d29b8b09ea3b6b0742f0
[ "MIT" ]
null
null
null
Policy Based/CONCEPTS.ipynb
abhishekdabas31/Reinforcement-Learning-World
92bb96e4e8f40df39b25d29b8b09ea3b6b0742f0
[ "MIT" ]
1
2020-12-26T05:10:08.000Z
2020-12-26T05:10:08.000Z
44.073171
568
0.660003
[ [ [ "## Introduction to Poilcy Based methods : Reinforcement Learning", "_____no_output_____" ], [ "Reinforcement learning is utlimately about learning the optimal policy from interaction with the enviroment.\n\nIn value based methods we first try to find an estimate of optimal value function. For small states spaces this function is represented by a table where each row corrsponds to a state space and column corresponds to each action. To find the optimal policy we find the state action pair with highest value estimate.\n\n", "_____no_output_____" ], [ "If the number of state space is very large like in case of cart pole , we use neural network to get most optimal action based on output of the neural network. But even here we find optimal action after finding optimal value function. But,what if we completely want to skip the step of finding value function.Yes, we can do it and this comes under policy based methods.\n\n<img src=\"bad_value_state_example.PNG\" alt=\"J\" align=\"left\"/>", "_____no_output_____" ], [ "Now consider the cart pole problem for hill climbing. The agent has two possible actions, it mght go left or right based on the position of the pole. We can construct a neural network that can take the state as input and the output of the network can decide the probabilities of taking an action. Our objective here is to then determine the weights of the network for finding the most optimal probabiities of the actions. It is an iterative process, where the weights are amended in each iterations to optimize the weights for finding the most optimal policy.\n\n<img src=\"cartpole policy neural.PNG\" alt=\"J\" align=\"left\"/>", "_____no_output_____" ], [ "### Policy-Based Methods\n* With value-based methods, the agent uses its experience with the environment to maintain an estimate of the optimal action-value function. The optimal policy is then obtained from the optimal action-value function estimate.\n* Policy-based methods directly learn the optimal policy, without having to maintain a separate value function estimate.", "_____no_output_____" ], [ "### Policy Function Approximation\n* In deep reinforcement learning, it is common to represent the policy with a neural network. \n** This network takes the environment state as input.\n** If the environment has discrete actions, the output layer has a node for each possible action and contains the probability that the agent should select each possible action.\n* The weights in this neural network are initially set to random values. Then, the agent updates the weights as it interacts with (and learns more about) the environment.\n* Policy-based methods can learn either stochastic or deterministic policies, and they can be used to solve environments with either finite or continuous action spaces.", "_____no_output_____" ], [ "### Hill Climbing\n* Hill climbing is an iterative algorithm that can be used to find the weights $\\theta$ for an optimal policy.\n* At each iteration,\n** We slightly perturb the values of the current best estimate for the weights $\\theta_{best}$, to yield a new set of weights.\n** These new weights are then used to collect an episode. If the new weights $\\theta_{new}$ resulted in higher return than the old weights, then we set $\\theta_{best} \\leftarrow \\theta_{new}$.", "_____no_output_____" ], [ "The agent's goal is to always maximize the expected return (J). Lets refer to the network weights as $\\theta$. 
$\\theta$ encodes the policy that makes some actions more likely than others, and those actions influence the reward, which affects the expected return value $J$.\n\n<img src =\"jtheta_obj_fn.PNG\" alt=\"J and Theta\" align=\"left\"/>", "_____no_output_____" ], [ "We refer to the general class of approaches that find $\\arg\\max_{\\theta}J(\\theta)$ through randomly perturbing the most recent best estimate as stochastic policy search. Likewise, we can refer to J as an objective function, which just refers to the fact that we'd like to maximize it!", "_____no_output_____" ], [ "* Consider a case where a neural network has only 2 weights: $\\theta_0$ and $\\theta_1$. Then we can plot the expected return J as a function of the values of both weights.\n Remember here that the agent's goal is to maximize the expected return J. To do so, we first initialize the weights randomly. We collect a single episode with the policy that corresponds to those weights and then record the return. This return is going to be an estimate of what the surface looks like at that value of $\\theta$.\n\n<img src =\"hill_climbing_first.PNG\" alt=\"J and Theta\" align=\"left\"/>", "_____no_output_____" ], [ "* Now that we have found the return at this point, we add some noise (say Gaussian) to our candidate weights for $\\theta$. To see how good those new weights are, we'll use the policy that they give us to again interact with the environment for an episode and add up the return. If the new weights give us more return than our current best estimate, we focus our attention on our new value.\n\n<img src =\"hill_climbing_second.PNG\" alt=\"J and Theta\" align=\"left\"/>", "_____no_output_____" ], [ "* Then we just repeat by iteratively proposing new policies in the hope that they outperform the existing policy.\n\n<img src =\"hill_climbing_third.PNG\" alt=\"J and Theta\" align=\"left\"/>", "_____no_output_____" ], [ "* In the event that they don't,\n\n<img src =\"hill_climbing_fourth.PNG\" alt=\"J and Theta\" align=\"left\"/>", "_____no_output_____" ], [ "* Then we just go back to our last best guess for the optimal policy \n\n<img src =\"hill_climbing_fifth.PNG\" alt=\"J\" align=\"left\"/>", "_____no_output_____" ], [ "* and iterate until we end up with the optimal policy.\n\n<img src = \"hill_climbing_sixth.PNG\" alt=\"J\" align=\"left\"/>", "_____no_output_____" ], [ "### Beyond Hill Climbing\n* Steepest ascent hill climbing is a variation of hill climbing that chooses a small number of neighboring policies at each iteration and chooses the best among them.\n* Simulated annealing uses a pre-defined schedule to control how the policy space is explored, and gradually reduces the search radius as we get closer to the optimal solution.\n* Adaptive noise scaling decreases the search radius with each iteration when a new best policy is found, and otherwise increases the search radius.\n* The cross-entropy method iteratively suggests a small number of neighboring policies, and uses a small percentage of the best performing policies to calculate a new estimate.\n* The evolution strategies technique considers the return corresponding to each candidate policy. The policy estimate at the next iteration is a weighted sum of all of the candidate policies, where policies that got higher return are given higher weight.", "_____no_output_____" ], [ "### Why Policy-Based Methods?\n* There are three reasons why we consider policy-based methods:\n 1. 
Simplicity: Policy-based methods directly get to the problem at hand (estimating the optimal policy), without having to store a bunch of additional data (i.e., the action values) that may not be useful.\n 2. Stochastic policies: Unlike value-based methods, policy-based methods can learn true stochastic policies.\n 3. Continuous action spaces: Policy-based methods are well-suited for continuous action spaces.", "_____no_output_____" ], [ "### What are Policy Gradient Methods?\n* Policy-based methods are a class of algorithms that search directly for the optimal policy, without simultaneously maintaining value function estimates.\n* Policy gradient methods are a subclass of policy-based methods that estimate the weights of an optimal policy through gradient ascent.\nIn this lesson, we represent the policy with a neural network, where our goal is to find the weights $\\theta$ of the network that maximize expected return.", "_____no_output_____" ], [ "### The Big Picture\n* The policy gradient method will iteratively amend the policy network weights to:\n  * make (state, action) pairs that resulted in positive return more likely, and\n  * make (state, action) pairs that resulted in negative return less likely.", "_____no_output_____" ], [ "### Problem Setup\n* A trajectory $\\tau$ is a state-action sequence $s_0, a_0, \\ldots, s_H, a_H, s_{H+1}$.\n* In this lesson, we will use the notation $R(\\tau)$ to refer to the return corresponding to trajectory $\\tau$.\n* Our goal is to find the weights $\\theta$ of the policy network to maximize the expected return $U(\\theta) := \\sum_\\tau \\mathbb{P}(\\tau;\\theta)R(\\tau)$.", "_____no_output_____" ], [ "### REINFORCE\nThe pseudocode for REINFORCE is as follows:\n* Use the policy $\\pi_\\theta$ to collect $m$ trajectories $\\{ \\tau^{(1)}, \\tau^{(2)}, \\ldots, \\tau^{(m)}\\}$ with horizon $H$. 
We refer to the $i$-th trajectory as\n$\\tau^{(i)} = (s_0^{(i)}, a_0^{(i)}, \\ldots, s_H^{(i)}, a_H^{(i)}, s_{H+1}^{(i)})$.\n* Use the trajectories to estimate the gradient $\\nabla_\\theta U(\\theta)$:\n$\\nabla_\\theta U(\\theta) \\approx \\hat{g} := \\frac{1}{m}\\sum_{i=1}^m \\sum_{t=0}^{H} \\nabla_\\theta \\log \\pi_\\theta(a_t^{(i)}|s_t^{(i)}) R(\\tau^{(i)})$\n* Update the weights of the policy:\n$\\theta \\leftarrow \\theta + \\alpha \\hat{g}$\n \nLoop over steps 1-3.", "_____no_output_____" ], [ "* Ultimately we use gradient ascent over a set of states, actions and rewards called a trajectory, and find the maximum of the objective function, which gives us the optimal weights $\\theta$ that maximize $J$.", "_____no_output_____" ], [ "* REINFORCE can solve Markov Decision Processes (MDPs) with either discrete or continuous action spaces.", "_____no_output_____" ], [ "# Conclusion\nHere I have explained hill climbing and the REINFORCE method within policy-based methods.", "_____no_output_____" ], [ "# References\n * The canonical reference for reinforcement learning is the [Reinforcement Learning book by Sutton and Barto](http://www.incompleteideas.net/book/the-book-2nd.html).\n * Referenced this [paper](https://people.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf) for understanding of REINFORCE.\n * Referenced this [blog](https://openai.com/blog/evolution-strategies/) for policy-based methods.", "_____no_output_____" ], [ "#### Copyright 2020 Sonali Vinodkumar Singh\n\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7c1fda916b688db1a2d8fba722cd0738670e7ec
23,892
ipynb
Jupyter Notebook
services/energy_consumption_forecast/lstm/LSTM (Iterative).ipynb
phgupta/XBOS
acc59f33600943569d62c145dae11a1775296b44
[ "BSD-2-Clause" ]
27
2016-04-26T17:26:56.000Z
2021-08-22T15:11:55.000Z
services/energy_consumption_forecast/lstm/LSTM (Iterative).ipynb
phgupta/XBOS
acc59f33600943569d62c145dae11a1775296b44
[ "BSD-2-Clause" ]
75
2017-02-17T18:00:37.000Z
2019-06-20T04:12:08.000Z
services/energy_consumption_forecast/lstm/LSTM (Iterative).ipynb
vishalbelsare/XBOS
1fea0b024d97ae142d97b3a94510403928ed44b7
[ "BSD-2-Clause" ]
20
2017-07-28T14:50:04.000Z
2020-01-16T05:04:54.000Z
24.913452
132
0.542106
[ [ [ "# To Do\n\n1. Try different architectures\n2. Try stateful/stateless LSTM.\n3. Add OAT, holidays.\n4. Check if data has consecutive blocks.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers import Dropout, Dense, LSTM\nfrom statsmodels.tsa.stattools import adfuller\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "power_data_folder = '/Users/pranavhgupta/Documents/GitHub/XBOS_HVAC_Predictions/micro-service/data'\nhvac_states_data_folder = '/Users/pranavhgupta/Documents/GitHub/XBOS_HVAC_Predictions/micro-service/hvac_states_batch_data'\nsite = 'avenal-animal-shelter'", "_____no_output_____" ] ], [ [ "# Import data", "_____no_output_____" ], [ "## Power data", "_____no_output_____" ] ], [ [ "df_power = pd.read_csv(power_data_folder + '/power_' + site + '.csv', index_col=[0], parse_dates=True)\ndf_power.columns = ['power']\ndf_power.head()", "_____no_output_____" ], [ "df_power.plot(figsize=(18,5))", "_____no_output_____" ] ], [ [ "### Check for missing data", "_____no_output_____" ] ], [ [ "df_power.isna().any()", "_____no_output_____" ] ], [ [ "### Clean data", "_____no_output_____" ] ], [ [ "# Resample to 5min\ndf_processed = df_power.resample('5T').mean()\n\ndf_processed.head()", "_____no_output_____" ], [ "df_processed.plot(figsize=(18,5))", "_____no_output_____" ] ], [ [ "### Check for missing data", "_____no_output_____" ] ], [ [ "print(df_processed.isna().any())\nprint('\\n')\nmissing = df_processed['power'].isnull().sum()\ntotal = df_processed['power'].shape[0]\nprint('% Missing data for power: ', (missing/total)*100, '%')", "_____no_output_____" ] ], [ [ "### Depending on the percent missing data, either drop it or forward fill the NaN's", "_____no_output_____" ] ], [ [ "# Option 1: Drop NaN's\ndf_processed.dropna(inplace=True)\n\n# # Option 2: ffill NaN's\n# df_processed = df_processed.fillna(method='ffill')", "_____no_output_____" ] ], [ [ "### Normalize data", "_____no_output_____" ] ], [ [ "scaler = MinMaxScaler(feature_range=(0,1))\ndf_normalized = pd.DataFrame(scaler.fit_transform(df_processed), \n columns=df_processed.columns, index=df_processed.index)\ndf_normalized.head()", "_____no_output_____" ] ], [ [ "### Check for missing data", "_____no_output_____" ] ], [ [ "df_normalized.isna().any()", "_____no_output_____" ] ], [ [ "## Check for stationarity", "_____no_output_____" ] ], [ [ "result = adfuller(df_normalized['power'], autolag='AIC')\noutput = pd.Series(result[0:4], index=['Test Statistic', 'p-value', '#Lags Used',\n '#Observations Used'])\nfor key, value in result[4].items():\n output['Critical Value (%s)' % key] = value\n \noutput", "_____no_output_____" ] ], [ [ "## HVAC States data", "_____no_output_____" ] ], [ [ "df_hvac_states = pd.read_csv(hvac_states_data_folder + '/hvac_states_' + site + '.csv', \n index_col=[0], parse_dates=True)\ndf_hvac_states.columns = ['zone' + str(i) for i in range(len(df_hvac_states.columns))]\ndf_hvac_states.head()", "_____no_output_____" ] ], [ [ "### Check for missing data", "_____no_output_____" ] ], [ [ "df_hvac_states.isna().any()", "_____no_output_____" ] ], [ [ "### Convert categorical (HVAC states) into dummy variables", 
"_____no_output_____" ] ], [ [ "var_to_expand = df_hvac_states.columns\n\n# One-hot encode the HVAC states\nfor var in var_to_expand:\n\n add_var = pd.get_dummies(df_hvac_states[var], prefix=var, drop_first=True)\n\n # Add all the columns to the model data\n df_hvac_states = df_hvac_states.join(add_var)\n\n # Drop the original column that was expanded\n df_hvac_states.drop(columns=[var], inplace=True)\n \ndf_hvac_states.head()", "_____no_output_____" ], [ "# def func(row):\n# \"\"\" Possible situations: (0,0,0), (1,0,1), (0,1,2) --> 0, 1, 2\n \n# If all are same --> first element\n# If there is a majority among the 3 --> majority\n# If all are unique --> last element\n \n# \"\"\"\n\n# count = len(set(list(row.values)))\n# if count == 1:\n# return row.values[0]\n# elif count == 2:\n# max(set(list(row.values)), key=list(row.values).count)\n# else:\n# return row.values[-1]\n \n# resample_df_hvac = df_raw_hvac_states.resample('15T').apply(func)\n\n# resample_df_hvac = resample_df_hvac.fillna(method='ffill')\n# resample_df_hvac.isna().any()", "_____no_output_____" ] ], [ [ "# Join power and hvac_states data", "_____no_output_____" ] ], [ [ "# CHECK: pd.concat gives a lot of duplicate indices. \n# Try below code to see,\n# start = pd.Timestamp('2018-02-10 06:00:00+00:00')\n# df.loc[start]\n\ndf = pd.concat([df_normalized, df_hvac_states], axis=1)\ndf.head()", "_____no_output_____" ], [ "df = df.drop_duplicates()", "_____no_output_____" ], [ "missing = df.isnull().sum()\ntotal = df.shape[0]\nprint('missing data for power: ', (missing/total)*100, '%')", "_____no_output_____" ] ], [ [ "### Depending on the percent missing data, either drop it or forward fill the NaN's", "_____no_output_____" ] ], [ [ "# Option 1: Drop NaN's\ndf.dropna(inplace=True)\n\n# # Option 2: ffill NaN's\n# df = df.fillna(method='ffill')", "_____no_output_____" ] ], [ [ "# Visualizations", "_____no_output_____" ], [ "## Box plot", "_____no_output_____" ] ], [ [ "df_box_plot = pd.DataFrame(df['power'])\ndf_box_plot['quarter'] = df_box_plot.index.quarter\ndf_box_plot.boxplot(column='power', by='quarter')", "_____no_output_____" ] ], [ [ "## Histogram", "_____no_output_____" ] ], [ [ "df['power'].hist()", "_____no_output_____" ] ], [ [ "## ACF and PACF", "_____no_output_____" ] ], [ [ "fig1 = plot_acf(df_processed['power'], lags=50)\nfig2 = plot_pacf(df_processed['power'], lags=50)", "_____no_output_____" ] ], [ [ "# Prepare data", "_____no_output_____" ], [ "## Split into training & testing data", "_____no_output_____" ] ], [ [ "X_train = df[(df.index < '2019-01-01')]\ny_train = df.loc[(df.index < '2019-01-01'), 'power']\n\nX_test = df[(df.index >= '2019-01-01')]\ny_test = df.loc[(df.index >= '2019-01-01'), 'power']", "_____no_output_____" ] ], [ [ "## Prepare data for LSTM\n\nNote: NUM_TIMESTEPS is a hyper-parameter too!", "_____no_output_____" ] ], [ [ "# Number of columns in X_train\nNUM_FEATURES = len(X_train.columns)\n\n# A sequence contains NUM_TIMESTEPS number of elements and predicts NUM_MODEL_PREDICTIONS number of predictions\nNUM_TIMESTEPS = 24\n\n# Since this is an iterative method, model will predict only 1 timestep ahead\nNUM_MODEL_PREDICTIONS = 1\n\n# 4 hour predictions = Fourty eight 5min predictions\nNUM_ACTUAL_PREDICTIONS = 48", "_____no_output_____" ], [ "train_x, train_y = [], []\nfor i in range(NUM_TIMESTEPS, len(X_train)-NUM_MODEL_PREDICTIONS):\n train_x.append(X_train.values[i-NUM_TIMESTEPS:i])\n train_y.append(y_train.values[i:i+NUM_MODEL_PREDICTIONS]) \ntrain_x, train_y = np.array(train_x), 
np.array(train_y)\nprint(train_x.shape)\nprint(train_y.shape)\n\ntest_x, test_y = [], []\nfor i in range(NUM_TIMESTEPS, len(X_test)-NUM_MODEL_PREDICTIONS):\n test_x.append(X_test.values[i-NUM_TIMESTEPS:i])\n test_y.append(y_test.values[i:i+NUM_MODEL_PREDICTIONS]) \ntest_x, test_y = np.array(test_x), np.array(test_y)\nprint(test_x.shape)\nprint(test_y.shape)", "_____no_output_____" ] ], [ [ "# LSTM", "_____no_output_____" ] ], [ [ "model = Sequential([\n \n LSTM(units=128, input_shape=(NUM_TIMESTEPS, NUM_FEATURES), return_sequences=True),\n Dropout(0.2),\n \n LSTM(units=128, return_sequences=True),\n Dropout(0.2),\n \n LSTM(units=128, activation='softmax', return_sequences=False),\n Dropout(0.2),\n \n Dense(NUM_MODEL_PREDICTIONS)\n])\n\nmodel.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\nmodel.summary()", "_____no_output_____" ], [ "# Stop training if validation loss fails to decrease\ncallbacks = [EarlyStopping(monitor='val_loss', mode='min', verbose=1)]\n\nhistory = model.fit(train_x, train_y, \n epochs=100, batch_size=128, shuffle=False, \n validation_data=(test_x, test_y), callbacks=callbacks)", "_____no_output_____" ] ], [ [ "# Results", "_____no_output_____" ], [ "## Loss", "_____no_output_____" ] ], [ [ "train_loss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = [x for x in range(len(train_loss))]\n\ndf_train_loss = pd.DataFrame(train_loss, columns=['train_loss'], index=epochs)\ndf_val_loss = pd.DataFrame(val_loss, columns=['val_loss'], index=epochs)\n\ndf_loss = pd.concat([df_train_loss, df_val_loss], axis=1)", "_____no_output_____" ], [ "df_loss.plot(figsize=(18,5))", "_____no_output_____" ] ], [ [ "## Accuracy", "_____no_output_____" ] ], [ [ "train_acc = history.history['acc']\nval_acc = history.history['val_acc']\nepochs = [x for x in range(len(train_acc))]\n\ndf_train_acc = pd.DataFrame(train_acc, columns=['train_acc'], index=epochs)\ndf_val_acc = pd.DataFrame(val_acc, columns=['val_acc'], index=epochs)\n\ndf_acc = pd.concat([df_train_acc, df_val_acc], axis=1)", "_____no_output_____" ], [ "df_acc.plot(figsize=(18,5))", "_____no_output_____" ] ], [ [ "# Plot predicted & true values", "_____no_output_____" ] ], [ [ "# Make predictions through trained model\npred_y = model.predict(test_x)\n\n# Convert predicted and actual values to dataframes (for plotting)\ndf_y_pred = pd.DataFrame(scaler.inverse_transform(pred_y),\n index=y_test[NUM_TIMESTEPS:-NUM_MODEL_PREDICTIONS].index, \n columns=['power'])\n\ndf_y_true = pd.DataFrame(scaler.inverse_transform(test_y),\n index=y_test[NUM_TIMESTEPS:-NUM_MODEL_PREDICTIONS].index, \n columns=['power'])", "_____no_output_____" ], [ "df_y_pred.head()", "_____no_output_____" ], [ "df_plot = pd.concat([df_y_pred, df_y_true], axis=1)\ndf_plot.columns = ['pred', 'true']\n\ndf_plot.head()", "_____no_output_____" ], [ "df_plot.plot(figsize=(18,5))", "_____no_output_____" ], [ "# # Plot between two time periods\n\n# start = pd.Timestamp('2019-01-01 23:45:00+00:00')\n# end = pd.Timestamp('2019-02-01 23:45:00+00:00')\n# df_plot.loc[start:end].plot(figsize=(18,5))", "_____no_output_____" ] ], [ [ "# Make predictions through iterative fitting for a particular timestamp", "_____no_output_____" ], [ "## Choose a particular timestamp", "_____no_output_____" ] ], [ [ "timestamp = pd.Timestamp('2019-01-01 23:45:00+00:00')\n\n# Keep copy of timestamp to use it after the for loop\norig_timestamp = timestamp", "_____no_output_____" ], [ "X_test_pred = X_test.copy()\n\nfor _ in range(NUM_ACTUAL_PREDICTIONS):\n 
\n # Create test sequence\n test = np.array(X_test_pred.loc[:timestamp].tail(NUM_TIMESTEPS))\n test = np.reshape(test, (1, test.shape[0], test.shape[1]))\n \n # Increment timestamp\n timestamp = X_test_pred.loc[timestamp:].index.values[1]\n\n # Make prediction\n y_pred_power = model.predict(test)\n y_pred_power = list(y_pred_power[0])\n \n # Add prediction to end of test array\n X_test_pred.loc[timestamp, 'power'] = y_pred_power", "_____no_output_____" ], [ "# X_test_pred.loc[pd.Timestamp('2019-01-01 23:45:00+00:00'):].head(NUM_ACTUAL_PREDICTIONS)", "_____no_output_____" ], [ "# X_test.loc[pd.Timestamp('2019-01-01 23:45:00+00:00'):].head(NUM_ACTUAL_PREDICTIONS)", "_____no_output_____" ] ], [ [ "## Plot", "_____no_output_____" ] ], [ [ "arr_pred = np.reshape(X_test_pred.loc[orig_timestamp:,'power'].head(NUM_ACTUAL_PREDICTIONS).values, (-1, 1))\narr_true = np.reshape(X_test.loc[orig_timestamp:,'power'].head(NUM_ACTUAL_PREDICTIONS).values, (-1, 1))\n\ndf_pred = pd.DataFrame(scaler.inverse_transform(arr_pred),\n index=X_test_pred.loc[orig_timestamp:].head(NUM_ACTUAL_PREDICTIONS).index)\n\ndf_true = pd.DataFrame(scaler.inverse_transform(arr_true),\n index=X_test.loc[orig_timestamp:].head(NUM_ACTUAL_PREDICTIONS).index)", "_____no_output_____" ], [ "df_plot = pd.concat([df_pred, df_true], axis=1)\ndf_plot.columns = ['pred', 'true']", "_____no_output_____" ], [ "df_plot.plot(figsize=(18,5))", "_____no_output_____" ] ], [ [ "# Get accuracy and mse of the entire test set using iterative fitting\n\nNote: This takes a while to compute!", "_____no_output_____" ] ], [ [ "# These two lists store the entire dataframes of 48 predictions of each element in test set!\n# This is not really necessary but only to double check if the outputs are in the correct format\npredicted_values = []\ntrue_values = []\n\nfor i in range(NUM_TIMESTEPS, len(X_test)-NUM_ACTUAL_PREDICTIONS):\n \n # Keep copy of timestamp to store it for use after the for loop \n timestamp = pd.Timestamp(X_test.index.values[i])\n orig_timestamp = timestamp\n \n X_test_pred = X_test.copy()\n \n for _ in range(NUM_ACTUAL_PREDICTIONS):\n \n # Create test sequence\n test = np.array(X_test_pred.loc[:timestamp].tail(NUM_TIMESTEPS))\n test = np.reshape(test, (1, test.shape[0], test.shape[1]))\n\n # Increment timestamp\n timestamp = X_test_pred.loc[timestamp:].index.values[1]\n\n # Make prediction\n y_pred_power = model.predict(test)\n y_pred_power = list(y_pred_power[0])\n\n # Add prediction to end of test array\n X_test_pred.loc[timestamp, 'power'] = y_pred_power\n \n predicted_values.append(X_test_pred.loc[orig_timestamp:].head(NUM_ACTUAL_PREDICTIONS))\n true_values.append(X_test.loc[orig_timestamp:].head(NUM_ACTUAL_PREDICTIONS))", "_____no_output_____" ], [ "# Get only the power values from the original predicted_values and true_values lists and then reshape them \n# into the correct format for sklearn metrics' functions.\n\npredicted_power_values = []\ntrue_power_values = []\n\nfor df in predicted_values:\n predicted_power_values.append(df[['power']].values)\n \nfor df in true_values:\n true_power_values.append(df[['power']].values)\n \npredicted_power_values = np.array(predicted_power_values)\npredicted_power_values = np.reshape(predicted_power_values, \n (predicted_power_values.shape[0], predicted_power_values.shape[1]))\n\ntrue_power_values = np.array(true_power_values)\ntrue_power_values = np.reshape(true_power_values, \n (true_power_values.shape[0], true_power_values.shape[1]))", "_____no_output_____" ], [ "from sklearn.metrics import 
r2_score\nscore = r2_score(true_power_values, predicted_power_values)\nscore", "_____no_output_____" ], [ "from sklearn.metrics import mean_squared_error\nmse = mean_squared_error(true_power_values, predicted_power_values)\nmse", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7c20662222b0f38510efc9af213dca3f5ba2414
124,548
ipynb
Jupyter Notebook
Notebook-1-Project-4-Hunger-Games-Fanfiction-webscraping.ipynb
sutrofog/Sillman-Metis-Project4
80029641e306e899ae215c7100f63e618bc1d439
[ "MIT" ]
1
2022-03-31T23:27:39.000Z
2022-03-31T23:27:39.000Z
Notebook-1-Project-4-Hunger-Games-Fanfiction-webscraping.ipynb
sutrofog/Sillman-Metis-Project4
80029641e306e899ae215c7100f63e618bc1d439
[ "MIT" ]
null
null
null
Notebook-1-Project-4-Hunger-Games-Fanfiction-webscraping.ipynb
sutrofog/Sillman-Metis-Project4
80029641e306e899ae215c7100f63e618bc1d439
[ "MIT" ]
null
null
null
43.096194
756
0.470558
[ [ [ "**Project 4 Notebook 1**\n\n\n**Data Acquisition**\n\nUsing the Google Chrome web browser extension \"Web Scraper\", I scraped stories and other data from Fanfiction.net. I searched for Hunger Games stories, filtering for stories that were rated T, and that had Katniss Everdeen (there are 4 fields where you can put characters, and I put Katniss Everdeen in for all 4). Looking at the .csv files in Excel, some of the stories were split into several cells. I later learned that Excel has a limit of 32,767 characters per cell, so that when a cell contains more characters than this limit, the remaining characters are split into several cells over the next rows. This is a limitation of Excel, but not of .csv files in general, and so should not affect loading the .csv files into a pandas dataframe.\n\n**Preprocessing issues**\n\nOn Tuesday 2/23/21, I decided to go back and re-do the preprocessing, but leave the capital letters in. Because so many of the names in the stories are slight variations from modern American English (eg Peeta/Peter, Katniss/Katherine) or don't exist in modern American English, I thought it would be important to leave the capitalization in so that the POS tagger recognizes these words as proper nouns.\nOn 2/24/21, I observed that leaving words capitalized resulted in stop words that were capitalized not being removed. Also, I decided to not do parts of speech tagging, as the tagger will not recognize some words as nouns if they are not capitalized (eg Peeta, Katniss, Haymitch). I will replace capital letters, remove numbers and punctuation, then do ngrams, then remove stop words and proceed to vectorization and topic modeling. This happens in Notebook 2.\nLater, when I couldn't get stop word removal working from the quadgrams, I decided to tokenize by single word, then use stemming to try to reduce the number of words.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport nltk\nimport pandas as pd", "_____no_output_____" ] ], [ [ "The data was scraped in two batches and saved in .csv files. 
I read in the two files, created Pandas DataFrames, and then joined the two DataFrames using append.", "_____no_output_____" ] ], [ [ "data = pd.read_csv('Project-4-data/fanfiction-katniss1_pre_page_69.csv')\ndata.head()", "_____no_output_____" ], [ "data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1725 entries, 0 to 1724\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 web-scraper-order 1725 non-null object\n 1 web-scraper-start-url 1725 non-null object\n 2 story_link 1725 non-null object\n 3 story_link-href 1725 non-null object\n 4 story_title 1725 non-null object\n 5 author_id 1725 non-null object\n 6 author_id-href 1725 non-null object\n 7 story_info 1725 non-null object\n 8 story_text 1725 non-null object\n 9 previous_pages 1700 non-null object\n 10 previous_pages-href 1700 non-null object\ndtypes: object(11)\nmemory usage: 148.4+ KB\n" ], [ "data2=pd.read_csv('Project-4-data/fanfiction-katniss1_p69-end_complete.csv')\ndata.head()", "_____no_output_____" ], [ "data2.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1718 entries, 0 to 1717\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 web-scraper-order 1718 non-null object\n 1 web-scraper-start-url 1718 non-null object\n 2 story_link 1718 non-null object\n 3 story_link-href 1718 non-null object\n 4 story_title 1718 non-null object\n 5 author_id 1718 non-null object\n 6 author_id-href 1718 non-null object\n 7 story_info 1718 non-null object\n 8 story_text 1718 non-null object\n 9 next_pages 1693 non-null object\n 10 next_pages-href 1693 non-null object\ndtypes: object(11)\nmemory usage: 147.8+ KB\n" ] ], [ [ "Append the dataframes to make a dataframe with the complete dataset.", "_____no_output_____" ] ], [ [ "katniss=data.append(data2)\nkatniss.head()", "_____no_output_____" ], [ "katniss.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3443 entries, 0 to 1717\nData columns (total 13 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 web-scraper-order 3443 non-null object\n 1 web-scraper-start-url 3443 non-null object\n 2 story_link 3443 non-null object\n 3 story_link-href 3443 non-null object\n 4 story_title 3443 non-null object\n 5 author_id 3443 non-null object\n 6 author_id-href 3443 non-null object\n 7 story_info 3443 non-null object\n 8 story_text 3443 non-null object\n 9 previous_pages 1700 non-null object\n 10 previous_pages-href 1700 non-null object\n 11 next_pages 1693 non-null object\n 12 next_pages-href 1693 non-null object\ndtypes: object(13)\nmemory usage: 376.6+ KB\n" ] ], [ [ "Removed some unnecessary columns.", "_____no_output_____" ] ], [ [ "##Can delete columns \"previous_pages\" and \"next_pages\". 
\n##These are links that the scraping extension put in.\nkatniss.drop([\"previous_pages\", \"previous_pages-href\",\n \"next_pages\", \"next_pages-href\"], axis=1, inplace=True )", "_____no_output_____" ], [ "katniss.head()", "_____no_output_____" ], [ "katniss.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3443 entries, 0 to 1717\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 web-scraper-order 3443 non-null object\n 1 web-scraper-start-url 3443 non-null object\n 2 story_link 3443 non-null object\n 3 story_link-href 3443 non-null object\n 4 story_title 3443 non-null object\n 5 author_id 3443 non-null object\n 6 author_id-href 3443 non-null object\n 7 story_info 3443 non-null object\n 8 story_text 3443 non-null object\ndtypes: object(9)\nmemory usage: 269.0+ KB\n" ], [ "#replace punctuation with a white space, remove numbers, capital letters\n##on 2/23, decided to not replace capital letters\n##on 2/24, decided to go back and replace capital letters again, and then not tag parts of speech, as the pos\n##tagger will not recognize some names as nouns (eg Katniss, Peeta, Haymitch). Captialized stopwords\n##were not being removed, which creates its own mess.\nimport re\nimport string\n\nalphanumeric = lambda x: re.sub('\\w*\\d\\w*', ' ', x)\npunc_lower = lambda x: re.sub('[%s]' % re.escape(string.punctuation), ' ', x.lower()) #this was used 2/22 to replace\n#capital letters and remove punctuation.\n#punc_remove = lambda x: re.sub('[%s]' % re.escape(string.punctuation), ' ', x) this is from 2/23\nkatniss['story_text'] = data.story_text.map(alphanumeric).map(punc_remove)\nkatniss.head()", "_____no_output_____" ], [ "katniss.to_csv('katniss-no-punc-num.csv')\n##save this to a .csv file", "_____no_output_____" ], [ "import re\nimport string", "_____no_output_____" ], [ "#import nltk\nnltk.download('stopwords')", "[nltk_data] Downloading package stopwords to\n[nltk_data] /Users/amysillman/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "from nltk.corpus import stopwords\nnltk.download('stopwords')\nfrom nltk.tokenize import word_tokenize\nstop=stopwords.words('english')\n#import texthero as hero\n#set(stopwords.words('english'))", "[nltk_data] Downloading package stopwords to\n[nltk_data] /Users/amysillman/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "#this does not work. It separated all of the story_text into single letters!\n#Good thing I saved the last iteration as a .csv. I'll have to load it and figure out what I did wrong.\n#katniss['story_text_without_stopwords'] = katniss['story_text'].apply(lambda x: [item for item in x if item not in stop])", "_____no_output_____" ], [ "katniss.head()", "_____no_output_____" ], [ "##katniss=pd.read_csv('Project-4-data/katniss-no-capitals.csv')", "_____no_output_____" ], [ "katniss.head()\n#ok the story_text is ok. Whew! Now to figure out how to take out the stop words.\n#The reason it did that is because I didn't tokenize by word first.\n#I need to tokenize the text by words before taking out the stop words. It needs to see the text in units of words.", "_____no_output_____" ], [ "nltk.download('punkt')", "[nltk_data] Downloading package punkt to\n[nltk_data] /Users/amysillman/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n" ], [ "#Tokenize by word. 
I imported word_tokenize earlier in the notebook.\n#This should create a new column with the story texts tokenized by word.\n#Apparently there are still quotation marks in the story texts. \n#These need to come out and be replaced by white space\n\nkatniss['story_text'] = katniss['story_text'].str.strip(to_strip='\"')", "_____no_output_____" ], [ "katniss.head()", "_____no_output_____" ], [ "katniss.to_csv('katniss-no-num-punc-quote.csv')", "_____no_output_____" ], [ "#Still seems to be quotation marks at the end of some of the story texts.\n#Will try to tokenize anyway. Getting an error that it expected a \"string or bytes-like object\"\n#need to force the column to a str data type.\n###katniss['story_text_wtokenized'] = word_tokenize(katniss['story_text_no_quotes'])\nkatniss.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3443 entries, 0 to 1717\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 web-scraper-order 3443 non-null object\n 1 web-scraper-start-url 3443 non-null object\n 2 story_link 3443 non-null object\n 3 story_link-href 3443 non-null object\n 4 story_title 3443 non-null object\n 5 author_id 3443 non-null object\n 6 author_id-href 3443 non-null object\n 7 story_info 3443 non-null object\n 8 story_text 3443 non-null object\ndtypes: object(9)\nmemory usage: 269.0+ KB\n" ], [ "#Getting an error that it expected a \"string or bytes-like object\"\n#need to force the column to a str data type.\nkatniss['story_text']=katniss['story_text'].astype(str)", "_____no_output_____" ], [ "katniss.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3443 entries, 0 to 1717\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 web-scraper-order 3443 non-null object\n 1 web-scraper-start-url 3443 non-null object\n 2 story_link 3443 non-null object\n 3 story_link-href 3443 non-null object\n 4 story_title 3443 non-null object\n 5 author_id 3443 non-null object\n 6 author_id-href 3443 non-null object\n 7 story_info 3443 non-null object\n 8 story_text 3443 non-null object\ndtypes: object(9)\nmemory usage: 269.0+ KB\n" ], [ "katniss.head()", "_____no_output_____" ], [ "#tokenize by word\nkatniss['story_text'] = katniss['story_text'].apply(word_tokenize)", "_____no_output_____" ], [ "katniss.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3443 entries, 0 to 1717\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 web-scraper-order 3443 non-null object\n 1 web-scraper-start-url 3443 non-null object\n 2 story_link 3443 non-null object\n 3 story_link-href 3443 non-null object\n 4 story_title 3443 non-null object\n 5 author_id 3443 non-null object\n 6 author_id-href 3443 non-null object\n 7 story_info 3443 non-null object\n 8 story_text 3443 non-null object\ndtypes: object(9)\nmemory usage: 269.0+ KB\n" ], [ "katniss.head()", "_____no_output_____" ], [ "katniss.to_csv('katniss-word-tokenized-wcap-new.csv')", "_____no_output_____" ] ], [ [ "I can delete a couple columns to save space. 
'story_text' and 'story_text_no_quotes'\nusing: \n \n>katniss.drop([\"story_text\", \"story_text_no_quotes\"], axis=1, inplace=True )", "_____no_output_____" ] ], [ [ "#katniss.to_csv('katniss-word-tokenized_only.csv')", "_____no_output_____" ], [ "katniss.head()", "_____no_output_____" ], [ "#Now I can try to take out the stopwords.\nkatniss['story_text_without_stopwords'] = katniss['story_text'].apply(lambda x: [item for item in x if item not in stop])", "_____no_output_____" ], [ "katniss.head()", "_____no_output_____" ], [ "#Super! It worked! Save it as a .csv\nkatniss.to_csv('katniss-wtok-no-stops-wcaps.csv')", "_____no_output_____" ], [ "#I'll delete the column that still has the stopwords, to save space. 'story_text' \nkatniss.drop([\"story_text\"], axis=1, inplace=True )", "_____no_output_____" ], [ "katniss.head()", "_____no_output_____" ], [ "katniss.to_csv('katniss-nostops-wcaps-only.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c2147c72d1da46948e7885787ceafb143e1eb1
4,163
ipynb
Jupyter Notebook
README.ipynb
GeorgAUT/FrameFun.jl
769c342ae76de06fa986662862ab448e48c1849c
[ "MIT" ]
null
null
null
README.ipynb
GeorgAUT/FrameFun.jl
769c342ae76de06fa986662862ab448e48c1849c
[ "MIT" ]
null
null
null
README.ipynb
GeorgAUT/FrameFun.jl
769c342ae76de06fa986662862ab448e48c1849c
[ "MIT" ]
null
null
null
21.910526
192
0.528225
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7c223725870ef7476b67f55a343b9812930fa81
8,150
ipynb
Jupyter Notebook
assignments/Homework2.ipynb
BioModelTools/topics-course
cd0d73e4056663d170465669ecd699e8e74e35a0
[ "MIT" ]
2
2018-10-24T21:31:30.000Z
2019-10-23T20:29:22.000Z
assignments/Homework2.ipynb
BioModelTools/topics-course
cd0d73e4056663d170465669ecd699e8e74e35a0
[ "MIT" ]
1
2019-11-20T21:46:26.000Z
2019-11-20T22:09:00.000Z
assignments/Homework2.ipynb
BioModelTools/topics-course
cd0d73e4056663d170465669ecd699e8e74e35a0
[ "MIT" ]
9
2018-10-31T20:48:42.000Z
2019-11-20T21:47:43.000Z
31.10687
160
0.57227
[ [ [ "<span style=\"font-family:Papyrus; font-size:3em;\">Homework 2</span>\n\n<span style=\"font-family:Papyrus; font-size:2em;\">Cross Validation</span>", "_____no_output_____" ], [ "# Problem", "_____no_output_____" ], [ "In this homework, you will use cross validation to analyze the effect on model quality\nof the number of model parameters and the noise in the observational data.\nYou do this analysis in the context of design of experiments.\nThe two factors are (i) number of model parameters and (ii) the noise in the observational data;\nthe response will be the $R^2$ of the model (actually the $R^2$ averaged across the folds of\ncross validation).\n\nYou will investigate models of linear pathways with 2, 4, 6, 8, 10 parameters.\nFor example, a two parameter model is use $S_1 \\xrightarrow{v_1} S_2 \\xrightarrow{v_3} S_3$,\nwhere $v_i = k_i s_i$, $k_i$ is a parameter to estimate, and $s_i$ is the concentration of $S_i$.\nThe initial concentration of $S_1 = 10$, and the true value of $k_i$ is $i$. Thus, for a two parameter model,\n$k_1 = 1$, $k_2 = 2$.\n\nYou will generate the synthetic data by adding a\nnoise term to the true model.\nThe noise term is drawn from a normal distribution with mean 0\nand standard deviations of 0.2, 0.5, 0.8, 1.0, and 1.5, depending on the experiment.\n\nYou will design experiments, implement codes to run them, run the experiments, and interpret the results.\nThe raw output of these experiments will be\na table structured as the one below.\nCell values will be the average $R^2$ across the folds of the cross validation done with\none level for each factor.\n\n | | 2 | 4 | 6 | 8 | 10\n | -- | -- | -- | -- | -- | -- |\n 0.2 | ? | ? | ? | ? | ?\n 0.5 | ? | ? | ? | ? | ?\n 0.8 | ? | ? | ? | ? | ?\n 1.0 | ? | ? | ? | ? | ?\n 1.5 | ? | ? | ? | ? | ?\n \n\n1. (2 pt) **Generate Models.** Write (or generate) the models in Antimony, and produce plots for their true values. Use a simulation time\nof 10 and 100 points.\n\n1. (1 pt) **Generate Synthetic Data.** Write a function that creates synthetic data given the parameters std \nand numParameter.\n\n1. (1 pt) **Extend ``CrossValidator``.** You will extend ``CrossValidator`` (in ``common/util_crossvalidation.py``)\nby creating a subclass ``ExtendedCrossValidator`` that has the method\n``calcAvgRsq``. The method takes no argument (except ``self``) and returns the average value of\n$R^2$ for the folds. Don't forget to document the function and include at least one tests.\n\n1. (4 pt) **Implement ``runExperiments``.** This function has inputs: (a) list of the number of parameters for the\nmodels to study and (b) list of the standard deviations of the noise terms.\nIt returns a dataframe with: columns are the number of parameters; rows (index) are the standard deviations of noise;\nand values are the average $R^2$ for the folds defined by the levels of the factors.\nRun experiments that produce the tables described above using five hold cross validation and 100 simulation points.\n\n1. (4 pt) **Calculate Effects.** Using the baseline standard deviation of noise of 0.8, number of parameters of 6, calculate $\\mu$, $\\alpha_{i,k_i}$,\n$\\gamma_{i,i_k,j,k_j}$.\n\n1. (3 pt) **Analysis.** Answer the following questions\n 1. What is the effect on $R^2$ as the number of parameters increases? Why?\n 1. How does the noise standard deviation affect $R^2$? Why?\n 1. 
What are the interaction effects and how do they influence the response (average $R^2$)?\n \n**Please do your homework in a copy of this notebook, maintaining the sections.**", "_____no_output_____" ], [ "# Programming Preliminaries\nThis section provides the setup to run your python codes.", "_____no_output_____" ] ], [ [ "IS_COLAB = False\n#\nif IS_COLAB:\n !pip install tellurium\n !pip install SBstoat\n# \n# Constants for standalone notebook\nif not IS_COLAB:\n CODE_DIR = \"/home/ubuntu/advancing-biomedical-models/common\"\nelse:\n from google.colab import drive\n drive.mount('/content/drive')\n CODE_DIR = \"/content/drive/My Drive/Winter 2021/common\"\nimport sys\nsys.path.insert(0, CODE_DIR)", "_____no_output_____" ], [ "import util_crossvalidation as ucv\nfrom SBstoat.namedTimeseries import NamedTimeseries, TIME\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tellurium as te", "_____no_output_____" ], [ "END_TIME = 5\nNUM_POINT = 100\nNOISE_STD = 0.5\n# Column names\nC_NOISE_STD = \"noisestd\"\nC_NUM_PARAMETER = \"no. parameters\"\nC_VALUE = \"value\"\n#\nNOISE_STDS = [0.2, 0.5, 0.8, 1.0, 1.5]\nNUM_PARAMETERS = [2, 4, 6, 8, 10]", "_____no_output_____" ], [ "def isSame(collection1, collection2):\n \"\"\"\n Determines if two collections have the same elements.\n \"\"\"\n diff = set(collection1).symmetric_difference(collection2)\n return len(diff) == 0\n \n# Tests\nassert(isSame(range(3), [0, 1, 2]))\nassert(not isSame(range(4), range(3)))", "_____no_output_____" ] ], [ [ "# Generate Models", "_____no_output_____" ], [ "# Generate Synthetic Data", "_____no_output_____" ], [ "# ``ExtendedCrossValidator``", "_____no_output_____" ], [ "Hint: Subclass using ``class ExtendedCrossValidator(ucv.CrossValidator):``.", "_____no_output_____" ], [ "# Implement ``runExperiments``", "_____no_output_____" ], [ "# Calculate Effects\nHere, we calculate $\\mu$, $\\alpha_{i, k_i}$, and $\\gamma_{i, k_i, j, k_j}$.", "_____no_output_____" ], [ "# Analysis", "_____no_output_____" ], [ "**What is the effect on $R^2$ as the number of parameters increases? Why?**\n ", "_____no_output_____" ], [ "**How does the noise standard deviation affect $R^2$? Why?**", "_____no_output_____" ], [ "**What are the interaction effects and how do they influence the response (average $R^2$)?**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7c22ca21aaa3900161aad92f7a1d83e49d2854a
18,686
ipynb
Jupyter Notebook
test_agilent_scope.ipynb
CharLee674/rvisa_lightlab
b43e36f3436b60c8c5f3088b4cb0896c5360aa4a
[ "MIT" ]
null
null
null
test_agilent_scope.ipynb
CharLee674/rvisa_lightlab
b43e36f3436b60c8c5f3088b4cb0896c5360aa4a
[ "MIT" ]
null
null
null
test_agilent_scope.ipynb
CharLee674/rvisa_lightlab
b43e36f3436b60c8c5f3088b4cb0896c5360aa4a
[ "MIT" ]
null
null
null
49.829333
2,005
0.603821
[ [ [ "import lightlab.equipment.lab_instruments as instrs", "_____no_output_____" ], [ "dir(instrs)", "_____no_output_____" ], [ "from lightlab.equipment.lab_instruments import Remote_Agilent_Oscope", "_____no_output_____" ], [ "link = 'https://1000000058948db1-api.nwtech.win/'", "_____no_output_____" ], [ "remote = Remote_Agilent_Oscope(address=\"GPIB0::7::INSTR\", url=link)", "_____no_output_____" ], [ "dir(Remote_Agilent_Oscope)", "_____no_output_____" ], [ "dir(remote)", "_____no_output_____" ], [ "remote.driver.clear()", "RESOURCE SUCCESSFULLY CLEARED\nRESOURCE SUCCESSFULLY CLOSED\n" ], [ "remote.driver.query('*IDN?')", "_____no_output_____" ], [ "print(remote.acquire(chans=[1]))", "500 RESPONSE: {'read': 'ERROR: COULD NOT QUERY GPIB0::7::INSTR. EXCEPTION: VI_ERROR_TMO (-1073807339): Timeout expired before operation completed.'}\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c23a8bc14a16321d549571ef76c765585991e5
4,339
ipynb
Jupyter Notebook
content/lessons/10/Now-You-Code/NYC4-Syracuse-Weather.ipynb
jferna22-su/ist256
7fb58e391835588e99a0816d093cb2dec358aa9a
[ "MIT" ]
null
null
null
content/lessons/10/Now-You-Code/NYC4-Syracuse-Weather.ipynb
jferna22-su/ist256
7fb58e391835588e99a0816d093cb2dec358aa9a
[ "MIT" ]
null
null
null
content/lessons/10/Now-You-Code/NYC4-Syracuse-Weather.ipynb
jferna22-su/ist256
7fb58e391835588e99a0816d093cb2dec358aa9a
[ "MIT" ]
null
null
null
32.380597
490
0.574326
[ [ [ "# Now You Code 4: Syracuse Weather\n\nWrite a program to load the Syracuse weather data from Dec 2015 in\nJSON format into a Python list of dictionary. \n\nThe file with the weather data is in your `Now-You-Code` folder: `\"NYC4-syr-weather-dec-2015.json\"`\n\nYou should load this data into a Python list of dictionary using the `json` package. \n\nAfter you load this data, loop over the list of weather items and record whether or not the `'Mean TemperatureF'` is above or below freezing. \n\nSort this information into a separate Python dictionary, called `stats` so you can print it out like this:\n```\n{'below-freezing': 4, 'above-freezing': 27}\n```\n\n", "_____no_output_____" ], [ "## Step 1: Problem Analysis\n\nThis function should get input from the user at run time and return the input address.\n\nInputs:\n\nOutputs: \n\nAlgorithm (Steps in Program):\n", "_____no_output_____" ] ], [ [ "# Step 2: Write code\nimport json\n\ndef load_weather_data(): \n with open('NYC4-syr-weather-dec-2015.json') as f:\n data = f.read()\n weather = json.loads(data)\n return weather\n \n\ndef extract_weather_info(weather):\n info = {}\n info['mean temp'] = weather['Mean TemperatufeF']\n info['high']\n return info\n\nprint(weather)", "_____no_output_____" ] ], [ [ "## Step 3: Questions\n\n1. What are the advantages to storing the number of days above freezing and below freezing in a Python dictionary?\n2. What is the data type of the weather data as it is read from the file `NYC4-syr-weather-dec-2015.json` ?\n3. Could this same program work for weather at other times in other cities? What conditions would need to be met for this to happen?", "_____no_output_____" ], [ "## Reminder of Evaluation Criteria\n\n1. Was the problem attempted (analysis, code, and answered questions) ?\n2. Was the problem analysis thought out? (does the program match the plan?)\n3. Does the code execute without syntax error?\n4. Does the code solve the intended problem?\n5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7c23fdb3f0958ca9896de942f3af2029c814be5
13,326
ipynb
Jupyter Notebook
ASHRAE/competitive_reg_models.ipynb
Costigun/kaggle_practice
a84ceec8a982c719126d26fd90e03dafb7e1e123
[ "MIT" ]
null
null
null
ASHRAE/competitive_reg_models.ipynb
Costigun/kaggle_practice
a84ceec8a982c719126d26fd90e03dafb7e1e123
[ "MIT" ]
null
null
null
ASHRAE/competitive_reg_models.ipynb
Costigun/kaggle_practice
a84ceec8a982c719126d26fd90e03dafb7e1e123
[ "MIT" ]
null
null
null
40.259819
460
0.537521
[ [ [ "### Постановка задачи\nРассмотрим несколько моделей линейной регрессии, чтобы выяснить более оптимальную для первых 20 зданий.\n\nДанные:\n* http://video.ittensive.com/machine-learning/ashrae/building_metadata.csv.gz\n* http://video.ittensive.com/machine-learning/ashrae/weather_train.csv.gz\n* http://video.ittensive.com/machine-learning/ashrae/train.0.csv.gz\nСоревнование: https://www.kaggle.com/c/ashrae-energy-prediction/\n\n© ITtensive, 2020", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom pandas.tseries.holiday import USFederalHolidayCalendar as calendar\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet, BayesianRidge", "_____no_output_____" ], [ "def reduce_mem_usage (df):\n start_mem = df.memory_usage().sum() / 1024**2 \n for col in df.columns:\n col_type = df[col].dtypes\n if str(col_type)[:5] == \"float\":\n c_min = df[col].min()\n c_max = df[col].max()\n if c_min > np.finfo(\"f2\").min and c_max < np.finfo(\"f2\").max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(\"f4\").min and c_max < np.finfo(\"f4\").max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n elif str(col_type)[:3] == \"int\":\n c_min = df[col].min()\n c_max = df[col].max()\n if c_min > np.iinfo(\"i1\").min and c_max < np.iinfo(\"i1\").max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(\"i2\").min and c_max < np.iinfo(\"i2\").max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(\"i4\").min and c_max < np.iinfo(\"i4\").max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(\"i8\").min and c_max < np.iinfo(\"i8\").max:\n df[col] = df[col].astype(np.int64)\n elif col == \"timestamp\":\n df[col] = pd.to_datetime(df[col])\n elif str(col_type)[:8] != \"datetime\":\n df[col] = df[col].astype(\"category\")\n end_mem = df.memory_usage().sum() / 1024**2\n print('Потребление памяти меньше на', round(start_mem - end_mem, 2), 'Мб (минус', round(100 * (start_mem - end_mem) / start_mem, 1), '%)')\n return df", "_____no_output_____" ], [ "buildings = pd.read_csv(\"http://video.ittensive.com/machine-learning/ashrae/building_metadata.csv.gz\")\nweather = pd.read_csv(\"http://video.ittensive.com/machine-learning/ashrae/weather_train.csv.gz\")\nenergy = pd.read_csv(\"http://video.ittensive.com/machine-learning/ashrae/train.0.csv.gz\")\nenergy = energy[(energy[\"building_id\"]<20)]\nenergy = pd.merge(left=energy, right=buildings, how=\"left\",\n left_on=\"building_id\", right_on=\"building_id\")\nenergy = energy.set_index([\"timestamp\", \"site_id\"])\nweather = weather.set_index([\"timestamp\", \"site_id\"])\nenergy = pd.merge(left=energy, right=weather, how=\"left\",\n left_index=True, right_index=True)\nenergy.reset_index(inplace=True)\nenergy = energy.drop(columns=[\"meter\", \"site_id\", \"year_built\",\n \"square_feet\", \"floor_count\"], axis=1)\ndel buildings\ndel weather\nenergy = reduce_mem_usage(energy)\nprint (energy.info())", "Потребление памяти меньше на 10.39 Мб (минус 70.5 %)\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 175680 entries, 0 to 175679\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 timestamp 175680 non-null datetime64[ns]\n 1 building_id 175680 non-null int8 \n 2 meter_reading 175680 non-null float16 \n 3 primary_use 175680 non-null category \n 4 air_temperature 175620 non-null float16 \n 5 
cloud_coverage 99080 non-null float16 \n 6 dew_temperature 175620 non-null float16 \n 7 precip_depth_1_hr 175660 non-null float16 \n 8 sea_level_pressure 173980 non-null float16 \n 9 wind_direction 170680 non-null float16 \n 10 wind_speed 175680 non-null float16 \ndtypes: category(1), datetime64[ns](1), float16(8), int8(1)\nmemory usage: 4.4 MB\nNone\n" ], [ "energy[\"hour\"] = energy[\"timestamp\"].dt.hour.astype(\"int8\")\nenergy[\"weekday\"] = energy[\"timestamp\"].dt.weekday.astype(\"int8\")\nfor weekday in range(0,7):\n energy['is_wday' + str(weekday)] = energy['weekday'].isin([weekday]).astype(\"int8\")\nenergy[\"date\"] = pd.to_datetime(energy[\"timestamp\"].dt.date)\ndates_range = pd.date_range(start='2015-12-31', end='2017-01-01')\nus_holidays = calendar().holidays(start=dates_range.min(),\n end=dates_range.max())\nenergy['is_holiday'] = energy['date'].isin(us_holidays).astype(\"int8\")\nenergy[\"meter_reading_log\"] = np.log(energy[\"meter_reading\"] + 1)", "_____no_output_____" ], [ "energy_train,energy_test = train_test_split(energy[energy['meter_reading'] > 0],test_size=0.2)", "_____no_output_____" ], [ "from sklearn.metrics import *", "_____no_output_____" ], [ "hours = range(0,24)\nbuildings = range(0,energy_train['building_id'].max() + 1)\nlr_columns = ['meter_reading_log','hour','building_id','is_holiday']\nfor wday in range(0,7):\n lr_columns.append('is_wday' + str(wday))\n", "_____no_output_____" ] ], [ [ "Линейная регрессия\n\\begin{equation}\nz = Ax + By + C, |z-z_0|^2 \\rightarrow min\n\\end{equation}\nЛассо + LARS Лассо\n\\begin{equation}\n\\frac{1}{2n}|z-z_0|^2 + a(|A|+|B|) \\rightarrow min\n\\end{equation}\nГребневая регрессия\n\\begin{equation}\n|z-z_0|^2 + a(A^2 + B^2) \\rightarrow min\n\\end{equation}\nElasticNet: Лассо + Гребневая регрессия\n\\begin{equation}\n\\frac{1}{2n}|z-z_0|^2 + \\alpha p|A^2+B^2| + (\\alpha - p)(|A|+|B|)/2 \\rightarrow min\n\\end{equation}", "_____no_output_____" ] ], [ [ "lr_models = {\n \"LinearRegression\":LinearRegression,\n \"Lasso-0.01\":Lasso,\n \"Lasso-0.1\":Lasso,\n \"Lasso-1.0\":Lasso,\n \"Ridge-0.01\":Ridge,\n \"Ridge-0.1\":Ridge,\n \"Ridge-1.0\":Ridge,\n \"ELasticNet-1-1\":ElasticNet,\n \"ELasticNet-0.1-1\":ElasticNet,\n \"ELasticNet-1-0.1\":ElasticNet,\n \"ELasticNet-0.1-0.1\":ElasticNet,\n \"BayesianRidge\":BayesianRidge\n}\nenergy_train_lr = pd.DataFrame(energy_train,columns=lr_columns)", "_____no_output_____" ], [ "lr_models_scores = {}\nfor _ in lr_models:\n lr_model = lr_models[_]\n energy_lr_scores = [[]] * len(buildings)\n for building in buildings:\n energy_lr_scores[building] = [0] * len(hours)\n energy_train_b = energy_train_lr[energy_train_lr['building_id'] == building]\n for hour in hours:\n energy_train_bh = energy_train_b[energy_train_b['hour'] == hour]\n y = energy_train_bh['meter_reading_log']\n x = energy_train_bh.drop(['meter_reading_log','hour','building_id'],axis=1)\n if _ in ['Ridge-0.1','Lasso-0.1']:\n model = lr_model(alpha=0.1,fit_intercept=False).fit(x,y)\n elif _ in ['Ridge-0.01','Lasso-0.01']:\n model = lr_model(alpha=0.01,fit_intercept=False).fit(x,y)\n elif _ == 'ElasticNet-1-1':\n model = lr_model(alpha=1,l1_ratio=1,fit_intercept=False).fit(x,y)\n elif _ == 'ElasticNet-1-0.1':\n model = lr_model(alpha=1,l1_ratio=0.1,fit_intercept=False).fit(x,y)\n elif _ == 'ElasticNet-0.1-1':\n model = lr_model(alpha=0.1,l1_ratio=1,fit_intercept=False).fit(x,y)\n elif _ == 'ElasticNet-0.1-0.1':\n model = lr_model(alpha=0.1,l1_ratio=0.1,fit_intercept=False).fit(x,y)\n else:\n model = 
lr_model(fit_intercept=False).fit(x,y)\n energy_lr_scores[building][hour] = r2_score(y,model.predict(x))\n lr_models_scores[_] = np.mean(energy_lr_scores)\nprint(lr_models_scores)", "{'LinearRegression': 0.13262979264188432, 'Lasso-0.01': -0.19060262906360775, 'Lasso-0.1': -31.177850789729533, 'Lasso-1.0': -2429.7711638954247, 'Ridge-0.01': 0.13221503036076057, 'Ridge-0.1': 0.09151042473014116, 'Ridge-1.0': -3.6576414865818228, 'ELasticNet-1-1': -2105.6896312858844, 'ELasticNet-0.1-1': -2105.6896312858844, 'ELasticNet-1-0.1': -2105.6896312858844, 'ELasticNet-0.1-0.1': -2105.6896312858844, 'BayesianRidge': 0.13261999641258232}\n" ], [ "energy_lr = []\nenergy_ridge = []\nenergy_br = []\nfor building in buildings:\n energy_lr.append([])\n energy_ridge.append([])\n energy_br.append([])\n energy_train_b = energy_train_lr[energy_train_lr['building_id'] == building]\n for hour in hours:\n energy_lr[building].append([0] * (len(lr_columns)-3))\n energy_ridge[building].append([0] * (len(lr_columns)-3))\n energy_br[building].append([0] * (len(lr_columns)-3))\n energy_train_bh = energy_train_b[energy_train_b['hour'] == hour]\n y = energy_train_bh['meter_reading_log']\n if len(y) > 0:\n x = energy_train_bh.drop(['meter_reading_log','hour','building_id'],axis=1)\n model = LinearRegression(fit_intercept=False).fit(x,y)\n energy_lr[building][hour] = model.coef_\n model = Ridge(alpha=0.01,fit_intercept=False).fit(x,y)\n energy_ridge[building][hour] = model.coef_\n model = BayesianRidge(fit_intercept=False).fit(x,y)\n energy_br[building][hour] = model.coef_\nprint(energy_lr[0][0])\nprint(energy_ridge[0][0])\nprint(energy_br[0][0])", "[-0.05204313 5.44504565 5.41921165 5.47881611 5.41753305 5.43838778\n 5.45137392 5.44059806]\n[-0.04938976 5.44244413 5.41674949 5.47670968 5.41516617 5.43591691\n 5.44949479 5.43872264]\n[-0.05138182 5.44439819 5.41859905 5.47829205 5.41694412 5.43777302\n 5.45090643 5.44013149]\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7c244ee2a2821559b7a579b9f598ecb34163869
318,529
ipynb
Jupyter Notebook
ChannelFlows/Simulation/ChannelFlowSimulation.ipynb
joaochenriques/MCTE_2022
b999d60b6c4153be5a314da262a18e467cb41d7e
[ "MIT" ]
1
2022-03-06T18:30:41.000Z
2022-03-06T18:30:41.000Z
ChannelFlows/Simulation/ChannelFlowSimulation.ipynb
joaochenriques/MCTE_2022
b999d60b6c4153be5a314da262a18e467cb41d7e
[ "MIT" ]
null
null
null
ChannelFlows/Simulation/ChannelFlowSimulation.ipynb
joaochenriques/MCTE_2022
b999d60b6c4153be5a314da262a18e467cb41d7e
[ "MIT" ]
null
null
null
83.080073
139,287
0.596972
[ [ [ "<a href=\"https://colab.research.google.com/github/joaochenriques/MCTE_2022/blob/main/ChannelFlows/Simulation/ChannelFlowSimulation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as mpl\nimport matplotlib.ticker as plticker\nimport numpy as np\nfrom scipy.optimize import minimize_scalar", "_____no_output_____" ], [ "import pathlib\nif not pathlib.Path(\"mpl_utils.py\").exists():\n !curl -O https://raw.githubusercontent.com/joaochenriques/MCTE_2022/main/libs/mpl_utils.py &> /dev/null\n\nimport mpl_utils as mut\nmut.config_plots()\n\n%config InlineBackend.figure_formats = ['svg']", "_____no_output_____" ], [ "try:\n from tqdm.notebook import tqdm\nexcept ModuleNotFoundError:\n !pip install tdqm\n from tqdm.notebook import tqdm\n\nfrom IPython.display import Markdown, display\ndef printmd(string):\n display(Markdown(string))", "_____no_output_____" ] ], [ [ "# **Setup the problem**", "_____no_output_____" ] ], [ [ "ρw = 1025 # [kg/m³] salt water density \ng = 9.8 # [m/s²] gravity aceleration \n\nT = 12.0*3600.0 + 25.2*60.0 # [s] tide period\n\nL = 20000 # [m] channel length \nh = 60 # [m] channel depth\nb = 4000 # [m] channel width\na = 1.2 # [m] tidal amplitude\nS = h*b # [m²] channel area \n\ntwopi = 2*np.pi\n\nω = twopi / T # [rad/s] tidal frequency\nQ0 = g*a*S / (ω*L) # [-] frictionless channel volumetric flow rate \nqr = S * np.sqrt(g*h) # flow rate based on wave velocity\n\nCd = 0.005 # [-] friction coefficient\nf = 2*Cd # [-] friction coefficient used in the model is twice the value \n # usual used in tidal (non standard model) \n\nFr_0 = Q0 / ( S * np.sqrt( g * h ) )\n\nΘ_T_star = ( 0.5 / S**2 ) * Q0**2 / ( g * a )\nΘ_f_star = Θ_T_star * ( f * L / h )\n\nprintmd( \"$\\mathrm{Fr}_0 = %.3f$\" % Fr_0 )\nprintmd( \"$\\Theta_\\mathrm{f}^* = %.3f$\" % Θ_f_star )\nprintmd( \"$\\Theta_\\mathrm{T}^* = %.3f$\" % Θ_T_star )", "_____no_output_____" ], [ "def local_CT_and_CP( Fr4b, Fr1, B ): \n\n # See Chapter 3 of the MCTE Lecture notes\n\n ζ4 = (1/2.)*Fr1**2 - 1/2.*Fr4b**2 + 1.0\n \n Fr4t = (Fr1 - Fr4b*ζ4 + np.sqrt(B**2*Fr4b**2 - 2*B*Fr1**2 + 2*B*Fr1*Fr4b \\\n + B*ζ4**2 - B + Fr1**2 - 2*Fr1*Fr4b*ζ4 + Fr4b**2*ζ4**2))/B\n\n ζ4b = (Fr1 - Fr4t*ζ4)/(Fr4b - Fr4t)\n ζ4t = -(Fr1 - Fr4b*ζ4)/(Fr4b - Fr4t)\n \n Fr2t = Fr4t*ζ4t/B\n\n C_T = (Fr4b**2 - Fr4t**2)/Fr1**2\n C_P = C_T*Fr2t/Fr1\n\n return C_T, C_P\n\ndef find_minus_CP( Fr4b, Fr1, B ): \n # function created to discard the C_T when calling \"local_CT_and_CP\"\n C_T, C_P = local_CT_and_CP( Fr4b, Fr1, B ) \n return -C_P # Minus C_P to allow minimization", "_____no_output_____" ], [ "def compute_BCT_BCP( Fr_0, B, Q_star ):\n\n Fr1 = np.abs( Fr_0 * Q_star )\n\n if Fr1 < 1E-3:\n return 0.0, 0.0 # all zeros\n\n # find the optimal C_P for the channel conditions\n res = minimize_scalar( find_minus_CP, args=(Fr1, B), bounds=[0,1], \n method='bounded', \n options={ 'xatol': 1e-08, 'maxiter': 500, 'disp': 1 } )\n Fr4b = res.x # optimal value\n\n C_T, C_P = local_CT_and_CP( Fr4b, Fr1, B )\n\n return B*C_T, B*C_P", "_____no_output_____" ] ], [ [ "# **Solution of the ODE**\n\n$\\displaystyle \\frac{dQ^*}{dt^*}=\\cos(t^*) - (\\Theta_\\text{f}^*+BC_\\text{T} \\Theta_\\text{T}^*) \\, Q^* \\, |Q^*|$\n\n$\\displaystyle \\frac{d E_\\text{T}^*}{dt^*}= BC_\\text{P} \\, |{Q^*}^3|$\n\nwhere $B$, $\\Theta_\\text{f}^*$ and $\\Theta_\\text{T}^*$ are constants, and $C_\\text{T}$ and $C_\\text{P}$ are computed as a function of the local 
Froude number.\n\n\nThis system can be writen as\n\n$$\\dfrac{d \\mathbf{y}^*}{dt^*} = \\mathbf{f}^*\\!\\!\\left( \\mathbf{y}^*, t^* \\right),$$\n\nwith\n\n$$\\mathbf{y} = \n\\begin{pmatrix}\nQ^*\\\\\nE_\\text{T}^*\n\\end{pmatrix}\n\\tag{Eq. 1}\n$$\n\nand\n\n$$\n\\tag{Eq. 2}\n\\mathbf{f}^* = \n\\begin{pmatrix}\n\\cos(t^*) - (\\Theta_\\text{f}^*+BC_T \\Theta_\\text{T}^*) \\, Q^* |Q^*|\\\\[4pt]\nBC_P \\, |{Q^*}^3|\n\\end{pmatrix}\n$$\n\nWe adopt a first order solution of the type\n\n$$\\dfrac{\\mathbf{y}^*(t_n^*+\\Delta t^*)-\\mathbf{y}^*(t_n^*)}{\\Delta t^*} \n= \\mathbf{f}^*\\bigg( t_n^*, \\mathbf{y}^*\\left(t_n^*\\right) \\bigg)$$\n\nresulting\n\n$$\\mathbf{y}^*_{n+1} = \\mathbf{y}^*_n + \\Delta t^* \\, \\mathbf{f}^*\\!\\!\\left( t^*_n,\n\\mathbf{y}^*_n \\right)\n\\tag{Eq. 3}\n$$\n\nwhere\n\n$$\\mathbf{y}^*_{n}=\\mathbf{y}^*(t_n^*)$$\n\n$$\\mathbf{y}^*_{n+1}=\\mathbf{y}^*(t_n^*+\\Delta t^*)$$\n", "_____no_output_____" ], [ "# Define RHS of the ODE, see Eq. (2)", "_____no_output_____" ] ], [ [ "def f_star( ys, ts, Θ_f_star, Θ_T_star, Fr_0, B_rows ):\n ( Q_star, E_star ) = ys \n \n BC_T_rows = np.zeros( len( B_rows ) )\n BC_P_rows = np.zeros( len( B_rows ) )\n\n B_0 = np.nan\n for j, B in enumerate( B_rows ): \n # do not repeat the computations if B is equal to the previous iteration\n if B_0 != B:\n BC_T_j, BC_P_j = compute_BCT_BCP( Fr_0, B, Q_star )\n B_0 = B\n\n BC_T_rows[j] = BC_T_j\n BC_P_rows[j] = BC_P_j\n\n return np.array( \n ( np.cos( ts ) - ( Θ_f_star + np.sum(BC_T_rows) * Θ_T_star ) * Q_star * np.abs( Q_star ), \n np.sum(BC_P_rows) * np.abs( Q_star )**3 ) \n )", "_____no_output_____" ] ], [ [ "# **Solution with channel bed friction and turbines thrust**", "_____no_output_____" ] ], [ [ "periods = 4\nppp = 100 # points per period\nnum = int(ppp*periods)\n\n# stores time vector\nts_vec = np.linspace( 0, (2*np.pi) * periods, num )\nDelta_ts = ts_vec[1] - ts_vec[0]\n\n# vector that stores the lossless solution time series\nys_lossless_vec = np.zeros( ( num, 2 ) )\n\n# solution of (Eq. 3) without \"friction\" term\nfor i, ts in tqdm( enumerate( ts_vec[1:] ) ):\n ys_lossless_vec[i+1] = ys_lossless_vec[i] + \\\n Delta_ts * f_star( ys_lossless_vec[i], ts, 0, 0, 0, [0.0] )", "_____no_output_____" ] ], [ [ "The blockage factor per turbine row $i$ is\n\n$$B_i=\\displaystyle \\frac{\\left( n_\\text{T} A_\\text{T}\\right)_i}{S_i}$$\n\nwhere $\\left( n_\\text{T} A_\\text{T}\\right)_i$ is the area of all turbines of row $i$, and $S_i$ is the cross-sectional area of the channel at section $i$. ", "_____no_output_____" ] ], [ [ "fig, (ax1, ax2) = mpl.subplots(1,2, figsize=(12, 4.5) )\nfig.subplots_adjust( wspace = 0.17 )\n\nB_local = 0.1\nn_step = 18\nfor n_mult in tqdm( ( 0, 1, 2, 4, 8, 16 ) ):\n\n n_rows = n_step * n_mult\n B_rows = [B_local] * n_rows\n\n # vector that stores the solution time series\n ys_vec = np.zeros( ( num, 2 ) )\n\n # solution of (Eq. 
3) with \"friction\" terms\n for i, ts in tqdm( enumerate( ts_vec[1:] ) ):\n\n ys_vec[i+1] = ys_vec[i] + \\\n Delta_ts * f_star( ys_vec[i], ts, \\\n Θ_f_star, Θ_T_star, Fr_0,\\\n B_rows )\n\n ax1.plot( ts_vec/twopi, ys_vec[:,0] )\n ax2.plot( ts_vec/twopi, ys_vec[:,1], label=\"$n_\\mathrm{rows}=%i$\" % (n_rows) )\n\nax1.plot( ts_vec/twopi, ys_lossless_vec[:,0], label=\"frictionless\" )\nax1.grid()\nax1.set_title( \"$B_i = %4.2f$\" % B_local )\nax1.set_xlim( ( 0, 4 ) )\nax1.set_ylim( ( -1.1, 1.1 ) )\nax1.set_xlabel( '$t^*\\!/\\,(2\\pi)$ [-]')\nax1.set_ylabel( '$Q^*$ [-]')\n# ax1.legend( loc='lower left', fontsize=12)\nax1.text(-0.15, 1.05, 'a)', transform=ax1.transAxes, size=16, weight='semibold')\n\nax2.plot( np.nan, np.nan, label=\"frictionless\" )\nax2.grid()\nax2.set_title( \"$B_i = %4.2f$\" % B_local )\nax2.set_xlim( ( 0, 4 ) )\nax2.set_xlabel( '$t^*\\!/\\,(2\\pi)$ [-]')\nax2.set_ylabel( '$E_\\mathrm{T}^*$ [-]')\nax2.legend( loc='upper left', fontsize=14, handlelength=2.9,labelspacing=0.25)\nax2.text(-0.15, 1.05, 'b)', transform=ax2.transAxes, size=16, weight='semibold');\n\nmpl.savefig( 'Friction_model.pdf', bbox_inches='tight', pad_inches=0.02);", "_____no_output_____" ] ], [ [ "# **Plot the solution as function of the number of turbines**", "_____no_output_____" ] ], [ [ "n_rows_lst = range( 0, 512+1, 8 ) # number of turbines [-]\nPs_lst = []\n\nB_local = 0.1\nys1_vec = np.zeros( ( num, 2 ) )\n\nfor n_rows in tqdm( n_rows_lst ):\n\n B_rows = [B_local]*n_rows\n\n # solution of (Eq. 3) with \"friction\" terms\n # the initial conditions are always (0,0)\n for i, ts in enumerate( ts_vec[1:] ):\n ys1_vec[i+1] = ys1_vec[i] + \\\n Delta_ts * f_star( ys1_vec[i], ts, \\\n Θ_f_star, Θ_T_star, Fr_0,\\\n B_rows )\n\n # last value of the last period minus the first value of the last period\n Ps = ( ys1_vec[-1,1] - ys1_vec[-ppp,1] )/ (2*np.pi)\n Ps_lst.append( Ps )\n\nmpl.plot( n_rows_lst, Ps_lst )\nmpl.xlim( (0,500) )\nmpl.title( \"$B_i = %4.2f$\" % B_local )\nmpl.xlabel( r\"number of rows, $n_\\mathrm{rows}$\")\nmpl.ylabel( r\"$P_\\mathrm{T}^*$\")\nmpl.grid()\nmpl.savefig( 'Friction_model_Power_nTurbines.pdf', bbox_inches='tight', pad_inches=0.02);", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e7c26088d355b8243e9ae3b9e4c156dd77e8139d
16,553
ipynb
Jupyter Notebook
code/0076.ipynb
chaoskey/notes
d9fffc3a55f01c1946eed1f8d6e54d3fb88c0561
[ "MIT" ]
4
2019-04-30T23:24:39.000Z
2022-02-15T05:18:11.000Z
code/0076.ipynb
chaoskey/notes
d9fffc3a55f01c1946eed1f8d6e54d3fb88c0561
[ "MIT" ]
null
null
null
code/0076.ipynb
chaoskey/notes
d9fffc3a55f01c1946eed1f8d6e54d3fb88c0561
[ "MIT" ]
1
2022-01-05T16:58:47.000Z
2022-01-05T16:58:47.000Z
24.342647
375
0.396061
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7c26367a182ebf985930c1330a5e72938c25dd0
9,280
ipynb
Jupyter Notebook
Phyton/Manejo_Datos.ipynb
jcms2665/100-Days-Of-ML-Code
92e16bd3adfd7b66f8f2510f2b86908706003c5e
[ "MIT" ]
null
null
null
Phyton/Manejo_Datos.ipynb
jcms2665/100-Days-Of-ML-Code
92e16bd3adfd7b66f8f2510f2b86908706003c5e
[ "MIT" ]
null
null
null
Phyton/Manejo_Datos.ipynb
jcms2665/100-Days-Of-ML-Code
92e16bd3adfd7b66f8f2510f2b86908706003c5e
[ "MIT" ]
null
null
null
27.537092
246
0.409375
[ [ [ "<a href=\"https://colab.research.google.com/github/jcms2665/100-Days-Of-ML-Code/blob/master/Phyton/Manejo_Datos.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "#**TOOLS FOR DEMOGRAPHY**\n", "_____no_output_____" ], [ "## Token y Drive", "_____no_output_____" ] ], [ [ "# Token para GEE\n\nimport ee\nfrom google.colab import auth\nauth.authenticate_user()\nee.Authenticate()\nee.Initialize()", "_____no_output_____" ], [ "# Vincular con Drive\n\nfrom google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ] ], [ [ " ## Manejo de Bases de Datos\n---\n", "_____no_output_____" ] ], [ [ "# Instalación de paquetes\n\n!pip install pyreadstat\n!pip install simpledbf", "Requirement already satisfied: pyreadstat in /usr/local/lib/python3.7/dist-packages (1.1.2)\nRequirement already satisfied: pandas>0.24.0 in /usr/local/lib/python3.7/dist-packages (from pyreadstat) (1.1.5)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>0.24.0->pyreadstat) (2.8.1)\nRequirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.7/dist-packages (from pandas>0.24.0->pyreadstat) (1.19.5)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>0.24.0->pyreadstat) (2018.9)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>0.24.0->pyreadstat) (1.15.0)\nRequirement already satisfied: simpledbf in /usr/local/lib/python3.7/dist-packages (0.2.6)\n" ], [ "# Cargar paquetes\n\nimport os # Directorios\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np # Data frame\nimport pandas as pd\nimport pyreadstat\n\nos.getcwd()", "_____no_output_____" ], [ "a=\"/content/drive/MyDrive/28 Bases/TMODULO.csv\"\ninegi=pd.read_csv(a)\nprint ('Datos importados:',len(inegi))\n", "Datos importados: 71404\n" ], [ "pd.crosstab(inegi.SEXO, inegi.P6_20, inegi.FAC_PER, aggfunc = sum)", "_____no_output_____" ], [ "inegi.SEXO", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7c26eb3797238458836834b1587bf5ea3deb067
382,214
ipynb
Jupyter Notebook
codes/generate_artificial_data.ipynb
biswesh456/Simulated-Dialog-Generation
b1f12e09c3e0be274f03e66eb08402e0f681f97a
[ "Apache-2.0" ]
6
2021-12-12T00:11:25.000Z
2022-03-02T23:23:58.000Z
codes/generate_artificial_data.ipynb
biswesh456/Simulated-Dialog-Generation
b1f12e09c3e0be274f03e66eb08402e0f681f97a
[ "Apache-2.0" ]
null
null
null
codes/generate_artificial_data.ipynb
biswesh456/Simulated-Dialog-Generation
b1f12e09c3e0be274f03e66eb08402e0f681f97a
[ "Apache-2.0" ]
null
null
null
71.575655
1,264
0.534028
[ [ [ "from transformers import GPT2Tokenizer, GPT2LMHeadModel, AutoTokenizer, AutoModelWithLMHead, BertTokenizer, LongformerTokenizer, LongformerModel\nimport torch, json, random\nimport numpy as np", "_____no_output_____" ], [ "import gpt.Model as userModel\nimport gpt_agent.Model as agentModel\nimport gpt_query.Model as queryModel\nimport bert_siamese.Model as userSiameseModel\nimport bert_siamese_agent.Model as agentSiameseModel", "_____no_output_____" ], [ "# domain_key = 'restaurant'\npercentage = '2.5'\npath = '/u/vineeku6/storage/gaurav/models/yz/moved/models/'\nsave_path = '/u/vineeku6/storage/gaurav/models/yz/moved/goal_oriented_learning_new/'\ndb_path = '/u/vineeku6/storage/gaurav/models/yz/moved/goal_oriented_learning_new/createData/multiwoz21/db/'\npredicted_file_path = '/u/vineeku6/storage/gaurav/models/yz/moved/predicted'\nuser_top_p = 0.45\nagent_top_p = 0.65\nratio = 1", "_____no_output_____" ], [ "d_train= json.load(open(db_path + 'train_db.json'))\nd_rest = json.load(open(db_path + 'restaurant_db.json'))\nd_hotel = json.load(open(db_path + 'hotel_db.json'))\nd_police = json.load(open(db_path + 'police_db.json'))\nd_hosp = json.load(open(db_path + 'hospital_db.json'))\nd_attr = json.load(open(db_path + 'attraction_db.json'))\nd_taxi = [{\n \"taxi_colors\" : [\"black\",\"white\",\"red\",\"yellow\",\"blue\",\"grey\"],\n \"taxi_types\": [\"toyota\",\"skoda\",\"bmw\",\"honda\",\"ford\",\"audi\",\"lexus\",\"volvo\",\"volkswagen\",\"tesla\"],\n \"taxi_phone\": [\"^[0-9]{10}$\"]\n}]\nentity_db_map = {'train':d_train, 'restaurant': d_rest, 'police': d_police, 'hospital': d_hosp, 'attraction': d_attr, 'taxi':d_taxi,'hotel':d_hotel}", "_____no_output_____" ], [ "query_key = {'train' : list(d_train[0].keys()), \n 'restaurant' : list(d_rest[0].keys()), \n 'hotel' : list(d_hotel[0].keys()),\n 'police' : list(d_police[0].keys()),\n 'hospital' : list(d_hosp[0].keys()),\n 'attraction' : list(d_attr[0].keys()),\n 'taxi' : ['taxi_colors', 'taxi_types', 'taxi_phone'],\n }", "_____no_output_____" ], [ "bert_model_name='bert-base-uncased'\nlongformer_model_name='allenai/longformer-base-4096'\nagent_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\nlongformer_tokenizer = LongformerTokenizer.from_pretrained(longformer_model_name)", "_____no_output_____" ], [ "def getStringKB(kb):\n if topic == 'police':\n return '[KB] Total = 1 [KB]'\n elif topic == 'taxi':\n return '[KB] Total = 1 [KB]'\n \n final_str = \"[KB] \" + \" Total = \" + str(len(kb)) + ' [KB]'\n# for k in kb:\n# final_str += k + \" : \" + str(kb[k]) + \" | \"\n\n# final_str += ' [KB]'\n return final_str", "_____no_output_____" ], [ "def getUserModel():\n# model_path = '/u/vineeku6/storage/gaurav/biswesh/models/multiwiz/user/gpt_hospital2/checkpoint_hred_user__best_128_512_0.0001_1.pth.tar'\n# model_path = '/u/vineeku6/storage/gaurav/biswesh/models/multiwiz/user/gpt_hotel2/checkpoint_hred_user__best_128_512_0.0001_1.pth.tar'\n model_path = path + '/user/'+percentage+'p/checkpoint_hred_user__best_128_512_1e-05_1.pth.tar' #takes only the last one context\n checkpoint = torch.load(model_path)\n load_args = checkpoint['args']\n model = userModel.GPT(load_args)\n model.load_state_dict(checkpoint['state_dict'])\n return model", "_____no_output_____" ], [ "def getAgentModel():\n model_path = path + '/agent/'+percentage+'p/checkpoint_hred_user__best_128_512_1.pth.tar'\n# model_path = '/u/vineeku6/storage/gaurav/biswesh/models/multiwiz/agent/gpt_hotel2/checkpoint_hred_user__best_128_512_1.pth.tar'\n# model_path = 
'/u/vineeku6/storage/gaurav/biswesh/models/multiwiz/agent/gpt_hospital2/checkpoint_hred_user__best_128_512_1.pth.tar'\n\n checkpoint = torch.load(model_path)\n load_args = checkpoint['args']\n model = agentModel.GPT(load_args)\n model.cuda()\n model.load_state_dict(checkpoint['state_dict'])\n return model", "_____no_output_____" ], [ "def getQueryModel():\n model_path = path + '/query/'+percentage+'p/checkpoint_hred_query__best.pth.tar'\n# model_path = '/u/vineeku6/storage/gaurav/biswesh/models/multiwiz/query/gpt_hotel2/checkpoint_hred_user__best_128_512_1.pth.tar'\n# model_path = '/u/vineeku6/storage/gaurav/biswesh/models/multiwiz/query/gpt_hospital2/checkpoint_hred_user__best_128_512_1.pth.tar'\n\n checkpoint = torch.load(model_path)\n load_args = checkpoint['args']\n model = queryModel.GPT(load_args)\n model.load_state_dict(checkpoint['state_dict'])\n return model", "_____no_output_____" ], [ "def getUserSiameseModel():\n model_path = path + '/siamese/user/'+percentage+'p/checkpoint_hred_user__best_128_128_1.pth.tar' # with neg as generated response\n# model_path = '/u/vineeku6/storage/gaurav/biswesh/models/multiwiz/siamese/user/train1/checkpoint_hred_user__best_128_128_1.pth.tar'\n checkpoint = torch.load(model_path)\n load_args = checkpoint['args']\n model = userSiameseModel.Siamese_margin(load_args)\n model.load_state_dict(checkpoint['state_dict'])\n return model", "_____no_output_____" ], [ "def getAgentSiameseModel():\n model_path = path + '/siamese/agent/'+percentage+'p/checkpoint_hred_user__best_128_128_1.pth.tar' # with neg as generated response\n\n# model_path = '/u/vineeku6/storage/gaurav/biswesh/models/multiwiz/siamese/agent/train2/checkpoint_hred_user__best_128_128_1.pth.tar'\n checkpoint = torch.load(model_path)\n load_args = checkpoint['args']\n model = agentSiameseModel.Siamese_margin(load_args)\n model.load_state_dict(checkpoint['state_dict'])\n return model", "_____no_output_____" ], [ "def getData():\n data_path = '/u/vineeku6/storage/gaurav/models/yz/moved/goal_oriented_learning_new//data/multiwiz/user/'+percentage+'p/goals_new.json'\n with open(data_path) as f:\n goals = json.load(f)\n \n data_path = '/u/vineeku6/storage/gaurav/models/yz/moved/goal_oriented_learning_new/data/multiwiz/agent/'+percentage+'p/final_state.json'\n with open(data_path) as f:\n state = json.load(f)\n \n data_path = '/u/vineeku6/storage/gaurav/models/yz/moved/goal_oriented_learning_new/data/multiwiz/agent/'+percentage+'p/train_input.json'\n with open(data_path) as f:\n context = json.load(f)\n \n \n return goals, state\n", "_____no_output_____" ], [ "user_gpt = getUserModel()\nuser_model = user_gpt.model\nuser_model.eval()\nuser_model.cuda()", "_____no_output_____" ], [ "agent_gpt = getAgentModel()\nagent_model = agent_gpt.model\nagent_model.eval()\nagent_model.cuda()", "_____no_output_____" ], [ "query_gpt = getQueryModel()\nquery_model = query_gpt.model\nquery_model.eval()\nquery_model.cuda()", "_____no_output_____" ], [ "user_siamese_model = getUserSiameseModel()\nuser_siamese_model.eval()\nuser_siamese_model.cuda()", "_____no_output_____" ], [ "agent_siamese_model = getAgentSiameseModel()\nagent_siamese_model.eval()\nagent_siamese_model.cuda()", "_____no_output_____" ], [ "goals, state = getData()", "_____no_output_____" ], [ "seed = 25\ndef set_seed():\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)", "_____no_output_____" ], [ "importantKeys = {}\nimportantKeys['train'] = ['leaveAt', 'destination', 'departure', 'arriveBy', 'day', 
'people']\nimportantKeys['hotel'] = ['name', 'area', 'parking', 'pricerange', 'stars', 'internet', 'type', 'stay', 'day', 'people']\nimportantKeys['restaurant'] = ['food', 'pricerange', 'name', 'area', 'people', 'day', 'time']\nimportantKeys['attraction'] = ['type', 'name', 'area']\nimportantKeys['taxi'] = ['leaveAt', 'destination', 'departure', 'arriveBy']\n\n\ndelexUserKeys = {}\ndelexUserKeys['train'] = ['leaveAt', 'destination', 'departure', 'arriveBy']\ndelexUserKeys['hotel'] = ['name']\ndelexUserKeys['restaurant'] = ['food', 'name', 'time']\ndelexUserKeys['attraction'] = ['type', 'name']\ndelexUserKeys['taxi'] = ['leaveAt', 'destination', 'departure', 'arriveBy']\ndelexUserKeys['hospital'] = ['department']\n\ndef formatQuery(query, context, state, prev_query):\n for d in ['train', 'restaurant', 'hotel', 'attraction', 'police', 'taxi', 'hospital']:\n if d in query.lower():\n topic = d\n \n if topic not in state:\n if '=' in prev_query:\n query = prev_query\n \n query = query.lower().replace('[q]', '').split('|')\n new_query = query[0].strip()\n if topic == 'police' or topic == 'hospital':\n return 'police'\n joined_context = \" \".join(context[1::2])\n intermediate_query = {}\n \n for q in query[1:]:\n q = q.split('=')\n if len(q)>=2:\n key = q[0].strip()\n if key == 'leaveat':\n key = 'leaveAt'\n if key == 'arriveby':\n key = 'arriveBy'\n val = \"\".join(q[1:]).strip()\n# print(val, key, state[topic])\n# if val != '*' and val != 'dontcare' and val != 'none':\n# if key == 'name' and not any([v in joined_context for v in val.split(' ')]):\n# val = '*'\n if val == '':\n val = '*'\n \n for k in delexUserKeys[topic]:\n if k.lower() == key.lower():\n if topic not in state:\n val = '*'\n if val != '*' and 'info' in state[topic] and k in state[topic]['info']:\n if state[topic]['info'][k] in joined_context:\n key = k\n val = state[topic]['info'][k]\n break\n if val != '*' and 'semi' in state[topic] and k in state[topic]['semi']:\n if state[topic]['semi'][k] in joined_context:\n key = k\n val = state[topic]['semi'][k]\n break\n if val != '*' and 'book' in state[topic] and k in state[topic]['book']:\n if state[topic]['book'][k] in joined_context:\n key = k\n val = state[topic]['book'][k]\n break\n if val != '*' and 'fail_info' in state[topic] and k in state[topic]['fail_info']:\n if state[topic]['fail_info'][k] in joined_context:\n key = k\n val = state[topic]['fail_info'][k]\n break\n if val != '*' and 'fail_book' in state[topic] and k in state[topic]['fail_book']:\n if state[topic]['fail_book'][k] in joined_context:\n key = k\n val = state[topic]['fail_book'][k]\n break\n \n \n if key.lower() == 'leaveat' or key.lower() == 'arriveby':\n if topic == 'train' or topic == 'taxi':\n if 'info' in state[topic]:\n if key in state[topic]['info']:\n if state[topic]['info'][key] in joined_context:\n val = state[topic]['info'][key]\n else:\n v = state[topic]['info'][key].replace(':','')\n if v in joined_context:\n val = state[topic]['info'][key]\n \n if topic in val:\n val = '*'\n \n intermediate_query[key] = val\n \n if 'departure' in intermediate_query and 'destination' in intermediate_query:\n if intermediate_query['departure'] == intermediate_query['destination']:\n if 'from' in joined_context or 'depart' in joined_context:\n intermediate_query['destination'] = '*'\n else:\n intermediate_query['departure'] = '*'\n \n for key in importantKeys[topic]: \n if key in intermediate_query:\n new_query += ' | ' + key + ' = ' + intermediate_query[key] \n else:\n new_query += ' | ' + key + ' = ' + '*'\n \n 
new_query = '[Q] ' + new_query + ' [Q]'\n\n return new_query", "_____no_output_____" ], [ "def getKB(query, utt_no, fail_book, failed):\n not_kb = ['people', 'time', 'stay']\n \n query = query.lower().replace('[q]', '')\n for d in ['train', 'restaurant', 'hotel', 'attraction', 'police', 'taxi', 'hospital']:\n if d in query:\n topic = d\n \n if topic != 'train':\n not_kb.append('day')\n \n \n db = entity_db_map[topic]\n final_query = {}\n fail_query = {}\n \n for q in query.split(' | ')[1:]:\n q = q.split(' = ')\n q[0] = q[0].strip()\n q[1] = q[1].strip()\n \n if q[1] != 'not mentioned' and q[1]!='dontcare' and q[1]!='none' and q[1]!='' and q[1]!='*':\n fail_query[q[0]] = q[1]\n \n for k in query_key[topic]:\n if q[0] == k and q[1] != 'not mentioned' and q[1]!='dontcare' and q[1]!='none' and q[1]!='' and q[1]!='*':\n final_query[k] = q[1]\n \n if topic in fail_book:\n for k in fail_book[topic]:\n if k in fail_query and fail_book[topic][k] == fail_query[k] and failed[topic] == False:\n return '[KB] Total = 0 [KB]', {}\n \n if 'name' in final_query and final_query['name'] != '*':\n return '[KB] Total = 1 [KB]', {}\n \n ret = []\n for row in db:\n match = True\n for k in final_query.keys():\n# print(k, final_query[k], row[k])\n if(k == \"arriveBy\"):\n try:\n if int(final_query[k][:2]) > int(row[k][:2]):\n match=True\n elif int(final_query[k][:2]) == int(row[k][:2]) and int(final_query[k][3:]) >= int(row[k][3:]):\n match=True\n else:\n match=False\n break\n except: \n match=True\n elif(k == \"leaveAt\"):\n try:\n if int(row[k][:2]) > int(final_query[k][:2]):\n match=True\n elif int(row[k][:2]) == int(final_query[k][:2]) and int(row[k][3:]) >= int(final_query[k][3:]):\n match=True\n else:\n match=False\n break \n except:\n match=True\n else:\n val = row[k]\n semi = final_query\n domain = topic\n val = val.strip()\n semi[k] = semi[k].strip()\n if domain == 'attraction' and k == 'type':\n semi[k] = semi[k].replace(' ', '')\n val = val.replace(' ', '')\n if semi[k] == 'pool':\n semi[k] = 'swimmingpool'\n if val == 'pool':\n val = 'swimmingpool'\n if semi[k] == 'sports' or semi[k] == 'multiplesports':\n semi[k] = 'mutliple sports'\n if val == 'mutliplesports' or val == 'sports':\n val = 'mutliple sports'\n if k == 'parking' or k == 'internet':\n semi[k] = semi[k].replace('free', 'yes')\n val = val.replace('free', 'yes')\n if k == 'food':\n semi[k] = semi[k].replace('south indian', 'indian')\n val = val.replace('south indian', 'indian')\n if k == 'name':\n \n val = val.replace('the', '')\n semi[k] = semi[k].replace('the', '')\n val = val.replace(\"b & b\", \"bed and breakfast\")\n semi[k] = semi[k].replace(\"b & b\", \"bed and breakfast\")\n val = val.replace(\"restaurant\", \"\")\n semi[k] = semi[k].replace(\"restaurant\", \"\")\n if \"hotel\" in val and 'gonville' not in val:\n val = val.replace(\" hotel\", \"\")\n if \"hotel\" in semi[k] and 'gonville' not in semi[k]:\n semi[k] = semi[k].replace(\"hotel\", \"\")\n \n \n if k != 'name' and (val!=semi[k]):\n match=False\n break\n\n if(match):\n ret.append(row)\n\n if len(ret) == 0:\n return \"[KB] \" + \" Total = 0 [KB]\", {}\n else:\n# return '[KB] Total : ' + str(len(ret)) + ' ' + str(getStringKB(ret[0])), ret[0]\n return \"[KB] \" + \" Total = \" + str(len(ret)) + ' [KB]', ret[0]", "_____no_output_____" ], [ "BSKeys = {}\nBSKeys['hotel'] = ['name', 'area', 'parking', 'pricerange', 'stars', 'internet']\nBSKeys['restaurant'] = ['food', 'pricerange', 'area']\nBSKeys['attraction'] = ['type', 'area']\nBSKeys['train'] = ['leaveAt', 'destination', 
'departure', 'arriveBy']", "_____no_output_____" ], [ "def adjustScore(query, prev_query, last_context, response, kb, turn_no, entire_context):\n for d in ['train', 'restaurant', 'hotel', 'attraction', 'police', 'taxi', 'hospital']:\n if d in query.lower():\n domain_key = d\n query = query.lower().replace('[q]', '').split('|')\n query = query[1:]\n belief_state = {}\n score = 0\n for q in query:\n q = q.split('=')\n if len(q)>=2:\n key = q[0].strip()\n belief_state[key] = \"\".join(q[1:]).strip()\n \n prev_query = prev_query.lower().replace('[q]', '').split('|')[1:]\n prev_belief_state = {}\n score = 0\n for q in prev_query:\n q = q.split('=')\n if len(q)>=2:\n key = q[0].strip()\n prev_belief_state[key] = \"\".join(q[1:]).strip()\n \n requestables = ['phone', 'number', 'address', 'postcode', ' code', 'reference', 'id']\n for r in requestables:\n if r == 'number':\n r = 'phone'\n if r == ' code':\n r = 'postcode'\n if r in last_context:\n if ' ['+domain_key+'_'+r+']' in response:\n score += 0.5 \n if r not in last_context:\n if ' ['+domain_key+'_'+r+']' in response:\n score += -0.1\n \n enquiry = ['area', 'name', 'price', 'internet', 'fee', 'travel time', 'type']\n for e in enquiry:\n if e == 'travel time':\n if 'minute' in response:\n score += 0.3\n elif e == 'fee':\n if 'value_pricerange' in response:\n score +=0.2\n elif e == 'area':\n if 'value_area' in response:\n score +=0.2\n elif e in last_context and e in response:\n score += 0.2\n \n if 'name' in belief_state and '[value_count] ['+domain_key+'_name]' in response:\n return -0.5\n \n bs_count = 0\n if domain_key in BSKeys:\n for b in BSKeys[domain_key]:\n if b in belief_state and b in prev_belief_state:\n if belief_state[b] != '*' and prev_belief_state[b] == '*':\n if '['+domain_key+'_'+'name]' in response and 'book' not in last_context:\n score += 1\n if domain_key == 'train'and '['+domain_key+'_'+'id]' in response and 'book' not in last_context:\n score += 1\n if b in belief_state and belief_state[b] != '*':\n bs_count += 1\n \n if 'name' in belief_state and bs_count == len(BSKeys[domain_key]) and '0' not in kb:\n if '['+domain_key+'_'+'name]' not in entire_context and '['+domain_key+'_'+'name]' in response: \n score += 1\n \n if domain_key == 'train' and bs_count == len(BSKeys[domain_key]) and '0' not in kb and 'book' not in last_context:\n if '['+domain_key+'_'+'id]' not in entire_context and '['+domain_key+'_'+'id]' in response: \n score += 0.4\n \n if 'name' in belief_state and '1 ' in kb:\n if '['+domain_key+'_'+'name]' not in entire_context and '['+domain_key+'_'+'name]' in response: \n score += 1\n \n # do not ask about slots if name is already provided\n if 'name' in belief_state and (belief_state['name'] != '*' or '1' in kb):\n if '['+domain_key+'_'+'name]' not in entire_context and '['+domain_key+'_'+'name]' in response:\n if 'value_count' in response:\n return -0.2\n return 0.4\n return 0\n \n if '0' in kb and domain_key != 'train':\n if ' no ' in response:\n score += 0.4\n elif ' not ' in response:\n score += 0.4\n elif 'unfortunate' in response:\n score += 0.4\n elif 'unavailable' in response:\n score += 0.4\n elif 'unsuccessful' in response:\n score += 0.4\n elif 'sorry' in response:\n score += 0.4\n elif domain_key != 'train':\n if ' no ' in response:\n score += -0.7\n elif ' not ' in response:\n score += -0.7\n elif 'unfortunate' in response:\n score += -0.7\n elif 'unavailable' in response:\n score += -0.7\n elif 'unsuccessful' in response:\n score += -0.7\n elif 'sorry' in response:\n score += -0.7\n \n# if 
'book' in context and 'book' in response:\n# if '0' not in response and ' successful' in response:\n# score += 0.3\n \n Flag = True\n if turn_no > 1: \n Flag = False\n \n if domain_key == 'attraction' and turn_no>0:\n Flag = False\n \n if domain_key == 'taxi' and turn_no>1:\n if '[taxi_type]' not in entire_context and '[taxi_type]' in response:\n score += 0.2\n Flag = False\n \n if 'name] -s' in response:\n score -= 0.2\n# if turn_no > 0:\n# if 'name' in belief_state and '['+domain_key+'_'+'name]' not in entire_context and '['+domain_key+'_'+'name]' in response:\n# score += 0.2\n \n if turn_no > 1:\n if domain_key == 'train' and '['+domain_key+'_'+'id]' not in entire_context and '['+domain_key+'_'+'id]' in response and 'book' not in last_context:\n score += 0.25\n \n if 'when' in response:\n if ' leave' in response:\n if 'leaveat' in belief_state and belief_state['leaveat'] != '*':\n score -= 0.25\n elif 'leaveat' in belief_state and Flag:\n print('q1')\n score += 0.2\n\n if ' arrive' in response:\n if 'arriveby' in belief_state and belief_state['arriveby'] != '*':\n score += -0.25\n elif 'arriveby' in belief_state and Flag:\n score += 0.2\n print('q2')\n\n if ('what' in response and 'what about' not in response) or 'do you' in response or 'is there' in response:\n if ' time' in response and (' leave' in response or ' depart' in response):\n if 'leaveat' in belief_state and belief_state['leaveat'] != '*':\n score += -0.25\n elif 'leaveat' in belief_state and Flag:\n score += 0.2\n print('q3')\n\n elif ' time' in response and ' arrive' in response:\n if 'arriveby' in belief_state and belief_state['arriveby'] != '*':\n score += -0.25\n elif 'arriveby' in belief_state and Flag:\n score += 0.2\n print('q4')\n\n elif ' destination' in response:\n if 'destination' in belief_state and belief_state['destination'] != '*':\n score += -0.3\n elif 'destination' in belief_state and Flag:\n score += 0.4\n print('q6')\n\n elif ' departure' in response:\n if 'departure' in belief_state and belief_state['departure'] != '*':\n score += -0.3\n elif 'departure' in belief_state and Flag:\n score += 0.2\n print('q7')\n\n elif ' day' in response:\n if 'day' in belief_state and belief_state['day'] != '*':\n score += -0.25\n elif 'day' in belief_state and Flag:\n score += 0.15\n if 'book' in last_context or 'reserv' in last_context:\n score += 0.3\n print('q8')\n \n elif ' area' in response or ' part of town' in response:\n if 'area' in belief_state and belief_state['area'] != '*':\n score += -0.25\n elif 'area' in belief_state and Flag:\n score += 0.1\n print('q9')\n \n elif ' type of food' in response or ' kind of food' in response:\n if 'food' in belief_state and belief_state['food'] != '*':\n score += -0.25\n elif 'food' in belief_state:\n score += 0.3\n print('q10')\n \n elif ' price range' in response:\n if 'pricerange' in belief_state and belief_state['pricerange'] != '*':\n score += -0.25\n elif 'pricerange' in belief_state and Flag:\n score += 0.2\n print('q11')\n\n if 'where' in response:\n if ' depart' in response or ' leaving from' in response or ' departure' in response or 'travelling from' in response or 'to and from' in response:\n if 'departure' in belief_state and belief_state['departure'] != '*':\n score += -0.3\n elif 'departure' in belief_state and Flag:\n score += 0.2\n print('q12')\n\n elif ' destination' in response or ' going to' or ' travelling to' in response or 'to and from' in response:\n if 'destination' in belief_state and belief_state['destination'] != '*':\n score += -0.3\n elif 
'destination' in belief_state and Flag:\n score += 0.4\n print('q13')\n\n if 'how many' in response:\n if ' people' in response or ' ticket' in response:\n if 'people' in belief_state and belief_state['people'] != '*':\n score += -0.25\n elif 'people' in belief_state and ('thank' not in response or 'bye' not in response):\n if 'book' in last_context or 'reserv' in last_context:\n score += 0.5\n\n if 'which' in response:\n if ' day' in response:\n if 'day' in belief_state and belief_state['day'] != '*':\n score += -0.25\n elif 'day' in belief_state and Flag:\n score += 0.15\n print('q14')\n \n elif ' area' in response or ' part of town' in response:\n if 'area' in belief_state and belief_state['area'] != '*':\n score += -0.25\n elif 'area' in belief_state and Flag:\n score += 0.1\n print('q15')\n\n elif ' type of food' in response or ' kind of food' in response:\n if 'food' in belief_state and belief_state['food'] != '*':\n score += -0.25\n elif 'food' in belief_state and Flag:\n score += 0.3\n print('q16')\n \n elif ' price range' in response:\n if 'pricerange' in belief_state and belief_state['pricerange'] != '*':\n score += -0.25\n elif 'pricerange' in belief_state and Flag:\n score += 0.15\n print('q16')\n \n return score", "_____no_output_____" ], [ "def formatResponse(agent_response):\n agent_response = agent_response.replace(',', ' , ')\n agent_response = agent_response.replace('.', ' . ')\n agent_response = agent_response.replace('?', ' ? ')\n agent_response = agent_response.replace('!', ' ! ')\n agent_response = agent_response.replace('[', ' [')\n agent_response = agent_response.replace(']', '] ')\n agent_response = agent_response.replace(' ', ' ')\n \n# requestables = ['phone', 'number', 'address', 'postcode', 'reference', 'id', 'name']\n \n# for r in requestables:\n# for d in ['train', 'attraction', 'restaurant', 'hotel']:\n# if d != domain_key:\n# agent_response = agent_response.replace('['+d+'_'+r+']', '['+domain_key+'_'+r+']') \n\n \n agent_response = agent_response.strip()\n return agent_response", "_____no_output_____" ], [ "\n\ndef formatUserResponse(user_response, state, user_failed):\n user_response = user_response.strip()\n for t in ['train', 'restaurant', 'hotel', 'attraction', 'taxi']:\n if t not in state:\n if t == 'train' and 'taxi' in state and '[train_' in user_response:\n user_response = user_response.replace('[train_', '[taxi_')\n if t == 'taxi' and 'train' in state and '[taxi_' in user_response:\n user_response = user_response.replace('[taxi_', '[train_')\n if t == 'hotel' and 'restaurant' in state and '[hotel_' in user_response:\n user_response = user_response.replace('[hotel_', '[restaurant_')\n if t == 'hotel' and 'attraction' in state and '[hotel_' in user_response:\n user_response = user_response.replace('[hotel_', '[attraction_')\n if t == 'restaurant' and 'hotel' in state and '[restaurant_' in user_response:\n user_response = user_response.replace('[restaurant_', '[hotel_')\n if t == 'restaurant' and 'attraction' in state and '[restaurant_' in user_response:\n user_response = user_response.replace('[restaurant_', '[attraction_')\n if t == 'attraction' and 'hotel' in state and '[attraction_' in user_response:\n user_response = user_response.replace('[attraction_', '[hotel_')\n if t == 'attraction' and 'restaurant' in state and '[attraction_' in user_response:\n user_response = user_response.replace('[attraction_', '[restaurant_')\n \n for topic in ['train', 'restaurant', 'hotel', 'attraction', 'taxi']:\n for k in delexUserKeys[topic]:\n if 
topic+'_'+k.lower() in user_response and topic in list(state.keys()) and 'fail_info' in state[topic] and k in state[topic]['fail_info'] and user_failed[topic] == True:\n user_response = user_response.replace('['+topic+'_'+k.lower()+']', state[topic]['fail_info'][k])\n user_failed[topic] = False\n elif topic+'_'+k.lower() in user_response and topic in list(state.keys()) and 'info' in state[topic] and k in state[topic]['info']:\n user_response = user_response.replace('['+topic+'_'+k.lower()+']', state[topic]['info'][k])\n elif topic+'_'+k.lower() in user_response and topic in list(state.keys()) and 'semi' in state[topic] and k in state[topic]['semi']:\n user_response = user_response.replace('['+topic+'_'+k.lower()+']', state[topic]['semi'][k])\n elif topic+'_'+k.lower() in user_response and topic in list(state.keys()) and 'fail_book' in state[topic] and k in state[topic]['fail_book'] and user_failed[topic] == True:\n user_response = user_response.replace('['+topic+'_'+k.lower()+']', state[topic]['fail_book'][k])\n user_failed[topic] = False\n elif topic+'_'+k.lower() in user_response and topic in list(state.keys()) and 'book' in state[topic] and k in state[topic]['book']:\n user_response = user_response.replace('['+topic+'_'+k.lower()+']', state[topic]['book'][k])\n elif topic+'_'+k.lower() in user_response:\n user_response = user_response.replace('['+topic+'_'+k.lower()+']', k.lower())\n \n \n \n if len(user_response) > 0 and user_response[-1] != '.' and user_response[-1] != '?' and user_response[-1] != '!':\n user_response = user_response + '.'\n return user_response, user_failed", "_____no_output_____" ], [ "def adjustUserScore(goal, response, turn_no, entire_context):\n score = 0\n requestables = ['phone', 'number', 'address', 'postcode', 'reference', 'id']\n enquiry = ['area', 'name', 'price', 'internet', 'fee', 'travel time']\n bs_count = 0\n for r in requestables:\n if r in response:\n bs_count += 1\n if r in goal and r not in entire_context and r in response:\n score = 0.2\n if ('thank' in response or 'bye' in response) and r not in entire_context:\n score = -0.2\n if r in goal and r in entire_context and r in response:\n score = -0.1 \n for e in enquiry:\n if e in response:\n bs_count += 1\n if ('thank' in response or 'bye' in response) and e not in entire_context:\n score = -0.2\n if bs_count > 2:\n score = -0.2\n \n return score", "_____no_output_____" ], [ "len(goals)", "_____no_output_____" ], [ "set_seed()\nfinal_contexts = []\nfinal_queries = []\nfinal_kbs = []\n\nfor g in range(len(goals)):\n# print(state[domain_key][0])\n fail_book = {}\n failed = {}\n user_failed = {}\n print(state[g])\n for key in state[g]:\n if 'fail_book' in state[g][key]:\n fail_book[key] = state[g][key]['fail_book']\n failed[key] = False\n user_failed[key] = True\n elif 'fail_info' in state[g][key]:\n fail_book[key] = state[g][key]['fail_info']\n failed[key] = False\n user_failed[key] = True\n print(fail_book)\n else:\n fail_book[key] = {}\n failed[key] = False\n user_failed[key] = True\n \n flag = 0\n context = ['[st@rt]']\n queries = []\n kbs = []\n resp_no = 10\n goal = goals[g]\n police = False\n# goal = 'You are looking for a [key] restaurant [key] . The restaurant should be in the [key] cheap [key] price range and should serve [key] indian [key] food . Restaurant should be in [key] north [key] . Once you find the [key] restaurant [key] you want to book a table for [key] 5 people [key] at [key] 11:30 [key] on [key] sunday [key] . If the booking fails how about [key] 10:30 [key] . 
Make sure you get the [key] reference number [key] '\n start_index = 0\n prev_query = '[Q] empty [Q]'\n try:\n for i in range(9):\n new_user_input = user_tokenizer.encode(goal + ' GOAL ' + \" \".join(context), return_tensors='pt')\n if len(new_user_input)>1022:\n flag = 1\n # response = user_model.generate(new_user_input.cuda(), max_length=500, pad_token_id=user_tokenizer.eos_token_id, num_beams=20, num_return_sequences=resp_no, early_stopping=True)\n response = user_model.generate(new_user_input[:1022].cuda(), max_length=1000, pad_token_id=user_tokenizer.eos_token_id, do_sample=True, top_k=resp_no, top_p=user_top_p, num_return_sequences=resp_no, early_stopping=True)\n\n max_score = 0\n user_response = ''\n for j in range(resp_no):\n response_j = user_tokenizer.decode(response[j][new_user_input.shape[1]:]).replace('<|endoftext|>', '')\n response_j = response_j.replace('[User]', '')\n # response_j = response_j.replace('User', '')\n # response_j = response_j.replace(']', '')\n if response_j.replace(' ', '') == '' or response_j.replace(' ', '') == '.' or response_j.replace(' ', '') == ',':\n pass\n elif len(response_j.replace('?', '.').split('.')) > 4:\n pass\n else:\n response_j = response_j.strip()\n if response_j[0] == ',' or response_j[0] == '!':\n response_j = response_j[1:]\n response_j = '[User] ' + response_j.strip()\n\n input_context = \" \".join(context)\n encode = longformer_tokenizer.batch_encode_plus([[input_context, response_j]])\n input_ids = torch.tensor(encode.input_ids[:1022]).cuda()\n attention_mask = torch.tensor(encode.attention_mask[:1022]).cuda()\n\n score = user_siamese_model(input_ids, attention_mask)\n score += adjustUserScore(goal, response_j, i, \" \".join(context))\n if response_j == '':\n score = -1\n if score > max_score and len(response_j)<1023:\n user_response = response_j\n max_score = score\n # print('response : ', j, \" : \", response_j, \" score \", score.item(), '\\n')\n # print('*'*100)\n # user_response = input('User : ')\n if user_response == '':\n flag = 1\n\n user_final_response, user_failed = formatUserResponse(user_response, state[g], user_failed)\n context.append(user_final_response)\n\n if user_response.strip().lower().find('e*d') != -1 :\n break\n\n# query_input = query_tokenizer.encode(\" \".join(context)[:1022], return_tensors='pt')\n ct = (' '.join(context[-1:]) + ' ' + prev_query)[:1000] + ' | ' \n query_input = query_tokenizer.encode(ct, return_tensors='pt')\n query_response = query_model.generate(query_input.cuda(), max_length=1022, pad_token_id=query_tokenizer.eos_token_id, num_beams=5, num_return_sequences=5, early_stopping=True)\n\n query = agent_tokenizer.decode(query_response[0][query_input.shape[1]:]).replace('<|endoftext|>', '')\n# print('1 ', query, '\\n')\n temp_query = query\n query = formatQuery(query, context, state[g], prev_query)\n queries.append(query)\n prev_query = query\n if query == 'police':\n# i=10\n# continue\n police = True\n break\n# print('Prev ', prev_query, '\\n')\n print(query, '\\n')\n\n kb, kb_dict = getKB(query, i, fail_book, failed)\n kbs.append(kb)\n # print('KB : ', kb)\n new_agent_input = agent_tokenizer.encode(query + \" \" + kb + \" \" + \" \".join(context), return_tensors='pt')\n if len(new_agent_input)>1022:\n flag = 1\n response = agent_model.generate(new_agent_input[:1022].cuda(), max_length=1022, pad_token_id=agent_tokenizer.eos_token_id, do_sample=True, top_k=resp_no, top_p=agent_top_p, num_return_sequences=resp_no, early_stopping=True)\n\n max_score = 0\n agent_response = ''\n if flag == 0:\n for 
j in range(len(response)):\n response_j = agent_tokenizer.decode(response[j][new_agent_input.shape[1]:]).replace('<|endoftext|>', '')\n response_j = response_j.replace('[Agent]', '')\n if response_j.replace(' ', '') == '':\n pass\n elif len(response_j.replace('?', '.').split('.')) > 3:\n pass\n else:\n response_j = '[Agent]' + response_j\n encode = longformer_tokenizer.batch_encode_plus([[query + ' ' + kb + ' ' + \" \".join(context), response_j]])\n input_ids = torch.tensor(encode.input_ids[:1022]).cuda()\n attention_mask = torch.tensor(encode.attention_mask[:1022]).cuda()\n\n score = agent_siamese_model(input_ids, attention_mask)\n # print(score.item())\n score = score + adjustScore(query, prev_query, context[-1], response_j, kb, i, \" \".join(context[start_index:]), )\n if response_j == '':\n score = -1\n if score > max_score and len(response_j)<1023:\n agent_response = response_j\n max_score = score\n\n # print('response : ', j, \" : \", response_j, \" score \", score.item(), '\\n')\n\n response = agent_model.generate(new_agent_input.cuda(), max_length=1000, pad_token_id=agent_tokenizer.eos_token_id, num_beams=1, num_return_sequences=1, early_stopping=True)\n for j in range(len(response)):\n response_j = agent_tokenizer.decode(response[j][new_agent_input.shape[1]:]).replace('<|endoftext|>', '')\n\n if response_j.replace(' ', '') == '':\n pass\n elif len(response_j.replace('?', '.').split('.')) > 3:\n pass\n else:\n encode = longformer_tokenizer.batch_encode_plus([[query + ' ' + kb + ' ' + \" \".join(context), response_j]])\n input_ids = torch.tensor(encode.input_ids).cuda()\n attention_mask = torch.tensor(encode.attention_mask).cuda()\n\n score = agent_siamese_model(input_ids, attention_mask)\n score = score + adjustScore(query, prev_query, context[-1], response_j, kb, i, \" \".join(context[start_index:]), )\n\n if score > max_score:\n agent_response = response_j\n max_score = score\n # print('response : ', j, \" : \", response_j, \" score \", score.item(), '\\n')\n\n \n if agent_response == '':\n flag = 1\n agent_response = formatResponse(agent_response)\n if flag == 0:\n context.append(agent_response)\n if any(word in agent_response for word in [' no ', ' not ', 'unavailable', 'unfortunate', 'sorry', 'unable', 'unsuccessful']):\n for d in ['train', 'restaurant', 'hotel', 'attraction', 'police', 'taxi', 'hospital']:\n if d in query.lower():\n temp_topic = d \n failed[temp_topic] = True\n start_index = i\n\n if flag == 1 and police == False:\n print(\"GOAL : \", g, ' ', goal)\n final_ctx = []\n for i,ctx in enumerate(context):\n if ctx.find('[st@rt]') == -1 and ctx.find('e*d') == -1:\n final_ctx.append(ctx)\n print(ctx)\n\n final_contexts.append(final_ctx)\n final_queries.append(queries)\n final_kbs.append(kbs)\n print('*'*100)\n break\n\n if flag == 0 and police == False:\n print(\"GOAL : \", g, ' ', goal)\n final_ctx = []\n for i,ctx in enumerate(context):\n if ctx.find('[st@rt]') == -1 and ctx.find('e*d') == -1:\n final_ctx.append(ctx)\n print(ctx)\n\n final_contexts.append(final_ctx)\n final_queries.append(queries)\n final_kbs.append(kbs)\n print('*'*100)\n except:\n continue\n\n ", "{'restaurant': {'info': {'food': 'italian', 'pricerange': 'expensive', 'area': 'south'}, 'reqt': ['address', 'postcode', 'phone'], 'fail_info': {}}, 'taxi': {'info': {'leaveAt': '16:45'}, 'reqt': ['car type', 'phone'], 'fail_info': {}}, 'hotel': {'info': {'name': 'finches bed and breakfast'}, 'fail_info': {'name': 'acorn guest house'}, 'book': {'people': '8', 'day': 'thursday', 'invalid': False, 'stay': 
'5'}, 'fail_book': {}}}\n{'restaurant': {}}\n{'restaurant': {}, 'taxi': {}}\n[Q] hotel | name = * | area = * | parking = * | pricerange = * | stars = * | internet = yes | type = * | stay = * | day = * | people = * [Q] \n\nq11\nq11\n[Q] hotel | name = * | area = * | parking = * | pricerange = * | stars = * | internet = yes | type = * | stay = 8 | day = thursday | people = 8 [Q] \n\n[Q] restaurant | food = * | pricerange = * | name = * | area = * | people = 8 | day = thursday | time = * [Q] \n\n[Q] restaurant | food = * | pricerange = * | name = * | area = * | people = 8 | day = thursday | time = * [Q] \n\n[Q] taxi | leaveAt = * | destination = * | departure = * | arriveBy = * [Q] \n\n[Q] taxi | leaveAt = 16:45 | destination = * | departure = * | arriveBy = * [Q] \n\n[Q] taxi | leaveAt = 16:45 | destination = * | departure = * | arriveBy = * [Q] \n\n[Q] taxi | leaveAt = 16:45 | destination = * | departure = * | arriveBy = * [Q] \n\nGOAL : 0 You are planning your trip in Cambridge . You are looking for a [key] particular hotel [key] . Its name is called [key] finches bed and breakfast [key] . Once you find the [key] hotel [key] you want to book it for [key] 8 people [key] and [key] 5 nights [key] starting from [key] thursday [key] . Make sure you get the [key] reference number [key] . You are also looking for a [key] restaurant [key] . The restaurant should serve [key] italian [key] food and should be in the [key] expensive [key] price range . The restaurant should be in the [key] south [key] . Make sure you get [key] address [key] [key] postcode [key] and [key] phone number [key] . You also want to book a [key] taxi [key] to commute between the two places . You want to leave the [key] hotel [key] by [key] 16:45 [key] . Make sure you get [key] contact number [key] and [key] car type [key] \n[User] i am looking for a particular hotel. its name is called acorn guest house.\n[Agent] there is a [hotel_name] in the [value_area] , it s [value_pricerange] -ly priced .\n[User] i need it for 8 people and 5 nights starting from thursday.\n[Agent] [hotel_name] is [value_count] stars and it s [value_pricerange] .\n[User] i also need a restaurant that serves italian food.\n[Agent] [restaurant_name] is a great [value_food] restaurant in the [value_area] .\n[User] can you tell me the address and postcode for that?\n[Agent] [restaurant_address] is in the [value_area] and postcode [restaurant_postcode] .\n[User] great! i also need a taxi to commute between the two places.\n[Agent] [taxi_type] will arrive at [value_time] and travel [value_count] minutes . would you like to book a taxi ?\n[User] yes, i would like to leave the hotel by 16:45.\n[Agent] [hotel_name] is in the [value_area] and arrives at [value_time] . would you like to book it for me ?\n[User] yes, please book it for 8 people for 5 nights.\n[Agent] i have booked it for you , reference number is [taxi_reference] . is there anything else i can help you with today ?\n[User] no that will be all i need today. thanks!\n[Agent] you are welcome ! 
have a wonderful day !\n****************************************************************************************************\n{'train': {'info': {'destination': 'cambridge', 'day': 'sunday', 'arriveBy': '16:00', 'departure': 'london liverpool street'}, 'fail_info': {}, 'book': {'invalid': True, 'people': '6'}, 'fail_book': {}}, 'hotel': {'info': {'internet': 'yes', 'pricerange': 'moderate', 'area': 'east'}, 'fail_info': {}, 'book': {'stay': '2', 'day': 'sunday', 'invalid': False, 'people': '6'}, 'fail_book': {}}}\n[Q] train | leaveAt = * | destination = * | departure = london liverpool street | arriveBy = * | day = sunday | people = * [Q] \n\nq3\nq3\nq3\nq3\nq3\n[Q] train | leaveAt = * | destination = * | departure = london liverpool street | arriveBy = 16:00 | day = sunday | people = * [Q] \n\nq3\n[Q] train | leaveAt = * | destination = * | departure = london liverpool street | arriveBy = 16:00 | day = sunday | people = 6 [Q] \n\nGOAL : 1 You are looking for information in Cambridge . You are looking for a [key] train [key] . The train should depart from [key] london liverpool street [key] and should leave on [key] sunday [key] . The train should go to [key] cambridge [key] and should [key] arrive by 16:00 [key] . Once you find the train you want to make a booking for [key] 6 people [key] . Make sure you get the [key] reference number [key] . You are also looking for a [key] place to stay [key] . The hotel should be in the [key] moderate [key] price range and should be in the [key] east [key] . The hotel should [key] include free wifi [key] . Once you find the [key] hotel [key] you want to book it for [key] the same group of people [key] and [key] 2 nights [key] starting from [key] the same day [key] . Make sure you get the [key] reference number [key] \n[User] i am looking for information on a train leaving london liverpool street on sunday.\n[Agent] there are [value_count] trains leaving [value_place] on [value_day] and arriving in [value_place] at [value_time] . do you have a specific time you would like to leave or arrive by ?\n[User] i would like to arrive by 16:00.\n[Agent] [train_id] leaves at [value_time] and arrives at [value_place] at [value_time] . would you like to book that for me ?\n[User] yes, please book it for 6 people for 6 people.\n[Agent] booking was successful . reference number is [train_reference] .\n****************************************************************************************************\n{'hotel': {'info': {'name': \"rosa's bed and breakfast\"}, 'restaurant': False, 'fail_info': {'name': 'express by holiday inn cambridge'}, 'hotel': \"rosa's bed and breakfast\", 'book': {'pre_invalid': True, 'people': '7', 'day': 'tuesday', 'invalid': False, 'stay': '3'}, 'fail_book': {'day': 'saturday'}}}\n[Q] hotel | name = * | area = * | parking = * | pricerange = * | stars = * | internet = yes | type = * | stay = * | day = * | people = * [Q] \n\nq11\nq11\nq11\n[Q] hotel | name = * | area = * | parking = * | pricerange = * | stars = 3 | internet = yes | type = * | stay = 7 | day = saturday | people = 3 [Q] \n\n[Q] hotel | name = * | area = * | parking = * | pricerange = * | stars = 3 | internet = yes | type = * | stay = 7 | day = saturday | people = 3 [Q] \n\n[Q] hotel | name = * | area = * | parking = * | pricerange = * | stars = 3 | internet = yes | type = * | stay = 7 | day = saturday | people = 3 [Q] \n\nGOAL : 2 You are looking for a [key] particular hotel [key] . Its name is called [key] rosa's bed and breakfast [key] . 
Once you find the [key] hotel [key] you want to book it for [key] 7 people [key] and [key] 3 nights [key] starting from [key] saturday [key] . If the booking fails how about different [key] hotel [key] in the same [key] pricerange [key] . Make sure you get the [key] reference number [key] \n[User] i'm looking for a particular hotel. its name is called express by holiday inn cambridge.\n[Agent] [hotel_name] is a [value_pricerange] hotel located in the [value_area] of town . would you like to book it ?\n[User] yes, please book it for 7 people and 3 nights starting on saturday.\n[Agent] i am sorry , i was not able to book that for you . would you like to try another day or a different price range ?\n[User] yes, can you try for saturday instead?\n[Agent] [hotel_name] is a [value_pricerange] [value_count] star hotel in the [value_area] . would you like me to book it for you ?\n[User] yes, please book it for 7 people and 3 nights starting on saturday.\n[Agent] booking was successful , reference number is [hotel_reference] .\n****************************************************************************************************\n{'hotel': {'info': {'type': 'hotel', 'pricerange': 'expensive', 'area': 'centre'}, 'fail_info': {}, 'book': {'stay': '3', 'day': 'saturday', 'invalid': False, 'people': '5'}, 'fail_book': {}}}\n" ], [ "json.dump(final_contexts, open(save_path + '/artificial_data/data_'+domain_key+'_'+percentage+'p.json','w'))", "_____no_output_____" ], [ "len(final_contexts), len(final_queries), len(final_kbs)", "_____no_output_____" ], [ "json.dump(final_kbs, open(save_path +'/artificial_data/kb_'+domain_key+'_'+percentage+'p.json','w'))\njson.dump(final_queries, open(save_path + '/artificial_data/query_'+domain_key+'_'+percentage+'p.json','w'))", "_____no_output_____" ] ] ]
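
The self-play loop in the cells above selects each simulated turn by sampling several candidates from a GPT model and re-ranking them with a siamese scorer plus hand-written score adjustments. A minimal sketch of that sample-then-rerank step, assuming a Hugging Face style `model`/`tokenizer` pair and a stand-in `score_fn` for the combined scorer (all three names are assumptions, not part of the record):

    import torch

    def best_of_n(model, tokenizer, prompt, score_fn, n=10, top_p=0.9, max_length=1000):
        # Encode the goal/context prompt and draw n sampled continuations.
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        with torch.no_grad():
            outputs = model.generate(
                input_ids,
                do_sample=True,
                top_k=n,
                top_p=top_p,
                num_return_sequences=n,
                max_length=max_length,
                pad_token_id=tokenizer.eos_token_id,
            )
        best, best_score = "", float("-inf")
        for out in outputs:
            # Keep only the newly generated tokens, then re-rank with the external scorer.
            candidate = tokenizer.decode(out[input_ids.shape[1]:], skip_special_tokens=True)
            score = score_fn(prompt, candidate)
            if score > best_score:
                best, best_score = candidate, score
        return best

The notebook applies this pattern twice per turn, once for the user model and once for the agent model, each with its own siamese scorer and rule-based adjustments.
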
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
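
The `getKB` routine in the record above matches database rows against query slots, treating `arriveBy` and `leaveAt` as time bounds by slicing `HH:MM` strings. A simplified sketch of that comparison (hypothetical helper names; as in the original, unparseable times count as a match):

    def parse_hhmm(t):
        # '16:45' -> (16, 45); None if the value is not an HH:MM string.
        try:
            return int(t[:2]), int(t[3:])
        except (TypeError, ValueError):
            return None

    def time_matches(row_time, constraint, kind):
        rt, ct = parse_hhmm(row_time), parse_hhmm(constraint)
        if rt is None or ct is None:
            return True                   # fall back to a match, mirroring the try/except in getKB
        if kind == 'arriveBy':
            return rt <= ct               # the row must arrive no later than the constraint
        if kind == 'leaveAt':
            return rt >= ct               # the row must leave no earlier than the constraint
        return row_time == constraint     # other slots are compared as normalized strings
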
e7c275d1d9a20ea3e2f810a0980bde9a83e1ec5a
509,090
ipynb
Jupyter Notebook
210601 gca data analyses.ipynb
rbnjd/gca_data_analyses
7aaedfd78525ce1c0bb24aa67245fa4cf0b517b2
[ "MIT" ]
null
null
null
210601 gca data analyses.ipynb
rbnjd/gca_data_analyses
7aaedfd78525ce1c0bb24aa67245fa4cf0b517b2
[ "MIT" ]
null
null
null
210601 gca data analyses.ipynb
rbnjd/gca_data_analyses
7aaedfd78525ce1c0bb24aa67245fa4cf0b517b2
[ "MIT" ]
null
null
null
310.042631
123,888
0.88612
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Exploratory-data-analysis\" data-toc-modified-id=\"Exploratory-data-analysis-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Exploratory data analysis</a></span><ul class=\"toc-item\"><li><span><a href=\"#Desribe-data\" data-toc-modified-id=\"Desribe-data-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Desribe data</a></span><ul class=\"toc-item\"><li><span><a href=\"#Sample-size\" data-toc-modified-id=\"Sample-size-1.1.1\"><span class=\"toc-item-num\">1.1.1&nbsp;&nbsp;</span>Sample size</a></span></li><li><span><a href=\"#Descriptive-statistics\" data-toc-modified-id=\"Descriptive-statistics-1.1.2\"><span class=\"toc-item-num\">1.1.2&nbsp;&nbsp;</span>Descriptive statistics</a></span></li><li><span><a href=\"#Shapiro-Wilk-Test\" data-toc-modified-id=\"Shapiro-Wilk-Test-1.1.3\"><span class=\"toc-item-num\">1.1.3&nbsp;&nbsp;</span>Shapiro-Wilk Test</a></span></li><li><span><a href=\"#Histograms\" data-toc-modified-id=\"Histograms-1.1.4\"><span class=\"toc-item-num\">1.1.4&nbsp;&nbsp;</span>Histograms</a></span></li></ul></li><li><span><a href=\"#Kendall's-Tau-correlation\" data-toc-modified-id=\"Kendall's-Tau-correlation-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>Kendall's Tau correlation</a></span></li><li><span><a href=\"#Correlation-Heatmap\" data-toc-modified-id=\"Correlation-Heatmap-1.3\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>Correlation Heatmap</a></span></li></ul></li></ul></div>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import shapiro, kendalltau\nfrom sklearn import linear_model\nimport statsmodels.api as sm", "_____no_output_____" ], [ "df = pd.read_csv('data/cleaned_data_gca.csv')", "_____no_output_____" ] ], [ [ "# Exploratory data analysis", "_____no_output_____" ], [ "## Desribe data", "_____no_output_____" ], [ "### Sample size", "_____no_output_____" ] ], [ [ "print('Sample size socio-demographics =', df[df.columns[0]].count())\nprint('Sample size psychological variables =', df[df.columns[4]].count())", "Sample size socio-demographics = 33\nSample size psychological variables = 34\n" ] ], [ [ "### Descriptive statistics", "_____no_output_____" ], [ "**Descriptive statistics for numeric data**", "_____no_output_____" ] ], [ [ "descriptive_stat = df.describe()\ndescriptive_stat = descriptive_stat.T\ndescriptive_stat['skew'] = df.skew()\ndescriptive_stat['kurtosis'] = df.kurt()\ndescriptive_stat.insert(loc=5, column='median', value=df.median())\n\ndescriptive_stat=descriptive_stat.apply(pd.to_numeric, errors='ignore')", "_____no_output_____" ], [ "descriptive_stat", "_____no_output_____" ] ], [ [ "**Descriptive statistics for categorical data**", "_____no_output_____" ] ], [ [ "for col in list(df[['gender','education level']]):\n print('variable:', col)\n print(df[col].value_counts(dropna=False).to_string())\n print('')", "variable: gender\nMännlich 18\nWeiblich 14\nDivers 1\nNaN 1\n\nvariable: education level\nHochschulabschluss 16\nAbitur 8\nderzeit noch Schüler\\*in 5\nderzeit noch Schüler/*in 3\nFachhochschulabschluss 1\nNaN 1\n\n" ] ], [ [ "### Shapiro-Wilk Test", "_____no_output_____" ] ], [ [ "# define Shapiro Wilk Test function\ndef shapiro_test(data):\n '''calculate K-S Test for and out results in table''' \n data = data._get_numeric_data()\n data_shapiro_test = pd.DataFrame()\n \n # Iterate over 
columns, calculate test statistic & create table\n for column in data: \n column_shapiro_test = shapiro(data[column])\n shapiro_pvalue_column = column_shapiro_test.pvalue\n if column_shapiro_test.pvalue < .05:\n shapiro_pvalue_column = '{:.6f}'.format(shapiro_pvalue_column) + '*'\n column_distr = 'non-normal'\n else:\n column_distr = 'normal'\n new_row = {'variable': column, \n 'Shapiro Wilk p-value': shapiro_pvalue_column, \n 'Shapiro Wilk statistic': column_shapiro_test.statistic,\n 'distribution': column_distr\n }\n data_shapiro_test = data_shapiro_test.append(new_row, ignore_index=True)\n data_shapiro_test = data_shapiro_test[['variable', 'Shapiro Wilk statistic', 'Shapiro Wilk p-value', 'distribution']]\n return data_shapiro_test", "_____no_output_____" ], [ "shapiro_test(df.dropna())", "_____no_output_____" ] ], [ [ "### Histograms", "_____no_output_____" ], [ "**Histograms: Likert-scale variables**", "_____no_output_____" ] ], [ [ "for column in df._get_numeric_data().drop(columns=['assessed PEB','age']):\n sns.set(rc={'figure.figsize':(5,5)})\n data = df[column]\n sns.histplot(data, bins=np.arange(1,9)-.5) \n plt.xlabel(column)\n plt.show()", "_____no_output_____" ] ], [ [ "**Histogramm: age**", "_____no_output_____" ] ], [ [ "sns.histplot(df['age'], bins=10)", "_____no_output_____" ] ], [ [ "**Histogramm: assessed PEB**", "_____no_output_____" ] ], [ [ "sns.histplot(df['assessed PEB'], bins=np.arange(0,8)-.5)", "_____no_output_____" ] ], [ [ "## Kendall's Tau correlation", "_____no_output_____" ] ], [ [ "# create df with correlation coefficient and p-value indication\ndef kendall_pval(x,y):\n return kendalltau(x,y)[1]\n\n# calculate kendall's tau correlation with p values ( < .01 = ***, < .05 = **, < .1 = *)\ntau = df.corr(method = 'kendall').round(decimals=2)\n\npval = df.corr(method=kendall_pval) - np.eye(*tau.shape)\np = pval.applymap(lambda x: ''.join(['*' for t in [0.1,0.05] if x<=t]))\ntau_corr_with_p_values = tau.round(4).astype(str) + p", "_____no_output_____" ], [ "# set colored highlights for correlation matri\ndef color_sig_blue(val):\n \"\"\"\n color all significant values in blue\n \"\"\"\n color = 'blue' if val.endswith('*') else 'black'\n return 'color: %s' % color", "_____no_output_____" ], [ "tau_corr_with_p_values.style.applymap(color_sig_blue)", "_____no_output_____" ] ], [ [ "## Correlation Heatmap", "_____no_output_____" ], [ "All not significant correlations (p < .05) are not shown.", "_____no_output_____" ] ], [ [ "# calculate correlation coefficient\ncorr = df.corr(method='kendall')\n\n# calculate column correlations and make a seaborn heatmap\nsns.set(rc={'figure.figsize':(12,12)})\n\nax = sns.heatmap(\n corr, \n vmin=-1, vmax=1, center=0,\n cmap=sns.diverging_palette(20, 220, n=200),\n square=True\n)\n\nax.set_xticklabels(\n ax.get_xticklabels(),\n rotation=45,\n horizontalalignment='right'\n);\nheatmap = ax.get_figure()", "_____no_output_____" ], [ "# calculate correlation coefficient and p-values\ncorr_p_values = df.corr(method = kendall_pval)\ncorr = df.corr(method='kendall')\n\n# calculate column correlations and make a seaborn heatmap\nsns.set(rc={'figure.figsize':(12,12)})\n\n#set mask for only significant values (p <= .05)\nmask = np.invert(np.tril(corr_p_values<.05))\n\nax = sns.heatmap(\n corr, \n vmin=-1, vmax=1, center=0,\n cmap=sns.diverging_palette(20, 220, n=200),\n square=True,\n annot=True,\n mask=mask\n)\nax.set_xticklabels(\n ax.get_xticklabels(),\n rotation=45,\n horizontalalignment='right'\n);\nheatmap = ax.get_figure()", 
"_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
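
The correlation cells in the record above compute Kendall's tau together with a p-value matrix (by passing a callable to `DataFrame.corr`) and then hide non-significant cells in the heatmap. A compact sketch of that idea, assuming a numeric DataFrame `df`; the function name is illustrative:

    import seaborn as sns
    from scipy.stats import kendalltau

    def masked_kendall_heatmap(df, alpha=0.05):
        tau = df.corr(method='kendall')
        # Element-wise Kendall p-values; pandas puts 1s on the diagonal of a
        # callable-based corr, so the diagonal ends up masked as well.
        pvals = df.corr(method=lambda x, y: kendalltau(x, y)[1])
        mask = pvals >= alpha             # True cells are hidden by seaborn
        return sns.heatmap(tau, mask=mask, vmin=-1, vmax=1, center=0, annot=True, square=True)

The notebook additionally restricts its final figure to the lower triangle via `np.tril`; combining that with the significance mask reproduces its plot.
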
e7c278dfb987b15642d0a55283a5637456dc6f33
274,526
ipynb
Jupyter Notebook
07-homework/cherry-blossoms/Cherry Blossoms.ipynb
giovanafleck/foundations_homework
39068b31741dc196d5e257e2961344e05372662f
[ "MIT" ]
null
null
null
07-homework/cherry-blossoms/Cherry Blossoms.ipynb
giovanafleck/foundations_homework
39068b31741dc196d5e257e2961344e05372662f
[ "MIT" ]
null
null
null
07-homework/cherry-blossoms/Cherry Blossoms.ipynb
giovanafleck/foundations_homework
39068b31741dc196d5e257e2961344e05372662f
[ "MIT" ]
null
null
null
55.158931
67,356
0.518741
[ [ [ "# Cherry Blossoms!\n\nIf we travel back in time a few months, [cherry blossoms](https://en.wikipedia.org/wiki/Cherry_blossom) were in full bloom! We don't live in Japan or DC, but we do have our fair share of the trees - buuut you sadly missed [Brooklyn Botanic Garden's annual festival](https://www.bbg.org/visit/event/sakura_matsuri_2019).\n\nWe'll have to make up for it with data-driven cherry blossoms instead. Once upon a time [Data is Plural](https://tinyletter.com/data-is-plural) linked to [a dataset](http://atmenv.envi.osakafu-u.ac.jp/aono/kyophenotemp4/) about when the cherry trees blossom each year. It's a little out of date, but it's quirky in a real nice way so we're sticking with it.\n\n## 0. Do all of your importing/setup stuff", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## 1. Read in the file using pandas, and look at the first five rows", "_____no_output_____" ] ], [ [ "df = pd.read_excel(\"KyotoFullFlower7.xls\")\ndf.head(5)", "_____no_output_____" ] ], [ [ "## 2. Read in the file using pandas CORRECTLY, and look at the first five rows\n\nHrm, how do your column names look? Read the file in again but this time add a parameter to make sure your columns look right.\n\n**TIP: The first year should be 801 AD, and it should not have any dates or anything.**", "_____no_output_____" ] ], [ [ "df=df[25:]\ndf", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "df.head(5)\n", "_____no_output_____" ] ], [ [ "## 3. Look at the final five rows of the data", "_____no_output_____" ] ], [ [ "df.tail(5)", "_____no_output_____" ] ], [ [ "## 4. Add some more NaN values", "_____no_output_____" ], [ "It looks like you should probably have some NaN/missing values earlier on in the dataset under \"Reference name.\" Read in the file *one more time*, this time making sure all of those missing reference names actually show up as `NaN` instead of `-`.", "_____no_output_____" ] ], [ [ "df.replace(\"-\", np.nan, inplace=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.rename(columns={'Full-flowering dates of Japanese cherry (Prunus jamasakura) at Kyoto, Japan. (Latest version, Jun. 12, 2012)': 'AD', 'Unnamed:_1': 'Full-flowering date'}, inplace=True)", "_____no_output_____" ], [ "df.rename(columns={'Unnamed: 1': 'DOY', 'Unnamed: 2': 'Full_flowering_date', 'Unnamed: 3': 'Source_code'}, inplace=True)\n", "_____no_output_____" ], [ "df.rename(columns={'Unnamed: 4': 'Data_type_code', 'Unnamed: 5': 'Reference_Name'}, inplace=True)\n", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "## 5. What source is the most common as a reference?", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ], [ "df.Source_code.value_counts()", "_____no_output_____" ] ], [ [ "## 6. Filter the list to only include columns where the `Full-flowering date (DOY)` is not missing\n\nIf you'd like to do it in two steps (which might be easier to think through), first figure out how to test whether a column is empty/missing/null/NaN, get the list of `True`/`False` values, and then later feed it to your `df`.", "_____no_output_____" ] ], [ [ "df.DOY.value_counts(dropna=False)", "_____no_output_____" ] ], [ [ "## 7. Make a histogram of the full-flowering date\n\nIs it not showing up? Remember the \"magic\" command that makes graphs show up in matplotlib notebooks!", "_____no_output_____" ] ], [ [ "df.DOY.value_counts().hist()", "_____no_output_____" ] ], [ [ "## 8. 
Make another histogram of the full-flowering date, but with 39 bins instead of 10", "_____no_output_____" ] ], [ [ "df.DOY.value_counts().hist(bins=39)", "_____no_output_____" ] ], [ [ "## 9. What's the average number of days it takes for the flowers to blossom? And how many records do we have?\n\nAnswer these both with one line of code.", "_____no_output_____" ] ], [ [ "df.DOY.describe()", "_____no_output_____" ] ], [ [ "## 10. What's the average days into the year cherry flowers normally blossomed before 1900?\n\n", "_____no_output_____" ] ], [ [ "(df.AD>=1900).mean()", "_____no_output_____" ] ], [ [ "## 11. How about after 1900?", "_____no_output_____" ] ], [ [ "(df.AD<=1900).mean()", "_____no_output_____" ] ], [ [ "## 12. How many times was our data from a title in Japanese poetry?\n\nYou'll need to read the documentation inside of the Excel file.", "_____no_output_____" ] ], [ [ "#Data_type_code\n#4=poetry\n\ndf.Data_type_code.value_counts()", "_____no_output_____" ] ], [ [ "## 13. Show only the years where our data was from a title in Japanese poetry", "_____no_output_____" ] ], [ [ "df[df.Data_type_code == 4]\n", "_____no_output_____" ] ], [ [ "## 14. Graph the full-flowering date (DOY) over time", "_____no_output_____" ] ], [ [ "df.DOY.plot(x=\"DOY\", y=\"AD\", figsize=(10,7))", "_____no_output_____" ] ], [ [ "## 15. Smooth out the graph\n\nIt's so jagged! You can use `df.rolling` to calculate a rolling average.\n\nThe following code calculates a **10-year mean**, using the `AD` column as the anchor. If there aren't 20 samples to work with in a row, it'll accept down to 5. Neat, right?\n\n(We're only looking at the final 5)", "_____no_output_____" ] ], [ [ "df.rolling(10, on='AD', min_periods=5)['DOY'].mean().tail()", "_____no_output_____" ], [ "df.rolling(10, on='AD', min_periods=5)['DOY'].mean().tail().plot(ylim=(80, 120))", "_____no_output_____" ] ], [ [ "Use the code above to create a new column called `rolling_date` in our dataset. It should be the 20-year rolling average of the flowering date. Then plot it, with the year on the x axis and the day of the year on the y axis.\n\nTry adding `ylim=(80, 120)` to your `.plot` command to make things look a little less dire.", "_____no_output_____" ], [ "### 16. Add a month column\n\nRight now the \"Full-flowering date\" column is pretty rough. It uses numbers like '402' to mean \"April 2nd\" and \"416\" to mean \"April 16th.\" Let's make a column to explain what month it happened in.\n\n* Every row that happened in April should have 'April' in the `month` column.\n* Every row that happened in March should have 'March' as the `month` column.\n* Every row that happened in May should have 'May' as the `month` column.\n\n**I've given you March as an example**, you just need to add in two more lines to do April and May.", "_____no_output_____" ] ], [ [ "df.loc[df['Full_flowering_date'] < 400, 'month'] = 'March'", "_____no_output_____" ], [ "\ndf.loc[df['Full_flowering_date'] < 500, 'month'] = 'April'", "_____no_output_____" ], [ "df.loc[df['Full_flowering_date'] < 600, 'month'] = 'May'", "_____no_output_____" ] ], [ [ "### 17. Using your new column, how many blossomings happened in each month?", "_____no_output_____" ] ], [ [ "df.month.value_counts()", "_____no_output_____" ] ], [ [ "### 18. Graph how many blossomings happened in each month.", "_____no_output_____" ] ], [ [ "df.month.value_counts().hist()", "_____no_output_____" ] ], [ [ "## 19. 
Adding a day-of-month column\n\nNow we're going to add a new column called \"day of month.\" It's actually a little tougher than it should be since the `Full-flowering date` column is a *float* instead of an integer.", "_____no_output_____" ] ], [ [ "df.Full_flowering_date.astype(int)", "_____no_output_____" ] ], [ [ "And if you try to convert it to an int, **pandas yells at you!**", "_____no_output_____" ], [ "That's because, as you can read, you can't have an `NaN` be an integer. But, for some reason, it *can* be a float. Ugh! So what we'll do is **drop all of the na values, then convert them to integers to get rid of the decimals.**\n\nI'll show you the first 5 here.", "_____no_output_____" ] ], [ [ "df['Full_flowering_date'].dropna().astype(int).head()", "_____no_output_____" ] ], [ [ "On the next line, I take the first character of the row and add a bunch of exclamation points on it. I want you to edit this code to **return the last TWO digits of the number**. This only shows you the first 5, by the way.\n\nYou might want to look up 'list slicing.'", "_____no_output_____" ] ], [ [ "df['Full_flowering_date'].dropna().astype(int).astype(str).apply(lambda value: value[0] + \"!!!\").head()", "_____no_output_____" ] ], [ [ "Now that you've successfully extracted the last two letters, save them into a new column called `'day-of-month'`", "_____no_output_____" ] ], [ [ "df['day-of-month'] = df['Full_flowering_date'].dropna().astype(int).astype(str).apply(lambda value: value[0] + \"!!!\")", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "### 20. Adding a date column\n\nNow take the `'month'` and `'day-of-month'` columns and combine them in order to create a new column called `'date'`", "_____no_output_____" ] ], [ [ "df[\"date\"] = df[\"month\"] + df[\"day-of-month\"]", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "# YOU ARE DONE.\n\nAnd **incredible.**", "_____no_output_____" ] ], [ [ "!!!!!!!!!!!", "_____no_output_____" ] ] ]
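
The `rolling_date` exercise described in the record above is left unfinished in the cells; one way to complete it, reusing the notebook's own `AD`/`DOY` column names and the rolling pattern it already demonstrates (a sketch, with the window widened to 20 years as the exercise asks):

    # 20-year rolling mean of the flowering day, anchored on the year column.
    df['rolling_date'] = df.rolling(20, on='AD', min_periods=5)['DOY'].mean()
    # Year on the x axis, day of year on the y axis, with the suggested y-limits.
    df.plot(x='AD', y='rolling_date', ylim=(80, 120))
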
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
e7c27e681ca88c6422ee3770325fd0ef776a726d
258,011
ipynb
Jupyter Notebook
homeworks/tarea_02/tarea_02.ipynb
FabianSaulRubilarAlvarez/mat281_portfolio_template
38dccab71084591a55bd71bde3f923321a96d730
[ "MIT" ]
null
null
null
homeworks/tarea_02/tarea_02.ipynb
FabianSaulRubilarAlvarez/mat281_portfolio_template
38dccab71084591a55bd71bde3f923321a96d730
[ "MIT" ]
null
null
null
homeworks/tarea_02/tarea_02.ipynb
FabianSaulRubilarAlvarez/mat281_portfolio_template
38dccab71084591a55bd71bde3f923321a96d730
[ "MIT" ]
null
null
null
156.845593
74,328
0.87025
[ [ [ "\n# Tarea N°02\n## Instrucciones\n1.- Completa tus datos personales (nombre y rol USM) en siguiente celda.\n\n**Nombre**: Fabián Rubilar Álvarez \n\n**Rol**: 201510509-K\n\n2.- Debes pushear este archivo con tus cambios a tu repositorio personal del curso, incluyendo datos, imágenes, scripts, etc.\n\n3.- Se evaluará:\n\n- Soluciones\n- Código\n- Que Binder esté bien configurado.\n- Al presionar `Kernel -> Restart Kernel and Run All Cells` deben ejecutarse todas las celdas sin error.", "_____no_output_____" ], [ "## I.- Clasificación de dígitos\n\n\nEn este laboratorio realizaremos el trabajo de reconocer un dígito a partir de una imagen.\n", "_____no_output_____" ], [ "![rgb](https://www.wolfram.com/language/11/neural-networks/assets.en/digit-classification/smallthumb_1.png)", "_____no_output_____" ], [ "El objetivo es a partir de los datos, hacer la mejor predicción de cada imagen. Para ellos es necesario realizar los pasos clásicos de un proyecto de _Machine Learning_, como estadística descriptiva, visualización y preprocesamiento. \n\n* Se solicita ajustar al menos tres modelos de clasificación:\n * Regresión logística\n * K-Nearest Neighbours \n * Uno o más algoritmos a su elección [link](https://scikit-learn.org/stable/supervised_learning.html#supervised-learning) (es obligación escoger un _estimator_ que tenga por lo menos un hiperparámetro). \n \n \n* En los modelos que posean hiperparámetros es mandatorio buscar el/los mejores con alguna técnica disponible en `scikit-learn` ([ver más](https://scikit-learn.org/stable/modules/grid_search.html#tuning-the-hyper-parameters-of-an-estimator)).\n* Para cada modelo, se debe realizar _Cross Validation_ con 10 _folds_ utilizando los datos de entrenamiento con tal de determinar un intervalo de confianza para el _score_ del modelo.\n* Realizar una predicción con cada uno de los tres modelos con los datos _test_ y obtener el _score_. \n* Analizar sus métricas de error (**accuracy**, **precision**, **recall**, **f-score**)\n\n", "_____no_output_____" ], [ "### Exploración de los datos\nA continuación se carga el conjunto de datos a utilizar, a través del sub-módulo `datasets` de `sklearn`.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n", "_____no_output_____" ], [ "digits_dict = datasets.load_digits()\nprint(digits_dict[\"DESCR\"])", ".. _digits_dataset:\n\nOptical recognition of handwritten digits dataset\n--------------------------------------------------\n\n**Data Set Characteristics:**\n\n :Number of Instances: 5620\n :Number of Attributes: 64\n :Attribute Information: 8x8 image of integer pixels in the range 0..16.\n :Missing Attribute Values: None\n :Creator: E. Alpaydin (alpaydin '@' boun.edu.tr)\n :Date: July; 1998\n\nThis is a copy of the test set of the UCI ML hand-written digits datasets\nhttps://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits\n\nThe data set contains images of hand-written digits: 10 classes where\neach class refers to a digit.\n\nPreprocessing programs made available by NIST were used to extract\nnormalized bitmaps of handwritten digits from a preprinted form. From a\ntotal of 43 people, 30 contributed to the training set and different 13\nto the test set. 32x32 bitmaps are divided into nonoverlapping blocks of\n4x4 and the number of on pixels are counted in each block. This generates\nan input matrix of 8x8 where each element is an integer in the range\n0..16. 
This reduces dimensionality and gives invariance to small\ndistortions.\n\nFor info on NIST preprocessing routines, see M. D. Garris, J. L. Blue, G.\nT. Candela, D. L. Dimmick, J. Geist, P. J. Grother, S. A. Janet, and C.\nL. Wilson, NIST Form-Based Handprint Recognition System, NISTIR 5469,\n1994.\n\n.. topic:: References\n\n - C. Kaynak (1995) Methods of Combining Multiple Classifiers and Their\n Applications to Handwritten Digit Recognition, MSc Thesis, Institute of\n Graduate Studies in Science and Engineering, Bogazici University.\n - E. Alpaydin, C. Kaynak (1998) Cascading Classifiers, Kybernetika.\n - Ken Tang and Ponnuthurai N. Suganthan and Xi Yao and A. Kai Qin.\n Linear dimensionalityreduction using relevance weighted LDA. School of\n Electrical and Electronic Engineering Nanyang Technological University.\n 2005.\n - Claudio Gentile. A New Approximate Maximal Margin Classification\n Algorithm. NIPS. 2000.\n" ], [ "digits_dict.keys()", "_____no_output_____" ], [ "digits_dict[\"target\"]", "_____no_output_____" ] ], [ [ "A continuación se crea dataframe declarado como `digits` con los datos de `digits_dict` tal que tenga 65 columnas, las 6 primeras a la representación de la imagen en escala de grises (0-blanco, 255-negro) y la última correspondiente al dígito (`target`) con el nombre _target_.", "_____no_output_____" ] ], [ [ "digits = (\n pd.DataFrame(\n digits_dict[\"data\"],\n )\n .rename(columns=lambda x: f\"c{x:02d}\")\n .assign(target=digits_dict[\"target\"])\n .astype(int)\n)\n\ndigits.head()", "_____no_output_____" ] ], [ [ "### Ejercicio 1\n**Análisis exploratorio:** Realiza tu análisis exploratorio, no debes olvidar nada! Recuerda, cada análisis debe responder una pregunta.\n\nAlgunas sugerencias:\n\n* ¿Cómo se distribuyen los datos?\n* ¿Cuánta memoria estoy utilizando?\n* ¿Qué tipo de datos son?\n* ¿Cuántos registros por clase hay?\n* ¿Hay registros que no se correspondan con tu conocimiento previo de los datos?", "_____no_output_____" ] ], [ [ "#Primero veamos los tipos de datos del DF y cierta información que puede ser de utilidad\n\ndigits.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1797 entries, 0 to 1796\nData columns (total 65 columns):\nc00 1797 non-null int32\nc01 1797 non-null int32\nc02 1797 non-null int32\nc03 1797 non-null int32\nc04 1797 non-null int32\nc05 1797 non-null int32\nc06 1797 non-null int32\nc07 1797 non-null int32\nc08 1797 non-null int32\nc09 1797 non-null int32\nc10 1797 non-null int32\nc11 1797 non-null int32\nc12 1797 non-null int32\nc13 1797 non-null int32\nc14 1797 non-null int32\nc15 1797 non-null int32\nc16 1797 non-null int32\nc17 1797 non-null int32\nc18 1797 non-null int32\nc19 1797 non-null int32\nc20 1797 non-null int32\nc21 1797 non-null int32\nc22 1797 non-null int32\nc23 1797 non-null int32\nc24 1797 non-null int32\nc25 1797 non-null int32\nc26 1797 non-null int32\nc27 1797 non-null int32\nc28 1797 non-null int32\nc29 1797 non-null int32\nc30 1797 non-null int32\nc31 1797 non-null int32\nc32 1797 non-null int32\nc33 1797 non-null int32\nc34 1797 non-null int32\nc35 1797 non-null int32\nc36 1797 non-null int32\nc37 1797 non-null int32\nc38 1797 non-null int32\nc39 1797 non-null int32\nc40 1797 non-null int32\nc41 1797 non-null int32\nc42 1797 non-null int32\nc43 1797 non-null int32\nc44 1797 non-null int32\nc45 1797 non-null int32\nc46 1797 non-null int32\nc47 1797 non-null int32\nc48 1797 non-null int32\nc49 1797 non-null int32\nc50 1797 non-null int32\nc51 1797 non-null int32\nc52 1797 non-null int32\nc53 1797 
non-null int32\nc54 1797 non-null int32\nc55 1797 non-null int32\nc56 1797 non-null int32\nc57 1797 non-null int32\nc58 1797 non-null int32\nc59 1797 non-null int32\nc60 1797 non-null int32\nc61 1797 non-null int32\nc62 1797 non-null int32\nc63 1797 non-null int32\ntarget 1797 non-null int32\ndtypes: int32(65)\nmemory usage: 456.4 KB\n" ], [ "#Veamos si hay valores nulos en las columnas\n\nif True not in digits.isnull().any().values:\n print('No existen valores nulos')", "No existen valores nulos\n" ], [ "#Veamos que elementos únicos tenemos en la columna target del DF\n\ndigits.target.unique()", "_____no_output_____" ], [ "#Veamos cuantos registros por clase existen luego de saber que hay 10 tipos de clase en la columna target\n\n(u,v) = np.unique(digits['target'] , return_counts = True)\nfor i in range(0,10):\n print ('Tenemos', v[i], 'registros para', u[i])\n", "Tenemos 178 registros para 0\nTenemos 182 registros para 1\nTenemos 177 registros para 2\nTenemos 183 registros para 3\nTenemos 181 registros para 4\nTenemos 182 registros para 5\nTenemos 181 registros para 6\nTenemos 179 registros para 7\nTenemos 174 registros para 8\nTenemos 180 registros para 9\n" ], [ "#Como tenemos 10 tipos de elementos en target, veamos las caracteristicas que poseen los datos\n\ncaract_datos = [len(digits[digits['target'] ==i ].target) for i in range(0,10)]", "_____no_output_____" ], [ "print ('El total de los datos es:', sum(caract_datos))\nprint ('El máximo de los datos es:', max(caract_datos))\nprint ('El mínimo de los datos es:', min(caract_datos))\nprint ('El promedio de los datos es:', 0.1*sum(caract_datos))", "El total de los datos es: 1797\nEl máximo de los datos es: 183\nEl mínimo de los datos es: 174\nEl promedio de los datos es: 179.70000000000002\n" ] ], [ [ "Por lo tanto, tenemos un promedio de 180 (aproximando por arriba) donde el menor valor es de 174 y el mayor valor es de 183.", "_____no_output_____" ] ], [ [ "#Para mejorar la visualización, construyamos un histograma\n\ndigits.target.plot.hist(bins=12, alpha=0.5)", "_____no_output_____" ] ], [ [ "Sabemos que cada dato corresponde a una matriz cuadrada de dimensión 8 con entradas de 0 a 16. Cada dato proviene de otra matriz cuadrada de dimensión 32, el cual ha sido procesado por un método de reducción de dimensiones. Además, cada dato es una imagen de un número entre 0 a 9, por lo tanto se utilizan 8$\\times$8 = 64 bits, sumado al bit para guardar información. Así, como tenemos 1797 datos, calculamos 1797$\\times$65 = 116805 bits en total. Ahora, si no se aplica la reducción de dimensiones, tendriamos 32$\\times$32$\\times$1797 = 1840128 bits, que es aproximadamente 15,7 veces mayor. ", "_____no_output_____" ], [ "### Ejercicio 2\n**Visualización:** Para visualizar los datos utilizaremos el método `imshow` de `matplotlib`. Resulta necesario convertir el arreglo desde las dimensiones (1,64) a (8,8) para que la imagen sea cuadrada y pueda distinguirse el dígito. Superpondremos además el label correspondiente al dígito, mediante el método `text`. Esto nos permitirá comparar la imagen generada con la etiqueta asociada a los valores. Realizaremos lo anterior para los primeros 25 datos del archivo.", "_____no_output_____" ] ], [ [ "digits_dict[\"images\"][0]", "_____no_output_____" ] ], [ [ "Visualiza imágenes de los dígitos utilizando la llave `images` de `digits_dict`. \n\nSugerencia: Utiliza `plt.subplots` y el método `imshow`. 
Puedes hacer una grilla de varias imágenes al mismo tiempo!", "_____no_output_____" ] ], [ [ "nx, ny = 5, 5\nfig, axs = plt.subplots(nx, ny, figsize=(12, 12))\nfor x in range(0,5):\n for y in range(0,5):\n axs[x,y].imshow(digits_dict['images'][5*x+y], cmap = 'plasma')\n axs[x,y].text(3,4,s = digits['target'][5*x+y], fontsize = 30)", "_____no_output_____" ] ], [ [ "### Ejercicio 3\n\n**Machine Learning**: En esta parte usted debe entrenar los distintos modelos escogidos desde la librería de `skelearn`. Para cada modelo, debe realizar los siguientes pasos:\n\n* **train-test** \n * Crear conjunto de entrenamiento y testeo (usted determine las proporciones adecuadas).\n * Imprimir por pantalla el largo del conjunto de entrenamiento y de testeo.\n \n \n* **modelo**:\n * Instanciar el modelo objetivo desde la librería sklearn.\n * *Hiper-parámetros*: Utiliza `sklearn.model_selection.GridSearchCV` para obtener la mejor estimación de los parámetros del modelo objetivo.\n\n\n\n\n* **Métricas**:\n * Graficar matriz de confusión.\n * Analizar métricas de error.\n\n\n\n__Preguntas a responder:__\n\n* ¿Cuál modelo es mejor basado en sus métricas?\n* ¿Cuál modelo demora menos tiempo en ajustarse?\n* ¿Qué modelo escoges?\n", "_____no_output_____" ] ], [ [ "X = digits.drop(columns=\"target\").values\ny = digits[\"target\"].values", "_____no_output_____" ], [ "from sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\n#Ahora vemos los conjuntos de testeo y entrenamiento\n\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)\n\nprint('El conjunto de testeo tiene la siguiente cantidad de datos:', len(y_test))\nprint('El conjunto de entrenamiento tiene la siguiente cantidad de datos:', len(y_train))\n", "El conjunto de testeo tiene la siguiente cantidad de datos: 360\nEl conjunto de entrenamiento tiene la siguiente cantidad de datos: 1437\n" ], [ "#REGRESIÓN LOGÍSTICA\n\nfrom sklearn.linear_model import LogisticRegression\nfrom metrics_classification import *\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import confusion_matrix\n\n#Creando el modelo\nrlog = LogisticRegression()\nrlog.fit(X_train, y_train) #Ajustando el modelo\n\n#Matriz de confusión\ny_true = list(y_test)\ny_pred = list(rlog.predict(X_test))\nprint('\\nMatriz de confusion:\\n ')\nprint(confusion_matrix(y_true,y_pred))\n\n#Métricas\ndf_temp = pd.DataFrame(\n {\n 'y':y_true,\n 'yhat':y_pred\n }\n)\n\ndf_metrics = summary_metrics(df_temp)\nprint(\"\\nMetricas para los regresores\")\nprint(\"\")\nprint(df_metrics)\n", "C:\\Users\\elele\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\elele\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. 
Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\n" ], [ "#K-NEAREST NEIGHBORS\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import neighbors\nfrom sklearn import preprocessing\n\n#Creando el modelo\nknn = neighbors.KNeighborsClassifier()\nknn.fit(X_train,y_train) #Ajustando el modelo\n\n#Matriz de confusión\ny_true = list(y_test)\ny_pred = list(knn.predict(X_test))\nprint('\\nMatriz de confusion:\\n ')\nprint(confusion_matrix(y_true,y_pred))\n\n#Métricas\ndf_temp = pd.DataFrame(\n {\n 'y':y_true,\n 'yhat':y_pred\n }\n)\n\ndf_metrics = summary_metrics(df_temp)\nprint(\"\\nMetricas para los regresores\")\nprint(\"\")\nprint(df_metrics)", "\nMatriz de confusion:\n \n[[33 0 0 0 0 0 0 0 0 0]\n [ 0 28 0 0 0 0 0 0 0 0]\n [ 0 0 33 0 0 0 0 0 0 0]\n [ 0 0 0 34 0 0 0 0 0 0]\n [ 0 0 0 0 46 0 0 0 0 0]\n [ 0 0 0 0 0 45 1 0 0 1]\n [ 0 0 0 0 0 0 35 0 0 0]\n [ 0 0 0 0 0 0 0 33 0 1]\n [ 0 0 0 0 0 0 0 0 30 0]\n [ 0 0 0 0 1 1 0 0 0 38]]\n\nMetricas para los regresores\n\n accuracy recall precision fscore\n0 0.9861 0.9878 0.9879 0.9878\n" ], [ "#ÁRBOL DE DECISIÓN\n\nfrom sklearn.tree import DecisionTreeClassifier\n\n#Creando el modelo\nadd = DecisionTreeClassifier(max_depth=10)\nadd = add.fit(X_train, y_train) #Ajustando el modelo\n\n#Matriz de confusión\ny_true = list(y_test)\ny_pred = list(add.predict(X_test))\nprint('\\nMatriz de confusion:\\n ')\nprint(confusion_matrix(y_true,y_pred))\n\n#Métricas\ndf_temp = pd.DataFrame(\n {\n 'y':y_true,\n 'yhat':y_pred\n }\n)\n\ndf_metrics = summary_metrics(df_temp)\nprint(\"\\nMetricas para los regresores\")\nprint(\"\")\nprint(df_metrics)", "\nMatriz de confusion:\n \n[[29 0 0 0 3 1 0 0 0 0]\n [ 0 21 2 1 1 0 0 0 2 1]\n [ 1 0 28 3 0 0 0 1 0 0]\n [ 0 0 1 30 0 0 0 0 2 1]\n [ 0 0 1 1 38 0 2 3 1 0]\n [ 0 0 1 0 1 43 1 0 0 1]\n [ 0 0 0 0 1 0 33 0 0 1]\n [ 0 1 0 1 1 0 0 31 0 0]\n [ 0 2 1 1 1 2 0 0 23 0]\n [ 0 1 0 2 1 1 0 2 0 33]]\n\nMetricas para los regresores\n\n accuracy recall precision fscore\n0 0.8583 0.8547 0.8591 0.8556\n" ], [ "#GRIDSEARCH\n\nfrom sklearn.model_selection import GridSearchCV\n\nmodel = DecisionTreeClassifier()\n\n# rango de parametros\nrango_criterion = ['gini','entropy']\nrango_max_depth = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 20, 30, 40, 50, 70, 90, 120, 150])\nparam_grid = dict(criterion = rango_criterion, max_depth = rango_max_depth)\nprint(param_grid)\nprint('\\n')\n\ngs = GridSearchCV(estimator=model, \n param_grid=param_grid, \n scoring='accuracy',\n cv=10,\n n_jobs=-1)\n\ngs = gs.fit(X_train, y_train)\n\nprint(gs.best_score_)\nprint('\\n')\nprint(gs.best_params_)\n", "{'criterion': ['gini', 'entropy'], 'max_depth': array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15,\n 20, 30, 40, 50, 70, 90, 120, 150])}\n\n\n0.8761308281141267\n\n\n{'criterion': 'entropy', 'max_depth': 11}\n" ] ], [ [ "### Ejercicio 4\n\n__Comprensión del modelo:__ Tomando en cuenta el mejor modelo entontrado en el `Ejercicio 3`, debe comprender e interpretar minuciosamente los resultados y gráficos asocados al modelo en estudio, para ello debe resolver los siguientes puntos:\n\n\n\n * **Cross validation**: usando **cv** (con n_fold = 10), sacar una especie de \"intervalo de confianza\" sobre alguna de las métricas estudiadas en clases: \n * $\\mu \\pm \\sigma$ = promedio $\\pm$ desviación estandar\n * **Curva de Validación**: Replica el ejemplo del siguiente 
[link](https://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.html#sphx-glr-auto-examples-model-selection-plot-validation-curve-py) pero con el modelo, parámetros y métrica adecuada. Saque conclusiones del gráfico.\n * **Curva AUC–ROC**: Replica el ejemplo del siguiente [link](https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#sphx-glr-auto-examples-model-selection-plot-roc-py) pero con el modelo, parámetros y métrica adecuada. Saque conclusiones del gráfico.", "_____no_output_____" ] ], [ [ "#Cross Validation \n\nfrom sklearn.model_selection import cross_val_score\n\nmodel = KNeighborsClassifier()\nprecision = cross_val_score(estimator = model, X = X_train, y = y_train, cv = 10)\nmed = precision.mean()#Media\ndesv = precision.std()#Desviación estandar \na = med - desv\nb = med + desv\nprint('(',a,',', b,')')\n", "( 0.9754505939165444 , 0.9953155512780988 )\n" ], [ "#Curva de Validación\n\nfrom sklearn.model_selection import validation_curve\n\nknn.get_params()", "_____no_output_____" ], [ "parameters = np.arange(1,10)\ntrain_scores, test_scores = validation_curve(model,\n X_train,\n y_train,\n param_name = 'n_neighbors',\n param_range = parameters,\n scoring = 'accuracy',\n n_jobs = -1)\ntrain_scores_mean = np.mean(train_scores, axis = 1)\ntrain_scores_std = np.std(train_scores, axis = 1)\ntest_scores_mean = np.mean(test_scores, axis = 1)\ntest_scores_std = np.std(test_scores, axis = 1)\n\nplt.figure(figsize=(12,8))\nplt.title('Validation Curve (KNeighbors)')\nplt.xlabel('n_neighbors')\nplt.ylabel('scores')\n#Train\nplt.semilogx(parameters,\n train_scores_mean,\n label = 'Training Score',\n color = 'red',\n lw =2)\nplt.fill_between(parameters,\n train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std,\n alpha = 0.2,\n color = 'red',\n lw = 2)\n\n#Test\nplt.semilogx(parameters,\n test_scores_mean,\n label = 'Cross Validation Score',\n color = 'navy',\n lw =2)\nplt.fill_between(parameters,\n test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std,\n alpha = 0.2,\n color = 'navy',\n lw = 2)\n\nplt.legend(loc = 'Best')\nplt.show()", "C:\\Users\\elele\\Anaconda3\\lib\\site-packages\\sklearn\\model_selection\\_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning.\n warnings.warn(CV_WARNING, FutureWarning)\nC:\\Users\\elele\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:44: MatplotlibDeprecationWarning: Unrecognized location 'Best'. Falling back on 'best'; valid locations are\n\tbest\n\tupper right\n\tupper left\n\tlower left\n\tlower right\n\tright\n\tcenter left\n\tcenter right\n\tlower center\n\tupper center\n\tcenter\nThis will raise an exception in 3.3.\n" ], [ "#Curva AUC–ROC\n", "_____no_output_____" ] ], [ [ "### Ejercicio 5\n__Reducción de la dimensión:__ Tomando en cuenta el mejor modelo encontrado en el `Ejercicio 3`, debe realizar una reducción de dimensionalidad del conjunto de datos. 
Para ello debe abordar el problema ocupando los dos criterios visto en clases: \n\n* **Selección de atributos**\n* **Extracción de atributos**\n\n__Preguntas a responder:__\n\nUna vez realizado la reducción de dimensionalidad, debe sacar algunas estadísticas y gráficas comparativas entre el conjunto de datos original y el nuevo conjunto de datos (tamaño del dataset, tiempo de ejecución del modelo, etc.)\n", "_____no_output_____" ] ], [ [ "#Selección de atributos \n\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import f_classif\n\ndf = pd.DataFrame(X)\ndf.columns = [f'P{k}' for k in range(1,X.shape[1]+1)]\ndf['y']=y\nprint('Vemos que el df respectivo es de la forma:')\nprint('\\n')\nprint(df.head())\n\n# Separamos las columnas objetivo\nx_training = df.drop(['y',], axis=1)\ny_training = df['y']\n\n# Aplicando el algoritmo univariante de prueba F.\nk = 40 # número de atributos a seleccionar\ncolumnas = list(x_training.columns.values)\nseleccionadas = SelectKBest(f_classif, k=k).fit(x_training, y_training)\n\ncatrib = seleccionadas.get_support()\natributos = [columnas[i] for i in list(catrib.nonzero()[0])]\nprint('\\n')\nprint('Los atributos quedan como:')\nprint('\\n')\nprint(atributos)\n\n#Veamos que pasa si entrenamos un nuevo modelo K-NEAREST NEIGHBORS con los atributos seleccionados anteriormente\n\nx=df[atributos]\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,random_state=42)\n\n#Creando el modelo\nknn = neighbors.KNeighborsClassifier()\nknn.fit(x_train,y_train) #Ajustando el modelo\n\n#Matriz de confusión\ny_true = list(y_test)\ny_pred = list(knn.predict(x_test))\nprint('\\nMatriz de confusion:\\n ')\nprint(confusion_matrix(y_true,y_pred))\n\n#Métricas\ndf_temp = pd.DataFrame(\n {\n 'y':y_true,\n 'yhat':y_pred\n }\n)\n\ndf_metrics = summary_metrics(df_temp)\nprint(\"\\nMetricas para los regresores \")\nprint(\"\")\nprint(df_metrics)", "Vemos que el df respectivo es de la forma:\n\n\n P1 P2 P3 P4 P5 P6 P7 P8 P9 P10 ... P56 P57 P58 P59 P60 P61 \\\n0 0 0 5 13 9 1 0 0 0 0 ... 0 0 0 6 13 10 \n1 0 0 0 12 13 5 0 0 0 0 ... 0 0 0 0 11 16 \n2 0 0 0 4 15 12 0 0 0 0 ... 0 0 0 0 3 11 \n3 0 0 7 15 13 1 0 0 0 8 ... 0 0 0 7 13 13 \n4 0 0 0 1 11 0 0 0 0 0 ... 
0 0 0 0 2 16 \n\n P62 P63 P64 y \n0 0 0 0 0 \n1 10 0 0 1 \n2 16 9 0 2 \n3 9 0 0 3 \n4 4 0 0 4 \n\n[5 rows x 65 columns]\n\n\nLos atributos quedan como:\n\n\n['P3', 'P4', 'P6', 'P7', 'P10', 'P11', 'P14', 'P18', 'P19', 'P20', 'P21', 'P22', 'P26', 'P27', 'P28', 'P29', 'P30', 'P31', 'P34', 'P35', 'P36', 'P37', 'P38', 'P39', 'P42', 'P43', 'P44', 'P45', 'P46', 'P47', 'P51', 'P52', 'P53', 'P54', 'P55', 'P59', 'P60', 'P61', 'P62', 'P63']\n\nMatriz de confusion:\n \n[[33 0 0 0 0 0 0 0 0 0]\n [ 0 28 0 0 0 0 0 0 0 0]\n [ 0 0 33 0 0 0 0 0 0 0]\n [ 0 0 0 34 0 0 0 0 0 0]\n [ 0 0 0 0 46 0 0 0 0 0]\n [ 0 0 0 0 0 46 0 0 0 1]\n [ 0 0 0 0 0 0 35 0 0 0]\n [ 0 0 0 0 0 0 0 33 0 1]\n [ 0 1 0 0 0 0 0 0 29 0]\n [ 0 0 0 1 1 1 0 0 0 37]]\n\nMetricas para los regresores \n\n accuracy recall precision fscore\n0 0.9833 0.9841 0.9843 0.9841\n" ], [ "#Extracción de atributos \n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\nx = StandardScaler().fit_transform(X)\n\nn_components = 50\npca = PCA(n_components)\nprincipalComponents = pca.fit_transform(x)\n\n# Graficar varianza por componente\npercent_variance = np.round(pca.explained_variance_ratio_* 100, decimals =2)\ncolumns = [ 'P'+str(i) for i in range(n_components)]\n\nplt.figure(figsize=(20,4))\nplt.bar(x= range(0,n_components), height=percent_variance, tick_label=columns)\nplt.ylabel('Percentate of Variance Explained')\nplt.xlabel('Principal Component')\nplt.title('PCA Scree Plot')\nplt.show()\n\n# graficar varianza por la suma acumulada de los componente\n\npercent_variance_cum = np.cumsum(percent_variance)\ncolumns = [ 'P' + str(0) + '+...+P' + str(i) for i in range(n_components) ]\n\nplt.figure(figsize=(20,4))\nplt.bar(x= range(0,n_components), height=percent_variance_cum, tick_label=columns)\nplt.xticks(range(len(columns)), columns, rotation=90)\nplt.xlabel('Principal Component Cumsum')\nplt.title('PCA Scree Plot')\nplt.show()\n", "_____no_output_____" ] ], [ [ "### Ejercicio 6\n\n\n__Visualizando Resultados:__ A continuación se provee código para comparar las etiquetas predichas vs las etiquetas reales del conjunto de _test_. \n", "_____no_output_____" ] ], [ [ "def mostar_resultados(digits,model,nx=5, ny=5,label = \"correctos\"):\n \"\"\"\n Muestra los resultados de las prediciones de un modelo \n de clasificacion en particular. 
Se toman aleatoriamente los valores\n de los resultados.\n \n - label == 'correcto': retorna los valores en que el modelo acierta.\n - label == 'incorrecto': retorna los valores en que el modelo no acierta.\n\n \n Observacion: El modelo que recibe como argumento debe NO encontrarse\n 'entrenado'.\n \n \n :param digits: dataset 'digits'\n :param model: modelo de sklearn\n :param nx: numero de filas (subplots)\n :param ny: numero de columnas (subplots)\n :param label: datos correctos o incorrectos\n :return: graficos matplotlib\n \"\"\"\n \n X = digits.drop(columns = \"target\").values\n y = digits[\"target\"].values\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) \n model.fit(X_train, y_train) # ajustando el modelo\n y_pred = model.predict(X_test)\n\n # Mostrar los datos correctos\n if label == \"correctos\":\n mask = (y_pred == y_test)\n color = \"green\"\n \n # Mostrar los datos correctos\n elif label == \"incorrectos\":\n mask = (y_pred != y_test)\n color = \"red\"\n \n else:\n raise ValueError(\"Valor incorrecto\")\n \n\n X_aux = X_test[mask]\n y_aux_true = y_test[mask]\n y_aux_pred = y_pred[mask]\n\n \n # We'll plot the first 100 examples, randomly choosen\n fig, ax = plt.subplots(nx, ny, figsize=(12,12))\n for i in range(nx):\n for j in range(ny):\n index = j + ny * i\n data = X_aux[index, :].reshape(8,8)\n label_pred = str(int(y_aux_pred[index]))\n label_true = str(int(y_aux_true[index]))\n ax[i][j].imshow(data, interpolation = 'nearest', cmap = 'gray_r')\n ax[i][j].text(0, 0, label_pred, horizontalalignment = 'center', verticalalignment = 'center', fontsize = 10, color = color)\n ax[i][j].text(7, 0, label_true, horizontalalignment = 'center', verticalalignment = 'center', fontsize = 10, color = 'blue')\n ax[i][j].get_xaxis().set_visible(False)\n ax[i][j].get_yaxis().set_visible(False)\n plt.show()\n", "_____no_output_____" ] ], [ [ "**Pregunta**\n\n* Tomando en cuenta el mejor modelo entontrado en el `Ejercicio 3`, grafique los resultados cuando:\n * el valor predicho y original son iguales\n * el valor predicho y original son distintos \n\n\n* Cuando el valor predicho y original son distintos , ¿Por qué ocurren estas fallas?", "_____no_output_____" ] ], [ [ "mostar_resultados(digits, KNeighborsClassifier(), nx=5, ny=5,label = \"correctos\")", "_____no_output_____" ], [ "mostar_resultados(digits, neighbors.KNeighborsClassifier(), nx=5, ny=5,label = \"incorrectos\")\n", "_____no_output_____" ] ], [ [ "### Ejercicio 7\n**Conclusiones**: Entrega tu veredicto, responde las preguntas iniciales, visualizaciones, trabajos futuros, dificultades, etc.", "_____no_output_____" ], [ "Vemos que las métricas tenían valores cercanos a uno, pero nunca llegando a la unidad, lo mismo ocurre con las matrices de confusión. Hay errores, pero son pequeños. Ahora, para algún trabajo futuro, se podría realizar un estudio de como encontrar mejores modelos y además mejorar la experiencia y manejo del tema por parte del alumno. \n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
e7c28c96923a51cfe847899df428cb78d243458e
6,553
ipynb
Jupyter Notebook
Multi-Class Classification - Naive Bayes - Iris Dataset.ipynb
jash2349/Machine-Learning
ba5b4dbedba1e7b34142aaa6d176387da86d2f7a
[ "Unlicense" ]
1
2020-05-21T20:42:47.000Z
2020-05-21T20:42:47.000Z
Multi-Class Classification - Naive Bayes - Iris Dataset.ipynb
jash2349/Machine-Learning
ba5b4dbedba1e7b34142aaa6d176387da86d2f7a
[ "Unlicense" ]
null
null
null
Multi-Class Classification - Naive Bayes - Iris Dataset.ipynb
jash2349/Machine-Learning
ba5b4dbedba1e7b34142aaa6d176387da86d2f7a
[ "Unlicense" ]
null
null
null
29.786364
92
0.515489
[ [ [ "import pandas as pd\nfrom pandas import Series,DataFrame\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Gaussian Naive Bayes\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.naive_bayes import GaussianNB", "_____no_output_____" ], [ "# load the iris datasets\niris = datasets.load_iris()\n\n# Grab features (X) and the Target (Y)\nX = iris.data\n\nY = iris.target\n\n# Show the Built-in Data Description\nprint iris.DESCR", ".. _iris_dataset:\n\nIris plants dataset\n--------------------\n\n**Data Set Characteristics:**\n\n :Number of Instances: 150 (50 in each of three classes)\n :Number of Attributes: 4 numeric, predictive attributes and the class\n :Attribute Information:\n - sepal length in cm\n - sepal width in cm\n - petal length in cm\n - petal width in cm\n - class:\n - Iris-Setosa\n - Iris-Versicolour\n - Iris-Virginica\n \n :Summary Statistics:\n\n ============== ==== ==== ======= ===== ====================\n Min Max Mean SD Class Correlation\n ============== ==== ==== ======= ===== ====================\n sepal length: 4.3 7.9 5.84 0.83 0.7826\n sepal width: 2.0 4.4 3.05 0.43 -0.4194\n petal length: 1.0 6.9 3.76 1.76 0.9490 (high!)\n petal width: 0.1 2.5 1.20 0.76 0.9565 (high!)\n ============== ==== ==== ======= ===== ====================\n\n :Missing Attribute Values: None\n :Class Distribution: 33.3% for each of 3 classes.\n :Creator: R.A. Fisher\n :Donor: Michael Marshall (MARSHALL%[email protected])\n :Date: July, 1988\n\nThe famous Iris database, first used by Sir R.A. Fisher. The dataset is taken\nfrom Fisher's paper. Note that it's the same as in R, but not as in the UCI\nMachine Learning Repository, which has two wrong data points.\n\nThis is perhaps the best known database to be found in the\npattern recognition literature. Fisher's paper is a classic in the field and\nis referenced frequently to this day. (See Duda & Hart, for example.) The\ndata set contains 3 classes of 50 instances each, where each class refers to a\ntype of iris plant. One class is linearly separable from the other 2; the\nlatter are NOT linearly separable from each other.\n\n.. topic:: References\n\n - Fisher, R.A. \"The use of multiple measurements in taxonomic problems\"\n Annual Eugenics, 7, Part II, 179-188 (1936); also in \"Contributions to\n Mathematical Statistics\" (John Wiley, NY, 1950).\n - Duda, R.O., & Hart, P.E. (1973) Pattern Classification and Scene Analysis.\n (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218.\n - Dasarathy, B.V. (1980) \"Nosing Around the Neighborhood: A New System\n Structure and Classification Rule for Recognition in Partially Exposed\n Environments\". IEEE Transactions on Pattern Analysis and Machine\n Intelligence, Vol. PAMI-2, No. 1, 67-71.\n - Gates, G.W. (1972) \"The Reduced Nearest Neighbor Rule\". IEEE Transactions\n on Information Theory, May 1972, 431-433.\n - See also: 1988 MLC Proceedings, 54-64. 
Cheeseman et al\"s AUTOCLASS II\n conceptual clustering system finds 3 classes in the data.\n - Many, many more ...\n" ], [ "# Fit a Naive Bayes model to the data\nmodel = GaussianNB()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\n# Split the data into Trainging and Testing sets\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y)", "_____no_output_____" ], [ "# Fit the training model\nmodel.fit(X_train,Y_train)", "_____no_output_____" ], [ "# Predicted outcomes\npredicted = model.predict(X_test)\n\n# Actual Expected Outvomes\nexpected = Y_test", "_____no_output_____" ], [ "print metrics.accuracy_score(expected, predicted)", "0.9736842105263158\n" ], [ "#We have about 97.35% accuracy using Naive Bayes", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c292dff84b69c141f310b4b5cdb08ae3dbf3de
128,798
ipynb
Jupyter Notebook
development/hfbasin.ipynb
DamienIrving/ocean-analysis
23a6dbf616fb84e6e158e32534ffd394e0df2e3e
[ "MIT" ]
7
2017-06-06T20:20:58.000Z
2020-02-05T23:28:41.000Z
development/hfbasin.ipynb
DamienIrving/ocean-analysis
23a6dbf616fb84e6e158e32534ffd394e0df2e3e
[ "MIT" ]
17
2017-04-06T04:46:37.000Z
2021-07-01T00:47:50.000Z
development/hfbasin.ipynb
DamienIrving/ocean-analysis
23a6dbf616fb84e6e158e32534ffd394e0df2e3e
[ "MIT" ]
4
2021-01-19T01:31:40.000Z
2022-03-15T00:50:11.000Z
154.619448
28,332
0.871302
[ [ [ "# Ocean heat transport in CMIP5 models\n\n", "_____no_output_____" ], [ "## Read data", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport iris\nimport iris.plot as iplt\nimport iris.coord_categorisation\nimport cf_units\nimport numpy", "_____no_output_____" ], [ "%matplotlib inline ", "_____no_output_____" ], [ "infile = '/g/data/ua6/DRSv2/CMIP5/NorESM1-M/rcp85/mon/ocean/r1i1p1/hfbasin/latest/hfbasin_Omon_NorESM1-M_rcp85_r1i1p1_200601-210012.nc'", "_____no_output_____" ], [ "cube = iris.load_cube(infile)", "/g/data/r87/dbi599/miniconda3/envs/ocean/lib/python3.6/site-packages/iris/fileformats/cf.py:1143: IrisDeprecation: NetCDF default loading behaviour currently does not expose variables which define reference surfaces for dimensionless vertical coordinates as independent Cubes. This behaviour is deprecated in favour of automatic promotion to Cubes. To switch to the new behaviour, set iris.FUTURE.netcdf_promote to True.\n warn_deprecated(msg)\n" ], [ "print(cube)", "northward_ocean_heat_transport / (W) (time: 1140; -- : 3; latitude: 166)\n Dimension coordinates:\n time x - -\n latitude - - x\n Auxiliary coordinates:\n region - x -\n Attributes:\n Conventions: CF-1.4\n associated_files: baseURL: http://cmip-pcmdi.llnl.gov/CMIP5/dataLocation gridspecFile: g...\n branch_time: 56940.0\n cmor_version: 2.6.0\n contact: Please send any requests or bug reports to [email protected].\n creation_date: 2011-05-28T20:59:53Z\n experiment: RCP8.5\n experiment_id: rcp85\n forcing: GHG, SA, Oz, Sl, BC, OC\n frequency: mon\n history: 2011-05-28T20:59:53Z altered by CMOR: replaced missing value flag (1e+20)...\n initialization_method: 1\n institute_id: NCC\n institution: Norwegian Climate Centre\n model_id: NorESM1-M\n modeling_realm: ocean\n original_name: mhflx\n parent_experiment: historical\n parent_experiment_id: historical\n parent_experiment_rip: r1i1p1\n physics_version: 1\n product: output\n project_id: CMIP5\n realization: 1\n source: NorESM1-M 2011 atmosphere: CAM-Oslo (CAM4-Oslo-noresm-ver1_cmip5-r112,...\n table_id: Table Omon (27 April 2011) 340eddd4fd838d90fa9ffe1345ecbd73\n title: NorESM1-M model output prepared for CMIP5 RCP8.5\n tracking_id: 8896324a-8d1f-4890-af1d-fd9041f99694\n Cell methods:\n mean: time\n mean: longitude\n" ], [ "dim_coord_names = [coord.name() for coord in cube.dim_coords]\nprint(dim_coord_names)", "['time', 'latitude']\n" ], [ "cube.coord('latitude').points", "_____no_output_____" ], [ "aux_coord_names = [coord.name() for coord in cube.aux_coords]\nprint(aux_coord_names)", "['region']\n" ], [ "cube.coord('region')", "_____no_output_____" ], [ "global_cube = cube.extract(iris.Constraint(region='global_ocean'))", "_____no_output_____" ], [ "def convert_to_annual(cube):\n \"\"\"Convert data to annual timescale.\n Args:\n cube (iris.cube.Cube)\n full_months(bool): only include years with data for all 12 months\n \"\"\"\n\n iris.coord_categorisation.add_year(cube, 'time')\n iris.coord_categorisation.add_month(cube, 'time')\n\n cube = cube.aggregated_by(['year'], iris.analysis.MEAN)\n cube.remove_coord('year')\n cube.remove_coord('month')\n\n return cube", "_____no_output_____" ], [ "global_cube_annual = convert_to_annual(global_cube)", "/g/data/r87/dbi599/miniconda3/envs/ocean/lib/python3.6/site-packages/iris/coords.py:495: VisibleDeprecationWarning: an index can only have a single Ellipsis (`...`); replace all but one with slices (`:`).\n bounds = bounds[keys + (Ellipsis, )]\n" ], [ "print(global_cube_annual)", "northward_ocean_heat_transport / 
(W) (time: 95; latitude: 166)\n Dimension coordinates:\n time x -\n latitude - x\n Scalar coordinates:\n region: global_ocean\n Attributes:\n Conventions: CF-1.4\n associated_files: baseURL: http://cmip-pcmdi.llnl.gov/CMIP5/dataLocation gridspecFile: g...\n branch_time: 56940.0\n cmor_version: 2.6.0\n contact: Please send any requests or bug reports to [email protected].\n creation_date: 2011-05-28T20:59:53Z\n experiment: RCP8.5\n experiment_id: rcp85\n forcing: GHG, SA, Oz, Sl, BC, OC\n frequency: mon\n history: 2011-05-28T20:59:53Z altered by CMOR: replaced missing value flag (1e+20)...\n initialization_method: 1\n institute_id: NCC\n institution: Norwegian Climate Centre\n model_id: NorESM1-M\n modeling_realm: ocean\n original_name: mhflx\n parent_experiment: historical\n parent_experiment_id: historical\n parent_experiment_rip: r1i1p1\n physics_version: 1\n product: output\n project_id: CMIP5\n realization: 1\n source: NorESM1-M 2011 atmosphere: CAM-Oslo (CAM4-Oslo-noresm-ver1_cmip5-r112,...\n table_id: Table Omon (27 April 2011) 340eddd4fd838d90fa9ffe1345ecbd73\n title: NorESM1-M model output prepared for CMIP5 RCP8.5\n tracking_id: 8896324a-8d1f-4890-af1d-fd9041f99694\n Cell methods:\n mean: time\n mean: longitude\n mean: year\n" ], [ "iplt.plot(global_cube_annual[5, ::])\niplt.plot(global_cube_annual[20, ::])\nplt.show()", "_____no_output_____" ] ], [ [ "So for any given year, the annual mean shows ocean heat transport away from the tropics.", "_____no_output_____" ], [ "## Trends", "_____no_output_____" ] ], [ [ "def convert_to_seconds(time_axis):\n \"\"\"Convert time axis units to seconds.\n \n Args:\n time_axis(iris.DimCoord)\n \n \"\"\"\n\n old_units = str(time_axis.units)\n old_timestep = old_units.split(' ')[0]\n new_units = old_units.replace(old_timestep, 'seconds') \n\n new_unit = cf_units.Unit(new_units, calendar=time_axis.units.calendar) \n time_axis.convert_units(new_unit)\n\n return time_axis\n\n\ndef linear_trend(data, time_axis):\n \"\"\"Calculate the linear trend.\n \n polyfit returns [a, b] corresponding to y = a + bx\n\n \"\"\" \n\n masked_flag = False\n\n if type(data) == numpy.ma.core.MaskedArray:\n if type(data.mask) == numpy.bool_:\n if data.mask:\n masked_flag = True\n elif data.mask[0]:\n masked_flag = True\n \n if masked_flag:\n return data.fill_value\n else:\n return numpy.polynomial.polynomial.polyfit(time_axis, data, 1)[-1]\n\n\ndef calc_trend(cube):\n \"\"\"Calculate linear trend.\n Args:\n cube (iris.cube.Cube)\n running_mean(bool, optional): \n A 12-month running mean can first be applied to the data\n yr (bool, optional):\n Change units from per second to per year\n \"\"\"\n\n time_axis = cube.coord('time')\n time_axis = convert_to_seconds(time_axis)\n\n trend = numpy.ma.apply_along_axis(linear_trend, 0, cube.data, time_axis.points)\n trend = numpy.ma.masked_values(trend, cube.data.fill_value)\n\n return trend", "_____no_output_____" ], [ "trend_data = calc_trend(global_cube_annual)\n\ntrend_cube = global_cube_annual[0, ::].copy()\ntrend_cube.data = trend_data\ntrend_cube.remove_coord('time')\n\n#trend_unit = ' yr-1'\n#trend_cube.units = str(global_cube_annual.units) + trend_unit", "_____no_output_____" ], [ "iplt.plot(trend_cube)\nplt.show()", "_____no_output_____" ] ], [ [ "So the trends in ocean heat transport suggest reduced transport in the RCP 8.5 simulation (i.e. 
the trend plot is almost the inverse of the climatology plot).", "_____no_output_____" ], [ "## Convergence", "_____no_output_____" ] ], [ [ "print(global_cube_annual)", "northward_ocean_heat_transport / (W) (time: 95; latitude: 166)\n Dimension coordinates:\n time x -\n latitude - x\n Scalar coordinates:\n region: global_ocean\n Attributes:\n Conventions: CF-1.4\n associated_files: baseURL: http://cmip-pcmdi.llnl.gov/CMIP5/dataLocation gridspecFile: g...\n branch_time: 56940.0\n cmor_version: 2.6.0\n contact: Please send any requests or bug reports to [email protected].\n creation_date: 2011-05-28T20:59:53Z\n experiment: RCP8.5\n experiment_id: rcp85\n forcing: GHG, SA, Oz, Sl, BC, OC\n frequency: mon\n history: 2011-05-28T20:59:53Z altered by CMOR: replaced missing value flag (1e+20)...\n initialization_method: 1\n institute_id: NCC\n institution: Norwegian Climate Centre\n model_id: NorESM1-M\n modeling_realm: ocean\n original_name: mhflx\n parent_experiment: historical\n parent_experiment_id: historical\n parent_experiment_rip: r1i1p1\n physics_version: 1\n product: output\n project_id: CMIP5\n realization: 1\n source: NorESM1-M 2011 atmosphere: CAM-Oslo (CAM4-Oslo-noresm-ver1_cmip5-r112,...\n table_id: Table Omon (27 April 2011) 340eddd4fd838d90fa9ffe1345ecbd73\n title: NorESM1-M model output prepared for CMIP5 RCP8.5\n tracking_id: 8896324a-8d1f-4890-af1d-fd9041f99694\n Cell methods:\n mean: time\n mean: longitude\n mean: year\n" ], [ "diffs_data = numpy.diff(global_cube_annual.data, axis=1)\nlats = global_cube_annual.coord('latitude').points\ndiffs_lats = (lats[1:] + lats[:-1]) / 2.", "_____no_output_____" ], [ "print(diffs_data.shape)\nprint(len(diffs_lats))", "(95, 165)\n165\n" ], [ "plt.plot(diffs_lats, diffs_data[0, :])\nplt.plot(lats, global_cube_annual[0, ::].data / 10.0)\nplt.show()", "_____no_output_____" ] ], [ [ "## Convergence trend", "_____no_output_____" ] ], [ [ "time_axis = global_cube_annual.coord('time')\ntime_axis = convert_to_seconds(time_axis)\n\ndiffs_trend = numpy.ma.apply_along_axis(linear_trend, 0, diffs_data, time_axis.points)\ndiffs_trend = numpy.ma.masked_values(diffs_trend, global_cube_annual.data.fill_value)", "_____no_output_____" ], [ "print(diffs_trend.shape)", "(165,)\n" ], [ "plt.plot(diffs_lats, diffs_trend * -1)\nplt.axhline(y=0)\nplt.show()", "_____no_output_____" ], [ "plt.plot(diffs_lats, diffs_trend * -1, color='black')\nplt.axhline(y=0)\nplt.axvline(x=30)\nplt.axvline(x=50)\nplt.axvline(x=77)\nplt.xlim(20, 90)\nplt.show()", "_____no_output_____" ] ], [ [ "When I try and replicate the HTC curve in Figure 1 of [Nummelin et al (2016)](http://onlinelibrary.wiley.com/doi/10.1002/2016GL071333/abstract;jsessionid=7BC4C1DF16F35341AE3D3689F363955B.f02t01) (above) it looks different because I've plotted $W s^{-1}$, whereas they plot $W m^{-2}$. So I need to divide by area and mutliply by their delta $\\delta t/2$ (i.e. ($60 \\times 60 \\times 24 \\times 365.25 \\times 95) / 2$). \n\nFIXME: Probably easiest to re-do this NorESM1-M analysis with hfy.\n\nOnce I've done that to confirm that I can reproduce their results, I may actually want to stick with $W s^{-1}$. Dividing by area inflates the high latitude regions, whereas I'm more interested in where the big heat transports are taking place.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
e7c29417f996db799fb9f9c29c393622626d4a64
26,325
ipynb
Jupyter Notebook
titanic/Titanic Clean.ipynb
bhi5hmaraj/Applied-ML
dc8eeb57ce4ac0869988d2d2d5f3f707421ece49
[ "MIT" ]
null
null
null
titanic/Titanic Clean.ipynb
bhi5hmaraj/Applied-ML
dc8eeb57ce4ac0869988d2d2d5f3f707421ece49
[ "MIT" ]
null
null
null
titanic/Titanic Clean.ipynb
bhi5hmaraj/Applied-ML
dc8eeb57ce4ac0869988d2d2d5f3f707421ece49
[ "MIT" ]
null
null
null
43.014706
5,768
0.678025
[ [ [ "import chardet \nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing", "_____no_output_____" ], [ "X = pd.read_csv('train.csv')\ny = df_raw['Survived']", "_____no_output_____" ] ], [ [ "## Baseline\n", "_____no_output_____" ] ], [ [ "def customVectorizer(df, toRemove):\n# leEmbarked.fit(df_raw['Embarked'])\n leSex = preprocessing.LabelEncoder()\n leEmbarked = preprocessing.LabelEncoder()\n df.fillna(inplace=True, value=0)\n leSex.fit(df['Sex'])\n# leEmbarked.fit(df['Embarked'])\n# df['Embarked'] = leEmbarked.transform(df['Embarked'])\n df['Sex'] = leSex.transform(df['Sex'])\n return df.drop(labels=toRemove, axis=1)", "_____no_output_____" ], [ "X = customVectorizer(X, ['Embarked', 'PassengerId', 'Name', 'Age', 'Ticket', 'Cabin'])\nprint(X.shape)", "(891, 5)\n" ], [ "from sklearn.linear_model import LogisticRegression\nmodel = LogisticRegression(random_state=42)\n\n\nfrom sklearn.model_selection import cross_val_score\n\nlr_model = LogisticRegression()\ncv_scores = cross_val_score(lr_model, X=X, y=y, cv=5, n_jobs=4)\nprint(cv_scores)", "[0.80446927 0.80446927 0.78089888 0.76404494 0.8079096 ]\n" ], [ "model.fit(X,y)", "/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n" ], [ "df_raw_test = pd.read_csv('test.csv')\ndf_test = baselineVectorizer(df_raw_test)\ny_test_predicted = model.predict(df_test)", "_____no_output_____" ], [ "print('\\n'.join([\"{},{}\".format(892 + i, y_test_predicted[i]) for i in range(len(y_test_predicted))]) , file=open('test_pred.csv', 'w'))", "_____no_output_____" ], [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "plt.hist(X['Age'], bins=30)", "_____no_output_____" ] ], [ [ "### Train a classifer for Age and use it to fill gaps\n\nSo first we split the train.csv into 2 parts (one with non null age and the other with null) . We train a regressor on the non null data points to predict age and use this trained regressor to fill the missing ages. 
Now we combine the 2 split data sets into a single dataset and train a logistic regression classifier.", "_____no_output_____" ] ], [ [ "X = pd.read_csv('train.csv')\nage_present = X['Age'] > 0", "_____no_output_____" ], [ "age_present.describe()", "_____no_output_____" ], [ "False in age_present", "_____no_output_____" ], [ "X_age_p = X[age_present]", "_____no_output_____" ], [ "X_age_p.shape", "_____no_output_____" ], [ "age = X_age_p['Age']\n\n# X_age_p\n\nX_age_p = customVectorizer(X_age_p, ['Embarked', 'Age', 'PassengerId', 'Name', 'Ticket', 'Cabin'])\nfrom sklearn.linear_model import LinearRegression\nreg = LinearRegression().fit(X_age_p, age)\n\n\nX_null_age = X[X['Age'].isnull()]\n\npred_age = reg.predict(customVectorizer(X_null_age, ['Embarked', 'Age', 'PassengerId', 'Name', 'Ticket', 'Cabin']))", "/home/bhishma/anaconda3/lib/python3.7/site-packages/pandas/core/frame.py:3790: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n downcast=downcast, **kwargs)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:10: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n # Remove the CWD from sys.path while we load stuff.\n/home/bhishma/anaconda3/lib/python3.7/site-packages/pandas/core/frame.py:3790: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n downcast=downcast, **kwargs)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:10: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n # Remove the CWD from sys.path while we load stuff.\n" ], [ "age.mean()", "_____no_output_____" ], [ "pred_age = list(map(lambda x : max(0, x), pred_age))", "_____no_output_____" ], [ "X_null_age['Age'] = pred_age", "/home/bhishma/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "y = X['Survived']", "_____no_output_____" ], [ "# from sklearn.model_selection import cross_val_score\n# lr_model = LogisticRegression()\n\nX_age_p['Age'] = age", "_____no_output_____" ], [ "X_age_p.shape", "_____no_output_____" ], [ "X = pd.concat([X_age_p, customVectorizer(X_null_age, ['Embarked', 'PassengerId', 'Name', 'Ticket', 'Cabin'])])", "/home/bhishma/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: FutureWarning: Sorting because non-concatenation axis is not aligned. 
A future version\nof pandas will change to not sort by default.\n\nTo accept the future behavior, pass 'sort=False'.\n\nTo retain the current behavior and silence the warning, pass 'sort=True'.\n\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "y = X['Survived']\nX = X.drop(labels=['Survived'], axis=1)", "_____no_output_____" ], [ "lr_model = LogisticRegression()\ncv_scores = cross_val_score(lr_model, X=X, y=y, cv=10, n_jobs=4)\nprint(cv_scores)", "[0.76666667 0.76666667 0.85393258 0.7752809 0.80898876 0.78651685\n 0.79775281 0.80898876 0.86516854 0.79545455]\n" ] ], [ [ "### Try ensemble with LR, SVC, RF", "_____no_output_____" ] ], [ [ "# taken from https://machinelearningmastery.com/ensemble-machine-learning-algorithms-python-scikit-learn/\nfrom sklearn import model_selection\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import VotingClassifier\n\nkfold = model_selection.KFold(n_splits=10, random_state=42)\n# create the sub models\nestimators = []\nmodel1 = LogisticRegression()\nestimators.append(('logistic', model1))\nmodel2 = DecisionTreeClassifier()\nestimators.append(('cart', model2))\nmodel3 = SVC()\nestimators.append(('svm', model3))\n# create the ensemble model\n\nensemble = VotingClassifier(estimators)\nresults = model_selection.cross_val_score(ensemble, X, y, cv=kfold)\nprint(results.mean())\nprint(results)\n", "/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. 
Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/home/bhishma/anaconda3/lib/python3.7/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
e7c29d3a178e0e447da5f8dd641c8055d86879ac
39,011
ipynb
Jupyter Notebook
DataClean/DataClean.ipynb
namiyao/Kaggle_QQR
9a67bd402536cca4aa59a80d30017768d02eddb4
[ "MIT" ]
null
null
null
DataClean/DataClean.ipynb
namiyao/Kaggle_QQR
9a67bd402536cca4aa59a80d30017768d02eddb4
[ "MIT" ]
null
null
null
DataClean/DataClean.ipynb
namiyao/Kaggle_QQR
9a67bd402536cca4aa59a80d30017768d02eddb4
[ "MIT" ]
null
null
null
40.892034
160
0.581016
[ [ [ "Use data clean from script_LSTM.py\n\nLSTM(64)\nDENSE(64)\nBATCH_SIZE = 256\nweights.002-0.2777.hdf5\n212s - loss: 0.2390 - acc: 0.8283 - val_loss: 0.2777 - val_acc: 0.8053\n\nLSTM(128,0.5,0.5)\nDENSE(128,0.5)\nBatchNormalization()\nBATCH_SIZE = 2048\nweights.022-0.2778.hdf5\n111s - loss: 0.2682 - acc: 0.7932 - val_loss: 0.2778 - val_acc: 0.7855\n\nLSTM(128,0.5,0.5)\nDENSE(128,0.5)\nBATCH_SIZE = 2048\nweights.025-0.2798.hdf5\n110s - loss: 0.2660 - acc: 0.7969 - val_loss: 0.2798 - val_acc: 0.7826", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import SnowballStemmer\nimport re\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport datetime, time, json, os, math, pickle, sys\nfrom string import punctuation\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential, Model, load_model\nfrom keras.layers import concatenate, Embedding, Dense, Input, Dropout, Bidirectional, LSTM, BatchNormalization, TimeDistributed\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard\nfrom keras import backend as K\n", "Using TensorFlow backend.\n" ], [ "DATA_DIR = '../data/'\nMODEL = 'DataClean'\nif os.getcwd().split('/')[-1] != MODEL:\n print('WRONG MODEL DIR!!!')\nCHECKPOINT_DIR = './checkpoint/'\nif not os.path.exists(CHECKPOINT_DIR):\n os.mkdir(CHECKPOINT_DIR)\nLOG_DIR = './log/'\nif not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\nOUTPUT_DIR = './output/'\nif not os.path.exists(OUTPUT_DIR):\n os.mkdir(OUTPUT_DIR)\n \nMAX_LEN = 40\nEMBEDDING_DIM = 300\nVALID_SPLIT = 0.05\nRE_WEIGHT = True # whether to re-weight classes to fit the 17.5% share in test set\n# VOCAB_SIZE = 10000\n\n\ndef get_best_model(checkpoint_dir = CHECKPOINT_DIR):\n files = glob.glob(checkpoint_dir+'*')\n val_losses = [float(f.split('-')[-1][:-5]) for f in files]\n index = val_losses.index(min(val_losses))\n print('Loading model from checkpoint file ' + files[index])\n model = load_model(files[index])\n model_name = files[index].split('/')[-1]\n print('Loading model Done!')\n return (model, model_name)", "_____no_output_____" ], [ "trainval_df = pd.read_csv(DATA_DIR+\"train.csv\")\ntest_df = pd.read_csv(DATA_DIR+\"test.csv\")\nprint(trainval_df.shape)\nprint(test_df.shape)", "(404290, 6)\n(2345796, 3)\n" ] ], [ [ "# Check for any null values\n# inds = pd.isnull(trainval_df).any(1).nonzero()[0]\n# trainval_df.loc[inds]\n# inds = pd.isnull(test_df).any(1).nonzero()[0]\n# test_df.loc[inds]\n\n# # Add the string 'empty' to empty strings\n# trainval_df = trainval_df.fillna('empty')\n# test_df = test_df.fillna('empty')", "_____no_output_____" ] ], [ [ "# data cleaning\ndef text_to_wordlist(text, remove_stopwords=False, stem_words=False):\n # Clean the text, with the option to remove stopwords and to stem words.\n \n if isinstance(text,float):\n # turn nan to empty string\n text = \"\"\n else:\n # Convert words to lower case and split them\n text = text.lower().split()\n\n # Optionally, remove stop words\n if remove_stopwords:\n stops = set(stopwords.words(\"english\"))\n text = [w for w in text if not w in stops]\n\n text = \" \".join(text)\n\n # Clean the text\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n text = 
re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"cannot \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\",\", \" \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"60k\", \" 60000 \", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n\n # Optionally, shorten words to their stems\n if stem_words:\n text = text.split()\n stemmer = SnowballStemmer('english')\n stemmed_words = [stemmer.stem(word) for word in text]\n text = \" \".join(stemmed_words)\n\n # Return a list of words\n return(text)", "_____no_output_____" ], [ "# question to word list by data cleaning\n\nfile_name = 'trainval_df.pickle'\nif os.path.exists(OUTPUT_DIR+file_name):\n print ('Loading from file '+file_name)\n trainval_df = pd.read_pickle(OUTPUT_DIR+file_name)\nelse:\n print ('Generating file '+file_name) \n trainval_df['question1_WL'] = trainval_df.apply(lambda row: text_to_wordlist(row['question1']), axis=1)\n trainval_df['question2_WL'] = trainval_df.apply(lambda row: text_to_wordlist(row['question2']), axis=1)\n trainval_df.to_pickle(OUTPUT_DIR+file_name) \n\nfile_name = 'test_df.pickle'\nif os.path.exists(OUTPUT_DIR+file_name):\n print ('Loading from file '+file_name)\n test_df = pd.read_pickle(OUTPUT_DIR+file_name)\nelse:\n print ('Generating file '+file_name) \n test_df['question1_WL'] = test_df.apply(lambda row: text_to_wordlist(row['question1']), axis=1)\n test_df['question2_WL'] = test_df.apply(lambda row: text_to_wordlist(row['question2']), axis=1)\n test_df.to_pickle(OUTPUT_DIR+file_name) \n \ntest_size = trainval_df.shape[0]-int(math.ceil(trainval_df.shape[0]*(1-VALID_SPLIT)/1024)*1024)\ntrain_df, valid_df = train_test_split(trainval_df, test_size=test_size, random_state=1986, stratify=trainval_df['is_duplicate'])", "Generating file trainval_df.pickle\nGenerating file test_df.pickle\n" ] ], [ [ "# trainval_df['len1'] = trainval_df.apply(lambda row: len(row['question1_WL'].split()), axis=1)\n# trainval_df['len2'] = trainval_df.apply(lambda row: len(row['question2_WL'].split()), axis=1)\n\ntest_df['len1'] = test_df.apply(lambda row: len(row['question1_WL'].split()), axis=1)\ntest_df['len2'] = test_df.apply(lambda row: len(row['question2_WL'].split()), axis=1)\n\nlengths = pd.concat([test_df['len1'],test_df['len2']], axis=0)\nprint(lengths.describe())\nprint(np.percentile(lengths, 99.0))\nprint(np.percentile(lengths, 99.4))\nprint(np.percentile(lengths, 99.5))\nprint(np.percentile(lengths, 99.9))", "_____no_output_____" ] ], [ [ "# tokenize and pad\n\nall_questions = 
pd.concat([trainval_df['question1_WL'],trainval_df['question2_WL'],test_df['question1_WL'],test_df['question2_WL']], axis=0)\ntokenizer = Tokenizer(num_words=None, lower=True)\ntokenizer.fit_on_texts(all_questions)\nword_index = tokenizer.word_index\nnb_words = len(word_index)\nprint(\"Words in index: %d\" % nb_words) #120594\n\ntrain_q1 = pad_sequences(tokenizer.texts_to_sequences(train_df['question1_WL']), maxlen = MAX_LEN)\ntrain_q2 = pad_sequences(tokenizer.texts_to_sequences(train_df['question2_WL']), maxlen = MAX_LEN)\nvalid_q1 = pad_sequences(tokenizer.texts_to_sequences(valid_df['question1_WL']), maxlen = MAX_LEN)\nvalid_q2 = pad_sequences(tokenizer.texts_to_sequences(valid_df['question2_WL']), maxlen = MAX_LEN)\ny_train = train_df.is_duplicate.values\ny_valid = valid_df.is_duplicate.values\n\ntrain_q1_Double = np.vstack((train_q1, train_q2))\ntrain_q2_Double = np.vstack((train_q2, train_q1))\nvalid_q1_Double = np.vstack((valid_q1, valid_q2))\nvalid_q2_Double = np.vstack((valid_q2, valid_q1))\ny_train_Double = np.hstack((y_train, y_train))\ny_valid_Double = np.hstack((y_valid, y_valid))\n\nval_sample_weights = np.ones(len(y_valid_Double))\nif RE_WEIGHT:\n class_weight = {0: 1.309028344, 1: 0.472001959}\n val_sample_weights *= 0.472001959\n val_sample_weights[y_valid_Double==0] = 1.309028344\nelse:\n class_weight = None\n val_sample_weights = None", "Words in index: 120594\n" ], [ "# load word_embedding_matrix\n\nW2V = 'glove.840B.300d.txt'\nfile_name = W2V + '.word_embedding_matrix.pickle'\nif os.path.exists(OUTPUT_DIR+file_name):\n print ('Loading from file '+file_name)\n with open(OUTPUT_DIR+file_name, 'rb') as f:\n word_embedding_matrix = pickle.load(f)\nelse:\n print ('Generating file '+file_name) \n # Load GloVe to use pretrained vectors\n embeddings_index = {}\n with open(DATA_DIR+'/WordEmbedding/'+W2V) as f:\n for line in f:\n values = line.split(' ')\n word = values[0]\n embedding = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = embedding\n print('Word embeddings:', len(embeddings_index)) #1,505,774\n\n # Need to use EMBEDDING_DIM for embedding dimensions to match GloVe's vectors.\n nb_words = len(word_index)\n null_embedding_words = []\n word_embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))\n for word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n word_embedding_matrix[i] = embedding_vector\n else:\n null_embedding_words.append(word)\n print('Null word embeddings: %d' %len(null_embedding_words)) #37,412\n\n with open(OUTPUT_DIR+file_name, 'wb') as f:\n pickle.dump(word_embedding_matrix, f)\n ", "Generating file glove.840B.300d.txt.word_embedding_matrix.pickle\nWord embeddings: 1505774\nNull word embeddings: 37412\n" ] ], [ [ "word_counts = tokenizer.word_counts\nnull_embedding_word_counts = { word: word_counts[word] for word in null_embedding_words }\nprint(sum(null_embedding_word_counts.values())) #454210\n\nword_docs = tokenizer.word_docs\nnull_embedding_word_docs = { word: word_docs[word] for word in null_embedding_words }\nprint(sum(null_embedding_word_docs.values())) #446584\n# 446584/(404290+2345796)/2 = 0.08119", "_____no_output_____" ] ], [ [ "BATCH_SIZE = 2048\nEMBEDDING_TRAINABLE = False\nRNNCELL_SIZE = 128\nRNNCELL_LAYERS = 1\nRNNCELL_DROPOUT = 0.5\nRNNCELL_RECURRENT_DROPOUT = 0.5\nRNNCELL_BIDIRECT = False\nDENSE_SIZE = 128\nDENSE_LAYERS = 1\nDENSE_DROPOUT = 0.5", "_____no_output_____" ], [ "encode_model = 
Sequential()\nencode_model.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length=MAX_LEN, trainable=EMBEDDING_TRAINABLE))\nif RNNCELL_BIDIRECT:\n for i in range(RNNCELL_LAYERS-1):\n encode_model.add(Bidirectional(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, \n unroll=True, implementation=2, return_sequences=True)))\n encode_model.add(Bidirectional(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, \n unroll=True, implementation=2)))\nelse:\n for i in range(RNNCELL_LAYERS-1):\n encode_model.add(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, \n unroll=True, implementation=2, return_sequences=True))\n encode_model.add(LSTM(RNNCELL_SIZE, dropout=RNNCELL_DROPOUT, recurrent_dropout=RNNCELL_RECURRENT_DROPOUT, \n unroll=True, implementation=2))\n\nsequence1_input = Input(shape=(MAX_LEN,), name='q1')\nsequence2_input = Input(shape=(MAX_LEN,), name='q2')\nencoded_1 = encode_model(sequence1_input)\nencoded_2 = encode_model(sequence2_input)\nmerged = concatenate([encoded_1, encoded_2], axis=-1)\nmerged = Dropout(DENSE_DROPOUT)(merged)\n# merged = BatchNormalization()(merged)\nfor i in range(DENSE_LAYERS):\n merged = Dense(DENSE_SIZE, activation='relu', kernel_initializer='he_normal')(merged)\n merged = Dropout(DENSE_DROPOUT)(merged)\n# merged = BatchNormalization()(merged)\npredictions = Dense(1, activation='sigmoid')(merged)\nmodel = Model(inputs=[sequence1_input, sequence2_input], outputs=predictions)\n", "_____no_output_____" ], [ "encode_model.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_6 (Embedding) (None, 40, 300) 36178500 \n_________________________________________________________________\nlstm_6 (LSTM) (None, 128) 219648 \n=================================================================\nTotal params: 36,398,148.0\nTrainable params: 219,648.0\nNon-trainable params: 36,178,500.0\n_________________________________________________________________\n" ], [ "model.summary()", "____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\nq1 (InputLayer) (None, 40) 0 \n____________________________________________________________________________________________________\nq2 (InputLayer) (None, 40) 0 \n____________________________________________________________________________________________________\nsequential_9 (Sequential) (None, 128) 36398148 \n____________________________________________________________________________________________________\nconcatenate_6 (Concatenate) (None, 256) 0 \n____________________________________________________________________________________________________\ndropout_12 (Dropout) (None, 256) 0 \n____________________________________________________________________________________________________\ndense_12 (Dense) (None, 128) 32896 \n____________________________________________________________________________________________________\ndropout_13 (Dropout) (None, 128) 0 \n____________________________________________________________________________________________________\ndense_13 (Dense) (None, 1) 129 
\n====================================================================================================\nTotal params: 36,431,173.0\nTrainable params: 252,673.0\nNon-trainable params: 36,178,500.0\n____________________________________________________________________________________________________\n" ], [ "optimizer = Adam(lr=1e-3)\nmodel.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])\n\ncallbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1),\n ModelCheckpoint(filepath=CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True),\n TensorBoard(log_dir=LOG_DIR, histogram_freq=0, write_graph=False, write_images=True)]\n\nprint('BATCH_SIZE:', BATCH_SIZE)\nmodel.fit({'q1': train_q1_Double, 'q2': train_q2_Double}, y_train_Double, \n batch_size=BATCH_SIZE, epochs=100, verbose=2, callbacks=callbacks, \n validation_data=({'q1': valid_q1_Double, 'q2': valid_q2_Double}, y_valid_Double, val_sample_weights), \n shuffle=True, class_weight=class_weight, initial_epoch=0)", "BATCH_SIZE: 2048\nTrain on 770048 samples, validate on 38532 samples\nEpoch 1/100\nEpoch 00000: val_loss improved from inf to 0.36693, saving model to ./checkpoint/weights.000-0.3669.hdf5\n120s - loss: 0.4086 - acc: 0.6682 - val_loss: 0.3669 - val_acc: 0.6945\nEpoch 2/100\nEpoch 00001: val_loss improved from 0.36693 to 0.34885, saving model to ./checkpoint/weights.001-0.3489.hdf5\n110s - loss: 0.3662 - acc: 0.7073 - val_loss: 0.3489 - val_acc: 0.7372\nEpoch 3/100\nEpoch 00002: val_loss improved from 0.34885 to 0.33290, saving model to ./checkpoint/weights.002-0.3329.hdf5\n110s - loss: 0.3488 - acc: 0.7225 - val_loss: 0.3329 - val_acc: 0.7380\nEpoch 4/100\nEpoch 00003: val_loss improved from 0.33290 to 0.32665, saving model to ./checkpoint/weights.003-0.3267.hdf5\n110s - loss: 0.3375 - acc: 0.7317 - val_loss: 0.3267 - val_acc: 0.7467\nEpoch 5/100\nEpoch 00004: val_loss improved from 0.32665 to 0.31697, saving model to ./checkpoint/weights.004-0.3170.hdf5\n110s - loss: 0.3288 - acc: 0.7386 - val_loss: 0.3170 - val_acc: 0.7451\nEpoch 6/100\nEpoch 00005: val_loss improved from 0.31697 to 0.31174, saving model to ./checkpoint/weights.005-0.3117.hdf5\n110s - loss: 0.3216 - acc: 0.7454 - val_loss: 0.3117 - val_acc: 0.7542\nEpoch 7/100\nEpoch 00006: val_loss improved from 0.31174 to 0.30906, saving model to ./checkpoint/weights.006-0.3091.hdf5\n110s - loss: 0.3156 - acc: 0.7510 - val_loss: 0.3091 - val_acc: 0.7563\nEpoch 8/100\nEpoch 00007: val_loss improved from 0.30906 to 0.30893, saving model to ./checkpoint/weights.007-0.3089.hdf5\n110s - loss: 0.3098 - acc: 0.7560 - val_loss: 0.3089 - val_acc: 0.7658\nEpoch 9/100\nEpoch 00008: val_loss improved from 0.30893 to 0.30424, saving model to ./checkpoint/weights.008-0.3042.hdf5\n110s - loss: 0.3056 - acc: 0.7597 - val_loss: 0.3042 - val_acc: 0.7729\nEpoch 10/100\nEpoch 00009: val_loss improved from 0.30424 to 0.29670, saving model to ./checkpoint/weights.009-0.2967.hdf5\n110s - loss: 0.3015 - acc: 0.7636 - val_loss: 0.2967 - val_acc: 0.7691\nEpoch 11/100\nEpoch 00010: val_loss improved from 0.29670 to 0.29603, saving model to ./checkpoint/weights.010-0.2960.hdf5\n110s - loss: 0.2974 - acc: 0.7669 - val_loss: 0.2960 - val_acc: 0.7715\nEpoch 12/100\nEpoch 00011: val_loss improved from 0.29603 to 0.29311, saving model to ./checkpoint/weights.011-0.2931.hdf5\n110s - loss: 0.2945 - acc: 0.7699 - val_loss: 0.2931 - val_acc: 0.7709\nEpoch 13/100\nEpoch 00012: val_loss did not improve\n110s - 
loss: 0.2912 - acc: 0.7730 - val_loss: 0.2949 - val_acc: 0.7748\nEpoch 14/100\nEpoch 00013: val_loss did not improve\n110s - loss: 0.2884 - acc: 0.7754 - val_loss: 0.2975 - val_acc: 0.7789\nEpoch 15/100\nEpoch 00014: val_loss improved from 0.29311 to 0.29163, saving model to ./checkpoint/weights.014-0.2916.hdf5\n110s - loss: 0.2855 - acc: 0.7782 - val_loss: 0.2916 - val_acc: 0.7777\nEpoch 16/100\nEpoch 00015: val_loss improved from 0.29163 to 0.28918, saving model to ./checkpoint/weights.015-0.2892.hdf5\n110s - loss: 0.2835 - acc: 0.7803 - val_loss: 0.2892 - val_acc: 0.7813\nEpoch 17/100\nEpoch 00016: val_loss improved from 0.28918 to 0.28915, saving model to ./checkpoint/weights.016-0.2892.hdf5\n110s - loss: 0.2815 - acc: 0.7816 - val_loss: 0.2892 - val_acc: 0.7837\nEpoch 18/100\nEpoch 00017: val_loss did not improve\n110s - loss: 0.2791 - acc: 0.7835 - val_loss: 0.2913 - val_acc: 0.7830\nEpoch 19/100\nEpoch 00018: val_loss did not improve\n110s - loss: 0.2769 - acc: 0.7864 - val_loss: 0.2894 - val_acc: 0.7864\nEpoch 20/100\nEpoch 00019: val_loss improved from 0.28915 to 0.28266, saving model to ./checkpoint/weights.019-0.2827.hdf5\n110s - loss: 0.2753 - acc: 0.7877 - val_loss: 0.2827 - val_acc: 0.7795\nEpoch 21/100\nEpoch 00020: val_loss did not improve\n110s - loss: 0.2735 - acc: 0.7897 - val_loss: 0.2848 - val_acc: 0.7843\nEpoch 22/100\nEpoch 00021: val_loss did not improve\n110s - loss: 0.2719 - acc: 0.7912 - val_loss: 0.2865 - val_acc: 0.7894\nEpoch 23/100\nEpoch 00022: val_loss improved from 0.28266 to 0.28210, saving model to ./checkpoint/weights.022-0.2821.hdf5\n110s - loss: 0.2706 - acc: 0.7930 - val_loss: 0.2821 - val_acc: 0.7872\nEpoch 24/100\nEpoch 00023: val_loss did not improve\n110s - loss: 0.2688 - acc: 0.7943 - val_loss: 0.2877 - val_acc: 0.7927\nEpoch 25/100\nEpoch 00024: val_loss improved from 0.28210 to 0.28169, saving model to ./checkpoint/weights.024-0.2817.hdf5\n110s - loss: 0.2674 - acc: 0.7953 - val_loss: 0.2817 - val_acc: 0.7895\nEpoch 26/100\nEpoch 00025: val_loss improved from 0.28169 to 0.27978, saving model to ./checkpoint/weights.025-0.2798.hdf5\n110s - loss: 0.2660 - acc: 0.7969 - val_loss: 0.2798 - val_acc: 0.7826\nEpoch 27/100\nEpoch 00026: val_loss did not improve\n110s - loss: 0.2647 - acc: 0.7979 - val_loss: 0.2865 - val_acc: 0.7964\nEpoch 28/100\nEpoch 00027: val_loss did not improve\n110s - loss: 0.2632 - acc: 0.7987 - val_loss: 0.2822 - val_acc: 0.7901\nEpoch 29/100\nEpoch 00028: val_loss did not improve\n110s - loss: 0.2626 - acc: 0.7999 - val_loss: 0.2842 - val_acc: 0.7968\nEpoch 30/100\nEpoch 00029: val_loss did not improve\n110s - loss: 0.2610 - acc: 0.8016 - val_loss: 0.2873 - val_acc: 0.7985\nEpoch 31/100\nEpoch 00030: val_loss did not improve\n110s - loss: 0.2603 - acc: 0.8019 - val_loss: 0.2828 - val_acc: 0.7964\nEpoch 32/100\nEpoch 00031: val_loss did not improve\n110s - loss: 0.2583 - acc: 0.8037 - val_loss: 0.2824 - val_acc: 0.7944\nEpoch 00031: early stopping\n" ] ], [ [ "#resume training\n\nmodel, model_name = get_best_model()\n# model = load_model(CHECKPOINT_DIR + 'weights.025-0.4508.hdf5')\n# model_name = 'weights.025-0.4508.hdf5'\n# print('model_name', model_name)\n\n# #try increasing learningrate\n# optimizer = Adam(lr=1e-4)\n# model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])\n\n# callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1),\n# EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1),\n# 
ModelCheckpoint(filepath=CHECKPOINT_DIR+'weights.{epoch:03d}-{val_loss:.4f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True),\n# TensorBoard(log_dir=LOG_DIR, histogram_freq=0, write_graph=False, write_images=True)]\n\nprint('BATCH_SIZE:', BATCH_SIZE)\nmodel.fit({'q1': train_q1_Double, 'q2': train_q2_Double}, y_train_Double, \n batch_size=BATCH_SIZE, epochs=100, verbose=2, callbacks=callbacks, \n validation_data=({'q1': valid_q1_Double, 'q2': valid_q2_Double}, y_valid_Double, val_sample_weights), \n shuffle=True, class_weight=class_weight, initial_epoch=)", "_____no_output_____" ] ], [ [ "model = load_model(CHECKPOINT_DIR + 'weights.022-0.2778.hdf5')\nmodel_name = 'weights.022-0.2778.hdf5'\nprint('model_name', model_name)\nval_loss = model.evaluate({'q1': valid_q1_Double, 'q2': valid_q2_Double}, y_valid_Double, sample_weight=val_sample_weights, batch_size=8192, verbose=2)\nval_loss", "model_name weights.022-0.2778.hdf5\n" ], [ "#Create submission\ntest_q1 = pad_sequences(tokenizer.texts_to_sequences(test_df['question1_WL']), maxlen = MAX_LEN)\ntest_q2 = pad_sequences(tokenizer.texts_to_sequences(test_df['question2_WL']), maxlen = MAX_LEN)\npredictions = model.predict({'q1': test_q1, 'q2': test_q2}, batch_size=8192, verbose=2)\npredictions += model.predict({'q1': test_q2, 'q2': test_q1}, batch_size=8192, verbose=2)\npredictions /= 2\n\nsubmission = pd.DataFrame(predictions, columns=['is_duplicate'])\nsubmission.insert(0, 'test_id', test_df.test_id)\nfile_name = MODEL+'_'+model_name+'_LSTM{:d}*{:d}_DENSE{:d}*{:d}_valloss{:.4f}.csv' \\\n.format(RNNCELL_SIZE,RNNCELL_LAYERS,DENSE_SIZE,DENSE_LAYERS,val_loss[0])\nsubmission.to_csv(OUTPUT_DIR+file_name, index=False)\nprint(file_name)", "DataClean_weights.022-0.2778.hdf5_LSTM128*1_DENSE128*1_valloss0.2778.csv\n" ] ], [ [ "sys.stdout = open(OUTPUT_DIR+'training_output.txt', 'a')\nhistory = model.fit({'q1': train_q1, 'q2': train_q2}, y_train, batch_size=BATCH_SIZE, epochs=3, verbose=2, callbacks=callbacks, \n validation_data=({'q1': valid_q1, 'q2': valid_q2}, y_valid), shuffle=True, initial_epoch=0)\nsys.stdout = sys.__stdout__", "_____no_output_____" ], [ "summary_stats = pd.DataFrame({'epoch': [ i + 1 for i in history.epoch ],\n 'train_acc': history.history['acc'],\n 'valid_acc': history.history['val_acc'],\n 'train_loss': history.history['loss'],\n 'valid_loss': history.history['val_loss']})\nsummary_stats\n\nplt.plot(summary_stats.train_loss) # blue\nplt.plot(summary_stats.valid_loss) # green\nplt.show()", "_____no_output_____" ], [ "units = 128 # Number of nodes in the Dense layers\ndropout = 0.25 # Percentage of nodes to drop\nnb_filter = 32 # Number of filters to use in Convolution1D\nfilter_length = 3 # Length of filter for Convolution1D\n# Initialize weights and biases for the Dense layers\nweights = initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=2)\nbias = bias_initializer='zeros'\n\nmodel1 = Sequential()\nmodel1.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length = MAX_LEN, trainable = False))\nmodel1.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))\nmodel1.add(BatchNormalization())\nmodel1.add(Activation('relu'))\nmodel1.add(Dropout(dropout))\nmodel1.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))\nmodel1.add(BatchNormalization())\nmodel1.add(Activation('relu'))\nmodel1.add(Dropout(dropout))\nmodel1.add(Flatten())\n\n\nmodel2 = Sequential()\nmodel2.add(Embedding(nb_words + 1, EMBEDDING_DIM, 
weights=[word_embedding_matrix], input_length = MAX_LEN, trainable = False))\nmodel2.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))\nmodel2.add(BatchNormalization())\nmodel2.add(Activation('relu'))\nmodel2.add(Dropout(dropout))\nmodel2.add(Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same'))\nmodel2.add(BatchNormalization())\nmodel2.add(Activation('relu'))\nmodel2.add(Dropout(dropout))\nmodel2.add(Flatten())\n\n\nmodel3 = Sequential()\nmodel3.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length = MAX_LEN, trainable = False))\nmodel3.add(TimeDistributed(Dense(EMBEDDING_DIM)))\nmodel3.add(BatchNormalization())\nmodel3.add(Activation('relu'))\nmodel3.add(Dropout(dropout))\nmodel3.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM, )))\n\n\nmodel4 = Sequential()\nmodel4.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], input_length = MAX_LEN, trainable = False))\nmodel4.add(TimeDistributed(Dense(EMBEDDING_DIM)))\nmodel4.add(BatchNormalization())\nmodel4.add(Activation('relu'))\nmodel4.add(Dropout(dropout))\nmodel4.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM, )))\n\n\nmodela = Sequential()\nmodela.add(Merge([model1, model2], mode='concat'))\nmodela.add(Dense(units*2, kernel_initializer=weights, bias_initializer=bias))\nmodela.add(BatchNormalization())\nmodela.add(Activation('relu'))\nmodela.add(Dropout(dropout))\nmodela.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))\nmodela.add(BatchNormalization())\nmodela.add(Activation('relu'))\nmodela.add(Dropout(dropout))\n\n\nmodelb = Sequential()\nmodelb.add(Merge([model3, model4], mode='concat'))\nmodelb.add(Dense(units*2, kernel_initializer=weights, bias_initializer=bias))\nmodelb.add(BatchNormalization())\nmodelb.add(Activation('relu'))\nmodelb.add(Dropout(dropout))\nmodelb.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))\nmodelb.add(BatchNormalization())\nmodelb.add(Activation('relu'))\nmodelb.add(Dropout(dropout))\n\n\nmodel = Sequential()\nmodel.add(Merge([modela, modelb], mode='concat'))\nmodel.add(Dense(units*2, kernel_initializer=weights, bias_initializer=bias))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(dropout))\nmodel.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(dropout))\nmodel.add(Dense(units, kernel_initializer=weights, bias_initializer=bias))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(dropout))\nmodel.add(Dense(1, kernel_initializer=weights, bias_initializer=bias))\nmodel.add(BatchNormalization())\nmodel.add(Activation('sigmoid'))", "_____no_output_____" ] ] ]
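Editor's note on the final modelling cells above: they combine `Sequential` branches with the Keras 1.x `Merge` layer (`Merge([model1, model2], mode='concat')`), which was removed in later Keras releases. Below is a minimal sketch of the same two-branch Conv1D-and-concatenate pattern written with the functional API; it assumes standalone Keras 2.x and reuses `MAX_LEN`, `nb_words`, `EMBEDDING_DIM` and `word_embedding_matrix` from the earlier cells, while sharing a single embedding layer across the two branches (the original cell instantiates one embedding per branch).

```python
# Hedged sketch: the Merge([model1, model2], mode='concat') pattern rewritten
# with the Keras 2 functional API. Hyperparameters mirror the cell above
# (nb_filter=32, filter_length=3, units=128, dropout=0.25); import paths assume
# standalone Keras 2.x (swap in tensorflow.keras equivalents if needed).
from keras.layers import Input, Embedding, Conv1D, Flatten, Dense, Dropout, concatenate
from keras.models import Model

q1_in = Input(shape=(MAX_LEN,), name='q1')
q2_in = Input(shape=(MAX_LEN,), name='q2')

# One embedding layer shared by both question branches
shared_embedding = Embedding(nb_words + 1, EMBEDDING_DIM,
                             weights=[word_embedding_matrix],
                             input_length=MAX_LEN, trainable=False)

def conv_branch(question_input):
    # Conv1D branch corresponding to model1/model2 in the cell above
    h = shared_embedding(question_input)
    h = Conv1D(filters=32, kernel_size=3, padding='same', activation='relu')(h)
    return Flatten()(h)

merged = concatenate([conv_branch(q1_in), conv_branch(q2_in)])
merged = Dense(128, activation='relu')(merged)
merged = Dropout(0.25)(merged)
prediction = Dense(1, activation='sigmoid')(merged)

model = Model(inputs=[q1_in, q2_in], outputs=prediction)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
```

Sharing the embedding keeps the parameter count down and matches the shared `encode_model` design used for the trained LSTM model earlier in this notebook.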
[ "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw" ]
[ [ "raw" ], [ "code", "code", "code" ], [ "raw" ], [ "code", "code" ], [ "raw" ], [ "code", "code" ], [ "raw" ], [ "code", "code", "code", "code", "code" ], [ "raw" ], [ "code", "code" ], [ "raw", "raw", "raw" ] ]
e7c2a778c3d4598068ca0ee7de2abfe6dfa5cd28
10,384
ipynb
Jupyter Notebook
notebooks/00_Notebooks.ipynb
raidery/practicalAI
87aeb4ec9ee0ae6b58aad2dab3debace7a77d3eb
[ "MIT" ]
1
2022-01-04T06:26:36.000Z
2022-01-04T06:26:36.000Z
notebooks/00_Notebooks.ipynb
raidery/practicalAI
87aeb4ec9ee0ae6b58aad2dab3debace7a77d3eb
[ "MIT" ]
null
null
null
notebooks/00_Notebooks.ipynb
raidery/practicalAI
87aeb4ec9ee0ae6b58aad2dab3debace7a77d3eb
[ "MIT" ]
2
2020-04-14T21:36:08.000Z
2020-07-04T08:38:19.000Z
32.551724
281
0.516564
[ [ [ "<a href=\"https://practicalai.me\"><img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/rounded_logo.png\" width=\"100\" align=\"left\" hspace=\"20px\" vspace=\"20px\"></a>\n\n<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/00_Notebooks/colab.png\" width=\"200\" vspace=\"20px\" hspace=\"20px\" align=\"right\">\n\n<div align=\"left\">\n<h1>Notebook Basics</h1>\n\nIn this lesson we'll learn how to work with **notebooks**. Notebooks allow us to do interactive and visual computing which makes it a great learning tool. We'll use notebooks to code in Python and learn the basics of machine learning.\n</div>", "_____no_output_____" ], [ "<table align=\"center\">\n <td>\n<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/rounded_logo.png\" width=\"25\"><a target=\"_blank\" href=\"https://practicalai.me\"> View on practicalAI</a>\n </td>\n <td>\n<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/colab_logo.png\" width=\"25\"><a target=\"_blank\" href=\"https://colab.research.google.com/github/practicalAI/practicalAI/blob/master/notebooks/00_Notebooks.ipynb\"> Run in Google Colab</a>\n </td>\n <td>\n<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/github_logo.png\" width=\"22\"><a target=\"_blank\" href=\"https://github.com/practicalAI/practicalAI/blob/master/notebooks/00_Notebooks.ipynb\"> View code on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "# Set Up", "_____no_output_____" ], [ "1. Sign into your [Google](https://accounts.google.com/signin) account to start using the notebook. If you don't want to save your work, you can skip the steps below.\n2. If you do want to save your work, click the **COPY TO DRIVE** button on the toolbar. This will open a new notebook in a new tab.\n\n<div align=\"left\">\n&emsp;&emsp;<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/00_Notebooks/copy_to_drive.png\" width=\"320\">\n</div>\n\n3. Rename this new notebook by removing the words `Copy of` from the title (change \"`Copy of 00_Notebooks`\" to \"`00_Notebooks`\").\n\n<div align=\"left\">\n&emsp;&emsp;<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/00_Notebooks/rename.gif\" width=\"320\">\n</div>\n\n4. Now you can run the code, make changes and it's all saved to your personal Google Drive.\n", "_____no_output_____" ], [ "# Types of cells", "_____no_output_____" ], [ "Notebooks are made up of cells. Each cell can either be a **code cell** or a **text cell**. \n\n* **code cell**: used for writing and executing code.\n* **text cell**: used for writing text, HTML, Markdown, etc.\n\n\n", "_____no_output_____" ], [ "# Creating cells", "_____no_output_____" ], [ "First, let's create a text cell. Click on a desired location in the notebook and create the cell by clicking on the **➕TEXT** (located in the top left corner). 
\n\n<div align=\"left\">\n<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/00_Notebooks/add_text.png\" width=\"320\">\n<div align=\"left\">\n\nOnce you create the cell, click on it and type the following inside it:\n\n\n```\n### This is a header\nHello world!\n```", "_____no_output_____" ], [ "### This is a header\nHello world!", "_____no_output_____" ], [ "# Running cells", "_____no_output_____" ], [ "Once you type inside the cell, press the **SHIFT** and **RETURN** (enter key) together to run the cell.", "_____no_output_____" ], [ "# Editing cells", "_____no_output_____" ], [ "To edit a cell, double click on it and you can edit it.", "_____no_output_____" ], [ "# Moving cells", "_____no_output_____" ], [ "Once you create the cell, you can move it up and down by clicking on the cell and then pressing the ⬆️ and ⬇️ button on the top right of the cell. \n\n<div align=\"left\">\n<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/00_Notebooks/move_cells.png\" width=\"500\">\n</div>", "_____no_output_____" ], [ "# Deleting cells", "_____no_output_____" ], [ "You can delete the cell by clicking on it and pressing the trash can button 🗑️ on the top right corner of the cell. Alternatively, you can also press ⌘/Ctrl + M + D.\n\n<div align=\"left\">\n<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/00_Notebooks/delete_cells.png\" width=\"500\">\n</div>", "_____no_output_____" ], [ "# Creating a code cell\n", "_____no_output_____" ], [ "You can repeat the steps above to create and edit a *code* cell. You can create a code cell by clicking on the ➕CODE (located in the top left corner).\n\n<div align=\"left\">\n<img src=\"https://raw.githubusercontent.com/practicalAI/images/master/images/00_Notebooks/add_code.png\" width=\"320\">\n</div>\n\nOnce you've created the code cell, double click on it, type the following inside it and then press `Shift + Enter` to execute the code.\n\n```\nprint (\"Hello world!\")\n```", "_____no_output_____" ] ], [ [ "print (\"Hello world!\")", "Hello world!\n" ] ], [ [ "---\n<div align=\"center\">\n\nSubscribe to our <a href=\"https://practicalai.me/#newsletter\">newsletter</a> and follow us on social media to get the latest updates!\n\n<a class=\"ai-header-badge\" target=\"_blank\" href=\"https://github.com/GokuMohandas/practicalAI\"><img src=\"https://img.shields.io/github/stars/GokuMohandas/practicalAI.svg?style=social&label=Star\"></a>&nbsp;&nbsp;\n<a class=\"ai-header-badge\" target=\"_blank\" href=\"https://www.linkedin.com/company/practicalai-me\"><img src=\"https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social\"></a>&nbsp;&nbsp;\n<a class=\"ai-header-badge\" target=\"_blank\" href=\"https://twitter.com/GokuMohandas\"><img src=\"https://img.shields.io/twitter/follow/GokuMohandas.svg?label=Follow&style=social\"></a>&nbsp;&nbsp;\n\n</div>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
e7c2bfe73f71d3da6349d7bf68c928eb71fd8a7e
65,587
ipynb
Jupyter Notebook
tutorials/W1D2_LinearDeepLearning/student/W1D2_Tutorial2.ipynb
eduardojdiniz/course-content-dl
8d66641683651bce7b0179b6d890aef5a048a8b9
[ "CC-BY-4.0", "BSD-3-Clause" ]
1
2021-09-04T01:57:41.000Z
2021-09-04T01:57:41.000Z
tutorials/W1D2_LinearDeepLearning/student/W1D2_Tutorial2.ipynb
eduardojdiniz/course-content-dl
8d66641683651bce7b0179b6d890aef5a048a8b9
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
tutorials/W1D2_LinearDeepLearning/student/W1D2_Tutorial2.ipynb
eduardojdiniz/course-content-dl
8d66641683651bce7b0179b6d890aef5a048a8b9
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
35.054516
602
0.547822
[ [ [ "<a href=\"https://colab.research.google.com/github/NeuromatchAcademy/course-content-dl/blob/main/tutorials/W1D2_LinearDeepLearning/student/W1D2_Tutorial2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> &nbsp; <a href=\"https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/W1D2_LinearDeepLearning/student/W1D2_Tutorial2.ipynb\" target=\"_parent\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open in Kaggle\"/></a>", "_____no_output_____" ], [ "# Tutorial 2: Learning Hyperparameters\n**Week 1, Day 2: Linear Deep Learning**\n\n**By Neuromatch Academy**\n\n__Content creators:__ Saeed Salehi, Andrew Saxe\n\n__Content reviewers:__ Polina Turishcheva, Antoine De Comite, Kelson Shilling-Scrivo\n\n__Content editors:__ Anoop Kulkarni\n\n__Production editors:__ Khalid Almubarak, Spiros Chavlis\n", "_____no_output_____" ], [ "**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**\n\n<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>", "_____no_output_____" ], [ "---\n# Tutorial Objectives\n\n* Training landscape\n* The effect of depth\n* Choosing a learning rate\n* Initialization matters\n", "_____no_output_____" ] ], [ [ "# @title Tutorial slides\n\n# @markdown These are the slides for the videos in the tutorial\n# @markdown If you want to locally dowload the slides, click [here](https://osf.io/sne2m/download)\nfrom IPython.display import IFrame\nIFrame(src=f\"https://mfr.ca-1.osf.io/render?url=https://osf.io/sne2m/?direct%26mode=render%26action=download%26mode=render\", width=854, height=480)", "_____no_output_____" ] ], [ [ "---\n# Setup\n\nThis a GPU-Free tutorial!", "_____no_output_____" ] ], [ [ "# @title Install dependencies\n!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet\n\nfrom evaltools.airtable import AirtableForm", "_____no_output_____" ], [ "# Imports\nimport time\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# @title Figure settings\n\nfrom ipywidgets import interact, IntSlider, FloatSlider, fixed\nfrom ipywidgets import HBox, interactive_output, ToggleButton, Layout\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n%config InlineBackend.figure_format = 'retina'\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle\")", "_____no_output_____" ], [ "# @title Plotting functions\n\ndef plot_x_y_(x_t_, y_t_, x_ev_, y_ev_, loss_log_, weight_log_):\n \"\"\"\n \"\"\"\n plt.figure(figsize=(12, 4))\n plt.subplot(1, 3, 1)\n plt.scatter(x_t_, y_t_, c='r', label='training data')\n plt.plot(x_ev_, y_ev_, c='b', label='test results', linewidth=2)\n plt.xlabel('x')\n plt.ylabel('y')\n plt.legend()\n plt.subplot(1, 3, 2)\n plt.plot(loss_log_, c='r')\n plt.xlabel('epochs')\n plt.ylabel('mean squared error')\n plt.subplot(1, 3, 3)\n plt.plot(weight_log_)\n plt.xlabel('epochs')\n plt.ylabel('weights')\n plt.show()\n\n\ndef plot_vector_field(what, init_weights=None):\n \"\"\"\n \"\"\"\n n_epochs=40\n lr=0.15\n x_pos = np.linspace(2.0, 0.5, 100, endpoint=True)\n y_pos = 1. 
/ x_pos\n xx, yy = np.mgrid[-1.9:2.0:0.2, -1.9:2.0:0.2]\n zz = np.empty_like(xx)\n x, y = xx[:, 0], yy[0]\n\n x_temp, y_temp = gen_samples(10, 1.0, 0.0)\n\n cmap = matplotlib.cm.plasma\n plt.figure(figsize=(8, 7))\n ax = plt.gca()\n\n if what == 'all' or what == 'vectors':\n for i, a in enumerate(x):\n for j, b in enumerate(y):\n temp_model = ShallowNarrowLNN([a, b])\n da, db = temp_model.dloss_dw(x_temp, y_temp)\n zz[i, j] = temp_model.loss(temp_model.forward(x_temp), y_temp)\n scale = min(40 * np.sqrt(da**2 + db**2), 50)\n ax.quiver(a, b, - da, - db, scale=scale, color=cmap(np.sqrt(da**2 + db**2)))\n\n if what == 'all' or what == 'trajectory':\n if init_weights is None:\n for init_weights in [[0.5, -0.5], [0.55, -0.45], [-1.8, 1.7]]:\n temp_model = ShallowNarrowLNN(init_weights)\n _, temp_records = temp_model.train(x_temp, y_temp, lr, n_epochs)\n ax.scatter(temp_records[:, 0], temp_records[:, 1],\n c=np.arange(len(temp_records)), cmap='Greys')\n ax.scatter(temp_records[0, 0], temp_records[0, 1], c='blue', zorder=9)\n ax.scatter(temp_records[-1, 0], temp_records[-1, 1], c='red', marker='X', s=100, zorder=9)\n else:\n temp_model = ShallowNarrowLNN(init_weights)\n _, temp_records = temp_model.train(x_temp, y_temp, lr, n_epochs)\n ax.scatter(temp_records[:, 0], temp_records[:, 1],\n c=np.arange(len(temp_records)), cmap='Greys')\n ax.scatter(temp_records[0, 0], temp_records[0, 1], c='blue', zorder=9)\n ax.scatter(temp_records[-1, 0], temp_records[-1, 1], c='red', marker='X', s=100, zorder=9)\n\n if what == 'all' or what == 'loss':\n contplt = ax.contourf(x, y, np.log(zz+0.001), zorder=-1, cmap='coolwarm', levels=100)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar = plt.colorbar(contplt, cax=cax)\n cbar.set_label('log (Loss)')\n\n ax.set_xlabel(\"$w_1$\")\n ax.set_ylabel(\"$w_2$\")\n ax.set_xlim(-1.9, 1.9)\n ax.set_ylim(-1.9, 1.9)\n\n plt.show()\n\n\ndef plot_loss_landscape():\n \"\"\"\n \"\"\"\n x_temp, y_temp = gen_samples(10, 1.0, 0.0)\n\n xx, yy = np.mgrid[-1.9:2.0:0.2, -1.9:2.0:0.2]\n zz = np.empty_like(xx)\n x, y = xx[:, 0], yy[0]\n\n for i, a in enumerate(x):\n for j, b in enumerate(y):\n temp_model = ShallowNarrowLNN([a, b])\n zz[i, j] = temp_model.loss(temp_model.forward(x_temp), y_temp)\n\n temp_model = ShallowNarrowLNN([-1.8, 1.7])\n loss_rec_1, w_rec_1 = temp_model.train(x_temp, y_temp, 0.02, 240)\n\n temp_model = ShallowNarrowLNN([1.5, -1.5])\n loss_rec_2, w_rec_2 = temp_model.train(x_temp, y_temp, 0.02, 240)\n\n plt.figure(figsize=(12, 8))\n ax = plt.subplot(1, 1, 1, projection='3d')\n ax.plot_surface(xx, yy, np.log(zz+0.5), cmap='coolwarm', alpha=0.5)\n ax.scatter3D(w_rec_1[:, 0], w_rec_1[:, 1], np.log(loss_rec_1+0.5),\n c='k', s=50, zorder=9)\n ax.scatter3D(w_rec_2[:, 0], w_rec_2[:, 1], np.log(loss_rec_2+0.5),\n c='k', s=50, zorder=9)\n plt.axis(\"off\")\n ax.view_init(45, 260)\n\n plt.show()\n\n\ndef depth_widget(depth):\n if depth == 0:\n depth_lr_init_interplay(depth, 0.02, 0.9)\n else:\n depth_lr_init_interplay(depth, 0.01, 0.9)\n\n\ndef lr_widget(lr):\n depth_lr_init_interplay(50, lr, 0.9)\n\n\ndef depth_lr_interplay(depth, lr):\n depth_lr_init_interplay(depth, lr, 0.9)\n\n\ndef depth_lr_init_interplay(depth, lr, init_weights):\n n_epochs = 600\n\n x_train, y_train = gen_samples(100, 2.0, 0.1)\n model = DeepNarrowLNN(np.full((1, depth+1), init_weights))\n\n plt.figure(figsize=(10, 5))\n plt.plot(model.train(x_train, y_train, lr, n_epochs),\n linewidth=3.0, c='m')\n\n plt.title(\"Training a {}-layer LNN with\"\n \" 
$\\eta=${} initialized with $w_i=${}\".format(depth, lr, init_weights), pad=15)\n plt.yscale('log')\n plt.xlabel('epochs')\n plt.ylabel('Log mean squared error')\n plt.ylim(0.001, 1.0)\n plt.show()\n\n\ndef plot_init_effect():\n depth = 15\n n_epochs = 250\n lr = 0.02\n\n x_train, y_train = gen_samples(100, 2.0, 0.1)\n\n plt.figure(figsize=(12, 6))\n for init_w in np.arange(0.7, 1.09, 0.05):\n model = DeepNarrowLNN(np.full((1, depth), init_w))\n plt.plot(model.train(x_train, y_train, lr, n_epochs),\n linewidth=3.0, label=\"initial weights {:.2f}\".format(init_w))\n plt.title(\"Training a {}-layer narrow LNN with $\\eta=${}\".format(depth, lr), pad=15)\n plt.yscale('log')\n plt.xlabel('epochs')\n plt.ylabel('Log mean squared error')\n plt.legend(loc='lower left', ncol=4)\n plt.ylim(0.001, 1.0)\n plt.show()\n\n\nclass InterPlay:\n def __init__(self):\n self.lr = [None]\n self.depth = [None]\n self.success = [None]\n self.min_depth, self.max_depth = 5, 65\n self.depth_list = np.arange(10, 61, 10)\n self.i_depth = 0\n self.min_lr, self.max_lr = 0.001, 0.105\n self.n_epochs = 600\n self.x_train, self.y_train = gen_samples(100, 2.0, 0.1)\n self.converged = False\n self.button = None\n self.slider = None\n\n def train(self, lr, update=False, init_weights=0.9):\n if update and self.converged and self.i_depth < len(self.depth_list):\n depth = self.depth_list[self.i_depth]\n self.plot(depth, lr)\n self.i_depth += 1\n self.lr.append(None)\n self.depth.append(None)\n self.success.append(None)\n self.converged = False\n self.slider.value = 0.005\n if self.i_depth < len(self.depth_list):\n self.button.value = False\n self.button.description = 'Explore!'\n self.button.disabled = True\n self.button.button_style = 'danger'\n else:\n self.button.value = False\n self.button.button_style = ''\n self.button.disabled = True\n self.button.description = 'Done!'\n time.sleep(1.0)\n\n elif self.i_depth < len(self.depth_list):\n depth = self.depth_list[self.i_depth]\n # assert self.min_depth <= depth <= self.max_depth\n assert self.min_lr <= lr <= self.max_lr\n self.converged = False\n\n model = DeepNarrowLNN(np.full((1, depth), init_weights))\n self.losses = np.array(model.train(self.x_train, self.y_train, lr, self.n_epochs))\n if np.any(self.losses < 1e-2):\n success = np.argwhere(self.losses < 1e-2)[0][0]\n if np.all((self.losses[success:] < 1e-2)):\n self.converged = True\n self.success[-1] = success\n self.lr[-1] = lr\n self.depth[-1] = depth\n self.button.disabled = False\n self.button.button_style = 'success'\n self.button.description = 'Register!'\n else:\n self.button.disabled = True\n self.button.button_style = 'danger'\n self.button.description = 'Explore!'\n else:\n self.button.disabled = True\n self.button.button_style = 'danger'\n self.button.description = 'Explore!'\n self.plot(depth, lr)\n\n def plot(self, depth, lr):\n fig = plt.figure(constrained_layout=False, figsize=(10, 8))\n gs = fig.add_gridspec(2, 2)\n ax1 = fig.add_subplot(gs[0, :])\n ax2 = fig.add_subplot(gs[1, 0])\n ax3 = fig.add_subplot(gs[1, 1])\n\n ax1.plot(self.losses, linewidth=3.0, c='m')\n ax1.set_title(\"Training a {}-layer LNN with\"\n \" $\\eta=${}\".format(depth, lr), pad=15, fontsize=16)\n ax1.set_yscale('log')\n ax1.set_xlabel('epochs')\n ax1.set_ylabel('Log mean squared error')\n ax1.set_ylim(0.001, 1.0)\n\n ax2.set_xlim(self.min_depth, self.max_depth)\n ax2.set_ylim(-10, self.n_epochs)\n ax2.set_xlabel('Depth')\n ax2.set_ylabel('Learning time (Epochs)')\n ax2.set_title(\"Learning time vs depth\", fontsize=14)\n 
ax2.scatter(np.array(self.depth), np.array(self.success), c='r')\n\n # ax3.set_yscale('log')\n ax3.set_xlim(self.min_depth, self.max_depth)\n ax3.set_ylim(self.min_lr, self.max_lr)\n ax3.set_xlabel('Depth')\n ax3.set_ylabel('Optimial learning rate')\n ax3.set_title(\"Empirically optimal $\\eta$ vs depth\", fontsize=14)\n ax3.scatter(np.array(self.depth), np.array(self.lr), c='r')\n\n plt.show()", "_____no_output_____" ], [ "# @title Helper functions\n\natform = AirtableForm('appn7VdPRseSoMXEG','W1D2_T2','https://portal.neuromatchacademy.org/api/redirect/to/9c55f6cb-cdf9-4429-ac1c-ec44fe64c303')\n\n\ndef gen_samples(n, a, sigma):\n \"\"\"\n Generates `n` samples with `y = z * x + noise(sgma)` linear relation.\n\n Args:\n n : int\n a : float\n sigma : float\n Retutns:\n x : np.array\n y : np.array\n \"\"\"\n assert n > 0\n assert sigma >= 0\n\n if sigma > 0:\n x = np.random.rand(n)\n noise = np.random.normal(scale=sigma, size=(n))\n y = a * x + noise\n else:\n x = np.linspace(0.0, 1.0, n, endpoint=True)\n y = a * x\n return x, y\n\n\nclass ShallowNarrowLNN:\n \"\"\"\n Shallow and narrow (one neuron per layer) linear neural network\n \"\"\"\n def __init__(self, init_ws):\n \"\"\"\n init_ws: initial weights as a list\n \"\"\"\n assert isinstance(init_ws, list)\n assert len(init_ws) == 2\n self.w1 = init_ws[0]\n self.w2 = init_ws[1]\n\n def forward(self, x):\n \"\"\"\n The forward pass through netwrok y = x * w1 * w2\n \"\"\"\n y = x * self.w1 * self.w2\n return y\n\n def loss(self, y_p, y_t):\n \"\"\"\n Mean squared error (L2) with 1/2 for convenience\n \"\"\"\n assert y_p.shape == y_t.shape\n mse = ((y_t - y_p)**2).mean()\n return mse\n\n def dloss_dw(self, x, y_t):\n \"\"\"\n partial derivative of loss with respect to weights\n\n Args:\n x : np.array\n y_t : np.array\n \"\"\"\n assert x.shape == y_t.shape\n Error = y_t - self.w1 * self.w2 * x\n dloss_dw1 = - (2 * self.w2 * x * Error).mean()\n dloss_dw2 = - (2 * self.w1 * x * Error).mean()\n return dloss_dw1, dloss_dw2\n\n def train(self, x, y_t, eta, n_ep):\n \"\"\"\n Gradient descent algorithm\n\n Args:\n x : np.array\n y_t : np.array\n eta: float\n n_ep : int\n \"\"\"\n assert x.shape == y_t.shape\n\n loss_records = np.empty(n_ep) # pre allocation of loss records\n weight_records = np.empty((n_ep, 2)) # pre allocation of weight records\n\n for i in range(n_ep):\n y_p = self.forward(x)\n loss_records[i] = self.loss(y_p, y_t)\n dloss_dw1, dloss_dw2 = self.dloss_dw(x, y_t)\n self.w1 -= eta * dloss_dw1\n self.w2 -= eta * dloss_dw2\n weight_records[i] = [self.w1, self.w2]\n\n return loss_records, weight_records\n\n\nclass DeepNarrowLNN:\n \"\"\"\n Deep but thin (one neuron per layer) linear neural network\n \"\"\"\n def __init__(self, init_ws):\n \"\"\"\n init_ws: initial weights as a numpy array\n \"\"\"\n self.n = init_ws.size\n self.W = init_ws.reshape(1, -1)\n\n def forward(self, x):\n \"\"\"\n x : np.array\n input features\n \"\"\"\n y = np.prod(self.W) * x\n return y\n\n def loss(self, y_t, y_p):\n \"\"\"\n mean squared error (L2 loss)\n\n Args:\n y_t : np.array\n y_p : np.array\n \"\"\"\n assert y_p.shape == y_t.shape\n mse = ((y_t - y_p)**2 / 2).mean()\n return mse\n\n def dloss_dw(self, x, y_t, y_p):\n \"\"\"\n analytical gradient of weights\n\n Args:\n x : np.array\n y_t : np.array\n y_p : np.array\n \"\"\"\n E = y_t - y_p # = y_t - x * np.prod(self.W)\n Ex = np.multiply(x, E).mean()\n Wp = np.prod(self.W) / (self.W + 1e-9)\n dW = - Ex * Wp\n return dW\n\n def train(self, x, y_t, eta, n_epochs):\n \"\"\"\n training using gradient 
descent\n\n Args:\n x : np.array\n y_t : np.array\n eta: float\n n_epochs : int\n \"\"\"\n loss_records = np.empty(n_epochs)\n loss_records[:] = np.nan\n for i in range(n_epochs):\n y_p = self.forward(x)\n loss_records[i] = self.loss(y_t, y_p).mean()\n dloss_dw = self.dloss_dw(x, y_t, y_p)\n if np.isnan(dloss_dw).any() or np.isinf(dloss_dw).any():\n return loss_records\n self.W -= eta * dloss_dw\n return loss_records", "_____no_output_____" ], [ "#@title Set random seed\n\n#@markdown Executing `set_seed(seed=seed)` you are setting the seed\n\n# for DL its critical to set the random seed so that students can have a\n# baseline to compare their results to expected results.\n# Read more here: https://pytorch.org/docs/stable/notes/randomness.html\n\n# Call `set_seed` function in the exercises to ensure reproducibility.\nimport random\nimport torch\n\ndef set_seed(seed=None, seed_torch=True):\n if seed is None:\n seed = np.random.choice(2 ** 32)\n random.seed(seed)\n np.random.seed(seed)\n if seed_torch:\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n print(f'Random seed {seed} has been set.')\n\n\n# In case that `DataLoader` is used\ndef seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2**32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "_____no_output_____" ], [ "#@title Set device (GPU or CPU). Execute `set_device()`\n# especially if torch modules used.\n\n# inform the user if the notebook uses GPU or CPU.\n\ndef set_device():\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if device != \"cuda\":\n print(\"GPU is not enabled in this notebook. \\n\"\n \"If you want to enable it, in the menu under `Runtime` -> \\n\"\n \"`Hardware accelerator.` and select `GPU` from the dropdown menu\")\n else:\n print(\"GPU is enabled in this notebook. 
\\n\"\n \"If you want to disable it, in the menu under `Runtime` -> \\n\"\n \"`Hardware accelerator.` and select `None` from the dropdown menu\")\n\n return device", "_____no_output_____" ], [ "SEED = 2021\nset_seed(seed=SEED)\nDEVICE = set_device()", "_____no_output_____" ] ], [ [ "---\n# Section 1: A Shallow Narrow Linear Neural Network\n\n*Time estimate: ~30 mins*", "_____no_output_____" ] ], [ [ "# @title Video 1: Shallow Narrow Linear Net\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1F44y117ot\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"6e5JIYsqVvU\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('video 1: Shallow Narrow Linear Net')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "## Section 1.1: A Shallow Narrow Linear Net", "_____no_output_____" ], [ "To better understand the behavior of neural network training with gradient descent, we start with the incredibly simple case of a shallow narrow linear neural net, since state-of-the-art models are impossible to dissect and comprehend with our current mathematical tools.\n\nThe model we use has one hidden layer, with only one neuron, and two weights. We consider the squared error (or L2 loss) as the cost function. As you may have already guessed, we can visualize the model as a neural network:\n\n<center><img src=\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/W1D2_LinearDeepLearning/static/shallow_narrow_nn.png\" width=\"400\"/></center>\n\n<br/>\n\nor by its computation graph:\n\n<center><img src=\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/W1D2_LinearDeepLearning/static/shallow_narrow.png\" alt=\"Shallow Narrow Graph\" width=\"400\"/></center>\n\nor on a rare occasion, even as a reasonably compact mapping:\n\n$$ loss = (y - w_1 \\cdot w_2 \\cdot x)^2 $$\n\n<br/>\n\nImplementing a neural network from scratch without using any Automatic Differentiation tool is rarely necessary. The following two exercises are therefore **Bonus** (optional) exercises. Please ignore them if you have any time-limits or pressure and continue to Section 1.2.", "_____no_output_____" ], [ "### Analytical Exercise 1.1: Loss Gradients (Optional)\n\nOnce again, we ask you to calculate the network gradients analytically, since you will need them for the next exercise. 
We understand how annoying this is.\n\n$\\dfrac{\\partial{loss}}{\\partial{w_1}} = ?$\n\n$\\dfrac{\\partial{loss}}{\\partial{w_2}} = ?$\n\n<br/>\n\n---\n#### Solution\n\n$\\dfrac{\\partial{loss}}{\\partial{w_1}} = -2 \\cdot w_2 \\cdot x \\cdot (y - w_1 \\cdot w_2 \\cdot x)$\n\n$\\dfrac{\\partial{loss}}{\\partial{w_2}} = -2 \\cdot w_1 \\cdot x \\cdot (y - w_1 \\cdot w_2 \\cdot x)$\n\n---\n", "_____no_output_____" ], [ "### Coding Exercise 1.1: Implement simple narrow LNN (Optional)\n\nNext, we ask you to implement the `forward` pass for our model from scratch without using PyTorch.\n\nAlso, although our model gets a single input feature and outputs a single prediction, we could calculate the loss and perform training for multiple samples at once. This is the common practice for neural networks, since computers are incredibly fast doing matrix (or tensor) operations on batches of data, rather than processing samples one at a time through `for` loops. Therefore, for the `loss` function, please implement the **mean** squared error (MSE), and adjust your analytical gradients accordingly when implementing the `dloss_dw` function.\n\nFinally, complete the `train` function for the gradient descent algorithm:\n\n\\begin{equation}\n\\mathbf{w}^{(t+1)} = \\mathbf{w}^{(t)} - \\eta \\nabla loss (\\mathbf{w}^{(t)})\n\\end{equation}", "_____no_output_____" ] ], [ [ "class ShallowNarrowExercise:\n \"\"\"Shallow and narrow (one neuron per layer) linear neural network\n \"\"\"\n def __init__(self, init_weights):\n \"\"\"\n Args:\n init_weights (list): initial weights\n \"\"\"\n assert isinstance(init_weights, (list, np.ndarray, tuple))\n assert len(init_weights) == 2\n self.w1 = init_weights[0]\n self.w2 = init_weights[1]\n\n\n def forward(self, x):\n \"\"\"The forward pass through netwrok y = x * w1 * w2\n\n Args:\n x (np.ndarray): features (inputs) to neural net\n\n returns:\n (np.ndarray): neural network output (prediction)\n \"\"\"\n #################################################\n ## Implement the forward pass to calculate prediction\n ## Note that prediction is not the loss\n # Complete the function and remove or comment the line below\n raise NotImplementedError(\"Forward Pass `forward`\")\n #################################################\n y = ...\n return y\n\n\n def dloss_dw(self, x, y_true):\n \"\"\"Gradient of loss with respect to weights\n\n Args:\n x (np.ndarray): features (inputs) to neural net\n y_true (np.ndarray): true labels\n\n returns:\n (float): mean gradient of loss with respect to w1\n (float): mean gradient of loss with respect to w2\n \"\"\"\n assert x.shape == y_true.shape\n #################################################\n ## Implement the gradient computation function\n # Complete the function and remove or comment the line below\n raise NotImplementedError(\"Gradient of Loss `dloss_dw`\")\n #################################################\n dloss_dw1 = ...\n dloss_dw2 = ...\n return dloss_dw1, dloss_dw2\n\n\n def train(self, x, y_true, lr, n_ep):\n \"\"\"Training with Gradient descent algorithm\n\n Args:\n x (np.ndarray): features (inputs) to neural net\n y_true (np.ndarray): true labels\n lr (float): learning rate\n n_ep (int): number of epochs (training iterations)\n\n returns:\n (list): training loss records\n (list): training weight records (evolution of weights)\n \"\"\"\n assert x.shape == y_true.shape\n\n loss_records = np.empty(n_ep) # pre allocation of loss records\n weight_records = np.empty((n_ep, 2)) # pre allocation of weight records\n\n for i in 
range(n_ep):\n y_prediction = self.forward(x)\n loss_records[i] = loss(y_prediction, y_true)\n dloss_dw1, dloss_dw2 = self.dloss_dw(x, y_true)\n #################################################\n ## Implement the gradient descent step\n # Complete the function and remove or comment the line below\n raise NotImplementedError(\"Training loop `train`\")\n #################################################\n self.w1 -= ...\n self.w2 -= ...\n weight_records[i] = [self.w1, self.w2]\n\n return loss_records, weight_records\n\n\ndef loss(y_prediction, y_true):\n \"\"\"Mean squared error\n\n Args:\n y_prediction (np.ndarray): model output (prediction)\n y_true (np.ndarray): true label\n\n returns:\n (np.ndarray): mean squared error loss\n \"\"\"\n assert y_prediction.shape == y_true.shape\n #################################################\n ## Implement the MEAN squared error\n # Complete the function and remove or comment the line below\n raise NotImplementedError(\"Loss function `loss`\")\n #################################################\n mse = ...\n return mse\n\n\n#add event to airtable\natform.add_event('Coding Exercise 1.1: Implement simple narrow LNN')\n\nset_seed(seed=SEED)\nn_epochs = 211\nlearning_rate = 0.02\ninitial_weights = [1.4, -1.6]\nx_train, y_train = gen_samples(n=73, a=2.0, sigma=0.2)\nx_eval = np.linspace(0.0, 1.0, 37, endpoint=True)\n## Uncomment to run\n# sn_model = ShallowNarrowExercise(initial_weights)\n# loss_log, weight_log = sn_model.train(x_train, y_train, learning_rate, n_epochs)\n# y_eval = sn_model.forward(x_eval)\n# plot_x_y_(x_train, y_train, x_eval, y_eval, loss_log, weight_log)", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D2_LinearDeepLearning/solutions/W1D2_Tutorial2_Solution_46492cd6.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=1696.0 height=544.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/W1D2_LinearDeepLearning/static/W1D2_Tutorial2_Solution_46492cd6_1.png>\n\n", "_____no_output_____" ], [ "## Section 1.2: Learning landscapes", "_____no_output_____" ] ], [ [ "# @title Video 2: Training Landscape\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1Nv411J71X\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"k28bnNAcOEg\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 2: Training Landscape')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "As you may have already asked yourself, we can analytically find $w_1$ and $w_2$ without using gradient descent:\n\n\\begin{equation}\nw_1 \\cdot w_2 = \\dfrac{y}{x}\n\\end{equation}\n\nIn fact, we can plot the gradients, the loss function and all the possible solutions in one figure. 
In this example, we use the $y = 1x$ mapping:\n\n**Blue ribbon**: shows all possible solutions: $~ w_1 w_2 = \\dfrac{y}{x} = \\dfrac{x}{x} = 1 \\Rightarrow w_1 = \\dfrac{1}{w_2}$\n\n**Contour background**: Shows the loss values, red being higher loss\n\n**Vector field (arrows)**: shows the gradient vector field. The larger yellow arrows show larger gradients, which correspond to bigger steps by gradient descent.\n\n**Scatter circles**: the trajectory (evolution) of weights during training for three different initializations, with blue dots marking the start of training and red crosses ( **x** ) marking the end of training. You can also try your own initializations (keep the initial values between `-2.0` and `2.0`) as shown here:\n```python\nplot_vector_field('all', [1.0, -1.0])\n```\n\nFinally, if the plot is too crowded, feel free to pass one of the following strings as argument:\n\n```python\nplot_vector_field('vectors') # for vector field\nplot_vector_field('trajectory') # for training trajectory\nplot_vector_field('loss') # for loss contour\n```\n\n**Think!**\n\nExplore the next two plots. Try different initial values. Can you find the saddle point? Why does training slow down near the minima?", "_____no_output_____" ] ], [ [ "plot_vector_field('all')", "_____no_output_____" ] ], [ [ "Here, we also visualize the loss landscape in a 3-D plot, with two training trajectories for different initial conditions.\nNote: the trajectories from the 3D plot and the previous plot are independent and different.", "_____no_output_____" ] ], [ [ "plot_loss_landscape()", "_____no_output_____" ], [ "# @title Student Response\nfrom ipywidgets import widgets\n\n\ntext=widgets.Textarea(\n value='Type your answer here and click on `Submit!`',\n placeholder='Type something',\n description='',\n disabled=False\n)\n\nbutton = widgets.Button(description=\"Submit!\")\n\ndisplay(text,button)\n\ndef on_button_clicked(b):\n atform.add_answer('q1', text.value)\n print(\"Submission successful!\")\n\nbutton.on_click(on_button_clicked)", "_____no_output_____" ], [ "# @title Video 3: Training Landscape - Discussion\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1py4y1j7cv\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"0EcUGgxOdkI\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 3: Training Landscape - Discussiond')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "---\n# Section 2: Depth, Learning rate, and initialization\n*Time estimate: ~45 mins*", "_____no_output_____" ], [ "Successful deep learning models are often developed by a team of very clever people, spending many many hours \"tuning\" learning hyperparameters, and finding effective initializations. 
In this section, we look at three basic (but often not simple) hyperparameters: depth, learning rate, and initialization.", "_____no_output_____" ], [ "## Section 2.1: The effect of depth", "_____no_output_____" ] ], [ [ "# @title Video 4: Effect of Depth\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1z341167di\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"Ii_As9cRR5Q\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 4: Effect of Depth')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "Why might depth be useful? What makes a network or learning system \"deep\"? The reality is that shallow neural nets are often incapable of learning complex functions due to data limitations. On the other hand, depth seems like magic. Depth can change the functions a network can represent, the way a network learns, and how a network generalizes to unseen data. \n\nSo let's look at the challenges that depth poses in training a neural network. Imagine a single input, single output linear network with 50 hidden layers and only one neuron per layer (i.e. a narrow deep neural network). The output of the network is easy to calculate:\n\n$$ prediction = x \\cdot w_1 \\cdot w_2 \\cdot \\cdot \\cdot w_{50} $$\n\nIf the initial value for all the weights is $w_i = 2$, the prediction for $x=1$ would be **exploding**: $y_p = 2^{50} \\approx 1.1256 \\times 10^{15}$. On the other hand, for weights initialized to $w_i = 0.5$, the output is **vanishing**: $y_p = 0.5^{50} \\approx 8.88 \\times 10^{-16}$. Similarly, if we recall the chain rule, as the graph gets deeper, the number of elements in the chain multiplication increases, which could lead to exploding or vanishing gradients. To avoid such numerical vulnerablities that could impair our training algorithm, we need to understand the effect of depth.\n", "_____no_output_____" ], [ "### Interactive Demo 2.1: Depth widget\n\nUse the widget to explore the impact of depth on the training curve (loss evolution) of a deep but narrow neural network.\n\n**Think!**\n\nWhich networks trained the fastest? Did all networks eventually \"work\" (converge)? 
What is the shape of their learning trajectory?", "_____no_output_____" ] ], [ [ "# @markdown Make sure you execute this cell to enable the widget!\n\n_ = interact(depth_widget,\n depth = IntSlider(min=0, max=51,\n step=5, value=0,\n continuous_update=False))", "_____no_output_____" ], [ "# @title Video 5: Effect of Depth - Discussion\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1Qq4y1H7uk\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"EqSDkwmSruk\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 5: Effect of Depth - Discussion')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "## Section 2.2: Choosing a learning rate", "_____no_output_____" ], [ "The learning rate is a common hyperparameter for most optimization algorithms. How should we set it? Sometimes the only option is to try all the possibilities, but sometimes knowing some key trade-offs will help guide our search for good hyperparameters.", "_____no_output_____" ] ], [ [ "# @title Video 6: Learning Rate\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV11f4y157MT\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"w_GrCVM-_Qo\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 6: Learning Rate')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "### Interactive Demo 2.2: Learning rate widget\n\nHere, we fix the network depth to 50 layers. Use the widget to explore the impact of learning rate $\\eta$ on the training curve (loss evolution) of a deep but narrow neural network.\n\n**Think!**\n\nCan we say that larger learning rates always lead to faster learning? Why not? 
", "_____no_output_____" ] ], [ [ "# @markdown Make sure you execute this cell to enable the widget!\n\n_ = interact(lr_widget,\n lr = FloatSlider(min=0.005, max=0.045, step=0.005, value=0.005,\n continuous_update=False, readout_format='.3f',\n description='eta'))", "_____no_output_____" ], [ "# @title Video 7: Learning Rate - Discussion\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1Aq4y1p7bh\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"cmS0yqImz2E\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 7: Learning Rate - Discussion')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "## Section 2.3: Depth vs Learning Rate", "_____no_output_____" ] ], [ [ "# @title Video 8: Depth and Learning Rate\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1V44y1177e\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"J30phrux_3k\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 8: Depth and Learning Rate')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "### Interactive Demo 2.3: Depth and Learning-Rate\n", "_____no_output_____" ], [ "**Important instruction**\nThe exercise starts with 10 hidden layers. Your task is to find the learning rate that delivers fast but robust convergence (learning). When you are confident about the learning rate, you can **Register** the optimal learning rate for the given depth. Once you press register, a deeper model is instantiated, so you can find the next optimal learning rate. The Register button turns green only when the training converges, but does not imply the fastest convergence. 
Finally, be patient :) the widgets are slow.\n\n\n**Think!**\n\nCan you explain the relationship between the depth and optimal learning rate?", "_____no_output_____" ] ], [ [ "# @markdown Make sure you execute this cell to enable the widget!\nintpl_obj = InterPlay()\n\nintpl_obj.slider = FloatSlider(min=0.005, max=0.105, step=0.005, value=0.005,\n layout=Layout(width='500px'),\n continuous_update=False,\n readout_format='.3f',\n description='eta')\n\nintpl_obj.button = ToggleButton(value=intpl_obj.converged, description='Register')\n\nwidgets_ui = HBox([intpl_obj.slider, intpl_obj.button])\nwidgets_out = interactive_output(intpl_obj.train,\n {'lr': intpl_obj.slider,\n 'update': intpl_obj.button,\n 'init_weights': fixed(0.9)})\n\ndisplay(widgets_ui, widgets_out)", "_____no_output_____" ], [ "# @title Video 9: Depth and Learning Rate - Discussion\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV15q4y1p7Uq\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"7Fl8vH7cgco\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 9: Depth and Learning Rate - Discussion')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "## Section 2.4: Why initialization is important", "_____no_output_____" ] ], [ [ "# @title Video 10: Initialization Matters\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1UL411J7vu\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"KmqCz95AMzY\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 10: Initialization Matters')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "We’ve seen, even in the simplest of cases, that depth can slow learning. Why? From the chain rule, gradients are multiplied by the current weight at each layer, so the product can vanish or explode. 
Therefore, weight initialization is a fundamentally important hyperparameter.\n\nAlthough in practice initial values for learnable parameters are often sampled from different $\\mathcal{Uniform}$ or $\\mathcal{Normal}$ probability distribution, here we use a single value for all the parameters.\n\nThe figure below shows the effect of initialization on the speed of learning for the deep but narrow LNN. We have excluded initializations that lead to numerical errors such as `nan` or `inf`, which are the consequence of smaller or larger initializations.", "_____no_output_____" ] ], [ [ "# @markdown Make sure you execute this cell to see the figure!\n\nplot_init_effect()", "_____no_output_____" ], [ "# @title Video 11: Initialization Matters Explained\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1hM4y1T7gJ\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"vKktGdiQDsE\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 11: Initialization Matters Explained')\n\ndisplay(out)", "_____no_output_____" ] ], [ [ "---\n# Summary\n\nIn the second tutorial, we have learned what is the training landscape, and also we have see in depth the effect of the depth of the network and the learning rate, and their interplay. 
Finally, we have seen that initialization matters and why we need smart ways of initialization.", "_____no_output_____" ] ], [ [ "# @title Video 12: Tutorial 2 Wrap-up\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = \"https://player.bilibili.com/player.html?bvid={0}&page={1}\".format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=f\"BV1P44y117Pd\", width=854, height=480, fs=1)\n print(\"Video available at https://www.bilibili.com/video/{0}\".format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=f\"r3K8gtak3wA\", width=854, height=480, fs=1, rel=0)\n print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\n#add event to airtable\natform.add_event('Video 12: Tutorial 2 Wrap-up')\n\ndisplay(out)", "_____no_output_____" ], [ "\n# @title Airtable Submission Link\nfrom IPython import display as IPydisplay\nIPydisplay.HTML(\n f\"\"\"\n <div>\n <a href= \"{atform.url()}\" target=\"_blank\">\n <img src=\"https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/AirtableSubmissionButton.png?raw=1\"\n alt=\"button link to Airtable\" style=\"width:410px\"></a>\n </div>\"\"\" )", "_____no_output_____" ] ], [ [ "---\n# Bonus", "_____no_output_____" ], [ "## Hyperparameter interaction\n\nFinally, let's put everything we learned together and find best initial weights and learning rate for a given depth. By now you should have learned the interactions and know how to find the optimal values quickly. If you get `numerical overflow` warnings, don't be discouraged! They are often caused by \"exploding\" or \"vanishing\" gradients.\n\n**Think!**\n\nDid you experience any surprising behaviour \nor difficulty finding the optimal parameters?", "_____no_output_____" ] ], [ [ "# @markdown Make sure you execute this cell to enable the widget!\n\n_ = interact(depth_lr_init_interplay,\n depth = IntSlider(min=10, max=51, step=5, value=25,\n continuous_update=False),\n lr = FloatSlider(min=0.001, max=0.1,\n step=0.005, value=0.005,\n continuous_update=False,\n readout_format='.3f',\n description='eta'),\n init_weights = FloatSlider(min=0.1, max=3.0,\n step=0.1, value=0.9,\n continuous_update=False,\n readout_format='.3f',\n description='initial weights'))", "_____no_output_____" ] ] ]
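The widgets above rely on course helper functions (`depth_widget`, `lr_widget`, `plot_init_effect`, `depth_lr_init_interplay`) that live in the course's helper module. For a rough sense of the same arithmetic outside the widgets, a minimal NumPy-only sketch (this is not the course's helper code or training loop) is enough:

```python
import numpy as np

# Deep, narrow linear network: prediction = x * w_1 * w_2 * ... * w_D,
# with every weight initialized to the same value. This only mirrors the
# w_i = 2 vs. w_i = 0.5 arithmetic from Sections 2.1 and 2.4.
def forward(x, weights):
    return x * np.prod(weights)

x, y = 1.0, 1.0  # one training pair for the y = 1 * x mapping
for depth in (5, 20, 50):
    for init in (0.5, 1.0, 2.0):
        weights = np.full(depth, init)
        pred = forward(x, weights)
        # squared loss L = (pred - y)**2, so dL/dw_1 = 2*(pred - y)*x*prod(other weights)
        grad_w1 = 2.0 * (pred - y) * x * np.prod(weights[1:])
        print(f"depth={depth:2d}  init={init:.1f}  pred={pred:.3e}  dL/dw1={grad_w1:+.3e}")
```

Small initial weights make both the prediction and the gradient shrink as depth grows, large ones make them blow up, and only a scale near 1 keeps gradient descent in a numerically comfortable range — which is the intuition behind the initialization discussion in Section 2.4.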
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
e7c2c3acd935e82913bd1b2cb85db489cb5bb345
28,689
ipynb
Jupyter Notebook
Web scraping with Python.ipynb
allanjamesvestal/teaching-guide-python-scraping
50d0df4db7d21d463471fa1dc4ccbb9e5725aeb8
[ "MIT" ]
null
null
null
Web scraping with Python.ipynb
allanjamesvestal/teaching-guide-python-scraping
50d0df4db7d21d463471fa1dc4ccbb9e5725aeb8
[ "MIT" ]
null
null
null
Web scraping with Python.ipynb
allanjamesvestal/teaching-guide-python-scraping
50d0df4db7d21d463471fa1dc4ccbb9e5725aeb8
[ "MIT" ]
1
2022-03-05T18:55:51.000Z
2022-03-05T18:55:51.000Z
36.31519
565
0.603088
[ [ [ "# Web scraping with Python\n\nThis notebook demonstrates how you can use the Python programming language to scrape information from a web page. The goal today: Scrape the main table on [the first page of Maryland's list of WARN letters](https://www.dllr.state.md.us/employment/warn.shtml) and, if time, write the data to a CSV.\n\nIf you're relatively new to Python, it might be helpful to have [this Python syntax cheat sheet](Python%20syntax%20cheat%20sheet.ipynb) open in another tab as you work through this notebook.\n\n### Table of contents\n\n- [Using Jupyter notebooks](#Using-Jupyter-notebooks)\n- [What _is_ a web page, anyway?](#What-is-a-web-page,-anyway?)\n- [Inspect the source](#Inspect-the-source)\n- [Import libraries](#Import-libraries)\n- [Request the page](#Request-the-page)\n- [Turn your HTML into soup](#Turn-your-HTML-into-soup)\n- [Targeting and extracting data](#Targeting-and-extracting-data)\n- [Write the results to file](#Write-the-results-to-file)", "_____no_output_____" ], [ "### Using Jupyter notebooks\n\nThere are several ways to write and run Python code on your computer. One way -- the method we're using today -- is to use [Jupyter notebooks](https://jupyter.org/), which run in your browser and allow you to intersperse documentation with your code. They're handy for bundling your code with a human-readable explanation of what's happening at each step. Check out some examples from the [L.A. Times](https://github.com/datadesk/notebooks) and [BuzzFeed News](https://github.com/BuzzFeedNews/everything#data-and-analyses).\n\n**To add a new cell to your notebook**: Click the + button in the menu or press the `b` button on your keyboard.\n\n**To run a cell of code**: Select the cell and click the \"Run\" button in the menu, or you can press Shift+Enter.\n\n**One common gotcha**: The notebook doesn't \"know\" about code you've written until you've _run_ the cell containing it. For example, if you define a variable called `my_name` in one cell, and later, when you try to access that variable in another cell but get an error that says `NameError: name 'my_name' is not defined`, the most likely solution is to run (or re-run) the cell in which you defined `my_name`.", "_____no_output_____" ], [ "### What _is_ a web page, anyway?\n\nGenerally, a web page consists of a bunch of specifically formatted text files stored on a computer (a _server_) that's probably sitting on a rack in a giant data center somewhere.\n\nMostly you'll be dealing with `.html` (HyperText Markup Language) files that might include references to `.css` (Cascading Style Sheet) files, which determine how the page looks, and/or `.js` (JavaScript) files, which add interactivity, and other specially formatted text files.\n\nToday, we'll focus on the HTML, which gives structure to the page.\n\nMost HTML elements are represented by a pair of tags -- an opening tag and a closing tag.\n\nA table, for example, starts with `<table>` and ends with `</table>`. The first tag tells the browser: \"Hey! I got a table here! Render it as a table.\" The closing tag (note the forward slash!) tells the browser: \"Hey! 
I'm all done with that table, thanks.\" Inside the table are nested more HTML tags representing rows (`<tr>`) and cells (`<td>`).\n\nHTML elements can have any number of attributes, such as classes --\n\n`<table class=\"cool-table\">`\n\n-- styles --\n\n`<table style=\"width:95%;\">`\n\n-- hyperlinks to other pages --\n\n`<a href=\"https://ire.org\">Click here to visit IRE's website</a>`\n\n-- and IDs --\n\n`<table id=\"cool-table\">`\n\n-- that will be useful to know about when we're scraping.", "_____no_output_____" ], [ "### Inspect the source\n\nYou can look at the HTML that makes up a web page by _inspecting the source_ in a web browser. We like Chrome and Firefox for this; today, we'll use Chrome.\n\nYou can inspect specific elements on the page by right-clicking on the page and selecting \"Inspect\" or \"Inspect Element\" from the context menu that pops up. Hover over elements in the \"Elements\" tab to highlight them on the page.\n\nTo examine all of the source code that makes up a page, you can \"view source.\" In Chrome, hit `Ctrl+U` on a PC or `⌘+Opt+U` on a Mac. (It's also in the menu bar: View > Developer > View Page Source.)\n\nYou'll get a page showing you all of the HTML code that makes up that page. Ignore 99% of it and try to locate the element(s) that you want to target (use `Ctrl+F` on a PC and `⌘+F` to find).\n\nOpen up a Chrome browser and inspect the table on the [the first page of Maryland's list of WARN letters](https://www.dllr.state.md.us/employment/warn.shtml). Find the table we want to scrape.\n\nIs it the only table on the page? If not, does it have any attributes that would allow you to target it?", "_____no_output_____" ], [ "### Import libraries\n\nStep one is to _import_ two third-party Python libraries that will help us scrape this page:\n- `requests` is the de facto standard for making HTTP requests, similar to what happens when you type a URL into a browser window and hit enter.\n- `bs4`, or BeautifulSoup, is a popular library for parsing HTML into a data structure that Python can work with.\n\nThese libraries are installed separately from Python on a per-project basis ([read more about our recommendations for setting up Python projects here](https://docs.google.com/document/d/1cYmpfZEZ8r-09Q6Go917cKVcQk_d0P61gm0q8DAdIdg/edit#heading=h.od2v1nkge5t1)).\n\nRun this cell (you'll only have to do this once):", "_____no_output_____" ] ], [ [ "import requests\nimport bs4", "_____no_output_____" ] ], [ [ "### Request the page\n\nNext, we'll use the `get()` method of the `requests` library (which we just imported) to grab the web page.\n\nWhile we're at it, we'll _assign_ all the stuff that comes back to a new variable using `=`.\n\nThe variable name is arbitrary, but it's usually good to pick something that describes the value it's pointing to.\n\nNotice that the URL we're grabbing is wrapped in quotes, making it a _string_ that Python will interepret as text (as opposed to numbers, booleans, etc.). 
You can read up more on Python data types and variable assignment [here](Python%20syntax%20cheat%20sheet.ipynb).\n\nRun these two cells:", "_____no_output_____" ] ], [ [ "URL = 'http://www.dllr.state.md.us/employment/warn.shtml'", "_____no_output_____" ], [ "warn_page = requests.get(URL)", "_____no_output_____" ] ], [ [ "Nothing appears to have happened, which is (usually) a good sign.\n\nIf you want to make sure that your request was successful, you can check the `status_code` attribute of the Python object that was returned:", "_____no_output_____" ] ], [ [ "warn_page.status_code", "_____no_output_____" ] ], [ [ "A `200` code means all is well. `404` means the page wasn't found, etc. ([Here's one of our favorite lists of HTTP status codes](https://http.cat/) ([or here, if you prefer dogs](https://httpstatusdogs.com/)).)\n\nThe object being stored as the `warn_page` variable came back with a lot of potentially useful information we could access. Today, we're mostly interested in the `.text` attribute -- the HTML that makes up the web page, same as if we'd viewed the page source. Let's take a look:", "_____no_output_____" ] ], [ [ "warn_page.text", "_____no_output_____" ] ], [ [ "### ✍️ Try it yourself\n\nUse the code blocks below to experiment with requesting web pages and checking out the HTML that gets returned.\n\nSome ideas to get you started:\n- `'http://ire.org'`\n- `'https://web.archive.org/web/20031202214318/http://www.tdcj.state.tx.us:80/stat/finalmeals.htm'`\n- `'https://www.nrc.gov/reactors/operating/list-power-reactor-units.html'`", "_____no_output_____" ], [ "### Turn your HTML into soup\n\nThe HTML in the `.text` attribute of the request object is just a string -- a big ol' chunk of text.\n\nBefore we start targeting and extracting pieces of data in the HTML, we need to turn that chunk of text into a data structure that Python can work with. That's where the [BeautifulSoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) (`bs4`) library comes in.\n\nWe'll create a new instance of a `BeautifulSoup` object, which lives under the top-level `bs4` library that we imported earlier. We need to give it two things:\n- The HTML we'd like to parse -- `warn_page.text`\n- A string with the name of the type of parser to use -- `html.parser` is the default and usually fine, but [there are other options](https://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser)\n\nWe'll save the parsed HTML as a new variable, `soup`.", "_____no_output_____" ] ], [ [ "soup = bs4.BeautifulSoup(warn_page.text, 'html.parser')", "_____no_output_____" ] ], [ [ "Nothing happened, which is good! You can take a look at what `soup` is, but it looks pretty much like `warn_page.text`:", "_____no_output_____" ] ], [ [ "soup", "_____no_output_____" ] ], [ [ "If you want to be sure, you can use the Python function `type()` to check what sort of object you're dealing with:", "_____no_output_____" ] ], [ [ "# the `str` type means a string, or text\ntype(warn_page.text)", "_____no_output_____" ], [ "# the `bs4.BeautifulSoup` type means we successfully created the object\ntype(soup)", "_____no_output_____" ] ], [ [ "### ✍️ Try it yourself\n\nUse the code blocks below to experiment fetching HTML and turning it into soup (if you fetched some pages earlier and saved them as variables, that'd be a good start).", "_____no_output_____" ], [ "### Targeting and extracting data\n\nNow that we have BeautifulSoup object loaded up, we can go hunting for the specific HTML elements that contain the data we need. 
Our general strategy:\n1. Find the main table with the data we want to grab\n2. Get a list of rows (the `tr` element, which stands for \"table row\") in that table\n3. Use a Python `for loop` to go through each table row and find the data inside it (`td`, or \"table data\")\n\nTo accomplish this, we'll use two `bs4` methods:\n- [`find()`](https://www.crummy.com/software/BeautifulSoup/bs4/doc/#find), which returns the first element that matches whatever criteria you hand it\n- [`find_all()`](https://www.crummy.com/software/BeautifulSoup/bs4/doc/#find-all), which returns a _list_ of elements that match the criteria. ([Here's how Python lists work](Python%20syntax%20cheat%20sheet.ipynb#Lists).)", "_____no_output_____" ], [ "#### Find the table\n\nTo start with, we need to find the table. There are several ways to accomplish this, but because this is the only table on the page (view source and `Ctrl+F` to search for `<table` to confirm), we can simply say, \"Look through the `soup` object and find the table tag.\"\n\nTranslated, the code is: `soup.find('table')`. While we're at it, save the results of that search to a new variable, `table`.\n\nRun these cells:", "_____no_output_____" ] ], [ [ "table = soup.find('table')", "_____no_output_____" ], [ "table", "_____no_output_____" ] ], [ [ "#### Find the rows in the table\n\nNext, use the `find_all()` method to drill down and get a list of rows in the table:", "_____no_output_____" ] ], [ [ "rows = table.find_all('tr')", "_____no_output_____" ], [ "rows", "_____no_output_____" ] ], [ [ "To see how many items are in this list -- in other words, how many rows are in the table -- you can use the `len()` function:", "_____no_output_____" ] ], [ [ "len(rows)", "_____no_output_____" ] ], [ [ "#### Loop through the rows and extract the data\n\nNext, we can use a [`for` loop](Python%20syntax%20cheat%20sheet.ipynb#for-loops) to go through the list of rows and start grabbing data from each one.\n\nQuick refresher on _for loop_ syntax: Start with the word `for` (lowercase), then a variable name to stand in for each item in the list that you're looping over, then the word `in` (lowercase), then the name of the list holding the items (`rows`, in our case), then a colon, then an indented block of code describing what we're doing to each item in the list.\n\nEach piece of data in the row will be stored in a `td` tag, which stands for \"table data.\" So inside the loop -- in the indented block -- we'll use the `find_all()` method to get a list of every `td` tag inside the row. And from there, we can access the content inside each tag.\n\nOur goal is to end up with a _list_ of data for each row that we will eventually write out to a file. Typically you'd probably do the work of looping and inspecting the results, step by step, in one code cell. But to show the thinking of how you might approach this (and to practice the syntax), we'll start by just printing out each row and then build from there. (`print('='*80)` will print a line of 80 equals signs -- a way to help us see exactly what we're working with in each row.)", "_____no_output_____" ] ], [ [ "for row in rows:\n print(row)\n print('='*80)", "_____no_output_____" ] ], [ [ "Notice that the first item that prints is the header row with the column labels. 
You are free to keep these headers if you want, but I typically skip that row and define my own list of column names.\n\n(Another thing to consider: On better-constructed web pages, the cells in the header row will be represented by `th` (\"table header\") tags, not `td` (\"table data\") tags. The next step in our `for` loop is, \"Find all of the `td` tags in this row,\" so that would be something you would need to deal with.)\n\nWe can skip the first row by using _list slicing_: adding square brackets after the name of the list with some instructions about which items in the list we want to select.\n\nHere, the syntax would be: `rows[1:]`, which means, take everything in the `rows` list starting with the item in position 1 (the second item) to the end of the list. Like many programming languages, Python starts counting at 0, so the result will leave off the first item in the list -- i.e. the item in position 0, i.e. the headers.", "_____no_output_____" ] ], [ [ "for row in rows[1:]:\n print(row)\n print('='*80)", "_____no_output_____" ] ], [ [ "Now we're cooking with gas. Let's start pulling out the data in each row. Start by using `find_all()` to grab a list of `td` tags:", "_____no_output_____" ] ], [ [ "for row in rows[1:]:\n cells = row.find_all('td')\n print(cells)\n print('='*80)", "_____no_output_____" ] ], [ [ "Now we have, for each row, a _list_ of `td` tags. Next step is to look at the table and start grabbing specific values based on their position in the list and assigning them to human-readable variable names.\n\nQuick refresher on list syntax: To access a specific item in a list, use square brackets `[]` and the index number of the item you'd like to access. For instance, to get the first cell in the row -- the date that each WARN report was issued -- use `[0]`.", "_____no_output_____" ] ], [ [ "for row in rows[1:]:\n cells = row.find_all('td')\n warn_date = cells[0]\n print(warn_date)\n print('='*80)", "_____no_output_____" ] ], [ [ "This is returning the entire `Tag` object -- we just want the contents inside it. You can access the `.text` attribute of the tag to get the text inside:", "_____no_output_____" ] ], [ [ "for row in rows[1:]:\n cells = row.find_all('td')\n warn_date = cells[0].text \n print(warn_date)", "_____no_output_____" ] ], [ [ "In the next cell (`[1]`), the `.text` attribute will give you the NAICS code. In the third cell (`[2]`) you'll get the name of the business. 
Etc.\n\nIt's also generally good practice to trim off external whitespace for each value, and you can use the Python built-in string method `strip()` to accomplish this as you march across the row.\n\nWhich gets us this far:", "_____no_output_____" ] ], [ [ "for row in rows[1:]:\n cells = row.find_all('td')\n warn_date = cells[0].text.strip()\n naics_code = cells[1].text.strip()\n biz = cells[2].text.strip()\n print(warn_date, naics_code, biz)", "_____no_output_____" ] ], [ [ "### ✍️ Try it yourself\n\nNow that you've gotten this far, see if you can isolate the other pieces of data in each row.", "_____no_output_____" ] ], [ [ "for row in rows[1:]:\n cells = row.find_all('td')\n warn_date = cells[0].text.strip()\n naics_code = cells[1].text.strip()\n biz = cells[2].text.strip()\n \n # address\n \n # wia_code\n \n # total_employees\n \n # effective_date\n \n # type_code\n\n # print()", "_____no_output_____" ] ], [ [ "### Write the results to file\n\nNow that we've targeted our lists of data for each row, we can use Python's built-in [`csv`](https://docs.python.org/3/library/csv.html) module to write each list to a CSV file.\n\nFirst, import the csv module.", "_____no_output_____" ] ], [ [ "import csv", "_____no_output_____" ] ], [ [ "Now define a list of headers to match the data (each column header will be a string) -- run this cell:", "_____no_output_____" ] ], [ [ "HEADERS = [\n 'warn_date',\n 'naics_code',\n 'biz',\n 'address',\n 'wia_code',\n 'total_employees',\n 'effective_date',\n 'type_code'\n]", "_____no_output_____" ] ], [ [ "Now, using something called a `with` block, open a new CSV file to write to and write some code to do the following things:\n- Create a `csv.writer` object\n- Write out the list of headers using the `writerow()` method of the `csv.writer` object\n- Drop in the `for` loop you just wrote and, instead of just printing the contents of each cell, create a list of items and use the `writerow()` method of the `csv.writer` object to write your list of data to file", "_____no_output_____" ] ], [ [ "# create a file called 'warn-data.csv' in write ('w') mode\n# specify that newlines are terminated by an empty string (this deals with a PC-specific problem)\n# and use the `as` keyword to name the open file handler (the variable name `outfile` is arbitrary)\nwith open('warn-data.csv', 'w', newline='') as outfile:\n # go to the csv module we imported and make a new .writer object attached to the open file\n # and save it to a variable\n writer = csv.writer(outfile)\n\n # write out the list of headers\n writer.writerow(HEADERS)\n \n # paste in the for loop you wrote earlier here -- watch the indentation!\n # it should be at this indentation level =>\n # for row in rows[1:]:\n # cells = row.find_all('td')\n # etc. ...\n # but at the end, instead of `print(warn_date, naics_code, ...etc.)`\n # make it something like\n # data_out = [warn_date, naics_code, ...etc.]\n # `writer.writerow(data_out)`", "_____no_output_____" ] ], [ [ "If you look in the folder, you should see a new file: `warn-data.csv`. Hooray!\n\n🎉 🎉 🎉", "_____no_output_____" ], [ "### ✍️ Try it yourself\n\nPutting it all together:\n- Find a website you'd like to scrape\n- Use `requests` to fetch the HTML\n- Use `bs4` to parse the HTML and isolate the data you're interested in\n- Use `csv` to write the data to file", "_____no_output_____" ], [ "### Extra credit problems\n\n1. 
**Remove internal whitespace:** Looking over the data, you probably noticed that some of the values have some unnecessary internal whitespace, which you could fix before you wrote each row to file. Python does not have a built-in string method to remove internal whitespace, unfortunately, but [Googling around](https://www.google.com/search?q=python+remove+internal+whitespace) will yield you a common strategy: Using the `split()` method to separate individual words in the string, then `join()`ing the resulting list on a single space. As an example:\n\n```python\nmy_text = 'hello world how are you?'\n\n# split() will turn this into a list of words\nmy_text_words = my_text.split()\n# ['hello', 'world', 'how', 'are', 'you?']\n\n# join on a single space\nmy_text_clean = ' '.join(my_text_words)\nprint(my_text_clean)\n# prints 'hello world how are you?'\n\n# or, as a one-liner\nmy_text_clean = ' '.join(my_text.split())\n```\n\n2. **Fetch multiple years:** The table we scraped has WARN notices for the current year, but the agency also maintains pages with WARN notices for previous years -- there's a list of them in a section [toward the bottom of the page](https://www.dllr.state.md.us/employment/warn.shtml). See if you can figure out how to loop over multiple pages and scrape the contents of each into a single CSV.\n\n\n3. **Build a lookup table:** Each numeric code in the \"WIA Code\" column correspondes to a local area. See if you can figure out how to create a lookup dictionary that maps the numbers to their locations, then as you're looping over the data table, replace the numeric value in that column with the name of the local area instead. Here's a hint:\n\n```python\n lookup_dict = {\n '1': 'hello',\n '2': 'world'\n }\n\n print(lookup_dict.get('1'))\n # prints 'hello'\n\n print(lookup_dict.get('3'))\n # prints None\n\n```\n\n\n4. **Fix encoding errors:** You might have noticed a few encoding problems -- e.g., `Nestlé` is being renedered as `Nestlé`. This is due to an encoding problem -- the `warn_page.text` is not encoded as `utf-8`. Using `decode()` and `encode()`, see if you can fix this. (Hint! It looks like the state of Maryland is a big fan of `latin-1`.)", "_____no_output_____" ] ] ]
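For reference, the whole flow above can be collapsed into one short script. This is a sketch rather than a definitive solution — it assumes the Maryland WARN page still serves a single table with the same eight columns, and it folds in extra-credit item 1 by collapsing internal whitespace:

```python
import csv

import bs4
import requests

URL = 'http://www.dllr.state.md.us/employment/warn.shtml'
HEADERS = ['warn_date', 'naics_code', 'biz', 'address',
           'wia_code', 'total_employees', 'effective_date', 'type_code']

warn_page = requests.get(URL)
soup = bs4.BeautifulSoup(warn_page.text, 'html.parser')
rows = soup.find('table').find_all('tr')

with open('warn-data.csv', 'w', newline='') as outfile:
    writer = csv.writer(outfile)
    writer.writerow(HEADERS)
    for row in rows[1:]:  # skip the header row, as in the walkthrough
        cells = row.find_all('td')
        # strip external whitespace and collapse internal whitespace
        data_out = [' '.join(cell.text.split()) for cell in cells]
        writer.writerow(data_out)
```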
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
e7c2c785c59b6b0e51c05944a56a1cf8f2d92038
12,818
ipynb
Jupyter Notebook
module3/assignment_kaggle_challenge_3.ipynb
mikedcurry/DS-Unit-2-Kaggle-Challenge
7fb7378b22e1c02bf674d3dd7a46180b9e39bbfd
[ "MIT" ]
1
2019-08-13T15:23:00.000Z
2019-08-13T15:23:00.000Z
module3/assignment_kaggle_challenge_3.ipynb
Gabe-flomo/DS-Unit-2-Kaggle-Challenge
20e682bd966fb130cf88a61706224ca45349b874
[ "MIT" ]
null
null
null
module3/assignment_kaggle_challenge_3.ipynb
Gabe-flomo/DS-Unit-2-Kaggle-Challenge
20e682bd966fb130cf88a61706224ca45349b874
[ "MIT" ]
null
null
null
59.342593
316
0.629973
[ [ [ "Lambda School Data Science, Unit 2: Predictive Modeling\n\n# Kaggle Challenge, Module 3\n\n## Assignment\n- [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2/portfolio-project/ds6), then choose your dataset, and [submit this form](https://forms.gle/nyWURUg65x1UTRNV9), due today at 4pm Pacific.\n- [ ] Continue to participate in our Kaggle challenge.\n- [ ] Try xgboost.\n- [ ] Get your model's permutation importances.\n- [ ] Try feature selection with permutation importances.\n- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)\n- [ ] Commit your notebook to your fork of the GitHub repo.\n\n## Stretch Goals\n\n### Doing\n- [ ] Add your own stretch goal(s) !\n- [ ] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection.\n- [ ] Try other categorical encodings.\n- [ ] Try other Python libraries for gradient boosting.\n- [ ] Look at the bonus notebook in the repo, about monotonic constraints with gradient boosting.\n- [ ] Make visualizations and share on Slack.\n\n### Reading\n\nTop recommendations in _**bold italic:**_\n\n#### Permutation Importances\n- _**[Kaggle / Dan Becker: Machine Learning Explainability](https://www.kaggle.com/dansbecker/permutation-importance)**_\n- [Christoph Molnar: Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/feature-importance.html)\n\n#### (Default) Feature Importances\n - [Ando Saabas: Selecting good features, Part 3, Random Forests](https://blog.datadive.net/selecting-good-features-part-iii-random-forests/)\n - [Terence Parr, et al: Beware Default Random Forest Importances](https://explained.ai/rf-importance/index.html)\n\n#### Gradient Boosting\n - [A Gentle Introduction to the Gradient Boosting Algorithm for Machine Learning](https://machinelearningmastery.com/gentle-introduction-gradient-boosting-algorithm-machine-learning/)\n - _**[A Kaggle Master Explains Gradient Boosting](http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/)**_\n - [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/ISLR%20Seventh%20Printing.pdf) Chapter 8\n - [Gradient Boosting Explained](http://arogozhnikov.github.io/2016/06/24/gradient_boosting_explained.html)\n - _**[Boosting](https://www.youtube.com/watch?v=GM3CDQfQ4sw) (2.5 minute video)**_\n\n#### Categorical encoding for trees\n- [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)\n- [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/)\n- _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_\n- _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_\n- [Mean (likelihood) encodings: a comprehensive study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study)\n- [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html)\n\n#### Imposter 
Syndrome\n- [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/)\n- [How to manage impostor syndrome in data science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068)\n- [\"I am not a real data scientist\"](https://brohrer.github.io/imposter_syndrome.html)\n- _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_\n\n\n\n\n", "_____no_output_____" ], [ "### Python libraries for Gradient Boosting\n- [scikit-learn Gradient Tree Boosting](https://scikit-learn.org/stable/modules/ensemble.html#gradient-boosting) — slower than other libraries, but [the new version may be better](https://twitter.com/amuellerml/status/1129443826945396737)\n - Anaconda: already installed\n - Google Colab: already installed\n- [xgboost](https://xgboost.readthedocs.io/en/latest/) — can accept missing values and enforce [monotonic constraints](https://xiaoxiaowang87.github.io/monotonicity_constraint/)\n - Anaconda, Mac/Linux: `conda install -c conda-forge xgboost`\n - Windows: `conda install -c anaconda py-xgboost`\n - Google Colab: already installed\n- [LightGBM](https://lightgbm.readthedocs.io/en/latest/) — can accept missing values and enforce [monotonic constraints](https://blog.datadive.net/monotonicity-constraints-in-machine-learning/)\n - Anaconda: `conda install -c conda-forge lightgbm`\n - Google Colab: already installed\n- [CatBoost](https://catboost.ai/) — can accept missing values and use [categorical features](https://catboost.ai/docs/concepts/algorithm-main-stages_cat-to-numberic.html) without preprocessing\n - Anaconda: `conda install -c conda-forge catboost`\n - Google Colab: `pip install catboost`", "_____no_output_____" ], [ "### Categorical Encodings\n\n**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:\n\n- **\"Categorical Encoding\":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.\n- **Numeric Encoding:** Synonymous with Label Encoding, or \"Ordinal\" Encoding with random order. 
We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).\n- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).\n- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).\n\n\n**2.** The short video \n**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.\n\nCategory Encoders has multiple implementations of this general concept:\n\n- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)\n- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)\n- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)\n- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)\n- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)\n- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)\n\nCategory Encoder's mean encoding implementations work for regression problems or binary classification problems. \n\nFor multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:\n\n```python\nencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) # Both parameters > 1 to avoid overfitting\nX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')\nX_val_encoded = encoder.transform(X_train, y_val=='functional')\n```\n\n**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.\n\n```python\n dirty_cat.TargetEncoder(clf_type='multiclass-clf')\n```\nIt also implements an interesting idea called [\"Similarity Encoder\" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).\n\nHowever, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. And you may need to use it with one column at a time, instead of with your whole dataframe.\n\n**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals.\n\n_**I hope it’s not too frustrating or confusing that there’s not one “canonical” way to encode categorcals. It’s an active area of research and experimentation! 
Maybe you can make your own contributions!**_", "_____no_output_____" ] ], [ [ "# If you're in Colab...\nimport os, sys\nin_colab = 'google.colab' in sys.modules\n\nif in_colab:\n # Install required python packages:\n # category_encoders, version >= 2.0\n # eli5, version >= 0.9\n # pandas-profiling, version >= 2.0\n # plotly, version >= 4.0\n !pip install --upgrade category_encoders eli5 pandas-profiling plotly\n \n # Pull files from Github repo\n os.chdir('/content')\n !git init .\n !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git\n !git pull origin master\n \n # Change into directory for module\n os.chdir('module3')", "_____no_output_____" ], [ "import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# Merge train_features.csv & train_labels.csv\ntrain = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'), \n pd.read_csv('../data/tanzania/train_labels.csv'))\n\n# Read test_features.csv & sample_submission.csv\ntest = pd.read_csv('../data/tanzania/test_features.csv')\nsample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')", "_____no_output_____" ] ] ]
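One possible shape for this module's modeling cell — xgboost plus permutation importances — is sketched below. It assumes the waterpumps `train` DataFrame and its `status_group` target from the earlier modules, uses only libraries installed above (category_encoders, eli5, xgboost, scikit-learn), and its hyperparameters are placeholders rather than tuned values:

```python
import category_encoders as ce
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from xgboost import XGBClassifier

target = 'status_group'
train_set, val_set = train_test_split(train, test_size=0.2,
                                      stratify=train[target], random_state=42)
X_train, y_train = train_set.drop(columns=target), train_set[target]
X_val, y_val = val_set.drop(columns=target), val_set[target]

# Keep encoding/imputation separate from the model so the permuter can
# work on plain arrays.
transformers = make_pipeline(ce.OrdinalEncoder(),
                             SimpleImputer(strategy='median'))
X_train_t = transformers.fit_transform(X_train)
X_val_t = transformers.transform(X_val)

# Note: older xgboost releases accept string labels directly; newer ones
# may require label-encoding y first.
model = XGBClassifier(n_estimators=200, n_jobs=-1, random_state=42)
model.fit(X_train_t, y_train)

# Permutation importances on the validation set; to try feature selection,
# drop the low-importance features afterwards and refit.
permuter = PermutationImportance(model, scoring='accuracy',
                                 n_iter=3, random_state=42)
permuter.fit(X_val_t, y_val)
eli5.show_weights(permuter, feature_names=X_val.columns.tolist())
```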
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
e7c2ffc8e4fb1800d5c3afbd835a9564ce7b940f
16,744
ipynb
Jupyter Notebook
Outliers.ipynb
wf539/MLDataSciDeepLearningPython
aed1170b5460b373c37774bbdd12eb32f347be64
[ "MIT" ]
null
null
null
Outliers.ipynb
wf539/MLDataSciDeepLearningPython
aed1170b5460b373c37774bbdd12eb32f347be64
[ "MIT" ]
null
null
null
Outliers.ipynb
wf539/MLDataSciDeepLearningPython
aed1170b5460b373c37774bbdd12eb32f347be64
[ "MIT" ]
null
null
null
90.508108
6,444
0.866101
[ [ [ "# Dealing with Outliers", "_____no_output_____" ], [ "Sometimes outliers can mess up an analysis; you usually don't want a handful of data points to skew the overall results. Let's revisit our example of income data, with some random billionaire thrown in:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\n\nincomes = np.random.normal(27000, 15000, 10000)\nincomes = np.append(incomes, [1000000000])\n\nimport matplotlib.pyplot as plt\nplt.hist(incomes, 50)\nplt.show()", "_____no_output_____" ] ], [ [ "That's not very helpful to look at. One billionaire ended up squeezing everybody else into a single line in my histogram. Plus it skewed my mean income significantly:", "_____no_output_____" ] ], [ [ "incomes.mean()", "_____no_output_____" ] ], [ [ "It's important to dig into what is causing your outliers, and understand where they are coming from. You also need to think about whether removing them is a valid thing to do, given the spirit of what it is you're trying to analyze. If I know I want to understand more about the incomes of \"typical Americans\", filtering out billionaires seems like a legitimate thing to do.\n\nHere's something a little more robust than filtering out billionaires - it filters out anything beyond two standard deviations of the median value in the data set:", "_____no_output_____" ] ], [ [ "def reject_outliers(data):\n u = np.median(data)\n s = np.std(data)\n filtered = [e for e in data if (u - 2 * s < e < u + 2 * s)]\n return filtered\n\nfiltered = reject_outliers(incomes)\n\nplt.hist(filtered, 50)\nplt.show()", "_____no_output_____" ] ], [ [ "That looks better. And, our mean is more, well, meangingful now as well:", "_____no_output_____" ] ], [ [ "np.mean(filtered)", "_____no_output_____" ] ], [ [ "## Activity", "_____no_output_____" ], [ "Instead of a single outlier, add several randomly-generated outliers to the data. Experiment with different values of the multiple of the standard deviation to identify outliers, and see what effect it has on the final results.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7c30ad7d38f2c5776be454bb483c10e3625af52
85,913
ipynb
Jupyter Notebook
Course-5-Sequence-Models/week1/Building+a+Recurrent+Neural+Network+-+Step+by+Step+-+v3.ipynb
xnone/coursera-deep-learning
999d919792be449b2aa523ac6d85c294ac2b4dad
[ "MIT" ]
2
2019-04-04T10:17:26.000Z
2020-09-26T19:30:59.000Z
Course-5-Sequence-Models/week1/Building+a+Recurrent+Neural+Network+-+Step+by+Step+-+v3.ipynb
xnone/coursera-deep-learning
999d919792be449b2aa523ac6d85c294ac2b4dad
[ "MIT" ]
null
null
null
Course-5-Sequence-Models/week1/Building+a+Recurrent+Neural+Network+-+Step+by+Step+-+v3.ipynb
xnone/coursera-deep-learning
999d919792be449b2aa523ac6d85c294ac2b4dad
[ "MIT" ]
3
2018-08-31T14:37:27.000Z
2020-04-02T01:05:41.000Z
40.259138
683
0.464237
[ [ [ "# Building your Recurrent Neural Network - Step by Step\n\nWelcome to Course 5's first assignment! In this assignment, you will implement your first Recurrent Neural Network in numpy.\n\nRecurrent Neural Networks (RNN) are very effective for Natural Language Processing and other sequence tasks because they have \"memory\". They can read inputs $x^{\\langle t \\rangle}$ (such as words) one at a time, and remember some information/context through the hidden layer activations that get passed from one time-step to the next. This allows a uni-directional RNN to take information from the past to process later inputs. A bidirection RNN can take context from both the past and the future. \n\n**Notation**:\n- Superscript $[l]$ denotes an object associated with the $l^{th}$ layer. \n - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.\n\n- Superscript $(i)$ denotes an object associated with the $i^{th}$ example. \n - Example: $x^{(i)}$ is the $i^{th}$ training example input.\n\n- Superscript $\\langle t \\rangle$ denotes an object at the $t^{th}$ time-step. \n - Example: $x^{\\langle t \\rangle}$ is the input x at the $t^{th}$ time-step. $x^{(i)\\langle t \\rangle}$ is the input at the $t^{th}$ timestep of example $i$.\n \n- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.\n - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$.\n\nWe assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!", "_____no_output_____" ], [ "Let's first import all the packages that you will need during this assignment.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom rnn_utils import *", "_____no_output_____" ] ], [ [ "## 1 - Forward propagation for the basic Recurrent Neural Network\n\nLater this week, you will generate music using an RNN. The basic RNN that you will implement has the structure below. In this example, $T_x = T_y$. ", "_____no_output_____" ], [ "<img src=\"images/RNN.png\" style=\"width:500;height:300px;\">\n<caption><center> **Figure 1**: Basic RNN model </center></caption>", "_____no_output_____" ], [ "Here's how you can implement an RNN: \n\n**Steps**:\n1. Implement the calculations needed for one time-step of the RNN.\n2. Implement a loop over $T_x$ time-steps in order to process all the inputs, one at a time. \n\nLet's go!\n\n## 1.1 - RNN cell\n\nA Recurrent neural network can be seen as the repetition of a single cell. You are first going to implement the computations for a single time-step. The following figure describes the operations for a single time-step of an RNN cell. \n\n<img src=\"images/rnn_step_forward.png\" style=\"width:700px;height:300px;\">\n<caption><center> **Figure 2**: Basic RNN cell. Takes as input $x^{\\langle t \\rangle}$ (current input) and $a^{\\langle t - 1\\rangle}$ (previous hidden state containing information from the past), and outputs $a^{\\langle t \\rangle}$ which is given to the next RNN cell and also used to predict $y^{\\langle t \\rangle}$ </center></caption>\n\n**Exercise**: Implement the RNN-cell described in Figure (2).\n\n**Instructions**:\n1. Compute the hidden state with tanh activation: $a^{\\langle t \\rangle} = \\tanh(W_{aa} a^{\\langle t-1 \\rangle} + W_{ax} x^{\\langle t \\rangle} + b_a)$.\n2. Using your new hidden state $a^{\\langle t \\rangle}$, compute the prediction $\\hat{y}^{\\langle t \\rangle} = softmax(W_{ya} a^{\\langle t \\rangle} + b_y)$. 
We provided you a function: `softmax`.\n3. Store $(a^{\\langle t \\rangle}, a^{\\langle t-1 \\rangle}, x^{\\langle t \\rangle}, parameters)$ in cache\n4. Return $a^{\\langle t \\rangle}$ , $y^{\\langle t \\rangle}$ and cache\n\nWe will vectorize over $m$ examples. Thus, $x^{\\langle t \\rangle}$ will have dimension $(n_x,m)$, and $a^{\\langle t \\rangle}$ will have dimension $(n_a,m)$. ", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: rnn_cell_forward\n\ndef rnn_cell_forward(xt, a_prev, parameters):\n \"\"\"\n Implements a single forward step of the RNN-cell as described in Figure (2)\n\n Arguments:\n xt -- your input data at timestep \"t\", numpy array of shape (n_x, m).\n a_prev -- Hidden state at timestep \"t-1\", numpy array of shape (n_a, m)\n parameters -- python dictionary containing:\n Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)\n Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)\n Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)\n ba -- Bias, numpy array of shape (n_a, 1)\n by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)\n Returns:\n a_next -- next hidden state, of shape (n_a, m)\n yt_pred -- prediction at timestep \"t\", numpy array of shape (n_y, m)\n cache -- tuple of values needed for the backward pass, contains (a_next, a_prev, xt, parameters)\n \"\"\"\n \n # Retrieve parameters from \"parameters\"\n Wax = parameters[\"Wax\"]\n Waa = parameters[\"Waa\"]\n Wya = parameters[\"Wya\"]\n ba = parameters[\"ba\"]\n by = parameters[\"by\"]\n \n ### START CODE HERE ### (≈2 lines)\n # compute next activation state using the formula given above\n a_next = np.tanh(np.matmul(Waa, a_prev) + np.matmul(Wax, xt) + ba)\n # compute output of the current cell using the formula given above\n yt_pred = softmax(np.matmul(Wya, a_next) + by) \n ### END CODE HERE ###\n \n # store values you need for backward propagation in cache\n cache = (a_next, a_prev, xt, parameters)\n \n return a_next, yt_pred, cache", "_____no_output_____" ], [ "np.random.seed(1)\nxt = np.random.randn(3,10)\na_prev = np.random.randn(5,10)\nWaa = np.random.randn(5,5)\nWax = np.random.randn(5,3)\nWya = np.random.randn(2,5)\nba = np.random.randn(5,1)\nby = np.random.randn(2,1)\nparameters = {\"Waa\": Waa, \"Wax\": Wax, \"Wya\": Wya, \"ba\": ba, \"by\": by}\n\na_next, yt_pred, cache = rnn_cell_forward(xt, a_prev, parameters)\nprint(\"a_next[4] = \", a_next[4])\nprint(\"a_next.shape = \", a_next.shape)\nprint(\"yt_pred[1] =\", yt_pred[1])\nprint(\"yt_pred.shape = \", yt_pred.shape)", "a_next[4] = [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978\n -0.18887155 0.99815551 0.6531151 0.82872037]\na_next.shape = (5, 10)\nyt_pred[1] = [ 0.9888161 0.01682021 0.21140899 0.36817467 0.98988387 0.88945212\n 0.36920224 0.9966312 0.9982559 0.17746526]\nyt_pred.shape = (2, 10)\n" ] ], [ [ "**Expected Output**: \n\n<table>\n <tr>\n <td>\n **a_next[4]**:\n </td>\n <td>\n [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978\n -0.18887155 0.99815551 0.6531151 0.82872037]\n </td>\n </tr>\n <tr>\n <td>\n **a_next.shape**:\n </td>\n <td>\n (5, 10)\n </td>\n </tr>\n <tr>\n <td>\n **yt[1]**:\n </td>\n <td>\n [ 0.9888161 0.01682021 0.21140899 0.36817467 0.98988387 0.88945212\n 0.36920224 0.9966312 0.9982559 0.17746526]\n </td>\n </tr>\n <tr>\n <td>\n **yt.shape**:\n </td>\n <td>\n (2, 10)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "## 1.2 - RNN forward pass \n\nYou 
can see an RNN as the repetition of the cell you've just built. If your input sequence of data is carried over 10 time steps, then you will copy the RNN cell 10 times. Each cell takes as input the hidden state from the previous cell ($a^{\\langle t-1 \\rangle}$) and the current time-step's input data ($x^{\\langle t \\rangle}$). It outputs a hidden state ($a^{\\langle t \\rangle}$) and a prediction ($y^{\\langle t \\rangle}$) for this time-step.\n\n\n<img src=\"images/rnn.png\" style=\"width:800px;height:300px;\">\n<caption><center> **Figure 3**: Basic RNN. The input sequence $x = (x^{\\langle 1 \\rangle}, x^{\\langle 2 \\rangle}, ..., x^{\\langle T_x \\rangle})$ is carried over $T_x$ time steps. The network outputs $y = (y^{\\langle 1 \\rangle}, y^{\\langle 2 \\rangle}, ..., y^{\\langle T_x \\rangle})$. </center></caption>\n\n\n\n**Exercise**: Code the forward propagation of the RNN described in Figure (3).\n\n**Instructions**:\n1. Create a vector of zeros ($a$) that will store all the hidden states computed by the RNN.\n2. Initialize the \"next\" hidden state as $a_0$ (initial hidden state).\n3. Start looping over each time step, your incremental index is $t$ :\n - Update the \"next\" hidden state and the cache by running `rnn_cell_forward`\n - Store the \"next\" hidden state in $a$ ($t^{th}$ position) \n - Store the prediction in y\n - Add the cache to the list of caches\n4. Return $a$, $y$ and caches", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: rnn_forward\n\ndef rnn_forward(x, a0, parameters):\n \"\"\"\n Implement the forward propagation of the recurrent neural network described in Figure (3).\n\n Arguments:\n x -- Input data for every time-step, of shape (n_x, m, T_x).\n a0 -- Initial hidden state, of shape (n_a, m)\n parameters -- python dictionary containing:\n Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)\n Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)\n Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)\n ba -- Bias numpy array of shape (n_a, 1)\n by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)\n\n Returns:\n a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)\n y_pred -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)\n caches -- tuple of values needed for the backward pass, contains (list of caches, x)\n \"\"\"\n \n # Initialize \"caches\" which will contain the list of all caches\n caches = []\n \n # Retrieve dimensions from shapes of x and parameters[\"Wya\"]\n n_x, m, T_x = x.shape\n n_y, n_a = parameters[\"Wya\"].shape\n \n ### START CODE HERE ###\n \n # initialize \"a\" and \"y\" with zeros (≈2 lines)\n a = np.zeros((n_a, m, T_x))\n y_pred = np.zeros((n_y, m, T_x))\n \n # Initialize a_next (≈1 line)\n a_next = a0\n \n # loop over all time-steps\n for t in range(T_x):\n # Update next hidden state, compute the prediction, get the cache (≈1 line)\n a_next, yt_pred, cache = rnn_cell_forward(x[:,:,t], a_next, parameters)\n # Save the value of the new \"next\" hidden state in a (≈1 line)\n a[:,:,t] = a_next\n # Save the value of the prediction in y (≈1 line)\n y_pred[:,:,t] = yt_pred\n # Append \"cache\" to \"caches\" (≈1 line)\n caches.append(cache)\n \n ### END CODE HERE ###\n \n # store values needed for backward propagation in cache\n caches = (caches, x)\n \n return a, y_pred, caches", "_____no_output_____" ], [ "np.random.seed(1)\nx = np.random.randn(3,10,4)\na0 = 
np.random.randn(5,10)\nWaa = np.random.randn(5,5)\nWax = np.random.randn(5,3)\nWya = np.random.randn(2,5)\nba = np.random.randn(5,1)\nby = np.random.randn(2,1)\nparameters = {\"Waa\": Waa, \"Wax\": Wax, \"Wya\": Wya, \"ba\": ba, \"by\": by}\n\na, y_pred, caches = rnn_forward(x, a0, parameters)\nprint(\"a[4][1] = \", a[4][1])\nprint(\"a.shape = \", a.shape)\nprint(\"y_pred[1][3] =\", y_pred[1][3])\nprint(\"y_pred.shape = \", y_pred.shape)\nprint(\"caches[1][1][3] =\", caches[1][1][3])\nprint(\"len(caches) = \", len(caches))", "a[4][1] = [-0.99999375 0.77911235 -0.99861469 -0.99833267]\na.shape = (5, 10, 4)\ny_pred[1][3] = [ 0.79560373 0.86224861 0.11118257 0.81515947]\ny_pred.shape = (2, 10, 4)\ncaches[1][1][3] = [-1.1425182 -0.34934272 -0.20889423 0.58662319]\nlen(caches) = 2\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **a[4][1]**:\n </td>\n <td>\n [-0.99999375 0.77911235 -0.99861469 -0.99833267]\n </td>\n </tr>\n <tr>\n <td>\n **a.shape**:\n </td>\n <td>\n (5, 10, 4)\n </td>\n </tr>\n <tr>\n <td>\n **y[1][3]**:\n </td>\n <td>\n [ 0.79560373 0.86224861 0.11118257 0.81515947]\n </td>\n </tr>\n <tr>\n <td>\n **y.shape**:\n </td>\n <td>\n (2, 10, 4)\n </td>\n </tr>\n <tr>\n <td>\n **cache[1][1][3]**:\n </td>\n <td>\n [-1.1425182 -0.34934272 -0.20889423 0.58662319]\n </td>\n </tr>\n <tr>\n <td>\n **len(cache)**:\n </td>\n <td>\n 2\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "Congratulations! You've successfully built the forward propagation of a recurrent neural network from scratch. This will work well enough for some applications, but it suffers from vanishing gradient problems. So it works best when each output $y^{\\langle t \\rangle}$ can be estimated using mainly \"local\" context (meaning information from inputs $x^{\\langle t' \\rangle}$ where $t'$ is not too far from $t$). \n\nIn the next part, you will build a more complex LSTM model, which is better at addressing vanishing gradients. The LSTM will be better able to remember a piece of information and keep it saved for many timesteps. ", "_____no_output_____" ], [ "## 2 - Long Short-Term Memory (LSTM) network\n\nThis following figure shows the operations of an LSTM-cell.\n\n<img src=\"images/LSTM.png\" style=\"width:500;height:400px;\">\n<caption><center> **Figure 4**: LSTM-cell. This tracks and updates a \"cell state\" or memory variable $c^{\\langle t \\rangle}$ at every time-step, which can be different from $a^{\\langle t \\rangle}$. </center></caption>\n\nSimilar to the RNN example above, you will start by implementing the LSTM cell for a single time-step. Then you can iteratively call it from inside a for-loop to have it process an input with $T_x$ time-steps. \n\n### About the gates\n\n#### - Forget gate\n\nFor the sake of this illustration, lets assume we are reading words in a piece of text, and want use an LSTM to keep track of grammatical structures, such as whether the subject is singular or plural. If the subject changes from a singular word to a plural word, we need to find a way to get rid of our previously stored memory value of the singular/plural state. In an LSTM, the forget gate lets us do this: \n\n$$\\Gamma_f^{\\langle t \\rangle} = \\sigma(W_f[a^{\\langle t-1 \\rangle}, x^{\\langle t \\rangle}] + b_f)\\tag{1} $$\n\nHere, $W_f$ are weights that govern the forget gate's behavior. We concatenate $[a^{\\langle t-1 \\rangle}, x^{\\langle t \\rangle}]$ and multiply by $W_f$. The equation above results in a vector $\\Gamma_f^{\\langle t \\rangle}$ with values between 0 and 1. 
This forget gate vector will be multiplied element-wise by the previous cell state $c^{\\langle t-1 \\rangle}$. So if one of the values of $\\Gamma_f^{\\langle t \\rangle}$ is 0 (or close to 0) then it means that the LSTM should remove that piece of information (e.g. the singular subject) in the corresponding component of $c^{\\langle t-1 \\rangle}$. If one of the values is 1, then it will keep the information. \n\n#### - Update gate\n\nOnce we forget that the subject being discussed is singular, we need to find a way to update it to reflect that the new subject is now plural. Here is the formula for the update gate: \n\n$$\\Gamma_u^{\\langle t \\rangle} = \\sigma(W_u[a^{\\langle t-1 \\rangle}, x^{\\langle t \\rangle}] + b_u)\\tag{2} $$ \n\nSimilar to the forget gate, here $\\Gamma_u^{\\langle t \\rangle}$ is again a vector of values between 0 and 1. This will be multiplied element-wise with $\\tilde{c}^{\\langle t \\rangle}$, in order to compute $c^{\\langle t \\rangle}$.\n\n#### - Updating the cell \n\nTo update the new subject we need to create a new vector of numbers that we can add to our previous cell state. The equation we use is: \n\n$$ \\tilde{c}^{\\langle t \\rangle} = \\tanh(W_c[a^{\\langle t-1 \\rangle}, x^{\\langle t \\rangle}] + b_c)\\tag{3} $$\n\nFinally, the new cell state is: \n\n$$ c^{\\langle t \\rangle} = \\Gamma_f^{\\langle t \\rangle}* c^{\\langle t-1 \\rangle} + \\Gamma_u^{\\langle t \\rangle} *\\tilde{c}^{\\langle t \\rangle} \\tag{4} $$\n\n\n#### - Output gate\n\nTo decide which outputs we will use, we will use the following two formulas: \n\n$$ \\Gamma_o^{\\langle t \\rangle}= \\sigma(W_o[a^{\\langle t-1 \\rangle}, x^{\\langle t \\rangle}] + b_o)\\tag{5}$$ \n$$ a^{\\langle t \\rangle} = \\Gamma_o^{\\langle t \\rangle}* \\tanh(c^{\\langle t \\rangle})\\tag{6} $$\n\nWhere in equation 5 you decide what to output using a sigmoid function and in equation 6 you multiply that by the $\\tanh$ of the cell state $c^{\\langle t \\rangle}$. ", "_____no_output_____" ], [ "### 2.1 - LSTM cell\n\n**Exercise**: Implement the LSTM cell described in Figure (4).\n\n**Instructions**:\n1. Concatenate $a^{\\langle t-1 \\rangle}$ and $x^{\\langle t \\rangle}$ in a single matrix: $concat = \\begin{bmatrix} a^{\\langle t-1 \\rangle} \\\\ x^{\\langle t \\rangle} \\end{bmatrix}$\n2. Compute all the formulas 1-6. You can use `sigmoid()` (provided) and `np.tanh()`.\n3. Compute the prediction $y^{\\langle t \\rangle}$. 
You can use `softmax()` (provided).", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: lstm_cell_forward\n\ndef lstm_cell_forward(xt, a_prev, c_prev, parameters):\n \"\"\"\n Implement a single forward step of the LSTM-cell as described in Figure (4)\n\n Arguments:\n xt -- your input data at timestep \"t\", numpy array of shape (n_x, m).\n a_prev -- Hidden state at timestep \"t-1\", numpy array of shape (n_a, m)\n c_prev -- Memory state at timestep \"t-1\", numpy array of shape (n_a, m)\n parameters -- python dictionary containing:\n Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)\n bf -- Bias of the forget gate, numpy array of shape (n_a, 1)\n Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)\n bi -- Bias of the update gate, numpy array of shape (n_a, 1)\n Wc -- Weight matrix of the first \"tanh\", numpy array of shape (n_a, n_a + n_x)\n bc -- Bias of the first \"tanh\", numpy array of shape (n_a, 1)\n Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)\n bo -- Bias of the output gate, numpy array of shape (n_a, 1)\n Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)\n by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)\n \n Returns:\n a_next -- next hidden state, of shape (n_a, m)\n c_next -- next memory state, of shape (n_a, m)\n yt_pred -- prediction at timestep \"t\", numpy array of shape (n_y, m)\n cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters)\n \n Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilde),\n c stands for the memory value\n \"\"\"\n\n # Retrieve parameters from \"parameters\"\n Wf = parameters[\"Wf\"]\n bf = parameters[\"bf\"]\n Wi = parameters[\"Wi\"]\n bi = parameters[\"bi\"]\n Wc = parameters[\"Wc\"]\n bc = parameters[\"bc\"]\n Wo = parameters[\"Wo\"]\n bo = parameters[\"bo\"]\n Wy = parameters[\"Wy\"]\n by = parameters[\"by\"]\n \n # Retrieve dimensions from shapes of xt and Wy\n n_x, m = xt.shape\n n_y, n_a = Wy.shape\n\n ### START CODE HERE ###\n # Concatenate a_prev and xt (≈3 lines)\n concat = np.zeros((n_x + n_a, m))\n concat[: n_a, :] = a_prev\n concat[n_a :, :] = xt\n\n # Compute values for ft, it, cct, c_next, ot, a_next using the formulas given figure (4) (≈6 lines)\n ft = sigmoid(np.matmul(Wf, concat) + bf)\n it = sigmoid(np.matmul(Wi, concat) + bi)\n cct = np.tanh(np.matmul(Wc, concat) + bc)\n c_next = ft * c_prev + it * cct\n ot = sigmoid(np.matmul(Wo, concat) + bo)\n a_next = ot * np.tanh(c_next)\n \n # Compute prediction of the LSTM cell (≈1 line)\n yt_pred = softmax(np.matmul(Wy, a_next) + by)\n ### END CODE HERE ###\n\n # store values needed for backward propagation in cache\n cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)\n\n return a_next, c_next, yt_pred, cache", "_____no_output_____" ], [ "np.random.seed(1)\nxt = np.random.randn(3,10)\na_prev = np.random.randn(5,10)\nc_prev = np.random.randn(5,10)\nWf = np.random.randn(5, 5+3)\nbf = np.random.randn(5,1)\nWi = np.random.randn(5, 5+3)\nbi = np.random.randn(5,1)\nWo = np.random.randn(5, 5+3)\nbo = np.random.randn(5,1)\nWc = np.random.randn(5, 5+3)\nbc = np.random.randn(5,1)\nWy = np.random.randn(2,5)\nby = np.random.randn(2,1)\n\nparameters = {\"Wf\": Wf, \"Wi\": Wi, \"Wo\": Wo, \"Wc\": Wc, \"Wy\": Wy, \"bf\": bf, \"bi\": bi, \"bo\": bo, \"bc\": bc, \"by\": by}\n\na_next, c_next, yt, cache = 
lstm_cell_forward(xt, a_prev, c_prev, parameters)\nprint(\"a_next[4] = \", a_next[4])\nprint(\"a_next.shape = \", c_next.shape)\nprint(\"c_next[2] = \", c_next[2])\nprint(\"c_next.shape = \", c_next.shape)\nprint(\"yt[1] =\", yt[1])\nprint(\"yt.shape = \", yt.shape)\nprint(\"cache[1][3] =\", cache[1][3])\nprint(\"len(cache) = \", len(cache))", "a_next[4] = [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482\n 0.76566531 0.34631421 -0.00215674 0.43827275]\na_next.shape = (5, 10)\nc_next[2] = [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942\n 0.76449811 -0.0981561 -0.74348425 -0.26810932]\nc_next.shape = (5, 10)\nyt[1] = [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381\n 0.00943007 0.12666353 0.39380172 0.07828381]\nyt.shape = (2, 10)\ncache[1][3] = [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874\n 0.07651101 -1.03752894 1.41219977 -0.37647422]\nlen(cache) = 10\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **a_next[4]**:\n </td>\n <td>\n [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482\n 0.76566531 0.34631421 -0.00215674 0.43827275]\n </td>\n </tr>\n <tr>\n <td>\n **a_next.shape**:\n </td>\n <td>\n (5, 10)\n </td>\n </tr>\n <tr>\n <td>\n **c_next[2]**:\n </td>\n <td>\n [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942\n 0.76449811 -0.0981561 -0.74348425 -0.26810932]\n </td>\n </tr>\n <tr>\n <td>\n **c_next.shape**:\n </td>\n <td>\n (5, 10)\n </td>\n </tr>\n <tr>\n <td>\n **yt[1]**:\n </td>\n <td>\n [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381\n 0.00943007 0.12666353 0.39380172 0.07828381]\n </td>\n </tr>\n <tr>\n <td>\n **yt.shape**:\n </td>\n <td>\n (2, 10)\n </td>\n </tr>\n <tr>\n <td>\n **cache[1][3]**:\n </td>\n <td>\n [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874\n 0.07651101 -1.03752894 1.41219977 -0.37647422]\n </td>\n </tr>\n <tr>\n <td>\n **len(cache)**:\n </td>\n <td>\n 10\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 2.2 - Forward pass for LSTM\n\nNow that you have implemented one step of an LSTM, you can now iterate this over this using a for-loop to process a sequence of $T_x$ inputs. \n\n<img src=\"images/LSTM_rnn.png\" style=\"width:500;height:300px;\">\n<caption><center> **Figure 4**: LSTM over multiple time-steps. </center></caption>\n\n**Exercise:** Implement `lstm_forward()` to run an LSTM over $T_x$ time-steps. 
\n\n**Note**: $c^{\\langle 0 \\rangle}$ is initialized with zeros.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: lstm_forward\n\ndef lstm_forward(x, a0, parameters):\n \"\"\"\n Implement the forward propagation of the recurrent neural network using an LSTM-cell described in Figure (3).\n\n Arguments:\n x -- Input data for every time-step, of shape (n_x, m, T_x).\n a0 -- Initial hidden state, of shape (n_a, m)\n parameters -- python dictionary containing:\n Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)\n bf -- Bias of the forget gate, numpy array of shape (n_a, 1)\n Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)\n bi -- Bias of the update gate, numpy array of shape (n_a, 1)\n Wc -- Weight matrix of the first \"tanh\", numpy array of shape (n_a, n_a + n_x)\n bc -- Bias of the first \"tanh\", numpy array of shape (n_a, 1)\n Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)\n bo -- Bias of the output gate, numpy array of shape (n_a, 1)\n Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)\n by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)\n \n Returns:\n a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)\n y -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)\n caches -- tuple of values needed for the backward pass, contains (list of all the caches, x)\n \"\"\"\n\n # Initialize \"caches\", which will track the list of all the caches\n caches = []\n \n ### START CODE HERE ###\n # Retrieve dimensions from shapes of x and parameters['Wy'] (≈2 lines)\n n_x, m, T_x = x.shape\n n_y, n_a = parameters['Wy'].shape\n \n # initialize \"a\", \"c\" and \"y\" with zeros (≈3 lines)\n a = np.zeros((n_a, m, T_x))\n c = np.zeros((n_a, m, T_x))\n y = np.zeros((n_y, m, T_x))\n \n # Initialize a_next and c_next (≈2 lines)\n a_next = a0\n c_next = np.zeros((n_a, m))\n \n # loop over all time-steps\n for t in range(T_x):\n # Update next hidden state, next memory state, compute the prediction, get the cache (≈1 line)\n a_next, c_next, yt, cache = lstm_cell_forward(x[:,:,t], a_next, c_next, parameters)\n # Save the value of the new \"next\" hidden state in a (≈1 line)\n a[:,:,t] = a_next\n # Save the value of the prediction in y (≈1 line)\n y[:,:,t] = yt\n # Save the value of the next cell state (≈1 line)\n c[:,:,t] = c_next\n # Append the cache into caches (≈1 line)\n caches.append(cache)\n \n ### END CODE HERE ###\n \n # store values needed for backward propagation in cache\n caches = (caches, x)\n\n return a, y, c, caches", "_____no_output_____" ], [ "np.random.seed(1)\nx = np.random.randn(3,10,7)\na0 = np.random.randn(5,10)\nWf = np.random.randn(5, 5+3)\nbf = np.random.randn(5,1)\nWi = np.random.randn(5, 5+3)\nbi = np.random.randn(5,1)\nWo = np.random.randn(5, 5+3)\nbo = np.random.randn(5,1)\nWc = np.random.randn(5, 5+3)\nbc = np.random.randn(5,1)\nWy = np.random.randn(2,5)\nby = np.random.randn(2,1)\n\nparameters = {\"Wf\": Wf, \"Wi\": Wi, \"Wo\": Wo, \"Wc\": Wc, \"Wy\": Wy, \"bf\": bf, \"bi\": bi, \"bo\": bo, \"bc\": bc, \"by\": by}\n\na, y, c, caches = lstm_forward(x, a0, parameters)\nprint(\"a[4][3][6] = \", a[4][3][6])\nprint(\"a.shape = \", a.shape)\nprint(\"y[1][4][3] =\", y[1][4][3])\nprint(\"y.shape = \", y.shape)\nprint(\"caches[1][1[1]] =\", caches[1][1][1])\nprint(\"c[1][2][1]\", c[1][2][1])\nprint(\"len(caches) = \", len(caches))", "a[4][3][6] = 0.172117767533\na.shape = (5, 10, 
7)\ny[1][4][3] = 0.95087346185\ny.shape = (2, 10, 7)\ncaches[1][1[1]] = [ 0.82797464 0.23009474 0.76201118 -0.22232814 -0.20075807 0.18656139\n 0.41005165]\nc[1][2][1] -0.855544916718\nlen(caches) = 2\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **a[4][3][6]** =\n </td>\n <td>\n 0.172117767533\n </td>\n </tr>\n <tr>\n <td>\n **a.shape** =\n </td>\n <td>\n (5, 10, 7)\n </td>\n </tr>\n <tr>\n <td>\n **y[1][4][3]** =\n </td>\n <td>\n 0.95087346185\n </td>\n </tr>\n <tr>\n <td>\n **y.shape** =\n </td>\n <td>\n (2, 10, 7)\n </td>\n </tr>\n <tr>\n <td>\n **caches[1][1][1]** =\n </td>\n <td>\n [ 0.82797464 0.23009474 0.76201118 -0.22232814 -0.20075807 0.18656139\n 0.41005165]\n </td>\n \n </tr>\n <tr>\n <td>\n **c[1][2][1]** =\n </td>\n <td>\n -0.855544916718\n </td>\n </tr> \n \n </tr>\n <tr>\n <td>\n **len(caches)** =\n </td>\n <td>\n 2\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "Congratulations! You have now implemented the forward passes for the basic RNN and the LSTM. When using a deep learning framework, implementing the forward pass is sufficient to build systems that achieve great performance. \n\nThe rest of this notebook is optional, and will not be graded.", "_____no_output_____" ], [ "## 3 - Backpropagation in recurrent neural networks (OPTIONAL / UNGRADED)\n\nIn modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers do not need to bother with the details of the backward pass. If however you are an expert in calculus and want to see the details of backprop in RNNs, you can work through this optional portion of the notebook. \n\nWhen in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in recurrent neural networks you can to calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are quite complicated and we did not derive them in lecture. However, we will briefly present them below. ", "_____no_output_____" ], [ "### 3.1 - Basic RNN backward pass\n\nWe will start by computing the backward pass for the basic RNN-cell.\n\n<img src=\"images/rnn_cell_backprop.png\" style=\"width:500;height:300px;\"> <br>\n<caption><center> **Figure 5**: RNN-cell's backward pass. Just like in a fully-connected neural network, the derivative of the cost function $J$ backpropagates through the RNN by following the chain-rule from calculas. The chain-rule is also used to calculate $(\\frac{\\partial J}{\\partial W_{ax}},\\frac{\\partial J}{\\partial W_{aa}},\\frac{\\partial J}{\\partial b})$ to update the parameters $(W_{ax}, W_{aa}, b_a)$. </center></caption>", "_____no_output_____" ], [ "#### Deriving the one step backward functions: \n\nTo compute the `rnn_cell_backward` you need to compute the following equations. It is a good exercise to derive them by hand. \n\nThe derivative of $\\tanh$ is $1-\\tanh(x)^2$. You can find the complete proof [here](https://www.wyzant.com/resources/lessons/math/calculus/derivative_proofs/tanx). Note that: $ \\text{sech}(x)^2 = 1 - \\tanh(x)^2$\n\nSimilarly for $\\frac{ \\partial a^{\\langle t \\rangle} } {\\partial W_{ax}}, \\frac{ \\partial a^{\\langle t \\rangle} } {\\partial W_{aa}}, \\frac{ \\partial a^{\\langle t \\rangle} } {\\partial b}$, the derivative of $\\tanh(u)$ is $(1-\\tanh(u)^2)du$. 
\n\nThe final two equations also follow same rule and are derived using the $\\tanh$ derivative. Note that the arrangement is done in a way to get the same dimensions to match.", "_____no_output_____" ] ], [ [ "def rnn_cell_backward(da_next, cache):\n \"\"\"\n Implements the backward pass for the RNN-cell (single time-step).\n\n Arguments:\n da_next -- Gradient of loss with respect to next hidden state\n cache -- python dictionary containing useful values (output of rnn_cell_forward())\n\n Returns:\n gradients -- python dictionary containing:\n dx -- Gradients of input data, of shape (n_x, m)\n da_prev -- Gradients of previous hidden state, of shape (n_a, m)\n dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)\n dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)\n dba -- Gradients of bias vector, of shape (n_a, 1)\n \"\"\"\n \n # Retrieve values from cache\n (a_next, a_prev, xt, parameters) = cache\n \n # Retrieve values from parameters\n Wax = parameters[\"Wax\"]\n Waa = parameters[\"Waa\"]\n Wya = parameters[\"Wya\"]\n ba = parameters[\"ba\"]\n by = parameters[\"by\"]\n\n ### START CODE HERE ###\n # compute the gradient of tanh with respect to a_next (≈1 line)\n dtanh = (1 - a_next ** 2) * da_next\n\n # compute the gradient of the loss with respect to Wax (≈2 lines)\n dxt = np.matmul(Wax.T, dtanh)\n dWax = np.matmul(dtanh, xt.T)\n\n # compute the gradient with respect to Waa (≈2 lines)\n da_prev = np.matmul(Waa.T, dtanh)\n dWaa = np.matmul(dtanh, a_prev.T)\n\n # compute the gradient with respect to b (≈1 line)\n dba = np.sum(dtanh, keepdims=True, axis=1)\n\n ### END CODE HERE ###\n \n # Store the gradients in a python dictionary\n gradients = {\"dxt\": dxt, \"da_prev\": da_prev, \"dWax\": dWax, \"dWaa\": dWaa, \"dba\": dba}\n \n return gradients", "_____no_output_____" ], [ "np.random.seed(1)\nxt = np.random.randn(3,10)\na_prev = np.random.randn(5,10)\nWax = np.random.randn(5,3)\nWaa = np.random.randn(5,5)\nWya = np.random.randn(2,5)\nb = np.random.randn(5,1)\nby = np.random.randn(2,1)\nparameters = {\"Wax\": Wax, \"Waa\": Waa, \"Wya\": Wya, \"ba\": ba, \"by\": by}\n\na_next, yt, cache = rnn_cell_forward(xt, a_prev, parameters)\n\nda_next = np.random.randn(5,10)\ngradients = rnn_cell_backward(da_next, cache)\nprint(\"gradients[\\\"dxt\\\"][1][2] =\", gradients[\"dxt\"][1][2])\nprint(\"gradients[\\\"dxt\\\"].shape =\", gradients[\"dxt\"].shape)\nprint(\"gradients[\\\"da_prev\\\"][2][3] =\", gradients[\"da_prev\"][2][3])\nprint(\"gradients[\\\"da_prev\\\"].shape =\", gradients[\"da_prev\"].shape)\nprint(\"gradients[\\\"dWax\\\"][3][1] =\", gradients[\"dWax\"][3][1])\nprint(\"gradients[\\\"dWax\\\"].shape =\", gradients[\"dWax\"].shape)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"gradients[\\\"dWaa\\\"].shape =\", gradients[\"dWaa\"].shape)\nprint(\"gradients[\\\"dba\\\"][4] =\", gradients[\"dba\"][4])\nprint(\"gradients[\\\"dba\\\"].shape =\", gradients[\"dba\"].shape)", "gradients[\"dxt\"][1][2] = -0.460564103059\ngradients[\"dxt\"].shape = (3, 10)\ngradients[\"da_prev\"][2][3] = 0.0842968653807\ngradients[\"da_prev\"].shape = (5, 10)\ngradients[\"dWax\"][3][1] = 0.393081873922\ngradients[\"dWax\"].shape = (5, 3)\ngradients[\"dWaa\"][1][2] = -0.28483955787\ngradients[\"dWaa\"].shape = (5, 5)\ngradients[\"dba\"][4] = [ 0.80517166]\ngradients[\"dba\"].shape = (5, 1)\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **gradients[\"dxt\"][1][2]** =\n </td>\n <td>\n -0.460564103059\n </td>\n </tr>\n <tr>\n <td>\n 
**gradients[\"dxt\"].shape** =\n </td>\n <td>\n (3, 10)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"da_prev\"][2][3]** =\n </td>\n <td>\n 0.0842968653807\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"da_prev\"].shape** =\n </td>\n <td>\n (5, 10)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWax\"][3][1]** =\n </td>\n <td>\n 0.393081873922\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWax\"].shape** =\n </td>\n <td>\n (5, 3)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWaa\"][1][2]** = \n </td>\n <td>\n -0.28483955787\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWaa\"].shape** =\n </td>\n <td>\n (5, 5)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dba\"][4]** = \n </td>\n <td>\n [ 0.80517166]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dba\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n</table>", "_____no_output_____" ], [ "#### Backward pass through the RNN\n\nComputing the gradients of the cost with respect to $a^{\\langle t \\rangle}$ at every time-step $t$ is useful because it is what helps the gradient backpropagate to the previous RNN-cell. To do so, you need to iterate through all the time steps starting at the end, and at each step, you increment the overall $db_a$, $dW_{aa}$, $dW_{ax}$ and you store $dx$.\n\n**Instructions**:\n\nImplement the `rnn_backward` function. Initialize the return variables with zeros first and then loop through all the time steps while calling the `rnn_cell_backward` at each time timestep, update the other variables accordingly.", "_____no_output_____" ] ], [ [ "def rnn_backward(da, caches):\n \"\"\"\n Implement the backward pass for a RNN over an entire sequence of input data.\n\n Arguments:\n da -- Upstream gradients of all hidden states, of shape (n_a, m, T_x)\n caches -- tuple containing information from the forward pass (rnn_forward)\n \n Returns:\n gradients -- python dictionary containing:\n dx -- Gradient w.r.t. the input data, numpy-array of shape (n_x, m, T_x)\n da0 -- Gradient w.r.t the initial hidden state, numpy-array of shape (n_a, m)\n dWax -- Gradient w.r.t the input's weight matrix, numpy-array of shape (n_a, n_x)\n dWaa -- Gradient w.r.t the hidden state's weight matrix, numpy-arrayof shape (n_a, n_a)\n dba -- Gradient w.r.t the bias, of shape (n_a, 1)\n \"\"\"\n \n ### START CODE HERE ###\n \n # Retrieve values from the first cache (t=1) of caches (≈2 lines)\n (caches, x) = caches\n (a1, a0, x1, parameters) = caches[0]\n \n # Retrieve dimensions from da's and x1's shapes (≈2 lines)\n n_a, m, T_x = da.shape\n n_x, m = x1.shape\n \n # initialize the gradients with the right sizes (≈6 lines)\n dx = np.zeros((n_x, m, T_x))\n dWax = np.zeros((n_a, n_x))\n dWaa = np.zeros((n_a, n_a))\n dba = np.zeros((n_a, 1))\n da0 = np.zeros((n_a, m))\n da_prevt = np.zeros((n_a, m))\n \n # Loop through all the time steps\n for t in reversed(range(T_x)):\n # Compute gradients at time step t. Choose wisely the \"da_next\" and the \"cache\" to use in the backward propagation step. 
(≈1 line)\n gradients = rnn_cell_backward(da[:,:,t] + da_prevt, caches[t])\n # Retrieve derivatives from gradients (≈ 1 line)\n dxt, da_prevt, dWaxt, dWaat, dbat = gradients[\"dxt\"], gradients[\"da_prev\"], gradients[\"dWax\"], gradients[\"dWaa\"], gradients[\"dba\"]\n # Increment global derivatives w.r.t parameters by adding their derivative at time-step t (≈4 lines)\n dx[:, :, t] = dxt\n dWax += dWaxt\n dWaa += dWaat\n dba += dbat\n \n # Set da0 to the gradient of a which has been backpropagated through all time-steps (≈1 line) \n da0 = da_prevt\n ### END CODE HERE ###\n\n # Store the gradients in a python dictionary\n gradients = {\"dx\": dx, \"da0\": da0, \"dWax\": dWax, \"dWaa\": dWaa,\"dba\": dba}\n \n return gradients", "_____no_output_____" ], [ "np.random.seed(1)\nx = np.random.randn(3,10,4)\na0 = np.random.randn(5,10)\nWax = np.random.randn(5,3)\nWaa = np.random.randn(5,5)\nWya = np.random.randn(2,5)\nba = np.random.randn(5,1)\nby = np.random.randn(2,1)\nparameters = {\"Wax\": Wax, \"Waa\": Waa, \"Wya\": Wya, \"ba\": ba, \"by\": by}\na, y, caches = rnn_forward(x, a0, parameters)\nda = np.random.randn(5, 10, 4)\ngradients = rnn_backward(da, caches)\n\nprint(\"gradients[\\\"dx\\\"][1][2] =\", gradients[\"dx\"][1][2])\nprint(\"gradients[\\\"dx\\\"].shape =\", gradients[\"dx\"].shape)\nprint(\"gradients[\\\"da0\\\"][2][3] =\", gradients[\"da0\"][2][3])\nprint(\"gradients[\\\"da0\\\"].shape =\", gradients[\"da0\"].shape)\nprint(\"gradients[\\\"dWax\\\"][3][1] =\", gradients[\"dWax\"][3][1])\nprint(\"gradients[\\\"dWax\\\"].shape =\", gradients[\"dWax\"].shape)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"gradients[\\\"dWaa\\\"].shape =\", gradients[\"dWaa\"].shape)\nprint(\"gradients[\\\"dba\\\"][4] =\", gradients[\"dba\"][4])\nprint(\"gradients[\\\"dba\\\"].shape =\", gradients[\"dba\"].shape)", "gradients[\"dx\"][1][2] = [-2.07101689 -0.59255627 0.02466855 0.01483317]\ngradients[\"dx\"].shape = (3, 10, 4)\ngradients[\"da0\"][2][3] = -0.314942375127\ngradients[\"da0\"].shape = (5, 10)\ngradients[\"dWax\"][3][1] = 11.2641044965\ngradients[\"dWax\"].shape = (5, 3)\ngradients[\"dWaa\"][1][2] = 2.30333312658\ngradients[\"dWaa\"].shape = (5, 5)\ngradients[\"dba\"][4] = [-0.74747722]\ngradients[\"dba\"].shape = (5, 1)\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **gradients[\"dx\"][1][2]** =\n </td>\n <td>\n [-2.07101689 -0.59255627 0.02466855 0.01483317]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dx\"].shape** =\n </td>\n <td>\n (3, 10, 4)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"da0\"][2][3]** =\n </td>\n <td>\n -0.314942375127\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"da0\"].shape** =\n </td>\n <td>\n (5, 10)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWax\"][3][1]** =\n </td>\n <td>\n 11.2641044965\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWax\"].shape** =\n </td>\n <td>\n (5, 3)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWaa\"][1][2]** = \n </td>\n <td>\n 2.30333312658\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWaa\"].shape** =\n </td>\n <td>\n (5, 5)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dba\"][4]** = \n </td>\n <td>\n [-0.74747722]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dba\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n</table>", "_____no_output_____" ], [ "## 3.2 - LSTM backward pass", "_____no_output_____" ], [ "### 3.2.1 One Step backward\n\nThe LSTM backward pass is slighltly more complicated than the forward one. 
We have provided you with all the equations for the LSTM backward pass below. (If you enjoy calculus exercises feel free to try deriving these from scratch yourself.) \n\n### 3.2.2 gate derivatives\n\n$$d \\Gamma_o^{\\langle t \\rangle} = da_{next}*\\tanh(c_{next}) * \\Gamma_o^{\\langle t \\rangle}*(1-\\Gamma_o^{\\langle t \\rangle})\\tag{7}$$\n\n$$d\\tilde c^{\\langle t \\rangle} = dc_{next}*\\Gamma_u^{\\langle t \\rangle}+ \\Gamma_o^{\\langle t \\rangle} (1-\\tanh(c_{next})^2) * i_t * da_{next} * \\tilde c^{\\langle t \\rangle} * (1-\\tanh(\\tilde c)^2) \\tag{8}$$\n\n$$d\\Gamma_u^{\\langle t \\rangle} = dc_{next}*\\tilde c^{\\langle t \\rangle} + \\Gamma_o^{\\langle t \\rangle} (1-\\tanh(c_{next})^2) * \\tilde c^{\\langle t \\rangle} * da_{next}*\\Gamma_u^{\\langle t \\rangle}*(1-\\Gamma_u^{\\langle t \\rangle})\\tag{9}$$\n\n$$d\\Gamma_f^{\\langle t \\rangle} = dc_{next}*\\tilde c_{prev} + \\Gamma_o^{\\langle t \\rangle} (1-\\tanh(c_{next})^2) * c_{prev} * da_{next}*\\Gamma_f^{\\langle t \\rangle}*(1-\\Gamma_f^{\\langle t \\rangle})\\tag{10}$$\n\n### 3.2.3 parameter derivatives \n\n$$ dW_f = d\\Gamma_f^{\\langle t \\rangle} * \\begin{pmatrix} a_{prev} \\\\ x_t\\end{pmatrix}^T \\tag{11} $$\n$$ dW_u = d\\Gamma_u^{\\langle t \\rangle} * \\begin{pmatrix} a_{prev} \\\\ x_t\\end{pmatrix}^T \\tag{12} $$\n$$ dW_c = d\\tilde c^{\\langle t \\rangle} * \\begin{pmatrix} a_{prev} \\\\ x_t\\end{pmatrix}^T \\tag{13} $$\n$$ dW_o = d\\Gamma_o^{\\langle t \\rangle} * \\begin{pmatrix} a_{prev} \\\\ x_t\\end{pmatrix}^T \\tag{14}$$\n\nTo calculate $db_f, db_u, db_c, db_o$ you just need to sum across the horizontal (axis= 1) axis on $d\\Gamma_f^{\\langle t \\rangle}, d\\Gamma_u^{\\langle t \\rangle}, d\\tilde c^{\\langle t \\rangle}, d\\Gamma_o^{\\langle t \\rangle}$ respectively. Note that you should have the `keep_dims = True` option.\n\nFinally, you will compute the derivative with respect to the previous hidden state, previous memory state, and input.\n\n$$ da_{prev} = W_f^T*d\\Gamma_f^{\\langle t \\rangle} + W_u^T * d\\Gamma_u^{\\langle t \\rangle}+ W_c^T * d\\tilde c^{\\langle t \\rangle} + W_o^T * d\\Gamma_o^{\\langle t \\rangle} \\tag{15}$$\nHere, the weights for equations 13 are the first n_a, (i.e. $W_f = W_f[:n_a,:]$ etc...)\n\n$$ dc_{prev} = dc_{next}\\Gamma_f^{\\langle t \\rangle} + \\Gamma_o^{\\langle t \\rangle} * (1- \\tanh(c_{next})^2)*\\Gamma_f^{\\langle t \\rangle}*da_{next} \\tag{16}$$\n$$ dx^{\\langle t \\rangle} = W_f^T*d\\Gamma_f^{\\langle t \\rangle} + W_u^T * d\\Gamma_u^{\\langle t \\rangle}+ W_c^T * d\\tilde c_t + W_o^T * d\\Gamma_o^{\\langle t \\rangle}\\tag{17} $$\nwhere the weights for equation 15 are from n_a to the end, (i.e. $W_f = W_f[n_a:,:]$ etc...)\n\n**Exercise:** Implement `lstm_cell_backward` by implementing equations $7-17$ below. Good luck! :)", "_____no_output_____" ] ], [ [ "def lstm_cell_backward(da_next, dc_next, cache):\n \"\"\"\n Implement the backward pass for the LSTM-cell (single time-step).\n\n Arguments:\n da_next -- Gradients of next hidden state, of shape (n_a, m)\n dc_next -- Gradients of next cell state, of shape (n_a, m)\n cache -- cache storing information from the forward pass\n\n Returns:\n gradients -- python dictionary containing:\n dxt -- Gradient of input data at time-step t, of shape (n_x, m)\n da_prev -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m)\n dc_prev -- Gradient w.r.t. the previous memory state, of shape (n_a, m, T_x)\n dWf -- Gradient w.r.t. 
the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)\n dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)\n dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x)\n dWo -- Gradient w.r.t. the weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)\n dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1)\n dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1)\n dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1)\n dbo -- Gradient w.r.t. biases of the output gate, of shape (n_a, 1)\n \"\"\"\n\n # Retrieve information from \"cache\"\n (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) = cache\n \n ### START CODE HERE ###\n # Retrieve dimensions from xt's and a_next's shape (≈2 lines)\n n_x, m = xt.shape\n n_a, m = a_next.shape\n \n # Compute gates related derivatives, you can find their values can be found by looking carefully at equations (7) to (10) (≈4 lines)\n dot = da_next * np.tanh(c_next) * ot * (1 - ot)\n dcct = (dc_next * it + ot * (1 - np.square(np.tanh(c_next))) * it * da_next) * (1 - np.square(cct))\n dit = (dc_next * cct + ot * (1 - np.square(np.tanh(c_next))) * cct * da_next) * it * (1 - it)\n dft = (dc_next * c_prev + ot * (1 - np.square(np.tanh(c_next))) * c_prev * da_next) * ft * (1 - ft)\n\n # Compute parameters related derivatives. Use equations (11)-(14) (≈8 lines)\n temp = np.concatenate((a_prev, xt), axis=0).T\n dWf = np.dot(dft, temp)\n dWi = np.dot(dit, temp)\n dWc = np.dot(dcct, temp)\n dWo = np.dot(dot, temp)\n dbf = np.sum(dft, axis=1, keepdims=True)\n dbi = np.sum(dit, axis=1, keepdims=True)\n dbc = np.sum(dcct, axis=1, keepdims=True)\n dbo = np.sum(dot, axis=1, keepdims=True)\n\n # Compute derivatives w.r.t previous hidden state, previous memory state and input. Use equations (15)-(17). 
(≈3 lines)\n da_prev = np.dot(parameters['Wf'][:,:n_a].T, dft) + np.dot(parameters['Wi'][:,:n_a].T, dit) + np.dot(parameters['Wc'][:,:n_a].T, dcct) + np.dot(parameters['Wo'][:,:n_a].T, dot)\n dc_prev = dc_next * ft + ot * (1 - np.square(np.tanh(c_next))) * ft * da_next\n dxt = np.dot(parameters['Wf'][:,n_a:].T, dft) + np.dot(parameters['Wi'][:,n_a:].T, dit) + np.dot(parameters['Wc'][:,n_a:].T, dcct) + np.dot(parameters['Wo'][:,n_a:].T, dot)\n ### END CODE HERE ###\n \n # Save gradients in dictionary\n gradients = {\"dxt\": dxt, \"da_prev\": da_prev, \"dc_prev\": dc_prev, \"dWf\": dWf,\"dbf\": dbf, \"dWi\": dWi,\"dbi\": dbi,\n \"dWc\": dWc,\"dbc\": dbc, \"dWo\": dWo,\"dbo\": dbo}\n\n return gradients", "_____no_output_____" ], [ "np.random.seed(1)\nxt = np.random.randn(3,10)\na_prev = np.random.randn(5,10)\nc_prev = np.random.randn(5,10)\nWf = np.random.randn(5, 5+3)\nbf = np.random.randn(5,1)\nWi = np.random.randn(5, 5+3)\nbi = np.random.randn(5,1)\nWo = np.random.randn(5, 5+3)\nbo = np.random.randn(5,1)\nWc = np.random.randn(5, 5+3)\nbc = np.random.randn(5,1)\nWy = np.random.randn(2,5)\nby = np.random.randn(2,1)\n\nparameters = {\"Wf\": Wf, \"Wi\": Wi, \"Wo\": Wo, \"Wc\": Wc, \"Wy\": Wy, \"bf\": bf, \"bi\": bi, \"bo\": bo, \"bc\": bc, \"by\": by}\n\na_next, c_next, yt, cache = lstm_cell_forward(xt, a_prev, c_prev, parameters)\n\nda_next = np.random.randn(5,10)\ndc_next = np.random.randn(5,10)\ngradients = lstm_cell_backward(da_next, dc_next, cache)\nprint(\"gradients[\\\"dxt\\\"][1][2] =\", gradients[\"dxt\"][1][2])\nprint(\"gradients[\\\"dxt\\\"].shape =\", gradients[\"dxt\"].shape)\nprint(\"gradients[\\\"da_prev\\\"][2][3] =\", gradients[\"da_prev\"][2][3])\nprint(\"gradients[\\\"da_prev\\\"].shape =\", gradients[\"da_prev\"].shape)\nprint(\"gradients[\\\"dc_prev\\\"][2][3] =\", gradients[\"dc_prev\"][2][3])\nprint(\"gradients[\\\"dc_prev\\\"].shape =\", gradients[\"dc_prev\"].shape)\nprint(\"gradients[\\\"dWf\\\"][3][1] =\", gradients[\"dWf\"][3][1])\nprint(\"gradients[\\\"dWf\\\"].shape =\", gradients[\"dWf\"].shape)\nprint(\"gradients[\\\"dWi\\\"][1][2] =\", gradients[\"dWi\"][1][2])\nprint(\"gradients[\\\"dWi\\\"].shape =\", gradients[\"dWi\"].shape)\nprint(\"gradients[\\\"dWc\\\"][3][1] =\", gradients[\"dWc\"][3][1])\nprint(\"gradients[\\\"dWc\\\"].shape =\", gradients[\"dWc\"].shape)\nprint(\"gradients[\\\"dWo\\\"][1][2] =\", gradients[\"dWo\"][1][2])\nprint(\"gradients[\\\"dWo\\\"].shape =\", gradients[\"dWo\"].shape)\nprint(\"gradients[\\\"dbf\\\"][4] =\", gradients[\"dbf\"][4])\nprint(\"gradients[\\\"dbf\\\"].shape =\", gradients[\"dbf\"].shape)\nprint(\"gradients[\\\"dbi\\\"][4] =\", gradients[\"dbi\"][4])\nprint(\"gradients[\\\"dbi\\\"].shape =\", gradients[\"dbi\"].shape)\nprint(\"gradients[\\\"dbc\\\"][4] =\", gradients[\"dbc\"][4])\nprint(\"gradients[\\\"dbc\\\"].shape =\", gradients[\"dbc\"].shape)\nprint(\"gradients[\\\"dbo\\\"][4] =\", gradients[\"dbo\"][4])\nprint(\"gradients[\\\"dbo\\\"].shape =\", gradients[\"dbo\"].shape)", "gradients[\"dxt\"][1][2] = 3.23055911511\ngradients[\"dxt\"].shape = (3, 10)\ngradients[\"da_prev\"][2][3] = -0.0639621419711\ngradients[\"da_prev\"].shape = (5, 10)\ngradients[\"dc_prev\"][2][3] = 0.797522038797\ngradients[\"dc_prev\"].shape = (5, 10)\ngradients[\"dWf\"][3][1] = -0.147954838164\ngradients[\"dWf\"].shape = (5, 8)\ngradients[\"dWi\"][1][2] = 1.05749805523\ngradients[\"dWi\"].shape = (5, 8)\ngradients[\"dWc\"][3][1] = 2.30456216369\ngradients[\"dWc\"].shape = (5, 8)\ngradients[\"dWo\"][1][2] = 
0.331311595289\ngradients[\"dWo\"].shape = (5, 8)\ngradients[\"dbf\"][4] = [ 0.18864637]\ngradients[\"dbf\"].shape = (5, 1)\ngradients[\"dbi\"][4] = [-0.40142491]\ngradients[\"dbi\"].shape = (5, 1)\ngradients[\"dbc\"][4] = [ 0.25587763]\ngradients[\"dbc\"].shape = (5, 1)\ngradients[\"dbo\"][4] = [ 0.13893342]\ngradients[\"dbo\"].shape = (5, 1)\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **gradients[\"dxt\"][1][2]** =\n </td>\n <td>\n 3.23055911511\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dxt\"].shape** =\n </td>\n <td>\n (3, 10)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"da_prev\"][2][3]** =\n </td>\n <td>\n -0.0639621419711\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"da_prev\"].shape** =\n </td>\n <td>\n (5, 10)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dc_prev\"][2][3]** =\n </td>\n <td>\n 0.797522038797\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dc_prev\"].shape** =\n </td>\n <td>\n (5, 10)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWf\"][3][1]** = \n </td>\n <td>\n -0.147954838164\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWf\"].shape** =\n </td>\n <td>\n (5, 8)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWi\"][1][2]** = \n </td>\n <td>\n 1.05749805523\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWi\"].shape** = \n </td>\n <td>\n (5, 8)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWc\"][3][1]** = \n </td>\n <td>\n 2.30456216369\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWc\"].shape** = \n </td>\n <td>\n (5, 8)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWo\"][1][2]** = \n </td>\n <td>\n 0.331311595289\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWo\"].shape** = \n </td>\n <td>\n (5, 8)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbf\"][4]** = \n </td>\n <td>\n [ 0.18864637]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbf\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbi\"][4]** = \n </td>\n <td>\n [-0.40142491]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbi\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbc\"][4]** = \n </td>\n <td>\n [ 0.25587763]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbc\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbo\"][4]** = \n </td>\n <td>\n [ 0.13893342]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbo\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n</table>", "_____no_output_____" ], [ "### 3.3 Backward pass through the LSTM RNN\n\nThis part is very similar to the `rnn_backward` function you implemented above. You will first create variables of the same dimension as your return variables. You will then iterate over all the time steps starting from the end and call the one step function you implemented for LSTM at each iteration. You will then update the parameters by summing them individually. Finally return a dictionary with the new gradients. \n\n**Instructions**: Implement the `lstm_backward` function. Create a for loop starting from $T_x$ and going backward. For each step call `lstm_cell_backward` and update the your old gradients by adding the new gradients to them. 
Note that `dxt` is not updated but is stored.", "_____no_output_____" ] ], [ [ "def lstm_backward(da, caches):\n \n \"\"\"\n Implement the backward pass for the RNN with LSTM-cell (over a whole sequence).\n\n Arguments:\n da -- Gradients w.r.t the hidden states, numpy-array of shape (n_a, m, T_x)\n dc -- Gradients w.r.t the memory states, numpy-array of shape (n_a, m, T_x)\n caches -- cache storing information from the forward pass (lstm_forward)\n\n Returns:\n gradients -- python dictionary containing:\n dx -- Gradient of inputs, of shape (n_x, m, T_x)\n da0 -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m)\n dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)\n dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)\n dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x)\n dWo -- Gradient w.r.t. the weight matrix of the save gate, numpy array of shape (n_a, n_a + n_x)\n dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1)\n dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1)\n dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1)\n dbo -- Gradient w.r.t. biases of the save gate, of shape (n_a, 1)\n \"\"\"\n\n # Retrieve values from the first cache (t=1) of caches.\n (caches, x) = caches\n (a1, c1, a0, c0, f1, i1, cc1, o1, x1, parameters) = caches[0]\n \n ### START CODE HERE ###\n # Retrieve dimensions from da's and x1's shapes (≈2 lines)\n n_a, m, T_x = da.shape\n n_x, m = x1.shape\n \n # initialize the gradients with the right sizes (≈12 lines)\n dx = np.zeros((n_x, m, T_x))\n da0 = np.zeros((n_a, m))\n da_prevt = np.zeros((n_a, m))\n dc_prevt = np.zeros((n_a, m))\n dWf = np.zeros((n_a, n_a + n_x))\n dWi = np.zeros((n_a, n_a + n_x))\n dWc = np.zeros((n_a, n_a + n_x))\n dWo = np.zeros((n_a, n_a + n_x))\n dbf = np.zeros((n_a, 1))\n dbi = np.zeros((n_a, 1))\n dbc = np.zeros((n_a, 1))\n dbo = np.zeros((n_a, 1))\n \n # loop back over the whole sequence\n for t in reversed(range(T_x)):\n # Compute all gradients using lstm_cell_backward\n gradients = lstm_cell_backward(da[:,:,t] + da_prevt, dc_prevt, caches[t])\n # Store or add the gradient to the parameters' previous step's gradient\n dx[:,:,t] = gradients[\"dxt\"]\n dWf += gradients[\"dWf\"]\n dWi += gradients[\"dWi\"]\n dWc += gradients[\"dWc\"]\n dWo += gradients[\"dWo\"]\n dbf += gradients[\"dbf\"]\n dbi += gradients[\"dbi\"]\n dbc += gradients[\"dbc\"]\n dbo += gradients[\"dbo\"]\n # Set the first activation's gradient to the backpropagated gradient da_prev.\n da0 = gradients[\"da_prev\"]\n \n ### END CODE HERE ###\n\n # Store the gradients in a python dictionary\n gradients = {\"dx\": dx, \"da0\": da0, \"dWf\": dWf,\"dbf\": dbf, \"dWi\": dWi,\"dbi\": dbi,\n \"dWc\": dWc,\"dbc\": dbc, \"dWo\": dWo,\"dbo\": dbo}\n \n return gradients", "_____no_output_____" ], [ "np.random.seed(1)\nx = np.random.randn(3,10,7)\na0 = np.random.randn(5,10)\nWf = np.random.randn(5, 5+3)\nbf = np.random.randn(5,1)\nWi = np.random.randn(5, 5+3)\nbi = np.random.randn(5,1)\nWo = np.random.randn(5, 5+3)\nbo = np.random.randn(5,1)\nWc = np.random.randn(5, 5+3)\nbc = np.random.randn(5,1)\n\nparameters = {\"Wf\": Wf, \"Wi\": Wi, \"Wo\": Wo, \"Wc\": Wc, \"Wy\": Wy, \"bf\": bf, \"bi\": bi, \"bo\": bo, \"bc\": bc, \"by\": by}\n\na, y, c, caches = lstm_forward(x, a0, parameters)\n\nda = np.random.randn(5, 10, 4)\ngradients = lstm_backward(da, 
caches)\n\nprint(\"gradients[\\\"dx\\\"][1][2] =\", gradients[\"dx\"][1][2])\nprint(\"gradients[\\\"dx\\\"].shape =\", gradients[\"dx\"].shape)\nprint(\"gradients[\\\"da0\\\"][2][3] =\", gradients[\"da0\"][2][3])\nprint(\"gradients[\\\"da0\\\"].shape =\", gradients[\"da0\"].shape)\nprint(\"gradients[\\\"dWf\\\"][3][1] =\", gradients[\"dWf\"][3][1])\nprint(\"gradients[\\\"dWf\\\"].shape =\", gradients[\"dWf\"].shape)\nprint(\"gradients[\\\"dWi\\\"][1][2] =\", gradients[\"dWi\"][1][2])\nprint(\"gradients[\\\"dWi\\\"].shape =\", gradients[\"dWi\"].shape)\nprint(\"gradients[\\\"dWc\\\"][3][1] =\", gradients[\"dWc\"][3][1])\nprint(\"gradients[\\\"dWc\\\"].shape =\", gradients[\"dWc\"].shape)\nprint(\"gradients[\\\"dWo\\\"][1][2] =\", gradients[\"dWo\"][1][2])\nprint(\"gradients[\\\"dWo\\\"].shape =\", gradients[\"dWo\"].shape)\nprint(\"gradients[\\\"dbf\\\"][4] =\", gradients[\"dbf\"][4])\nprint(\"gradients[\\\"dbf\\\"].shape =\", gradients[\"dbf\"].shape)\nprint(\"gradients[\\\"dbi\\\"][4] =\", gradients[\"dbi\"][4])\nprint(\"gradients[\\\"dbi\\\"].shape =\", gradients[\"dbi\"].shape)\nprint(\"gradients[\\\"dbc\\\"][4] =\", gradients[\"dbc\"][4])\nprint(\"gradients[\\\"dbc\\\"].shape =\", gradients[\"dbc\"].shape)\nprint(\"gradients[\\\"dbo\\\"][4] =\", gradients[\"dbo\"][4])\nprint(\"gradients[\\\"dbo\\\"].shape =\", gradients[\"dbo\"].shape)", "gradients[\"dx\"][1][2] = [-0.00173313 0.08287442 -0.30545663 -0.43281115]\ngradients[\"dx\"].shape = (3, 10, 4)\ngradients[\"da0\"][2][3] = -0.095911501954\ngradients[\"da0\"].shape = (5, 10)\ngradients[\"dWf\"][3][1] = -0.0698198561274\ngradients[\"dWf\"].shape = (5, 8)\ngradients[\"dWi\"][1][2] = 0.102371820249\ngradients[\"dWi\"].shape = (5, 8)\ngradients[\"dWc\"][3][1] = -0.0624983794927\ngradients[\"dWc\"].shape = (5, 8)\ngradients[\"dWo\"][1][2] = 0.0484389131444\ngradients[\"dWo\"].shape = (5, 8)\ngradients[\"dbf\"][4] = [-0.0565788]\ngradients[\"dbf\"].shape = (5, 1)\ngradients[\"dbi\"][4] = [-0.15399065]\ngradients[\"dbi\"].shape = (5, 1)\ngradients[\"dbc\"][4] = [-0.29691142]\ngradients[\"dbc\"].shape = (5, 1)\ngradients[\"dbo\"][4] = [-0.29798344]\ngradients[\"dbo\"].shape = (5, 1)\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **gradients[\"dx\"][1][2]** =\n </td>\n <td>\n [-0.00173313 0.08287442 -0.30545663 -0.43281115]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dx\"].shape** =\n </td>\n <td>\n (3, 10, 4)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"da0\"][2][3]** =\n </td>\n <td>\n -0.095911501954\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"da0\"].shape** =\n </td>\n <td>\n (5, 10)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWf\"][3][1]** = \n </td>\n <td>\n -0.0698198561274\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWf\"].shape** =\n </td>\n <td>\n (5, 8)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWi\"][1][2]** = \n </td>\n <td>\n 0.102371820249\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWi\"].shape** = \n </td>\n <td>\n (5, 8)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWc\"][3][1]** = \n </td>\n <td>\n -0.0624983794927\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWc\"].shape** = \n </td>\n <td>\n (5, 8)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWo\"][1][2]** = \n </td>\n <td>\n 0.0484389131444\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dWo\"].shape** = \n </td>\n <td>\n (5, 8)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbf\"][4]** = \n </td>\n <td>\n [-0.0565788]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbf\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n <tr>\n <td>\n 
**gradients[\"dbi\"][4]** = \n </td>\n <td>\n [-0.06997391]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbi\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbc\"][4]** = \n </td>\n <td>\n [-0.27441821]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbc\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbo\"][4]** = \n </td>\n <td>\n [ 0.16532821]\n </td>\n </tr>\n <tr>\n <td>\n **gradients[\"dbo\"].shape** = \n </td>\n <td>\n (5, 1)\n </td>\n </tr>\n</table>", "_____no_output_____" ], [ "### Congratulations !\n\nCongratulations on completing this assignment. You now understand how recurrent neural networks work! \n\nLets go on to the next exercise, where you'll use an RNN to build a character-level language model.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
e7c31659b2ffaf03a1effcb59742a1dc6e747541
11,964
ipynb
Jupyter Notebook
how-to-use-azureml/azure-databricks/03.Build_model_runHistory.ipynb
keerthiadu/MachineLearningNotebooks
a01da5980fe1ba70ec490f9ad3375ba319b359ea
[ "MIT" ]
3
2019-06-06T14:13:22.000Z
2020-07-16T00:13:20.000Z
how-to-use-azureml/azure-databricks/03.Build_model_runHistory.ipynb
keerthiadu/MachineLearningNotebooks
a01da5980fe1ba70ec490f9ad3375ba319b359ea
[ "MIT" ]
null
null
null
how-to-use-azureml/azure-databricks/03.Build_model_runHistory.ipynb
keerthiadu/MachineLearningNotebooks
a01da5980fe1ba70ec490f9ad3375ba319b359ea
[ "MIT" ]
1
2020-10-01T09:27:20.000Z
2020-10-01T09:27:20.000Z
30.13602
117
0.563106
[ [ [ "Azure ML & Azure Databricks notebooks by Parashar Shah.\n\nCopyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the MIT License.", "_____no_output_____" ], [ "![04ACI](files/tables/image2.JPG)", "_____no_output_____" ], [ "#Model Building", "_____no_output_____" ] ], [ [ "import os\nimport pprint\nimport numpy as np\n\nfrom pyspark.ml import Pipeline, PipelineModel\nfrom pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler\nfrom pyspark.ml.classification import LogisticRegression\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.tuning import CrossValidator, ParamGridBuilder", "_____no_output_____" ], [ "import azureml.core\n\n# Check core SDK version number\nprint(\"SDK version:\", azureml.core.VERSION)", "_____no_output_____" ], [ "##TESTONLY\n# import auth creds from notebook parameters\ntenant = dbutils.widgets.get('tenant_id')\nusername = dbutils.widgets.get('service_principal_id')\npassword = dbutils.widgets.get('service_principal_password')\n\nauth = azureml.core.authentication.ServicePrincipalAuthentication(tenant, username, password)", "_____no_output_____" ], [ "# import the Workspace class and check the azureml SDK version\nfrom azureml.core import Workspace\n\nws = Workspace.from_config(auth = auth)\nprint('Workspace name: ' + ws.name, \n 'Azure region: ' + ws.location, \n 'Subscription id: ' + ws.subscription_id, \n 'Resource group: ' + ws.resource_group, sep = '\\n')", "_____no_output_____" ], [ "##PUBLISHONLY\n## import the Workspace class and check the azureml SDK version\n#from azureml.core import Workspace\n#\n#ws = Workspace.from_config()\n#print('Workspace name: ' + ws.name, \n# 'Azure region: ' + ws.location, \n# 'Subscription id: ' + ws.subscription_id, \n# 'Resource group: ' + ws.resource_group, sep = '\\n')", "_____no_output_____" ], [ "#get the train and test datasets\ntrain_data_path = \"AdultCensusIncomeTrain\"\ntest_data_path = \"AdultCensusIncomeTest\"\n\ntrain = spark.read.parquet(train_data_path)\ntest = spark.read.parquet(test_data_path)\n\nprint(\"train: ({}, {})\".format(train.count(), len(train.columns)))\nprint(\"test: ({}, {})\".format(test.count(), len(test.columns)))\n\ntrain.printSchema()", "_____no_output_____" ] ], [ [ "#Define Model", "_____no_output_____" ] ], [ [ "label = \"income\"\ndtypes = dict(train.dtypes)\ndtypes.pop(label)\n\nsi_xvars = []\nohe_xvars = []\nfeatureCols = []\nfor idx,key in enumerate(dtypes):\n if dtypes[key] == \"string\":\n featureCol = \"-\".join([key, \"encoded\"])\n featureCols.append(featureCol)\n \n tmpCol = \"-\".join([key, \"tmp\"])\n # string-index and one-hot encode the string column\n #https://spark.apache.org/docs/2.3.0/api/java/org/apache/spark/ml/feature/StringIndexer.html\n #handleInvalid: Param for how to handle invalid data (unseen labels or NULL values). \n #Options are 'skip' (filter out rows with invalid data), 'error' (throw an error), \n #or 'keep' (put invalid data in a special additional bucket, at index numLabels). 
Default: \"error\"\n si_xvars.append(StringIndexer(inputCol=key, outputCol=tmpCol, handleInvalid=\"skip\"))\n ohe_xvars.append(OneHotEncoder(inputCol=tmpCol, outputCol=featureCol))\n else:\n featureCols.append(key)\n\n# string-index the label column into a column named \"label\"\nsi_label = StringIndexer(inputCol=label, outputCol='label')\n\n# assemble the encoded feature columns in to a column named \"features\"\nassembler = VectorAssembler(inputCols=featureCols, outputCol=\"features\")", "_____no_output_____" ], [ "from azureml.core.run import Run\nfrom azureml.core.experiment import Experiment\nimport numpy as np\nimport os\nimport shutil\n\nmodel_name = \"AdultCensus_runHistory.mml\"\nmodel_dbfs = os.path.join(\"/dbfs\", model_name)\nrun_history_name = 'spark-ml-notebook'\n\n# start a training run by defining an experiment\nmyexperiment = Experiment(ws, \"Ignite_AI_Talk\")\nroot_run = myexperiment.start_logging()\n\n# Regularization Rates - \nregs = [0.0001, 0.001, 0.01, 0.1]\n \n# try a bunch of regularization rate in a Logistic Regression model\nfor reg in regs:\n print(\"Regularization rate: {}\".format(reg))\n # create a bunch of child runs\n with root_run.child_run(\"reg-\" + str(reg)) as run:\n # create a new Logistic Regression model.\n lr = LogisticRegression(regParam=reg)\n \n # put together the pipeline\n pipe = Pipeline(stages=[*si_xvars, *ohe_xvars, si_label, assembler, lr])\n\n # train the model\n model_p = pipe.fit(train)\n \n # make prediction\n pred = model_p.transform(test)\n \n # evaluate. note only 2 metrics are supported out of the box by Spark ML.\n bce = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction')\n au_roc = bce.setMetricName('areaUnderROC').evaluate(pred)\n au_prc = bce.setMetricName('areaUnderPR').evaluate(pred)\n\n print(\"Area under ROC: {}\".format(au_roc))\n print(\"Area Under PR: {}\".format(au_prc))\n \n # log reg, au_roc, au_prc and feature names in run history\n run.log(\"reg\", reg)\n run.log(\"au_roc\", au_roc)\n run.log(\"au_prc\", au_prc)\n run.log_list(\"columns\", train.columns)\n\n # save model\n model_p.write().overwrite().save(model_name)\n \n # upload the serialized model into run history record\n mdl, ext = model_name.split(\".\")\n model_zip = mdl + \".zip\"\n shutil.make_archive(mdl, 'zip', model_dbfs)\n run.upload_file(\"outputs/\" + model_name, model_zip) \n #run.upload_file(\"outputs/\" + model_name, path_or_stream = model_dbfs) #cannot deal with folders\n\n # now delete the serialized model from local folder since it is already uploaded to run history \n shutil.rmtree(model_dbfs)\n os.remove(model_zip)\n \n# Declare run completed\nroot_run.complete()\nroot_run_id = root_run.id\nprint (\"run id:\", root_run.id)", "_____no_output_____" ], [ "metrics = root_run.get_metrics(recursive=True)\nbest_run_id = max(metrics, key = lambda k: metrics[k]['au_roc'])\nprint(best_run_id, metrics[best_run_id]['au_roc'], metrics[best_run_id]['reg'])", "_____no_output_____" ], [ "#Get the best run\nchild_runs = {}\n\nfor r in root_run.get_children():\n child_runs[r.id] = r\n \nbest_run = child_runs[best_run_id]", "_____no_output_____" ], [ "#Download the model from the best run to a local folder\nbest_model_file_name = \"best_model.zip\"\nbest_run.download_file(name = 'outputs/' + model_name, output_file_path = best_model_file_name)", "_____no_output_____" ] ], [ [ "#Model Evaluation", "_____no_output_____" ] ], [ [ "##unzip the model to dbfs (as load() seems to require that) and load it.\nif os.path.isfile(model_dbfs) or 
os.path.isdir(model_dbfs):\n shutil.rmtree(model_dbfs)\nshutil.unpack_archive(best_model_file_name, model_dbfs)\n\nmodel_p_best = PipelineModel.load(model_name)", "_____no_output_____" ], [ "# make prediction\npred = model_p_best.transform(test)\noutput = pred[['hours_per_week','age','workclass','marital_status','income','prediction']]\ndisplay(output.limit(5))", "_____no_output_____" ], [ "# evaluate. note only 2 metrics are supported out of the box by Spark ML.\nbce = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction')\nau_roc = bce.setMetricName('areaUnderROC').evaluate(pred)\nau_prc = bce.setMetricName('areaUnderPR').evaluate(pred)\n\nprint(\"Area under ROC: {}\".format(au_roc))\nprint(\"Area Under PR: {}\".format(au_prc))", "_____no_output_____" ] ], [ [ "#Model Persistence", "_____no_output_____" ] ], [ [ "##NOTE: by default the model is saved to and loaded from /dbfs/ instead of cwd!\nmodel_p_best.write().overwrite().save(model_name)\nprint(\"saved model to {}\".format(model_dbfs))", "_____no_output_____" ], [ "%sh\n\nls -la /dbfs/AdultCensus_runHistory.mml/*", "_____no_output_____" ], [ "dbutils.notebook.exit(\"success\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7c321882c3719889c5330c2489acab2f373f90d
162,887
ipynb
Jupyter Notebook
Copie de test_tokens(1).ipynb
asma-miladi/DupBugRep-Scripts
925c8bf1256972ae1c7a111fd89a5786fe31c7f7
[ "MIT" ]
null
null
null
Copie de test_tokens(1).ipynb
asma-miladi/DupBugRep-Scripts
925c8bf1256972ae1c7a111fd89a5786fe31c7f7
[ "MIT" ]
null
null
null
Copie de test_tokens(1).ipynb
asma-miladi/DupBugRep-Scripts
925c8bf1256972ae1c7a111fd89a5786fe31c7f7
[ "MIT" ]
1
2021-12-18T13:45:56.000Z
2021-12-18T13:45:56.000Z
162,887
162,887
0.631063
[ [ [ "##**Installing the transformers library**\n\n", "_____no_output_____" ] ], [ [ "!cat /proc/meminfo\n!df -h", "_____no_output_____" ], [ "!pip install transformers", "Collecting transformers\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d8/f4/9f93f06dd2c57c7cd7aa515ffbf9fcfd8a084b92285732289f4a5696dd91/transformers-3.2.0-py3-none-any.whl (1.0MB)\n\u001b[K |████████████████████████████████| 1.0MB 9.0MB/s \n\u001b[?25hRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers) (2019.12.20)\nCollecting sacremoses\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/7d/34/09d19aff26edcc8eb2a01bed8e98f13a1537005d31e95233fd48216eed10/sacremoses-0.0.43.tar.gz (883kB)\n\u001b[K |████████████████████████████████| 890kB 35.5MB/s \n\u001b[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers) (20.4)\nCollecting tokenizers==0.8.1.rc2\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/80/83/8b9fccb9e48eeb575ee19179e2bdde0ee9a1904f97de5f02d19016b8804f/tokenizers-0.8.1rc2-cp36-cp36m-manylinux1_x86_64.whl (3.0MB)\n\u001b[K |████████████████████████████████| 3.0MB 46.8MB/s \n\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.6/dist-packages (from transformers) (4.41.1)\nRequirement already satisfied: dataclasses; python_version < \"3.7\" in /usr/local/lib/python3.6/dist-packages (from transformers) (0.7)\nRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from transformers) (2.23.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from transformers) (3.0.12)\nCollecting sentencepiece!=0.1.92\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d4/a4/d0a884c4300004a78cca907a6ff9a5e9fe4f090f5d95ab341c53d28cbc58/sentencepiece-0.1.91-cp36-cp36m-manylinux1_x86_64.whl (1.1MB)\n\u001b[K |████████████████████████████████| 1.1MB 45.9MB/s \n\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from transformers) (1.18.5)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (1.15.0)\nRequirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (7.1.2)\nRequirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (0.16.0)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->transformers) (2.4.7)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (2020.6.20)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (2.10)\nBuilding wheels for collected packages: sacremoses\n Building wheel for sacremoses (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for sacremoses: filename=sacremoses-0.0.43-cp36-none-any.whl size=893257 sha256=d602654d8ce06566bfa73540ba28fc0736d3471c05b2dc28adc49e037e943519\n Stored in directory: /root/.cache/pip/wheels/29/3c/fd/7ce5c3f0666dab31a50123635e6fb5e19ceb42ce38d4e58f45\nSuccessfully built sacremoses\nInstalling collected packages: sacremoses, tokenizers, sentencepiece, transformers\nSuccessfully installed sacremoses-0.0.43 sentencepiece-0.1.91 tokenizers-0.8.1rc2 transformers-3.2.0\n" ] ], [ [ "##**Importing the tools**", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score\nimport torch\nimport transformers as ppb\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nimport warnings\nimport re\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "##**Importing the dataset from Drive**", "_____no_output_____" ] ], [ [ "from google.colab import drive \ndrive.mount('/content/gdrive')", "Mounted at /content/gdrive\n" ], [ "df=pd.read_csv('gdrive/My Drive/Total_cleaned.csv',delimiter=';')", "_____no_output_____" ], [ "df1=pd.read_csv('gdrive/My Drive/Final_DUP.csv',delimiter=';')", "_____no_output_____" ], [ "df2=pd.read_csv('gdrive/My Drive/Final_NDUP.csv',delimiter=';')", "_____no_output_____" ], [ "df[3]", "_____no_output_____" ] ], [ [ "##**Loading the Pre-trained BERT model**", "_____no_output_____" ] ], [ [ "model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, 'bert-base-uncased')\ntokenizer = tokenizer_class.from_pretrained(pretrained_weights)\nmodel = model_class.from_pretrained(pretrained_weights)", "_____no_output_____" ], [ "model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')\n#tokenizer = ppb.DistilBertTokenizer.from_pretrained(distil_bert, do_lower_case=True, add_special_tokens=True, max_length=128, pad_to_max_length=True)\ntokenizer = tokenizer_class.from_pretrained(pretrained_weights)\nmodel = model_class.from_pretrained(pretrained_weights)\n#model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')", "_____no_output_____" ] ], [ [ "##**Lower case**", "_____no_output_____" ] ], [ [ "df[0]= df[0].str.lower()\ndf[1]= df[1].str.lower()\ndf[2]= df[2].str.lower()\ndf[3]= df[3].str.lower()\ndf[4]= df[4].str.lower()\ndf[5]= df[5].str.lower()", "_____no_output_____" ] ], [ [ "## **Remove Digits**", "_____no_output_____" ] ], [ [ "df[3] = df[3].str.replace(r'0', '')\ndf[3] = df[3].str.replace(r'1', '')\ndf[3] = df[3].str.replace(r'2', '')\ndf[3] = df[3].str.replace(r'3', '')\ndf[3] = df[3].str.replace(r'4', '')\ndf[3] = df[3].str.replace(r'5', '')\ndf[3] = df[3].str.replace(r'6', '')\ndf[3] = df[3].str.replace(r'7', '')\ndf[3] = df[3].str.replace(r'8', '')\ndf[3] = df[3].str.replace(r'9', '')", "_____no_output_____" ] ], [ [ "##**Remove special characters**", "_____no_output_____" ] ], [ [ "df[3] = df[3].str.replace(r'/', '')\ndf[3] = df[3].str.replace(r'@ ?', '')\ndf[3] = df[3].str.replace(r'!', '')\ndf[3] = df[3].str.replace(r'+', '')\ndf[3] = df[3].str.replace(r'-', '')\ndf[3] = df[3].str.replace(r'/', '')\ndf[3] = df[3].str.replace(r':', '')\ndf[3] = df[3].str.replace(r';', '')\ndf[3] = df[3].str.replace(r'>', '')\ndf[3] = df[3].str.replace(r'=', '')\ndf[3] 
= df[3].str.replace(r'<', '')\ndf[3] = df[3].str.replace(r'(', '')\ndf[3] = df[3].str.replace(r')', '')\ndf[3] = df[3].str.replace(r'#', '')\ndf[3] = df[3].str.replace(r'$', '')\ndf[3] = df[3].str.replace(r'&', '')\ndf[3] = df[3].str.replace(r'*', '')\ndf[3] = df[3].str.replace(r'%', '')\ndf[3] = df[3].str.replace(r'_', '')", "_____no_output_____" ] ], [ [ "##**Convert to String type**", "_____no_output_____" ] ], [ [ "df[3] = pd.Series(df[3], dtype=\"string\") # Pblm tokenize : \" Input is not valid ,Should be a string, a list/tuple of strings or a list/tuple of integers\"\ndf[2] = pd.Series(df[2], dtype=\"string\")\ndf[2] = df[2].astype(\"|S\")\ndf[2].str.decode(\"utf-8\")\ndf[3] = df[3].astype(\"|S\")\ndf[3].str.decode(\"utf-8\")", "_____no_output_____" ], [ "df[3].str.len()\n", "_____no_output_____" ] ], [ [ "##**Tokenization**", "_____no_output_____" ] ], [ [ "df.shape", "_____no_output_____" ], [ "batch_31=df1[:3000]\nbatch_32=df2[:3000]\ndf3 = pd.concat([batch_31,batch_32], ignore_index=True)\nbatch_41=df1[3000:6000]\nbatch_42=df2[3000:6000]\ndf4 = pd.concat([batch_41,batch_42], ignore_index=True)\nbatch_51=df1[6000:9000]\nbatch_52=df2[6000:9000]\ndf5 = pd.concat([batch_51,batch_52], ignore_index=True)\nbatch_61=df1[9000:12000]\nbatch_62=df2[9000:12000]\ndf6 = pd.concat([batch_61,batch_62], ignore_index=True)\nbatch_71=df1[12000:15000]\nbatch_72=df2[12000:15000]\ndf7 = pd.concat([batch_71,batch_72], ignore_index=True)\nbatch_81=df1[15000:18000]\nbatch_82=df2[15000:18000]\ndf8 = pd.concat([batch_81,batch_82], ignore_index=True)\nbatch_91=df1[18000:21000]\nbatch_92=df2[18000:21000]\ndf9 = pd.concat([batch_91,batch_92], ignore_index=True)\nbatch_101=df1[21000:]\nbatch_102=df2[21000:]\ndf10 = pd.concat([batch_101,batch_102], ignore_index=True)", "_____no_output_____" ], [ "batch_31=df1[:4000]\nbatch_32=df2[:4000]\ndf3 = pd.concat([batch_31,batch_32], ignore_index=True)\nbatch_41=df1[4000:8000]\nbatch_42=df2[4000:8000]\ndf4 = pd.concat([batch_41,batch_42], ignore_index=True)\nbatch_51=df1[8000:12000]\nbatch_52=df2[8000:12000]\ndf5 = pd.concat([batch_51,batch_52], ignore_index=True)\nbatch_61=df1[12000:16000]\nbatch_62=df2[12000:16000]\ndf6 = pd.concat([batch_61,batch_62], ignore_index=True)\nbatch_71=df1[16000:20000]\nbatch_72=df2[16000:20000]\ndf7 = pd.concat([batch_71,batch_72], ignore_index=True)\nbatch_81=df1[20000:24000]\nbatch_82=df2[20000:24000]\ndf8 = pd.concat([batch_81,batch_82], ignore_index=True)\n", "_____no_output_____" ], [ "def _get_segments3(tokens, max_seq_length):\n \"\"\"Segments: 0 for the first sequence, 1 for the second\"\"\"\n if len(tokens)>max_seq_length:\n raise IndexError(\"Token length more than max seq length!\")\n segments = []\n first_sep = False\n current_segment_id = 0 \n for token in tokens:\n segments.append(current_segment_id)\n #print(token)\n if token == 102:\n #if first_sep:\n #first_sep = False \n #else:\n current_segment_id = 1\n return segments + [0] * (max_seq_length - len(tokens))", "_____no_output_____" ] ], [ [ "#**df3**", "_____no_output_____" ] ], [ [ "pair3= df3['Title1'] + [\" [SEP] \"] + df3['Title2'] \ntokenized3 = pair3.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=512)))", "_____no_output_____" ], [ "max_len3 = 0 # padding all lists to the same size\nfor i in tokenized3.values:\n if len(i) > max_len3:\n max_len3 = len(i)", "_____no_output_____" ], [ "max_len3 =120\npadded3 = np.array([i + [0]*(max_len3-len(i)) for i in tokenized3.values])\n\nnp.array(padded3).shape ", "_____no_output_____" 
], [ "attention_mask3 = np.where(padded3 != 0, 1, 0)\nattention_mask3.shape\ninput_ids3 = torch.tensor(padded3) \nattention_mask3 = torch.tensor(attention_mask3)", "_____no_output_____" ], [ "input_segments3= np.array([_get_segments3(token, max_len3)for token in tokenized3.values])\n", "_____no_output_____" ], [ "token_type_ids3 = torch.tensor(input_segments3)\ninput_segments3 = torch.tensor(input_segments3)", "_____no_output_____" ], [ "with torch.no_grad():\n last_hidden_states3 = model(input_ids3, attention_mask=attention_mask3, token_type_ids=input_segments3) # <<< 600 rows only !!!", "_____no_output_____" ], [ "with torch.no_grad():\n last_hidden_states3 = model(input_ids3, attention_mask=attention_mask3)", "_____no_output_____" ], [ "features3 = last_hidden_states3[0][:,0,:].numpy()\nfeatures3", "_____no_output_____" ] ], [ [ "#**df4**", "_____no_output_____" ] ], [ [ "pair4= df4['Title1'] + [\" [SEP] \"] + df4['Title2'] \ntokenized4 = pair4.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=512)))", "_____no_output_____" ], [ "max_len4 = 0 # padding all lists to the same size\nfor i in tokenized4.values:\n if len(i) > max_len4:\n max_len4 = len(i)", "_____no_output_____" ], [ "max_len4 =120\npadded4 = np.array([i + [0]*(max_len4-len(i)) for i in tokenized4.values])\n\nnp.array(padded4).shape ", "_____no_output_____" ], [ "attention_mask4 = np.where(padded4 != 0, 1, 0)\nattention_mask4.shape\ninput_ids4 = torch.tensor(padded4) \nattention_mask4 = torch.tensor(attention_mask4)", "_____no_output_____" ], [ "def _get_segments3(tokens, max_seq_length):\n \"\"\"Segments: 0 for the first sequence, 1 for the second\"\"\"\n if len(tokens)>max_seq_length:\n raise IndexError(\"Token length more than max seq length!\")\n segments = []\n first_sep = False\n current_segment_id = 0 \n for token in tokens:\n segments.append(current_segment_id)\n #print(token)\n if token == 102:\n #if first_sep:\n #first_sep = False \n #else:\n current_segment_id = 1\n return segments + [0] * (max_seq_length - len(tokens))", "_____no_output_____" ], [ "input_segments4= np.array([_get_segments3(token, max_len4)for token in tokenized4.values])\n", "_____no_output_____" ], [ "token_type_ids4 = torch.tensor(input_segments4)\ninput_segments4 = torch.tensor(input_segments4)", "_____no_output_____" ], [ "with torch.no_grad():\n last_hidden_states4 = model(input_ids4, attention_mask=attention_mask4, token_type_ids=input_segments4) # <<< 600 rows only !!!", "_____no_output_____" ], [ "features4 = last_hidden_states4[0][:,0,:].numpy()\nfeatures4", "_____no_output_____" ] ], [ [ "#**df5**", "_____no_output_____" ] ], [ [ "pair5= df5['Title1'] + [\" [SEP] \"] + df5['Title2'] ", "_____no_output_____" ], [ "tokenized5 = pair5.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=512)))", "_____no_output_____" ], [ "pair5.shape\ntokenized5.shape", "_____no_output_____" ] ], [ [ "##**Padding**", "_____no_output_____" ] ], [ [ "max_len5 = 0 # padding all lists to the same size\nfor i in tokenized5.values:\n if len(i) > max_len5:\n max_len5 = len(i)", "_____no_output_____" ], [ "max_len5 =120", "_____no_output_____" ], [ "padded5 = np.array([i + [0]*(max_len5-len(i)) for i in tokenized5.values])\n\nnp.array(padded5).shape # Dimensions of the padded variable", "_____no_output_____" ] ], [ [ "##**Masking**", "_____no_output_____" ] ], [ [ "attention_mask5 = np.where(padded5 != 0, 1, 0)\nattention_mask5.shape\ninput_ids5 = torch.tensor(padded5) \nattention_mask5 = 
torch.tensor(attention_mask5)", "_____no_output_____" ], [ "input_ids[0] ######## TITLE 2", "_____no_output_____" ] ], [ [ "##**Running the `model()` function through BERT**", "_____no_output_____" ] ], [ [ "def _get_segments3(tokens, max_seq_length):\n \"\"\"Segments: 0 for the first sequence, 1 for the second\"\"\"\n if len(tokens)>max_seq_length:\n raise IndexError(\"Token length more than max seq length!\")\n segments = []\n first_sep = False\n current_segment_id = 0 \n for token in tokens:\n segments.append(current_segment_id)\n #print(token)\n if token == 102:\n #if first_sep:\n #first_sep = False \n #else:\n current_segment_id = 1\n return segments + [0] * (max_seq_length - len(tokens))", "_____no_output_____" ], [ "input_segments5= np.array([_get_segments3(token, max_len5)for token in tokenized5.values])", "_____no_output_____" ], [ "token_type_ids5 = torch.tensor(input_segments5)\ninput_segments5 = torch.tensor(input_segments5)", "_____no_output_____" ], [ "with torch.no_grad():\n last_hidden_states5 = model(input_ids5, attention_mask=attention_mask5, token_type_ids=input_segments5) # <<< 600 rows only !!!", "_____no_output_____" ] ], [ [ "##**Slicing the part of the output of BERT : [cls]**", "_____no_output_____" ] ], [ [ "features5 = last_hidden_states5[0][:,0,:].numpy()\nfeatures5", "_____no_output_____" ] ], [ [ "#**df6**", "_____no_output_____" ] ], [ [ "pair6= df6['Title1'] + [\" [SEP] \"] + df6['Title2'] \ntokenized6 = pair6.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=512)))", "_____no_output_____" ], [ "max_len6 = 0 # padding all lists to the same size\nfor i in tokenized6.values:\n if len(i) > max_len6:\n max_len6 = len(i)", "_____no_output_____" ], [ "max_len6=120", "_____no_output_____" ], [ "padded6 = np.array([i + [0]*(max_len6-len(i)) for i in tokenized6.values])\n\nnp.array(padded6).shape # Dimensions of the padded variable", "_____no_output_____" ], [ "attention_mask6 = np.where(padded6 != 0, 1, 0)\nattention_mask6.shape\ninput_ids6 = torch.tensor(padded6) \nattention_mask6 = torch.tensor(attention_mask6)", "_____no_output_____" ], [ "input_segments6= np.array([_get_segments3(token, max_len6)for token in tokenized6.values])", "_____no_output_____" ], [ "token_type_ids6 = torch.tensor(input_segments6)\ninput_segments6 = torch.tensor(input_segments6)", "_____no_output_____" ], [ "with torch.no_grad():\n last_hidden_states6 = model(input_ids6, attention_mask=attention_mask6, token_type_ids=input_segments6) # <<< 600 rows only !!!", "_____no_output_____" ], [ "features6 = last_hidden_states6[0][:,0,:].numpy()\nfeatures6", "_____no_output_____" ] ], [ [ "#**df7**", "_____no_output_____" ] ], [ [ "pair7= df7['Title1'] + [\" [SEP] \"] + df7['Title2'] \ntokenized7 = pair7.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=512)))", "_____no_output_____" ], [ "max_len7 = 0 # padding all lists to the same size\nfor i in tokenized7.values:\n if len(i) > max_len7:\n max_len7 = len(i)", "_____no_output_____" ], [ "max_len7=120", "_____no_output_____" ], [ "padded7 = np.array([i + [0]*(max_len7-len(i)) for i in tokenized7.values])\n\nnp.array(padded7).shape # Dimensions of the padded variable", "_____no_output_____" ], [ "attention_mask7 = np.where(padded7 != 0, 1, 0)\nattention_mask7.shape\ninput_ids7 = torch.tensor(padded7) \nattention_mask7 = torch.tensor(attention_mask7)", "_____no_output_____" ], [ "input_segments7= np.array([_get_segments3(token, max_len7)for token in tokenized7.values])", 
"_____no_output_____" ], [ "token_type_ids7 = torch.tensor(input_segments7)\ninput_segments7 = torch.tensor(input_segments7)", "_____no_output_____" ], [ "with torch.no_grad():\n last_hidden_states7 = model(input_ids7, attention_mask=attention_mask7, token_type_ids=input_segments7) # <<< 600 rows only !!!", "_____no_output_____" ], [ "features7 = last_hidden_states7[0][:,0,:].numpy()\nfeatures7", "_____no_output_____" ] ], [ [ "#**df8**", "_____no_output_____" ] ], [ [ "pair8= df8['Title1'] + [\" [SEP] \"] + df8['Title2'] \ntokenized8 = pair8.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=512)))", "_____no_output_____" ], [ "max_len8 = 0 # padding all lists to the same size\nfor i in tokenized8.values:\n if len(i) > max_len8:\n max_len8 = len(i)", "_____no_output_____" ], [ "max_len8=120\npadded8 = np.array([i + [0]*(max_len8-len(i)) for i in tokenized8.values])\n\nnp.array(padded8).shape # Dimensions of the padded variable", "_____no_output_____" ], [ "attention_mask8 = np.where(padded8 != 0, 1, 0)\nattention_mask8.shape\ninput_ids8 = torch.tensor(padded8) \nattention_mask8 = torch.tensor(attention_mask8)", "_____no_output_____" ], [ "input_segments8= np.array([_get_segments3(token, max_len8)for token in tokenized8.values])", "_____no_output_____" ], [ "token_type_ids8 = torch.tensor(input_segments8)\ninput_segments8 = torch.tensor(input_segments8)", "_____no_output_____" ], [ "with torch.no_grad():\n last_hidden_states8 = model(input_ids8, attention_mask=attention_mask8, token_type_ids=input_segments8) # <<< 600 rows only !!!", "_____no_output_____" ], [ "features8 = last_hidden_states8[0][:,0,:].numpy()\nfeatures8", "_____no_output_____" ] ], [ [ "#**df9**", "_____no_output_____" ] ], [ [ "pair9= df9['Title1'] + [\" [SEP] \"] + df9['Title2'] \ntokenized9 = pair9.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=512)))", "_____no_output_____" ], [ "max_len9 = 0 # padding all lists to the same size\nfor i in tokenized9.values:\n if len(i) > max_len9:\n max_len9 = len(i)", "_____no_output_____" ], [ "max_len9=120\npadded9 = np.array([i + [0]*(max_len9-len(i)) for i in tokenized9.values])\n\nnp.array(padded9).shape # Dimensions of the padded variable", "_____no_output_____" ], [ "attention_mask9 = np.where(padded9 != 0, 1, 0)\nattention_mask9.shape\ninput_ids9 = torch.tensor(padded9) \nattention_mask9 = torch.tensor(attention_mask9)", "_____no_output_____" ], [ "input_segments9= np.array([_get_segments3(token, max_len9)for token in tokenized9.values])", "_____no_output_____" ], [ "token_type_ids9 = torch.tensor(input_segments9)\ninput_segments9 = torch.tensor(input_segments9)", "_____no_output_____" ], [ "with torch.no_grad():\n last_hidden_states9 = model(input_ids9, attention_mask=attention_mask9, token_type_ids=input_segments9) ", "_____no_output_____" ], [ "features9 = last_hidden_states9[0][:,0,:].numpy()\nfeatures9", "_____no_output_____" ] ], [ [ "#**df10**", "_____no_output_____" ] ], [ [ "pair10= df10['Title1'] + [\" [SEP] \"] + df10['Title2'] \ntokenized10 = pair10.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=512)))", "_____no_output_____" ], [ "max_len10 = 0 # padding all lists to the same size\nfor i in tokenized10.values:\n if len(i) > max_len10:\n max_len10 = len(i)", "_____no_output_____" ], [ "max_len10=120\npadded10 = np.array([i + [0]*(max_len10-len(i)) for i in tokenized10.values])\n\nnp.array(padded10).shape # Dimensions of the padded variable", 
"_____no_output_____" ], [ "attention_mask10 = np.where(padded10 != 0, 1, 0)\nattention_mask10.shape\ninput_ids10 = torch.tensor(padded10) \nattention_mask10 = torch.tensor(attention_mask10)", "_____no_output_____" ], [ "input_segments10= np.array([_get_segments3(token, max_len10)for token in tokenized10.values])", "_____no_output_____" ], [ "token_type_ids10 = torch.tensor(input_segments10)\ninput_segments10 = torch.tensor(input_segments10)", "_____no_output_____" ], [ "with torch.no_grad():\n last_hidden_states10 = model(input_ids10, attention_mask=attention_mask10, token_type_ids=input_segments10) # <<< 600 rows only !!!", "_____no_output_____" ], [ "features10 = last_hidden_states10[0][:,0,:].numpy()\nfeatures10", "_____no_output_____" ] ], [ [ "#**Classification**", "_____no_output_____" ] ], [ [ "features=np.concatenate([features3,features4,features5,features6,features7])", "_____no_output_____" ], [ "features=features3", "_____no_output_____" ], [ "features.shape", "_____no_output_____" ], [ "Total = pd.concat([df3,df4,df5,df6,df7], ignore_index=True)", "_____no_output_____" ], [ "Total", "_____no_output_____" ], [ "labels =df3['Label']", "_____no_output_____" ], [ "labels =Total['Label']", "_____no_output_____" ], [ "labels", "_____no_output_____" ], [ "train_features, test_features, train_labels, test_labels = train_test_split(features, labels,test_size=0.2,random_state=42)", "_____no_output_____" ], [ "train_labels.shape\ntest_labels.shape\ntest_features.shape", "_____no_output_____" ], [ "#n_splits=2\n#cross_val_score=5\nparameters = {'C': np.linspace(0.0001, 100, 20)}\ngrid_search = GridSearchCV(LogisticRegression(), parameters, cv=5)\ngrid_search.fit(train_features, train_labels)\nprint('best parameters: ', grid_search.best_params_)\nprint('best scrores: ', grid_search.best_score_)", "best parameters: {'C': 36.842168421052634}\nbest scrores: 0.8771666666666667\n" ], [ "lr_clf = LogisticRegression(C=36.84)\nlr_clf.fit(train_features, train_labels)\n", "_____no_output_____" ], [ "lr_clf.score(test_features, test_labels)", "_____no_output_____" ], [ "scores = cross_val_score(lr_clf, test_features, test_labels, cv=10)\nprint(\"mean: {:.3f} (std: {:.3f})\".format(scores.mean(),\n scores.std()),\n end=\"\\n\\n\" )", "mean: 0.865 (std: 0.010)\n\n" ], [ "from sklearn.dummy import DummyClassifier\nclf = DummyClassifier()\n\nscores = cross_val_score(clf, train_features, train_labels)\nprint(\"Dummy classifier score: %0.3f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))", "Dummy classifier score: 0.500 (+/- 0.02)\n" ], [ "df.fillna(0) # Pblm de Nan ", "_____no_output_____" ], [ "#Import svm model\nfrom sklearn import svm\n\n#Create a svm Classifier\nclf = svm.SVC(kernel='linear') # Linear Kernel", "_____no_output_____" ], [ "clf.fit(train_features, train_labels)", "_____no_output_____" ], [ "y_pred = clf.predict(test_features)", "_____no_output_____" ], [ "y_pred", "_____no_output_____" ], [ "###############\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nfrom sklearn import metrics\n\n# Model Accuracy: how often is the classifier correct?\nprint(\"Accuracy:\",metrics.accuracy_score(test_features, y_pred))", "_____no_output_____" ] ], [ [ "#**Decision tree**", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier", "_____no_output_____" ], [ "#n_splits=2\n#cross_val_score=5\nparameters = {'C': max_leaf_nodes=0)}\ngrid_search = GridSearchCV(DecisionTreeClassifier(), parameters, cv=5)\ngrid_search.fit(train_features, train_labels)\nprint('best parameters: ', 
grid_search.best_params_)\nprint('best scrores: ', grid_search.best_score_)", "_____no_output_____" ], [ "clf = DecisionTreeClassifier(max_depth = 2, random_state = 0,criterion='gini')", "_____no_output_____" ], [ "clf.fit(train_features, train_labels)", "_____no_output_____" ], [ "# Predict for 1 observation\nclf.predict(test_features)\n# Predict for multiple observations\n#clf.predict(test_features[0:200])", "_____no_output_____" ], [ "# The score method returns the accuracy of the model\nscore = clf.score(test_features, test_labels)\nprint(score)", "0.8098333333333333\n" ], [ "scores = cross_val_score(clf, test_features, test_labels, cv=10)\nprint(\"mean: {:.3f} (std: {:.3f})\".format(scores.mean(),\n scores.std()),\n end=\"\\n\\n\" )", "mean: 0.814 (std: 0.012)\n\n" ] ], [ [ "#**SVM**", "_____no_output_____" ] ], [ [ "from sklearn.svm import SVC", "_____no_output_____" ], [ "#n_splits=2\n#cross_val_score=5\nparameters = {'C': np.linspace(0.0001, 100, 20)}\ngrid_search = GridSearchCV(SVC(), parameters, cv=5)\ngrid_search.fit(train_features, train_labels)\nprint('best parameters: ', grid_search.best_params_)\nprint('best scrores: ', grid_search.best_score_)", "_____no_output_____" ], [ "\nsvclassifier = SVC(kernel='linear')\nsvclassifier.fit(train_features, train_labels)", "_____no_output_____" ], [ "y_pred = svclassifier.predict(test_features)", "_____no_output_____" ], [ "from sklearn.metrics import classification_report, confusion_matrix\nprint(confusion_matrix(test_labels,y_pred))\nprint(classification_report(test_labels,y_pred))", "[[2767 238]\n [ 396 2599]]\n precision recall f1-score support\n\n 0 0.87 0.92 0.90 3005\n 1 0.92 0.87 0.89 2995\n\n accuracy 0.89 6000\n macro avg 0.90 0.89 0.89 6000\nweighted avg 0.90 0.89 0.89 6000\n\n" ], [ "param_grid = {'C':[1,10,100,1000],'gamma':[1,0.1,0.001,0.0001], 'kernel':['linear','rbf']}", "_____no_output_____" ], [ "grid = GridSearchCV(SVC(),param_grid,refit = True, verbose=2)", "_____no_output_____" ], [ "grid.fit(train_features,train_labels)", "Fitting 5 folds for each of 32 candidates, totalling 160 fits\n[CV] C=1, gamma=1, kernel=linear .....................................\n" ], [ "grid.best_params_", "_____no_output_____" ], [ "predic = grid.predict(test_features)", "_____no_output_____" ], [ "print(classification_report(test_labels,predic))\nprint(confusion_matrix(test_labels, predic))", "_____no_output_____" ] ], [ [ "#Cross_Val_SVC", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import cross_val_score", "_____no_output_____" ], [ "from sklearn import svm", "_____no_output_____" ], [ "clf = svm.SVC(kernel='linear', C=36.84)", "_____no_output_____" ], [ "scores = cross_val_score(clf, test_labels, y_pred, cv=5)", "_____no_output_____" ], [ "scores = cross_val_score(clf, test_features, test_labels, cv=10)\nprint(\"mean: {:.3f} (std: {:.3f})\".format(scores.mean(),\n scores.std()),\n end=\"\\n\\n\" )", "mean: 0.850 (std: 0.011)\n\n" ] ], [ [ "#**MLP Best params**", "_____no_output_____" ] ], [ [ "from sklearn.neural_network import MLPClassifier\nmlp = MLPClassifier(max_iter=100)\nfrom sklearn.datasets import make_classification", "_____no_output_____" ], [ "parameter_space = {\n 'hidden_layer_sizes': [(50,50,50), (50,100,50), (100,)],\n 'activation': ['tanh', 'relu'],\n 'solver': ['sgd', 'adam'],\n 'alpha': [0.0001, 0.05],\n 'learning_rate': ['constant','adaptive'],\n}", "_____no_output_____" ], [ "from sklearn.model_selection import GridSearchCV\n\nclf = GridSearchCV(mlp, parameter_space, n_jobs=-1, 
cv=3)\nclf.fit(train_features, train_labels)", "_____no_output_____" ], [ "# Best paramete set\nprint('Best parameters found:\\n', clf.best_params_)\n\n# All results\nmeans = clf.cv_results_['mean_test_score']\nstds = clf.cv_results_['std_test_score']\nfor mean, std, params in zip(means, stds, clf.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\" % (mean, std * 2, params))\n", "Best parameters found:\n {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (100,), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.878 (+/-0.004) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.876 (+/-0.005) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'constant', 'solver': 'adam'}\n0.877 (+/-0.005) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.876 (+/-0.005) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.878 (+/-0.004) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.874 (+/-0.006) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'constant', 'solver': 'adam'}\n0.877 (+/-0.005) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.877 (+/-0.003) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.877 (+/-0.006) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (100,), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.890 (+/-0.003) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (100,), 'learning_rate': 'constant', 'solver': 'adam'}\n0.876 (+/-0.006) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (100,), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.888 (+/-0.011) for {'activation': 'tanh', 'alpha': 0.0001, 'hidden_layer_sizes': (100,), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.878 (+/-0.005) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.871 (+/-0.015) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'constant', 'solver': 'adam'}\n0.878 (+/-0.005) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.878 (+/-0.002) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.878 (+/-0.004) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.873 (+/-0.003) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'constant', 'solver': 'adam'}\n0.879 (+/-0.004) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.881 (+/-0.004) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.876 (+/-0.002) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (100,), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.882 (+/-0.007) for 
{'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (100,), 'learning_rate': 'constant', 'solver': 'adam'}\n0.875 (+/-0.002) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (100,), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.890 (+/-0.004) for {'activation': 'tanh', 'alpha': 0.05, 'hidden_layer_sizes': (100,), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.878 (+/-0.004) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.871 (+/-0.007) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'constant', 'solver': 'adam'}\n0.876 (+/-0.004) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.870 (+/-0.008) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.876 (+/-0.009) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.877 (+/-0.002) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'constant', 'solver': 'adam'}\n0.878 (+/-0.003) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.874 (+/-0.008) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.876 (+/-0.002) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (100,), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.882 (+/-0.002) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (100,), 'learning_rate': 'constant', 'solver': 'adam'}\n0.875 (+/-0.002) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (100,), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.880 (+/-0.003) for {'activation': 'relu', 'alpha': 0.0001, 'hidden_layer_sizes': (100,), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.879 (+/-0.003) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.875 (+/-0.006) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'constant', 'solver': 'adam'}\n0.878 (+/-0.007) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.877 (+/-0.006) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (50, 50, 50), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.878 (+/-0.004) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.876 (+/-0.005) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'constant', 'solver': 'adam'}\n0.878 (+/-0.004) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.859 (+/-0.012) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (50, 100, 50), 'learning_rate': 'adaptive', 'solver': 'adam'}\n0.875 (+/-0.003) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (100,), 'learning_rate': 'constant', 'solver': 'sgd'}\n0.888 (+/-0.005) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (100,), 'learning_rate': 'constant', 'solver': 'adam'}\n0.875 (+/-0.004) for 
{'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (100,), 'learning_rate': 'adaptive', 'solver': 'sgd'}\n0.885 (+/-0.007) for {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (100,), 'learning_rate': 'adaptive', 'solver': 'adam'}\n" ], [ "y_true, y_pred = test_labels , clf.predict(test_features)\n\nfrom sklearn.metrics import classification_report, confusion_matrix\nprint('Results on the test set:')\nprint(classification_report(y_true, y_pred))\nprint(confusion_matrix(y_true, y_pred))", "Results on the test set:\n precision recall f1-score support\n\n 0 0.90 0.89 0.90 3005\n 1 0.89 0.91 0.90 2995\n\n accuracy 0.90 6000\n macro avg 0.90 0.90 0.90 6000\nweighted avg 0.90 0.90 0.90 6000\n\n[[2667 338]\n [ 280 2715]]\n" ], [ "clf = MLPClassifier(random_state=1, max_iter=300).fit(train_features, train_labels)\nclf.predict_proba(test_features[:1])", "_____no_output_____" ], [ "clf.predict(test_features[:1000, :])", "_____no_output_____" ], [ "clf.score(test_features, test_labels)", "_____no_output_____" ], [ "from sklearn.model_selection import cross_val_score", "_____no_output_____" ], [ "clf = MLPClassifier.mlp(kernel='linear')", "_____no_output_____" ], [ "scores = cross_val_score(clf, test_labels, y_pred, cv=5)", "_____no_output_____" ], [ "scores = cross_val_score(clf, test_features, test_labels, cv=10)\nprint(\"mean: {:.3f} (std: {:.3f})\".format(scores.mean(),\n scores.std()),\n end=\"\\n\\n\" )", "mean: 0.872 (std: 0.014)\n\n" ] ], [ [ "#**Random Forest**", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nregressor = RandomForestRegressor(n_estimators=20, random_state=0)\nregressor.fit(train_features, train_labels)\ny_pred1 = regressor.predict(test_features)", "_____no_output_____" ], [ "y_pred", "_____no_output_____" ], [ "from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\nprint(confusion_matrix(test_labels,y_pred))\nprint(classification_report(test_features,test_labels))\nprint(accuracy_score(test_features, test_labels))", "[[616 73]\n [ 86 625]]\n" ] ], [ [ "#**Naive Bayes**", "_____no_output_____" ], [ "#Gaussian", "_____no_output_____" ] ], [ [ "from sklearn.naive_bayes import GaussianNB\ngnb = GaussianNB()\ngnb.fit(train_features, train_labels)", "_____no_output_____" ], [ "y_pred = gnb.predict(test_features)", "_____no_output_____" ], [ "from sklearn import metrics\nprint(\"Accuracy:\",metrics.accuracy_score(test_labels, y_pred))", "Accuracy: 0.8358333333333333\n" ] ], [ [ "*Cross Validation*", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ], [ "print(\"Cross Validation:\",cross_val_score(gnb, digits.data, digits.target, scoring='accuracy', cv=10).mean())", "_____no_output_____" ] ], [ [ "#Bernoulli:", "_____no_output_____" ] ], [ [ "from sklearn.naive_bayes import BernoulliNB\nbnb = BernoulliNB(binarize=0.0)\nbnb.fit(train_features, train_labels)", "_____no_output_____" ], [ "print(\"Score: \",bnb.score(test_features, test_labels))", "Score: 0.829\n" ] ], [ [ "*Cross Validation*", "_____no_output_____" ] ], [ [ "print(\"Cross Validation:\",cross_val_score(bnb, digits.data, digits.target, scoring='accuracy', cv=10).mean())", "_____no_output_____" ] ], [ [ "#Multinomial", "_____no_output_____" ] ], [ [ "from sklearn.naive_bayes import MultinomialNB\nmnb = MultinomialNB()\nmnb.fit(train_features, train_labels)\nmnb.score(test_features, test_labels)", "_____no_output_____" ], [ "mnb = MultinomialNB()\ncross_val_score(mnb, digits.data, digits.target, scoring='accuracy', 
cv=10).mean()\n", "_____no_output_____" ] ], [ [ "#Best params SVC", "_____no_output_____" ] ], [ [ "from sklearn.svm import SVC", "_____no_output_____" ], [ "model = SVC()", "_____no_output_____" ], [ "model.fit(train_features, train_labels)", "_____no_output_____" ], [ "prediction = model.predict(test_features)", "_____no_output_____" ], [ "from sklearn.metrics import classification_report, confusion_matrix\nprint(classification_report(test_labels,prediction))\nprint(confusion_matrix(test_labels, prediction))", " precision recall f1-score support\n\n 0 0.82 0.94 0.88 587\n 1 0.93 0.81 0.87 613\n\n accuracy 0.87 1200\n macro avg 0.88 0.87 0.87 1200\nweighted avg 0.88 0.87 0.87 1200\n\n[[551 36]\n [117 496]]\n" ], [ "param_grid = {'C':[1,10,100,1000],'gamma':[1,0.1,0.001,0.0001], 'kernel':['linear','rbf']}\n", "_____no_output_____" ], [ "grid = GridSearchCV(SVC(),param_grid,refit = True, verbose=2)", "_____no_output_____" ], [ "grid.fit(train_features,train_labels)", "Fitting 5 folds for each of 32 candidates, totalling 160 fits\n[CV] C=1, gamma=1, kernel=linear .....................................\n" ], [ "grid.best_params_", "_____no_output_____" ], [ "predic = grid.predict(test_features)", "_____no_output_____" ], [ "print(classification_report(test_labels,predic))\nprint(confusion_matrix(test_labels, predic))", " precision recall f1-score support\n\n 0 0.86 0.95 0.90 587\n 1 0.95 0.85 0.90 613\n\n accuracy 0.90 1200\n macro avg 0.91 0.90 0.90 1200\nweighted avg 0.91 0.90 0.90 1200\n\n[[558 29]\n [ 89 524]]\n" ] ], [ [ "#**Random Forest Best params**", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestClassifier\n\nrfc=RandomForestClassifier(random_state=42)", "_____no_output_____" ], [ "param_grid = { \n 'n_estimators': [200, 500],\n 'max_features': ['auto', 'sqrt', 'log2'],\n 'max_depth' : [4,5,6,7,8],\n 'criterion' :['gini', 'entropy']\n}", "_____no_output_____" ], [ "CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5)\nCV_rfc.fit(train_features, train_labels)", "_____no_output_____" ], [ "CV_rfc.best_params_", "_____no_output_____" ], [ "rfc1=RandomForestClassifier(random_state=42, max_features='auto', n_estimators= 200, max_depth=8, criterion='gini')", "_____no_output_____" ], [ "rfc1.fit(train_features, train_labels)", "_____no_output_____" ], [ "\npred=rfc1.predict(test_features)", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\nprint(\"Accuracy for Random Forest on CV data: \",accuracy_score(test_labels,pred))", "Accuracy for Random Forest on CV data: 0.8441666666666666\n" ], [ "from dnn_classifier import DNNClassifier", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c324abdbffba2815f21a21216b1c8ba42b1eba
34,166
ipynb
Jupyter Notebook
08-machine_learning_jupyter/.ipynb_checkpoints/stacking-checkpoint.ipynb
iproduct/coulse-ml
65577fd4202630d3d5cb6333ddc51cede750fb5a
[ "Apache-2.0" ]
1
2020-10-02T15:48:42.000Z
2020-10-02T15:48:42.000Z
08-machine_learning_jupyter/.ipynb_checkpoints/stacking-checkpoint.ipynb
iproduct/coulse-ml
65577fd4202630d3d5cb6333ddc51cede750fb5a
[ "Apache-2.0" ]
null
null
null
08-machine_learning_jupyter/.ipynb_checkpoints/stacking-checkpoint.ipynb
iproduct/coulse-ml
65577fd4202630d3d5cb6333ddc51cede750fb5a
[ "Apache-2.0" ]
null
null
null
187.725275
15,568
0.900252
[ [ [ "from numpy import mean\nfrom numpy import std\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import StackingClassifier\nfrom matplotlib import pyplot\nfrom sklearn.datasets import load_wine,load_iris\nfrom matplotlib.pyplot import figure\nfigure(num=2, figsize=(16, 12), dpi=80, facecolor='w', edgecolor='k')\n\n\n\n# get a stacking ensemble of models\ndef get_stacking():\n # define the base models\n level0 = list()\n level0.append(('lr', LogisticRegression()))\n level0.append(('knn', KNeighborsClassifier()))\n level0.append(('cart', DecisionTreeClassifier()))\n level0.append(('svm', SVC()))\n level0.append(('bayes', GaussianNB()))\n # define meta learner model\n level1 = LogisticRegression()\n # define the stacking ensemble\n model = StackingClassifier(estimators=level0, final_estimator=level1, cv=5)\n return model", "_____no_output_____" ], [ "# get a list of models to evaluate\ndef get_models():\n models = dict()\n models['LogisticRegression'] = LogisticRegression()\n models['KNeighborsClassifier'] = KNeighborsClassifier()\n models['Decision tree'] = DecisionTreeClassifier()\n models['svm'] = SVC()\n models['GaussianNB'] = GaussianNB()\n models['stacking'] = get_stacking()\n return models\n\n# evaluate a give model using cross-validation\ndef evaluate_model(model):\n cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\n scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')\n scores1 = cross_val_score(model, X1, y1, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')\n return scores,scores1", "_____no_output_____" ], [ "# define dataset\nX,y = load_wine().data,load_wine().target\nX1,y1= load_iris().data,load_iris().target\n# get the models to evaluate\nmodels = get_models()\n# evaluate the models and store results\nresults, names, results1 = list(), list(),list()\nfor name, model in models.items():\n scores,scores1= evaluate_model(model)\n results.append(scores)\n results1.append(scores1)\n names.append(name)\n print('>%s -> %.3f (%.3f)---Wine dataset' % (name, mean(scores), std(scores)))\n print('>%s -> %.3f (%.3f)---Iris dataset' % (name, mean(scores1), std(scores1)))\n# plot model performance for comparison\npyplot.rcParams[\"figure.figsize\"] = (15,6)\npyplot.boxplot(results, labels=[s+\"-wine\" for s in names], showmeans=True)\npyplot.show()\npyplot.boxplot(results1, labels=[s+\"-iris\" for s in names], showmeans=True)\npyplot.show()", ">LogisticRegression -> 0.950 (0.055)---Wine dataset\n>LogisticRegression -> 0.964 (0.041)---Iris dataset\n>KNeighborsClassifier -> 0.710 (0.094)---Wine dataset\n>KNeighborsClassifier -> 0.964 (0.037)---Iris dataset\n>Decision tree -> 0.875 (0.086)---Wine dataset\n>Decision tree -> 0.949 (0.056)---Iris dataset\n>svm -> 0.687 (0.096)---Wine dataset\n>svm -> 0.964 (0.045)---Iris dataset\n>GaussianNB -> 0.978 (0.037)---Wine dataset\n>GaussianNB -> 0.956 (0.047)---Iris dataset\n>stacking -> 0.968 (0.043)---Wine dataset\n>stacking -> 0.964 (0.037)---Iris dataset\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
e7c334ed584ea986d4c334560f3a1429842b132d
105,398
ipynb
Jupyter Notebook
module4-logistic-regression/Logistic_Regression_Assignment.ipynb
afroman32/DS-Unit-2-Linear-Models
d8c765ae5c1394be056fd2655029eec058058bf2
[ "MIT" ]
null
null
null
module4-logistic-regression/Logistic_Regression_Assignment.ipynb
afroman32/DS-Unit-2-Linear-Models
d8c765ae5c1394be056fd2655029eec058058bf2
[ "MIT" ]
1
2020-03-18T03:17:52.000Z
2020-03-18T03:17:52.000Z
module4-logistic-regression/Logistic_Regression_Assignment.ipynb
afroman32/DS-Unit-2-Linear-Models
d8c765ae5c1394be056fd2655029eec058058bf2
[ "MIT" ]
null
null
null
36.045828
290
0.258231
[ [ [ "<a href=\"https://colab.research.google.com/github/afroman32/DS-Unit-2-Linear-Models/blob/master/module4-logistic-regression/Logistic_Regression_Assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Lambda School Data Science\n\n*Unit 2, Sprint 1, Module 4*\n\n---", "_____no_output_____" ], [ "# Logistic Regression\n\n\n## Assignment 🌯\n\nYou'll use a [**dataset of 400+ burrito reviews**](https://srcole.github.io/100burritos/). How accurately can you predict whether a burrito is rated 'Great'?\n\n> We have developed a 10-dimensional system for rating the burritos in San Diego. ... Generate models for what makes a burrito great and investigate correlations in its dimensions.\n\n- [ ] Do train/validate/test split. Train on reviews from 2016 & earlier. Validate on 2017. Test on 2018 & later.\n- [ ] Begin with baselines for classification.\n- [ ] Use scikit-learn for logistic regression.\n- [ ] Get your model's validation accuracy. (Multiple times if you try multiple iterations.)\n- [ ] Get your model's test accuracy. (One time, at the end.)\n- [ ] Commit your notebook to your fork of the GitHub repo.\n\n\n## Stretch Goals\n\n- [ ] Add your own stretch goal(s) !\n- [ ] Make exploratory visualizations.\n- [ ] Do one-hot encoding.\n- [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html).\n- [ ] Get and plot your coefficients.\n- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).", "_____no_output_____" ], [ "# **Load Data**", "_____no_output_____" ] ], [ [ "%%capture\nimport sys\n\n# If you're on Colab:\nif 'google.colab' in sys.modules:\n DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/'\n !pip install category_encoders==2.*\n\n# If you're working locally:\nelse:\n DATA_PATH = '../data/'", "_____no_output_____" ], [ "# Load data downloaded from https://srcole.github.io/100burritos/\nimport pandas as pd\ndf = pd.read_csv(DATA_PATH+'burritos/burritos.csv')", "_____no_output_____" ], [ "# Derive binary classification target:\n# We define a 'Great' burrito as having an\n# overall rating of 4 or higher, on a 5 point scale.\n# Drop unrated burritos.\ndf = df.dropna(subset=['overall'])\ndf['Great'] = df['overall'] >= 4", "_____no_output_____" ], [ "# Clean/combine the Burrito categories\ndf['Burrito'] = df['Burrito'].str.lower()\n\ncalifornia = df['Burrito'].str.contains('california')\nasada = df['Burrito'].str.contains('asada')\nsurf = df['Burrito'].str.contains('surf')\ncarnitas = df['Burrito'].str.contains('carnitas')\n\ndf.loc[california, 'Burrito'] = 'California'\ndf.loc[asada, 'Burrito'] = 'Asada'\ndf.loc[surf, 'Burrito'] = 'Surf & Turf'\ndf.loc[carnitas, 'Burrito'] = 'Carnitas'\ndf.loc[~california & ~asada & ~surf & ~carnitas, 'Burrito'] = 'Other'", "_____no_output_____" ], [ "# Drop some high cardinality categoricals\ndf = df.drop(columns=['Notes', 'Location', 'Reviewer', 'Address', 'URL', 'Neighborhood'])", "_____no_output_____" ], [ "# Drop some columns to prevent \"leakage\"\ndf = df.drop(columns=['Rec', 'overall'])", "_____no_output_____" ] ], [ [ "#**Train, Val, Test split**", "_____no_output_____" ] ], [ [ "df.shape", "_____no_output_____" ], [ "# reset index because it skipped a couple of numbers\ndf.reset_index(inplace = True, drop = True)\ndf.head(174)", "_____no_output_____" ], [ "# convert Great column to 1 for True and 0 for False\nkey = {True: 1, 
False:0}\ndf['Great'].replace(key, inplace = True)\n\n# convert date column to datetime\ndf['Date'] = pd.to_datetime(df['Date'])", "_____no_output_____" ], [ "df.head(20)", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "train = pd.DataFrame()\nval = pd.DataFrame()\ntest = pd.DataFrame()\n\n# train, val, test split\nfor i in range(0, df.shape[0]):\n\n if df['Date'][i].year <= 2016:\n train = train.append(pd.DataFrame(df.loc[i]).T)\n \n \n elif df['Date'][i].year == 2017:\n val = val.append(pd.DataFrame(df.loc[i]).T)\n\n else:\n test = test.append(pd.DataFrame(df.loc[i]).T)\n\nprint(train.shape, val.shape, test.shape)", "(298, 59) (85, 59) (38, 59)\n" ], [ "# check to make sure the split is correct\nprint(train['Date'].describe(), '\\n')\nprint(val['Date'].describe(), '\\n')\nprint(test['Date'].describe())", "count 298\nunique 110\ntop 2016-08-30 00:00:00\nfreq 29\nfirst 2011-05-16 00:00:00\nlast 2016-12-15 00:00:00\nName: Date, dtype: object \n\ncount 85\nunique 42\ntop 2017-04-07 00:00:00\nfreq 6\nfirst 2017-01-04 00:00:00\nlast 2017-12-29 00:00:00\nName: Date, dtype: object \n\ncount 38\nunique 17\ntop 2019-08-27 00:00:00\nfreq 9\nfirst 2018-01-02 00:00:00\nlast 2026-04-25 00:00:00\nName: Date, dtype: object\n" ] ], [ [ "#**Baseline**", "_____no_output_____" ] ], [ [ "target = 'Great'\nfeatures = ['Yelp', 'Google', 'Cost', 'Hunger', 'Tortilla', 'Temp', 'Meat', \n 'Fillings', 'Meat:filling', 'Uniformity', 'Salsa', 'Synergy', 'Wrap']\n\nX_train = train[features]\ny_train = train[target].astype(int)\n\nX_val = val[features]\ny_val = val[target].astype(int)", "_____no_output_____" ], [ "y_train.value_counts(normalize = True)", "_____no_output_____" ], [ "y_val.value_counts(normalize = True)", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\n\nmajority_case = y_train.mode()[0]\ny_pred = [majority_case] * len(y_train)\n\naccuracy_score(y_train, y_pred)", "_____no_output_____" ], [ "from sklearn.dummy import DummyClassifier\n# Fit the Dummy Classifier\nbaseline = DummyClassifier(strategy = 'most_frequent')\nbaseline.fit(X_train, y_train)\n\n# Make Predictions on validation data\ny_pred = baseline.predict(X_val)\naccuracy_score(y_val, y_pred)", "_____no_output_____" ] ], [ [ "#**Logistic Regression Model**", "_____no_output_____" ] ], [ [ "import category_encoders as ce\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.preprocessing import StandardScaler\n\ntarget = 'Great'\nfeatures = ['Yelp', 'Google', 'Cost', 'Hunger', 'Tortilla', 'Temp', 'Meat', \n 'Fillings', 'Meat:filling', 'Uniformity', 'Salsa', 'Synergy', 'Wrap']\n\nX_train = train[features]\ny_train = train[target].astype(int)\n\nX_val = val[features]\ny_val = val[target].astype(int)\n\n# one hot encode\nencoder = ce.OneHotEncoder(use_cat_names=True)\nX_train_encoded = encoder.fit_transform(X_train)\nX_val_encoded = encoder.transform(X_val)\n\n# fill missing values\nimputer = SimpleImputer(strategy = 'mean')\nX_train_imputed = imputer.fit_transform(X_train_encoded)\nX_val_imputed = imputer.transform(X_val_encoded)\n\n# scale it\nscaler = StandardScaler()\nX_train_scaled = scaler.fit_transform(X_train_imputed)\nX_val_scaled = scaler.transform(X_val_imputed)\n\n# validation error\nmodel = LogisticRegressionCV(cv=5, n_jobs=-1, random_state=42)\nmodel.fit(X_train_scaled, y_train)\nprint('Validation Accuracy:', model.score(X_val_scaled, y_val))", "Validation Accuracy: 0.8235294117647058\n" ], [ "# define test X matrix\nX_test = test[features]\ny_test = 
test[target].astype(int)\n\n# encode X_test\nX_test_encoded = encoder.transform(X_test)\n\n# fill missing values\nX_test_imputed = imputer.transform(X_test_encoded)\n\n# scale X_test\nX_test_scaled = scaler.transform(X_test_imputed)\n\n# get predictions\ny_pred = model.predict(X_test_scaled)\n\nprint(f'Test Accuracy: {model.score(X_test_scaled, y_test)}')", "Test Accuracy: 0.7631578947368421\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7c3373489b16483cc8d6a0aa90529eb15cb0a17
53,105
ipynb
Jupyter Notebook
week6/practice_mcts.ipynb
Iramuk-ganh/practical-rl
22398c698f47bad73f9be279983a7f8587446b72
[ "MIT" ]
6
2020-07-29T04:27:02.000Z
2021-11-11T14:26:43.000Z
practice_mcts.ipynb
rahul263-stack/quarantine
d8b1cfe0da8cad9fe2f3bbd427334b979c7d2c09
[ "MIT" ]
1
2020-11-19T16:16:24.000Z
2020-11-19T16:16:24.000Z
practice_mcts.ipynb
rahul263-stack/quarantine
d8b1cfe0da8cad9fe2f3bbd427334b979c7d2c09
[ "MIT" ]
7
2020-09-21T14:30:40.000Z
2022-01-04T14:03:36.000Z
68.170732
7,532
0.751172
[ [ [ "import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# This code creates a virtual display to draw game images on. \n# If you are running locally, just ignore it\nimport os\nif type(os.environ.get(\"DISPLAY\")) is not str or len(os.environ.get(\"DISPLAY\")) == 0:\n !bash ../xvfb start\n os.environ['DISPLAY'] = ':1'", "Starting virtual X frame buffer: Xvfb.\r\n" ] ], [ [ "## Seminar: Monte-carlo tree search\n\nIn this seminar, we'll implement a vanilla MCTS planning and use it to solve some Gym envs.\n\nBut before we do that, we first need to modify gym env to allow saving and loading game states to facilitate backtracking.", "_____no_output_____" ] ], [ [ "from collections import namedtuple\nfrom pickle import dumps, loads\n\nfrom gym.core import Wrapper\n\n# a container for get_result function below. Works just like tuple, but prettier\nActionResult = namedtuple(\n \"action_result\", (\"snapshot\", \"observation\", \"reward\", \"is_done\", \"info\"))\n\n\nclass WithSnapshots(Wrapper):\n \"\"\"\n Creates a wrapper that supports saving and loading environemnt states.\n Required for planning algorithms.\n\n This class will have access to the core environment as self.env, e.g.:\n - self.env.reset() #reset original env\n - self.env.ale.cloneState() #make snapshot for atari. load with .restoreState()\n - ...\n\n You can also use reset, step and render directly for convenience.\n - s, r, done, _ = self.step(action) #step, same as self.env.step(action)\n - self.render(close=True) #close window, same as self.env.render(close=True)\n \"\"\"\n\n def get_snapshot(self, render=False):\n \"\"\"\n :returns: environment state that can be loaded with load_snapshot \n Snapshots guarantee same env behaviour each time they are loaded.\n\n Warning! Snapshots can be arbitrary things (strings, integers, json, tuples)\n Don't count on them being pickle strings when implementing MCTS.\n\n Developer Note: Make sure the object you return will not be affected by \n anything that happens to the environment after it's saved.\n You shouldn't, for example, return self.env. 
\n In case of doubt, use pickle.dumps or deepcopy.\n\n \"\"\"\n if render:\n self.render() # close popup windows since we can't pickle them\n self.close()\n\n if self.unwrapped.viewer is not None:\n self.unwrapped.viewer.close()\n self.unwrapped.viewer = None\n return dumps(self.env)\n\n def load_snapshot(self, snapshot, render=False):\n \"\"\"\n Loads snapshot as current env state.\n Should not change snapshot inplace (in case of doubt, deepcopy).\n \"\"\"\n\n assert not hasattr(self, \"_monitor\") or hasattr(\n self.env, \"_monitor\"), \"can't backtrack while recording\"\n\n if render:\n self.render() # close popup windows since we can't load into them\n self.close()\n\n self.env = loads(snapshot)\n\n def get_result(self, snapshot, action):\n \"\"\"\n A convenience function that \n - loads snapshot, \n - commits action via self.step,\n - and takes snapshot again :)\n\n :returns: next snapshot, next_observation, reward, is_done, info\n\n Basically it returns next snapshot and everything that env.step would have returned.\n \"\"\"\n\n self.load_snapshot(snapshot)\n s, r, done, _ = self.step(action)\n next_snapshot = self.get_snapshot()\n return ActionResult(next_snapshot, #fill in the variables\n s, \n r, done, _)", "_____no_output_____" ] ], [ [ "### try out snapshots:\n", "_____no_output_____" ] ], [ [ "# make env\nenv = WithSnapshots(gym.make(\"CartPole-v0\"))\nenv.reset()\n\nn_actions = env.action_space.n", "_____no_output_____" ], [ "print(\"initial_state:\")\nplt.imshow(env.render('rgb_array'))\nenv.close()\n\n# create first snapshot\nsnap0 = env.get_snapshot()", "initial_state:\n" ], [ "# play without making snapshots (faster)\nwhile True:\n is_done = env.step(env.action_space.sample())[2]\n if is_done:\n print(\"Whoops! We died!\")\n break\n\nprint(\"final state:\")\nplt.imshow(env.render('rgb_array'))\nenv.close()", "Whoops! We died!\nfinal state:\n" ], [ "# reload initial state\nenv.load_snapshot(snap0)\n\nprint(\"\\n\\nAfter loading snapshot\")\nplt.imshow(env.render('rgb_array'))\nenv.close()", "\n\nAfter loading snapshot\n" ], [ "# get outcome (snapshot, observation, reward, is_done, info)\nres = env.get_result(snap0, env.action_space.sample())\n\nsnap1, observation, reward = res[:3]\n\n# second step\nres2 = env.get_result(snap1, env.action_space.sample())", "_____no_output_____" ] ], [ [ "# MCTS: Monte-Carlo tree search\n\nIn this section, we'll implement the vanilla MCTS algorithm with UCB1-based node selection.\n\nWe will start by implementing the `Node` class - a simple class that acts like MCTS node and supports some of the MCTS algorithm steps.\n\nThis MCTS implementation makes some assumptions about the environment, you can find those _in the notes section at the end of the notebook_.", "_____no_output_____" ] ], [ [ "assert isinstance(env,WithSnapshots)", "_____no_output_____" ], [ "class Node:\n \"\"\" a tree node for MCTS \"\"\"\n \n #metadata:\n parent = None #parent Node\n value_sum = 0. 
#sum of state values from all visits (numerator)\n times_visited = 0 #counter of visits (denominator)\n\n \n def __init__(self,parent,action):\n \"\"\"\n Creates and empty node with no children.\n Does so by commiting an action and recording outcome.\n \n :param parent: parent Node\n :param action: action to commit from parent Node\n \n \"\"\"\n \n self.parent = parent\n self.action = action \n self.children = set() #set of child nodes\n\n #get action outcome and save it\n res = env.get_result(parent.snapshot,action)\n self.snapshot,self.observation,self.immediate_reward,self.is_done,_ = res\n \n \n def is_leaf(self):\n return len(self.children)==0\n \n def is_root(self):\n return self.parent is None\n \n def get_mean_value(self):\n return self.value_sum / self.times_visited if self.times_visited !=0 else 0\n \n def ucb_score(self,scale=10,max_value=1e100):\n \"\"\"\n Computes ucb1 upper bound using current value and visit counts for node and it's parent.\n \n :param scale: Multiplies upper bound by that. From hoeffding inequality, assumes reward range to be [0,scale].\n :param max_value: a value that represents infinity (for unvisited nodes)\n \n \"\"\"\n \n if self.times_visited == 0:\n return max_value\n \n #compute ucb-1 additive component (to be added to mean value)\n #hint: you can use self.parent.times_visited for N times node was considered,\n # and self.times_visited for n times it was visited\n \n U = np.sqrt(2*np.log(self.parent.times_visited)/self.times_visited)\n \n return self.get_mean_value() + scale*U\n \n \n #MCTS steps\n \n def select_best_leaf(self):\n \"\"\"\n Picks the leaf with highest priority to expand\n Does so by recursively picking nodes with best UCB-1 score until it reaches the leaf.\n \n \"\"\"\n if self.is_leaf():\n return self\n \n children = self.children\n \n# best_child = <select best child node in terms of node.ucb_score()>\n best_child = max([(child.ucb_score(), child) for child in children], key=lambda x: x[0])[1]\n \n return best_child.select_best_leaf()\n \n def expand(self):\n \"\"\"\n Expands the current node by creating all possible child nodes.\n Then returns one of those children.\n \"\"\"\n \n assert not self.is_done, \"can't expand from terminal state\"\n\n for action in range(n_actions):\n self.children.add(Node(self,action))\n \n return self.select_best_leaf()\n \n def rollout(self,t_max=10**4):\n \"\"\"\n Play the game from this state to the end (done) or for t_max steps.\n \n On each step, pick action at random (hint: env.action_space.sample()).\n \n Compute sum of rewards from current state till \n Note 1: use env.action_space.sample() for random action\n Note 2: if node is terminal (self.is_done is True), just return 0\n \n \"\"\"\n \n #set env into the appropriate state\n env.load_snapshot(self.snapshot)\n obs = self.observation\n is_done = self.is_done\n \n #<your code here - rollout and compute reward>\n rollout_reward = 0\n while not is_done and t_max>0:\n t_max-=1\n _, r, is_done, _ = env.step(env.action_space.sample())\n rollout_reward += r\n\n return rollout_reward\n \n def propagate(self,child_value):\n \"\"\"\n Uses child value (sum of rewards) to update parents recursively.\n \"\"\"\n #compute node value\n my_value = self.immediate_reward + child_value\n \n #update value_sum and times_visited\n self.value_sum+=my_value\n self.times_visited+=1\n \n #propagate upwards\n if not self.is_root():\n self.parent.propagate(my_value)\n \n def safe_delete(self):\n \"\"\"safe delete to prevent memory leak in some python versions\"\"\"\n 
del self.parent\n for child in self.children:\n child.safe_delete()\n del child", "_____no_output_____" ], [ "class Root(Node):\n def __init__(self,snapshot,observation):\n \"\"\"\n creates special node that acts like tree root\n :snapshot: snapshot (from env.get_snapshot) to start planning from\n :observation: last environment observation\n \"\"\"\n \n self.parent = self.action = None\n self.children = set() #set of child nodes\n \n #root: load snapshot and observation\n self.snapshot = snapshot\n self.observation = observation\n self.immediate_reward = 0\n self.is_done=False\n \n @staticmethod\n def from_node(node):\n \"\"\"initializes node as root\"\"\"\n root = Root(node.snapshot,node.observation)\n #copy data\n copied_fields = [\"value_sum\",\"times_visited\",\"children\",\"is_done\"]\n for field in copied_fields:\n setattr(root,field,getattr(node,field))\n return root", "_____no_output_____" ] ], [ [ "## Main MCTS loop\n\nWith all we implemented, MCTS boils down to a trivial piece of code.", "_____no_output_____" ] ], [ [ "def plan_mcts(root,n_iters=10):\n \"\"\"\n builds tree with monte-carlo tree search for n_iters iterations\n :param root: tree node to plan from\n :param n_iters: how many select-expand-simulate-propagete loops to make\n \"\"\"\n for _ in range(n_iters):\n\n # PUT CODE HERE\n \n node = root.select_best_leaf()\n\n if node.is_done:\n node.propagate(0)\n\n else: #node is not terminal\n #<expand-simulate-propagate loop>\n child = node.expand()\n rollout_reward = child.rollout()\n node.propagate(rollout_reward)", "_____no_output_____" ] ], [ [ "## Plan and execute\nIn this section, we use the MCTS implementation to find optimal policy.", "_____no_output_____" ] ], [ [ "env = WithSnapshots(gym.make(\"CartPole-v0\"))\nroot_observation = env.reset()\nroot_snapshot = env.get_snapshot()\nroot = Root(root_snapshot, root_observation)", "_____no_output_____" ], [ "#plan from root:\nplan_mcts(root,n_iters=1000)", "_____no_output_____" ], [ "from IPython.display import clear_output\nfrom itertools import count\nfrom gym.wrappers import Monitor\n\ntotal_reward = 0 #sum of rewards\ntest_env = loads(root_snapshot) #env used to show progress\n\nfor i in count():\n \n #get best child\n# best_child = <select child with highest mean reward>\n best_child = max([(child.get_mean_value(), child) for child in root.children], key=lambda x: x[0])[1]\n \n #take action\n s,r,done,_ = test_env.step(best_child.action)\n \n #show image\n clear_output(True)\n plt.title(\"step %i\"%i)\n plt.imshow(test_env.render('rgb_array'))\n plt.show()\n\n total_reward += r\n if done:\n print(\"Finished with reward = \",total_reward)\n break\n \n #discard unrealized part of the tree [because not every child matters :(]\n for child in root.children:\n if child != best_child:\n child.safe_delete()\n\n #declare best child a new root\n root = Root.from_node(best_child)\n \n# assert not root.is_leaf(), \"We ran out of tree! Need more planning! Try growing tree right inside the loop.\"\n \n #you may want to expand tree here\n #<your code here>\n if root.is_leaf():\n plan_mcts(root,n_iters=10)", "_____no_output_____" ] ], [ [ "### Submit to Coursera", "_____no_output_____" ] ], [ [ "from submit import submit_mcts\n\nsubmit_mcts(total_reward, \"[email protected]\", \"xx\")", "Submitted to Coursera platform. 
See results on assignment page!\n" ] ], [ [ "## More stuff\n\nThere's a few things you might want to try if you want to dig deeper:\n\n### Node selection and expansion\n\n\"Analyze this\" assignment\n\nUCB-1 is a weak bound as it relies on a very general bounds (Hoeffding Inequality, to be exact). \n* Try playing with alpha. The theoretically optimal alpha for CartPole is 200 (max reward). \n* Use using a different exploration strategy (bayesian UCB, for example)\n* Expand not all but several random actions per `expand` call. See __the notes below__ for details.\n\nThe goal is to find out what gives the optimal performance for `CartPole-v0` for different time budgets (i.e. different n_iter in plan_mcts.\n\nEvaluate your results on `AcroBot-v1` - do the results change and if so, how can you explain it?\n\n\n### Atari-RAM\n\n\"Build this\" assignment\n\nApply MCTS to play atari games. In particular, let's start with ```gym.make(\"MsPacman-ramDeterministic-v0\")```.\n\nThis requires two things:\n* Slightly modify WithSnapshots wrapper to work with atari.\n\n * Atari has a special interface for snapshots:\n ``` \n snapshot = self.env.ale.cloneState()\n ...\n self.env.ale.restoreState(snapshot)\n ```\n * Try it on the env above to make sure it does what you told it to.\n \n* Run MCTS on the game above. \n * Start with small tree size to speed-up computations\n * You will probably want to rollout for 10-100 steps (t_max) for starters\n * Consider using discounted rewards (see __notes at the end__)\n * Try a better rollout policy\n \n \n### Integrate learning into planning\n\nPlanning on each iteration is a costly thing to do. You can speed things up drastically if you train a classifier to predict which action will turn out to be best according to MCTS.\n\nTo do so, just record which action did the MCTS agent take on each step and fit something to [state, mcts_optimal_action]\n* You can also use optimal actions from discarded states to get more (dirty) samples. Just don't forget to fine-tune without them.\n* It's also worth a try to use P(best_action|state) from your model to select best nodes in addition to UCB\n* If your model is lightweight enough, try using it as a rollout policy.\n\nWhile CartPole is glorious enough, try expanding this to ```gym.make(\"MsPacmanDeterministic-v0\")```\n* See previous section on how to wrap atari\n\n* Also consider what [AlphaGo Zero](https://deepmind.com/blog/alphago-zero-learning-scratch/) did in this area.\n\n### Integrate planning into learning \n_(this will likely take long time, better consider this as side project when all other deadlines are met)_\n\nIncorporate planning into the agent architecture. \n\nThe goal is to implement [Value Iteration Networks](https://arxiv.org/abs/1602.02867)\n\nFor starters, remember [week5 assignment](https://github.com/yandexdataschool/Practical_RL/blob/coursera/week5_policy_based/practice_a3c.ipynb)? If not, use [this](http://bit.ly/2oZ34Ap) instead.\n\nYou will need to switch it into a maze-like game, consider MsPacman or the games from week7 [Bonus: Neural Maps from here](https://github.com/yandexdataschool/Practical_RL/blob/master/week7/7.3_homework.ipynb).\n\nYou will need to implement a special layer that performs value iteration-like update to a recurrent memory. 
This can be implemented the same way you did attention from week7 or week8.", "_____no_output_____" ], [ "## Notes\n\n\n#### Assumptions\n\nThe full list of assumptions is\n* __Finite actions__ - we enumerate all actions in `expand`\n* __Episodic (finite) MDP__ - while technically it works for infinite mdp, we rollout for $ 10^4$ steps. If you are knowingly infinite, please adjust `t_max` to something more reasonable.\n* __No discounted rewards__ - we assume $\\gamma=1$. If that isn't the case, you only need to change two lines in `rollout` and use `my_R = r + gamma*child_R` for `propagate`\n* __pickleable env__ - won't work if e.g. your env is connected to a web-browser surfing the internet. For custom envs, you may need to modify get_snapshot/load_snapshot from `WithSnapshots`.\n\n#### On `get_best_leaf` and `expand` functions\n\nThis MCTS implementation only selects leaf nodes for expansion.\nThis doesn't break things down because `expand` adds all possible actions. Hence, all non-leaf nodes are by design fully expanded and shouldn't be selected.\n\nIf you want to only add a few random action on each expand, you will also have to modify `get_best_leaf` to consider returning non-leafs.\n\n#### Rollout policy\n\nWe use a simple uniform policy for rollouts. This introduces a negative bias to good situations that can be messed up completely with random bad action. As a simple example, if you tend to rollout with uniform policy, you better don't use sharp knives and walk near cliffs.\n\nYou can improve that by integrating a reinforcement _learning_ algorithm with a computationally light agent. You can even train this agent on optimal policy found by the tree search.\n\n#### Contributions\n* Reusing some code from 5vision [solution for deephack.RL](https://github.com/5vision/uct_atari), code by Mikhail Pavlov\n* Using some code from [this gist](https://gist.github.com/blole/dfebbec182e6b72ec16b66cc7e331110)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7c33f4c373a56bef70043a62b150b339333138e
26,587
ipynb
Jupyter Notebook
transformers_doc/quicktour.ipynb
mwunderlich/notebooks
0e7780ba3afed1a700ce54968273ecfdf6c25249
[ "Apache-2.0" ]
null
null
null
transformers_doc/quicktour.ipynb
mwunderlich/notebooks
0e7780ba3afed1a700ce54968273ecfdf6c25249
[ "Apache-2.0" ]
null
null
null
transformers_doc/quicktour.ipynb
mwunderlich/notebooks
0e7780ba3afed1a700ce54968273ecfdf6c25249
[ "Apache-2.0" ]
null
null
null
34.845347
916
0.638508
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7c34e41fc5dfe67995aa492330ad12bf2a24a97
35,728
ipynb
Jupyter Notebook
Learn Math/3. Linear Algebra/3.3 Identity and Inverse Matrices/3.3 Identity and Inverse Matrices.ipynb
mcallistercs/learning-data-science
ef92355d979abfc3061f5cbd6aed244231d93e21
[ "Unlicense" ]
1
2019-05-08T23:47:46.000Z
2019-05-08T23:47:46.000Z
Learn Math/3. Linear Algebra/3.3 Identity and Inverse Matrices/3.3 Identity and Inverse Matrices.ipynb
mcallistercs/learning-data-science
ef92355d979abfc3061f5cbd6aed244231d93e21
[ "Unlicense" ]
null
null
null
Learn Math/3. Linear Algebra/3.3 Identity and Inverse Matrices/3.3 Identity and Inverse Matrices.ipynb
mcallistercs/learning-data-science
ef92355d979abfc3061f5cbd6aed244231d93e21
[ "Unlicense" ]
3
2018-10-30T09:18:32.000Z
2019-05-02T13:57:13.000Z
47.510638
15,776
0.703846
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "# Plot parameters\nsns.set()\n%pylab inline\npylab.rcParams['figure.figsize'] = (4, 4)", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "# Avoid inaccurate floating values (for inverse matrices in dot product for instance)\n# See https://stackoverflow.com/questions/24537791/numpy-matrix-inversion-rounding-errors\nnp.set_printoptions(suppress=True)", "_____no_output_____" ], [ "%%html\n<style>\n.pquote {\n text-align: left;\n margin: 40px 0 40px auto;\n width: 70%;\n font-size: 1.5em;\n font-style: italic;\n display: block;\n line-height: 1.3em;\n color: #5a75a7;\n font-weight: 600;\n border-left: 5px solid rgba(90, 117, 167, .1);\n padding-left: 6px;\n}\n.notes {\n font-style: italic;\n display: block;\n margin: 40px 10%;\n}\nimg + em {\n text-align: center;\n display: block;\n color: gray;\n font-size: 0.9em;\n font-weight: 600;\n}\n</style>", "_____no_output_____" ] ], [ [ "$$\n\\newcommand\\bs[1]{\\boldsymbol{#1}}\n$$", "_____no_output_____" ], [ "# Introduction\n\nThis chapter is light but contains some important definitions. The identity matrix and the inverse of a matrix are concepts that will be very useful in subsequent chapters. \n\nUsing these concepts, we will see how vectors and matrices can be transformed. To fully understand the intuition behind these operations, we'll take a look at the geometric intrepreation of linear algebra. This will help us visualize otherwise abstract operatins.\n\nThen, at the end of this chapter, we'll use the concepts of matrix inversion and the identity matrix to solve a simple system of linear equations! Once you see this approach, you'll never want to use the algebraic methods of substitution or elimination that you learned in high school!", "_____no_output_____" ], [ "# 3.3 Identity and Inverse Matrices", "_____no_output_____" ], [ "# Identity matrices\n\nThe identity matrix $\\bs{I}_n$ is a special matrix of shape ($n \\times n$) that is filled with $0$ except for the diagonal, which is filled with $1$.\n\n<img src=\"images/identity-matrix.png\" width=\"150\" alt=\"Example of an identity matrix\" title=\"Identity matrix\">\n<em>A 3 by 3 identity matrix</em>\n\nMore generally,\n$$I_1 = \\begin{bmatrix}\n1 \\end{bmatrix}\n,\\ \nI_2 = \\begin{bmatrix}\n1 & 0 \\\\\n0 & 1 \\end{bmatrix}\n,\\ \nI_3 = \\begin{bmatrix}\n1 & 0 & 0 \\\\\n0 & 1 & 0 \\\\\n0 & 0 & 1 \\end{bmatrix}\n,\\ \\cdots ,\\ \nI_n = \\begin{bmatrix}\n1 & 0 & 0 & \\cdots & 0 \\\\\n0 & 1 & 0 & \\cdots & 0 \\\\\n0 & 0 & 1 & \\cdots & 0 \\\\\n\\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\\n0 & 0 & 0 & \\cdots & 1 \\end{bmatrix}$$", "_____no_output_____" ], [ "An identity matrix can be created with the Numpy function `eye()`:", "_____no_output_____" ] ], [ [ "np.eye(3) # 3 rows, and 3 columns", "_____no_output_____" ] ], [ [ "When you \"apply\" the identity matrix to a vector using the dot product, the result is the same vector:\n\n$$\\bs{I}_n\\bs{x} = \\bs{x}$$\n\n### Example 1\n\n$$\n\\begin{bmatrix}\n 1 & 0 & 0 \\\\\\\\\n 0 & 1 & 0 \\\\\\\\\n 0 & 0 & 1\n\\end{bmatrix}\n\\times\n\\begin{bmatrix}\n x_{1} \\\\\\\\\n x_{2} \\\\\\\\\n x_{3}\n\\end{bmatrix}=\n\\begin{bmatrix}\n 1 \\times x_1 + 0 \\times x_2 + 0\\times x_3 \\\\\\\\\n 0 \\times x_1 + 1 \\times x_2 + 0\\times x_3 \\\\\\\\\n 0 \\times x_1 + 0 \\times x_2 + 1\\times x_3\n\\end{bmatrix}=\n\\begin{bmatrix}\n x_{1} \\\\\\\\\n x_{2} \\\\\\\\\n x_{3}\n\\end{bmatrix}\n$$\n\nHence, the name 
**identity** matrix.", "_____no_output_____" ] ], [ [ "x = np.array([[2], [6], [3]])\nx", "_____no_output_____" ], [ "x_id = np.eye(x.shape[0]).dot(x)\nx_id", "_____no_output_____" ] ], [ [ "More generally, when $\\bs{A}$ is an $m\\times n$ matrix, it is a property of matrix multiplication that:\n\n$$I_m\\bs{A} = \\bs{A}I_n = \\bs{A}$$", "_____no_output_____" ], [ "## Visualizing the intuition\nVectors and matrices occupy $n$-dimensional space. This precept allows us to think about linear algebra geometrically and, if we're lucky enough to be working with $<3$ dimensions, visually. \n\nFor example, if you had a $2$-dimensional vector $\\bs{v}$, you could think of the vector as an ordered pair of real numbers ($a,b$). This ordered pair could then be plotted in a Cartesian coordinate system with a line connecting it to the origin:\n\n<img src=\"images/vector_line.png\" height = 300 width = 300>\n\nIf you had two such vectors ($\\bs{v}$ and $\\bs{w}$), a simple vector operation like addition would geometrically look like this:\n\n<img src=\"images/vector_addition.png\" height = 300 width = 300>\n\nMathematically, that addition looks like this:\n\n<img src=\"images/vector_addition_math.png\" height = 300 width = 300>\n\nNow that we've got you thinking about linear algebra geometrically, consider the identity matrix. If you multiply a vector by the identity matrix, you're technically applying a **transformation** to that vector. But since your multiplier was the identity matrix $\\bs{I}$, the transformation just outputs the multiplicand, $\\bs{x}$, itself. That's what happened above when we saw that $\\bs{x}$ was not altered after being multiplied by $\\bs{I}$. Visually, nothing would happen to your line.\n\nBut what if we slightly change our identity matrix? What if, for example, we change the $1$'s to $2$'s like so:\n\n$$\n\\begin{bmatrix}\n 2 & 0 & 0 \\\\\\\\\n 0 & 2 & 0 \\\\\\\\\n 0 & 0 & 2\n\\end{bmatrix}\n$$\n\nThat would double each element in vector $\\bs{x}$. Visually, that would make the line twice as long.\n\nThe takeaway here is that you can define an **operation matrix** to transform vectors. Here’s a few examples of the types of transformations you could do:\n - Scale: make all inputs bigger/smaller\n - Skew: make certain inputs bigger/smaller\n - Flip: make inputs negative\n - Rotate: make new coordinates based on old ones (e.g. East becomes North, North becomes West, etc.)\nIn short, all of these are geometric interpretations of multiplication. Each of them provides a means to warp a vector space.", "_____no_output_____" ], [ "# Inverse Matrices\nIf you have a square matrix (i.e. a matrix with the same number of columns and rows) then you can calculate the inverse of that matrix so long as its [determinant](https://en.wikipedia.org/wiki/Determinant) doesn't equal 0 (more on the determinant in a later lesson!).\n\nThe inverse of $\\bs{A}$ is denoted $\\bs{A}^{-1}$. It is the matrix that results in the identity matrix when it is multiplied by $\\bs{A}$:\n\n$$\\bs{A}^{-1}\\bs{A}=\\bs{I}_n$$\n\nThis means that if we apply a linear transformation to the space with $\\bs{A}$, it is possible to go back with $\\bs{A}^{-1}$. It provides a way to cancel the transformation.\n\n### Example 2\n\n$$\n\\bs{A}=\\begin{bmatrix}\n 3 & 0 & 2 \\\\\\\\\n 2 & 0 & -2 \\\\\\\\\n 0 & 1 & 1\n\\end{bmatrix}\n$$\n\nFor this example, we will use the numpy function `linalg.inv()` to calculate the inverse of $\\bs{A}$. Let's start by creating $\\bs{A}$. 
If you want to learn about the nitty gritty details behind this operation, check out [this](https://www.mathsisfun.com/algebra/matrix-inverse-minors-cofactors-adjugate.html) or [this](https://www.mathsisfun.com/algebra/matrix-inverse-row-operations-gauss-jordan.html).", "_____no_output_____" ] ], [ [ "A = np.array([[3, 0, 2], [2, 0, -2], [0, 1, 1]])\nA", "_____no_output_____" ] ], [ [ "Now we calculate its inverse:", "_____no_output_____" ] ], [ [ "A_inv = np.linalg.inv(A)\nA_inv", "_____no_output_____" ] ], [ [ "We can check that $\\bs{A^{-1}}$ is the inverse of $\\bs{A}$ with Python:", "_____no_output_____" ] ], [ [ "A_bis = A_inv.dot(A)\nA_bis", "_____no_output_____" ] ], [ [ "# Sovling a system of linear equations\nThe inverse matrix can be used to solve the equation $\\bs{Ax}=\\bs{b}$ by adding it to each term:\n\n$$\\bs{A}^{-1}\\bs{Ax}=\\bs{A}^{-1}\\bs{b}$$\n\nSince we know by definition that $\\bs{A}^{-1}\\bs{A}=\\bs{I}$, we have:\n\n$$\\bs{I}_n\\bs{x}=\\bs{A}^{-1}\\bs{b}$$\n\nWe saw that a vector is not changed when multiplied by the identity matrix. So we can write:\n\n$$\\bs{x}=\\bs{A}^{-1}\\bs{b}$$\n\nThis is great because we now have our vector of variables $\\bs{x}$ all by itself on the right side of the equation! This means we can solve for $\\bs{x}$ by simply computing the dot product of $\\bs{A^-1}$ and $\\bs{b}$!\n\nLet's try that!", "_____no_output_____" ], [ "### Example 3\n\nWe will take a simple solvable example:\n\n$$\n\\begin{cases}\ny = 2x \\\\\\\\\ny = -x +3\n\\end{cases}\n$$\n\nFirst, lets be sure we're using the notation we saw in above:\n\n$$\n\\begin{cases}\nA_{1,1}x_1 + A_{1,2}x_2 = b_1 \\\\\\\\\nA_{2,1}x_1 + A_{2,2}x_2= b_2\n\\end{cases}\n$$\n\nHere, $x_1$ corresponds to $x$ and $x_2$ corresponds to $y$. So we have:\n\n$$\n\\begin{cases}\n2x_1 - x_2 = 0 \\\\\\\\\nx_1 + x_2= 3\n\\end{cases}\n$$\n\nOur matrix $\\bs{A}$ of weights is:\n\n$$\n\\bs{A}=\n\\begin{bmatrix}\n 2 & -1 \\\\\\\\\n 1 & 1\n\\end{bmatrix}\n$$\n\nAnd the vector $\\bs{b}$ containing the solutions of individual equations is:\n\n$$\n\\bs{b}=\n\\begin{bmatrix}\n 0 \\\\\\\\\n 3\n\\end{bmatrix}\n$$\n\nUnder the matrix form, our systems becomes:\n\n$$\n\\begin{bmatrix}\n 2 & -1 \\\\\\\\\n 1 & 1\n\\end{bmatrix}\n\\begin{bmatrix}\n x_1 \\\\\\\\\n x_2\n\\end{bmatrix}=\n\\begin{bmatrix}\n 0 \\\\\\\\\n 3\n\\end{bmatrix}\n$$\n\nLet's define $\\bs{A}$:", "_____no_output_____" ] ], [ [ "A = np.array([[2, -1], [1, 1]])\nA", "_____no_output_____" ] ], [ [ "And let's define $\\bs{b}$:", "_____no_output_____" ] ], [ [ "b = np.array([[0], [3]])", "_____no_output_____" ] ], [ [ "And now let's find the inverse of $\\bs{A}$:", "_____no_output_____" ] ], [ [ "A_inv = np.linalg.inv(A)\nA_inv", "_____no_output_____" ] ], [ [ "Since we saw that\n\n$$\\bs{x}=\\bs{A}^{-1}\\bs{b}$$\n\nWe have:", "_____no_output_____" ] ], [ [ "x = A_inv.dot(b)\nx", "_____no_output_____" ] ], [ [ "This is our solution! \n\n$$\n\\bs{x}=\n\\begin{bmatrix}\n 1 \\\\\\\\\n 2\n\\end{bmatrix}\n$$\n\nGoing back to the geometric interpretion of linear algebra, you can think of our solution vector $\\bs{x}$ as containing a set of coordinates ($1, 2$). This point in a $2$-dimensional Cartesian plane is actually the intersection of the two lines representing the equations! 
\n\nLet's plot this to visually check the solution:", "_____no_output_____" ] ], [ [ "#to draw the equation with Matplotlib, first create a vector with some x values\nx = np.arange(-10, 10)\n#then create some y values for each of those x values using the equation\ny = 2*x\ny1 = -x + 3\n\n#then instantiate the plot figure\nplt.figure()\n#draw the first line\nplt.plot(x, y)\n#draw the second line\nplt.plot(x, y1)\n#set the limits of the axes\nplt.xlim(0, 3)\nplt.ylim(0, 3)\n\n#draw the axes\nplt.axvline(x=0, color='grey')\nplt.axhline(y=0, color='grey')", "_____no_output_____" ] ], [ [ "We can see that the intersection of the two lines (where $x=1$ and $y=2$) is the solution to our system of equations!", "_____no_output_____" ], [ "# What's next?\nThis lesson introduced a simple case where our system of equations had one and only one solution. The next lesson will treat systems of linear equations that have a number of solutions.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7c35a4d2ff30d53c5ae3c6a2175becfb6ac504f
15,078
ipynb
Jupyter Notebook
notebook_demos/esml_howto_0_short.ipynb
jostrm/azure-enterprise-scale-ml-usage
52508e6193b57eeb71ffe0d70fb31ed692527b9f
[ "MIT" ]
null
null
null
notebook_demos/esml_howto_0_short.ipynb
jostrm/azure-enterprise-scale-ml-usage
52508e6193b57eeb71ffe0d70fb31ed692527b9f
[ "MIT" ]
null
null
null
notebook_demos/esml_howto_0_short.ipynb
jostrm/azure-enterprise-scale-ml-usage
52508e6193b57eeb71ffe0d70fb31ed692527b9f
[ "MIT" ]
null
null
null
29.33463
214
0.579454
[ [ [ "# ESML - accelerator: Quick DEMO\n", "_____no_output_____" ] ], [ [ "import sys, os\nsys.path.append(os.path.abspath(\"../azure-enterprise-scale-ml/esml/common/\")) # NOQA: E402\nfrom esml import ESMLDataset, ESMLProject\np = ESMLProject() # Will search in ROOT for your copied SETTINGS folder '../../../settings', you should copy template settings from '../settings'\n#p = ESMLProject(True) # Demo settings, will search in internal TEMPLATE SETTINGS folder '../settings'", "_____no_output_____" ], [ "#p.dev_test_prod = \"dev\"\np.describe()", "_____no_output_____" ] ], [ [ "from azureml.core import Workspace\nfrom azureml.core.authentication import InteractiveLoginAuthentication\n\nauth = InteractiveLoginAuthentication(tenant_id = p.tenant)\nws = Workspace.get(name = p.workspace_name,subscription_id = p.subscription_id,resource_group = p.resource_group,auth=auth)\nws.write_config(path=\".\", file_name=\"../../ws_config.json\")\n\nws = Workspace.from_config(\"../ws_config.json\") # Reads config.json ", "_____no_output_____" ], [ "# 2) ESML will Automap and Autoregister Azure ML Datasets - IN, SILVER, BRONZE, GOLD\n- `Automap` and `Autoregister` Azure ML Datasets as: `IN, SILVER, BRONZE, GOLD`", "_____no_output_____" ] ], [ [ "from azureml.core import Workspace\nws, config_name = p.authenticate_workspace_and_write_config()\nws = p.get_workspace_from_config()\nws.name", "_____no_output_____" ], [ "print(\"Are we in R&D state (no dataset versioning) = {}\".format(p.rnd))", "_____no_output_____" ], [ "p.unregister_all_datasets(ws) # DEMO purpose", "_____no_output_____" ], [ "datastore = p.init(ws)", "_____no_output_____" ] ], [ [ "# 3) IN->`BRONZE->SILVER`->Gold\n- Create dataset from PANDAS - Save to SILVER", "_____no_output_____" ] ], [ [ "import pandas as pd \nds = p.DatasetByName(\"ds01_diabetes\")\ndf = ds.Bronze.to_pandas_dataframe()\ndf.head()", "_____no_output_____" ] ], [ [ "## 3) BRONZE-SILVER (EDIT rows & SAVE)\n- Test change rows, same structure = new version (and new file added)\n- Note: not earlier files in folder are removed. They are needed for other \"versions\". \n- Expected: For 3 files: New version, 997 rows: 2 older files=627 + 1 new file=370\n- Expected (if we delete OLD files): New version, with less rows. 
370 instead of 997", "_____no_output_____" ] ], [ [ "df_filtered = df[df.AGE > 0.015]\nprint(df.shape[0], df_filtered.shape[0])", "_____no_output_____" ] ], [ [ "## 3a) Save `SILVER` ds01_diabetes", "_____no_output_____" ] ], [ [ "aml_silver = p.save_silver(p.DatasetByName(\"ds01_diabetes\"),df_filtered)\naml_silver.name", "_____no_output_____" ] ], [ [ "### COMPARE `BRONZE vs SILVER`\n- Compare and validate the feature engineering", "_____no_output_____" ] ], [ [ "ds01 = p.DatasetByName(\"ds01_diabetes\")\nbronze_rows = ds01.Bronze.to_pandas_dataframe().shape[0]\nsilver_rows = ds01.Silver.to_pandas_dataframe().shape[0]\n\nprint(\"Bronze: {}\".format(bronze_rows)) # Expected 442 rows\nprint(\"Silver: {}\".format(silver_rows)) # Expected 185 rows (filtered)\n\nassert bronze_rows == 442,\"BRONZE Should have 442 rows to start with, but is {}\".format(bronze_rows)\nassert silver_rows == 185,\"SILVER should have 185 after filtering, but is {}\".format(silver_rows)", "_____no_output_____" ] ], [ [ "## 3b) Save `BRONZE → SILVER` ds02_other", "_____no_output_____" ] ], [ [ "df_edited = p.DatasetByName(\"ds02_other\").Silver.to_pandas_dataframe()\nds02_silver = p.save_silver(p.DatasetByName(\"ds02_other\"),df_edited)\nds02_silver.name", "_____no_output_____" ] ], [ [ "## 3c) Merge all `SILVERS -> then save GOLD`", "_____no_output_____" ] ], [ [ "df_01 = ds01.Silver.to_pandas_dataframe()\ndf_02 = ds02_silver.to_pandas_dataframe()\ndf_gold1_join = df_01.join(df_02) # left join -> NULL on df_02\nprint(\"Diabetes shape: \", df_01.shape)\nprint(df_gold1_join.shape)", "_____no_output_____" ] ], [ [ "# Save `GOLD` v1", "_____no_output_____" ] ], [ [ "print(p.rnd)", "_____no_output_____" ], [ "p.rnd=False # Allow versioning on DATASETS, to have lineage", "_____no_output_____" ], [ "ds_gold_v1 = p.save_gold(df_gold1_join)", "_____no_output_____" ] ], [ [ "### 3c) Ops! \"faulty\" GOLD - too many features", "_____no_output_____" ] ], [ [ "print(p.Gold.to_pandas_dataframe().shape) # 19 features...I want 11", "_____no_output_____" ], [ "print(\"Are we in RnD phase? Or do we have 'versioning on datasets=ON'\")\nprint(\"RnD phase = {}\".format(p.rnd))", "_____no_output_____" ] ], [ [ "# Save `GOLD` v2", "_____no_output_____" ] ], [ [ "# Lets just go with features from ds01\nds_gold_v1 = p.save_gold(df_01)", "_____no_output_____" ] ], [ [ "# Get `GOLD` by version", "_____no_output_____" ] ], [ [ "gold_1 = p.get_gold_version(1)\ngold_1.to_pandas_dataframe().shape # (185, 19)", "_____no_output_____" ], [ "gold_2 = p.get_gold_version(2)\ngold_2.to_pandas_dataframe().shape # (185, 11)", "_____no_output_____" ], [ "p.Gold.to_pandas_dataframe().shape # Latest version (185, 11)", "_____no_output_____" ], [ "df_01_filtered = df_01[df_01.AGE > 0.03807]\nds_gold_v1 = p.save_gold(df_01_filtered)", "_____no_output_____" ], [ "gold_2 = p.get_gold_version(3) # sliced, from latest version\ngold_2.to_pandas_dataframe().shape # (113, 11)", "_____no_output_____" ] ], [ [ "# TRAIN - `AutoMLFactory + ComputeFactory`", "_____no_output_____" ] ], [ [ "from baselayer_azure_ml import AutoMLFactory, ComputeFactory", "_____no_output_____" ], [ "p.dev_test_prod = \"test\"\nprint(\"what environment are we targeting? 
= {}\".format(p.dev_test_prod)) ", "_____no_output_____" ], [ "automl_performance_config = p.get_automl_performance_config()\nautoml_performance_config", "_____no_output_____" ], [ "p.dev_test_prod = \"dev\"\nautoml_performance_config = p.get_automl_performance_config()\nautoml_performance_config", "_____no_output_____" ] ], [ [ "# Get `COMPUTE` for current `ENVIRONMENT`", "_____no_output_____" ] ], [ [ "aml_compute = p.get_training_aml_compute(ws)", "_____no_output_____" ] ], [ [ "# `TRAIN` model -> See other notebook `esml_howto_2_train.ipynb`", "_____no_output_____" ] ], [ [ "from azureml.train.automl import AutoMLConfig\nfrom baselayer_azure_ml import azure_metric_regression\n\nlabel = \"Y\"\ntrain_6, validate_set_2, test_set_2 = p.split_gold_3(0.6,label) # Auto-registerin AZURE (M03_GOLD_TRAIN | M03_GOLD_VALIDATE | M03_GOLD_TEST) # Alt: train,testv= p.Gold.random_split(percentage=0.8, seed=23)\nautoml_config = AutoMLConfig(task = 'regression',\n primary_metric = azure_metric_regression.MAE,\n experiment_exit_score = '0.208', # DEMO purpose\n compute_target = aml_compute,\n training_data = p.GoldTrain, # is 'train_6' pandas dataframe, but as an Azure ML Dataset\n label_column_name = label,\n **automl_performance_config\n )\n\nvia_pipeline = False\nbest_run, fitted_model, experiment = AutoMLFactory(p).train_pipeline(automl_config) if via_pipeline else AutoMLFactory(p).train_as_run(automl_config)", "_____no_output_____" ] ], [ [ "# END", "_____no_output_____" ], [ "# ESML - accelerator\n\n## PROJECT + DATA CONCEPTS + ENTERPRISE Datalake Design + DEV->PROD MLOps\n- `1)ESML Project`: The ONLY thing you need to remember is your `Project number` (and `BRONZE, SILVER, GOLD` concept )\n - ProjectNo=4 have a list of all your datasets as ESMLDatasets. (Well you need to provide names for them also: \"mydata01\", \"mydata02\" - but thats it)\n- `2)Lakedesign & Roles`: Bronze, silver, gold + IN and date folders\n - Benefits: Physical datalake design! onnected to Azure ML Workspace, with autoregistration of `Azure ML Datasets`\n - `Role 1`: `Data ingestion team` only need to care about 1 thing - onboard data to `IN-folder`, in .CSV format\n - `Auto parquet-conversion` from `IN` folder (.CSV) to `OUT`/BRONZE/bronze.PARQUET \n - `Role 2`: `Data scientists` only need to care about 3 things (R/W): `BRONZE, SILVER, GOLD` datasets, all in .PARQUET format\n - How? The ESML project will `Automap` and `Autoregister` Azure ML Datasets - `IN, SILVER, BRONZE, GOLD`\n- `2a) R&D VS Production phase`: \"Latest data\" VS versioning on Datasets and datefolders \n - Benefits \"R&D mode\": Faster RnD phase to onboard and refresh data easy. Also fast \"flip-switch\" to production\n - How? `ESMLDataset is context self aware` - knows when it is used in TRAIN or INFERENCE pipeline\n- `2b) TRAIN vs INFERENCE` versions</u> `Reuse (Bronze->Silver->Gold) pipepline`, for both TRAIN preprocessing, and INFERENCE \n - Benefits: Inference with different MODEL version, on data from the same day/time, (to compare scoring etc)\n - How? ESMLDataset have context self awareness, and `knows WHERE and HOW to load/save data`\n- `2c) BATCH CONFIG`: Turn on/off features on ALL datasets\n - Accelerate setup: `Datadrift, Time series traits, Smart noise, etc`\n - Share refined data back to its \"origin/non-projectbased structure\" easy: \n - ESMLProject.ShareBack(ds.Silver)\n - How? 
ESMLProject controls all ESMLDatasets, in a uniform way\n## ENTERPRISE Deployment of Models & Governance - MLOps at scale\n- `3) DEV->TEST-PROD` (configs, compute, performance)\n - ESML has config for 3 environments: Easy DEPLOY model across subscriptions and Azure ML Studio workspaces \n - Save costs & time: \n - `DEV` has cheaper compute performance for TRAIN and INFERENCE (batch, AKS)\n - `DEV` has Quick-debug ML training (fast training...VS good scoring in TEST and PROD)\n - How? ESML `AutoMLFactory` and `ComputeFactory`\n \n\n### Q&A:\n- Q: Is ESML Machine learning specific? If I only want to refine some data...for integration, or report? \n- A: You can use this for just data refinement also: `Bronze->Silver->Gold` refinement.\n - Benefits: Enterprise security, Read/write to datalake, easy to share refined data. \n - Benefits: The tooling \"glued together\": Azure datafactory + Azure Databricks (and Azure ML Studio pipelines if needed)\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7c35f26fe8202b171e1da3a82603ad5cc0be32d
3,130
ipynb
Jupyter Notebook
GAEMII.ipynb
manuelmarcano22/GeometricAlgebra
6f4545f5a8582a602b560cc3e4b8fb44d085c6f6
[ "MIT" ]
null
null
null
GAEMII.ipynb
manuelmarcano22/GeometricAlgebra
6f4545f5a8582a602b560cc3e4b8fb44d085c6f6
[ "MIT" ]
null
null
null
GAEMII.ipynb
manuelmarcano22/GeometricAlgebra
6f4545f5a8582a602b560cc3e4b8fb44d085c6f6
[ "MIT" ]
null
null
null
39.125
328
0.342173
[ [ [ "## Geometric Algebra\n\n#### A unified language for Physics\n\n<blockquote><small>“. . . it is a good thing to have two ways of looking at a subject, and to admit that there\n are two ways of looking at it.” - James Clerk Maxwell (1831-1879)</small>\n\n <p></p>\n<small>\"...for geometry, you know, is the gate of science and the gate is so low and small that\n one can only enter it as a little child.” - William Kingdom Clifford (1845-1879)</small></blockquote>\n\n\n", "_____no_output_____" ], [ "\n<h3>The Protagonist </h3>\n<div class=\"altpanel\" style=\"height: 9%;\">\n<div>\n<img width=\"150\" src=\"images/grass.jpg\" alt=\"Grenoble INP\" class=\"logo\" /> \n<img width=\"120\" src=\"images/hamiltonstamp.jpg\" alt=\"Grenoble INP\" class=\"logo\" /> \n </div>\n\n <div>\n <img width=\"150\" src=\"images/clif.jpg\" alt=\"Grenoble INP\" class=\"logo\" />\n <img width=\"150\" src=\"images/hestenes.png\" alt=\"G-SCOP\" class=\"logo\" />\n </div>\n</div>\n", "_____no_output_____" ], [ "$$\\gamma_0^2=1, \\gamma_k^2=-1$$\n\nand $$\\gamma_\\mu \\cdot \\gamma_\\mu$$\nand $$\\gamma_\\mu \\cdot \\gamma_\\nu$$\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
e7c36fe2d652b79041091a355173fe52b9eb2718
212,364
ipynb
Jupyter Notebook
Pymaceuticals/pymaceuticals_basco.ipynb
bascomary/matplotlib_challenge
9f0c4a5e8c822e94160ae5e8cfb07403dced0225
[ "ADSL" ]
null
null
null
Pymaceuticals/pymaceuticals_basco.ipynb
bascomary/matplotlib_challenge
9f0c4a5e8c822e94160ae5e8cfb07403dced0225
[ "ADSL" ]
null
null
null
Pymaceuticals/pymaceuticals_basco.ipynb
bascomary/matplotlib_challenge
9f0c4a5e8c822e94160ae5e8cfb07403dced0225
[ "ADSL" ]
null
null
null
53.492191
14,360
0.610273
[ [ [ "# Pymaceuticals Inc.\n---\n\n### Analysis\n* Capomulin and Ramicane showed the smallest tumor volume at the end of the study.\n* There appears to be a correlation between mouse weight and the average tumor volume; as weight increases, tumor volume increases.\n* Capomulin had the lowest IQR, indicating a more narrow spread in the results for this drug regimen. ", "_____no_output_____" ] ], [ [ "# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats as st", "_____no_output_____" ], [ "# Study data files\nmouse_metadata_path = \"data/Mouse_metadata.csv\"\nstudy_results_path = \"data/Study_results.csv\"\n\n# Read the mouse data and the study results\nmouse_metadata = pd.read_csv(mouse_metadata_path)\nstudy_results = pd.read_csv(study_results_path)", "_____no_output_____" ], [ "mouse_metadata.head()", "_____no_output_____" ], [ "study_results.head()", "_____no_output_____" ], [ "# Combine the data into a single dataset\nclinical_trial=pd.merge(study_results, mouse_metadata, how='left')\nclinical_trial.head()", "_____no_output_____" ], [ "clinical_trial.shape", "_____no_output_____" ] ], [ [ "## Summary Statistics", "_____no_output_____" ] ], [ [ "# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen", "_____no_output_____" ], [ "mean_df = clinical_trial.groupby('Drug Regimen').mean().reset_index()\nmean_df = mean_df[['Drug Regimen', 'Tumor Volume (mm3)']]\nmean_df = mean_df.rename(columns={'Tumor Volume (mm3)':'Mean Tumor Volume'})\nmean_df", "_____no_output_____" ], [ "median_df=clinical_trial.groupby('Drug Regimen').median().reset_index()\nmedian_df=median_df[['Drug Regimen', 'Tumor Volume (mm3)']]\nmedian_df=median_df.rename(columns={'Tumor Volume (mm3)':'Median Tumor Volume'})\nmedian_df", "_____no_output_____" ], [ "drug_summary=pd.merge(mean_df, median_df, how=\"inner\")\ndrug_summary", "_____no_output_____" ], [ "variance_df=clinical_trial.groupby('Drug Regimen').var().reset_index()\nvariance_df=variance_df[['Drug Regimen', 'Tumor Volume (mm3)']]\nvariance_df=variance_df.rename(columns={'Tumor Volume (mm3)':'Tumor Volume Variance'})\nvariance_df", "_____no_output_____" ], [ "drug_summary=pd.merge(drug_summary, variance_df, how=\"inner\")\ndrug_summary", "_____no_output_____" ], [ "std_df=clinical_trial.groupby('Drug Regimen').std().reset_index()\nstd_df=std_df[['Drug Regimen', 'Tumor Volume (mm3)']]\nstd_df=std_df.rename(columns={'Tumor Volume (mm3)':'Tumor Volume Std. Dev.'})\nstd_df", "_____no_output_____" ], [ "drug_summary=pd.merge(drug_summary, std_df, how=\"inner\")\ndrug_summary", "_____no_output_____" ], [ "sem_df=clinical_trial.groupby('Drug Regimen').sem().reset_index()\nsem_df=sem_df[['Drug Regimen', 'Tumor Volume (mm3)']]\nsem_df=sem_df.rename(columns={'Tumor Volume (mm3)':'Tumor Volume Std. 
Err.'})\nsem_df", "_____no_output_____" ], [ "drug_summary=pd.merge(drug_summary, sem_df, how=\"inner\")\ndrug_summary", "_____no_output_____" ], [ "drug_count=clinical_trial.groupby('Drug Regimen').count().reset_index()\ndrug_count=drug_count[['Drug Regimen', 'Tumor Volume (mm3)']]\ndrug_count=drug_count.rename(columns={'Tumor Volume (mm3)':'Count'})\ndrug_count", "_____no_output_____" ], [ "drug_summary=pd.merge(drug_summary, drug_count, how=\"inner\")\ndrug_summary = drug_summary.sort_values('Count', ascending=False)\ndrug_summary", "_____no_output_____" ] ], [ [ "## Bar and Pie Charts", "_____no_output_____" ] ], [ [ "# Generate a bar plot showing number of data points for each treatment regimen using pandas\ndrug_summary.sort_values('Count', ascending=False).plot.bar(x=\"Drug Regimen\", y=\"Count\")", "_____no_output_____" ], [ "# Generate a bar plot showing number of data points for each treatment regimen using pyplot\nplt.bar(drug_summary['Drug Regimen'], drug_summary['Count'], color=\"b\", align=\"center\")\nplt.xticks(rotation='vertical')", "_____no_output_____" ], [ "# Create a gender dataframe\ngender_df = clinical_trial.groupby('Sex').count()\ngender_df = gender_df[['Mouse ID']]\ngender_df = gender_df.rename(columns={'Mouse ID':'Gender Count'})\ngender_df", "_____no_output_____" ], [ "# Generate a pie plot showing the distribution of female versus male mice using pandas\ngender_df.plot.pie(subplots=True)", "_____no_output_____" ], [ "# Generate a pie plot showing the distribution of female versus male mice using pyplot\ngenders= ['female', 'male']\nplt.pie(gender_df['Gender Count'], labels=genders, autopct=\"%1.1f%%\")\nplt.axis('equal')\nplt.show()", "_____no_output_____" ] ], [ [ "## Quartiles, Outliers and Boxplots", "_____no_output_____" ] ], [ [ "# Calculate the final tumor volume of each mouse. \ntumor_df = clinical_trial.groupby('Mouse ID').last()\ntumor_df.head()", "_____no_output_____" ], [ "# Calculate the final tumor volume of each mouse in Capomulin treatment regime. \ncapomulin = tumor_df.loc[(tumor_df['Drug Regimen'] == \"Capomulin\"),:]\ncapomulin.head()", "_____no_output_____" ], [ "# Calculate the IQR and quantitatively determine if there are any potential outliers. \ncap_quartiles = capomulin['Tumor Volume (mm3)'].quantile([.25,.5,.75])\ncap_lowerq = cap_quartiles[0.25]\ncap_upperq = cap_quartiles[0.75]\ncap_iqr = cap_upperq-cap_lowerq\n\nprint(f\"The lower quartile of the Capomulin test group is: {cap_lowerq}\")\nprint(f\"The upper quartile of the Capomulin test group is: {cap_upperq}\")\nprint(f\"The interquartile range of the Capomulin test group is: {cap_iqr}\")\nprint(f\"The the median of the Capomulin test group is: {cap_quartiles[0.5]} \")\n\ncap_lower_bound = cap_lowerq - (1.5*cap_iqr)\ncap_upper_bound = cap_upperq + (1.5*cap_iqr)\nprint(f\"Values below {cap_lower_bound} could be outliers.\")\nprint(f\"Values above {cap_upper_bound} could be outliers.\")", "The lower quartile of the Capomulin test group is: 32.37735684\nThe upper quartile of the Capomulin test group is: 40.1592203\nThe interquartile range of the Capomulin test group is: 7.781863460000004\nThe the median of the Capomulin test group is: 38.125164399999996 \nValues below 20.70456164999999 could be outliers.\nValues above 51.83201549 could be outliers.\n" ], [ "# Calculate the final tumor volume of each mouse in Ramicane treatment regime. 
\nramicane = tumor_df.loc[(tumor_df['Drug Regimen'] == \"Ramicane\"),:]\nramicane.head()", "_____no_output_____" ], [ "# Calculate the IQR and quantitatively determine if there are any potential outliers. \nram_quartiles = ramicane['Tumor Volume (mm3)'].quantile([.25,.5,.75])\nram_lowerq = ram_quartiles[0.25]\nram_upperq = ram_quartiles[0.75]\nram_iqr = ram_upperq-ram_lowerq\n\nprint(f\"The lower quartile of the Ramicane test group is: {ram_lowerq}\")\nprint(f\"The upper quartile of the Ramicane test group is: {ram_upperq}\")\nprint(f\"The interquartile range of the Ramicane test group is: {ram_iqr}\")\nprint(f\"The the median of the Ramicane test group is: {ram_quartiles[0.5]} \")\n\nram_lower_bound = ram_lowerq - (1.5*ram_iqr)\nram_upper_bound = ram_upperq + (1.5*ram_iqr)\nprint(f\"Values below {ram_lower_bound} could be outliers.\")\nprint(f\"Values above {ram_upper_bound} could be outliers.\")", "The lower quartile of the Ramicane test group is: 31.56046955\nThe upper quartile of the Ramicane test group is: 40.65900627\nThe interquartile range of the Ramicane test group is: 9.098536719999998\nThe the median of the Ramicane test group is: 36.56165229 \nValues below 17.912664470000003 could be outliers.\nValues above 54.30681135 could be outliers.\n" ], [ "# Calculate the final tumor volume of each mouse in Infubinol treatment regime.\ninfubinol = tumor_df.loc[(tumor_df['Drug Regimen'] == \"Infubinol\"),:]\ninfubinol.head()", "_____no_output_____" ], [ "# Calculate the IQR and quantitatively determine if there are any potential outliers. \ninf_quartiles = infubinol['Tumor Volume (mm3)'].quantile([.25,.5,.75])\ninf_lowerq = inf_quartiles[0.25]\ninf_upperq = inf_quartiles[0.75]\ninf_iqr = inf_upperq-inf_lowerq\n\nprint(f\"The lower quartile of the Infubinol test group is: {inf_lowerq}\")\nprint(f\"The upper quartile of the Infubinol test group is: {inf_upperq}\")\nprint(f\"The interquartile range of the Infubinol test group is: {inf_iqr}\")\nprint(f\"The the median of the Infubinol test group is: {inf_quartiles[0.5]} \")\n\ninf_lower_bound = inf_lowerq - (1.5*inf_iqr)\ninf_upper_bound = inf_upperq + (1.5*inf_iqr)\nprint(f\"Values below {inf_lower_bound} could be outliers.\")\nprint(f\"Values above {inf_upper_bound} could be outliers.\")", "The lower quartile of the Infubinol test group is: 54.04860769\nThe upper quartile of the Infubinol test group is: 65.52574285\nThe interquartile range of the Infubinol test group is: 11.477135160000003\nThe the median of the Infubinol test group is: 60.16518046 \nValues below 36.83290494999999 could be outliers.\nValues above 82.74144559000001 could be outliers.\n" ], [ "# Calculate the final tumor volume of each mouse in Ceftamin treatment regime. \nceftamin = tumor_df.loc[(tumor_df['Drug Regimen'] == \"Ceftamin\"),:]\nceftamin.head()", "_____no_output_____" ], [ "# Calculate the IQR and quantitatively determine if there are any potential outliers. 
\ncef_quartiles = ceftamin['Tumor Volume (mm3)'].quantile([.25,.5,.75])\ncef_lowerq = cef_quartiles[0.25]\ncef_upperq = cef_quartiles[0.75]\ncef_iqr = cef_upperq-cef_lowerq\n\nprint(f\"The lower quartile of the Infubinol test group is: {cef_lowerq}\")\nprint(f\"The upper quartile of the Infubinol test group is: {cef_upperq}\")\nprint(f\"The interquartile range of the Infubinol test group is: {cef_iqr}\")\nprint(f\"The the median of the Infubinol test group is: {cef_quartiles[0.5]} \")\n\ncef_lower_bound = cef_lowerq - (1.5*cef_iqr)\ncef_upper_bound = cef_upperq + (1.5*cef_iqr)\nprint(f\"Values below {cef_lower_bound} could be outliers.\")\nprint(f\"Values above {cef_upper_bound} could be outliers.\")", "The lower quartile of the Infubinol test group is: 48.72207785\nThe upper quartile of the Infubinol test group is: 64.29983003\nThe interquartile range of the Infubinol test group is: 15.577752179999997\nThe the median of the Infubinol test group is: 59.85195552 \nValues below 25.355449580000002 could be outliers.\nValues above 87.66645829999999 could be outliers.\n" ], [ "#Created new dataframe for four drugs of interest\nregimen_of_interest = tumor_df.loc[(tumor_df['Drug Regimen'] == 'Capomulin') |\n (tumor_df['Drug Regimen'] == 'Ramicane') |\n (tumor_df['Drug Regimen'] == 'Infubinol')|\n (tumor_df['Drug Regimen'] == 'Ceftamin')]\nregimen_of_interest", "_____no_output_____" ], [ "# Generate a box plot of the final tumor volume of each mouse across four regimens of interest\nregimen_of_interest.boxplot('Tumor Volume (mm3)', by='Drug Regimen', figsize=(10, 5))\nplt.show", "_____no_output_____" ] ], [ [ "## Line and Scatter Plots", "_____no_output_____" ] ], [ [ "# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin", "_____no_output_____" ], [ "clinical_trial.head()", "_____no_output_____" ], [ "single_mouse = clinical_trial[['Mouse ID', 'Timepoint', 'Tumor Volume (mm3)', 'Drug Regimen']]\nsingle_mouse = single_mouse.loc[(single_mouse['Drug Regimen'] == \"Capomulin\"),:].reset_index()\nsingle_mouse = single_mouse.loc[(single_mouse['Mouse ID'] == \"b128\"),:]\nsingle_mouse", "_____no_output_____" ], [ "# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin\nplt.plot(single_mouse['Timepoint'], single_mouse['Tumor Volume (mm3)'], color='blue', label=\"Mouse treated with Capomulin, Subject b128\")\nplt.ylabel('Tumor Volume (mm3)')\nplt.xlabel('Timepoint')", "_____no_output_____" ], [ "# Create new dataframe \n# Capomulin test group\n\nmouse_treatment = clinical_trial[['Mouse ID', 'Drug Regimen']]\nmouse_treatment", "_____no_output_____" ], [ "mean_mouse = clinical_trial.groupby('Mouse ID').mean().reset_index()\nmean_mouse.head()", "_____no_output_____" ], [ "merged_group=pd.merge(mean_mouse, mouse_treatment, how='inner').reset_index()\nmerged_group.head()", "_____no_output_____" ], [ "capomulin_test_group = merged_group.loc[(merged_group['Drug Regimen'] == \"Capomulin\"),:].reset_index()\ncapomulin_test_group.head()", "_____no_output_____" ], [ "# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen\nweight = capomulin_test_group['Weight (g)']\ntumor = capomulin_test_group['Tumor Volume (mm3)']\nplt.scatter(weight, tumor, marker=\"o\", facecolors=\"red\", edgecolors=\"black\")\nplt.show", "_____no_output_____" ] ], [ [ "## Correlation and Regression", "_____no_output_____" ] ], [ [ "# Calculate the correlation coefficient and linear regression model \n# for mouse weight and 
average tumor volume for the Capomulin regimen", "_____no_output_____" ], [ "vc_slope, vc_int, vc_r, vc_p, vc_std_err = st.linregress(weight, tumor)\nvc_fit = vc_slope * weight + vc_int", "_____no_output_____" ], [ "plt.plot(weight,vc_fit)\nweight = capomulin_test_group['Weight (g)']\ntumor = capomulin_test_group['Tumor Volume (mm3)']\nplt.scatter(weight, tumor, marker=\"o\", facecolors=\"red\", edgecolors=\"black\")\nplt.show", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7c375ae33987f8227435ec2c6d513433aac11fc
183,124
ipynb
Jupyter Notebook
Teste_Algoritmos.ipynb
rauanisanfelice/python-analise-credito
07d09f2f6097168016bd8749f61c9860a508ce79
[ "MIT" ]
2
2019-11-06T19:08:01.000Z
2021-09-20T16:03:17.000Z
Teste_Algoritmos.ipynb
rauanisanfelice/python-analise-credito
07d09f2f6097168016bd8749f61c9860a508ce79
[ "MIT" ]
4
2019-12-05T11:41:48.000Z
2021-09-27T11:55:44.000Z
Teste_Algoritmos.ipynb
rauanisanfelice/python-analise-credito
07d09f2f6097168016bd8749f61c9860a508ce79
[ "MIT" ]
null
null
null
80.956676
228
0.698232
[ [ [ "import pandas as pd\nimport psycopg2", "_____no_output_____" ], [ "conn = psycopg2.connect(dbname=\"postgres\", user=\"postgres\", password=\"docker123\", host=\"localhost\", port=\"5432\")\ncur = conn.cursor()\nsql = 'select * from MACHINE_ANALISE'\npdAnalise = pd.read_sql_query(sql, conn)", "_____no_output_____" ], [ "pdAnalise[\"FaixaRenda\"] = 0\npdAnalise[\"FaixaEtaria\"] = 0\npdAnalise[\"FaixaEmprestimo\"] = 0\npdAnalise", "_____no_output_____" ], [ "pdAnalise = pdAnalise[pdAnalise[\"IDADE\"] > 0]\npdAnalise[pdAnalise[\"IDADE\"] < 0 ]", "_____no_output_____" ], [ "pdAnalise.loc[(pdAnalise[\"RENDA\"] < 20000), \"FaixaRenda\"] = 0\npdAnalise.loc[(pdAnalise[\"RENDA\"] >= 20000) & (pdAnalise[\"RENDA\"] < 30000), \"FaixaRenda\"] = 1\npdAnalise.loc[(pdAnalise[\"RENDA\"] >= 30000) & (pdAnalise[\"RENDA\"] < 40000), \"FaixaRenda\"] = 2\npdAnalise.loc[(pdAnalise[\"RENDA\"] >= 40000) & (pdAnalise[\"RENDA\"] < 50000), \"FaixaRenda\"] = 3\npdAnalise.loc[(pdAnalise[\"RENDA\"] >= 50000) & (pdAnalise[\"RENDA\"] < 60000), \"FaixaRenda\"] = 4\npdAnalise.loc[(pdAnalise[\"RENDA\"] >= 60000) & (pdAnalise[\"RENDA\"] < 70000), \"FaixaRenda\"] = 5\npdAnalise.loc[(pdAnalise[\"RENDA\"] >= 70000), \"FaixaRenda\"] = 6", "_____no_output_____" ], [ "pdAnalise.loc[(pdAnalise[\"IDADE\"]) >= 18 & (pdAnalise[\"IDADE\"] < 25), \"FaixaEtaria\"] = 0\npdAnalise.loc[(pdAnalise[\"IDADE\"]) >= 25 & (pdAnalise[\"IDADE\"] < 60), \"FaixaEtaria\"] = 1\npdAnalise.loc[(pdAnalise[\"IDADE\"] >= 60), \"FaixaEtaria\"] = 2", "_____no_output_____" ], [ "pdAnalise.loc[(pdAnalise[\"EMPRESTIMO\"] < 2000), \"FaixaEmprestimo\"] = 0\npdAnalise.loc[(pdAnalise[\"EMPRESTIMO\"] >= 2000) & (pdAnalise[\"EMPRESTIMO\"] < 4000), \"FaixaEmprestimo\"] = 1\npdAnalise.loc[(pdAnalise[\"EMPRESTIMO\"] >= 4000) & (pdAnalise[\"EMPRESTIMO\"] < 6000), \"FaixaEmprestimo\"] = 2\npdAnalise.loc[(pdAnalise[\"EMPRESTIMO\"] >= 6000) & (pdAnalise[\"EMPRESTIMO\"] < 8000), \"FaixaEmprestimo\"] = 3\npdAnalise.loc[(pdAnalise[\"EMPRESTIMO\"] >= 8000) & (pdAnalise[\"EMPRESTIMO\"] < 10000), \"FaixaEmprestimo\"] = 4\npdAnalise.loc[(pdAnalise[\"EMPRESTIMO\"] >= 12000) & (pdAnalise[\"EMPRESTIMO\"] < 14000), \"FaixaEmprestimo\"] = 5\npdAnalise.loc[(pdAnalise[\"EMPRESTIMO\"] >= 14000) & (pdAnalise[\"EMPRESTIMO\"] < 16000), \"FaixaEmprestimo\"] = 6\npdAnalise.loc[(pdAnalise[\"EMPRESTIMO\"] >= 16000), \"FaixaEmprestimo\"] = 7", "_____no_output_____" ], [ "pdAnalise.describe()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report", "_____no_output_____" ], [ "# VERIFICA PROPORÇÃO DOS RESULTADOS\npdAnalise.groupby('RESULTADO').describe().unstack(1)", "_____no_output_____" ], [ "pdAnalise[pdAnalise[\"id\"] > 2000]", "_____no_output_____" ], [ "#############################################################################\n# KNN ######################################################################\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# ENCONTRA O MELHOR SEED\nmelhorSeed_KNN = 0\nmelhorAcuracia_KNN = 0\n\nfor i in range(0, 1000, 1):\n # AJUSTA PROBLEMA DE BASE DESBALANCEADA\n \n # SEPARA O REGISTRO NOVO QUE FOI IMPORTADO\n dtNovRegistroAnalise = pdAnalise[pdAnalise[\"id\"] > 2000]\n dtAntigosRegistrosAnalise = pdAnalise[pdAnalise[\"id\"] <= 2000]\n\n # SEPARA EM DOIS DF UM SOMENTE CLASSIFICADO COM 0 E OUTRO COM 1\n dtAnaliseZero = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 0]\n dtAnaliseUm = 
dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 1]\n \n # SHUFFLE NO DF\n dtAnaliseZero = dtAnaliseZero.sample(frac=1, random_state=i).reset_index(drop=True)\n\n # DEIXA SOMENTE 1000 REGISTROS COM RESULTADOS IGUAL A 0 PARA NÃO DEIXAR A BASE DESBALANCEADA\n indexZero = dtAnaliseZero[dtAnaliseZero.index > 1000]\n dtAnaliseZero = dtAnaliseZero.drop(indexZero.index, axis=0)\n\n # UNE OS DF NOVAMENTE\n dtFullTratadaAnalise = pd.concat([dtAnaliseUm, dtAnaliseZero])\n\n # SHUFFLE NO DF\n dtFullTratadaAnalise = dtFullTratadaAnalise.sample(frac=1, random_state=1).reset_index(drop=True)\n # dtFullTratadaAnalise\n \n X_train, X_test, y_train, y_test = train_test_split(dtFullTratadaAnalise.iloc[:,5:8], dtFullTratadaAnalise[\"RESULTADO\"], test_size=0.30, random_state=27)\n \n KNN_model = KNeighborsClassifier(n_neighbors=13)\n KNN_model.fit(X_train, y_train)\n KNN_prediction = KNN_model.predict(X_test)\n \n acuracia = round(accuracy_score(KNN_prediction, y_test) * 100,2)\n if acuracia > melhorAcuracia_KNN:\n melhorAcuracia_KNN = acuracia\n melhorSeed_KNN = i\n\nprint(melhorSeed_KNN, melhorAcuracia_KNN)", "905 81.61\n" ], [ "#############################################################################\n# KNN ######################################################################\n# PRINTA TABELA COM O MELHOR SEED\n\n# AJUSTA PROBLEMA DE BASE DESBALANCEADA\n\n# SEPARA O REGISTRO NOVO QUE FOI IMPORTADO\ndtNovRegistroAnalise = pdAnalise[pdAnalise[\"id\"] > 2000]\ndtAntigosRegistrosAnalise = pdAnalise[pdAnalise[\"id\"] <= 2000]\n\n# SEPARA EM DOIS DF UM SOMENTE CLASSIFICADO COM 0 E OUTRO COM 1\ndtAnaliseZero = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 0]\ndtAnaliseUm = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 1]\n\n# SHUFFLE NO DF\ndtAnaliseZero = dtAnaliseZero.sample(frac=1, random_state=melhorSeed_KNN).reset_index(drop=True)\n\n# DEIXA SOMENTE 1000 REGISTROS COM RESULTADOS IGUAL A 0 PARA NÃO DEIXAR A BASE DESBALANCEADA\nindexZero = dtAnaliseZero[dtAnaliseZero.index > 1000]\ndtAnaliseZero = dtAnaliseZero.drop(indexZero.index, axis=0)\n\n# UNE OS DF NOVAMENTE\ndtFullTratadaAnalise = pd.concat([dtAnaliseUm, dtAnaliseZero])\n\n# SHUFFLE NO DF\ndtFullTratadaAnalise = dtFullTratadaAnalise.sample(frac=1, random_state=1).reset_index(drop=True)\n# dtFullTratadaAnalise\n\nX_train, X_test, y_train, y_test = train_test_split(dtFullTratadaAnalise.iloc[:,5:8], dtFullTratadaAnalise[\"RESULTADO\"], test_size=0.30, random_state=27)\n\nKNN_model = KNeighborsClassifier(n_neighbors=13)\nKNN_model.fit(X_train, y_train)\nKNN_prediction = KNN_model.predict(X_test)\n\nprint(classification_report(y_test, KNN_prediction))", " precision recall f1-score support\n\n 0 0.84 0.92 0.88 287\n 1 0.69 0.51 0.58 99\n\n accuracy 0.82 386\n macro avg 0.77 0.71 0.73 386\nweighted avg 0.81 0.82 0.81 386\n\n" ], [ "#############################################################################\n# SVC ######################################################################\n\nfrom sklearn.svm import SVC\nfrom sklearn import svm\n\n# ENCONTRA O MELHOR SEED\nmelhorSeed_SVC = 0\nmelhorAcuracia_SVC = 0\n\nfor i in range(0, 1000, 1):\n # AJUSTA PROBLEMA DE BASE DESBALANCEADA\n \n # SEPARA O REGISTRO NOVO QUE FOI IMPORTADO\n dtNovRegistroAnalise = pdAnalise[pdAnalise[\"id\"] > 2000]\n dtAntigosRegistrosAnalise = pdAnalise[pdAnalise[\"id\"] <= 2000]\n\n # SEPARA EM DOIS DF UM SOMENTE CLASSIFICADO COM 0 E OUTRO COM 1\n dtAnaliseZero = 
dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 0]\n dtAnaliseUm = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 1]\n\n # SHUFFLE NO DF\n dtAnaliseZero = dtAnaliseZero.sample(frac=1, random_state=i).reset_index(drop=True)\n\n # DEIXA SOMENTE 1000 REGISTROS COM RESULTADOS IGUAL A 0 PARA NÃO DEIXAR A BASE DESBALANCEADA\n indexZero = dtAnaliseZero[dtAnaliseZero.index > 1000]\n dtAnaliseZero = dtAnaliseZero.drop(indexZero.index, axis=0)\n\n # UNE OS DF NOVAMENTE\n dtFullTratadaAnalise = pd.concat([dtAnaliseUm, dtAnaliseZero])\n\n # SHUFFLE NO DF\n dtFullTratadaAnalise = dtFullTratadaAnalise.sample(frac=1, random_state=1).reset_index(drop=True)\n # dtFullTratadaAnalise\n \n X_train, X_test, y_train, y_test = train_test_split(dtFullTratadaAnalise.iloc[:,5:8], dtFullTratadaAnalise[\"RESULTADO\"], test_size=0.30, random_state=27)\n \n SVC_model = svm.SVC(gamma='scale')\n SVC_model.fit(X_train, y_train)\n SVC_prediction = SVC_model.predict(X_test)\n \n acuracia = round(accuracy_score(SVC_prediction, y_test) * 100,2)\n if acuracia > melhorAcuracia_SVC:\n melhorAcuracia_SVC = acuracia\n melhorSeed_SVC = i\n\nprint(melhorSeed_SVC, melhorAcuracia_SVC)", "49 78.76\n" ], [ "# AJUSTA PROBLEMA DE BASE DESBALANCEADA\n\n# SEPARA O REGISTRO NOVO QUE FOI IMPORTADO\ndtNovRegistroAnalise = pdAnalise[pdAnalise[\"id\"] > 2000]\ndtAntigosRegistrosAnalise = pdAnalise[pdAnalise[\"id\"] <= 2000]\n\n# SEPARA EM DOIS DF UM SOMENTE CLASSIFICADO COM 0 E OUTRO COM 1\ndtAnaliseZero = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 0]\ndtAnaliseUm = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 1]\n\n# SHUFFLE NO DF\ndtAnaliseZero = dtAnaliseZero.sample(frac=1, random_state=melhorSeed_SVC).reset_index(drop=True)\n\n# DEIXA SOMENTE 1000 REGISTROS COM RESULTADOS IGUAL A 0 PARA NÃO DEIXAR A BASE DESBALANCEADA\nindexZero = dtAnaliseZero[dtAnaliseZero.index > 1000]\ndtAnaliseZero = dtAnaliseZero.drop(indexZero.index, axis=0)\n\n# UNE OS DF NOVAMENTE\ndtFullTratadaAnalise = pd.concat([dtAnaliseUm, dtAnaliseZero])\n\n# SHUFFLE NO DF\ndtFullTratadaAnalise = dtFullTratadaAnalise.sample(frac=1, random_state=1).reset_index(drop=True)\n# dtFullTratadaAnalise\n\nX_train, X_test, y_train, y_test = train_test_split(dtFullTratadaAnalise.iloc[:,5:8], dtFullTratadaAnalise[\"RESULTADO\"], test_size=0.30, random_state=27)\n\nSVC_model = svm.SVC(gamma='scale')\nSVC_model.fit(X_train, y_train)\nSVC_prediction = SVC_model.predict(X_test)\n\nprint(classification_report(y_test, SVC_prediction))", " precision recall f1-score support\n\n 0 0.83 0.91 0.86 287\n 1 0.62 0.44 0.52 99\n\n accuracy 0.79 386\n macro avg 0.72 0.68 0.69 386\nweighted avg 0.77 0.79 0.78 386\n\n" ], [ "#############################################################################\n# NAIVE BAYES ###############################################################\n\nfrom sklearn.naive_bayes import GaussianNB\n\n# ENCONTRA O MELHOR SEED\nmelhorSeed_NAIVE = 0\nmelhorAcuracia_NAIVE = 0\n\nfor i in range(0, 1000, 1):\n # AJUSTA PROBLEMA DE BASE DESBALANCEADA\n \n # SEPARA O REGISTRO NOVO QUE FOI IMPORTADO\n dtNovRegistroAnalise = pdAnalise[pdAnalise[\"id\"] > 2000]\n dtAntigosRegistrosAnalise = pdAnalise[pdAnalise[\"id\"] <= 2000]\n\n # SEPARA EM DOIS DF UM SOMENTE CLASSIFICADO COM 0 E OUTRO COM 1\n dtAnaliseZero = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 0]\n dtAnaliseUm = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 1]\n \n # SHUFFLE NO DF\n 
dtAnaliseZero = dtAnaliseZero.sample(frac=1, random_state=i).reset_index(drop=True)\n\n # DEIXA SOMENTE 1000 REGISTROS COM RESULTADOS IGUAL A 0 PARA NÃO DEIXAR A BASE DESBALANCEADA\n indexZero = dtAnaliseZero[dtAnaliseZero.index > 1000]\n dtAnaliseZero = dtAnaliseZero.drop(indexZero.index, axis=0)\n\n # UNE OS DF NOVAMENTE\n dtFullTratadaAnalise = pd.concat([dtAnaliseUm, dtAnaliseZero])\n\n # SHUFFLE NO DF\n dtFullTratadaAnalise = dtFullTratadaAnalise.sample(frac=1, random_state=1).reset_index(drop=True)\n # dtFullTratadaAnalise\n \n X_train, X_test, y_train, y_test = train_test_split(dtFullTratadaAnalise.iloc[:,5:8], dtFullTratadaAnalise[\"RESULTADO\"], test_size=0.30, random_state=27)\n \n NAIVE_model = GaussianNB()\n NAIVE_model.fit(X_train, y_train)\n NAIVE_prediction = NAIVE_model.predict(X_test)\n \n acuracia = round(accuracy_score(NAIVE_prediction, y_test) * 100,2)\n if acuracia > melhorAcuracia_NAIVE:\n melhorAcuracia_NAIVE = acuracia\n melhorSeed_NAIVE = i\n\nprint(melhorSeed_NAIVE, melhorAcuracia_NAIVE)", "28 37.05\n" ], [ "# AJUSTA PROBLEMA DE BASE DESBALANCEADA\n\n# SEPARA O REGISTRO NOVO QUE FOI IMPORTADO\ndtNovRegistroAnalise = pdAnalise[pdAnalise[\"id\"] > 2000]\ndtAntigosRegistrosAnalise = pdAnalise[pdAnalise[\"id\"] <= 2000]\n\n# SEPARA EM DOIS DF UM SOMENTE CLASSIFICADO COM 0 E OUTRO COM 1\ndtAnaliseZero = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 0]\ndtAnaliseUm = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 1]\n\n# SHUFFLE NO DF\ndtAnaliseZero = dtAnaliseZero.sample(frac=1, random_state=melhorSeed_NAIVE).reset_index(drop=True)\n\n# DEIXA SOMENTE 1000 REGISTROS COM RESULTADOS IGUAL A 0 PARA NÃO DEIXAR A BASE DESBALANCEADA\nindexZero = dtAnaliseZero[dtAnaliseZero.index > 1000]\ndtAnaliseZero = dtAnaliseZero.drop(indexZero.index, axis=0)\n\n# UNE OS DF NOVAMENTE\ndtFullTratadaAnalise = pd.concat([dtAnaliseUm, dtAnaliseZero])\n\n# SHUFFLE NO DF\ndtFullTratadaAnalise = dtFullTratadaAnalise.sample(frac=1, random_state=1).reset_index(drop=True)\n# dtFullTratadaAnalise\n\nX_train, X_test, y_train, y_test = train_test_split(dtFullTratadaAnalise.iloc[:,5:8], dtFullTratadaAnalise[\"RESULTADO\"], test_size=0.30, random_state=27)\n\nNAIVE_model = GaussianNB()\nNAIVE_model.fit(X_train, y_train)\nNAIVE_prediction = NAIVE_model.predict(X_test)\n\nprint(classification_report(y_test, NAIVE_prediction))", " precision recall f1-score support\n\n 0 1.00 0.15 0.27 287\n 1 0.29 1.00 0.45 99\n\n accuracy 0.37 386\n macro avg 0.64 0.58 0.36 386\nweighted avg 0.82 0.37 0.31 386\n\n" ], [ "#############################################################################\n# DECISION TREE CLASSIFIER #################################################\n\nfrom sklearn.tree import DecisionTreeClassifier\n\n# ENCONTRA O MELHOR SEED\nmelhorSeed_TREE = 0\nmelhorAcuracia_TREE = 0\n\nfor i in range(0, 1000, 1):\n # AJUSTA PROBLEMA DE BASE DESBALANCEADA\n \n # SEPARA O REGISTRO NOVO QUE FOI IMPORTADO\n dtNovRegistroAnalise = pdAnalise[pdAnalise[\"id\"] > 2000]\n dtAntigosRegistrosAnalise = pdAnalise[pdAnalise[\"id\"] <= 2000]\n\n # SEPARA EM DOIS DF UM SOMENTE CLASSIFICADO COM 0 E OUTRO COM 1\n dtAnaliseZero = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 0]\n dtAnaliseUm = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 1]\n \n # SHUFFLE NO DF\n dtAnaliseZero = dtAnaliseZero.sample(frac=1, random_state=i).reset_index(drop=True)\n\n # DEIXA SOMENTE 1000 REGISTROS COM RESULTADOS IGUAL A 0 PARA NÃO DEIXAR A BASE 
DESBALANCEADA\n indexZero = dtAnaliseZero[dtAnaliseZero.index > 1000]\n dtAnaliseZero = dtAnaliseZero.drop(indexZero.index, axis=0)\n\n # UNE OS DF NOVAMENTE\n dtFullTratadaAnalise = pd.concat([dtAnaliseUm, dtAnaliseZero])\n\n # SHUFFLE NO DF\n dtFullTratadaAnalise = dtFullTratadaAnalise.sample(frac=1, random_state=1).reset_index(drop=True)\n # dtFullTratadaAnalise\n \n X_train, X_test, y_train, y_test = train_test_split(dtFullTratadaAnalise.iloc[:,5:8], dtFullTratadaAnalise[\"RESULTADO\"], test_size=0.30, random_state=27)\n \n TREE_model = DecisionTreeClassifier(random_state=0)\n TREE_model.fit(X_train, y_train)\n TREE_prediction = TREE_model.predict(X_test)\n \n acuracia = round(accuracy_score(TREE_prediction, y_test) * 100,2)\n if acuracia > melhorAcuracia_TREE:\n melhorAcuracia_TREE = acuracia\n melhorSeed_TREE = i\n\nprint(melhorSeed_TREE, melhorAcuracia_TREE)", "269 80.83\n" ], [ "# AJUSTA PROBLEMA DE BASE DESBALANCEADA\n\n# SEPARA O REGISTRO NOVO QUE FOI IMPORTADO\ndtNovRegistroAnalise = pdAnalise[pdAnalise[\"id\"] > 2000]\ndtAntigosRegistrosAnalise = pdAnalise[pdAnalise[\"id\"] <= 2000]\n\n# SEPARA EM DOIS DF UM SOMENTE CLASSIFICADO COM 0 E OUTRO COM 1\ndtAnaliseZero = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 0]\ndtAnaliseUm = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 1]\n\n# SHUFFLE NO DF\ndtAnaliseZero = dtAnaliseZero.sample(frac=1, random_state=melhorSeed_TREE).reset_index(drop=True)\n\n# DEIXA SOMENTE 1000 REGISTROS COM RESULTADOS IGUAL A 0 PARA NÃO DEIXAR A BASE DESBALANCEADA\nindexZero = dtAnaliseZero[dtAnaliseZero.index > 1000]\ndtAnaliseZero = dtAnaliseZero.drop(indexZero.index, axis=0)\n\n# UNE OS DF NOVAMENTE\ndtFullTratadaAnalise = pd.concat([dtAnaliseUm, dtAnaliseZero])\n\n# SHUFFLE NO DF\ndtFullTratadaAnalise = dtFullTratadaAnalise.sample(frac=1, random_state=1).reset_index(drop=True)\n# dtFullTratadaAnalise\n\nX_train, X_test, y_train, y_test = train_test_split(dtFullTratadaAnalise.iloc[:,5:8], dtFullTratadaAnalise[\"RESULTADO\"], test_size=0.30, random_state=27)\n\nTREE_model = DecisionTreeClassifier(random_state=0)\nTREE_model.fit(X_train, y_train)\nTREE_prediction = TREE_model.predict(X_test)\n\nprint(classification_report(y_test, TREE_prediction))", " precision recall f1-score support\n\n 0 0.84 0.91 0.88 287\n 1 0.66 0.52 0.58 99\n\n accuracy 0.81 386\n macro avg 0.75 0.71 0.73 386\nweighted avg 0.80 0.81 0.80 386\n\n" ], [ "#############################################################################\n# MLP ######################################################################\n\nfrom sklearn.neural_network import MLPClassifier\n\n# ENCONTRA O MELHOR SEED\nmelhorSeed_MLP = 0\nmelhorAcuracia_MLP = 0\n\nfor i in range(0, 500, 1):\n # AJUSTA PROBLEMA DE BASE DESBALANCEADA\n \n # SEPARA O REGISTRO NOVO QUE FOI IMPORTADO\n dtNovRegistroAnalise = pdAnalise[pdAnalise[\"id\"] > 2000]\n dtAntigosRegistrosAnalise = pdAnalise[pdAnalise[\"id\"] <= 2000]\n\n # SEPARA EM DOIS DF UM SOMENTE CLASSIFICADO COM 0 E OUTRO COM 1\n dtAnaliseZero = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 0]\n dtAnaliseUm = dtAntigosRegistrosAnalise[dtAntigosRegistrosAnalise[\"RESULTADO\"] == 1]\n \n # SHUFFLE NO DF\n dtAnaliseZero = dtAnaliseZero.sample(frac=1, random_state=i).reset_index(drop=True)\n\n # DEIXA SOMENTE 1000 REGISTROS COM RESULTADOS IGUAL A 0 PARA NÃO DEIXAR A BASE DESBALANCEADA\n indexZero = dtAnaliseZero[dtAnaliseZero.index > 1000]\n dtAnaliseZero = dtAnaliseZero.drop(indexZero.index, axis=0)\n\n # 
UNE OS DF NOVAMENTE\n dtFullTratadaAnalise = pd.concat([dtAnaliseUm, dtAnaliseZero])\n\n # SHUFFLE NO DF\n dtFullTratadaAnalise = dtFullTratadaAnalise.sample(frac=1, random_state=1).reset_index(drop=True)\n # dtFullTratadaAnalise\n \n X_train, X_test, y_train, y_test = train_test_split(dtFullTratadaAnalise.iloc[:,5:8], dtFullTratadaAnalise[\"RESULTADO\"], test_size=0.30, random_state=27)\n \n MLP_model = MLPClassifier(max_iter=200)\n MLP_model.fit(X_train, y_train)\n MLP_prediction = MLP_model.predict(X_test)\n \n acuracia = round(accuracy_score(MLP_prediction, y_test) * 100,2)\n if acuracia > melhorAcuracia_MLP:\n melhorAcuracia_MLP = acuracia\n melhorSeed_MLP = i\n\nprint(melhorSeed_MLP, melhorAcuracia_MLP)", "/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, 
ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and 
the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n/home/rauan/.local/lib/python3.6/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c37f4972106c81d6c3d9514cc1af68b7da9914
759,616
ipynb
Jupyter Notebook
pySM/scripts/SM-Demo.ipynb
alexandrovteam/pySpatialMetabolomics
d14eef9e7b4bd77dc93d29139bfb54a0a647ff5d
[ "Apache-2.0" ]
10
2016-02-10T21:43:58.000Z
2022-03-22T08:24:25.000Z
pySM/scripts/SM-Demo.ipynb
alexandrovteam/pySpatialMetabolomics
d14eef9e7b4bd77dc93d29139bfb54a0a647ff5d
[ "Apache-2.0" ]
null
null
null
pySM/scripts/SM-Demo.ipynb
alexandrovteam/pySpatialMetabolomics
d14eef9e7b4bd77dc93d29139bfb54a0a647ff5d
[ "Apache-2.0" ]
5
2016-11-17T12:19:25.000Z
2022-01-13T06:47:34.000Z
2,498.736842
289,069
0.950176
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7c39cfcdb888684cba6232b733bdb6099fa81ee
370,286
ipynb
Jupyter Notebook
Exploration/VAE/Notebooks/VAE-CIFAR10.ipynb
svaisakh/aiprojects
cfa17a56066c77519cc0785053ec1828c463f46f
[ "MIT" ]
1
2018-10-31T09:59:06.000Z
2018-10-31T09:59:06.000Z
Exploration/VAE/Notebooks/VAE-CIFAR10.ipynb
svaisakh/aiprojects
cfa17a56066c77519cc0785053ec1828c463f46f
[ "MIT" ]
null
null
null
Exploration/VAE/Notebooks/VAE-CIFAR10.ipynb
svaisakh/aiprojects
cfa17a56066c77519cc0785053ec1828c463f46f
[ "MIT" ]
2
2018-03-20T16:09:05.000Z
2020-03-28T17:43:59.000Z
1,082.707602
32,438
0.943076
[ [ [ "import tensorflow as tf\nimport numpy as np\nimport cifar10\nfrom utils import plot_images\nfrom tfops import fc, flatten, inflate\nfrom time import time\nfrom os.path import exists", "_____no_output_____" ], [ "cifar10.maybe_download_and_extract()\n\ndata_train, _, _ = cifar10.load_training_data()\ndata_test, _, _ = cifar10.load_test_data()", "- Download progress: 100.0%\nDownload finished. Extracting files.\nDone.\nLoading data: data/CIFAR-10/cifar-10-batches-py/data_batch_1\nLoading data: data/CIFAR-10/cifar-10-batches-py/data_batch_2\nLoading data: data/CIFAR-10/cifar-10-batches-py/data_batch_3\nLoading data: data/CIFAR-10/cifar-10-batches-py/data_batch_4\nLoading data: data/CIFAR-10/cifar-10-batches-py/data_batch_5\nLoading data: data/CIFAR-10/cifar-10-batches-py/test_batch\n" ], [ "tf.reset_default_graph()\n\nz_dim = 32\n\nwith tf.name_scope('inputs'):\n x_image = tf.placeholder(tf.float32, (None, 32, 32, 3), 'x')\n epsilon = tf.placeholder(tf.float32, (None, z_dim), 'epsilon')\n \nwith tf.name_scope('encoder'):\n x = flatten(x_image)\n z_mean = fc(x, z_dim, 'sigmoid', 'z_mean')\n z_std = fc(x, z_dim, 'sigmoid', 'z_std')\n with tf.name_scope('latent-space'):\n z = epsilon * z_std + z_mean\n \nwith tf.name_scope('decoder'):\n x_gen = fc(z, 3072, 'sigmoid', 'decode')\n x_gen = inflate(x_gen, (32, 32))\n \nwith tf.name_scope('optimize'):\n generation_loss = tf.reduce_mean((x_image - x_gen)**2, name='generation_loss')\n tf.summary.scalar('generation_loss', generation_loss)\n \n latent_loss = tf.reduce_mean(0.5 * tf.reduce_sum(z_mean ** 2 + z_std ** 2 - tf.log(z_std ** 2) - 1, axis=1))\n tf.summary.scalar('latent_loss', latent_loss)\n \n loss = generation_loss + 1e-3 * latent_loss\n tf.summary.scalar('loss', loss)\n \n optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)\n \nsumm = tf.summary.merge_all()", "_____no_output_____" ], [ "def plot_reconstructions(session):\n img_idx = np.random.randint(0, len(data_test), 5)\n noise = np.random.randn(5, z_dim)\n \n original_img = data_test[img_idx, :, :, :]\n generated_img = session.run(x_gen, {x_image: original_img, epsilon: noise})\n \n plot_images(original_img)\n plot_images(generated_img)", "_____no_output_____" ], [ "def plot_generated_img(session):\n noise = np.random.randn(11, z_dim)\n\n generated_img = session.run(x_gen, {z: noise})\n \n plot_images(generated_img)", "_____no_output_____" ], [ "batch_size = 256\nbatches_per_epoch = int(len(data_train) / batch_size)\n\ndef optimize(epochs):\n start_time = time()\n \n with tf.Session() as sess:\n writer = tf.summary.FileWriter('output/VAE-CIFAR10')\n writer.add_graph(tf.get_default_graph())\n \n saver = tf.train.Saver()\n \n sess.run(tf.global_variables_initializer())\n for epoch in range(epochs):\n for batch in range(batches_per_epoch):\n x_batch = data_test[np.random.choice(len(data_test), batch_size, replace=False), :, :, :]\n noise = np.random.randn(batch_size, z_dim)\n sess.run(optimizer, {x_image: x_batch, epsilon: noise})\n \n if batch % 1000 == 0:\n writer.add_summary(sess.run(summ, {x_image: x_batch, epsilon: noise}), global_step=epoch * batches_per_epoch + batch)\n \n print(\"{} / {} ({}%)\".format(epoch + 1, epochs, np.round((epoch + 1) / epochs * 100, 2)))\n plot_reconstructions(sess)\n \n saver.save(sess, 'checkpoints/VAE-CIFAR10/VAE-CIFAR10', write_meta_graph=False)\n \n print(\"Time taken - {}s\".format(np.round(time() - start_time, 2)))", "_____no_output_____" ], [ "if exists('checkpoints/VAE-CIFAR10/VAE-CIFAR10.data-00000-of-00001'):\n with tf.Session() as 
sess:\n saver = tf.train.Saver()\n saver.restore(sess, 'checkpoints/VAE-CIFAR10/VAE-CIFAR10')\n \n print(\"Reconstructions:\")\n plot_reconstructions(sess)\n \n print(\"Generated:\")\n for _ in range(10):\n plot_generated_img(sess)\nelse:\n optimize(50)", "INFO:tensorflow:Restoring parameters from checkpoints/VAE-CIFAR10/VAE-CIFAR10\nReconstructions:\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7c3a165b2207d9120f3df9a69bb0107bbc34881
48,890
ipynb
Jupyter Notebook
Data Analysis - Dealing with Null Values Categories/Data Preprocessing - Dealing with Null values Categories.ipynb
damanpreetkour/Data-Analysis-Course-ETG
7af89d4b1d07bc27ab26f666e3e016165ca13a70
[ "Apache-2.0" ]
6
2021-09-04T18:24:54.000Z
2022-02-19T09:51:58.000Z
Data Analysis - Dealing with Null Values Categories/Data Preprocessing - Dealing with Null values Categories.ipynb
damanpreetkour/Data-Analysis-Course-ETG
7af89d4b1d07bc27ab26f666e3e016165ca13a70
[ "Apache-2.0" ]
null
null
null
Data Analysis - Dealing with Null Values Categories/Data Preprocessing - Dealing with Null values Categories.ipynb
damanpreetkour/Data-Analysis-Course-ETG
7af89d4b1d07bc27ab26f666e3e016165ca13a70
[ "Apache-2.0" ]
10
2021-09-13T14:21:25.000Z
2022-02-06T21:58:03.000Z
36.981846
93
0.331786
[ [ [ "import numpy as np\nimport pandas as pd\n\nfrom sklearn.impute import SimpleImputer\n\nimputer = SimpleImputer(missing_values = np.nan, strategy = 'most_frequent')", "_____no_output_____" ], [ "df = pd.read_csv('googleplaystore.csv')", "_____no_output_____" ], [ "df.drop(['Last Updated', 'Current Ver', 'Android Ver'], axis = 1, inplace = True)", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "rating = np.reshape(df['Rating'].values, (10841,1))\nimputer.fit(rating)", "_____no_output_____" ], [ "df.iloc[ : , 2:3 ] = imputer.transform(rating)", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "type_ = np.reshape(df['Type'].values, (10841,1))\nimputer.fit(type_)", "_____no_output_____" ], [ "df.iloc[ : , 6:7 ] = imputer.transform(type_)", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "content_rating = np.reshape(df['Content Rating'].values, (10841,1))\nimputer.fit(content_rating)\n\ndf.iloc[ : , 8:9] = imputer.transform(content_rating)", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "type(df['Rating'].values)", "_____no_output_____" ], [ "df", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c3ac4be2bd205721bfea540eeecadf60d48863
233,268
ipynb
Jupyter Notebook
python/Apartment_Search.ipynb
Xenovortex/ors-example
588a6e2c3d25b34f9a75360138dc26aba387de81
[ "Apache-2.0" ]
null
null
null
python/Apartment_Search.ipynb
Xenovortex/ors-example
588a6e2c3d25b34f9a75360138dc26aba387de81
[ "Apache-2.0" ]
null
null
null
python/Apartment_Search.ipynb
Xenovortex/ors-example
588a6e2c3d25b34f9a75360138dc26aba387de81
[ "Apache-2.0" ]
null
null
null
700.504505
205,556
0.977155
[ [ [ "In this notebook we'll provide an example for using different openrouteservice API's to help you look for an apartment.", "_____no_output_____" ] ], [ [ "mkdir ors-apartment\nconda create -n ors-apartment python=3.6 shapely\ncd ors-apartment\npip install openrouteservice ortools folium", "_____no_output_____" ], [ "import folium\n\nfrom openrouteservice import client", "_____no_output_____" ] ], [ [ "We have just moved to San Francisco with our kids and are looking for the perfect location to get a new home. Our geo intuition tells us we have to look at the data to come to this important decision. So we decide to geek it up a bit.", "_____no_output_____" ], [ "# Apartment isochrones", "_____no_output_____" ], [ "There are three different suggested locations for our new home. Let's visualize them and the 15 minute walking radius on a map:", "_____no_output_____" ] ], [ [ "api_key = '' #Provide your personal API key\nclnt = client.Client(key=api_key) \n# Set up folium map\nmap1 = folium.Map(tiles='Stamen Toner', location=([37.738684, -122.450523]), zoom_start=12)\n\n# Set up the apartment dictionary with real coordinates\napt_dict = {'first': {'location': [-122.430954, 37.792965]},\n 'second': {'location': [-122.501636, 37.748653]},\n 'third': {'location': [-122.446629, 37.736928]}\n }\n\n# Request of isochrones with 15 minute footwalk.\nparams_iso = {'profile': 'foot-walking',\n 'intervals': [900], # 900/60 = 15 mins\n 'segments': 900,\n 'attributes': ['total_pop'] # Get population count for isochrones\n }\n\nfor name, apt in apt_dict.items():\n params_iso['locations'] = apt['location'] # Add apartment coords to request parameters\n apt['iso'] = clnt.isochrones(**params_iso) # Perform isochrone request\n folium.features.GeoJson(apt['iso']).add_to(map1) # Add GeoJson to map\n \n folium.map.Marker(list(reversed(apt['location'])), # reverse coords due to weird folium lat/lon syntax\n icon=folium.Icon(color='lightgray',\n icon_color='#cc0000',\n icon='home',\n prefix='fa',\n ),\n popup=name,\n ).add_to(map1) # Add apartment locations to map\n\nmap1", "_____no_output_____" ] ], [ [ "# POIs around apartments", "_____no_output_____" ], [ "For the ever-styled foodie parents we are, we need to have the 3 basic things covered: kindergarten, supermarket and hair dresser. Let's see what options we got around our apartments:", "_____no_output_____" ] ], [ [ "# Common request parameters\nparams_poi = {'request': 'pois',\n 'sortby': 'distance'}\n\n# POI categories according to \n# https://github.com/GIScience/openrouteservice-docs#places-response\ncategories_poi = {'kindergarten': [153],\n 'supermarket': [518],\n 'hairdresser': [395]}\n\nfor name, apt in apt_dict.items():\n apt['categories'] = dict() # Store in pois dict for easier retrieval\n params_poi['geojson'] = apt['iso']['features'][0]['geometry']\n print(\"\\n{} apartment\".format(name))\n \n for typ, category in categories_poi.items():\n params_poi['filter_category_ids'] = category\n apt['categories'][typ] = dict()\n apt['categories'][typ]['geojson']= clnt.places(**params_poi)['features'] # Actual POI request\n print(\"\\t{}: {}\".format(typ, # Print amount POIs\n len(apt['categories'][typ]['geojson'])))", "\nfirst apartment\n\tkindergarten: 1\n\tsupermarket: 8\n\thairdresser: 10\n\nsecond apartment\n\tkindergarten: 3\n\tsupermarket: 1\n\thairdresser: 4\n\nthird apartment\n\tkindergarten: 1\n\tsupermarket: 3\n\thairdresser: 2\n" ] ], [ [ "So, all apartments meet all requirements. 
Seems like we have to drill down further.", "_____no_output_____" ], [ "# Routing from apartments to POIs", "_____no_output_____" ], [ "To decide on a place, we would like to know from which apartment we can reach all required POI categories the quickest. So, first we look at the distances from each apartment to the respective POIs.", "_____no_output_____" ] ], [ [ "# Set up common request parameters\nparams_route = {'profile': 'foot-walking',\n 'format_out': 'geojson',\n 'geometry': 'true',\n 'geometry_format': 'geojson',\n 'instructions': 'false',\n }\n\n# Set up dict for font-awesome\nstyle_dict = {'kindergarten': 'child',\n 'supermarket': 'shopping-cart',\n 'hairdresser': 'scissors'\n }\n\n# Store all routes from all apartments to POIs\nfor apt in apt_dict.values():\n for cat, pois in apt['categories'].items():\n pois['durations'] = []\n for poi in pois['geojson']:\n poi_coords = poi['geometry']['coordinates']\n \n # Perform actual request\n params_route['coordinates'] = [apt['location'],\n poi_coords\n ]\n json_route = clnt.directions(**params_route)\n \n folium.features.GeoJson(json_route).add_to(map1)\n folium.map.Marker(list(reversed(poi_coords)),\n icon=folium.Icon(color='white',\n icon_color='#1a1aff',\n icon=style_dict[cat],\n prefix='fa'\n )\n ).add_to(map1)\n \n poi_duration = json_route['features'][0]['properties']['summary'][0]['duration']\n pois['durations'].append(poi_duration) # Record durations of routes\n \nmap1", "_____no_output_____" ] ], [ [ "# Quickest route to all POIs", "_____no_output_____" ], [ "Now, we only need to determine which apartment is closest to all POI categories.", "_____no_output_____" ] ], [ [ "# Sum up the closest POIs to each apartment\nfor name, apt in apt_dict.items():\n apt['shortest_sum'] = sum([min(cat['durations']) for cat in apt['categories'].values()])\n print(\"{} apartments: {} mins\".format(name,\n apt['shortest_sum']/60\n )\n )", "first apartments: 37.09 mins\nsecond apartments: 40.325 mins\nthird apartments: 35.315000000000005 mins\n" ] ], [ [ "# We got a winner!", "_____no_output_____" ], [ "Finally, it looks like the 3rd apartment is the one where we would need to walk the shortest amount of time to reach a kindergarten, supermarket and a hair dresser. Let's pack those boxes and welcome to San Francisco.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7c3b7aac65ca7cb607d1aeaa7eaa6b1232d9836
248,403
ipynb
Jupyter Notebook
api-examples/El_Nino_animations.ipynb
steffenmodest/notebooks
e219758cd944f31355acf21c263924f396cdbe67
[ "MIT" ]
87
2016-04-26T15:56:23.000Z
2022-03-09T12:11:43.000Z
api-examples/El_Nino_animations.ipynb
steffenmodest/notebooks
e219758cd944f31355acf21c263924f396cdbe67
[ "MIT" ]
4
2019-04-23T13:43:40.000Z
2021-04-28T15:42:51.000Z
api-examples/El_Nino_animations.ipynb
steffenmodest/notebooks
e219758cd944f31355acf21c263924f396cdbe67
[ "MIT" ]
51
2016-07-10T16:53:48.000Z
2022-03-16T09:35:49.000Z
781.141509
238,846
0.94052
[ [ [ "# Making El Nino Animations\n\nEl Nino is the warm phase of __[El Niño–Southern Oscillation (ENSO)](https://en.wikipedia.org/wiki/El_Ni%C3%B1o%E2%80%93Southern_Oscillation)__. It is a part of a routine climate pattern that occurs when sea surface temperatures in the tropical Pacific Ocean rise to above-normal levels for an extended period of time.\n\nIn this Notebook we show how to make animations of sea surface temperature anomalies using __[NOAA 1/4° Daily Optimum Interpolation Sea Surface Temperature (Daily OISST) dataset (NOAA OISST](https://data.planetos.com/datasets/noaa_oisst_daily_1_4)__\n\n\n_API documentation is available at http://docs.planetos.com. If you have questions or comments, join the Planet OS Slack community to chat with our development team. For general information on usage of IPython/Jupyter and Matplotlib, please refer to their corresponding documentation. https://ipython.org/ and http://matplotlib.org/_\n\n__This Notebook is running on Python3.__", "_____no_output_____" ] ], [ [ "import os\nfrom dh_py_access import package_api\nimport dh_py_access.lib.datahub as datahub\nimport xarray as xr\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport imageio\nimport shutil\nimport datetime\nimport matplotlib as mpl\nmpl.rcParams['font.family'] = 'Avenir Lt Std'\nmpl.rcParams.update({'font.size': 25})", "_____no_output_____" ] ], [ [ "<font color='red'>Please put your datahub API key into a file called APIKEY and place it to the notebook folder or assign your API key directly to the variable API_key!</font>", "_____no_output_____" ] ], [ [ "API_key = open('APIKEY').read().strip()\nserver='api.planetos.com/'\nversion = 'v1'", "_____no_output_____" ] ], [ [ "This is a part where you should change the time period if you want to get animation of different time frame. Strongest __[El Nino years](http://ggweather.com/enso/oni.htm)__ have been 1982-83, 1997-98 and 2015-16. However, El Nino have occured more frequently. NOAA OISST dataset in Planet OS Datahub starts from 2008, however, we can extend the period if requested (since 1981 September). Feel free to change time_start and time_end to see how anomalies looked like on different years. You can find year when El Nino was present from __[here](http://ggweather.com/enso/oni.htm)__.", "_____no_output_____" ] ], [ [ "time_start = '2016-01-01T00:00:00'\ntime_end = '2016-03-10T00:00:00'\ndataset_key = 'noaa_oisst_daily_1_4'\nvariable = 'anom'\narea = 'pacific'\nlatitude_north = 40; latitude_south = -40\nlongitude_west = -180; longitude_east = -77\nanim_name = variable + '_animation_' + str(datetime.datetime.strptime(time_start,'%Y-%m-%dT%H:%M:%S').year) + '.mp4' ", "_____no_output_____" ] ], [ [ "## Download the data with package API\n- Create package objects\n- Send commands for the package creation\n- Download the package files\n", "_____no_output_____" ] ], [ [ "dh=datahub.datahub(server,version,API_key)\npackage = package_api.package_api(dh,dataset_key,variable,longitude_west,longitude_east,latitude_south,latitude_north,time_start,time_end,area_name=area)\npackage.make_package()\npackage.download_package()", "Package exists\n" ] ], [ [ "Here we are using xarray to read in the data. We will also rewrite longitude coordinates as they are from 0-360 at first, but Basemap requires longitude -180 to 180. 
", "_____no_output_____" ] ], [ [ "dd1 = xr.open_dataset(package.local_file_name)\ndd1['lon'] = ((dd1.lon+180) % 360) - 180", "_____no_output_____" ] ], [ [ "We like to use Basemap to plot data on it. Here we define the area. You can find more information and documentation about Basemap __[here](https://matplotlib.org/basemap/)__.", "_____no_output_____" ] ], [ [ "m = Basemap(projection='merc', lat_0 = 0, lon_0 = (longitude_east + longitude_west)/2,\n resolution = 'l', area_thresh = 0.05,\n llcrnrlon=longitude_west, llcrnrlat=latitude_south,\n urcrnrlon=longitude_east, urcrnrlat=latitude_north)\n\nlons,lats = np.meshgrid(dd1.lon,dd1.lat)\nlonmap,latmap = m(lons,lats)", "_____no_output_____" ] ], [ [ "Below we make local folder where we save images. These are the images we will use for animation. No worries, in the end, we will delete the folder from your system. ", "_____no_output_____" ] ], [ [ "folder = './ani/'\nif not os.path.exists(folder):\n os.mkdir(folder)", "_____no_output_____" ] ], [ [ "Now it is time to make images from every time step. Let's also show first time step here:", "_____no_output_____" ] ], [ [ "vmin = -5; vmax = 5\nfor k in range(0,len(dd1[variable])):\n filename = folder + 'ani_' + str(k).rjust(3,'0') + '.png'\n \n fig=plt.figure(figsize=(12,10))\n ax = fig.add_subplot(111)\n pcm = m.pcolormesh(lonmap,latmap,dd1[variable][k,0].data,vmin = vmin, vmax = vmax,cmap='bwr')\n m.fillcontinents(color='#58606F')\n m.drawcoastlines(color='#222933')\n m.drawcountries(color='#222933')\n m.drawstates(color='#222933')\n parallels = np.arange(-40.,41,40)\n # labels = [left,right,top,bottom]\n m.drawparallels(parallels,labels=[True,False,True,False])\n #meridians = np.arange(10.,351.,2.)\n #m.drawmeridians(meridians,labels=[True,False,False,True])\n cbar = plt.colorbar(pcm,fraction=0.035, pad=0.03)\n ttl = plt.title('SST Anomaly ' + str(dd1[variable].time[k].data)[:-19],fontweight = 'bold')\n ttl.set_position([.5, 1.05])\n if not os.path.exists(folder):\n os.mkdir(folder)\n plt.savefig(filename)\n if k == 0:\n plt.show()\n plt.close()\n ", "_____no_output_____" ] ], [ [ "This is part where we are making animation. ", "_____no_output_____" ] ], [ [ "files = sorted(os.listdir(folder))\nfileList = []\nfor file in files:\n if not file.startswith('.'):\n complete_path = folder + file\n fileList.append(complete_path)\n\nwriter = imageio.get_writer(anim_name, fps=4)\n\nfor im in fileList:\n writer.append_data(imageio.imread(im))\nwriter.close()\nprint ('Animation is saved as ' + anim_name + ' under current working directory')", "Animation is saved as anom_animation_2016.mp4 under current working directory\n" ] ], [ [ "And finally, we will delete folder where images where saved. Now you just have animation in your working directory. ", "_____no_output_____" ] ], [ [ "shutil.rmtree(folder)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7c3e689df448fc9b0f0f3be524f2b4fe45fdab5
2,072
ipynb
Jupyter Notebook
notebooks/Velocity.ipynb
martinRenou/xleaflet
2e445fca772e82760f77340f020c83cb8d1e1cbe
[ "BSD-3-Clause" ]
36
2018-04-14T15:15:43.000Z
2019-12-22T22:16:11.000Z
notebooks/Velocity.ipynb
martinRenou/xleaflet
2e445fca772e82760f77340f020c83cb8d1e1cbe
[ "BSD-3-Clause" ]
9
2018-04-20T08:50:08.000Z
2020-01-18T01:12:45.000Z
notebooks/Velocity.ipynb
martinRenou/xleaflet
2e445fca772e82760f77340f020c83cb8d1e1cbe
[ "BSD-3-Clause" ]
8
2018-04-20T18:06:04.000Z
2019-10-06T11:49:10.000Z
21.583333
95
0.489865
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7c3e88199ee6d58d6788140fcef64af32f2cae9
905
ipynb
Jupyter Notebook
pset_loops/shapes/solutions/nb/p1.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
5
2019-04-08T20:05:37.000Z
2019-12-04T20:48:45.000Z
pset_loops/shapes/solutions/nb/p1.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
8
2019-04-15T15:16:05.000Z
2022-02-12T10:33:32.000Z
pset_loops/shapes/solutions/nb/p1.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
2
2019-04-10T00:14:42.000Z
2020-02-26T20:35:21.000Z
19.255319
126
0.41105
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7c3f7965d014ac6309c026a43280d6719e2bbef
85,627
ipynb
Jupyter Notebook
Task_2_Clustering.ipynb
BakkeshAS/GRIP_Task_2_Predict_Optimum_Clusters
99a69bc9e769afceec943cfbb18f849ecf35d19f
[ "Apache-2.0" ]
null
null
null
Task_2_Clustering.ipynb
BakkeshAS/GRIP_Task_2_Predict_Optimum_Clusters
99a69bc9e769afceec943cfbb18f849ecf35d19f
[ "Apache-2.0" ]
null
null
null
Task_2_Clustering.ipynb
BakkeshAS/GRIP_Task_2_Predict_Optimum_Clusters
99a69bc9e769afceec943cfbb18f849ecf35d19f
[ "Apache-2.0" ]
null
null
null
244.648571
46,510
0.892791
[ [ [ "# Task 2: Prediction using Unsupervised ML - K- Means Clustering\n\n", "_____no_output_____" ], [ "## Importing the libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Importing the dataset", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('/content/Iris.csv')\ndataset.head()", "_____no_output_____" ], [ "dataset['Species'].describe()", "_____no_output_____" ] ], [ [ "#### Determining K - number of clusters", "_____no_output_____" ] ], [ [ "x = dataset.iloc[:, [0, 1, 2, 3]].values\n\nfrom sklearn.cluster import KMeans\nwcss = []\n\nfor i in range(1, 15):\n kmeans = KMeans(n_clusters = i, init = 'k-means++', \n max_iter = 300, n_init = 10, random_state = 0)\n kmeans.fit(x)\n wcss.append(kmeans.inertia_)\n ", "_____no_output_____" ] ], [ [ "#### Plotting the results - observe 'The elbow'", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(16,8))\nplt.style.use('ggplot')\nplt.plot(range(1, 15), wcss)\nplt.title('The elbow method')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS') # Within cluster sum of squares\nplt.show()", "_____no_output_____" ] ], [ [ "#### Creating the kmeans classifier with K = 3", "_____no_output_____" ] ], [ [ "kmeans = KMeans(n_clusters = 3, init = 'k-means++',\n max_iter = 300, n_init = 10, random_state = 0)\ny_kmeans = kmeans.fit_predict(x)", "_____no_output_____" ] ], [ [ "#### Visualising the clusters - On the first two columns", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(14,10))\nplt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1], \n s = 100, c = 'tab:orange', label = 'Iris-setosa')\nplt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1], \n s = 100, c = 'tab:blue', label = 'Iris-versicolour')\nplt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1],\n s = 100, c = 'tab:green', label = 'Iris-virginica')\n\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1], \n s = 100, c = 'black', label = 'Centroids')\n\nplt.title('Clusters K = 3')\nplt.legend(loc = 'upper left', ncol = 2)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7c405589916076104438b67b30c2d712c70a147
23,202
ipynb
Jupyter Notebook
index.ipynb
trailmarkerlib/dataExplore
529edd4f9eff75753ce89006f8a5b840456b9211
[ "BSD-3-Clause" ]
null
null
null
index.ipynb
trailmarkerlib/dataExplore
529edd4f9eff75753ce89006f8a5b840456b9211
[ "BSD-3-Clause" ]
null
null
null
index.ipynb
trailmarkerlib/dataExplore
529edd4f9eff75753ce89006f8a5b840456b9211
[ "BSD-3-Clause" ]
null
null
null
147.783439
20,032
0.895268
[ [ [ "# Welcome to an example Binder", "_____no_output_____" ], [ "## Test markup", "_____no_output_____" ], [ "This notebook uses a Python environment with a few libraries, including `dask`, all of which were specificied using a `conda` [environment.yml](../edit/environment.yml) file. To demo the environment, we'll show a simplified example of using `dask` to analyze time series data, adapted from Matthew Rocklin's excellent repo of [dask examples](https://github.com/blaze/dask-examples) — check out that repo for the full version (and many other examples).", "_____no_output_____" ], [ "## Setup plotting", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "## Turn on a global progress bar", "_____no_output_____" ] ], [ [ "from dask.diagnostics import ProgressBar", "_____no_output_____" ], [ "progress_bar = ProgressBar()\nprogress_bar.register()", "_____no_output_____" ] ], [ [ "## Generate fake data", "_____no_output_____" ] ], [ [ "import dask.dataframe as dd", "_____no_output_____" ], [ "df = dd.demo.make_timeseries(start='2000', end='2015', dtypes={'A': float, 'B': int},\n freq='5s', partition_freq='3M', seed=1234)", "_____no_output_____" ] ], [ [ "## Compute and plot a cumulative sum", "_____no_output_____" ] ], [ [ "df.A.cumsum().resample('1w').mean().compute().plot();", "[########################################] | 100% Completed | 16.5s\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
e7c411d4f2c25b9eb8b548080828e049d8d9cb7a
11,263
ipynb
Jupyter Notebook
notebooks/chapter03_notebook/07_webcam_py2.ipynb
khanparwaz/PythonProjects
3f5c7bf7780b235ad45f8d3f7dd5b05d6b382a2d
[ "BSD-2-Clause" ]
820
2015-01-01T18:15:54.000Z
2022-03-06T16:15:07.000Z
notebooks/chapter03_notebook/07_webcam_py2.ipynb
khanparwaz/PythonProjects
3f5c7bf7780b235ad45f8d3f7dd5b05d6b382a2d
[ "BSD-2-Clause" ]
31
2015-02-25T22:08:09.000Z
2018-09-28T08:41:38.000Z
notebooks/chapter03_notebook/07_webcam_py2.ipynb
khanparwaz/PythonProjects
3f5c7bf7780b235ad45f8d3f7dd5b05d6b382a2d
[ "BSD-2-Clause" ]
483
2015-01-02T13:53:11.000Z
2022-03-18T21:05:16.000Z
33.520833
468
0.490278
[ [ [ "> This is one of the 100 recipes of the [IPython Cookbook](http://ipython-books.github.io/), the definitive guide to high-performance scientific computing and data science in Python.\n", "_____no_output_____" ], [ "# 3.7. Processing webcam images in real-time from the notebook", "_____no_output_____" ], [ "In this recipe, we show how to communicate data in both directions from the notebook to the Python kernel, and conversely. Specifically, we will retrieve the webcam feed from the browser using HTML5's `<video>` element, and pass it to Python in real time using the interactive capabilities of the IPython notebook 2.0+. This way, we can process the image in Python with an edge detector (implemented in scikit-image), and display it in the notebook in real time.", "_____no_output_____" ], [ "Most of the code for this recipe comes from [Jason Grout's example](https://github.com/jasongrout/ipywidgets).", "_____no_output_____" ], [ "1. We need to import quite a few modules.", "_____no_output_____" ] ], [ [ "from IPython.html.widgets import DOMWidget\nfrom IPython.utils.traitlets import Unicode, Bytes, Instance\nfrom IPython.display import display\n\nfrom skimage import io, filter, color\nimport urllib\nimport base64\nfrom PIL import Image\nimport StringIO\nimport numpy as np\nfrom numpy import array, ndarray\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "2. We define two functions to convert images from and to base64 strings. This conversion is a common way to pass binary data between processes (here, the browser and Python).", "_____no_output_____" ] ], [ [ "def to_b64(img):\n imgdata = StringIO.StringIO()\n pil = Image.fromarray(img)\n pil.save(imgdata, format='PNG')\n imgdata.seek(0)\n return base64.b64encode(imgdata.getvalue())", "_____no_output_____" ], [ "def from_b64(b64):\n im = Image.open(StringIO.StringIO(base64.b64decode(b64)))\n return array(im)", "_____no_output_____" ] ], [ [ "3. We define a Python function that will process the webcam image in real time. It accepts and returns a NumPy array. This function applies an edge detector with the `roberts()` function in scikit-image.", "_____no_output_____" ] ], [ [ "def process_image(image):\n img = filter.roberts(image[:,:,0]/255.)\n return (255-img*255).astype(np.uint8)", "_____no_output_____" ] ], [ [ "4. Now, we create a custom widget to handle the bidirectional communication of the video flow from the browser to Python and reciprocally.", "_____no_output_____" ] ], [ [ "class Camera(DOMWidget):\n _view_name = Unicode('CameraView', sync=True)\n \n # This string contains the base64-encoded raw\n # webcam image (browser -> Python).\n imageurl = Unicode('', sync=True)\n \n # This string contains the base64-encoded processed \n # webcam image(Python -> browser).\n imageurl2 = Unicode('', sync=True)\n\n # This function is called whenever the raw webcam\n # image is changed.\n def _imageurl_changed(self, name, new):\n head, data = new.split(',', 1)\n if not data:\n return\n \n # We convert the base64-encoded string\n # to a NumPy array.\n image = from_b64(data)\n \n # We process the image.\n image = process_image(image)\n \n # We convert the processed image\n # to a base64-encoded string.\n b64 = to_b64(image)\n \n self.imageurl2 = 'data:image/png;base64,' + b64", "_____no_output_____" ] ], [ [ "5. 
The next step is to write the Javascript code for the widget.", "_____no_output_____" ] ], [ [ "%%javascript\n\nvar video = $('<video>')[0];\nvar canvas = $('<canvas>')[0];\nvar canvas2 = $('<img>')[0];\nvar width = 320;\nvar height = 0;\n\nrequire([\"widgets/js/widget\"], function(WidgetManager){\n var CameraView = IPython.DOMWidgetView.extend({\n render: function(){\n var that = this;\n\n // We append the HTML elements.\n setTimeout(function() {\n that.$el.append(video).\n append(canvas).\n append(canvas2);}, 200);\n \n // We initialize the webcam.\n var streaming = false;\n navigator.getMedia = ( navigator.getUserMedia ||\n navigator.webkitGetUserMedia ||\n navigator.mozGetUserMedia ||\n navigator.msGetUserMedia);\n\n navigator.getMedia({video: true, audio: false},\n function(stream) {\n if (navigator.mozGetUserMedia) {\n video.mozSrcObject = stream;\n } else {\n var vendorURL = (window.URL || \n window.webkitURL);\n video.src = vendorURL.createObjectURL(\n stream);\n }\n video.controls = true;\n video.play();\n },\n function(err) {\n console.log(\"An error occured! \" + err);\n }\n );\n \n // We initialize the size of the canvas.\n video.addEventListener('canplay', function(ev){\n if (!streaming) {\n height = video.videoHeight / (\n video.videoWidth/width);\n video.setAttribute('width', width);\n video.setAttribute('height', height);\n canvas.setAttribute('width', width);\n canvas.setAttribute('height', height);\n canvas2.setAttribute('width', width);\n canvas2.setAttribute('height', height);\n \n streaming = true;\n }\n }, false);\n \n // Play/Pause functionality.\n var interval;\n video.addEventListener('play', function(ev){\n // We get the picture every 100ms. \n interval = setInterval(takepicture, 100);\n })\n video.addEventListener('pause', function(ev){\n clearInterval(interval);\n })\n \n // This function is called at each time step.\n // It takes a picture and sends it to the model.\n function takepicture() {\n canvas.width = width; canvas.height = height;\n canvas2.width = width; canvas2.height = height;\n \n video.style.display = 'none';\n canvas.style.display = 'none';\n \n // We take a screenshot from the webcam feed and \n // we put the image in the first canvas.\n canvas.getContext('2d').drawImage(video, \n 0, 0, width, height);\n \n // We export the canvas image to the model.\n that.model.set('imageurl',\n canvas.toDataURL('image/png'));\n that.touch();\n }\n },\n \n update: function(){\n // This function is called whenever Python modifies\n // the second (processed) image. We retrieve it and\n // we display it in the second canvas.\n var img = this.model.get('imageurl2');\n canvas2.src = img;\n return CameraView.__super__.update.apply(this);\n }\n });\n \n // Register the view with the widget manager.\n WidgetManager.register_widget_view('CameraView', \n CameraView);\n});", "_____no_output_____" ] ], [ [ "6. Finally, we create and display the widget.", "_____no_output_____" ] ], [ [ "c = Camera()\ndisplay(c)", "_____no_output_____" ] ], [ [ "> You'll find all the explanations, figures, references, and much more in the book (to be released later this summer).\n\n> [IPython Cookbook](http://ipython-books.github.io/), by [Cyrille Rossant](http://cyrille.rossant.net), Packt Publishing, 2014 (500 pages).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7c418f78bf825ccb7e9939362d0a8ef9a48b458
95,573
ipynb
Jupyter Notebook
examples/raytracer_data.ipynb
akhilsadam/raytracer-imaging
d7c7f8ab5435710ce5fe71823ea967be6804749c
[ "MIT" ]
null
null
null
examples/raytracer_data.ipynb
akhilsadam/raytracer-imaging
d7c7f8ab5435710ce5fe71823ea967be6804749c
[ "MIT" ]
null
null
null
examples/raytracer_data.ipynb
akhilsadam/raytracer-imaging
d7c7f8ab5435710ce5fe71823ea967be6804749c
[ "MIT" ]
null
null
null
190.005964
34,556
0.859395
[ [ [ "# raytracer-imaging\nFor PET image reconstruction", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nimport math\nimport numpy as np\nfrom numba import cuda,types, from_dtype\nimport raytracer.cudaOptions\nfrom raytracer.rotation.quaternion import *\nfrom raytracer.raytracer.voxel import *\nfrom raytracer.raytracer.raytrace import *", "_____no_output_____" ], [ "data_rays = 0.1*np.loadtxt(rayOptions.data_directory).T # x,y,z [mm,ns]", "_____no_output_____" ], [ "data_rays[0,:]", "_____no_output_____" ], [ "voxel_size = np.array([10,10,100]) #odd so we have center point\nraytracerA = raytracer(voxel_size,method=\"ART\")", "_____no_output_____" ], [ "#Reconstruction:\nprojection_error = raytracerA.reconstruct(data_rays[0:100],iterations=8)", "100%|███████████████████████████████████████████████████████████████████████████████| 800/800 [00:05<00:00, 139.75it/s]\n" ], [ "# rayNHit,rayHits,rayWeights = raytracerA.norm_raytrace(0.0*np.pi/180.0,90.0*np.pi/180.0)\n# projection = raytracerA.rayproject(rayNHit,rayHits)\nraytracerA.make_projection(phi=0.0*np.pi/180.0,alpha=90.0*np.pi/180.0)", "[ 0.70710678 0. -0.70710678 0. ]\n" ], [ "print(rayNHit)\nprint(\"---\")\nprint(nvoxels,camera_nrays)\nprint(rayHits.shape)\nprint(\"---\")\ncount = 0\n# for r in range(100):\n# if rayNHit[r] > 0:\n# print(rayHits[r,0:rayNHit[r]])\n# print(\"---\")\n# for r in range(100):\n# if rayNHit[r] > 0:\n# print(rayWeights[r,0:rayNHit[r]])\n# print(\"---\")\nprint(np.sum(rayNHit > 0))", "_____no_output_____" ], [ "quaternion.rotate(verts,20.*np.pi/180.0,40.*np.pi/180.0)\nprint(verts)", "[[ 5.65491516 -24.54622352 -35.22080132]\n [ 5.05089239 -24.76606983 -34.45475688]\n [ 4.44686962 -24.98591614 -33.68871243]\n ...\n [ -4.22067301 24.0040672 32.27988038]\n [ -4.82469578 23.78422089 33.04592482]\n [ -5.42871856 23.56437457 33.81196927]]\n" ], [ "# set the colors of each object\ncolors = np.empty(verts.shape, dtype=object)\ncolors = 'red'\n\n# and plot everything\nax = plt.figure().add_subplot(projection='3d')\nax.voxels(verts, facecolors=colors, edgecolor='k')\nplt.show()", "_____no_output_____" ], [ "ax = plt.figure().add_subplot()\nax.scatter(verts[:,1],verts[:,2],c=voxels.flatten(),cmap='inferno',s=2,alpha=0.5) #project onto x\nplt.show()", "_____no_output_____" ], [ "ax = plt.figure().add_subplot()\nax.scatter(verts[:,1].astype(int),verts[:,2].astype(int),c=voxels.flatten(),cmap='inferno',s=6,alpha=0.5) #project onto x\nplt.show()", "_____no_output_____" ], [ "H, xedges, yedges = np.histogram2d(verts[:,1], verts[:,2], bins=camera_size,range=camera_range)", "_____no_output_____" ], [ "plt.imshow(H.T,cmap='binary',extent=np.array(camera_range).flatten())", "_____no_output_____" ], [ "xedges", "_____no_output_____" ], [ "rayverts", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c41af4adf90bc2333fbf3d60d907efcf4c11c8
818,444
ipynb
Jupyter Notebook
Module 2.ipynb
parth2608/Stock-Analysis
2a0c0d8cacfbbf392411decd8858d05a4a21593f
[ "MIT" ]
null
null
null
Module 2.ipynb
parth2608/Stock-Analysis
2a0c0d8cacfbbf392411decd8858d05a4a21593f
[ "MIT" ]
null
null
null
Module 2.ipynb
parth2608/Stock-Analysis
2a0c0d8cacfbbf392411decd8858d05a4a21593f
[ "MIT" ]
null
null
null
331.75679
181,876
0.90918
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## 2.1", "_____no_output_____" ] ], [ [ "df = pd.read_csv('week2.csv')\ndf = df.sort_values(by=['Date'])\ndf = df.drop(['Unnamed: 2', 'Month.1', 'Year.1'], axis = 1)\ndf.head()", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "df['Date'] = pd.to_datetime(df['Date'])\ndf.dtypes", "_____no_output_____" ], [ "df.set_index(\"Date\", inplace = True)\ndf.head()", "_____no_output_____" ], [ "df.reset_index().plot(x='Date', y='Close_Price');", "_____no_output_____" ] ], [ [ "## 2.2", "_____no_output_____" ] ], [ [ "plt.stem(df.index.values,df.Day_Perc_Change, bottom=0);", "C:\\Users\\Dell\\AppData\\Roaming\\Python\\Python37\\site-packages\\ipykernel_launcher.py:1: UserWarning: In Matplotlib 3.3 individual lines on a stem plot will be added as a LineCollection instead of individual lines. This significantly improves the performance of a stem plot. To remove this warning and switch to the new behaviour, set the \"use_line_collection\" keyword argument to True.\n \"\"\"Entry point for launching an IPython kernel.\n" ] ], [ [ "## 2.3", "_____no_output_____" ] ], [ [ "df.reset_index().plot(x='Date', y='Total_Traded_Quantity');", "_____no_output_____" ], [ "sns.set(style=\"darkgrid\")\n\nscaledvolume = df[\"Total_Traded_Quantity\"] - df[\"Total_Traded_Quantity\"].min()\nscaledvolume = scaledvolume/scaledvolume.max() * df.Day_Perc_Change.max()\n\nfig, ax = plt.subplots(figsize=(12, 6))\n\nax.stem(df.index, df.Day_Perc_Change , 'b', markerfmt='bo', label='Daily Percente Change')\nax.plot(df.index, scaledvolume, 'k', label='Volume')\n\nax.set_xlabel('Date')\nplt.legend(loc=2)\n\nplt.tight_layout()\nplt.xticks(plt.xticks()[0], df.index.date, rotation=45)\nplt.show()", "C:\\Users\\Dell\\AppData\\Roaming\\Python\\Python37\\site-packages\\ipykernel_launcher.py:8: UserWarning: In Matplotlib 3.3 individual lines on a stem plot will be added as a LineCollection instead of individual lines. This significantly improves the performance of a stem plot. 
To remove this warning and switch to the new behaviour, set the \"use_line_collection\" keyword argument to True.\n \n" ] ], [ [ "## 2.4", "_____no_output_____" ] ], [ [ "df = df.reset_index()\ndf.head()", "_____no_output_____" ], [ "df.Trend.groupby(df.Trend).count().plot(kind='pie', autopct='%.1f%%')\nplt.axis('equal')\nplt.show()", "_____no_output_____" ], [ "avg = df.groupby(df['Trend'])['Total_Traded_Quantity'].mean()\nmed = df.groupby(df['Trend'])['Total_Traded_Quantity'].median()\nplt.subplot(2, 1, 1)\navg.plot.bar(color='Blue', label='mean').label_outer()\nplt.title('mean')\nplt.legend()\nplt.show()\nplt.subplot(2, 1, 2)\nmed.plot.bar(color='Orange', label='median')\nplt.title('median')\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "## 2.5", "_____no_output_____" ] ], [ [ "df['Day_Perc_Change'].plot.hist(bins=20);", "_____no_output_____" ] ], [ [ "## 2.6", "_____no_output_____" ] ], [ [ "jublfood = pd.read_csv('JUBLFOOD.csv')\njublfood = jublfood.drop(jublfood[jublfood.Series != 'EQ'].index)\njublfood.reset_index(inplace=True)\nprint(jublfood.shape)\njublfood.head()", "(494, 16)\n" ], [ "godrejind = pd.read_csv('GODREJIND.csv')\ngodrejind = godrejind.drop(godrejind[godrejind.Series != 'EQ'].index)\ngodrejind.reset_index(inplace=True)\nprint(godrejind.shape)\ngodrejind.head()", "(494, 16)\n" ], [ "maruti = pd.read_csv('MARUTI.csv')\nmaruti = maruti.drop(maruti[maruti.Series != 'EQ'].index)\nmaruti.reset_index(inplace=True)\nprint(maruti.shape)\nmaruti.head()", "(494, 16)\n" ], [ "pvr = pd.read_csv('PVR.csv')\npvr = pvr.drop(pvr[pvr.Series != 'EQ'].index)\npvr.reset_index(inplace=True)\nprint(pvr.shape)\npvr.head()", "(494, 16)\n" ], [ "tcs = pd.read_csv('TCS.csv')\ntcs = tcs.drop(tcs[tcs.Series != 'EQ'].index)\ntcs.reset_index(inplace=True)\nprint(tcs.shape)\ntcs.head()", "(494, 16)\n" ], [ "combined = pd.concat([godrejind['Close Price'], jublfood['Close Price'], maruti['Close Price'], pvr['Close Price'], tcs['Close Price']], join='inner', axis=1, keys=['GODREJIND', 'JUBLFOOD', 'MARUTI', 'PVR', 'TCS'])\ncombined.head()", "_____no_output_____" ], [ "perc_change = pd.DataFrame()\nperc_change['GODREJIND'] = combined['GODREJIND'].pct_change()*100\nperc_change['GODREJIND'][0] = 0\nperc_change['JUBLFOOD'] = combined['JUBLFOOD'].pct_change()*100\nperc_change['JUBLFOOD'][0] = 0\nperc_change['MARUTI'] = combined['MARUTI'].pct_change()*100\nperc_change['MARUTI'][0] = 0\nperc_change['PVR'] = combined['PVR'].pct_change()*100\nperc_change['PVR'][0] = 0\nperc_change['TCS'] = combined['TCS'].pct_change()*100\nperc_change['TCS'][0] = 0\nperc_change.head()", "_____no_output_____" ], [ "sns.pairplot(perc_change);", "_____no_output_____" ] ], [ [ "## 2.7", "_____no_output_____" ] ], [ [ "perc_change = perc_change.drop([0])\nperc_change.reset_index(inplace=True)\nperc_change.head()", "_____no_output_____" ], [ "print(perc_change[['GODREJIND', 'JUBLFOOD', 'MARUTI', 'PVR', 'TCS']].rolling(7, min_periods=1).std(ddof=0).head())\nplt.plot(perc_change[['GODREJIND', 'JUBLFOOD', 'MARUTI', 'PVR', 'TCS']].rolling(7, min_periods=1).std(ddof=0))\nplt.legend([\"GODREJIND\", \"JUBLFOOD\", \"MARUTI\", \"PVR\", \"TCS\"])\nplt.title('Volatility');", " GODREJIND JUBLFOOD MARUTI PVR TCS\n0 0.000000 0.000000 0.000000 0.000000 0.000000\n1 0.215246 1.304872 0.922343 0.743322 0.814782\n2 1.539099 2.159120 1.524084 0.821539 0.936910\n3 1.378038 1.869992 1.348622 0.713198 1.721081\n4 1.484981 1.758872 1.297517 1.012500 1.553279\n" ] ], [ [ "## 2.8", "_____no_output_____" ] ], [ [ "nifty = 
pd.read_csv('Nifty50.csv')\nnifty['PCT'] = nifty['Close'].pct_change()*100\nnifty = nifty.drop([0])\nnifty.reset_index(inplace=True)\nnifty.head()", "_____no_output_____" ], [ "print(nifty[['PCT']].rolling(7, min_periods=1).std(ddof=0).head())\nplt.plot(nifty[['PCT']].rolling(7, min_periods=1).std(ddof=0), color='Black');\nplt.legend([\"Nifty\"]);\nplt.title('Nifty Volatility');", " PCT\n0 0.000000\n1 0.282915\n2 0.715167\n3 0.619564\n4 0.557577\n" ], [ "plt.plot(nifty[['PCT']].rolling(7, min_periods=1).std(ddof=0), color='Black');\nplt.plot(perc_change[['GODREJIND', 'JUBLFOOD', 'MARUTI', 'PVR', 'TCS']].rolling(7, min_periods=1).std(ddof=0));\nplt.legend([\"Nifty\", \"GODREJIND\", \"JUBLFOOD\", \"MARUTI\", \"PVR\", \"TCS\"]);\nplt.title('Volatility Comparison');", "_____no_output_____" ] ], [ [ "## 2.9", "_____no_output_____" ] ], [ [ "short_window = 21\nlong_window = 34\ntcs_roll_21 = combined['TCS'].rolling(short_window, min_periods=1).mean()\ntcs_roll_34 = combined['TCS'].rolling(long_window, min_periods=1).mean()\ndate = pd.to_datetime(tcs['Date'])\nsignals = pd.DataFrame(index=date)\nsignals['signal'] = 0\nsignals['signal'][short_window:] = np.where(tcs_roll_21[short_window:] > tcs_roll_34[short_window:], 1, 0)\nsignals['position'] = signals['signal'].diff().fillna(0)\nsignals['short_mavg'] = tcs_roll_21.tolist()\nsignals['long_mavg'] = tcs_roll_34.tolist()\nsignals.head()", "_____no_output_____" ], [ "plt.figure(figsize=(16,8))\nplt.plot(date, combined['TCS'].rolling(7, min_periods=1).mean(), color='red', label='TCS')\nplt.plot(date, tcs_roll_21, color='blue', label='21_SMA')\nplt.plot(date, tcs_roll_34, color='green', label='34_SMA')\nplt.plot(signals.loc[signals.position == 1].index, signals['short_mavg'][signals.position == 1], '^', markersize=10, color='green', label='Buy')\nplt.plot(signals.loc[signals.position == -1].index, signals['short_mavg'][signals.position == -1], 'v', markersize=10, color='red', label='Sell')\nplt.legend(loc='best')\nplt.xlabel('Date')\nplt.ylabel('Price in Rupees')\nplt.show()", "_____no_output_____" ] ], [ [ "## 2.10", "_____no_output_____" ] ], [ [ "tcs_mean_14 = combined.TCS.rolling(14, min_periods=1).mean()\ntcs_std_14 = combined.TCS.rolling(14, min_periods=1).std()\nupper = tcs_mean_14 + 2*tcs_std_14\nlower = tcs_mean_14 - 2*tcs_std_14\nplt.figure(figsize=(16,8))\nplt.plot(date, tcs_mean_14, color='black', label='TCS')\nplt.plot(date, tcs['Average Price'], color='red', label='Average Price')\nplt.plot(date, upper, color='blue', label='Upper Bound')\nplt.plot(date, lower, color='green', label='Lower Bound')\nplt.xlabel('Date')\nplt.legend()\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
e7c4215e096220e7d53fd66b049ad079cf9102ef
46,216
ipynb
Jupyter Notebook
notes/FEM_beam.ipynb
gfsReboucas/Drivetrain-python
90cc8a0b26fa6dd851a8ddaaf321f5ae9f5cf431
[ "MIT" ]
1
2020-10-17T13:43:01.000Z
2020-10-17T13:43:01.000Z
notes/FEM_beam.ipynb
gfsReboucas/Drivetrain-python
90cc8a0b26fa6dd851a8ddaaf321f5ae9f5cf431
[ "MIT" ]
null
null
null
notes/FEM_beam.ipynb
gfsReboucas/Drivetrain-python
90cc8a0b26fa6dd851a8ddaaf321f5ae9f5cf431
[ "MIT" ]
null
null
null
64.188889
7,393
0.278367
[ [ [ "# Beam finite element matrices\nBased on: \\\n[1] Neto, M. A., Amaro, A., Roseiro, L., Cirne, J., & Leal, R. (2015). Finite Element Method for Beams. In Engineering Computation of Structures: The Finite Element Method (pp. 115–156). Springer International Publishing. http://doi.org/10.1007/978-3-319-17710-6_4\n\n\nThe beam finite element, its nodes and degrees-of-freedom (DOFs) can be seen below:\n![alt text](beam_DOFs.svg)", "_____no_output_____" ] ], [ [ "from sympy import *\ninit_printing()\n\n\ndef symb(x, y):\n return symbols('{0}_{1}'.format(x, y), type = float)\n\n\nE, A, L, G, I_1, I_2, I_3, rho = symbols('E A L G I_1 I_2 I_3 rho', type = float)", "_____no_output_____" ] ], [ [ "The finite element matrices should have order 12, accounting for each of the element's DOFs, shown below:", "_____no_output_____" ] ], [ [ "u = Matrix(12, 1, [symb('u', v + 1) for v in range(12)])\ntranspose(u)", "_____no_output_____" ] ], [ [ "## Axial deformation along $x_1$\nIn terms of generic coordinates $v_i$:", "_____no_output_____" ] ], [ [ "v_a = Matrix(2, 1, [symb('v', v + 1) for v in range(2)])\ntranspose(v_a)", "_____no_output_____" ] ], [ [ "which are equivalent to the $u_i$ coordinates in the following way:\n$$\n \\mathbf{v} = \\mathbf{R} \\mathbf{u},\n$$\nwhere:\n$$\n v_1 = u_1, \\\\\n v_2 = u_7,\n$$\nwith the following coordinate transformation matrix:", "_____no_output_____" ] ], [ [ "R = zeros(12)\nR[1 - 1, 1 - 1] = 1\nR[2 - 1, 7 - 1] = 1\nR[:2, :]", "_____no_output_____" ] ], [ [ "### Stiffness matrix\nEq. (3.15) of [1]:", "_____no_output_____" ] ], [ [ "K_a = (E * A / L) * Matrix([[1, -1],\n [-1, 1]])\nK_a", "_____no_output_____" ] ], [ [ "### Inertia matrix\nEq. (3.16) of [1]:", "_____no_output_____" ] ], [ [ "M_a = (rho * A * L / 6) * Matrix([[2, 1],\n [1, 2]])\nM_a", "_____no_output_____" ] ], [ [ "## Torsional deformation around $x_1$\nAccording to [1], one can obtain the matrices for the torsional case from the axial case by replacing the elasticity modulus $E$ and the cross-sectional area $A$ by the shear modulus $G$ and the polar area moment of inertia $I_1$.\n\nIn terms of generic coordinates $v_i$:", "_____no_output_____" ] ], [ [ "v_t = Matrix(2, 1, [symb('v', v + 3) for v in range(2)])\ntranspose(v_t)", "_____no_output_____" ] ], [ [ "which are equivalent to the $u_i$ coordinates in the following way:\n$$\n v_3 = u_4, \\\\\n v_4 = u_{10},\n$$\nwith the following coordinate transformation matrix:", "_____no_output_____" ] ], [ [ "R[3 - 1, 4 - 1] = 1\nR[4 - 1, 10 - 1] = 1\nR[0:4, :]", "_____no_output_____" ] ], [ [ "### Stiffness matrix", "_____no_output_____" ] ], [ [ "K_t = K_a.subs([(E, G), (A, I_1)])\nK_t", "_____no_output_____" ] ], [ [ "### Inertia matrix", "_____no_output_____" ] ], [ [ "M_t = M_a.subs([(E, G), (A, I_1)])\nM_t", "_____no_output_____" ] ], [ [ "## Bending on the plane $x_1-x_3$\nIn this case the bending torsion occurs around the $x_2$ axis. 
In terms of generic coordinates $v_i$:", "_____no_output_____" ] ], [ [ "v_b13 = Matrix(4, 1, [symb('v', v + 9) for v in range(4)])\ntranspose(v_b13)", "_____no_output_____" ] ], [ [ "which are equivalent to the $u_i$ coordinates in the following way:\n$$\n v_9 = u_3, \\\\\n v_{10} = u_5, \\\\\n v_{11} = u_9, \\\\\n v_{12} = u_{11},\n$$\nwith the following coordinate transformation matrix:", "_____no_output_____" ] ], [ [ "R[9 - 1, 3 - 1] = 1\nR[10 - 1, 5 - 1] = 1\nR[11 - 1, 9 - 1] = 1\nR[12 - 1, 11 - 1] = 1\nR", "_____no_output_____" ] ], [ [ "### Stiffness matrix", "_____no_output_____" ] ], [ [ "K_b13 = (E * I_2 / L**3) * Matrix([[ 12 , 6 * L , -12 , 6 * L ],\n [ 6 * L, 4 * L**2, - 6 * L, 2 * L**2],\n [-12 , -6 * L , 12 , -6 * L ],\n [ 6 * L, 2 * L**2, - 6 * L, 4 * L**2]])\n\nif (not K_b13.is_symmetric()):\n print('Error in K_b13.')\nK_b13", "_____no_output_____" ] ], [ [ "### Inertia matrix", "_____no_output_____" ] ], [ [ "M_b13 = (rho * L * A / 420) * Matrix([[ 156 , 22 * L , 54 , -13 * L ],\n [ 22 * L, 4 * L**2, 13 * L, - 3 * L**2],\n [ 54 , 13 * L , 156 , -22 * L ],\n [- 13 * L, - 3 * L**2, - 22 * L, 4 * L**2]])\n\nif (not M_b13.is_symmetric()):\n print('Error in M_b13.')\nM_b13", "_____no_output_____" ] ], [ [ "## Bending on the plane $x_1-x_2$\nIn this case the bending torsion occurs around the $x_3$ axis, but in the opposite direction. The matrices are similar to the case of bending on the $x_1-x_3$ plane, needing proper coordinate transformation and replacing the index of the area moment of inertia from 2 to 3.\n\nWritten in terms of generic coordinates $v_i$:", "_____no_output_____" ] ], [ [ "v_b12 = Matrix(4, 1, [symb('v', v + 5) for v in range(4)])\ntranspose(v_b12)", "_____no_output_____" ] ], [ [ "which are equivalent to the $u_i$ coordinates in the following way:\n$$\n v_5 = u_2, \\\\\n v_6 = -u_6, \\\\\n v_7 = u_8, \\\\\n v_8 = -u_{12},\n$$\nwith the following coordinate transformation matrix:", "_____no_output_____" ] ], [ [ "R[5 - 1, 2 - 1] = 1\nR[6 - 1, 6 - 1] = -1\nR[7 - 1, 8 - 1] = 1\nR[8 - 1, 12 - 1] = -1\nR", "_____no_output_____" ] ], [ [ "### Stiffness matrix", "_____no_output_____" ] ], [ [ "K_b12 = K_b13.subs(I_2, I_3)\nK_b12", "_____no_output_____" ] ], [ [ "### Inertia matrix", "_____no_output_____" ] ], [ [ "M_b12 = M_b13\nif (not M_b12.is_symmetric()):\n print('Error in M_b12.')\nM_b12", "_____no_output_____" ] ], [ [ "## Assembly of the full matrices\nAccounting for axial loads, torques and bending in both planes.", "_____no_output_____" ] ], [ [ "RAR = lambda A: transpose(R)*A*R\ntranspose(R**-1*u)", "_____no_output_____" ], [ "K_f = diag(K_a, K_t, K_b12, K_b13)\nK = RAR(K_f)\nif (not K.is_symmetric()):\n print('Error in K.')\nK", "_____no_output_____" ], [ "M_f = diag(M_a, M_t, M_b12, M_b13)\nM = RAR(M_f)\nif (not M.is_symmetric()):\n print('Error in M.')\nM", "_____no_output_____" ] ], [ [ "## Dynamic matrices for Lin and Parker\n\nSee:\n\nLin, J., & Parker, R. G. (1999). Analytical characterization of the unique properties of planetary gear free vibration. Journal of Vibration and Acoustics, Transactions of the ASME, 121(3), 316–321. 
http://doi.org/10.1115/1.2893982\n\nConsidering translation on directions $x_2$ and $x_3$ and rotation around $x_1$:", "_____no_output_____" ] ], [ [ "id = [2,3, 4, 8, 9, 10]\nu_LP = [symb('u', i) for i in id]\nMatrix(u_LP)", "_____no_output_____" ], [ "T = zeros(12,6)\nT[2 - 1, 1 - 1] = 1\nT[3 - 1, 2 - 1] = 1\nT[4 - 1, 3 - 1] = 1\nT[8 - 1, 4 - 1] = 1\nT[9 - 1, 5 - 1] = 1\nT[10 - 1, 6 - 1] = 1\n(T.T) * K * T", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e7c4264614d4032d0f7c236bcec5c62ded728b84
1,474
ipynb
Jupyter Notebook
viz_scripts/mapping_dictionaries.ipynb
e-mission/em-public-dashboard
2e3d85de85537d65fbe5c03eea2e983942f6b0eb
[ "BSD-3-Clause" ]
null
null
null
viz_scripts/mapping_dictionaries.ipynb
e-mission/em-public-dashboard
2e3d85de85537d65fbe5c03eea2e983942f6b0eb
[ "BSD-3-Clause" ]
15
2021-02-09T21:47:42.000Z
2022-01-28T04:35:31.000Z
viz_scripts/mapping_dictionaries.ipynb
e-mission/em-public-dashboard
2e3d85de85537d65fbe5c03eea2e983942f6b0eb
[ "BSD-3-Clause" ]
3
2021-01-23T23:55:01.000Z
2022-01-05T17:38:28.000Z
22.333333
91
0.561058
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "df_pur= pd.read_csv(r'auxiliary_files/purpose_labels.csv')\ndf_re = pd.read_csv(r'auxiliary_files/mode_labels.csv')\ndf_EI = pd.read_csv(r'auxiliary_files/energy_intensity.csv')\n\n#dictionaries:\ndic_pur = dict(zip(df_pur['purpose_confirm'],df_pur['bin_purpose'])) # bin purpose\ndic_re = dict(zip(df_re['replaced_mode'],df_re['mode_clean'])) # bin modes\ndic_fuel = dict(zip(df_EI['mode'],df_EI['fuel']))", "_____no_output_____" ], [ "%store df_EI \n%store dic_re \n%store dic_pur \n%store dic_fuel ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
e7c42d3abbfcc4e42346859007a7c60f0ee52a39
417,695
ipynb
Jupyter Notebook
workbench materials/end-to-end-bluebook-bulldozer-price-regression-video.ipynb
Mikolaj-Myszka/DataScience-bulldozer-price-prediction
2afc0363eb78602a128cd2a10dda340074ff40d6
[ "MIT" ]
null
null
null
workbench materials/end-to-end-bluebook-bulldozer-price-regression-video.ipynb
Mikolaj-Myszka/DataScience-bulldozer-price-prediction
2afc0363eb78602a128cd2a10dda340074ff40d6
[ "MIT" ]
null
null
null
workbench materials/end-to-end-bluebook-bulldozer-price-regression-video.ipynb
Mikolaj-Myszka/DataScience-bulldozer-price-prediction
2afc0363eb78602a128cd2a10dda340074ff40d6
[ "MIT" ]
3
2021-09-21T11:56:19.000Z
2021-12-30T15:39:33.000Z
53.148619
26,952
0.472395
[ [ [ "# 🚜 Predicting the Sale Price of Bulldozers using Machine Learning\n\nIn this notebook, we're going to go through an example machine learning project with the goal of predicting the sale price of bulldozers.\n\n## 1. Problem defition\n\n> How well can we predict the future sale price of a bulldozer, given its characteristics and previous examples of how much similar bulldozers have been sold for?\n\n## 2. Data\n\nThe data is downloaded from the Kaggle Bluebook for Bulldozers competition: https://www.kaggle.com/c/bluebook-for-bulldozers/data\n\nThere are 3 main datasets:\n\n* Train.csv is the training set, which contains data through the end of 2011.\n* Valid.csv is the validation set, which contains data from January 1, 2012 - April 30, 2012 You make predictions on this set throughout the majority of the competition. Your score on this set is used to create the public leaderboard.\n* Test.csv is the test set, which won't be released until the last week of the competition. It contains data from May 1, 2012 - November 2012. Your score on the test set determines your final rank for the competition.\n\n## 3. Evaluation\n\nThe evaluation metric for this competition is the RMSLE (root mean squared log error) between the actual and predicted auction prices.\n\nFor more on the evaluation of this project check: https://www.kaggle.com/c/bluebook-for-bulldozers/overview/evaluation\n\n**Note:** The goal for most regression evaluation metrics is to minimize the error. For example, our goal for this project will be to build a machine learning model which minimises RMSLE.\n\n## 4. Features\n\nKaggle provides a data dictionary detailing all of the features of the dataset. You can view this data dictionary on Google Sheets: https://docs.google.com/spreadsheets/d/18ly-bLR8sbDJLITkWG7ozKm8l3RyieQ2Fpgix-beSYI/edit?usp=sharing", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sklearn", "_____no_output_____" ], [ "# Import training and validation sets\ndf = pd.read_csv(\"data/bluebook-for-bulldozers/TrainAndValid.csv\",\n low_memory=False)", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 412698 entries, 0 to 412697\nData columns (total 53 columns):\nSalesID 412698 non-null int64\nSalePrice 412698 non-null float64\nMachineID 412698 non-null int64\nModelID 412698 non-null int64\ndatasource 412698 non-null int64\nauctioneerID 392562 non-null float64\nYearMade 412698 non-null int64\nMachineHoursCurrentMeter 147504 non-null float64\nUsageBand 73670 non-null object\nsaledate 412698 non-null object\nfiModelDesc 412698 non-null object\nfiBaseModel 412698 non-null object\nfiSecondaryDesc 271971 non-null object\nfiModelSeries 58667 non-null object\nfiModelDescriptor 74816 non-null object\nProductSize 196093 non-null object\nfiProductClassDesc 412698 non-null object\nstate 412698 non-null object\nProductGroup 412698 non-null object\nProductGroupDesc 412698 non-null object\nDrive_System 107087 non-null object\nEnclosure 412364 non-null object\nForks 197715 non-null object\nPad_Type 81096 non-null object\nRide_Control 152728 non-null object\nStick 81096 non-null object\nTransmission 188007 non-null object\nTurbocharged 81096 non-null object\nBlade_Extension 25983 non-null object\nBlade_Width 25983 non-null object\nEnclosure_Type 25983 non-null object\nEngine_Horsepower 25983 non-null object\nHydraulics 330133 non-null object\nPushblock 25983 non-null object\nRipper 106945 non-null object\nScarifier 25994 
non-null object\nTip_Control 25983 non-null object\nTire_Size 97638 non-null object\nCoupler 220679 non-null object\nCoupler_System 44974 non-null object\nGrouser_Tracks 44875 non-null object\nHydraulics_Flow 44875 non-null object\nTrack_Type 102193 non-null object\nUndercarriage_Pad_Width 102916 non-null object\nStick_Length 102261 non-null object\nThumb 102332 non-null object\nPattern_Changer 102261 non-null object\nGrouser_Type 102193 non-null object\nBackhoe_Mounting 80712 non-null object\nBlade_Type 81875 non-null object\nTravel_Controls 81877 non-null object\nDifferential_Type 71564 non-null object\nSteering_Controls 71522 non-null object\ndtypes: float64(3), int64(5), object(45)\nmemory usage: 166.9+ MB\n" ], [ "df.isna().sum()", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.scatter(df[\"saledate\"][:1000], df[\"SalePrice\"][:1000])", "_____no_output_____" ], [ "df.saledate[:1000]", "_____no_output_____" ], [ "df.saledate.dtype", "_____no_output_____" ], [ "df.SalePrice.plot.hist()", "_____no_output_____" ] ], [ [ "### Parsing dates\n\nWhen we work with time series data, we want to enrich the time & date component as much as possible.\n\nWe can do that by telling pandas which of our columns has dates in it using the `parse_dates` parameter.", "_____no_output_____" ] ], [ [ "# Import data again but this time parse dates\ndf = pd.read_csv(\"data/bluebook-for-bulldozers/TrainAndValid.csv\",\n low_memory=False,\n parse_dates=[\"saledate\"])", "_____no_output_____" ], [ "df.saledate.dtype", "_____no_output_____" ], [ "df.saledate[:1000]", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.scatter(df[\"saledate\"][:1000], df[\"SalePrice\"][:1000])", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.head().T", "_____no_output_____" ], [ "df.saledate.head(20)", "_____no_output_____" ] ], [ [ "### Sort DataFrame by saledate\n\nWhen working with time series data, it's a good idea to sort it by date.", "_____no_output_____" ] ], [ [ "# Sort DataFrame in date order\ndf.sort_values(by=[\"saledate\"], inplace=True, ascending=True)\ndf.saledate.head(20)", "_____no_output_____" ] ], [ [ "### Make a copy of the original DataFrame\n\nWe make a copy of the original dataframe so when we manipulate the copy, we've still got our original data.", "_____no_output_____" ] ], [ [ "# Make a copy of the original DataFrame to perform edits on\ndf_tmp = df.copy()", "_____no_output_____" ] ], [ [ "### Add datetime parameters for `saledate` column", "_____no_output_____" ] ], [ [ "df_tmp[\"saleYear\"] = df_tmp.saledate.dt.year\ndf_tmp[\"saleMonth\"] = df_tmp.saledate.dt.month\ndf_tmp[\"saleDay\"] = df_tmp.saledate.dt.day\ndf_tmp[\"saleDayOfWeek\"] = df_tmp.saledate.dt.dayofweek\ndf_tmp[\"saleDayOfYear\"] = df_tmp.saledate.dt.dayofyear\n", "_____no_output_____" ], [ "df_tmp.head().T", "_____no_output_____" ], [ "# Now we've enriched our DataFrame with date time features, we can remove 'saledate'\ndf_tmp.drop(\"saledate\", axis=1, inplace=True)", "_____no_output_____" ], [ "# Check the values of different columns\ndf_tmp.state.value_counts()", "_____no_output_____" ], [ "df_tmp.head()", "_____no_output_____" ], [ "len(df_tmp)", "_____no_output_____" ] ], [ [ "## 5. 
Modelling \n\nWe've done enough EDA (we could always do more) but let's start to do some model-driven EDA.", "_____no_output_____" ] ], [ [ "# Let's build a machine learning model \nfrom sklearn.ensemble import RandomForestRegressor\n\nmodel = RandomForestRegressor(n_jobs=-1,\n random_state=42)\n\nmodel.fit(df_tmp.drop(\"SalePrice\", axis=1), df_tmp[\"SalePrice\"])", "_____no_output_____" ], [ "df_tmp.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 412698 entries, 205615 to 409203\nData columns (total 57 columns):\nSalesID 412698 non-null int64\nSalePrice 412698 non-null float64\nMachineID 412698 non-null int64\nModelID 412698 non-null int64\ndatasource 412698 non-null int64\nauctioneerID 392562 non-null float64\nYearMade 412698 non-null int64\nMachineHoursCurrentMeter 147504 non-null float64\nUsageBand 73670 non-null object\nfiModelDesc 412698 non-null object\nfiBaseModel 412698 non-null object\nfiSecondaryDesc 271971 non-null object\nfiModelSeries 58667 non-null object\nfiModelDescriptor 74816 non-null object\nProductSize 196093 non-null object\nfiProductClassDesc 412698 non-null object\nstate 412698 non-null object\nProductGroup 412698 non-null object\nProductGroupDesc 412698 non-null object\nDrive_System 107087 non-null object\nEnclosure 412364 non-null object\nForks 197715 non-null object\nPad_Type 81096 non-null object\nRide_Control 152728 non-null object\nStick 81096 non-null object\nTransmission 188007 non-null object\nTurbocharged 81096 non-null object\nBlade_Extension 25983 non-null object\nBlade_Width 25983 non-null object\nEnclosure_Type 25983 non-null object\nEngine_Horsepower 25983 non-null object\nHydraulics 330133 non-null object\nPushblock 25983 non-null object\nRipper 106945 non-null object\nScarifier 25994 non-null object\nTip_Control 25983 non-null object\nTire_Size 97638 non-null object\nCoupler 220679 non-null object\nCoupler_System 44974 non-null object\nGrouser_Tracks 44875 non-null object\nHydraulics_Flow 44875 non-null object\nTrack_Type 102193 non-null object\nUndercarriage_Pad_Width 102916 non-null object\nStick_Length 102261 non-null object\nThumb 102332 non-null object\nPattern_Changer 102261 non-null object\nGrouser_Type 102193 non-null object\nBackhoe_Mounting 80712 non-null object\nBlade_Type 81875 non-null object\nTravel_Controls 81877 non-null object\nDifferential_Type 71564 non-null object\nSteering_Controls 71522 non-null object\nsaleYear 412698 non-null int64\nsaleMonth 412698 non-null int64\nsaleDay 412698 non-null int64\nsaleDayOfWeek 412698 non-null int64\nsaleDayOfYear 412698 non-null int64\ndtypes: float64(3), int64(10), object(44)\nmemory usage: 182.6+ MB\n" ], [ "df_tmp[\"UsageBand\"].dtype", "_____no_output_____" ], [ "df_tmp.isna().sum()", "_____no_output_____" ] ], [ [ "### Convert string to categories\n\nOne way we can turn all of our data into numbers is by converting them into pandas catgories.\n\nWe can check the different datatypes compatible with pandas here: https://pandas.pydata.org/pandas-docs/stable/reference/general_utility_functions.html#data-types-related-functionality", "_____no_output_____" ] ], [ [ "df_tmp.head().T", "_____no_output_____" ], [ "pd.api.types.is_string_dtype(df_tmp[\"UsageBand\"])", "_____no_output_____" ], [ "# Find the columns which contain strings\nfor label, content in df_tmp.items():\n if pd.api.types.is_string_dtype(content):\n print(label)", 
"UsageBand\nfiModelDesc\nfiBaseModel\nfiSecondaryDesc\nfiModelSeries\nfiModelDescriptor\nProductSize\nfiProductClassDesc\nstate\nProductGroup\nProductGroupDesc\nDrive_System\nEnclosure\nForks\nPad_Type\nRide_Control\nStick\nTransmission\nTurbocharged\nBlade_Extension\nBlade_Width\nEnclosure_Type\nEngine_Horsepower\nHydraulics\nPushblock\nRipper\nScarifier\nTip_Control\nTire_Size\nCoupler\nCoupler_System\nGrouser_Tracks\nHydraulics_Flow\nTrack_Type\nUndercarriage_Pad_Width\nStick_Length\nThumb\nPattern_Changer\nGrouser_Type\nBackhoe_Mounting\nBlade_Type\nTravel_Controls\nDifferential_Type\nSteering_Controls\n" ], [ "# If you're wondering what df.items() does, here's an example\nrandom_dict = {\"key1\": \"hello\",\n \"key2\": \"world!\"}\n\nfor key, value in random_dict.items():\n print(f\"this is a key: {key}\",\n f\"this is a value: {value}\")", "this is a key: key1 this is a value: hello\nthis is a key: key2 this is a value: world!\n" ], [ "# This will turn all of the string value into category values\nfor label, content in df_tmp.items():\n if pd.api.types.is_string_dtype(content):\n df_tmp[label] = content.astype(\"category\").cat.as_ordered()", "_____no_output_____" ], [ "df_tmp.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 412698 entries, 205615 to 409203\nData columns (total 57 columns):\nSalesID 412698 non-null int64\nSalePrice 412698 non-null float64\nMachineID 412698 non-null int64\nModelID 412698 non-null int64\ndatasource 412698 non-null int64\nauctioneerID 392562 non-null float64\nYearMade 412698 non-null int64\nMachineHoursCurrentMeter 147504 non-null float64\nUsageBand 73670 non-null category\nfiModelDesc 412698 non-null category\nfiBaseModel 412698 non-null category\nfiSecondaryDesc 271971 non-null category\nfiModelSeries 58667 non-null category\nfiModelDescriptor 74816 non-null category\nProductSize 196093 non-null category\nfiProductClassDesc 412698 non-null category\nstate 412698 non-null category\nProductGroup 412698 non-null category\nProductGroupDesc 412698 non-null category\nDrive_System 107087 non-null category\nEnclosure 412364 non-null category\nForks 197715 non-null category\nPad_Type 81096 non-null category\nRide_Control 152728 non-null category\nStick 81096 non-null category\nTransmission 188007 non-null category\nTurbocharged 81096 non-null category\nBlade_Extension 25983 non-null category\nBlade_Width 25983 non-null category\nEnclosure_Type 25983 non-null category\nEngine_Horsepower 25983 non-null category\nHydraulics 330133 non-null category\nPushblock 25983 non-null category\nRipper 106945 non-null category\nScarifier 25994 non-null category\nTip_Control 25983 non-null category\nTire_Size 97638 non-null category\nCoupler 220679 non-null category\nCoupler_System 44974 non-null category\nGrouser_Tracks 44875 non-null category\nHydraulics_Flow 44875 non-null category\nTrack_Type 102193 non-null category\nUndercarriage_Pad_Width 102916 non-null category\nStick_Length 102261 non-null category\nThumb 102332 non-null category\nPattern_Changer 102261 non-null category\nGrouser_Type 102193 non-null category\nBackhoe_Mounting 80712 non-null category\nBlade_Type 81875 non-null category\nTravel_Controls 81877 non-null category\nDifferential_Type 71564 non-null category\nSteering_Controls 71522 non-null category\nsaleYear 412698 non-null int64\nsaleMonth 412698 non-null int64\nsaleDay 412698 non-null int64\nsaleDayOfWeek 412698 non-null int64\nsaleDayOfYear 412698 non-null int64\ndtypes: category(44), float64(3), int64(10)\nmemory usage: 63.3 MB\n" ], [ 
"df_tmp.state.cat.categories", "_____no_output_____" ], [ "df_tmp.state.cat.codes", "_____no_output_____" ] ], [ [ "Thanks to pandas Categories we now have a way to access all of our data in the form of numbers.\n\nBut we still have a bunch of missing data...", "_____no_output_____" ] ], [ [ "# Check missing data\ndf_tmp.isnull().sum()/len(df_tmp)", "_____no_output_____" ] ], [ [ "### Save preprocessed data", "_____no_output_____" ] ], [ [ "# Export current tmp dataframe\ndf_tmp.to_csv(\"data/bluebook-for-bulldozers/train_tmp.csv\",\n index=False)", "_____no_output_____" ], [ "# Import preprocessed data\ndf_tmp = pd.read_csv(\"data/bluebook-for-bulldozers/train_tmp.csv\",\n low_memory=False)\ndf_tmp.head().T", "_____no_output_____" ], [ "df_tmp.isna().sum()", "_____no_output_____" ] ], [ [ "## Fill missing values \n\n### Fill numerical missing values first", "_____no_output_____" ] ], [ [ "for label, content in df_tmp.items():\n if pd.api.types.is_numeric_dtype(content):\n print(label)", "SalesID\nSalePrice\nMachineID\nModelID\ndatasource\nauctioneerID\nYearMade\nMachineHoursCurrentMeter\nsaleYear\nsaleMonth\nsaleDay\nsaleDayOfWeek\nsaleDayOfYear\n" ], [ "df_tmp.ModelID", "_____no_output_____" ], [ "# Check for which numeric columns have null values\nfor label, content in df_tmp.items():\n if pd.api.types.is_numeric_dtype(content):\n if pd.isnull(content).sum():\n print(label)", "auctioneerID\nMachineHoursCurrentMeter\n" ], [ "# Fill numeric rows with the median\nfor label, content in df_tmp.items():\n if pd.api.types.is_numeric_dtype(content):\n if pd.isnull(content).sum():\n # Add a binary column which tells us if the data was missing or not\n df_tmp[label+\"_is_missing\"] = pd.isnull(content)\n # Fill missing numeric values with median\n df_tmp[label] = content.fillna(content.median())", "_____no_output_____" ], [ "# Demonstrate how median is more robust than mean\nhundreds = np.full((1000,), 100)\nhundreds_billion = np.append(hundreds, 1000000000)\nnp.mean(hundreds), np.mean(hundreds_billion), np.median(hundreds), np.median(hundreds_billion)", "_____no_output_____" ], [ "# Check if there's any null numeric values\nfor label, content in df_tmp.items():\n if pd.api.types.is_numeric_dtype(content):\n if pd.isnull(content).sum():\n print(label)", "_____no_output_____" ], [ "# Check to see how many examples were missing\ndf_tmp.auctioneerID_is_missing.value_counts()", "_____no_output_____" ], [ "df_tmp.isna().sum()", "_____no_output_____" ] ], [ [ "### Filling and turning categorical variables into numbers", "_____no_output_____" ] ], [ [ "# Check for columns which aren't numeric\nfor label, content in df_tmp.items():\n if not pd.api.types.is_numeric_dtype(content):\n print(label)", "UsageBand\nfiModelDesc\nfiBaseModel\nfiSecondaryDesc\nfiModelSeries\nfiModelDescriptor\nProductSize\nfiProductClassDesc\nstate\nProductGroup\nProductGroupDesc\nDrive_System\nEnclosure\nForks\nPad_Type\nRide_Control\nStick\nTransmission\nTurbocharged\nBlade_Extension\nBlade_Width\nEnclosure_Type\nEngine_Horsepower\nHydraulics\nPushblock\nRipper\nScarifier\nTip_Control\nTire_Size\nCoupler\nCoupler_System\nGrouser_Tracks\nHydraulics_Flow\nTrack_Type\nUndercarriage_Pad_Width\nStick_Length\nThumb\nPattern_Changer\nGrouser_Type\nBackhoe_Mounting\nBlade_Type\nTravel_Controls\nDifferential_Type\nSteering_Controls\n" ], [ "# Turn categorical variables into numbers and fill missing\nfor label, content in df_tmp.items():\n if not pd.api.types.is_numeric_dtype(content):\n # Add binary column to indicate whether sample had 
missing value\n df_tmp[label+\"_is_missing\"] = pd.isnull(content)\n # Turn categories into numbers and add +1\n df_tmp[label] = pd.Categorical(content).codes+1", "_____no_output_____" ], [ "pd.Categorical(df_tmp[\"state\"]).codes+1", "_____no_output_____" ], [ "df_tmp.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 412698 entries, 0 to 412697\nColumns: 103 entries, SalesID to Steering_Controls_is_missing\ndtypes: bool(46), float64(3), int16(4), int64(10), int8(40)\nmemory usage: 77.9 MB\n" ], [ "df_tmp.head().T", "_____no_output_____" ], [ "df_tmp.isna().sum()", "_____no_output_____" ] ], [ [ "Now that all of data is numeric as well as our dataframe has no missing values, we should be able to build a machine learning model.", "_____no_output_____" ] ], [ [ "df_tmp.head()", "_____no_output_____" ], [ "len(df_tmp)", "_____no_output_____" ], [ "%%time\n# Instantiate model\nmodel = RandomForestRegressor(n_jobs=-1,\n random_state=42)\n\n# Fit the model\nmodel.fit(df_tmp.drop(\"SalePrice\", axis=1), df_tmp[\"SalePrice\"])", "CPU times: user 21min 9s, sys: 16.6 s, total: 21min 25s\nWall time: 6min 58s\n" ], [ "# Score the model\nmodel.score(df_tmp.drop(\"SalePrice\", axis=1), df_tmp[\"SalePrice\"])", "_____no_output_____" ] ], [ [ "**Question:** Why doesn't the above metric hold water? (why isn't the metric reliable)", "_____no_output_____" ], [ "### Splitting data into train/validation sets", "_____no_output_____" ] ], [ [ "df_tmp.saleYear", "_____no_output_____" ], [ "df_tmp.saleYear.value_counts()", "_____no_output_____" ], [ "# Split data into training and validation\ndf_val = df_tmp[df_tmp.saleYear == 2012]\ndf_train = df_tmp[df_tmp.saleYear != 2012]\n\nlen(df_val), len(df_train)", "_____no_output_____" ], [ "# Split data into X & y\nX_train, y_train = df_train.drop(\"SalePrice\", axis=1), df_train.SalePrice\nX_valid, y_valid = df_val.drop(\"SalePrice\", axis=1), df_val.SalePrice\n\nX_train.shape, y_train.shape, X_valid.shape, y_valid.shape", "_____no_output_____" ], [ "y_train", "_____no_output_____" ] ], [ [ "### Building an evaluation function", "_____no_output_____" ] ], [ [ "# Create evaluation function (the competition uses RMSLE)\nfrom sklearn.metrics import mean_squared_log_error, mean_absolute_error, r2_score\n\ndef rmsle(y_test, y_preds):\n \"\"\"\n Caculates root mean squared log error between predictions and\n true labels.\n \"\"\"\n return np.sqrt(mean_squared_log_error(y_test, y_preds))\n\n# Create function to evaluate model on a few different levels\ndef show_scores(model):\n train_preds = model.predict(X_train)\n val_preds = model.predict(X_valid)\n scores = {\"Training MAE\": mean_absolute_error(y_train, train_preds),\n \"Valid MAE\": mean_absolute_error(y_valid, val_preds),\n \"Training RMSLE\": rmsle(y_train, train_preds),\n \"Valid RMSLE\": rmsle(y_valid, val_preds),\n \"Training R^2\": r2_score(y_train, train_preds),\n \"Valid R^2\": r2_score(y_valid, val_preds)}\n return scores", "_____no_output_____" ] ], [ [ "## Testing our model on a subset (to tune the hyperparameters)", "_____no_output_____" ] ], [ [ "# # This takes far too long... 
for experimenting\n\n# %%time\n# model = RandomForestRegressor(n_jobs=-1, \n# random_state=42)\n\n# model.fit(X_train, y_train)", "_____no_output_____" ], [ "len(X_train)", "_____no_output_____" ], [ "# Change max_samples value\nmodel = RandomForestRegressor(n_jobs=-1,\n random_state=42,\n max_samples=10000)", "_____no_output_____" ], [ "%%time\n# Cutting down on the max number of samples each estimator can see improves training time\nmodel.fit(X_train, y_train)", "CPU times: user 44.7 s, sys: 1.01 s, total: 45.7 s\nWall time: 16.6 s\n" ], [ "(X_train.shape[0] * 100) / 1000000", "_____no_output_____" ], [ "10000 * 100", "_____no_output_____" ], [ "show_scores(model)", "_____no_output_____" ] ], [ [ "### Hyerparameter tuning with RandomizedSearchCV", "_____no_output_____" ] ], [ [ "%%time\nfrom sklearn.model_selection import RandomizedSearchCV\n\n# Different RandomForestRegressor hyperparameters\nrf_grid = {\"n_estimators\": np.arange(10, 100, 10),\n \"max_depth\": [None, 3, 5, 10],\n \"min_samples_split\": np.arange(2, 20, 2),\n \"min_samples_leaf\": np.arange(1, 20, 2),\n \"max_features\": [0.5, 1, \"sqrt\", \"auto\"],\n \"max_samples\": [10000]}\n\n# Instantiate RandomizedSearchCV model\nrs_model = RandomizedSearchCV(RandomForestRegressor(n_jobs=-1,\n random_state=42),\n param_distributions=rf_grid,\n n_iter=2,\n cv=5,\n verbose=True)\n\n# Fit the RandomizedSearchCV model\nrs_model.fit(X_train, y_train)", "Fitting 5 folds for each of 2 candidates, totalling 10 fits\n" ], [ "# Find the best model hyperparameters\nrs_model.best_params_", "_____no_output_____" ], [ "# Evaluate the RandomizedSearch model\nshow_scores(rs_model)", "_____no_output_____" ] ], [ [ "### Train a model with the best hyperparamters\n\n**Note:** These were found after 100 iterations of `RandomizedSearchCV`.", "_____no_output_____" ] ], [ [ "%%time\n\n# Most ideal hyperparamters\nideal_model = RandomForestRegressor(n_estimators=40,\n min_samples_leaf=1,\n min_samples_split=14,\n max_features=0.5,\n n_jobs=-1,\n max_samples=None,\n random_state=42) # random state so our results are reproducible\n\n# Fit the ideal model\nideal_model.fit(X_train, y_train)", "CPU times: user 3min 50s, sys: 2.22 s, total: 3min 52s\nWall time: 1min 14s\n" ], [ "# Scores for ideal_model (trained on all the data)\nshow_scores(ideal_model)", "_____no_output_____" ], [ "# Scores on rs_model (only trained on ~10,000 examples)\nshow_scores(rs_model)", "_____no_output_____" ] ], [ [ "## Make predictions on test data", "_____no_output_____" ] ], [ [ "# Import the test data\ndf_test = pd.read_csv(\"data/bluebook-for-bulldozers/Test.csv\",\n low_memory=False,\n parse_dates=[\"saledate\"])\n\ndf_test.head()", "_____no_output_____" ], [ "# Make predictions on the test dataset\ntest_preds = ideal_model.predict(df_test)", "_____no_output_____" ] ], [ [ "### Preprocessing the data (getting the test dataset in the same format as our training dataset)", "_____no_output_____" ] ], [ [ "def preprocess_data(df):\n \"\"\"\n Performs transformations on df and returns transformed df.\n \"\"\"\n df[\"saleYear\"] = df.saledate.dt.year\n df[\"saleMonth\"] = df.saledate.dt.month\n df[\"saleDay\"] = df.saledate.dt.day\n df[\"saleDayOfWeek\"] = df.saledate.dt.dayofweek\n df[\"saleDayOfYear\"] = df.saledate.dt.dayofyear\n \n df.drop(\"saledate\", axis=1, inplace=True)\n \n # Fill the numeric rows with median\n for label, content in df.items():\n if pd.api.types.is_numeric_dtype(content):\n if pd.isnull(content).sum():\n # Add a binary column which tells us if the data 
was missing or not\n df[label+\"_is_missing\"] = pd.isnull(content)\n # Fill missing numeric values with median\n df[label] = content.fillna(content.median())\n \n # Fill categorical missing data and turn categories into numbers\n if not pd.api.types.is_numeric_dtype(content):\n df[label+\"_is_missing\"] = pd.isnull(content)\n # We add +1 to the category code because pandas encodes missing categories as -1\n df[label] = pd.Categorical(content).codes+1\n \n return df", "_____no_output_____" ], [ "# Process the test data \ndf_test = preprocess_data(df_test)\ndf_test.head()", "_____no_output_____" ], [ "# Make predictions on updated test data\ntest_preds = ideal_model.predict(df_test)", "_____no_output_____" ], [ "X_train.head()", "_____no_output_____" ], [ "# We can find how the columns differ using sets\nset(X_train.columns) - set(df_test.columns)", "_____no_output_____" ], [ "# Manually adjust df_test to have auctioneerID_is_missing column\ndf_test[\"auctioneerID_is_missing\"] = False\ndf_test.head()", "_____no_output_____" ] ], [ [ "Finally, now that our test dataframe has the same features as our training dataframe, we can make predictions!", "_____no_output_____" ] ], [ [ "# Make predictions on the test data\ntest_preds = ideal_model.predict(df_test)", "_____no_output_____" ], [ "test_preds", "_____no_output_____" ] ], [ [ "We've made some predictions but they're not in the same format Kaggle is asking for: https://www.kaggle.com/c/bluebook-for-bulldozers/overview/evaluation", "_____no_output_____" ] ], [ [ "# Format predictions into the same format Kaggle is after\ndf_preds = pd.DataFrame()\ndf_preds[\"SalesID\"] = df_test[\"SalesID\"]\ndf_preds[\"SalesPrice\"] = test_preds\ndf_preds", "_____no_output_____" ], [ "# Export prediction data\ndf_preds.to_csv(\"data/bluebook-for-bulldozers/test_predictions.csv\", index=False)", "_____no_output_____" ] ], [ [ "### Feature Importance\n\nFeature importance seeks to figure out which different attributes of the data were most important when it comes to predicting the **target variable** (SalePrice).", "_____no_output_____" ] ], [ [ "# Find feature importance of our best model\nideal_model.feature_importances_", "_____no_output_____" ], [ "# Helper function for plotting feature importance\ndef plot_features(columns, importances, n=20):\n df = (pd.DataFrame({\"features\": columns,\n \"feature_importances\": importances})\n .sort_values(\"feature_importances\", ascending=False)\n .reset_index(drop=True))\n \n # Plot the dataframe\n fig, ax = plt.subplots()\n ax.barh(df[\"features\"][:n], df[\"feature_importances\"][:20])\n ax.set_ylabel(\"Features\")\n ax.set_xlabel(\"Feature importance\")\n ax.invert_yaxis()", "_____no_output_____" ], [ "plot_features(X_train.columns, ideal_model.feature_importances_)", "_____no_output_____" ], [ "df[\"Enclosure\"].value_counts()", "_____no_output_____" ] ], [ [ "**Question to finish:** Why might knowing the feature importances of a trained machine learning model be helpful?\n\n**Final challenge/extension:** What other machine learning models could you try on our dataset? \n**Hint:** https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html check out the regression section of this map, or try to look at something like CatBoost.ai or XGBoost.ai.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
e7c42e4d4232c324ea6d1ba21eab8360182e966c
1,058
ipynb
Jupyter Notebook
03.Video Basics/01.using video files.ipynb
klausinfo/Udemy-OpenCV
64dd4473bde403f1b946c3a2e7a64ff12b522707
[ "MIT" ]
1
2020-07-15T02:51:16.000Z
2020-07-15T02:51:16.000Z
03.Video Basics/01.using video files.ipynb
klausinfo/Udemy-OpenCV
64dd4473bde403f1b946c3a2e7a64ff12b522707
[ "MIT" ]
null
null
null
03.Video Basics/01.using video files.ipynb
klausinfo/Udemy-OpenCV
64dd4473bde403f1b946c3a2e7a64ff12b522707
[ "MIT" ]
null
null
null
21.591837
60
0.495274
[ [ [ "import cv2\nimport time\ncap = cv2.VideoCapture('../DATA/video_capture.mp4')\nfps = 25\nif cap.isOpened() == False :\n print('error')\nwhile cap.isOpened() :\n ret,frame = cap.read()\n if ret == True :\n time.sleep(1/fps)\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q') :\n break\ncap.release()\ncv2.destroyAllWindows()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
e7c433a4a217ffd96cd6a88bb6daea26073bca7c
288,998
ipynb
Jupyter Notebook
zz_test/030-bhsa.ipynb
sethbam9/tutorials
c259636682304cb516e9048ca8df5a3ab92c62cc
[ "MIT" ]
2
2019-07-17T18:51:26.000Z
2019-07-24T19:45:23.000Z
zz_test/030-bhsa.ipynb
sethbam9/tutorials
c259636682304cb516e9048ca8df5a3ab92c62cc
[ "MIT" ]
3
2019-01-16T10:56:50.000Z
2020-11-16T16:30:48.000Z
zz_test/030-bhsa.ipynb
sethbam9/tutorials
c259636682304cb516e9048ca8df5a3ab92c62cc
[ "MIT" ]
2
2020-12-17T15:41:33.000Z
2021-11-03T18:23:07.000Z
73.72398
17,096
0.565821
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "from tf.app import use\nfrom fixture import typeShow", "_____no_output_____" ] ], [ [ "# BHSA specifics", "_____no_output_____" ] ], [ [ "A = use(\"bhsa:clone\", checkout=\"clone\", hoist=globals())", "_____no_output_____" ], [ "A.reuse()", "_____no_output_____" ], [ "A.showContext()", "_____no_output_____" ] ], [ [ "# A slot with standard features", "_____no_output_____" ] ], [ [ "A.displayShow(\"standardFeatures\")", "_____no_output_____" ], [ "w = 2\np = L.u(w, otype=\"phrase\")[0]", "_____no_output_____" ], [ "tree = A.unravel(w, explain=True)", "<0> TOP\n <1> word 3461 {3461} \n" ], [ "tree = A.unravel(p, explain=True)", "<0> TOP\n <1> phrase 675477 {38612} \n <2> word 38612 {38612} \n" ], [ "A.pretty(w, standardFeatures=True)\nA.pretty(p, standardFeatures=True)", "_____no_output_____" ] ], [ [ "# Base types", "_____no_output_____" ] ], [ [ "p = 675477\nhighlights = {p}\nA.pretty(p, highlights=highlights)\nA.pretty(p, highlights=highlights, baseTypes=\"phrase\")", "_____no_output_____" ] ], [ [ "# A tricky verse", "_____no_output_____" ] ], [ [ "v = T.nodeFromSection((\"Genesis\", 7, 14))\nv", "_____no_output_____" ], [ "A.plain(v, explain=False)", "_____no_output_____" ] ], [ [ "Halfverses are hidden. This verse is divided (at top level) in 3 spans: one for each clause chunk. The first and last chunks\nbelong to clause 1, and the middle chunk is clause 2.\n\nLook what happens if we set `hideTypes=True`:", "_____no_output_____" ] ], [ [ "A.plain(v, hideTypes=False, explain=False)", "_____no_output_____" ] ], [ [ "When you make the browser window narrower, the line breaks are different.\nBecause now the verse is divided in 2 spans: one for each half verse, and the separation between\nthe half verses is within the third clause chunk.\n\nSee it in pretty view:", "_____no_output_____" ] ], [ [ "A.pretty(v, explain=False)", "_____no_output_____" ] ], [ [ "We can selectively unhide the half verse and leave everything else hidden:", "_____no_output_____" ] ], [ [ "A.pretty(\n v,\n hideTypes=True,\n hiddenTypes=\"subphrase phrase_atom clause_atom sentence_atom\",\n explain=False,\n)", "_____no_output_____" ] ], [ [ "This shows the reason of the split.\n\nWe can also print the full structure (although that's a bit over the top):", "_____no_output_____" ] ], [ [ "A.pretty(v, hideTypes=False)", "_____no_output_____" ] ], [ [ "# Alignment in tables", "_____no_output_____" ] ], [ [ "A.table(\n ((2, 213294, 426583), (3, 213295, 426582), (4, 213296, 426581)), withPassage={1, 2}\n)", "_____no_output_____" ] ], [ [ "# Subphrases\n\nSubphrases with equal slots:", "_____no_output_____" ] ], [ [ "w = 3461\nsps = L.u(w, otype=\"subphrase\")\nfor sp in sps[0:-1]:\n print(E.oslots.s(sp))\n A.pretty(sp, standardFeatures=True, extraFeatures=\"rela\", withNodes=True)", "array('I', [3461, 3462, 3463])\n" ] ], [ [ "# Sentence spanning two verses", "_____no_output_____" ] ], [ [ "A.pretty(v, withNodes=True, explain=False)", "_____no_output_____" ] ], [ [ "# Base types", "_____no_output_____" ] ], [ [ "cl = 427612\nwords = L.d(cl, otype=\"word\")\nphrases = L.d(cl, otype=\"phrase\")\nhighlights = {phrases[1]: \"lightsalmon\", words[2]: \"lightblue\"}\nA.pretty(cl, baseTypes=\"phrase\", withNodes=True, highlights=highlights, explain=True)", "<0> TOP\n <1> clause 427612 {322-326} \n <2> phrase* 651725 {322-323} \n <3> word 322 {322} \n <3> word 323 {323} \n <2> phrase* 651726 {324-326} \n <3> word 324 {324} \n <3> word 325 {325} \n <3> word 326 
{326} \n" ] ], [ [ "# Gaps", "_____no_output_____" ] ], [ [ "c = 427931\ns = L.u(c, otype=\"sentence\")[0]\nv = L.u(c, otype=\"verse\")[0]\nhighlights = {c: \"khaki\", c + 1: \"lightblue\"}", "_____no_output_____" ], [ "A.webLink(s)", "_____no_output_____" ], [ "A.plain(s, withNodes=True, highlights=highlights)", "_____no_output_____" ], [ "A.plain(s)", "_____no_output_____" ], [ "T.formats", "_____no_output_____" ], [ "A.plain(c, fmt=\"text-phono-full\", withNodes=True, explain=False)", "_____no_output_____" ], [ "A.plain(c, withNodes=False, explain=False)", "_____no_output_____" ], [ "A.pretty(c, withNodes=True, explain=False)", "_____no_output_____" ], [ "A.pretty(c, withNodes=True, hideTypes=False, explain=False)", "_____no_output_____" ], [ "A.pretty(s, withNodes=True, highlights=highlights, explain=False)", "_____no_output_____" ], [ "A.plain(s, withNodes=True, hideTypes=False)", "_____no_output_____" ], [ "A.pretty(s, withNodes=True, highlights=highlights, hideTypes=False, explain=False)", "_____no_output_____" ], [ "A.plain(427931)\nA.pretty(427931, withNodes=True)\nA.plain(427932)\nA.pretty(427932)", "_____no_output_____" ], [ "sp = F.otype.s(\"subphrase\")[0]\nv = L.u(sp, otype=\"verse\")[0]", "_____no_output_____" ], [ "A.pretty(v)", "_____no_output_____" ], [ "p = 653380\ns = L.u(p, otype=\"sentence\")[0]\nc = L.d(s, otype=\"clause\")[0]", "_____no_output_____" ], [ "A.plain(p)", "_____no_output_____" ], [ "A.pretty(p)", "_____no_output_____" ], [ "A.pretty(p, baseTypes=\"phrase_atom\", hideTypes=True)", "_____no_output_____" ], [ "A.pretty(p, baseTypes=\"phrase\")", "_____no_output_____" ], [ "A.plain(s)", "_____no_output_____" ], [ "A.plain(s, plainGaps=False)", "_____no_output_____" ], [ "A.plain(c, withNodes=True)", "_____no_output_____" ], [ "A.pretty(c)", "_____no_output_____" ], [ "A.pretty(s, baseTypes={\"subphrase\", \"word\"})", "_____no_output_____" ], [ "A.pretty(s, baseTypes=\"phrase\")", "_____no_output_____" ], [ "A.prettyTuple((p,), 1, baseTypes=\"phrase\")", "_____no_output_____" ], [ "A.pretty(p, baseTypes=\"phrase\")", "_____no_output_____" ] ], [ [ "# Atom types", "_____no_output_____" ] ], [ [ "p = F.otype.s(\"phrase\")[0]\npa = F.otype.s(\"phrase_atom\")[0]", "_____no_output_____" ] ], [ [ "# Plain", "_____no_output_____" ] ], [ [ "A.plain(p, highlights={p, pa})\nA.plain(p, highlights={p, pa}, hideTypes=False)", "_____no_output_____" ], [ "A.plain(p, highlights={p})\nA.plain(p, highlights={p}, hideTypes=False)", "_____no_output_____" ], [ "A.plain(p, highlights={pa})\nA.plain(p, highlights={pa}, hideTypes=False)", "_____no_output_____" ], [ "A.plain(pa, highlights={p, pa})\nA.plain(pa, highlights={p, pa}, hideTypes=False)", "_____no_output_____" ], [ "A.plain(pa, highlights={pa})\nA.plain(pa, highlights={pa}, hideTypes=False)", "_____no_output_____" ], [ "A.plain(pa, highlights={p})\nA.plain(pa, highlights={p}, hideTypes=False)", "_____no_output_____" ] ], [ [ "# Pretty", "_____no_output_____" ] ], [ [ "A.pretty(p, highlights={p, pa})\nA.pretty(p, highlights={p, pa}, hideTypes=False)", "_____no_output_____" ], [ "A.pretty(p, highlights={p})\nA.pretty(p, highlights={p}, hideTypes=False)", "_____no_output_____" ], [ "A.pretty(p, highlights={pa})\nA.pretty(p, highlights={pa}, hideTypes=False)", "_____no_output_____" ], [ "A.pretty(pa, highlights={p, pa})\nA.pretty(pa, highlights={p, pa}, hideTypes=False)", "_____no_output_____" ], [ "A.pretty(pa, highlights={pa})\nA.pretty(pa, highlights={pa}, hideTypes=False)", "_____no_output_____" ], [ "A.pretty(pa, 
highlights={p})\nA.pretty(pa, highlights={p}, hideTypes=False)", "_____no_output_____" ] ], [ [ "# Highlights", "_____no_output_____" ] ], [ [ "cl = 435509\nph = 675481\nw = 38625\n\nhighlights = {ph, w}", "_____no_output_____" ], [ "A.pretty(cl, highlights=highlights, withNodes=True)", "_____no_output_____" ], [ "A.pretty(cl, highlights=highlights, baseTypes={\"phrase\"}, withNodes=True)", "_____no_output_____" ], [ "A.plain(ph, highlights=highlights)", "_____no_output_____" ], [ "A.plain(ph, highlights=highlights, baseTypes={\"phrase\"}, withNodes=True)", "_____no_output_____" ], [ "typeShow(A)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
e7c43ba818e4cf58fb5ab5756741d9172a561aa5
936
ipynb
Jupyter Notebook
hacker-rank/10 Days of Statistics/Day 5/Normal Distribution II.ipynb
izan-majeed/archives
89af2a24f4a6f07bda8ee38d99ae8667d42727f4
[ "Apache-2.0" ]
null
null
null
hacker-rank/10 Days of Statistics/Day 5/Normal Distribution II.ipynb
izan-majeed/archives
89af2a24f4a6f07bda8ee38d99ae8667d42727f4
[ "Apache-2.0" ]
null
null
null
hacker-rank/10 Days of Statistics/Day 5/Normal Distribution II.ipynb
izan-majeed/archives
89af2a24f4a6f07bda8ee38d99ae8667d42727f4
[ "Apache-2.0" ]
null
null
null
19.5
73
0.496795
[ [ [ "from math import erf\nmean, std = 70, 10\ncdf = lambda x: 0.5 * (1 + erf((x - mean) / (std * (2 ** 0.5))))\n\nprint(round((1-cdf(80))*100, 2))\nprint(round((1-cdf(60))*100, 2))\nprint(round((cdf(60))*100, 2))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
e7c45413d9cef12e30e5a67012b14becb17695b3
3,369
ipynb
Jupyter Notebook
Interview Preparation Kit/1. arrays/4. array manipulation.ipynb
faisalsanto007/Hakcerrank-problem-solving
eaf6404e8896fe3448df8a3cb4c86585fd7bebcc
[ "MIT" ]
null
null
null
Interview Preparation Kit/1. arrays/4. array manipulation.ipynb
faisalsanto007/Hakcerrank-problem-solving
eaf6404e8896fe3448df8a3cb4c86585fd7bebcc
[ "MIT" ]
null
null
null
Interview Preparation Kit/1. arrays/4. array manipulation.ipynb
faisalsanto007/Hakcerrank-problem-solving
eaf6404e8896fe3448df8a3cb4c86585fd7bebcc
[ "MIT" ]
null
null
null
23.559441
247
0.448204
[ [ [ "Starting with a 1-indexed array of zeros and a list of operations, for each operation add a value to each the array element between two given indices, inclusive. Once all operations have been performed, return the maximum value in the array.", "_____no_output_____" ] ], [ [ "def main():\n n, m = map(int, input().split())\n xs = [0] * (n + 2)\n\n for _ in range(m):\n a, b, k = map(int, input().split())\n xs[a] += k\n xs[b + 1] -= k\n\n answer = 0\n current = 0\n\n for x in xs:\n current += x\n answer = max(answer, current)\n\n print(answer)\n\n\nif __name__ == '__main__':\n main()", "_____no_output_____" ], [ "import math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the arrayManipulation function below.\ndef arrayManipulation(n, queries): \n res = [0]*(n+1)\n for row in range(len(queries)):\n a = queries[row][0]\n b = queries[row][1]\n k = queries[row][2]\n res[a-1] += k\n res[b] -= k\n sm = 0\n mx = 0\n for i in range(len(res)):\n sm += res[i]\n if sm > mx:\n mx = sm\n return mx\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nm = input().split()\n\n n = int(nm[0])\n\n m = int(nm[1])\n\n queries = []\n\n for _ in range(m):\n queries.append(list(map(int, input().rstrip().split())))\n\n result = arrayManipulation(n, queries)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()", "_____no_output_____" ] ], [ [ "**Sample Input**\n\n 5 3\n 1 2 100\n 2 5 100\n 3 4 100\n\n**Sample Output**\n\n 200\n\n**Explanation**\n\n After the first update the list is 100 100 0 0 0.\n After the second update list is 100 200 100 100 100.\n After the third update list is 100 200 200 200 100.\n\n The maximum value is 200.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
e7c473d60ee1ad9f9d1dbe6e06aab75f4545ca68
190,999
ipynb
Jupyter Notebook
Robotics Concepts/03 wheeled.ipynb
gyani91/Robotics
124b9df7ae82e8c9b9ad54c74292585e81c7a3bb
[ "MIT" ]
null
null
null
Robotics Concepts/03 wheeled.ipynb
gyani91/Robotics
124b9df7ae82e8c9b9ad54c74292585e81c7a3bb
[ "MIT" ]
null
null
null
Robotics Concepts/03 wheeled.ipynb
gyani91/Robotics
124b9df7ae82e8c9b9ad54c74292585e81c7a3bb
[ "MIT" ]
null
null
null
369.437137
102,732
0.926513
[ [ [ "# Forward Kinematics for Wheeled Robots; Dead Reckoning", "_____no_output_____" ] ], [ [ "# Preparation\nimport numpy as np\nnp.set_printoptions(precision=4, suppress=True)\nimport matplotlib.pyplot as plt\nimport ipywidgets", "_____no_output_____" ] ], [ [ "Let's first setup useful functions (see transforms2d notebook)", "_____no_output_____" ] ], [ [ "def mktr(x, y):\n return np.array([[1, 0, x],\n [0, 1, y],\n [0, 0, 1]])\n\n\ndef mkrot(theta):\n return np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n\n\ndef drawf(f, ax=None, name=None):\n \"\"\" Draw frame defined by f on axis ax (if provided) or on plt.gca() otherwise \"\"\"\n xhat = f @ np.array([[0, 0, 1], [1, 0, 1]]).T\n yhat = f @ np.array([[0, 0, 1], [0, 1, 1]]).T\n if(not ax):\n ax = plt.gca()\n ax.plot(xhat[0, :], xhat[1, :], 'r-') # transformed x unit vector\n ax.plot(yhat[0, :], yhat[1, :], 'g-') # transformed y unit vector\n if(name):\n ax.text(xhat[0, 0], xhat[1, 0], name, va=\"top\", ha=\"center\")", "_____no_output_____" ] ], [ [ "A function to draw a robot at a given pose `f`", "_____no_output_____" ] ], [ [ "def drawrobot(f, l, ax=None, alpha=0.5):\n \"\"\" Draw robot at f, with wheel distance from center l,\n on axis ax (if provided) or on plt.gca() otherwise.\n if l is None, no wheels are drawn\"\"\"\n\n if(not ax):\n ax = plt.gca()\n\n robot = ([[-1, 2, -1, -1], # x\n [-1, 0, 1, -1]]) # y\n robot = np.array(robot)\n robot = np.vstack((\n robot * 0.1, # scale by 0.1 units\n np.ones((1, robot.shape[1]))))\n\n robott = f @ robot\n\n wheell = np.array([\n [-0.05, 0.05],\n [l, l],\n [1, 1]\n ])\n wheelr = wheell * np.array([[1, -1, 1]]).T\n wheellt = f @ wheell\n wheelrt = f @ wheelr\n ax.plot(robott[0, :], robott[1, :], 'k-', alpha=alpha)\n ax.plot(wheellt[0, :], wheellt[1, :], 'k-', alpha=alpha)\n ax.plot(wheelrt[0, :], wheelrt[1, :], 'k-', alpha=alpha)", "_____no_output_____" ] ], [ [ "Note how the frame is centered in the middle point between the two wheels, and the robot points towards the $x$ axis.", "_____no_output_____" ] ], [ [ "drawf(np.eye(3))\ndrawrobot(np.eye(3), 0.1)\ndrawrobot(mktr(0.5, 0.3) @ mkrot(np.pi/4), 0.1)\nplt.gca().axis(\"equal\")", "_____no_output_____" ] ], [ [ "## The kinematic model of a differential-drive robot\n![image.png](attachment:image.png)\n\nFor a given differential drive robot, we have the following (fixed) parameter:\n* $l$: distance from the robot frame to each wheel. The distance between the wheel is therefore $2 \\cdot l$\n\nWe control the angular speed of each wheel $\\phi_L, \\phi_R$. Given the wheel radius $r$, the tangential speed for each wheel is $v_L = r \\phi_L$ and $v_R = r \\phi_R$, respectively. 
From now on, we assume we directly control (or measure) $v_L$ and $v_R$.\n\nAssuming that $v_L$ and $v_R$ are constant during a short time interval $[t, t+\\delta t]$, we have three ways to update the pose of the robot:\n* Euler method\n* Runge-Kutta method\n* Exact method\n\nThe function below implements the exact method, and returns a transform from the pose at $t$ to the pose at $t+\\delta t$.", "_____no_output_____" ] ], [ [ "def ddtr(vl, vr, l, dt):\n \"\"\" returns the pose transform for a motion with duration dt of a differential\n drive robot with wheel speeds vl and vr and wheelbase l \"\"\"\n\n if(np.isclose(vl, vr)): # we are moving straight, R is at the infinity and we handle this case separately\n return mktr((vr + vl)/2*dt, 0) # note we translate along x ()\n\n omega = (vr - vl) / (2 * l) # angular speed of the robot frame\n R = l * (vr + vl) / (vr - vl)\n\n # Make sure you understand this!\n return mktr(0, R) @ mkrot(omega * dt) @ mktr(0, -R)", "_____no_output_____" ] ], [ [ "Let's test our function. Try special cases and try for each to predict where the ICR will be.\n* $v_R = -v_L$\n* $v_R = 0$, $v_L > 0$\n* $v_R = 0.5 \\cdot v_L$", "_____no_output_____" ] ], [ [ "l = 0.1\ninitial_frame = np.eye(3) # try changing this\n\n\[email protected](vl=ipywidgets.FloatSlider(min=-2, max=+2),\n vr=ipywidgets.FloatSlider(min=-2, max=+2))\ndef f(vl, vr):\n drawf(initial_frame) # Initial frame\n f = ddtr(vl, vr, l, 1)\n drawf(f)\n drawrobot(f, l)\n plt.axis(\"equal\")", "_____no_output_____" ] ], [ [ "This approach tells you how to move from the pose at $t$ to the pose at $t+\\delta t$. Then you can concatenate multiple transformations.", "_____no_output_____" ] ], [ [ "dt = 1.0\nTs = [mktr(1, 1) @ mkrot(np.pi/4)]\n\nvl, vr = 0.10, 0.05\nl = 0.1\nfor i in range(10):\n Ts.append(Ts[-1] @ ddtr(vl, vr, l, dt))\n\ndrawf(np.eye(3))\nfor T in Ts:\n drawrobot(T, l)\nplt.axis(\"equal\")", "_____no_output_____" ] ], [ [ "Question: in the example above, would you get the same result if you estimated the final position in a single step? Try that, and make sure you understand the result.", "_____no_output_____" ] ], [ [ "@ipywidgets.interact(\n vl=ipywidgets.FloatSlider(min=-0.5, max=0.5, value=0, step=0.02),\n vr=ipywidgets.FloatSlider(min=-0.5, max=0.5, value=0, step=0.02),\n l= ipywidgets.FloatSlider(min=0.05, max=0.15, value=0.10, step=0.01))\ndef f(vl, vr, l):\n Ts = [np.eye(3)]\n\n for i in range(10):\n Ts.append(Ts[-1] @ ddtr(vl, vr, l, 1))\n\n drawf(np.eye(3))\n for T in Ts:\n drawrobot(T, l)\n plt.axis(\"equal\")", "_____no_output_____" ] ], [ [ "## Exercise\nImplement the same approach, using the Euler method for integration. Compare the results with the method above using different values for $\\delta t$.", "_____no_output_____" ], [ "## Dead Reckoning\nWe now have all the necessary info in order to predict the trajectory when the wheel speeds change over time. We define the initial and final speeds for the left and right wheels, and let them vary linearly from $t=0$ to $t=10$.\n\nCan you set the parameters to get an s-shaped path? 
Try changing the value of `dt` and make sure you understand under what conditions the final pose of the robot changes when you change `dt`.", "_____no_output_____" ] ], [ [ "l = 0.05\n\n\[email protected](\n vl0=ipywidgets.FloatSlider(min=-0.5, max=0.5, value=0, step=0.02),\n vr0=ipywidgets.FloatSlider(min=-0.5, max=0.5, value=0, step=0.02),\n vl1=ipywidgets.FloatSlider(min=-0.5, max=0.5, value=0, step=0.02),\n vr1=ipywidgets.FloatSlider(min=-0.5, max=0.5, value=0, step=0.02),\n dt=ipywidgets.Select(options=[1, 5]))\ndef f(vl0, vr0, vl1, vr1, dt):\n t0 = 0\n t1 = 10\n\n def wheelspeeds(t):\n return (vl0 + (vl1-vl0)*(t-t0)/(t1-t0),\n vr0 + (vr1-vr0)*(t-t0)/(t1-t0))\n\n ts = np.arange(t0, t1+dt, dt)\n vls, vrs = [], []\n for t in ts:\n vl, vr = wheelspeeds(t)\n vls.append(vl)\n vrs.append(vr)\n\n cT = np.eye(3)\n Ts = []\n for i, t in enumerate(ts):\n if(i == 0):\n Ts.append(cT)\n else:\n vl, vr = vls[i-1], vrs[i-1]\n cT = cT @ ddtr(vl, vr, l, dt)\n Ts.append(cT)\n\n fig, ax = plt.subplots()\n ax.plot(ts, vls, label=(\"left\"))\n ax.plot(ts, vrs, label=(\"right\"))\n ax.set(xlabel=\"time\",\n ylabel=\"wheel tangential speed\")\n ax.legend()\n\n fig, ax = plt.subplots()\n drawf(np.eye(3), ax=ax)\n for T in Ts:\n drawrobot(T, l, ax=ax)\n drawf(Ts[-1], name=\"time = {}\".format(ts[-1]), ax=ax)\n plt.axis(\"equal\")", "_____no_output_____" ] ], [ [ "## Exercise\nImplement the kinematic model of a steered robot (bicycle kinematics) with control inputs:\n* steering angle $\\gamma$\n* tangential velocity of the back wheel $v$.\n\nNote: the model depends on the wheelbase (distance $L$ between the front and back wheel).\n\n![image.png](attachment:image.png)\n\nDraw the trajectory of a robot with a constant $v$ and a $\\gamma$ value that changes with time according to the following laws.\n\nLaw 1:\n* Constant $v$\n* Cycle forever:\n * 2 seconds straight ($\\gamma = 0$)\n * 10 seconds slight left ($\\gamma = 5$ degrees)\n * 5 seconds slight right ($\\gamma = -5$ degrees)\n\n\nLaw 2:\n* Constant $v$\n* $\\gamma(t) = t / 2$ degrees on the interval $t \\in [0, 20]$ seconds.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
e7c478778a403857b34c70115a1c0ac827fb1118
2,329
ipynb
Jupyter Notebook
Python-Drills/01-Sort_List_of_Lists/Sort_List_of_Lists.ipynb
SVEENASHARMA/web-design-challenge
08526d8257e0e29d5581e69d0697f4ba8af760c4
[ "ADSL" ]
null
null
null
Python-Drills/01-Sort_List_of_Lists/Sort_List_of_Lists.ipynb
SVEENASHARMA/web-design-challenge
08526d8257e0e29d5581e69d0697f4ba8af760c4
[ "ADSL" ]
null
null
null
Python-Drills/01-Sort_List_of_Lists/Sort_List_of_Lists.ipynb
SVEENASHARMA/web-design-challenge
08526d8257e0e29d5581e69d0697f4ba8af760c4
[ "ADSL" ]
null
null
null
20.429825
121
0.497209
[ [ [ "# Sorting Lists of Lists", "_____no_output_____" ], [ "Sort the following list of lists by the grades in descending order. \n\nThe desired output should be: <b>[['Kaylee', 99], ['Simon', 99], ['Zoe', 85], ['Malcolm', 80], ['Wash', 79]]</b>\n \n### Hints: https://wiki.python.org/moin/HowTo/Sorting", "_____no_output_____" ] ], [ [ "grades = [[\"Malcolm\", 80], [\"Zoe\", 85], [\"Kaylee\", 99], [\"Simon\", 99], [\"Wash\", 79]]", "_____no_output_____" ] ], [ [ "### YOUR CODE HERE", "_____no_output_____" ] ], [ [ "sorted_list = sorted(grades, key = lambda x:(-x[1], x[0]))\nprint(sorted_list)", "[['Kaylee', 99], ['Simon', 99], ['Zoe', 85], ['Malcolm', 80], ['Wash', 79]]\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7c49e8744e5d36bde5cb9ff78ddc7e196e3e4fe
4,183
ipynb
Jupyter Notebook
TensorFlow-notebook-002.ipynb
Varun-Bawa/Tensorflow-basics
311b9c7a3a5d35c1075395912f27041179cde9f3
[ "Apache-2.0" ]
1
2018-07-17T13:04:28.000Z
2018-07-17T13:04:28.000Z
TensorFlow-notebook-002.ipynb
Varun-Bawa/Tensorflow-basics
311b9c7a3a5d35c1075395912f27041179cde9f3
[ "Apache-2.0" ]
null
null
null
TensorFlow-notebook-002.ipynb
Varun-Bawa/Tensorflow-basics
311b9c7a3a5d35c1075395912f27041179cde9f3
[ "Apache-2.0" ]
null
null
null
28.455782
486
0.588573
[ [ [ "#tf.train API", "_____no_output_____" ], [ "print('TensorFlow provides optimizers that slowly change each variable in order to minimize the loss function. The simplest optimizer is gradient descent. It modifies each variable according to the magnitude of the derivative of loss with respect to that variable. In general, computing symbolic derivatives manually is tedious and error-prone. Consequently, TensorFlow can automatically produce derivatives given only a description of the model using the function tf.gradients.')", "TensorFlow provides optimizers that slowly change each variable in order to minimize the loss function. The simplest optimizer is gradient descent. It modifies each variable according to the magnitude of the derivative of loss with respect to that variable. In general, computing symbolic derivatives manually is tedious and error-prone. Consequently, TensorFlow can automatically produce derivatives given only a description of the model using the function tf.gradients.\n" ], [ "#For better understanding of gradient descent and neural network, I suggest you to go through this link:\n\n##http://neuralnetworksanddeeplearning.com/chap1.html", "_____no_output_____" ], [ "#Importing and restoring variables in new notebook\nimport tensorflow as tf\nsess = tf.Session()\n#A basic TF linear model\nW = tf.Variable([.3], tf.float32)\nb = tf.Variable([-.3], tf.float32)\nx = tf.placeholder(tf.float32)\nlinear_model = W * x + b\n\n#Loss variable def\ny = tf.placeholder(tf.float32)\nsquared_deltas = tf.square(linear_model - y)\nloss = tf.reduce_sum(squared_deltas)\n\ninit = tf.global_variables_initializer()\nsess.run(init)", "_____no_output_____" ], [ "optimizer = tf.train.GradientDescentOptimizer(0.01) #here 0.01 is the learning rate \ntrain = optimizer.minimize(loss)", "_____no_output_____" ], [ "#running a loop to to minimize the value for loss and finding accurate weights 'W & b'\n\nfor i in range(10000):\n sess.run(train, {x:[1,2,3,4], y:[0,-1,-2,-3]})\n \nprint(sess.run([W, b]))\nprint(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))", "[array([-0.9999991], dtype=float32), array([0.99999744], dtype=float32)]\n4.206413e-12\n" ], [ "#Evaluating training data\ncurr_W, curr_b, curr_loss = sess.run([W, b, loss], {x:[1,2,3,4], y:[0,-1,-2,-3]})\nprint(\"W: %s b: %s loss: %s\"%(curr_W, curr_b, curr_loss) )", "W: [-0.9999991] b: [0.99999744] loss: 4.206413e-12\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7c4a5b7f4ced01ef6c81f2f423186d77de678cb
9,999
ipynb
Jupyter Notebook
Week 09 Neural Networks/Code Challenges/Day 1 XOR.ipynb
jraval/LambdaSchoolDataScience
a5e576d1950fba1bd00e92039fe825c622f5ffad
[ "MIT" ]
29
2018-04-18T07:43:27.000Z
2021-12-13T17:24:24.000Z
Week 09 Neural Networks/Code Challenges/Day 1 XOR.ipynb
jraval/LambdaSchoolDataScience
a5e576d1950fba1bd00e92039fe825c622f5ffad
[ "MIT" ]
null
null
null
Week 09 Neural Networks/Code Challenges/Day 1 XOR.ipynb
jraval/LambdaSchoolDataScience
a5e576d1950fba1bd00e92039fe825c622f5ffad
[ "MIT" ]
46
2018-08-18T15:59:15.000Z
2021-11-17T02:14:51.000Z
28.815562
506
0.473247
[ [ [ "In this challenge we jump directly into building neural networks. We won't get into much theory or generality, but by the end of this exercise you'll build a very simple example of one, and in the mean time gain some intuition for how they work. First, we import numpy as usual.", "_____no_output_____" ] ], [ [ "# LAMBDA SCHOOL\n#\n# MACHINE LEARNING\n#\n# MIT LICENSE\n\nimport numpy as np", "_____no_output_____" ] ], [ [ "Next, look at the Wikipedia article for the XOR function (https://en.wikipedia.org/wiki/XOR_gate). Basically, it's a function that takes in two truth values (aka booleans) x and y and spits out a third truth value f(x,y) according to the following rule: f(x,y) is true when x is true OR y is true, but not both. If we use the common representation wherein \"1\" means \"True\" and \"0\" means \"False\", this means that f(0,0) = f(1,1) = 0 and f(0,1) = f(1,0) = 1. Check that this makes sense!\n\nYour first task for today is to implement the XOR function. There are slick ways to do this using modular arithmetic (if you're in to that sort of thing), but implement it however you like. Check that it gives the right values for each of the inputs (0,0), (0,1), (1,0), and (1,1).", "_____no_output_____" ] ], [ [ "def xorFunction(x, y):\n if x == y:\n return False\n else:\n return True", "_____no_output_____" ] ], [ [ "Great. Now, define a function sigma(x) that acts the way that the sigmoid function does. If you don't remember exactly how this works, check Wikipedia or ask one of your classmates.", "_____no_output_____" ] ], [ [ "def sigma(x):\n return 1 / (1 + np.exp(-x))", "_____no_output_____" ] ], [ [ "Most machine learning algorithms have free parameters that we tweak to get the behavior we want, and this is no exception. Introduce two variables a and b and assign them both to the value 10 (for now).", "_____no_output_____" ] ], [ [ "a = 10\nb = 10", "_____no_output_____" ] ], [ [ "Finally, here's our first neural network. Just like linear and logistic regression, it's nothing more than a function that takes in our inputs (x and y) and returns an output according to some prescribed rule. Today our rule consists of the following steps:\n\nStep 1: Take x and y and calculate ax + by.\n\nStep 2: Plug the result of step 1 into the sigma function we introduced earlier.\n\nStep 3: Take the result of step 2 and round it to the nearest whole number.\n\nDefine a function NN(x,y) that takes in x and y and returns the result of performing these steps.", "_____no_output_____" ] ], [ [ "def NN(x,y):\n linear = a*x + b*y\n out = sigma(linear)\n return np.round(out)", "_____no_output_____" ] ], [ [ "See what happens when you plug the values (0,0), (0,1), (1,0), and (1,1) into NN. The last (and possible trickiest) part of this assignment is to try and find values of a and b such that NN and XOR give the same outputs on each of those inputs. If you find a solution, share it. If you can't, talk with your classmates and see how they do. 
Feel free to collaborate!", "_____no_output_____" ] ], [ [ "print([NN(*args) for args in [(0, 0), (0, 1), (1, 0), (1, 1)]])", "[0.0, 1.0, 1.0, 1.0]\n" ] ], [ [ "The XOR function cannot be learned by a single unit, which has a linear decision boundary.\n\nSee: https://www.youtube.com/watch?v=kNPGXgzxoHw", "_____no_output_____" ] ], [ [ "def NN2(x, y):\n h1 = np.round(sigma(w11*x + w11*y + b11))\n h2 = np.round(sigma(w12*x + w12*y + b12))\n out = np.round(sigma(w21*h1 + w22*h2 + b2))\n \n return out", "_____no_output_____" ], [ "w11 = 20\nw12 = -20\nb11 = -10\nb12 = 30\nw21 = 20\nw22 = 20\nb2 = -30", "_____no_output_____" ], [ "print([NN2(*args) for args in [(0, 0), (0, 1), (1, 0), (1, 1)]])", "[0.0, 1.0, 1.0, 0.0]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7c4b11e39fe3f8434d5e0f8ea265a5b654cf1be
10,973
ipynb
Jupyter Notebook
Chapter3/Pipeline.ipynb
bpbpublications/Continuous-Machine-Learning-with-Kubeflow
a774502905beac33ce62acd55d0006db3e88f3db
[ "MIT" ]
1
2021-12-19T23:58:52.000Z
2021-12-19T23:58:52.000Z
Chapter3/Pipeline.ipynb
bpbpublications/Continuous-Machine-Learning-with-Kubeflow
a774502905beac33ce62acd55d0006db3e88f3db
[ "MIT" ]
null
null
null
Chapter3/Pipeline.ipynb
bpbpublications/Continuous-Machine-Learning-with-Kubeflow
a774502905beac33ce62acd55d0006db3e88f3db
[ "MIT" ]
1
2021-12-17T04:24:17.000Z
2021-12-17T04:24:17.000Z
35.977049
145
0.455117
[ [ [ "\nimport kfp.dsl as dsl\nimport yaml\nfrom kubernetes import client as k8s\nimport kfp.gcp as gcp\nfrom kfp import components\nfrom string import Template\nimport json\nfrom kubernetes import client as k8s_client\n\n\[email protected](\n name='',\n description='End to End pipeline for Tensorflow Brain MRI '\n)\n\n\ndef brain_tensorflow_pipeline(\n dataextraction_step_image=\"gcr.io/<PROJECT_ID>/brain_tumor_scan1/step1_download_data:v1\",\n dataprocessing_step_image=\"gcr.io/<PROJECT_ID>/brain_tumor_scan4/step2_dataprocessing:v1\",\n trainmodel_step_image=\"gcr.io/<PROJECT_ID>/brain_tumor_scan1/step3_training_model:v1\",\n evaluator_step_image=\"gcr.io/<PROJECT_ID>/brain_tumor_scan1/step4_evaluation_model:V1\",\n root=\"/mnt/\",\n data_file=\"/mnt/BrainScan_Data/\",\n kaggle_api_data=\"navoneel/brain-mri-images-for-brain-tumor-detection\",\n train_file='/mnt/training.data',\n test_file='/mnt/test.data',\n validation_file=\"/mnt/validation.data\",\n label=\"/mnt/labels.data\",\n activation=\"sigmoid\",\n image_size=224,\n train_target=\"/mnt/trainingtarget.data\",\n test_target=\"/mnt/testtarget.data\",\n validation_target=\"/mnt/validationtarget.data\",\n epochs=10,\n learning_rate=.001,\n shuffle_size=1000,\n tensorboard_logs=\"/mnt/logs/\",\n tensorboard_gcs_logs=\"gs://<BUCKET_NAME>/brain/logs\",\n model_output_base_path=\"/mnt/saved_model\",\n gcs_path=\"gs://<BUCKET_NAME>/brain/model\",\n gcs_path_confusion=\"gs://<BUCKET_NAME>/brain\",\n mode=\"gcs\",\n probability=0.5,\n serving_name=\"kfserving-braintumor\",\n serving_namespace=\"kubeflow\",\n image=\"gcr.io/<PROJECT_ID>/brain_tumor_scan/kf_serving_braintest:v1\"\n \n ):\n\n\n \"\"\"\n Pipeline\n \"\"\"\n # PVC : PersistentVolumeClaim volume\n vop = dsl.VolumeOp(\n name='my-pvc',\n resource_name=\"my-pvc\",\n modes=dsl.VOLUME_MODE_RWO,\n size=\"1Gi\"\n )\n\n\n # data extraction\n data_extraction_step = dsl.ContainerOp(\n name='data_extraction',\n image=dataextraction_step_image,\n command=\"python\",\n arguments=[\n \"/app/dataextract.py\",\n \"--root\",root,\n \"--data-file\", data_file,\n \"--kaggle-api-data\", kaggle_api_data,\n ],\n pvolumes={\"/mnt\": vop.volume}\n ).apply(gcp.use_gcp_secret(\"user-gcp-sa\"))\n\n \n \n # processing\n data_processing_step = dsl.ContainerOp(\n name='data_processing',\n image=dataprocessing_step_image,\n command=\"python\",\n arguments=[\n \"/app/preprocessing.py\",\n \"--train-file\", train_file,\n \"--test-file\", test_file,\n \"--validation-file\", validation_file,\n \"--root\",root,\n \"--image-size\",image_size,\n \"--train-target\",train_target,\n \"--test-target\",test_target,\n \"--validation-target\",validation_target,\n \"--label\",label\n \n ],\n pvolumes={\"/mnt\": data_extraction_step.pvolume}\n ).apply(gcp.use_gcp_secret(\"user-gcp-sa\"))\n\n\n\n #trainmodel\n train_model_step = dsl.ContainerOp(\n name='train_model',\n image=trainmodel_step_image,\n command=\"python\",\n arguments=[\n \"/app/train.py\",\n \"--train-file\", train_file,\n \"--test-file\", test_file,\n \"--label\",label,\n \"--activation\",activation,\n \"--validation-file\", validation_file,\n \"--train-target\",train_target,\n \"--test-target\",test_target,\n \"--validation-target\",validation_target,\n \"--epochs\",epochs,\n \"--image-size\",image_size,\n \n \"--learning-rate\",learning_rate,\n \"--tensorboard-logs\",tensorboard_logs,\n \"--tensorboard-gcs-logs\",tensorboard_gcs_logs,\n \"--model-output-base-path\",model_output_base_path,\n \"--gcs-path\", gcs_path,\n \"--mode\", mode,\n \n 
],file_outputs={\"mlpipeline-ui-metadata\": \"/mlpipeline-ui-metadata.json\"\n },\n pvolumes={\"/mnt\": data_processing_step.pvolume}\n ).apply(gcp.use_gcp_secret(\"user-gcp-sa\")) \n \n\n #evaluation\n evaluation_model_step = dsl.ContainerOp(\n name='evaluation_model',\n image=evaluator_step_image,\n command=\"python\",\n arguments=[\n \"/app/evaluator.py\",\n \"--test-file\", test_file,\n \"--test-target\",test_target,\n \"--probability\",probability,\n \"--model-output-base-path\",model_output_base_path,\n \"--gcs-path\", gcs_path,\n \"--label\",label,\n \"--gcs-path-confusion\", gcs_path_confusion,\n \n ],file_outputs={\"mlpipeline-metrics\":\"/mlpipeline-metrics.json\",\"mlpipeline-ui-metadata\": \"/mlpipeline-ui-metadata.json\"\n },\n pvolumes={\"/mnt\": train_model_step.pvolume}\n ).apply(gcp.use_gcp_secret(\"user-gcp-sa\")) \n\n\n kfserving_template = Template(\"\"\"{\n \"apiVersion\": \"serving.kubeflow.org/v1alpha2\",\n \"kind\": \"InferenceService\",\n \"metadata\": {\n \"labels\": {\n \"controller-tools.k8s.io\": \"1.0\"\n },\n \"name\": \"$name\",\n \"namespace\": \"$namespace\"\n },\n \"spec\": {\n \"default\": {\n \"predictor\": {\n \"custom\": {\n \"container\": {\n \"image\": \"$image\"\n }\n }\n }\n }\n }\n }\"\"\")\n\n\n kfservingjson = kfserving_template.substitute({ 'name': str(serving_name),\n 'namespace': str(serving_namespace),\n 'image': str(image)})\n\n kfservingdeployment = json.loads(kfservingjson)\n\n serve = dsl.ResourceOp(\n name=\"serve\",\n k8s_resource=kfservingdeployment,\n action=\"apply\",\n success_condition=\"status.url\"\n )\n serve.after(evaluation_model_step)\n \n", "_____no_output_____" ], [ "import kfp.dsl as dsl\nimport yaml\nimport kfp\nfrom kubernetes import client as k8s\nimport kfp.gcp as gcp\nkfp.__version__\n", "_____no_output_____" ], [ "if __name__ == '__main__':\n import kfp.compiler as compiler\n pipeline_func = brain_tensorflow_pipeline\n pipeline_filename = pipeline_func.__name__ + '.pipeline.yaml'\n compiler.Compiler().compile(pipeline_func,pipeline_filename)", "_____no_output_____" ], [ "import kfp\nfrom kfp import compiler\nimport kfp.components as comp\nimport kfp.dsl as dsl\nfrom kfp import gcp\nEXPERIMENT_NAME = 'Brain_experiment'\nclient = kfp.Client()\n\ntry:\n experiment = client.get_experiment(experiment_name=EXPERIMENT_NAME)\nexcept:\n experiment = client.create_experiment(EXPERIMENT_NAME)\n \nprint(experiment)", "_____no_output_____" ], [ "arguments = {}\n\nrun_name = pipeline_func.__name__ + 'heart_run'\nrun_result = client.run_pipeline(experiment.id, \n run_name, \n pipeline_filename, \n arguments)\n\nprint(experiment.id)\nprint(run_name)\nprint(pipeline_filename)\nprint(arguments)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
e7c4b2520830af728836a943d83684c321b7f1bf
48,983
ipynb
Jupyter Notebook
paper/analysis/Loops.ipynb
jkrajniak/paper-cg-md-simulations-of-polymerization-with-forward-and-backward-reactions
8030aed77f8db545faa835542dc67921aa863d88
[ "Unlicense" ]
null
null
null
paper/analysis/Loops.ipynb
jkrajniak/paper-cg-md-simulations-of-polymerization-with-forward-and-backward-reactions
8030aed77f8db545faa835542dc67921aa863d88
[ "Unlicense" ]
null
null
null
paper/analysis/Loops.ipynb
jkrajniak/paper-cg-md-simulations-of-polymerization-with-forward-and-backward-reactions
8030aed77f8db545faa835542dc67921aa863d88
[ "Unlicense" ]
null
null
null
190.595331
42,606
0.892289
[ [ [ "import collections\nimport numpy as np\nimport seaborn as sns\nimport os\nimport matplotlib.gridspec as gridspec\nimport pickle\nimport matplotlib as mpl\nfrom pathlib import Path\n\npgf_with_custom_preamble = {\n \"text.usetex\": False, # use inline math for ticks\n \"pgf.rcfonts\": False, # don't setup fonts from rc parameters\n \"pgf.preamble\": [\n '\\\\usepackage[T1]{fontenc}'\n ]\n}\n\n\nsns.set_style('ticks')\nsns.set_context('poster')\nsns.set_palette('Set2', 25)\n\ncolors = sns.color_palette('Set2', 20)\n\nfrom scipy import interpolate", "_____no_output_____" ] ], [ [ "# No Water", "_____no_output_____" ] ], [ [ "no_water_loops = collections.defaultdict(list)\nno_water_path = Path('no_water')\nfor f in no_water_path.iterdir():\n if 'loops_hist' in f.name:\n k = float(f.name.split('_')[3])\n with f.open('rb') as iv:\n d = pickle.load(iv)\n no_water_loops[k].extend(d[-1])", "_____no_output_____" ] ], [ [ "# With water", "_____no_output_____" ] ], [ [ "water_loops = collections.defaultdict(list)\nwater_path = Path('with_water')\nfor f in water_path.iterdir():\n if 'loops_hist' in f.name:\n k = float(f.name.split('_')[3])\n with f.open('rb') as iv:\n d = pickle.load(iv)\n water_loops[k].extend(d[-1])", "_____no_output_____" ] ], [ [ "# Water rev", "_____no_output_____" ] ], [ [ "water_rev_loops = collections.defaultdict(list)\nwater_rev_path = Path('with_water_rev')\nfor f in water_rev_path.iterdir():\n if 'loops_hist' in f.name and 'rt1' in f.name:\n k = float(f.name.split('_')[3])\n kr = float(f.name.split('_')[4])\n if kr != 0.001:\n continue\n print(k,kr)\n with f.open('rb') as iv:\n d = pickle.load(iv)\n water_rev_loops[k].extend(d[-1])\n print(k, d[-1])", "0.1 0.001\n0.1 [6, 6, 6, 4, 4]\n0.1 0.001\n0.1 [6, 6, 6, 6]\n0.001 0.001\n0.001 []\n0.1 0.001\n0.1 [8, 6, 14, 6]\n0.01 0.001\n0.01 [10, 4]\n0.01 0.001\n0.01 [8, 4]\n0.001 0.001\n0.001 []\n0.01 0.001\n0.01 [8]\n0.001 0.001\n0.001 []\n0.001 0.001\n0.001 []\n0.01 0.001\n0.01 [4, 4, 6]\n0.1 0.001\n0.1 [4, 6, 4, 4, 6]\n0.1 0.001\n0.1 [8, 10, 4, 6, 6, 6, 6, 8, 6]\n" ], [ "matplotlib", "_____no_output_____" ], [ "plt.rcParams['figure.figsize'] = (8, 6)\nmarkers = {0.001: '*', 0.01: 'h', 0.1: 'X'}\nfor k in no_water_loops:\n if no_water_loops[k]:\n n, x = np.histogram(no_water_loops[k], density=False, bins='auto')\n n = np.asarray(n, np.float)\n n[n <= 0.0001] = np.nan\n plt.plot(x[:-1], n, 'h', linestyle='None', label='no water k={}'.format(k))\n\nfor k in water_loops:\n if water_loops[k]:\n n, x = np.histogram(water_loops[k], density=False, bins='auto')\n n = np.asarray(n, np.float)\n n[n <= 0.0001] = np.nan\n plt.plot(x[:-1], n, 'd', linestyle='None', label='with water k={}'.format(k))\n \nfor k in water_rev_loops:\n if water_rev_loops[k]:\n n, x = np.histogram(water_rev_loops[k], density=False, bins='auto')\n n = np.asarray(n, np.float)\n n[n <= 0.0001] = np.nan\n plt.plot(x[:-1], n, markers[k], linestyle='None', label='with water $k_f={}$ $k_r=0.001$ (hydrolysis)'.format(k))\n \nplt.legend()\nplt.xlabel('loop size (monomers)')\nplt.ylabel('num. of structures')\nplt.savefig('hist_loops.pdf', dpi=200, tight_layout=True)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7c4b7249ff6fd486a45306ad47b132a6bdcf0f7
199,440
ipynb
Jupyter Notebook
data_scientist_nanodegree/projects/p1_charityml/custom/Logistic_RandomizedSearch_sklearn.ipynb
shane-kercheval/udacity
97e1d39e73f5af26bd2839b1a504875d5d94eca7
[ "MIT" ]
null
null
null
data_scientist_nanodegree/projects/p1_charityml/custom/Logistic_RandomizedSearch_sklearn.ipynb
shane-kercheval/udacity
97e1d39e73f5af26bd2839b1a504875d5d94eca7
[ "MIT" ]
null
null
null
data_scientist_nanodegree/projects/p1_charityml/custom/Logistic_RandomizedSearch_sklearn.ipynb
shane-kercheval/udacity
97e1d39e73f5af26bd2839b1a504875d5d94eca7
[ "MIT" ]
null
null
null
89.155118
44,724
0.700476
[ [ [ "# !pip install oolearning --upgrade", "_____no_output_____" ], [ "import math\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.ensemble import AdaBoostClassifier, ExtraTreesClassifier, RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import make_scorer, roc_auc_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import StandardScaler, Imputer, MinMaxScaler\nfrom sklearn.decomposition import PCA\nfrom xgboost import XGBClassifier\n\nimport oolearning as oo\nfrom helpers import DataFrameSelector, CustomLogTransform, ChooserTransform, CombineAgeHoursTransform, CombineCapitalGainLossTransform\n\npd.set_option('display.width', 500)\npd.set_option('display.max_colwidth', -1)\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\nwidth = 10\nplt.rcParams['figure.figsize'] = [width, width/1.333]", "_____no_output_____" ], [ "working_directory = os.path.join(os.getcwd(), '../')\ncsv_file = os.path.join(working_directory, 'census.csv')\ntarget_variable = 'income'\npositive_class = '>50K'\nnegative_class = '<=50K'\n\n#target_mapping = {0: 'died', 1: 'lived'} # so we can convert from numeric to categoric\n\nexplore = oo.ExploreClassificationDataset.from_csv(csv_file_path=csv_file,\n target_variable=target_variable)\n # map_numeric_target=target_mapping)\n\n# look at data\nexplore.dataset.head()", "_____no_output_____" ], [ "explore.numeric_summary()", "_____no_output_____" ], [ "explore.categoric_summary()", "_____no_output_____" ], [ "explore.plot_correlation_heatmap()", "_____no_output_____" ], [ "# NOTE: since I will be cross-validating transformations e.g. GridSearchCV, it typically won't work to\n# one-hot-encode during cross-validation because the holdout fold will tend to have categoric values that\n# weren't found in the training folds, and therefore will break during transformation because it will encode\n# a value (i.e. add a column) that didn't exist in the training folds.\n# So, for this, we need to fit ALL data. Then, below if we have new data e.g. Kaggle, we have to apply\n# the same pipeline (i.e. cat_encoding_pipeline.transform()\n# TODO: this breaks though if there are any categorical features with missing values in the final test/Kaggle set\none_hot_transformer = oo.DummyEncodeTransformer(encoding=oo.CategoricalEncoding.ONE_HOT)\ntransformed_data = one_hot_transformer.fit_transform(explore.dataset.drop(columns=target_variable))\ntransformed_data[target_variable] = explore.dataset[target_variable]", "_____no_output_____" ] ], [ [ "# Transformations & Transformation Tuning Parameters\n\ndefine the transformations we want to do, some transformations will have parameters (e.g. base of log tranform (or no transform), type of scaling, whether or not to add column combinations (e.g. age * hours-per-week)", "_____no_output_____" ], [ "Below is the pipeline for captail-gain/lost. \n\nWe want to tune whether or not we should log transform. 
We need to do this after imputing but before scaling, so it needs to be its own pipeline.", "_____no_output_____" ] ], [ [ "cap_gain_loss_pipeline = Pipeline([\n ('selector', DataFrameSelector(attribute_names=['capital-gain', 'capital-loss'])),\n ('imputer', Imputer()),\n # tune Log transformation base (or no transformation); update: tuned - chose base e\n ('custom_transform', CustomLogTransform(base=math.e)),\n # tune \"net gain\" (have to do it after log transform; log of <=0 doesn't exist)\n ('custom_cap_gain_minus_loss', CombineCapitalGainLossTransform(combine=True)),\n # tune MinMax vs StandardScaler; we chose MinMax; update: tuned - chose MinMax\n ('custom_scaler', ChooserTransform(base_transformer=MinMaxScaler())),\n ])", "_____no_output_____" ] ], [ [ "Below is the pipeline for the rest of the numeric features:", "_____no_output_____" ] ], [ [ "num_pipeline = Pipeline([\n ('selector', DataFrameSelector(attribute_names=['age', 'education-num', 'hours-per-week'])),\n ('imputer', Imputer()),\n # tune age * hours-per-week; update: tuned - chose not to include\n #('combine_agehours', CombineAgeHoursTransform()),\n # tune MinMax vs StandardScaler; update: tuned - chose MinMax\n ('custom_scaler', ChooserTransform(base_transformer=MinMaxScaler())),\n ])", "_____no_output_____" ] ], [ [ "Pipeline that simply gets the categorical/encoded columns from the previous transformation (which used `oo-learning`)", "_____no_output_____" ] ], [ [ "append_categoricals = Pipeline([\n ('append_cats', DataFrameSelector(attribute_names=one_hot_transformer.encoded_columns)) # already encoded\n ])", "_____no_output_____" ] ], [ [ "Below is the pipeline for combining all of the other pipelines", "_____no_output_____" ] ], [ [ "# combine pipelines\ntransformations_pipeline = FeatureUnion(transformer_list=[\n (\"cap_gain_loss_pipeline\", cap_gain_loss_pipeline),\n (\"num_pipeline\", num_pipeline),\n (\"cat_pipeline\", append_categoricals),\n ])", "_____no_output_____" ] ], [ [ "Choose the transformations to tune, below:", "_____no_output_____" ], [ "Below calculates a standard value for `scale_pos_weight` based on the recommendation from http://xgboost.readthedocs.io/en/latest/parameter.html\n\n> Control the balance of positive and negative weights, useful for unbalanced classes. 
A typical value to consider: sum(negative cases) / sum(positive cases) ", "_____no_output_____" ] ], [ [ "model = LogisticRegression(random_state=42,\n #penalty='l2',\n C=1.0,\n )", "_____no_output_____" ], [ "full_pipeline = Pipeline([\n ('preparation', transformations_pipeline),\n #('pca_chooser', ChooserTransform()), # PCA option lost; didn't include\n #('feature_selection', TopFeatureSelector(feature_importances, k)),\n ('model', model)\n])", "_____no_output_____" ] ], [ [ "Tuning strategy according to https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/", "_____no_output_____" ] ], [ [ "from scipy.stats import randint, uniform, expon\nmodel_param_dict = {\n # 1st\n 'model__penalty': ['l1', 'l2'],\n 'model__C': uniform(0.001, 100),\n}", "_____no_output_____" ], [ "# actual hyper-parameters/options to tune for transformations.\ntransformation_parameters = {\n #'preparation__num_pipeline__imputer__strategy': ['mean', 'median', 'most_frequent'], # tune strategy\n #'pca_chooser__base_transformer': [PCA(n_components=0.95, random_state=42), None], # PCA vs not\n #'preparation__cap_gain_loss_pipeline__custom_transform__base': [None, math.e], # Log transform (base e) or not\n #'preparation__cap_gain_loss_pipeline__custom_scaler__base_transformer': [MinMaxScaler(), StandardScaler()],\n #'preparation__num_pipeline__custom_scaler__base_transformer': [MinMaxScaler(), StandardScaler()],\n #'preparation__num_pipeline__combine_agehours__combine': [True, False],\n #'preparation__cap_gain_loss_pipeline__custom_cap_gain_minus_loss__combine': [True, False]\n }", "_____no_output_____" ], [ "param_grid = {**transformation_parameters, **model_param_dict}\nparam_grid", "_____no_output_____" ], [ "# def binary_roc_auc(y_true, y_score):\n# return roc_auc_score(y_true=y_true,\n# # binary makes it so it converts the \"scores\" to predictions\n# y_score=[1 if x > 0.5 else 0 for x in y_score])\n\nscorer = make_scorer(roc_auc_score, greater_is_better=True)", "_____no_output_____" ], [ "y = transformed_data[target_variable].apply(lambda x: 1 if x == positive_class else 0)", "_____no_output_____" ], [ "transformed_data[target_variable].values[0:10]", "_____no_output_____" ], [ "y[0:10]", "_____no_output_____" ], [ "print('Starting....')\ntime_start = time.time()\nfrom sklearn.model_selection import RandomizedSearchCV\ngrid_search = RandomizedSearchCV(estimator=full_pipeline,\n param_distributions=param_grid,\n n_iter=50,\n cv=RepeatedKFold(n_splits=5, n_repeats=2),\n scoring=scorer,\n return_train_score=True,\n n_jobs=-1,\n verbose=2)\ngrid_search.fit(transformed_data.drop(columns=target_variable), y)\ntime_end = time.time()", "Starting....\nFitting 10 folds for each of 50 candidates, totalling 500 fits\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] ..... model__C=80.6377254219594, model__penalty=l2, total= 1.2s\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] ..... 
model__C=80.6377254219594, model__penalty=l2, total= 1.6s\n[CV] model__C=80.6377254219594, model__penalty=l2 ....................\n[CV] ..... model__C=80.6377254219594, model__penalty=l2, total= 1.6s\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] ..... model__C=80.6377254219594, model__penalty=l2, total= 1.7s\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] ..... model__C=80.6377254219594, model__penalty=l2, total= 1.9s\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] ..... model__C=80.6377254219594, model__penalty=l2, total= 1.8s\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] ..... model__C=80.6377254219594, model__penalty=l2, total= 1.8s\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] ..... model__C=80.6377254219594, model__penalty=l2, total= 2.1s\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] ..... model__C=80.6377254219594, model__penalty=l2, total= 1.4s\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] ..... model__C=80.6377254219594, model__penalty=l2, total= 1.6s\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 1.9s\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 1.8s\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] model__C=98.96263882449205, model__penalty=l1 ...................\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 2.0s\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 1.9s\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 2.0s\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 1.9s\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 1.8s\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 2.2s\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 2.1s\n[CV] .... model__C=98.96263882449205, model__penalty=l1, total= 2.2s\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] .... model__C=79.22455234949753, model__penalty=l1, total= 2.3s\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] .... model__C=79.22455234949753, model__penalty=l1, total= 2.3s\n[CV] model__C=79.22455234949753, model__penalty=l1 ...................\n[CV] .... model__C=79.22455234949753, model__penalty=l1, total= 2.8s\n[CV] model__C=37.213514391847674, model__penalty=l2 ..................\n[CV] .... model__C=79.22455234949753, model__penalty=l1, total= 2.7s\n[CV] model__C=37.213514391847674, model__penalty=l2 ..................\n[CV] .... 
model__C=79.22455234949753, model__penalty=l1, total= 2.7s\n[CV] model__C=37.213514391847674, model__penalty=l2 ..................\n" ], [ "print('Time: {}m'.format(round((time_end-time_start)/60, 1)))", "Time: 4.2m\n" ], [ "results_df = pd.concat([pd.DataFrame({'mean_score': grid_search.cv_results_[\"mean_test_score\"],\n 'st_dev_score': grid_search.cv_results_[\"std_test_score\"]}),\n pd.DataFrame(grid_search.cv_results_[\"params\"])],\n axis=1)\nresults_df.sort_values(by=['mean_score'], ascending=False).head(10)", "_____no_output_____" ], [ "grid_search.best_score_, grid_search.best_params_", "_____no_output_____" ], [ "rescaled_means = MinMaxScaler(feature_range=(100, 1000)).fit_transform(results_df['mean_score'].values.reshape(-1, 1))\nrescaled_means = rescaled_means.flatten() # reshape back to array\n#rescaled_means", "_____no_output_____" ], [ "def compare_two_parameters(x_label, y_label):\n x = results_df[x_label]\n y = results_df[y_label] \n \n plt.scatter(x,y,c=rescaled_means, s=rescaled_means, alpha=0.5)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n index_of_best = np.argmax(rescaled_means)\n plt.scatter(x[index_of_best], y[index_of_best], marker= 'x', s=200, color='red')", "_____no_output_____" ], [ "x_label = 'model__C'\ny_label = 'model__penalty'\ncompare_two_parameters(x_label, y_label)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c4b84b72ed7bae5242a8f50a5be77ed30b9af2
45,290
ipynb
Jupyter Notebook
Fase_2.ipynb
rodrigotesone1997/Limpieza_Datos_Municipales
3a174d4cb6f22373a039d57cf10a849fa334cd4c
[ "MIT" ]
null
null
null
Fase_2.ipynb
rodrigotesone1997/Limpieza_Datos_Municipales
3a174d4cb6f22373a039d57cf10a849fa334cd4c
[ "MIT" ]
null
null
null
Fase_2.ipynb
rodrigotesone1997/Limpieza_Datos_Municipales
3a174d4cb6f22373a039d57cf10a849fa334cd4c
[ "MIT" ]
null
null
null
46.026423
322
0.653102
[ [ [ "# Descarga de excels", "_____no_output_____" ] ], [ [ "from datetime import datetime,timedelta\nInicio_programa=datetime.now()", "_____no_output_____" ] ], [ [ "# Importo los paquetes", "_____no_output_____" ] ], [ [ "import os\nimport tabula\nimport re\nfrom openpyxl import Workbook,load_workbook\nimport pandas as pd\nimport shutil\nfrom natsort import natsorted", "_____no_output_____" ], [ "path_local=\"/home/rodrigo/Scrapper_Sueldos_Municipales\"", "_____no_output_____" ] ], [ [ "# Verificación de carpetas", "_____no_output_____" ] ], [ [ "path_folder_pdf=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/pdf\"\nlista_folder_pdf=natsorted(os.listdir(path_folder_pdf))\nfor i in lista_folder_pdf:\n if re.search(\"Sueldos\",i) is None:\n lista_folder_pdf.remove(i)", "_____no_output_____" ], [ "lista_folder_pdf", "_____no_output_____" ], [ "index_carpeta_a_modificar=-1", "_____no_output_____" ], [ "folder_pdf_a_convertir=lista_folder_pdf[index_carpeta_a_modificar]", "_____no_output_____" ] ], [ [ "# Crea la carpeta donde se van a guardar los excel en crudo", "_____no_output_____" ] ], [ [ "path_folder_pdf_a_convertir=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/pdf/\"+folder_pdf_a_convertir\nLista_pdf_nuevos=natsorted(os.listdir(path_folder_pdf_a_convertir))\n\nLista_nombres_secretarias=[re.sub(\"\\.pdf\",\"\",i) for i in Lista_pdf_nuevos]\n\n\ntry :\n os.mkdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza\")\n for secretaria in Lista_nombres_secretarias:\n os.mkdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza\"+\"/\"+secretaria)\nexcept :\n pass\n\nfor ubicacion_secretaria,nombre_secretaria in enumerate(Lista_pdf_nuevos):\n variable = True\n n=0\n df = tabula.read_pdf(path_folder_pdf_a_convertir+\"/\"+nombre_secretaria, pages = \"all\")\n while (variable == True):\n try :\n df[n].to_excel(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza\"+\"/\"+Lista_nombres_secretarias[ubicacion_secretaria]+\"/\"+Lista_nombres_secretarias[ubicacion_secretaria]+\"_imperfecto_\"+str(n)+\".xlsx\")\n n+=1\n except :\n variable=False", "_____no_output_____" ], [ "carpeta_especifica=re.sub(\"_Sueldos\",\"\",folder_pdf_a_convertir)\ncarpeta_especifica", "_____no_output_____" ] ], [ [ "# Acomodo de excel sucios", "_____no_output_____" ] ], [ [ "# Estos 2 nombres no me gustan\npath_carpetas_excels_sucios=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza\"\ncarpetas_excels_sucios=natsorted(os.listdir(path_carpetas_excels_sucios))\n\nfor carpeta_secretaria in carpetas_excels_sucios:\n path_excels_sin_modificar= path_carpetas_excels_sucios+\"/\"+ carpeta_secretaria\n archivos_pdf=natsorted(os.listdir(path_excels_sin_modificar))\n n=0\n for nombre_archivo_pdf in archivos_pdf:\n wb = load_workbook(filename=path_excels_sin_modificar+\"/\"+nombre_archivo_pdf)\n sheet= wb.active\n sheet.delete_cols(1)\n celda_1=sheet.cell(row=1, column=1).value\n celda_2=sheet.cell(row=1, column=2).value\n if re.search(\"[a-z]\",celda_1) is not None :\n os.remove(path_excels_sin_modificar+\"/\"+nombre_archivo_pdf)\n n+=1\n elif re.search(\"[0-9]\",celda_1) is not None and re.search(\"Unnamed: 0\",celda_2) is not None:\n inicio=re.search(\"[0-9]\",celda_1).span()[0]\n nombre=celda_1[:inicio]\n numero=celda_1[inicio:]\n sheet[\"A1\"] = nombre\n sheet[\"B1\"] = numero\n sheet.insert_rows(1)\n 
sheet[\"A1\"],sheet[\"B1\"],sheet[\"C1\"],sheet[\"D1\"],sheet[\"E1\"],sheet[\"F1\"]=\"apellido_y_nombre\",\"sueldo_bruto\",\"sueldo_neto\",\"costo_laboral\",\"planta\",\"funcion\"\n wb.save(filename=path_excels_sin_modificar+\"/\"+nombre_archivo_pdf)\n os.rename(path_excels_sin_modificar+\"/\"+nombre_archivo_pdf , path_excels_sin_modificar+\"/\"+carpeta_secretaria+\"_perfecto_\"+str(n)+\".xlsx\")\n n+=1\n else :\n sheet.insert_rows(1)\n sheet[\"A1\"],sheet[\"B1\"],sheet[\"C1\"],sheet[\"D1\"],sheet[\"E1\"],sheet[\"F1\"]=\"apellido_y_nombre\",\"sueldo_bruto\",\"sueldo_neto\",\"costo_laboral\",\"planta\",\"funcion\"\n wb.save(filename=path_excels_sin_modificar+\"/\"+nombre_archivo_pdf)\n os.rename(path_excels_sin_modificar+\"/\"+nombre_archivo_pdf,path_excels_sin_modificar+\"/\"+carpeta_secretaria+\"_perfecto_\"+str(n)+\".xlsx\")\n n+=1", "_____no_output_____" ] ], [ [ "# Crea la carpeta donde se almacenan los excel", "_____no_output_____" ] ], [ [ "path_carpeta_sueldos_nuevos=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos\"\ntry:\n os.mkdir(path_carpeta_sueldos_nuevos)\nexcept:\n pass\n\nnombres_secciones_secretarias=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/secciones\"))\n#print(nombres_secciones_secretarias)\nfor secretaria in nombres_secciones_secretarias:\n try:\n secretaria_sin_extension=re.sub(\".txt\",\"\",secretaria)\n os.mkdir(os.path.join(path_carpeta_sueldos_nuevos,secretaria_sin_extension))\n except:\n pass\n path_secretarias=os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/secciones\",secretaria)\n with open(path_secretarias,\"r\", encoding='utf8') as f:\n secciones=f.readlines()\n for seccion in secciones:\n try:\n seccion_sin_extension=re.sub(\".txt\",\"\",seccion)\n os.mkdir(os.path.join(os.path.join(path_carpeta_sueldos_nuevos,secretaria_sin_extension),seccion_sin_extension))\n except Exception as e:\n print(e)", "_____no_output_____" ] ], [ [ "# Re-acomodo los excel en cada subcarpeta", "_____no_output_____" ] ], [ [ "lista_secretarias_sin_excepciones=[\"salario_ambiente\",\n \"salario_control\",\n \"salario_deporte_turismo\",\n \"salario_hacienda\",\n \"salario_ilar\",\n \"salario_modernizacion\",\n \"salario_movilidad\",\n \"salario_obras\",\n \"salario_salud\",\n \"salario_spv\"]\n\nfor secretaria in lista_secretarias_sin_excepciones:\n path_limpieza_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/\"+secretaria\n nombre_ultimo_archivo_secretaria=re.sub(\".xlsx\",\"\",natsorted(os.listdir(path_limpieza_secretaria))[-1])\n lista_valores=[*range(int(re.findall(r'\\d+',nombre_ultimo_archivo_secretaria)[0])+1)]\n path_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/\"+secretaria\n\n lista_secretaria=natsorted(os.listdir(path_secretaria))\n\n parametro_de_referencia=0\n numero_archivo=0\n for archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):\n archivo_para_extraer_numero=re.sub(\".xlsx\",\"\",archivo_para_localizar)\n numero=re.search(\"[0-999]\",archivo_para_extraer_numero)\n inicio_del_numero=numero.span()[0]\n numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])\n if numero_extraido == lista_valores[parametro_de_referencia]:\n 
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n else:\n for k in natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\")):\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))\n numero_archivo+=1\n\n #print(\"Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\\n\")\n\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n\n parametro_de_referencia+=1\n parametro_de_referencia+=1\n\n lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n for archivo in lista_ultimos_archivos:\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))", "_____no_output_____" ] ], [ [ "# Manejo de excepciones", "_____no_output_____" ], [ "# Salario Cultura", "_____no_output_____" ] ], [ [ "path_limpieza_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_cultura\"\n\npath_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/salario_cultura\"\n\nlista_secretaria=natsorted(os.listdir(path_secretaria))\n\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_cultura_perfecto_0.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),\"salario_cultura_perfecto_0.xlsx\"))\n\nnombre_ultimo_archivo_secretaria=re.sub(\".xlsx\",\"\",natsorted(os.listdir(path_limpieza_secretaria))[-1])\nlista_valores=[*range(1,int(re.findall(r'\\d+',nombre_ultimo_archivo_secretaria)[0])+1)]\n\nparametro_de_referencia=0\nnumero_archivo=1\nfor archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):\n archivo_para_extraer_numero=re.sub(\".xlsx\",\"\",archivo_para_localizar)\n numero=re.search(\"[0-999]\",archivo_para_extraer_numero)\n inicio_del_numero=numero.span()[0]\n numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])\n if numero_extraido == lista_valores[parametro_de_referencia]:\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n else:\n for k in natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\")):\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))\n numero_archivo+=1\n\n #print(\"Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\\n\")\n\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n\n parametro_de_referencia+=1\n parametro_de_referencia+=1\n\n#try:\n# lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n# for archivo in lista_ultimos_archivos:\n# 
shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))\n#except:\n# pass\n\n# Para el caso 2021_01\n\n#shutil.move(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba/salario_cultura_perfecto_92.xlsx\",os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/salario_cultura/40_TEATRO_LA_COMEDIA\",\"salario_cultura_perfecto_92.xlsx\"))\n\n#try:\n# lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n# for archivo in lista_ultimos_archivos:\n# shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-2]))\n#except:\n# pass\n\n\n\n# Para el caso 2021_04\n\nshutil.move(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba/salario_cultura_perfecto_95.xlsx\",os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/salario_cultura/\"+lista_secretaria[-1],\"salario_cultura_perfecto_95.xlsx\"))\n\ntry:\n lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n for archivo in lista_ultimos_archivos:\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-2]))\nexcept:\n pass", "_____no_output_____" ] ], [ [ "# Salario Desarrollo Economico", "_____no_output_____" ] ], [ [ "path_limpieza_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_desarrollo_economico\"\n\npath_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/salario_desarrollo_economico\"\n\nlista_secretaria=natsorted(os.listdir(path_secretaria))\n\n\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_desarrollo_economico_perfecto_0.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),\"salario_desarrollo_economico_perfecto_0.xlsx\"))\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_desarrollo_economico_perfecto_1.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),\"salario_desarrollo_economico_perfecto_1.xlsx\"))\n\nnombre_ultimo_archivo_secretaria=re.sub(\".xlsx\",\"\",natsorted(os.listdir(path_limpieza_secretaria))[-1])\nlista_valores=[*range(2,int(re.findall(r'\\d+',nombre_ultimo_archivo_secretaria)[0])+1)]\n\nparametro_de_referencia=0\nnumero_archivo=1\nfor archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):\n archivo_para_extraer_numero=re.sub(\".xlsx\",\"\",archivo_para_localizar)\n numero=re.search(\"[0-999]\",archivo_para_extraer_numero)\n inicio_del_numero=numero.span()[0]\n numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])\n if numero_extraido == lista_valores[parametro_de_referencia]:\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n else:\n for k in natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\")):\n 
shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))\n numero_archivo+=1\n\n #print(\"Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\\n\")\n\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n\n parametro_de_referencia+=1\n parametro_de_referencia+=1\n\ntry:\n lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n for archivo in lista_ultimos_archivos:\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))\nexcept:\n pass", "_____no_output_____" ] ], [ [ "# Salario desarrollo humano", "_____no_output_____" ] ], [ [ "path_limpieza_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_desarrollo_humano\"\n\npath_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/salario_desarrollo_humano\"\n\nlista_secretaria=natsorted(os.listdir(path_secretaria))\n\n\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_desarrollo_humano_perfecto_0.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),\"salario_desarrollo_humano_perfecto_0.xlsx\"))\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_desarrollo_humano_perfecto_1.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),\"salario_desarrollo_humano_perfecto_1.xlsx\"))\n\nnombre_ultimo_archivo_secretaria=re.sub(\".xlsx\",\"\",natsorted(os.listdir(path_limpieza_secretaria))[-1])\nlista_valores=[*range(2,int(re.findall(r'\\d+',nombre_ultimo_archivo_secretaria)[0])+1)]\n\nparametro_de_referencia=0\nnumero_archivo=2\nfor archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):\n archivo_para_extraer_numero=re.sub(\".xlsx\",\"\",archivo_para_localizar)\n numero=re.search(\"[0-999]\",archivo_para_extraer_numero)\n inicio_del_numero=numero.span()[0]\n numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])\n if numero_extraido == lista_valores[parametro_de_referencia]:\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n else:\n for k in natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\")):\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))\n numero_archivo+=1\n\n #print(\"Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\\n\")\n\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n\n parametro_de_referencia+=1\n parametro_de_referencia+=1\n\ntry:\n lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n for archivo in lista_ultimos_archivos:\n 
shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))\nexcept:\n pass", "_____no_output_____" ] ], [ [ "# Salario Genero", "_____no_output_____" ] ], [ [ "path_limpieza_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_genero_ddhh\"\n\npath_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/salario_genero_ddhh\"\n\nlista_secretaria=natsorted(os.listdir(path_secretaria))\n\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_genero_ddhh_perfecto_0.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),\"salario_genero_ddhh_perfecto_0.xlsx\"))\n\nnombre_ultimo_archivo_secretaria=re.sub(\".xlsx\",\"\",natsorted(os.listdir(path_limpieza_secretaria))[-1])\nlista_valores=[*range(1,int(re.findall(r'\\d+',nombre_ultimo_archivo_secretaria)[0])+1)]\n\nparametro_de_referencia=0\nnumero_archivo=1\nfor archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):\n archivo_para_extraer_numero=re.sub(\".xlsx\",\"\",archivo_para_localizar)\n numero=re.search(\"[0-999]\",archivo_para_extraer_numero)\n inicio_del_numero=numero.span()[0]\n numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])\n if numero_extraido == lista_valores[parametro_de_referencia]:\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n else:\n for k in natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\")):\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))\n numero_archivo+=1\n\n #print(\"Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\\n\")\n\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n\n parametro_de_referencia+=1\n parametro_de_referencia+=1\n\ntry:\n lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n for archivo in lista_ultimos_archivos:\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))\nexcept:\n pass", "_____no_output_____" ] ], [ [ "# Salario Gobierno", "_____no_output_____" ] ], [ [ 
"path_limpieza_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_gobierno\"\n\npath_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/salario_gobierno\"\n\nlista_secretaria=natsorted(os.listdir(path_secretaria))\n\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_gobierno_perfecto_0.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),\"salario_gobierno_perfecto_0.xlsx\"))\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_gobierno_perfecto_1.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),\"salario_gobierno_perfecto_1.xlsx\"))\n\nnombre_ultimo_archivo_secretaria=re.sub(\".xlsx\",\"\",natsorted(os.listdir(path_limpieza_secretaria))[-1])\nlista_valores=[*range(2,int(re.findall(r'\\d+',nombre_ultimo_archivo_secretaria)[0])+1)]\n\nparametro_de_referencia=0\nnumero_archivo=2\nfor archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):\n archivo_para_extraer_numero=re.sub(\".xlsx\",\"\",archivo_para_localizar)\n numero=re.search(\"[0-999]\",archivo_para_extraer_numero)\n inicio_del_numero=numero.span()[0]\n numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])\n if numero_extraido == lista_valores[parametro_de_referencia]:\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n else:\n for k in natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\")):\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))\n numero_archivo+=1\n\n #print(\"Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\\n\")\n\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n\n parametro_de_referencia+=1\n parametro_de_referencia+=1\n\ntry:\n lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n for archivo in lista_ultimos_archivos:\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))\nexcept:\n pass", "_____no_output_____" ] ], [ [ "# Salario intendencia", "_____no_output_____" ] ], [ [ 
"path_limpieza_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_intendencia\"\n\npath_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/salario_intendencia\"\n\nlista_secretaria=natsorted(os.listdir(path_secretaria))\n\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_intendencia_perfecto_0.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),\"salario_intendencia_perfecto_0.xlsx\"))\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_intendencia_perfecto_1.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),\"salario_intendencia_perfecto_1.xlsx\"))\n\nnombre_ultimo_archivo_secretaria=re.sub(\".xlsx\",\"\",natsorted(os.listdir(path_limpieza_secretaria))[-1])\nlista_valores=[*range(3,int(re.findall(r'\\d+',nombre_ultimo_archivo_secretaria)[0])+1)]\n\nparametro_de_referencia=0\nnumero_archivo=2\nfor archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):\n archivo_para_extraer_numero=re.sub(\".xlsx\",\"\",archivo_para_localizar)\n numero=re.search(\"[0-999]\",archivo_para_extraer_numero)\n inicio_del_numero=numero.span()[0]\n numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])\n if numero_extraido == lista_valores[parametro_de_referencia]:\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n else:\n for k in natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\")):\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))\n numero_archivo+=1\n\n #print(\"Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\\n\")\n\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n\n parametro_de_referencia+=1\n parametro_de_referencia+=1\n\ntry:\n lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n for archivo in lista_ultimos_archivos:\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))\nexcept:\n pass", "_____no_output_____" ] ], [ [ "# Salario Planeamiento", "_____no_output_____" ] ], [ [ 
"path_limpieza_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_planeamiento\"\n\npath_secretaria=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos/salario_planeamiento\"\n\nlista_secretaria=natsorted(os.listdir(path_secretaria))\n\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_planeamiento_perfecto_0.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),\"salario_planeamiento_perfecto_0.xlsx\"))\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_planeamiento_perfecto_1.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),\"salario_planeamiento_perfecto_1.xlsx\"))\nshutil.move(os.path.join(path_limpieza_secretaria,\"salario_planeamiento_perfecto_2.xlsx\"),os.path.join(os.path.join(path_secretaria,lista_secretaria[2]),\"salario_planeamiento_perfecto_2.xlsx\"))\n\nnombre_ultimo_archivo_secretaria=re.sub(\".xlsx\",\"\",natsorted(os.listdir(path_limpieza_secretaria))[-1])\nlista_valores=[*range(4,int(re.findall(r'\\d+',nombre_ultimo_archivo_secretaria)[0])+1)]\n\n# Para el caso 2020_09\n#lista_valores=[*range(3,int(re.findall(r'\\d+',nombre_ultimo_archivo_secretaria)[0])+1)]\n\nparametro_de_referencia=0\nnumero_archivo=3\n\nfor archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):\n archivo_para_extraer_numero=re.sub(\".xlsx\",\"\",archivo_para_localizar)\n numero=re.search(\"[0-999]\",archivo_para_extraer_numero)\n inicio_del_numero=numero.span()[0]\n numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])\n if numero_extraido == lista_valores[parametro_de_referencia]:\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n else:\n for k in natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\")):\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))\n numero_archivo+=1\n\n #print(\"Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\\n\")\n\n shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo_para_localizar))\n\n parametro_de_referencia+=1\n parametro_de_referencia+=1\n\ntry:\n lista_ultimos_archivos=natsorted(os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"))\n for archivo in lista_ultimos_archivos:\n shutil.move(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))\nexcept:\n pass", "_____no_output_____" ] ], [ [ "# Elimina la carpeta \"Excels_proceso_limpieza\"", "_____no_output_____" ] ], [ [ "shutil.rmtree(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza\")", "_____no_output_____" ] ], [ [ "# Creo los archivos finales", "_____no_output_____" ] ], [ [ "path_folder_carpeta=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos\"\nlista_secretarias=natsorted(os.listdir(path_folder_carpeta))\npath_carpeta_prueba=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba\"\nfor secretaria in lista_secretarias:\n 
path_folder_secretaria=os.path.join(path_folder_carpeta,secretaria)\n lista_secciones_secretaria=natsorted(os.listdir(path_folder_secretaria))\n for seccion in lista_secciones_secretaria:\n path_folder_secciones=os.path.join(path_folder_secretaria,seccion)\n lista_archivos_secciones=natsorted(os.listdir(path_folder_secciones))\n path_primer_archivo_seccion=os.path.join(path_folder_secciones, lista_archivos_secciones[0])\n path_archivo_final=path_folder_secciones+\".xlsx\"\n\n if len(lista_archivos_secciones) == 1:\n shutil.move(path_primer_archivo_seccion,path_archivo_final)\n else:\n shutil.move(path_primer_archivo_seccion,path_carpeta_prueba)\n for archivo in lista_archivos_secciones[1:]:\n shutil.move(os.path.join(path_folder_secciones,archivo),path_carpeta_prueba)\n lista_archivos_carpeta_prueba=natsorted(os.listdir(path_carpeta_prueba))\n path_df_1=os.path.join(path_carpeta_prueba,lista_archivos_carpeta_prueba[0])\n path_df_2=os.path.join(path_carpeta_prueba,lista_archivos_carpeta_prueba[1])\n df_1=pd.read_excel(path_df_1)\n df_2=pd.read_excel(path_df_2)\n df_final=pd.concat([df_1,df_2],ignore_index=True)\n os.remove(path_df_1)\n os.remove(path_df_2)\n df_final.to_excel(path_df_1,index=False,header=True)\n path_ubicacion_archivo_final=os.path.join(path_carpeta_prueba,lista_archivos_carpeta_prueba[0])\n shutil.move(path_ubicacion_archivo_final,path_archivo_final)", "_____no_output_____" ] ], [ [ "# Elimina las carpetas", "_____no_output_____" ] ], [ [ "for secretarias in os.listdir(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos\"):\n for archivos_secretaria in os.listdir(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos\",secretarias)):\n try:\n os.rmdir(os.path.join(os.path.join(path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos\",secretarias),archivos_secretaria))\n except:\n pass", "_____no_output_____" ] ], [ [ "# Reviso la columna 1 de todos los excel y muevo los numeros a la segunda columna", "_____no_output_____" ] ], [ [ "path=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos\"\nlista_secretarias=natsorted(os.listdir(path))\nfor secretaria in lista_secretarias:\n path_secretaria=os.path.join(path,secretaria)\n lista_archivos=natsorted(os.listdir(path_secretaria))\n for archivo in lista_archivos:\n path_archivo=os.path.join(path_secretaria,archivo)\n df=pd.read_excel(path_archivo)\n final=df.shape[0]\n workbook= load_workbook(filename=path_archivo)\n sheet= workbook.active\n for fila in range(1,final+2):\n celda=sheet.cell(row=fila, column=1).value\n try:\n inicio_numero=re.search(\"[0-9]\",celda).span()[0]\n nombre=celda[:inicio_numero]\n numero=celda[inicio_numero:]\n sheet[\"A\"+str(k)] = nombre\n sheet[\"B\"+str(k)] = numero\n except:\n pass\n workbook.save(filename=path_archivo)", "_____no_output_____" ] ], [ [ "# Reemplazo las comas por puntos", "_____no_output_____" ] ], [ [ "path=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos\"\nfor secretaria in natsorted(os.listdir(path)):\n lista_secretaria=natsorted(os.listdir(os.path.join(path,secretaria)))\n for archivo in lista_secretaria:\n path_archivo=os.path.join(os.path.join(path,secretaria),archivo)\n df=pd.read_excel(path_archivo)\n final=df.shape[0]\n workbook= load_workbook(filename=path_archivo)\n sheet= workbook.active\n for fila in range(1,final+2):\n 
celda_1=sheet.cell(row=fila, column=2).value\n celda_2=sheet.cell(row=fila, column=3).value\n celda_3=sheet.cell(row=fila, column=4).value\n try:\n sheet[\"B\"+str(k)]=re.sub(\",\",\".\",celda_1)\n sheet[\"C\"+str(k)]=re.sub(\",\",\".\",celda_2)\n sheet[\"D\"+str(k)]=re.sub(\",\",\".\",celda_3)\n except:\n pass\n workbook.save(filename=path_archivo)", "_____no_output_____" ] ], [ [ "# Esto revisa unos numeros extras que salen random luego de los 2 decimales", "_____no_output_____" ] ], [ [ "path=path_local+\"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/\"+carpeta_especifica+\"_Sueldos\"\nlista_secretarias=natsorted(os.listdir(path))\nfor columna in [2,3,4]:\n for secretaria in lista_secretarias:\n lista_archivos=natsorted(os.listdir(os.path.join(path,secretaria)))\n for archivo in lista_archivos:\n path_archivo=os.path.join(os.path.join(path,secretaria),archivo)\n df=pd.read_excel(path_archivo)\n final=df.shape[0]\n workbook= load_workbook(filename=path_archivo)\n sheet= workbook.active\n for fila in range(1,final+2):\n celda=str(sheet.cell(row=fila, column=columna).value)\n if len(re.findall(\"\\.\",celda))>1:\n final=re.search(\"\\.\",celda).span()[1]\n sheet[\"B\"+str(fila)]=celda[:final+2]\n else:\n pass\n workbook.save(filename=path_archivo)", "_____no_output_____" ] ], [ [ "# Mido cuanto tardo el programa (aproximadamente)", "_____no_output_____" ] ], [ [ "Finalizacion_programa=datetime.now()\ntiempo=Finalizacion_programa-Inicio_programa\ntiempo_medido=str(timedelta(seconds=tiempo.seconds))[2:]\ntiempo_medido", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7c4bf73c0f3d855aa8bb77d7865756842d56670
3,404
ipynb
Jupyter Notebook
sklearn/sklearn learning/demonstration/auto_examples_jupyter/linear_model/plot_lasso_and_elasticnet.ipynb
wangyendt/deeplearning_models
47883b6c65b8d05a0d1c5737f1552df6476ded34
[ "MIT" ]
1
2020-06-04T11:10:27.000Z
2020-06-04T11:10:27.000Z
sklearn/sklearn learning/demonstration/auto_examples_jupyter/linear_model/plot_lasso_and_elasticnet.ipynb
wangyendt/deeplearning_models
47883b6c65b8d05a0d1c5737f1552df6476ded34
[ "MIT" ]
null
null
null
sklearn/sklearn learning/demonstration/auto_examples_jupyter/linear_model/plot_lasso_and_elasticnet.ipynb
wangyendt/deeplearning_models
47883b6c65b8d05a0d1c5737f1552df6476ded34
[ "MIT" ]
null
null
null
63.037037
2,194
0.573443
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Lasso and Elastic Net for Sparse Signals\n\n\nEstimates Lasso and Elastic-Net regression models on a manually generated\nsparse signal corrupted with an additive noise. Estimated coefficients are\ncompared with the ground-truth.\n", "_____no_output_____" ] ], [ [ "print(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import r2_score\n\n# #############################################################################\n# Generate some sparse data to play with\nnp.random.seed(42)\n\nn_samples, n_features = 50, 100\nX = np.random.randn(n_samples, n_features)\n\n# Decreasing coef w. alternated signs for visualization\nidx = np.arange(n_features)\ncoef = (-1) ** idx * np.exp(-idx / 10)\ncoef[10:] = 0 # sparsify coef\ny = np.dot(X, coef)\n\n# Add noise\ny += 0.01 * np.random.normal(size=n_samples)\n\n# Split data in train set and test set\nn_samples = X.shape[0]\nX_train, y_train = X[:n_samples // 2], y[:n_samples // 2]\nX_test, y_test = X[n_samples // 2:], y[n_samples // 2:]\n\n# #############################################################################\n# Lasso\nfrom sklearn.linear_model import Lasso\n\nalpha = 0.1\nlasso = Lasso(alpha=alpha)\n\ny_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)\nr2_score_lasso = r2_score(y_test, y_pred_lasso)\nprint(lasso)\nprint(\"r^2 on test data : %f\" % r2_score_lasso)\n\n# #############################################################################\n# ElasticNet\nfrom sklearn.linear_model import ElasticNet\n\nenet = ElasticNet(alpha=alpha, l1_ratio=0.7)\n\ny_pred_enet = enet.fit(X_train, y_train).predict(X_test)\nr2_score_enet = r2_score(y_test, y_pred_enet)\nprint(enet)\nprint(\"r^2 on test data : %f\" % r2_score_enet)\n\nm, s, _ = plt.stem(np.where(enet.coef_)[0], enet.coef_[enet.coef_ != 0],\n markerfmt='x', label='Elastic net coefficients',\n use_line_collection=True)\nplt.setp([m, s], color=\"#2ca02c\")\nm, s, _ = plt.stem(np.where(lasso.coef_)[0], lasso.coef_[lasso.coef_ != 0],\n markerfmt='x', label='Lasso coefficients',\n use_line_collection=True)\nplt.setp([m, s], color='#ff7f0e')\nplt.stem(np.where(coef)[0], coef[coef != 0], label='true coefficients',\n markerfmt='bx', use_line_collection=True)\n\nplt.legend(loc='best')\nplt.title(\"Lasso $R^2$: %.3f, Elastic Net $R^2$: %.3f\"\n % (r2_score_lasso, r2_score_enet))\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
e7c4cd184d4781edac314451358ff75f5f449d05
721,205
ipynb
Jupyter Notebook
campus-recruitment-97-score.ipynb
theyazilimci/KaggleProject
917176d4db20ef848b4b20093e3386f19179a0b7
[ "Apache-2.0" ]
1
2022-01-07T22:11:59.000Z
2022-01-07T22:11:59.000Z
campus-recruitment-97-score.ipynb
theyazilimci/KaggleProject
917176d4db20ef848b4b20093e3386f19179a0b7
[ "Apache-2.0" ]
null
null
null
campus-recruitment-97-score.ipynb
theyazilimci/KaggleProject
917176d4db20ef848b4b20093e3386f19179a0b7
[ "Apache-2.0" ]
null
null
null
721,205
721,205
0.938764
[ [ [ "<a href=\"https://www.kaggle.com/theyazilimci/campus-recruitment-97-score?scriptVersionId=83384629\" target=\"_blank\"><img align=\"left\" alt=\"Kaggle\" title=\"Open in Kaggle\" src=\"https://kaggle.com/static/images/open-in-kaggle.svg\"></a>", "_____no_output_____" ], [ "# Campus Recruitment\n\nIn this notebook we will try to answer these questions.\n* Which factor influenced a candidate in getting placed?\n* Does percentage matters for one to get placed?\n* Which degree specialization is much demanded by corporate?\n* Play with the data conducting all statistical tests.\n\nAt the end we'll use DecisionTreeClassifier to predict if a student will be placed or not depending on the given data.\n", "_____no_output_____" ], [ "# Data Import & Information", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt ", "_____no_output_____" ], [ "df = pd.read_csv(\"../input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv\")\ndf.head()", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 215 entries, 0 to 214\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 sl_no 215 non-null int64 \n 1 gender 215 non-null object \n 2 ssc_p 215 non-null float64\n 3 ssc_b 215 non-null object \n 4 hsc_p 215 non-null float64\n 5 hsc_b 215 non-null object \n 6 hsc_s 215 non-null object \n 7 degree_p 215 non-null float64\n 8 degree_t 215 non-null object \n 9 workex 215 non-null object \n 10 etest_p 215 non-null float64\n 11 specialisation 215 non-null object \n 12 mba_p 215 non-null float64\n 13 status 215 non-null object \n 14 salary 148 non-null float64\ndtypes: float64(6), int64(1), object(8)\nmemory usage: 25.3+ KB\n" ] ], [ [ "We can observe some Null rows for the columns salary", "_____no_output_____" ] ], [ [ "df.isna().any()", "_____no_output_____" ], [ "df['salary'].mean()", "_____no_output_____" ], [ "df.groupby('degree_t')['salary'].mean()", "_____no_output_____" ] ], [ [ "Student in Science and Tech get more money than others ", "_____no_output_____" ] ], [ [ "df.groupby('gender')['salary'].mean()", "_____no_output_____" ] ], [ [ "Male earn more money than female", "_____no_output_____" ], [ "# Plot data ", "_____no_output_____" ] ], [ [ "sns.barplot(x='gender',y='salary',data=df,palette=\"Blues_d\")", "_____no_output_____" ], [ "sns.barplot(x='degree_t',y='salary',data=df,palette=\"Blues_d\")", "_____no_output_____" ], [ "sns.countplot(x='gender',data=df,palette=\"Blues_d\")", "_____no_output_____" ], [ "sns.barplot(x=\"workex\",y=\"salary\",data=df,palette=\"Blues_d\")", "_____no_output_____" ], [ "\nsns.barplot(x=\"status\",y=\"degree_p\",data=df,palette=\"Blues_d\")", "_____no_output_____" ] ], [ [ "placed person arehigher degree than not_placed ", "_____no_output_____" ] ], [ [ "sns.barplot(x=\"workex\",y=\"degree_p\",data=df,palette=\"Blues_d\")", "_____no_output_____" ], [ "sns.barplot(x=\"gender\",y=\"degree_p\",data=df,palette=\"Blues_d\")", "_____no_output_____" ] ], [ [ "Female have better degree than male ", "_____no_output_____" ] ], [ [ "sns.barplot(x=\"ssc_b\",y=\"salary\",data=df,palette=\"Blues_d\")", "_____no_output_____" ], [ "sns.barplot(x=\"specialisation\",y=\"salary\",data=df,palette=\"Blues_d\")", "_____no_output_____" ] ], [ [ "# Global plot", "_____no_output_____" ] ], [ [ "sns.pairplot(data=df,palette=\"Blues_d\")", "_____no_output_____" ] ], [ [ 
"#### We Can see the correlation (linearity) between different columns", "_____no_output_____" ] ], [ [ "df.groupby('gender')['status'].value_counts()", "_____no_output_____" ] ], [ [ "Male are more recruited than female however we saw above that female got better mark than male ", "_____no_output_____" ] ], [ [ "# Check the correlation \nsns.heatmap(df.corr(),cmap=\"Blues\")", "_____no_output_____" ] ], [ [ "if we look at the salary we can say that the degree nor the gender of anything have a real impact to the salary of the student ", "_____no_output_____" ] ], [ [ "sns.kdeplot(df['salary'])", "_____no_output_____" ], [ "# we get only categorical data\ncols = df.columns\n\nnum_cols = df._get_numeric_data().columns\nnum_cols\n\ncategorical_col = list(set(cols) - set(num_cols))", "_____no_output_____" ], [ "for i in categorical_col:\n plt.figure()\n sns.stripplot(x=i, y=\"salary\",hue='gender',data=df, palette=\"Set1\", dodge=True)", "_____no_output_____" ], [ "df.groupby('degree_t').count()", "_____no_output_____" ], [ "df.groupby('degree_t')['status'].value_counts()", "_____no_output_____" ] ], [ [ "Student in Commerce are more placed than ", "_____no_output_____" ], [ "# Prediction a DecisionTreeClassifier ", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import confusion_matrix\ndf.columns", "_____no_output_____" ], [ "labelEncode = LabelEncoder()\n\nfor i in categorical_col:\n df[i] = labelEncode.fit_transform(df[i])\n \ndf", "_____no_output_____" ], [ "# people not placed don't have a salary so we fill it with the mean\ndf = df.fillna(df.mean())\ndf.isna().sum()", "_____no_output_____" ], [ "X = df.drop(['sl_no','status'],axis=1)\ny = df['status']\nx_train,x_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=42)\n\nlen(x_test),len(y_test)", "_____no_output_____" ] ], [ [ "### Build DecisionTreeClassifier", "_____no_output_____" ] ], [ [ "decisionTree = DecisionTreeClassifier()\ndecisionTree.fit(x_train,y_train)", "_____no_output_____" ], [ "decisionTree.score(x_test,y_test)", "_____no_output_____" ], [ "y_pred = decisionTree.predict(x_test)\ny_pred", "_____no_output_____" ], [ "len(y_test)", "_____no_output_____" ], [ "y_test.head()", "_____no_output_____" ], [ "y_test = y_test.tolist()", "_____no_output_____" ], [ "number_error = 0\n\nfor i in range(len(y_test)):\n if y_test[i] != y_pred[i]:\n number_error += 1\n \nnumber_error ", "_____no_output_____" ], [ "print('confusion matrix: \\n',confusion_matrix(y_pred,y_test),'\\n')", "confusion matrix: \n [[12 1]\n [ 0 30]] \n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c4cde86257cc6fad5a98ba260b61ffbf380f89
8,859
ipynb
Jupyter Notebook
notebooks/chap03me.ipynb
mdominguez2010/ThinkBayes2
a8b5497d1e1706e917b6d76fe41782e0a1b2a9e8
[ "MIT" ]
null
null
null
notebooks/chap03me.ipynb
mdominguez2010/ThinkBayes2
a8b5497d1e1706e917b6d76fe41782e0a1b2a9e8
[ "MIT" ]
null
null
null
notebooks/chap03me.ipynb
mdominguez2010/ThinkBayes2
a8b5497d1e1706e917b6d76fe41782e0a1b2a9e8
[ "MIT" ]
null
null
null
26.287834
847
0.500395
[ [ [ "!pip install empiricaldist # to install", "Collecting empiricaldist\n Downloading empiricaldist-0.6.2.tar.gz (9.5 kB)\nBuilding wheels for collected packages: empiricaldist\n Building wheel for empiricaldist (setup.py) ... \u001b[?25ldone\n\u001b[?25h Created wheel for empiricaldist: filename=empiricaldist-0.6.2-py3-none-any.whl size=10736 sha256=509a0be447ba0f7472d72048ed31544e2e099da9523bdd3b85fa8eae43fc13cf\n Stored in directory: /Users/dominguez/Library/Caches/pip/wheels/2a/ed/75/39cda0596e8f5606df54fb63969c565b49d23869ee64a62435\nSuccessfully built empiricaldist\nInstalling collected packages: empiricaldist\nSuccessfully installed empiricaldist-0.6.2\n" ] ], [ [ "# Distributions", "_____no_output_____" ] ], [ [ "from empiricaldist import Pmf", "_____no_output_____" ] ], [ [ "# Probability Mass Functions\n\n## PMF\n- for dicrete outcomes (ex: head or tails)\n- maps each possible outcome to it's probability", "_____no_output_____" ] ], [ [ "# Representing the outcome of a coin toss\ncoin = Pmf()\ncoin['heads'] = 1/2\ncoin['tails'] = 1/2\ncoin", "_____no_output_____" ], [ "# Create a probability mass function for a series of die outcomes\ndie = Pmf.from_seq([1, 2, 3, 4, 5, 6])\ndie", "_____no_output_____" ] ], [ [ "All outcomes in the sequence appear once, so they all have the same probability, 1/6", "_____no_output_____" ] ], [ [ "letters = Pmf.from_seq(list('Mississippi'))\nletters", "_____no_output_____" ] ], [ [ "'M' appears once, so probability = 1/11 = 0.0909\n\n'i' appears 4 times, so probability = 4/11 = 0.3636\n\nand so on...", "_____no_output_____" ] ], [ [ "letters['s']", "_____no_output_____" ], [ "# Avoid KeyError\ntry:\n letters['t']\nexcept KeyError as e:\n print(\"Please choose a letter contained in the Pmf\")", "Please choose a letter contained in the Pmf\n" ] ], [ [ "You can also call a Pmf as if it were a function", "_____no_output_____" ] ], [ [ "letters('s')", "_____no_output_____" ] ], [ [ "Calling a quantity that does not exists in the distribution will yield a 0, not an error", "_____no_output_____" ] ], [ [ "letters('t')", "_____no_output_____" ] ], [ [ "Can also call some of the elements in the distribution", "_____no_output_____" ] ], [ [ "die([1,4,6])", "_____no_output_____" ] ], [ [ "# The Cookie Problem Revisited", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7c4d53f0126b91a0e68554da00eee1c83ab55b8
203,908
ipynb
Jupyter Notebook
lessons/08_Step_6.ipynb
fayazr/CFDPython
83d175d4940322ec4e0f3b251543cd40d387913d
[ "CC-BY-3.0" ]
1
2018-02-05T09:03:55.000Z
2018-02-05T09:03:55.000Z
lessons/08_Step_6.ipynb
abahman/CFDPython
83d175d4940322ec4e0f3b251543cd40d387913d
[ "CC-BY-3.0" ]
null
null
null
lessons/08_Step_6.ipynb
abahman/CFDPython
83d175d4940322ec4e0f3b251543cd40d387913d
[ "CC-BY-3.0" ]
1
2019-08-09T23:12:06.000Z
2019-08-09T23:12:06.000Z
518.849873
95,729
0.92273
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7c4d92d3a39b1885efb443eafa57135c4cd2979
31,234
ipynb
Jupyter Notebook
python/projects/10h_master_python/python_learning.ipynb
zhaoace/codecraft
bf06267e86bd7386714911b0df4aa0ca0a91d882
[ "Unlicense" ]
null
null
null
python/projects/10h_master_python/python_learning.ipynb
zhaoace/codecraft
bf06267e86bd7386714911b0df4aa0ca0a91d882
[ "Unlicense" ]
null
null
null
python/projects/10h_master_python/python_learning.ipynb
zhaoace/codecraft
bf06267e86bd7386714911b0df4aa0ca0a91d882
[ "Unlicense" ]
null
null
null
18.95267
856
0.452904
[ [ [ "return \"Positive\" if var >=0 else \"Negative\"", "_____no_output_____" ], [ "a =1 \nassert a==2 , \"fuck\"", "_____no_output_____" ], [ "\"condition\"\nfor x in xrange(1,20):\n print x", "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n" ], [ "range(10)", "_____no_output_____" ], [ "xrange(10)", "_____no_output_____" ], [ "li = ['a', 'b','c' ]\nfor index in xrange(len(li)):\n print index", "0\n1\n2\n" ], [ "li = ['a', 'b','c' ]\nfor i,e in enumerate(li):\n print i, e", "0 a\n1 b\n2 c\n" ], [ "li = ['a', 'b','c' ]\nfor i,e in enumerate(li,start=3):\n print i, e", "3 a\n4 b\n5 c\n" ], [ "li = ['a', 'b','c','d' , 'e' ]\nfor i,e in enumerate(reversed(li), start=100):\n print i, e", "100 e\n101 d\n102 c\n103 b\n104 a\n" ], [ "def test1():pass\ndef test1(name):\n print name", "_____no_output_____" ], [ "test1(\"hello\")", "hello\n" ], [ "test1()\n", "_____no_output_____" ], [ "def test(): pass", "_____no_output_____" ], [ "ret = test()", "_____no_output_____" ], [ "test", "_____no_output_____" ], [ "test.a = 1", "_____no_output_____" ], [ "test.a", "_____no_output_____" ], [ "def test():\n \"\"\"this is doc\"\"\"\"\"\n ", "_____no_output_____" ], [ "test.func_doc", "_____no_output_____" ], [ "__builtin__\n", "_____no_output_____" ], [ "dir(__builtin__)", "_____no_output_____" ], [ "type(str)", "_____no_output_____" ], [ "\nimport pandas as pd\n", "_____no_output_____" ], [ "\"\"\" \"\"\"\ndef func(p1, p2, p3=None, p4=None, *args, **kwargs)", "_____no_output_____" ], [ "pd.readfile", "_____no_output_____" ], [ "sorted()", "_____no_output_____" ], [ "lst = list(\"abccbas\")", "_____no_output_____" ], [ "sorted(lst)", "_____no_output_____" ], [ "sorted(lst)", "_____no_output_____" ], [ "def add(v1,v2,v3):\n return v1+v2+v3\n", "_____no_output_____" ], [ "v1,v2,v3=1,2,3\nadd(v1,v2,v3)", "_____no_output_____" ], [ "tpl=(1,2,3)", "_____no_output_____" ], [ "add(*tpl) #解包tuple, 也可以是list", "_____no_output_____" ], [ "def add2(v1=0,v2=0,v3=0):\n return v1+v2+v3", "_____no_output_____" ], [ "dct = {\"v1\":1,\"v2\":2,\"v3\":3}", "_____no_output_____" ], [ "add2(**dct) #双*高级解包", "_____no_output_____" ], [ "dct2={1:\"xx\"}", "_____no_output_____" ], [ "def add(v1,v2,**hello):\n return hello", "_____no_output_____" ], [ "add(1,2,a=1,b=1)", "_____no_output_____" ], [ "def change_list(lst):\n lst[0] =2\n lst = []\n print \"2:\", id(lst)\n \n \nlst = [1,2,3]\nprint \"1:\" , id(lst)\nchange_list(lst)\nprint lst\n", "1: 4418840552\n2: 4400802200\n[2, 2, 3]\n" ], [ "lst", "_____no_output_____" ], [ "reversed(lst)", "_____no_output_____" ], [ "sorted2([1,2,3,1]) ==> [3,2,1,1]\nsorted2([“123”, “22”, “0000”], key=len) ==> [“0000”, “123”, “22”]\nsorted2([1,2,3,1], reverse=False) ==> [1,1,2,3]\n\n", "_____no_output_____" ], [ "sorted([1,2,3,1])#==> [3,2,1,1]\n", "_____no_output_____" ], [ "sorted(iterable, cmp=None, key=None, reverse=False)", "_____no_output_____" ], [ "def sorted2(iterable, cmp=None, key=None, reverse=False):\n return sorted(iterable, cmp=cmp, key=key, reverse=reverse)\n \n", "_____no_output_____" ], [ "sorted2([1,2,3,1])", "_____no_output_____" ], [ "sorted2([1,2,3,1], reverse=True)\n", "_____no_output_____" ], [ "sorted2([1,2,3,1], reverse=False)", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "sorted2([\"123\", \"22\", \"0000\"], key=len) ", "_____no_output_____" ], [ "assert [1, 1, 2, 3] == sorted2([1,2,3,1], reverse=True) , \"not reverse\"", "_____no_output_____" ], [ "sorted2([1,2,3,1], reverse=False)", "_____no_output_____" ], [ "sorted([\"123\", \"22\", 
\"0000\"], key=len) ", "_____no_output_____" ], [ "sorted([\"123\", \"22\", \"0000\"], key=len, reverse=True) ", "_____no_output_____" ], [ "sorted([\"123\", \"22\", \"0000\"], key=len) ", "_____no_output_____" ], [ "sorted([1,2,3,1], reverse=True)", "_____no_output_____" ], [ "sorted.__doc__\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c4e0fdbfa2e41c085c33e1e3bbf23f8476d50a
393,196
ipynb
Jupyter Notebook
A4-Common-Analysis.ipynb
emi90/data-512-a4
f23c4737c78d618f0dcfb40e9843bd5666a48f31
[ "MIT" ]
null
null
null
A4-Common-Analysis.ipynb
emi90/data-512-a4
f23c4737c78d618f0dcfb40e9843bd5666a48f31
[ "MIT" ]
null
null
null
A4-Common-Analysis.ipynb
emi90/data-512-a4
f23c4737c78d618f0dcfb40e9843bd5666a48f31
[ "MIT" ]
null
null
null
263.359678
156,132
0.911049
[ [ [ "import pandas as pd\nimport numpy as np\n\nimport os\nfrom zipfile import ZipFile\n\nimport requests\nimport json\n\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Step 0: ", "_____no_output_____" ], [ "This analysis research question will require several different datasets: \n\n1. The `RAW_us_confirmed_cases.csv` file from the [Kaggle repository](https://www.kaggle.com/antgoldbloom/covid19-data-from-john-hopkins-university?select=RAW_us_confirmed_cases.csv) of John Hopkins University COVID-19 data.\n2. The [CDC dataset](https://data.cdc.gov/Policy-Surveillance/U-S-State-and-Territorial-Public-Mask-Mandates-Fro/62d6-pm5i) of masking mandates by county.\n3. The New York Times [mask compliance survey data](https://github.com/nytimes/covid-19-data/tree/master/mask-use). \n\nThe majority of this data is by US County by Day. The mask compliance is a single shot estimator that gives you a compliance estimate for every County in the US. You should carefully review the data descriptions that accompany these datasets. They each have some interesting caveats. As well, some of them are explicit with regard to the way you should interpret missing data. \n\nLastly, you have been assigned a specific US County for analysis. You are NOT analyzing the entire dataset. You have been assigned one US County that forms the basis for your individual analysis. You can find your individual US County assignment from this Google spreadsheet.", "_____no_output_____" ], [ "###### Setup- specify county of interest\nAssigned Montgomery County in Maryland", "_____no_output_____" ] ], [ [ "# setup- county of interest\n# Montgomery, MD\n\n\nstate = 'Maryland'\nst = 'MD'\ncounty = 'Montgomery'", "_____no_output_____" ] ], [ [ "###### Get Cases data", "_____no_output_____" ] ], [ [ "# Unzip cases file\n\nos.chdir('data_raw')\n\nwith ZipFile('RAW_us_confirmed_cases.csv.zip') as zipfiles:\n zipfiles.extractall()\n \nos.chdir('..')\n\nos.getcwd()", "_____no_output_____" ], [ "# load cases data\n\nraw = pd.read_csv('data_raw/RAW_us_confirmed_cases.csv')\n\nraw.columns", "_____no_output_____" ] ], [ [ "###### Get Mask policy data", "_____no_output_____" ] ], [ [ "# get mask policy data\n\n'https://data.cdc.gov/resource/62d6-pm5i.json?state_tribe_territory=TX&county'\n\nbase_url = 'https://data.cdc.gov/resource/62d6-pm5i.json?state_tribe_territory={st}&county_name={county}'\n\nparams = {\n 'st' : st,\n 'county' : county + ' County'\n}\nmask_url = base_url.format(**params)\nmasks_json = requests.get(mask_url).json()\n\nmasks_df = pd.DataFrame.from_dict(masks_json)\nmasks_df.shape", "_____no_output_____" ] ], [ [ "###### Get mask compliance data", "_____no_output_____" ] ], [ [ "# get mask compliance data\n\ncompliance_url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/mask-use/mask-use-by-county.csv'\ncompliance_df = pd.read_csv(compliance_url)\n\ncompliance_df.head()", "_____no_output_____" ] ], [ [ "##### Filter based on given county", "_____no_output_____" ] ], [ [ "# filter\n\ncases_filter = raw.loc[(raw.Province_State==state) & (raw.Admin2==county)].copy().reset_index(drop=True)\n\nfips_county = int('10' + masks_df.fips_county[0])\n\ncompliance_filter = compliance_df.loc[compliance_df.COUNTYFP==fips_county].copy().reset_index(drop=True)", "_____no_output_____" ] ], [ [ "## Step 1: Analyze", "_____no_output_____" ], [ "The common question that you are to answer is:\n\n- How did masking policies change the progression of confirmed COVID-19 cases from February 1, 
2020 through October 15, 2021? \n\nAnswering this question can be a little tricky - and it will be useful for you all (whole class) to discuss this on Slack. We will also spend some time in class on this discussion. Some of the issues that you probably should consider when conducting your analysis include:\n1. What needs to be cleaned and standardized over the three datasets?\n2. There is a delay between the time of infection and the time a case is confirmed. Many factors may contribute to such delay. People may not show symptoms right away after infection. It may take a few days for the testing results to become available especially during the early period of the pandemic. Should we model the delay?\n3. Masking may simply make it longer to get infected or it may prevent some percentage of infection. How should we consider the effect of a mask?\n4. The research question is about how a time series changes. The infection time series is a set of slopes. Therefore the question is about a derivative function. That is, you want to answer a question about the change in slope over time. How can we test the difference in the derivative function?\n5. Masking survey data shows probability of compliance in several categories. How can we model different proportions for population compliance?\n6. Masking policies varied in their implementation (e.g., size of “crowd” required, different situations, restaurants, bars, clubs ...). How should I handle things when my County implemented two different policies at different times?\n7. The County I was assigned did not implement a masking policy! What is a reasonable way to answer this question? That is, how might I model “voluntary” masking?\n8. Vaccinations probably impacted the apparent effectiveness of masks. How should we account for different vaccination rates in different populations within the same County? ", "_____no_output_____" ], [ "We note that we did not enumerate all potential issues that you may want to discuss. Further, there are some better and worse ways to handle these questions. We are not looking for the one right answer. We are looking for reasonable solutions. There are aspects of this problem that are very hard to model - and so you will probably want to make simplifying assumptions. For example, you might decide to ignore the impacts of vaccinations or consider pre-vaccine availability as one time series and post-vaccine availability as a totally different time series. 
", "_____no_output_____" ], [ "###### Define additional datapoints that are initially given", "_____no_output_____" ] ], [ [ "# additional data\n\npop = 1062061 # given\nsq_mi = 491.25 # given\nsq_km = 1272.34 # given", "_____no_output_____" ] ], [ [ "##### Cases dataset", "_____no_output_____" ] ], [ [ "# cases have dates in columns- transpose to have each entry as rows\n# also convert dates to datetime\n\ncases = cases_filter[cases_filter.columns[11:]].T.reset_index().rename(columns={'index':'date', 0:'cases'})\n\ncases.date = pd.to_datetime(cases.date)", "_____no_output_____" ], [ "#quick visualization\n\ncases.plot(x='date', y='cases')", "_____no_output_____" ], [ "# cases per population\n\ncases['pct_pop'] = cases.cases/pop\n\n# same, just different units \ncases.plot(x='date', y='pct_pop')", "_____no_output_____" ] ], [ [ "###### Masks dataset", "_____no_output_____" ] ], [ [ "# look at data types\n\nfor col in masks_df.columns:\n print('Data type for column {} is {}'.format(col, type(masks_df[col][0])))", "Data type for column state_tribe_territory is <class 'str'>\nData type for column county_name is <class 'str'>\nData type for column fips_state is <class 'str'>\nData type for column fips_county is <class 'str'>\nData type for column date is <class 'str'>\nData type for column order_code is <class 'str'>\nData type for column face_masks_required_in_public is <class 'str'>\nData type for column source_of_action is <class 'str'>\nData type for column url is <class 'str'>\nData type for column citation is <class 'str'>\n" ], [ "# convert data types\n# dates to datetime\n# order_code to int\n\nmasks_df.date = pd.to_datetime(masks_df.date)\nmasks_df.order_code = masks_df.order_code.astype(int)", "_____no_output_____" ], [ "# quick visualization\n\nmasks_df.plot(x='date', y='order_code')", "_____no_output_____" ] ], [ [ "##### Mask compliance dataset", "_____no_output_____" ] ], [ [ "# also setup as columns --> transpose to show each entry as rows\n# also show as magnitude \n\nmask_comp = compliance_filter.drop(columns=['COUNTYFP']).T.reset_index().rename(columns={'index':'response',0:'pct_pop'})\nmask_comp['pop'] = mask_comp.pct_pop * pop\n\nmask_comp", "_____no_output_____" ] ], [ [ "###### Merging datasets", "_____no_output_____" ] ], [ [ "# helper function\n\ndef case_per_pop(df, subset):\n \"\"\"\n Helper function to join mask compliance population % with cases dataset\n df: pd.DataFrame\n Cases dataframe\n subset: str\n String of mask compliance category\n Returns: new pd.DataFrame with columns added for population given mask compliance response\n \"\"\"\n \n subset_pop = mask_comp.loc[mask_comp.response==subset]['pop'].values[0]\n df[subset.lower()] = subset_pop\n \n return df", "_____no_output_____" ], [ "# apply helper function- populate columns with mask compliance population\n\ncases_mask = cases.copy()\n\nfor resp in mask_comp.response:\n cases_mask = case_per_pop(cases_mask, resp)", "_____no_output_____" ], [ "# additional merge- merge case + compliance with mask order code\n\ncases_merge = cases_mask.merge(masks_df[['order_code', 'date']], on='date')", "_____no_output_____" ] ], [ [ "##### Getting daily cases", "_____no_output_____" ] ], [ [ "# get daily cases- daily difference between total cases\n\ncases_merge['daily_cases'] = cases_merge.cases - cases_merge.cases.shift(1)", "_____no_output_____" ], [ "# quick visualization\n\ncases_merge.plot(x='date', y='daily_cases')", "_____no_output_____" ] ], [ [ "###### Get rolling average", "_____no_output_____" ] ], [ [ "# 
Helper function\n\ndef get_rolling_avg(df, days):\n \n col_name = 'rolling_avg_' + str(days)\n df[col_name] = df.daily_cases.rolling(days).mean()\n \n return df", "_____no_output_____" ], [ "# get rolling average- 7 day\n\ncases_merge = get_rolling_avg(cases_merge, 7)\n\n# get rolling average- 14 day\n\ncases_merge = get_rolling_avg(cases_merge, 14)", "_____no_output_____" ], [ "# quick visualization - 7 day\n\ncases_merge.plot(x='date', y='rolling_avg_7')", "_____no_output_____" ], [ "# quick visualization - 14 day\n\ncases_merge.plot(x='date', y='rolling_avg_14')", "_____no_output_____" ] ], [ [ "##### Getting change in daily cases", "_____no_output_____" ] ], [ [ "# helper function\n\ndef get_pct_chg(df, days):\n \n col_name = 'pct_chg_' + str(days) + 'D_avg'\n pct_col = 'rolling_avg_' + str(days)\n df[col_name] = df[pct_col].pct_change(1)\n \n return df", "_____no_output_____" ], [ "# get pct change- 7 days RA\n\ncases_merge = get_pct_chg(cases_merge, 7)\n\n# get pct change- 14 days RA\n\ncases_merge = get_pct_chg(cases_merge, 14)\n\n# get pct change- daily \n\ncases_merge['pct_chg_daily'] = cases_merge.daily_cases.pct_change(1)", "_____no_output_____" ], [ "# quick visualization 7D pct chg\n\ncases_merge.plot(x='date', y='pct_chg_7D_avg')", "_____no_output_____" ], [ "# quick visualization 14D pct chg\n\ncases_merge.plot(x='date', y='pct_chg_14D_avg')", "_____no_output_____" ], [ "# quick visualization daily pct chg\n\ncases_merge.plot(x='date', y='pct_chg_daily')", "_____no_output_____" ], [ "# recap- what do we have so far\n\ncases_merge.head()", "_____no_output_____" ] ], [ [ "##### Save to file", "_____no_output_____" ] ], [ [ "# save cleaned dataset to file\n\ncases_merge.to_csv('data_clean/cases_clean.csv', index=False)", "_____no_output_____" ] ], [ [ "## Step 2: Visualize", "_____no_output_____" ], [ "In this step we want you to create a graph that visualizes how the course of the disease was changed by masking policies. For your county, you should create a time series showing the changes in the derivative function of the rate of infection. Your graph should indicate days where masking policies were in effect (or not) and whether the difference in the derivative function was significant. Optionally, you can add a second time series that shows the actual rate of infection. 
", "_____no_output_____" ] ], [ [ "# read from file\n\ncases = pd.read_csv('data_clean/cases_clean.csv')\n\ncases.head()", "_____no_output_____" ], [ "# helper function- get masked array\n\ndef get_ma(var, order):\n return np.ma.masked_where(cases.order_code==order, cases[var])", "_____no_output_____" ], [ "# get variables\n\nx = pd.to_datetime(cases.date)\n\n# get masked arrays (for different masking orders)\n\ndaily_mask = get_ma('daily_cases', 2)\ndaily_no_mask = get_ma('daily_cases', 1)\ntotal_mask = get_ma('cases', 2)\ntotal_no_mask = get_ma('cases', 1)\nroll7_avg_mask = get_ma('rolling_avg_7', 2)\nroll7_avg_no_mask = get_ma('rolling_avg_7', 1)\nroll14_avg_mask = get_ma('rolling_avg_14', 2)\nroll14_avg_no_mask = get_ma('rolling_avg_14', 1)", "_____no_output_____" ], [ "fig = plt.figure(figsize=(20, 10), facecolor='white')\nax = fig.add_subplot()\n\nax.set_title('Daily COVID19 Cases in Montegomery County, MD \\nFeb 1 2020 - Oct 15 2021')\n\n\n# plot daily cases\nax.bar(x, daily_mask, alpha=0.8, linewidth=2, color='lightblue',\n label='Daily Cases-Mask Mandate (LHS)')\nax.bar(x, daily_no_mask, alpha=0.5, color='silver', label='Daily Cases-No Mask Mandate (LHS)')\n\n# plot rolling avg 7D\nax.plot(x, roll7_avg_mask, alpha=0.8, linewidth=2, color='crimson', \n label='7D Rolling Avg of Cases-Mask Mandate (LHS)')\nax.plot(x, roll7_avg_no_mask, alpha=0.5, color='crimson', linestyle='dashed',\n label='7D Rolling Avg of Cases-No Mask Mandate (LHS)')\n\n# plot rolling avg 14D\nax.plot(x, roll14_avg_mask, alpha=0.8, linewidth=2, color='darkblue', \n label='14D Rolling Avg of Cases-Mask Mandate (LHS)')\nax.plot(x, roll14_avg_no_mask, alpha=0.5, color='darkblue', linestyle='dashed',\n label='14D Rolling Avg of Cases-No Mask Mandate (LHS)')\n\n# set labels\nax.set_xlabel('Date')\nax.set_ylabel('Number of Positive Cases')\nplt.legend(loc='upper left')\n\nax2 = ax.twinx()\nax2.plot(x, total_mask, c='darkgreen', alpha=0.6, linewidth=5, label='Total Cases-Mask Mandate (RHS)')\nax2.plot(x, total_no_mask, c='darkgreen', alpha=0.3, linewidth=3, \n linestyle='dashed', label='Total Cases-No Mask Mandate (RHS)')\nax2.set_ylabel('Number of Positive Cases')\n\nplt.legend(loc='upper right')\n\nplt.savefig('visualization/plot.png', facecolor=fig.get_facecolor(), bbox_inches='tight')", "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\numpy\\lib\\stride_tricks.py:256: UserWarning: Warning: converting a masked element to nan.\n args = [np.array(_m, copy=False, subok=subok) for _m in args]\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
e7c4f0a6987057ec5583ab06fa807da61cb1aeb7
449,951
ipynb
Jupyter Notebook
07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb
hritik5102/SHALA2020
f0c74cd6718b51ddeffc9a8cda7c30d3cd78dcd3
[ "MIT" ]
5
2020-05-10T15:43:11.000Z
2022-03-02T00:15:36.000Z
07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb
Sankalp679/SHALA2020
d9e596346b396acde33f2965f6f39f7aefcd7188
[ "MIT" ]
null
null
null
07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb
Sankalp679/SHALA2020
d9e596346b396acde33f2965f6f39f7aefcd7188
[ "MIT" ]
5
2020-05-10T17:51:14.000Z
2020-06-05T15:12:11.000Z
497.733407
199,506
0.934008
[ [ [ "#Gaussian bayes classifier\n\nIn this assignment we will use a Gaussian bayes classfier to classify our data points.", "_____no_output_____" ], [ "# Import packages", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal\nfrom sklearn.metrics import classification_report\nfrom matplotlib import cm", "_____no_output_____" ] ], [ [ "# Load training data\n\nOur data has 2D feature $x1, x2$. Data from the two classes is are in $\\texttt{class1_train}$ and $\\texttt{class2_train}$ respectively. Each file has two columns corresponding to the 2D feature.", "_____no_output_____" ] ], [ [ "class1_train = pd.read_csv('https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/MachineLearning/L3/class1_train').to_numpy()\nclass2_train = pd.read_csv('https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/MachineLearning/L3/class2_train').to_numpy()", "_____no_output_____" ], [ "class1_train[:10]", "_____no_output_____" ], [ "class1_train.shape", "_____no_output_____" ], [ "class2_train.shape", "_____no_output_____" ] ], [ [ "# Visualize training data\nGenerate 2D scatter plot of the training data. Plot the points from class 1 in red and the points from class 2 in blue.", "_____no_output_____" ] ], [ [ "import seaborn as sns\nclasses = ['class-1','class-2']\n\nfor i in range(class1_train.shape[0]):\n \n plt.scatter(class1_train[i][0],class1_train[i][1] ,c=\"red\",alpha=0.6, edgecolors='none')\n\n # plt.legend(loc='best', fontsize=16)\n plt.xlabel('Growth %')\n plt.ylabel('Population')\n\nfor j in range(class2_train.shape[0]):\n plt.scatter(class1_train[j][0],class1_train[j][1] ,c=\"blue\")\n", "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ] ], [ [ "# Maximum likelihood estimate of parameters\n\nWe will model the likelihood, $P(\\mathbf{x}|C_1)$ and $P(\\mathbf{x}|C_2)$ as $\\mathcal{N}(\\mathbf{\\mu_1},\\Sigma_1)$ and $\\mathcal{N}(\\mathbf{\\mu_2},\\Sigma_2)$ respectively. The prior probability of the classes are called, $P(C_1)=\\pi_1$ and $P(C_2)=\\pi_2$.\n\nThe maximum likelihood estimate of the parameters as follows:\n\\begin{align*}\n\\pi_k &= \\frac{\\sum_{i=1}^N \\mathbb{1}(t^i=k)}{N}\\\\\n\\mathbf{\\mu_k} &= \\frac{\\sum_{i=1}^N \\mathbb{1}(t^i=k)\\mathbf{x}^i}{\\sum_{i=1}^N \\mathbb{1}(t^i=k)}\\\\\n\\Sigma_k &= \\frac{\\sum_{i=1}^N \\mathbb{1}(t^i=k)(\\mathbf{x}^i-\\mathbf{\\mu_k})(\\mathbf{x}^i-\\mathbf{\\mu_k})^T}{\\sum_{i=1}^N \\mathbb{1}(t^i=k)}\\\\\n\\end{align*}\n\nHere, $t^i$ is the target or class of $i^{th}$ sample. 
$\\mathbb{1}(t^i=k)$ is 1 if $t^i=k$ and 0 otherwise.\n\nCompute maximum likelihood values estimates of $\\pi_1$, $\\mu_1$, $\\Sigma_1$ and $\\pi_2$, $\\mu_2$, $\\Sigma_2$ \n\nAlso print these values\n", "_____no_output_____" ], [ "$pi$ = `Prior` <br/>\n$mu$ and $sigma$ = `Likelihood` \n", "_____no_output_____" ] ], [ [ "def calculate_pi_1():\n num = class1_train.shape[0]\n deno = class1_train.shape[0] + class2_train.shape[0]\n return num/deno\n\ndef calculate_pi_2():\n num = class2_train.shape[0]\n deno = class1_train.shape[0] + class2_train.shape[0]\n return num/deno\n\ndef calculate_mu_1():\n return class1_train.mean(axis=0)\n\ndef calculate_mu_2():\n return class2_train.mean(axis=0)\n\ndef calculate_cov_1():\n x = class1_train\n print(x.shape)\n mu = x.mean(axis=0) \n x_norm = x-mu\n x_transpose = x_norm.transpose()\n return np.cov(x_transpose)\n\ndef calculate_cov_2():\n x = class2_train\n print(x.shape)\n mu = x.mean(axis=0)\n x_norm = x-mu\n x_transpose = x_norm.transpose()\n return np.cov(x_transpose)\n\n\nprint( 'pi_1 : {} and pi_2 : {}'.format(calculate_pi_1(),calculate_pi_2()))\nprint( 'mu_1 : {} and mu_2 : {}'.format(calculate_mu_1(),calculate_mu_2()))\nprint( 'sigma_1 : \\n{} \\n sigma_2 : \\n{}'.format(calculate_cov_1(),calculate_cov_2()))", "pi_1 : 0.8040201005025126 and pi_2 : 0.19597989949748743\nmu_1 : [0.96998989 1.02894917] and mu_2 : [-1.02482819 -0.91492055]\n(160, 2)\n(39, 2)\nsigma_1 : \n[[0.96127884 0.07824879]\n [0.07824879 0.82105102]] \n sigma_2 : \n[[1.1978678 0.48182629]\n [0.48182629 0.93767199]]\n" ], [ "## Another way to get Pi , mu and sigma\n\npi1 = len(class1_train)/(len(class1_train)+len(class2_train))\npi2 = len(class2_train)/(len(class1_train)+len(class2_train))\nmu1 = class1_train.mean(axis=0)\nmu2 = class2_train.mean(axis=0)\nsig1 = np.cov(class1_train,rowvar=False)\nsig2 = np.cov(class2_train,rowvar=False)\n\nprint(\"Pi-1 {} and Pi-2 {}\".format(pi1,pi2))\nprint(\"mu-1 {} and mu-2 {}\".format(mu1,mu2))\nprint(\"sig-1 {} and sig-2 {}\".format(sig1,sig2))\n", "Pi-1 0.8040201005025126 and Pi-2 0.19597989949748743\nmu-1 [0.96998989 1.02894917] and mu-2 [-1.02482819 -0.91492055]\nsig-1 [[0.96127884 0.07824879]\n [0.07824879 0.82105102]] and sig-2 [[1.1978678 0.48182629]\n [0.48182629 0.93767199]]\n" ] ], [ [ "# Visualize the likelihood\nNow that you have the parameters, let us visualize how the likelihood looks like.\n\n1. Use $\\texttt{np.mgrid}$ to generate points uniformly spaced in -5 to 5 along 2 axes\n1. Use $\\texttt{multivariate_normal.pdf}$ to get compute the Gaussian likelihood for each class \n1. Use $\\texttt{plot_surface}$ to plot the likelihood of each class.\n1. Use $\\texttt{contourf}$ to plot the likelihood of each class. \n\nYou may find the code in the lecture notebook helpful.\n \nFor the plots, use $\\texttt{cmap=cm.Reds}$ for class 1 and $\\texttt{cmap=cm.Blues}$ for class 2. 
Use $\\texttt{alpha=0.5}$ to overlay both plots together.", "_____no_output_____" ] ], [ [ "from matplotlib import cm\n\nx,y = np.mgrid[-5:5:.01, -5:5:.01]\npos = np.empty(x.shape + (2,))\npos[:, :, 0] = x; pos[:, :, 1] = y\n\nmu1 = calculate_mu_1()\nmu2 = calculate_mu_2()\ncov1 = calculate_cov_1()\ncov2 = calculate_cov_2()\nrv1 = multivariate_normal(mean = mu1, cov = cov1)\nrv2 = multivariate_normal(mean = mu2, cov = cov2)\n\nfig = plt.figure(figsize=(20,10))\nax = fig.add_subplot(121, projection='3d')\nplt.xlabel('x')\nplt.ylabel('y')\nax.plot_surface(x,y,rv1.pdf(pos), cmap=cm.Reds,alpha=0.5)\nax.plot_surface(x,y,rv2.pdf(pos), cmap=cm.Blues,alpha=0.5)\n\nplt.subplot(122)\nplt.contourf(x, y, rv1.pdf(pos), cmap=cm.Reds,alpha=0.5)\nplt.contourf(x, y, rv2.pdf(pos), cmap=cm.Blues,alpha=0.5)\n\nplt.colorbar()\nplt.xlabel('x')\nplt.ylabel('y')", "(160, 2)\n(39, 2)\n" ] ], [ [ "#Visualize the posterior\nUse the prior and the likelihood you've computed to obtain the posterior distribution for each class.\n\nLike in the case of the likelihood above, make same similar surface and contour plots for the posterior.", "_____no_output_____" ] ], [ [ "likelihood1 = rv1.pdf(pos)\nlikelihood2 = rv2.pdf(pos)\n\np1 = (likelihood1 * pi1)/(likelihood1*pi1+likelihood2*pi2)\np2 = (likelihood2 * pi2)/(likelihood1*pi1+likelihood2*pi2)\n\nx, y = np.mgrid[-5:5:.01, -5:5:.01]\npos = np.empty(x.shape + (2,))\npos[:, :, 0] = x; pos[:, :, 1] = y\nfig = plt.figure(figsize=(20,10))\nax = fig.add_subplot(131, projection='3d')\nplt.xlabel('x')\nplt.ylabel('y')\nax.plot_surface(x,y,p1, cmap=cm.Reds,alpha=0.5)\nax.plot_surface(x,y,p2, cmap=cm.Blues,alpha=0.5)\nplt.subplot(132)\nplt.contourf(x,y,p1,cmap=cm.Reds,alpha=0.5)\nplt.contourf(x,y,p2,cmap=cm.Blues,alpha=0.5)\nplt.xlabel('x')\nplt.ylabel('y')", "_____no_output_____" ] ], [ [ "# Decision boundary\n1. Decision boundary can be obtained by $P(C_2|x)>P(C_1|x)$ in python. Use $\\texttt{contourf}$ to plot the decision boundary. Use $\\texttt{cmap=cm.Blues}$ and $\\texttt{alpha=0.5}$\n1. Also overlay the scatter plot of train data points from the 2 classes on the same plot. Use red color for class 1 and blue color for class 2 ", "_____no_output_____" ] ], [ [ "des = p2>p1\nplt.contourf(x,y,p1,cmap=cm.Reds,alpha=0.5)\nplt.contourf(x,y,p2,cmap=cm.Blues,alpha=0.5)\nplt.contourf(x,y,des,cmap=cm.Greens,alpha=0.3)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.scatter(class1_train[:,0],class1_train[:,1],marker='*',color='red')\nplt.scatter(class2_train[:,0],class2_train[:,1],marker='+',color='blue')", "_____no_output_____" ] ], [ [ "# Test Data\nNow let's use our trained model to classify test data points\n\n1. $\\texttt{test_data}$ contains the $x1,x2$ features of different data points\n1. $\\texttt{test_label}$ contains the true class of the data points. 0 means class 1. 1 means class 2. \n1. Classify the test points based on whichever class has higher posterior probability for each data point\n1. 
Use $\\texttt{classification_report}$ to test the classification performance", "_____no_output_____" ] ], [ [ "test = pd.read_csv('https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/MachineLearning/L3/test').to_numpy()\ntest_data, test_label = test[:,:2], test[:,2]\n\ntest_data\n", "_____no_output_____" ], [ "## likelihood \nl1 = rv1.pdf(test_data)\nl2 = rv2.pdf(test_data)", "_____no_output_____" ], [ "##Posterior \np1_test= (l1*pi1)/(l1*pi1+l2*pi2)\np2_test= (l2*pi2)/(l1*pi1+l2*pi2)", "_____no_output_____" ], [ "## Descision bundory \ntest_data_predict=p2_test>p1_test\ntest_data_predict", "_____no_output_____" ], [ "test_data_predict = np.where(test_data_predict==True,1,0)\ntest_data_predict", "_____no_output_____" ], [ "from sklearn.metrics import classification_report,accuracy_score", "_____no_output_____" ], [ "print(accuracy_score(test_label,test_data_predict))", "0.8775510204081632\n" ], [ "print(classification_report(test_label,test_data_predict))", " precision recall f1-score support\n\n 0.0 0.93 0.93 0.93 40\n 1.0 0.67 0.67 0.67 9\n\n accuracy 0.88 49\n macro avg 0.80 0.80 0.80 49\nweighted avg 0.88 0.88 0.88 49\n\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c501142c3d841e8253f5e8d026180d195454e4
204,172
ipynb
Jupyter Notebook
Air-Pollution-Levels-Exploratory-Data-Analysis-master/Bangalore.ipynb
varuntotakura/AirPollutionAnalysis
ec8a1df747c018b13856a346c8236adc8268c6bb
[ "BSD-2-Clause" ]
null
null
null
Air-Pollution-Levels-Exploratory-Data-Analysis-master/Bangalore.ipynb
varuntotakura/AirPollutionAnalysis
ec8a1df747c018b13856a346c8236adc8268c6bb
[ "BSD-2-Clause" ]
null
null
null
Air-Pollution-Levels-Exploratory-Data-Analysis-master/Bangalore.ipynb
varuntotakura/AirPollutionAnalysis
ec8a1df747c018b13856a346c8236adc8268c6bb
[ "BSD-2-Clause" ]
null
null
null
117.882217
137,979
0.795427
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n%matplotlib notebook\n\nimport seaborn as sns", "_____no_output_____" ], [ "plt.style.use(\"seaborn-colorblind\")", "_____no_output_____" ], [ "data = pd.read_csv(\"C:/Users/VARUN/Desktop/AirPollution/Dataset/cpcb_dly_aq_karnataka-2011.csv\")", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "dates = ['-'.join(i.split('/')[1:]) for i in data['Sampling Date']]", "_____no_output_____" ], [ "data['Sampling Date'] = dates", "_____no_output_____" ], [ "for i in range(len(data['City/Town/Village/Area'])):\n if data['City/Town/Village/Area'][i] != 'Bangalore':\n data.drop(i, inplace = True)", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "data = data.groupby(\"Sampling Date\").mean()\ndata", "_____no_output_____" ], [ "data = data.groupby(\"Sampling Date\").mean().drop(\"SPM\", axis=1)", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "plt.figure(figsize=(9, 7))\nplt.subplot(211)\nplt.cla()\nplotter = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun','Jul', 'Aug', 'Sep',\n 'Oct', 'Nov', 'Dec']\nx = np.arange(0,len(data[\"NO2\"]),1)\nax = plt.gca()\nax.plot(x, data[\"NO2\"])\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.set_xticks(x)\nplt.xticks(rotation=45)\nplt.subplots_adjust(bottom=0.2)\nplt.legend([2014],loc=2)\nplt.title(\"$\\mathregular{NO_2}$ (Nitrogen Dioxide) levels in Bangalore\")\nax.set_xticklabels(plotter)\n\nplt.subplot(212)\nplt.cla()\nplotter = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun','Jul', 'Aug', 'Sep',\n 'Oct', 'Nov', 'Dec']\nx = np.arange(0,len(data[\"SO2\"]),1)\nax = plt.gca()\nax.plot(x, data[\"SO2\"])\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.set_xticks(x)\nplt.xticks(rotation=45)\nplt.title(\"$\\mathregular{SO_2}$ (Sulphur Dioxide) levels in Bangalore\")\nax.set_xticklabels(plotter)\n\nplt.tight_layout()\n#plt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c5253b0e27342ad1ec4a4cec85d9bda40941d3
7,117
ipynb
Jupyter Notebook
data-wrangling-analysis.ipynb
Clercy/proj_bedbugs
44c220a0e758d9c3144ab0a499f78568817163a7
[ "MIT" ]
null
null
null
data-wrangling-analysis.ipynb
Clercy/proj_bedbugs
44c220a0e758d9c3144ab0a499f78568817163a7
[ "MIT" ]
null
null
null
data-wrangling-analysis.ipynb
Clercy/proj_bedbugs
44c220a0e758d9c3144ab0a499f78568817163a7
[ "MIT" ]
null
null
null
23.107143
295
0.556695
[ [ [ "# Data Wrangling & Cleaning", "_____no_output_____" ] ], [ [ "# import the library\n%matplotlib inline\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# convert scientific notation to decimals\npd.set_option('display.float_format', lambda x: '%.2f' % x)", "_____no_output_____" ] ], [ [ "### Load & Merge the data", "_____no_output_____" ] ], [ [ "df_listing = pd.read_csv('data/kc_house_data.csv')\ndf_walking_score = pd.read_csv('data/walking_score.csv')\ndf_income = pd.read_csv('data/ZIP-3.csv')", "_____no_output_____" ] ], [ [ "### Summarizing your data for inspection", "_____no_output_____" ] ], [ [ "print('Listings')\nprint(df_listing.columns)\nprint(df_listing.head())\nprint(df_listing.describe())\nprint('')\nprint('Walking Score')\n# TODO: print the columns, head and describe for the Walking Score dataframe\nprint('')\nprint('Income')\n# TODO: print the columns, head and describe for the Income dataframe", "_____no_output_____" ] ], [ [ "### Fixing column name", "_____no_output_____" ] ], [ [ "df_income.columns = ['zipcode', 'median_income', 'mean_income', 'population']", "_____no_output_____" ] ], [ [ "### Converting data types", "_____no_output_____" ] ], [ [ "df_listing['date'] = pd.to_datetime(df_listing['date'])\ndf_income['median_income'] = df_income['median_income'].str.replace(',', '').astype(float)\ndf_income['mean_income'] = df_income['mean_income'].str.replace(',', '').astype(float)\ndf_income.head()", "_____no_output_____" ], [ "# TODO: Convert the data type of the population column\ndf_income", "_____no_output_____" ] ], [ [ "### Dealing with missing values\nHow to deal with the missing values? Should we remove the rows or fill the gap with a value?", "_____no_output_____" ] ], [ [ "# Number of missing values by columns\nprint(df_listing.isnull().sum())\nprint('')\nprint(df_walking_score.isnull().sum())\nprint('')\nprint(df_income.isnull().sum())", "_____no_output_____" ], [ "# select all the rows with missing values\ndf_walking_score[df_walking_score.isnull().any(axis=1)]", "_____no_output_____" ], [ "# select all the rows with missing values\ndf_income[df_income.isnull().any(axis=1)]", "_____no_output_____" ], [ "# TODO: Create a strategy to handle the missing values on the Walking Score and Income dataframes", "_____no_output_____" ] ], [ [ "### Removing outliers\nSome algorithms are very sensitive to outliers. Considering the number of bedrooms, should we remove houses with an extreme number of bedrooms? How many bedrooms are too many? 
(Suggestion: as a rule of thumb, three standard deviations from the mean is a good measure to identify outliers).", "_____no_output_____" ] ], [ [ "# bedrooms\nprint(df_listing['bedrooms'].value_counts())\nprint('mean', np.mean(df_listing['bedrooms']))\nprint('std', np.std(df_listing['bedrooms']))\nplt.hist(df_listing['bedrooms'], bins=20)\nplt.show()", "_____no_output_____" ], [ "# TODO: Remove the outlier houses considering the number of bedrooms", "_____no_output_____" ], [ "# Dealing with outliers\nhouses_to_remove = []\n\n# remove based on zipcode and price\nfor zipcode in df_listing['zipcode'].unique():\n df_zipcode = df_listing[df_listing['zipcode']==zipcode]\n m = np.mean(df_zipcode['price'])\n s = np.std(df_zipcode['price'])\n houses_to_remove = houses_to_remove + list(df_zipcode[df_zipcode['price']>m+3.0*s].index)\nprint('')\nprint('# houses to remove', len(houses_to_remove))\n\ndf_listing = df_listing[~df_listing.index.isin(houses_to_remove)]", "_____no_output_____" ] ], [ [ "### Merging Data Sets", "_____no_output_____" ] ], [ [ "df_merge = df_listing.copy()\ndf_merge = df_merge.merge(df_walking_score, on='zipcode', how='left')\ndf_merge = df_merge.merge(df_income, on='zipcode', how='left')", "_____no_output_____" ], [ "print('Total # houses', len(df_merge))", "_____no_output_____" ] ], [ [ "### Saving the processed file", "_____no_output_____" ] ], [ [ "df_merge.to_csv('data/house_pricing.csv', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
e7c53aba75d52551f9395690a4a9d0b0d9cefeb5
428,742
ipynb
Jupyter Notebook
tests/.ipynb_checkpoints/standalone_vkdv_convergence KISSME bathy REAL BVP-checkpoint.ipynb
iosonobert/iwaves
143563bc9075d1e42e486a064f1fefa67ed84702
[ "BSD-2-Clause" ]
null
null
null
tests/.ipynb_checkpoints/standalone_vkdv_convergence KISSME bathy REAL BVP-checkpoint.ipynb
iosonobert/iwaves
143563bc9075d1e42e486a064f1fefa67ed84702
[ "BSD-2-Clause" ]
1
2020-08-31T09:37:48.000Z
2020-08-31T09:37:48.000Z
tests/.ipynb_checkpoints/standalone_vkdv_convergence KISSME bathy REAL BVP-checkpoint.ipynb
iosonobert/iwaves
143563bc9075d1e42e486a064f1fefa67ed84702
[ "BSD-2-Clause" ]
null
null
null
389.411444
101,708
0.930112
[ [ [ "# Standalone Convergence Checker for the numerical vKdV solver KISSME bottom - KISSME stratification\n\nGetting more realistic now. Using the real KISSME stratification. \n\nStill linear non hydrostatic\nStill using an offshore 'blank' zone with initial conditions not boundary conditions", "_____no_output_____" ] ], [ [ "import xarray as xr\nfrom iwaves.kdv.kdvimex import KdVImEx#from_netcdf\nfrom iwaves.kdv.vkdv import vKdV \nfrom iwaves.kdv.solve import solve_kdv\nfrom iwaves.utils.plot import vKdV_plot\nimport iwaves.utils.initial_conditions as ics\nimport iwaves.utils.boundary_conditions as bcs\n\nimport pandas as pd\nimport numpy as np\nfrom scipy.interpolate import PchipInterpolator as pchip\nfrom scipy.interpolate import interp1d\nimport scipy.signal\n\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\nfrom matplotlib import rcParams\n\n# Set font sizes\nrcParams['font.family'] = 'sans-serif'\nrcParams['font.sans-serif'] = ['Bitstream Vera Sans']\nrcParams['font.serif'] = ['Bitstream Vera Sans']\nrcParams[\"font.size\"] = \"14\"\nrcParams['axes.labelsize']='large'\n\n# CONSTANTS FOR WHOLE NOTEBOOK\nd = 252.5\nL_d = 3.0e5\nNz = 100\nbathy_params = [L_d/2, 60000, d, d/2] \nruntime = 4.5*86400.\nruntime = 1.5*86400.\nruntime = 2*86400.\n\nnonlinear = True\nnonhydrostatic = True\n\na0 = 0.\na_bc_left = 35\nkb_start = 1000\ndrag_param = 0.0006\n\nWn = 1/200 # Filtering both in and outside of the KdV module.", "_____no_output_____" ], [ "def get_kissme_h(dx, kb_start, start_depth=650, sponge_start_depth=150):\n \"\"\"\n kb_start is where the KISSME bathy starts - will be constant depth before that.\n dx is the depth to interpolate to. \n \n sponge_start_depth is the depth at which the sponge boundary will kick in.\n \n \"\"\"\n \n data = pd.read_csv(r'..\\..\\..\\..\\03_CODE\\GA BATHY CODES [and data]\\xyz for chapter 3.txt', names=['x', 'y', 'h', 'z'])\n \n \n x = data['x'].values.tolist()\n y = data['y'].values.tolist()\n z = data['z'].values.tolist()\n\n x.reverse()\n y.reverse()\n z.reverse()\n\n x = np.array(x)\n y = np.array(y)\n z = np.array(z)\n\n delta_x = x - x[0]\n delta_y = y - y[0]\n\n horz_km = 110*np.sqrt((delta_x**2+(delta_y*np.cos(np.pi*y/180))**2))\n \n ##########\n ## OUTPUTS\n x = 1000*horz_km\n h = -z\n \n \n #########\n ## LIMIT\n \n ind = np.where(abs(h-start_depth)==min(abs(h-start_depth)))[0][0]\n \n x = x[ind:-1]\n h = h[ind:-1]\n \n x = x-x[0]+kb_start\n \n ############\n ## ADD START\n \n print(np.max(x))\n print(dx)\n xi = np.arange(-2*dx, L_d+dx, dx)\n F = interp1d(x, h, bounds_error=False, fill_value='extrapolate')\n hi = F(xi)\n \n ind = np.where(xi<kb_start)[0]\n hi[ind] = h[0]\n \n ind = np.where(xi>max(x))[0]\n hi[ind] = h[-1]\n\n #########\n ## FILTER\n b, a = scipy.signal.butter(4, Wn)\n hi = scipy.signal.filtfilt(b, a, hi)\n \n \n #########\n ## SPONGE \n ind = np.where(abs(hi-sponge_start_depth)==min(abs(hi-sponge_start_depth)))[0][0]\n spongedist = xi[-1] - xi[ind]\n \n return xi, hi, spongedist\n\ndef get_rho_kissme_apr3():\n \n ncfile = r'\\\\drive.irds.uwa.edu.au\\CEME-BBLE-001\\KISSME\\Processed data\\Moorings\\Mooring-Temp-SP250_2short.nc'\n ncfile = r'./data/Mooring-Temp-SP250_2short.nc'\n ncfile = r'C:\\Users\\AZulberti\\Dropbox\\University\\PhD\\2016_Andrew_Zulberti_2\\02 WRITING\\3 - Energy dissipation/data/Mooring-Temp-SP250_2short.nc'\n\n # April 4\n ti_i = 3390 - 200\n ti_f = 3420 + 200\n\n # Apr 3\n ti_i = 1000\n ti_f = 1500\n\n ti_m = int(np.mean((ti_i, ti_f)))\n\n ti_wave = np.arange(ti_i, ti_f)\n ti_full = 
np.arange(ti_i-200, ti_f+200)\n\n ds = xr.open_dataset(ncfile)\n \n rho_opt = 'hat'\n # rho_opt = 'star'\n if rho_opt == 'hat':\n rho = ds.dens_hat.values[:, ti_m]\n elif rho_opt == 'star':\n rho = ds.dens_star.values[:, ti_m]\n\n z = ds.height_star.values\n\n rho = rho[::-1]\n z = z[::-1]\n\n z = z-max(z)\n \n return z, rho\n", "_____no_output_____" ], [ "\n# Functions\n\ndef run_kdv(args):\n \"\"\"\n Main function for generating different soliton scenarios\n \"\"\"\n rho_params, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw, solver = args\n ####################################################\n # Inputs\n mode = 0\n \n Nz = 100\n\n ntout = 1800.0\n\n# z = np.linspace(0, -d, Nz)\n# rhoz = ics.rho_double_tanh_rayson(rho_params,z)\n \n z, rhoz = get_rho_kissme_apr3()\n\n dz = np.abs(z[1]-z[0])\n\n if solver == 'vkdv':\n# h = 0*x+d\n# h = ics.depth_tanh2(bathy_params, x) \n x, h, spongedist = get_kissme_h(dx, kb_start)\n pass\n elif solver == 'imex':\n x = np.arange(-2*dx,L_d+dx,dx)\n h = None\n spongedist = 0\n\n kdvargs = dict(\\\n verbose=False,\\\n a0=a0,\\\n Lw=Lw,\\\n mode=mode,\n dt=dt,\\\n nu_H=nu_H,\\\n ekdv=False,\\\n wavefunc=ics.eta_fullsine,\\\n #L_d = L_d,\n x=x,\\\n Nsubset=10,\n nonlinear=nonlinear,\n nonhydrostatic=nonhydrostatic, \n spongedist=spongedist,\n drag_param = drag_param\n \n )\n\n ###\n# THIS WAS COPIED FROM THE KdV VERSION. IT INITIALISES EACH vKdV 3 TIMES - QUITE SLOW. \n ###\n \n ii=0\n #rhoz = single_tanh_rho(\n # z, pp['rho0'][ii], pp['drho1'][ii], pp['z1'][ii], pp['h1'][ii])\n \n ######\n\n ## Call the vKdV run function\n mykdv, Bda = solve_kdv(rhoz, z, runtime,\\\n solver=solver, h=h, ntout=ntout, outfile=None, a_bc_left=a_bc_left, Wn=Wn, **kdvargs)\n\n print('Done with dx={} and dt={}'.format(dx, dt))\n \n return mykdv, Bda", "_____no_output_____" ], [ "#betas = [1023.7, 1.12, 105, 52, 155, 43] # ~April 5\n#betas = [1023.5, 1.22, 67, 55, 157, 52] # ~March 1\n\nbetas_w = [1023.8229810318612,\n 0.9865506702797462,\n 143.5428700089361,\n 46.1265812512485,\n 136.66278860120943,\n 41.57014327398592] # 15 July 2016\n\nbetas_s =[1023.6834358117951,\n 1.2249066117658955,\n 156.78804559089772,\n 53.66835548728355,\n 73.14183287436342,\n 40.21031777315428] # 1st April 2017\n\nmode =0\nnu_H = 0\n\n# Going to make Lw an input for the vKdV as it will really speed things up. 
\ndx = 100\ndt = 10\n\ndx = 10\n# x = np.arange(-2*dx,L_d+dx,dx)\n# h = ics.depth_tanh2(bathy_params, x) # Intended bathy profile\nx, h, spongedist = get_kissme_h(50, kb_start)\n\nz = np.linspace(0, -d, Nz)\nrhoz_s = ics.rho_double_tanh_rayson(betas_s, z)\nLw_s = ics.get_Lw(rhoz_s, z, z0=max(h), mode=0)\n\nprint(Lw_s)\nprint(spongedist)\n", "178852.84114035743\n50\n83783.35651337773\n155050.0\n" ], [ "\ndxs = [800, 400, 200, 100, 50] # Note this is not necessary, it is set by the KISSME bathy currently.\ndxs = [50] # Note this is not necessary, it is set by the KISSME bathy currently.\ndt = 4.\n\nall_vkdv_dx_s = []\n\nall_kdvimex_dx_s = []\n\nfor dx in dxs:\n \n print(' ')\n print('Running dx={}'.format(dx))\n print(' ')\n \n mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_s, 'vkdv'))\n all_vkdv_dx_s.append(mykdv)\n \n print(' ')\n print('Completed dx={}'.format(dx))\n print(' ')\n ", " \nRunning dx=50\n \n178852.84114035743\n50\nCalculating eigenfunctions...\n0.0 % complete...\n5.0 % complete...\n10.0 % complete...\n15.0 % complete...\n20.0 % complete...\n25.0 % complete...\n30.0 % complete...\n35.0 % complete...\n40.0 % complete...\n45.0 % complete...\n50.0 % complete...\n55.0 % complete...\n60.0 % complete...\n65.0 % complete...\n70.0 % complete...\n75.0 % complete...\n80.0 % complete...\n85.0 % complete...\n90.0 % complete...\n95.0 % complete...\n100.0 % complete...\nCalculating nonlinear structure functions...\n0.0 % complete...\n5.0 % complete...\n10.0 % complete...\n15.0 % complete...\n20.0 % complete...\n25.0 % complete...\n30.0 % complete...\n35.0 % complete...\n40.0 % complete...\n45.0 % complete...\n50.0 % complete...\n55.0 % complete...\n60.0 % complete...\n65.0 % complete...\n70.0 % complete...\n75.0 % complete...\n80.0 % complete...\n85.0 % complete...\n90.0 % complete...\n95.0 % complete...\n100.0 % complete...\nCalculating buoyancy coefficients...\nDone with dx=50 and dt=4.0\n \nCompleted dx=50\n \n" ] ], [ [ "# Just double check that vKdV used the correct bathy", "_____no_output_____" ] ], [ [ "x, h, spongedist = get_kissme_h(50, kb_start)\n\n# h = 0*x+d\n \nplt.figure(figsize=(9,5))\nplt.plot(x, h, 'b', label='Intended bathy', linewidth=2)\nplt.plot(all_vkdv_dx_s[-1].x, all_vkdv_dx_s[-1].h, 'r--', label='Actual vKdV bathy')\nplt.ylabel('h (m)')\nplt.xlabel('x (m)')\nplt.title('vKdV bathy')\nplt.legend()\nplt.show()\n", "178852.84114035743\n50\n" ], [ "import importlib, iwaves\nimportlib.reload(iwaves.utils.plot)\n\nfrom iwaves.utils.plot import vKdV_plot\n\nf = vKdV_plot(all_vkdv_dx_s[-1])\nf.savefig('Draft for ch3.png')", "_____no_output_____" ], [ "\nfull_lims = (0, 230000)\nzoom_lims_vkdv = (50000, 100000)\nzoom_lims_vkdv2 = (110000, 160000)\nzoom_lims_y = (-70, 40)\n\n###########################\n##### KISSME\nplt.figure(figsize=(12,5))\nax=plt.subplot(131)\nfor mykdv in all_vkdv_dx_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)\nplt.xlim(full_lims)\nplt.ylim(zoom_lims_y)\nplt.ylabel('A (m)')\nplt.xlabel('x (m)')\nplt.title('KISSME vKdV full')\nplt.grid()\nplt.legend()\n\nax=plt.subplot(132)\nfor mykdv in all_vkdv_dx_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)\nplt.xlim(zoom_lims_vkdv)\nplt.ylim(zoom_lims_y)\nplt.xlabel('x (m)')\nplt.title('KISSME vKdV zoom')\nplt.grid()\n\n\nax=plt.subplot(133)\nfor mykdv in all_vkdv_dx_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)\nplt.xlim(zoom_lims_vkdv2)\nplt.ylim(zoom_lims_y)\nplt.xlabel('x (m)')\nplt.title('KISSME vKdV zoom 2')\nplt.grid()\n", "_____no_output_____" ], [ "# 
Compute the errors\nX = np.arange(0,L_d, 10.)\nnx = X.shape[0]\nndx = len(dxs)\n\ndef get_rms_error(mykdv, nd):\n \n solns = np.zeros((nd, nx))\n for ii, mykdv in enumerate(mykdv):\n Fx = pchip(mykdv.x, mykdv.B)\n solns[ii,:] = Fx(X)\n\n # Compute the error between each solution\n #err = np.diff(solns, axis=0)\n err = solns - solns[-1,:]\n\n err_rms = np.linalg.norm(err, ord=2, axis=1) # L2-norm\n #err_rms_w = np.sqrt(np.mean(err**2,axis=1))\n\n return err_rms\n\nerr_rms_vkdv_s = get_rms_error(all_vkdv_dx_s, ndx)\n\nerr_rms_kdvimex_s = get_rms_error(all_kdvimex_dx_s, ndx)\n", "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\scipy\\interpolate\\_cubic.py:288: RuntimeWarning: overflow encountered in true_divide\n whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\scipy\\interpolate\\_cubic.py:288: RuntimeWarning: invalid value encountered in add\n whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\scipy\\interpolate\\_cubic.py:288: RuntimeWarning: overflow encountered in add\n whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)\n" ], [ "\ndef make_dx_convergence_plot(kdv_s, err_s, tit):\n \n plt.figure(figsize=(9,8))\n plt.loglog(dxs[:-1],err_s[:-1],'ko')\n plt.xlim(2e1,2e3)\n plt.ylim(1e-1,5e3)\n plt.grid(b=True)\n x0 = np.array([50,100.])\n plt.plot(x0, 100/x0[0]**2*x0**2, 'k--')\n plt.plot(x0, 100/x0[0]**1*x0**1, 'k:')\n plt.ylabel('L2-norm Error [m]')\n plt.xlabel('$\\Delta x$ [m]')\n plt.title(tit)\n \n alpha_s = -2*kdv_s[0].c1*kdv_s[0].r10 \n beta_s = -1*kdv_s[0].r01\n \n print(type(alpha_s)) \n \n if not type(alpha_s) == np.float64:\n\n plt.legend((r'$\\alpha$ = (%3.4f,%3.4f), $\\beta$ = (%3.4f,%3.4f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),\n ), loc='lower right')\n else:\n \n plt.legend((r'$\\alpha$ = (%3.4f), $\\beta$ = (%3.4f)'%(alpha_s, beta_s),), loc='lower right')\n \nmake_dx_convergence_plot(all_kdvimex_dx_s, err_rms_kdvimex_s, 'IMEX')\nmake_dx_convergence_plot(all_vkdv_dx_s, err_rms_vkdv_s, 'vKdV')\n", "<class 'numpy.float64'>\n<class 'numpy.ndarray'>\n" ], [ "# Delta t comparison\ndts = [20,10.,5,2.5,1.25,0.6,0.3]\ndx = 50.\n\nall_vkdv_dt_w = []\nall_vkdv_dt_s = []\n\nall_kdvimex_dt_w = []\nall_kdvimex_dt_s = []\n\nfor dt in dts:\n \n print(' ')\n print('Running dt={}'.format(dt))\n print(' ')\n \n mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_s, 'imex'))\n all_kdvimex_dt_s.append(mykdv)\n \n mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_s, 'vkdv'))\n all_vkdv_dt_s.append(mykdv)\n \n print(' ')\n print('Completed dt={}'.format(dt))\n print(' ')\n ", " \nRunning dt=20\n \nDone with dx=50.0 and dt=20\n234499.58941895614\n50.0\nCalculating eigenfunctions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating nonlinear structure functions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating buoyancy coefficients...\nDone with dx=50.0 and dt=20\n \nCompleted dt=20\n \n \nRunning dt=10.0\n \nDone with dx=50.0 and dt=10.0\n234499.58941895614\n50.0\nCalculating eigenfunctions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating nonlinear structure functions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating buoyancy coefficients...\nDone with dx=50.0 and 
dt=10.0\n \nCompleted dt=10.0\n \n \nRunning dt=5\n \nDone with dx=50.0 and dt=5\n234499.58941895614\n50.0\nCalculating eigenfunctions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating nonlinear structure functions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating buoyancy coefficients...\nDone with dx=50.0 and dt=5\n \nCompleted dt=5\n \n \nRunning dt=2.5\n \nDone with dx=50.0 and dt=2.5\n234499.58941895614\n50.0\nCalculating eigenfunctions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating nonlinear structure functions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating buoyancy coefficients...\nDone with dx=50.0 and dt=2.5\n \nCompleted dt=2.5\n \n \nRunning dt=1.25\n \nDone with dx=50.0 and dt=1.25\n234499.58941895614\n50.0\nCalculating eigenfunctions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating nonlinear structure functions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating buoyancy coefficients...\nDone with dx=50.0 and dt=1.25\n \nCompleted dt=1.25\n \n \nRunning dt=0.6\n \nDone with dx=50.0 and dt=0.6\n234499.58941895614\n50.0\nCalculating eigenfunctions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating nonlinear structure functions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating buoyancy coefficients...\nDone with dx=50.0 and dt=0.6\n \nCompleted dt=0.6\n \n \nRunning dt=0.3\n \nDone with dx=50.0 and dt=0.3\n234499.58941895614\n50.0\nCalculating eigenfunctions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating nonlinear structure functions...\n0.0 % complete...\n25.0 % complete...\n50.0 % complete...\n75.0 % complete...\n100.0 % complete...\nCalculating buoyancy coefficients...\nDone with dx=50.0 and dt=0.3\n \nCompleted dt=0.3\n \n" ], [ "###########################\n##### SUMMER\n\nfull_lims = (0, 230000)\nzoom_lims_imex = (150000, 230000)\nzoom_lims_vkdv = (150000, 230000)\nzoom_lims_y = (-30, 30)\n\n###########################\n##### IMEX\nplt.figure(figsize=(12,5))\nax=plt.subplot(121)\nfor mykdv in all_kdvimex_dt_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)\nplt.ylabel('A (m)')\nplt.xlabel('x (m)')\nplt.title('Summer IMEX full') \nplt.xlim((full_lims))\nplt.grid()\n\nax=plt.subplot(122)\nfor mykdv in all_kdvimex_dt_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)\n \nplt.ylim(zoom_lims_y)\nplt.xlim((zoom_lims_imex))\nplt.xlabel('x (m)')\nplt.title('Summer IMEX zoom')\nplt.grid()\n\n###########################\n##### vKdV\nplt.figure(figsize=(12,5))\nax=plt.subplot(121)\nfor mykdv in all_vkdv_dt_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)\nplt.ylabel('A (m)')\nplt.xlabel('x (m)')\nplt.title('Summer vKdV full') \nplt.xlim((full_lims))\nplt.grid()\n\nax=plt.subplot(122)\nfor mykdv in all_vkdv_dt_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)\n \nplt.ylim(zoom_lims_y)\nplt.xlim((zoom_lims_vkdv))\nplt.xlabel('x (m)')\nplt.title('Summer vKdV zoom')\nplt.grid()\nplt.show()\n", "_____no_output_____" ], [ "ndt = len(dts)\n\nerr_rms_vkdv_dt_s = get_rms_error(all_vkdv_dt_s, 
ndt)\n\nerr_rms_kdvimex_dt_s = get_rms_error(all_kdvimex_dt_s, ndt)\n", "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\scipy\\interpolate\\_cubic.py:288: RuntimeWarning: overflow encountered in true_divide\n whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\scipy\\interpolate\\_cubic.py:288: RuntimeWarning: overflow encountered in add\n whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\scipy\\interpolate\\_cubic.py:288: RuntimeWarning: invalid value encountered in add\n whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)\n" ], [ "\ndef make_dt_convergence_plot(kdv_s, err_s, tit):\n\n plt.figure(figsize=(9,8))\n plt.loglog(dts[:-1],err_s[:-1],'kd', markersize=6)\n plt.xlim(0,0.5e2)\n plt.ylim(1e-2,1e3)\n plt.grid(b=True)\n x0 = np.array([5,20])\n plt.plot(x0, 10/x0[0]**2*x0**2, 'k--')\n plt.plot(x0, 10/x0[0]**1*x0**1, 'k:')\n #plt.ylabel('L2-norm Error [m]')\n plt.xlabel('$\\Delta t$ [s]')\n plt.title(tit)\n \n plt.text(0.05,0.95,'(b)',transform=ax.transAxes)\n alpha_s = -2*kdv_s[0].c1*kdv_s[0].r10 \n beta_s = -1*kdv_s[0].r01\n\n if not type(alpha_s) == np.float64:\n\n plt.legend((r'$\\alpha$ = (%3.4f,%3.4f), $\\beta$ = (%3.0f,%3.0f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),\n ), loc='lower right')\n else:\n \n plt.legend((r'$\\alpha$ = (%3.4f), $\\beta$ = (%3.0f)'%(alpha_s, beta_s),\n ), loc='lower right')\n \n plt.savefig('../FIGURES/vkdv_convergence_dxdt.png',dpi=150)\n plt.savefig('../FIGURES/vkdv_convergence_dxdt.pdf',dpi=150)\n \nmake_dt_convergence_plot(all_kdvimex_dt_s, err_rms_kdvimex_dt_s, 'KdV IMEX')\nmake_dt_convergence_plot(all_vkdv_dt_s, err_rms_vkdv_dt_s, 'vKdV')\n", "<ipython-input-91-3b71a6ddff0e>:5: UserWarning: Attempted to set non-positive left xlim on a log-scaled axis.\nInvalid limit will be ignored.\n plt.xlim(0,0.5e2)\n<ipython-input-91-3b71a6ddff0e>:5: UserWarning: Attempted to set non-positive left xlim on a log-scaled axis.\nInvalid limit will be ignored.\n plt.xlim(0,0.5e2)\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c53e6d1c3b8c7a7b3f3d4dcf1180198ad12ebe
21,059
ipynb
Jupyter Notebook
Growth_Hacking_3.ipynb
Programming-Skills/NLP
c1857fb03b54209836dc7474dbca3d2a268f4e6a
[ "Unlicense" ]
null
null
null
Growth_Hacking_3.ipynb
Programming-Skills/NLP
c1857fb03b54209836dc7474dbca3d2a268f4e6a
[ "Unlicense" ]
null
null
null
Growth_Hacking_3.ipynb
Programming-Skills/NLP
c1857fb03b54209836dc7474dbca3d2a268f4e6a
[ "Unlicense" ]
null
null
null
35.393277
242
0.385773
[ [ [ "# libraries\nimport ndjson\nimport json\nimport pandas as pd\nimport numpy as np\nimport altair as alt\nimport gzip\nimport random\nimport csv\nimport re\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pywaffle import Waffle\n\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.sentiment.util import mark_negation\nnltk.download(\"opinion_lexicon\")\nnltk.download(\"stopwords\")\nnltk.download(\"punkt\")\nnltk.download('averaged_perceptron_tagger')\nfrom nltk.corpus import opinion_lexicon\n\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nsa = SentimentIntensityAnalyzer()\nsa.lexicon\n\nfrom imblearn.under_sampling import RandomUnderSampler \n\n%matplotlib inline", "[nltk_data] Downloading package opinion_lexicon to\n[nltk_data] C:\\Users\\hewin\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package opinion_lexicon is already up-to-date!\n[nltk_data] Downloading package stopwords to\n[nltk_data] C:\\Users\\hewin\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n[nltk_data] Downloading package punkt to\n[nltk_data] C:\\Users\\hewin\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] C:\\Users\\hewin\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package averaged_perceptron_tagger is already up-to-\n[nltk_data] date!\n" ], [ "# open csv and create list with just the reviews\nwith open('small_corpus_vader.csv', 'r') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n reviews = []\n for row in readCSV:\n reviews.append(row)\n\n print(reviews[0:3])", "[['', 'ratings', 'sentiment', 'reviews'], ['1', '1.0', '-0.9511', \"code didn't work, got me a refund.\"], ['2', '1.0', '0.0', 'these do not work at all, all i get is static and they came with nothing in the box to give any help.']]\n" ], [ "reviews = pd.DataFrame(reviews)\nreviews = reviews.rename(columns = reviews.iloc[0]).drop(reviews.index[0])\nreviews = reviews.iloc[:, [1,2,3]]\nreviews", "_____no_output_____" ], [ "reviews['sentiment classification'] = ['Positive' if v > 0.2 else 'Neutral' if v >= -0.2 and v <= 0.2 else 'Negative' for v in reviews['sentiment'].astype(float)]\nreviews", "_____no_output_____" ], [ "reviews['ratings classification'] = ['Positive' if v == 5 else 'Neutral' if v == 2 or v == 3 or v == 4 else 'Negative' for v in reviews['ratings'].astype(float)]\nreviews", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
e7c53f2dbd58bb6bd8a99f1bc8c51770e193cae0
21,651
ipynb
Jupyter Notebook
site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb
RedContritio/docs-l10n
f69a7c0d2157703a26cef95bac34b39ac0250373
[ "Apache-2.0" ]
1
2022-03-29T22:32:18.000Z
2022-03-29T22:32:18.000Z
site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb
RedContritio/docs-l10n
f69a7c0d2157703a26cef95bac34b39ac0250373
[ "Apache-2.0" ]
null
null
null
site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb
RedContritio/docs-l10n
f69a7c0d2157703a26cef95bac34b39ac0250373
[ "Apache-2.0" ]
null
null
null
30.885877
340
0.523348
[ [ [ "##### Copyright 2021 The TF-Agents Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# REINFORCE 代理\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td> <a target=\"_blank\" href=\"https://tensorflow.google.cn/agents/tutorials/6_reinforce_tutorial\"><img src=\"https://tensorflow.google.cn/images/tf_logo_32px.png\">在 TensorFlow.org 上查看</a>\n</td>\n <td> <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb\"><img src=\"https://tensorflow.google.cn/images/colab_logo_32px.png\">在 Google Colab 运行</a>\n</td>\n <td> <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb\"><img src=\"https://tensorflow.google.cn/images/GitHub-Mark-32px.png\">在 Github 上查看源代码</a>\n</td>\n <td> <a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb\"><img src=\"https://tensorflow.google.cn/images/download_logo_32px.png\">下载笔记本</a> </td>\n</table>", "_____no_output_____" ], [ "## 简介", "_____no_output_____" ], [ "本例介绍如何使用 TF-Agents 库在 Cartpole 环境中训练 [REINFORCE](http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf) 代理,与 [DQN 教程](1_dqn_tutorial.ipynb)比较相似。\n\n![Cartpole environment](images/cartpole.png)\n\n我们会引导您完成强化学习 (RL) 流水线中关于训练、评估和数据收集的所有部分。\n", "_____no_output_____" ], [ "## 设置", "_____no_output_____" ], [ "如果尚未安装以下依赖项,请运行以下命令:", "_____no_output_____" ] ], [ [ "!sudo apt-get update\n!sudo apt-get install -y xvfb ffmpeg freeglut3-dev\n!pip install 'imageio==2.4.0'\n!pip install pyvirtualdisplay\n!pip install tf-agents[reverb]\n!pip install pyglet xvfbwrapper\n", "_____no_output_____" ], [ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport base64\nimport imageio\nimport IPython\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport PIL.Image\nimport pyvirtualdisplay\nimport reverb\n\nimport tensorflow as tf\n\nfrom tf_agents.agents.reinforce import reinforce_agent\nfrom tf_agents.drivers import py_driver\nfrom tf_agents.environments import suite_gym\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.networks import actor_distribution_network\nfrom tf_agents.policies import py_tf_eager_policy\nfrom tf_agents.replay_buffers import reverb_replay_buffer\nfrom tf_agents.replay_buffers import reverb_utils\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.utils import common\n\n# Set up a virtual display for rendering OpenAI gym environments.\ndisplay = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()", "_____no_output_____" ] ], [ [ "## 超参数", "_____no_output_____" ] ], [ [ "env_name = \"CartPole-v0\" # @param {type:\"string\"}\nnum_iterations = 250 # @param 
{type:\"integer\"}\ncollect_episodes_per_iteration = 2 # @param {type:\"integer\"}\nreplay_buffer_capacity = 2000 # @param {type:\"integer\"}\n\nfc_layer_params = (100,)\n\nlearning_rate = 1e-3 # @param {type:\"number\"}\nlog_interval = 25 # @param {type:\"integer\"}\nnum_eval_episodes = 10 # @param {type:\"integer\"}\neval_interval = 50 # @param {type:\"integer\"}", "_____no_output_____" ] ], [ [ "## 环境\n\nRL 环境用于描述要解决的任务或问题。在 TF-Agents 中,使用 `suites` 可以轻松创建标准环境。我们提供了不同的 `suites`,只需提供一个字符串环境名称,即可帮助您从来源加载环境,如 OpenAI Gym、Atari、DM Control 等。\n\n现在,我们试试从 OpenAI Gym 套件加载 CartPole 环境。", "_____no_output_____" ] ], [ [ "env = suite_gym.load(env_name)", "_____no_output_____" ] ], [ [ "我们可以渲染此环境以查看其形式:小车上连接一条自由摆动的长杆。目标是向右或向左移动小车,使长杆保持朝上。", "_____no_output_____" ] ], [ [ "#@test {\"skip\": true}\nenv.reset()\nPIL.Image.fromarray(env.render())", "_____no_output_____" ] ], [ [ "在该环境中,`time_step = environment.step(action)` 语句用于执行 `action`。返回的 `TimeStep` 元组包含该操作在环境中的下一个观测值和奖励。环境中的 `time_step_spec()` 和 `action_spec()` 方法分别返回 `time_step` 和 `action` 的规范(类型、形状、边界)。", "_____no_output_____" ] ], [ [ "print('Observation Spec:')\nprint(env.time_step_spec().observation)\nprint('Action Spec:')\nprint(env.action_spec())", "_____no_output_____" ] ], [ [ "我们可以看到,该观测值是一个包含 4 个浮点数的数组:小车的位置和速度,长杆的角度位置和速度。由于只有两个操作(向左或向右移动),因此,`action_spec` 是一个标量,其中 0 表示“向左移动”,1 表示“向右移动”。", "_____no_output_____" ] ], [ [ "time_step = env.reset()\nprint('Time step:')\nprint(time_step)\n\naction = np.array(1, dtype=np.int32)\n\nnext_time_step = env.step(action)\nprint('Next time step:')\nprint(next_time_step)", "_____no_output_____" ] ], [ [ "通常,我们会创建两个环境:一个用于训练,另一个用于评估。大部分环境都是使用纯 Python 语言编写的,但是使用 `TFPyEnvironment` 包装器可轻松将其转换至 TensorFlow 环境。原始环境的 API 使用 NumPy 数组,但凭借 `TFPyEnvironment`,这些数组可以与 `Tensors` 相互转换,从而更轻松地与 TensorFlow 策略和代理交互。\n", "_____no_output_____" ] ], [ [ "train_py_env = suite_gym.load(env_name)\neval_py_env = suite_gym.load(env_name)\n\ntrain_env = tf_py_environment.TFPyEnvironment(train_py_env)\neval_env = tf_py_environment.TFPyEnvironment(eval_py_env)", "_____no_output_____" ] ], [ [ "## 代理\n\n我们用于解决 RL 问题的算法以 `Agent` 形式表示。除了 REINFORCE 代理,TF-Agents 还为各种 `Agents` 提供了标准实现,如 [DQN](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf)、[DDPG](https://arxiv.org/pdf/1509.02971.pdf)、[TD3](https://arxiv.org/pdf/1802.09477.pdf)、[PPO](https://arxiv.org/abs/1707.06347) 和 [SAC](https://arxiv.org/abs/1801.01290)。\n\n要创建 REINFORCE 代理,首先需要有一个通过环境提供的观测值,学会预测操作的 `Actor Network`。\n\n使用观测值和操作的规范,我们可以轻松创建 `Actor Network`。我们也可以在网络中指定层,本例中是设置为 `ints` 元祖(表示每个隐藏层的大小)的 `fc_layer_params` 参数(请参阅上面的“超参数”部分)。\n", "_____no_output_____" ] ], [ [ "actor_net = actor_distribution_network.ActorDistributionNetwork(\n train_env.observation_spec(),\n train_env.action_spec(),\n fc_layer_params=fc_layer_params)", "_____no_output_____" ] ], [ [ "我们还需要一个 `optimizer` 来训练刚才创建的网络,以及一个跟踪网络更新次数的 `train_step_counter` 变量。\n", "_____no_output_____" ] ], [ [ "optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\ntrain_step_counter = tf.Variable(0)\n\ntf_agent = reinforce_agent.ReinforceAgent(\n train_env.time_step_spec(),\n train_env.action_spec(),\n actor_network=actor_net,\n optimizer=optimizer,\n normalize_returns=True,\n train_step_counter=train_step_counter)\ntf_agent.initialize()", "_____no_output_____" ] ], [ [ "## 策略\n\n在 TF-Agents 中,策略是 RL 中的标准策略概念:给订 `time_step` 来产生操作或操作的分布。主要方法是 `policy_step = policy.step(time_step)`,其中 `policy_step` 是命名元祖 `PolicyStep(action, state, info)`。`policy_step.action` 是要应用到环境的 `action`,`state` 表示有状态 (RNN) 
策略的状态,而 `info` 可能包含辅助信息(如操作的对数几率)。\n\n代理包含两个策略:一个是用于评估/部署的主要策略 (agent.policy),另一个是用于数据收集的策略 (agent.collect_policy)。", "_____no_output_____" ] ], [ [ "eval_policy = tf_agent.policy\ncollect_policy = tf_agent.collect_policy", "_____no_output_____" ] ], [ [ "## 指标和评估\n\n用于评估策略的最常用指标是平均回报。回报就是在环境中运行策略时,某个片段获得的奖励总和,我们通常会计算几个片段的平均值。计算平均回报指标的代码如下。\n", "_____no_output_____" ] ], [ [ "#@test {\"skip\": true}\ndef compute_avg_return(environment, policy, num_episodes=10):\n\n total_return = 0.0\n for _ in range(num_episodes):\n\n time_step = environment.reset()\n episode_return = 0.0\n\n while not time_step.is_last():\n action_step = policy.action(time_step)\n time_step = environment.step(action_step.action)\n episode_return += time_step.reward\n total_return += episode_return\n\n avg_return = total_return / num_episodes\n return avg_return.numpy()[0]\n\n\n# Please also see the metrics module for standard implementations of different\n# metrics.", "_____no_output_____" ] ], [ [ "## 回放缓冲区\n\n为了跟踪从环境收集的数据,我们将使用 [Reverb](https://deepmind.com/research/open-source/Reverb),这是 Deepmind 打造的一款高效、可扩展且易于使用的回放系统。它会在我们收集轨迹时存储经验数据,并在训练期间使用。\n\n此回放缓冲区使用描述要存储的张量的规范进行构造,可以使用 `tf_agent.collect_data_spec` 从代理获取这些张量。", "_____no_output_____" ] ], [ [ "table_name = 'uniform_table'\nreplay_buffer_signature = tensor_spec.from_spec(\n tf_agent.collect_data_spec)\nreplay_buffer_signature = tensor_spec.add_outer_dim(\n replay_buffer_signature)\ntable = reverb.Table(\n table_name,\n max_size=replay_buffer_capacity,\n sampler=reverb.selectors.Uniform(),\n remover=reverb.selectors.Fifo(),\n rate_limiter=reverb.rate_limiters.MinSize(1),\n signature=replay_buffer_signature)\n\nreverb_server = reverb.Server([table])\n\nreplay_buffer = reverb_replay_buffer.ReverbReplayBuffer(\n tf_agent.collect_data_spec,\n table_name=table_name,\n sequence_length=None,\n local_server=reverb_server)\n\nrb_observer = reverb_utils.ReverbAddEpisodeObserver(\n replay_buffer.py_client,\n table_name,\n replay_buffer_capacity\n)", "_____no_output_____" ] ], [ [ "对于大多数代理,`collect_data_spec` 是一个 `Trajectory` 命名元组,其中包含观测值、操作和奖励等。", "_____no_output_____" ], [ "## 数据收集\n\n当 REINFORCE 从全部片段中学习时,我们使用给定数据收集策略定义一个函数来收集片段,并在回放缓冲区中将数据(观测值、操作、奖励等)保存为轨迹。这里我们使用“PyDriver”运行经验收集循环。您可以在我们的 [driver 教程](https://tensorflow.google.cn/agents/tutorials/4_drivers_tutorial)中了解到有关 TF Agents driver 的更多信息。", "_____no_output_____" ] ], [ [ "#@test {\"skip\": true}\n\ndef collect_episode(environment, policy, num_episodes):\n\n driver = py_driver.PyDriver(\n environment,\n py_tf_eager_policy.PyTFEagerPolicy(\n policy, use_tf_function=True),\n [rb_observer],\n max_episodes=num_episodes)\n initial_time_step = environment.reset()\n driver.run(initial_time_step)", "_____no_output_____" ] ], [ [ "## 训练代理\n\n训练循环包括从环境收集数据和优化代理的网络。在训练过程中,我们偶尔会评估代理的策略,看看效果如何。\n\n运行下面的代码大约需要 3 分钟。", "_____no_output_____" ] ], [ [ "#@test {\"skip\": true}\ntry:\n %%time\nexcept:\n pass\n\n# (Optional) Optimize by wrapping some of the code in a graph using TF function.\ntf_agent.train = common.function(tf_agent.train)\n\n# Reset the train step\ntf_agent.train_step_counter.assign(0)\n\n# Evaluate the agent's policy once before training.\navg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)\nreturns = [avg_return]\n\nfor _ in range(num_iterations):\n\n # Collect a few episodes using collect_policy and save to the replay buffer.\n collect_episode(\n train_py_env, tf_agent.collect_policy, collect_episodes_per_iteration)\n\n # Use data from the buffer and update the agent's network.\n iterator = 
iter(replay_buffer.as_dataset(sample_batch_size=1))\n trajectories, _ = next(iterator)\n train_loss = tf_agent.train(experience=trajectories) \n\n replay_buffer.clear()\n\n step = tf_agent.train_step_counter.numpy()\n\n if step % log_interval == 0:\n print('step = {0}: loss = {1}'.format(step, train_loss.loss))\n\n if step % eval_interval == 0:\n avg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)\n print('step = {0}: Average Return = {1}'.format(step, avg_return))\n returns.append(avg_return)", "_____no_output_____" ] ], [ [ "## 可视化\n", "_____no_output_____" ], [ "### 绘图\n\n我们可以通过绘制回报与全局步骤的图形来了解代理的性能。在 `Cartpole-v0` 中,长杆每停留一个时间步骤,环境就会提供一个 +1 的奖励,由于最大步骤数量为 200,所以可以获得的最大回报也是 200。", "_____no_output_____" ] ], [ [ "#@test {\"skip\": true}\n\nsteps = range(0, num_iterations + 1, eval_interval)\nplt.plot(steps, returns)\nplt.ylabel('Average Return')\nplt.xlabel('Step')\nplt.ylim(top=250)", "_____no_output_____" ] ], [ [ "### 视频", "_____no_output_____" ], [ "在每个步骤渲染环境有助于可视化代理的性能。在此之前,我们先创建一个函数,在该 Colab 中嵌入视频。", "_____no_output_____" ] ], [ [ "def embed_mp4(filename):\n \"\"\"Embeds an mp4 file in the notebook.\"\"\"\n video = open(filename,'rb').read()\n b64 = base64.b64encode(video)\n tag = '''\n <video width=\"640\" height=\"480\" controls>\n <source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\">\n Your browser does not support the video tag.\n </video>'''.format(b64.decode())\n\n return IPython.display.HTML(tag)", "_____no_output_____" ] ], [ [ "以下代码用于为代理可视化几个片段的策略:", "_____no_output_____" ] ], [ [ "num_episodes = 3\nvideo_filename = 'imageio.mp4'\nwith imageio.get_writer(video_filename, fps=60) as video:\n for _ in range(num_episodes):\n time_step = eval_env.reset()\n video.append_data(eval_py_env.render())\n while not time_step.is_last():\n action_step = tf_agent.policy.action(time_step)\n time_step = eval_env.step(action_step.action)\n video.append_data(eval_py_env.render())\n\nembed_mp4(video_filename)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7c54673a08a70f5a7c2e0b65c63eff600580be4
87,627
ipynb
Jupyter Notebook
Classification/Linear Models/LogisticRegression_MinMaxScaler_PolynomialFeatures.ipynb
mohityogesh44/ds-seed
e124f0078faf97568951e19e4302451ad0c7cf6c
[ "Apache-2.0" ]
null
null
null
Classification/Linear Models/LogisticRegression_MinMaxScaler_PolynomialFeatures.ipynb
mohityogesh44/ds-seed
e124f0078faf97568951e19e4302451ad0c7cf6c
[ "Apache-2.0" ]
null
null
null
Classification/Linear Models/LogisticRegression_MinMaxScaler_PolynomialFeatures.ipynb
mohityogesh44/ds-seed
e124f0078faf97568951e19e4302451ad0c7cf6c
[ "Apache-2.0" ]
null
null
null
105.447653
47,908
0.828934
[ [ [ "# LogisticRegression with MinMaxScaler & PolynomialTransformer\n", "_____no_output_____" ], [ "**This Code template is for the Classification task using LogisticRegression with MinMaxScaler feature scaling technique and PolynomialTransformer as Feature Transformation Technique in a pipeline.**", "_____no_output_____" ], [ "### Required Packages", "_____no_output_____" ] ], [ [ "!pip install imblearn", "_____no_output_____" ], [ "import warnings as wr\nimport numpy as np \nimport pandas as pd \nimport seaborn as sns\nimport matplotlib.pyplot as plt \nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import MinMaxScaler,PolynomialFeatures\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.linear_model import LogisticRegression \nfrom imblearn.over_sampling import RandomOverSampler\nfrom sklearn.metrics import classification_report,confusion_matrix\nwr.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "### Initialization\n\nFilepath of CSV file", "_____no_output_____" ] ], [ [ "#filepath\nfile_path= \"\"", "_____no_output_____" ] ], [ [ "List of features which are required for model training .", "_____no_output_____" ] ], [ [ "#x_values\nfeatures=['']", "_____no_output_____" ] ], [ [ "Target feature for prediction.", "_____no_output_____" ] ], [ [ "#y_value\ntarget=''", "_____no_output_____" ] ], [ [ "### Data Fetching\n\nPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.\n\nWe will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.", "_____no_output_____" ] ], [ [ "df=pd.read_csv(file_path) #reading file\ndf.head()#displaying initial entries", "_____no_output_____" ], [ "print('Number of rows are :',df.shape[0], ',and number of columns are :',df.shape[1])", "Number of rows are : 768 ,and number of columns are : 9\n" ], [ "df.columns.tolist()\n", "_____no_output_____" ] ], [ [ "### Data Preprocessing\n\nSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.\n", "_____no_output_____" ] ], [ [ "def NullClearner(df):\n if(isinstance(df, pd.Series) and (df.dtype in [\"float64\",\"int64\"])):\n df.fillna(df.mean(),inplace=True)\n return df\n elif(isinstance(df, pd.Series)):\n df.fillna(df.mode()[0],inplace=True)\n return df\n else:return df\ndef EncodeX(df):\n return pd.get_dummies(df)\ndef EncodeY(df):\n if len(df.unique())<=2:\n return df\n else:\n un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')\n df=LabelEncoder().fit_transform(df)\n EncodedT=[xi for xi in range(len(un_EncodedT))]\n print(\"Encoded Target: {} to {}\".format(un_EncodedT,EncodedT))\n return df", "_____no_output_____" ] ], [ [ "#### Correlation Map\n\nIn order to check the correlation between the features, we will plot a correlation matrix. 
It is effective in summarizing a large amount of data where the goal is to see patterns.", "_____no_output_____" ] ], [ [ "plt.figure(figsize = (20, 12))\ncorr = df.corr()\nmask = np.triu(np.ones_like(corr, dtype = bool))\nsns.heatmap(corr, mask = mask, linewidths = 1, annot = True, fmt = \".2f\")\nplt.show()", "_____no_output_____" ] ], [ [ "### Feature Selections\n\nIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.\n\nWe will assign all the required input features to X and target/outcome to Y.", "_____no_output_____" ] ], [ [ "#spliting data into X(features) and Y(Target)\n\nX=df[features] \nY=df[target] ", "_____no_output_____" ], [ "x=X.columns.to_list()\nfor i in x:\n X[i]=NullClearner(X[i]) \nX=EncodeX(X)\nY=EncodeY(NullClearner(Y))\nX.head()", "_____no_output_____" ] ], [ [ "#### Distribution Of Target Variable", "_____no_output_____" ] ], [ [ "plt.figure(figsize = (10,6))\nsns.countplot(Y,palette='pastel')", "_____no_output_____" ] ], [ [ "### Data Splitting\n\nThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.", "_____no_output_____" ] ], [ [ "#we can choose randomstate and test_size as over requerment\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123) #performing datasplitting", "_____no_output_____" ] ], [ [ "#### Handling Target Imbalance\n\nThe challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important.\n\nOne approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class.We will perform overspampling using imblearn library. ", "_____no_output_____" ] ], [ [ "X_train,y_train = RandomOverSampler(random_state=123).fit_resample(X_train, y_train)", "_____no_output_____" ] ], [ [ "\n### Feature Transformation\n\n**Polynomial Features**\n\nGenerate polynomial and interaction features.\n\nGenerate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].\n\nRefer [API](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) for the parameters\n\n### Feature Rescaling\n**MinMaxScaler**\n\n* We use MinMaxScaler to scale the data. The MinMaxScaler scaler scale the data beetween 0 to 1\n* formula for scaling (actual-min / max-min)\n* We will fit an object of MinMaxScaler to training data then transform the same data by fit_transform(X_train) method\n\n## Model\n**Logistic regression :**\n\nLogistic regression is a statistical model that in its basic form uses a logistic function to model a binary dependent variable, although many more complex extensions exist. 
In regression analysis, logistic regression (or logit regression) is estimating the parameters of a logistic model (a form of binary regression). This can be extended to model several classes of events.\n\n#### Model Tuning Parameters\n1. penalty : {‘l1’, ‘l2’, ‘elasticnet’, ‘none’}, default=’l2’\nUsed to specify the norm used in the penalization. The ‘newton-cg’, ‘sag’ and ‘lbfgs’ solvers support only l2 penalties. ‘elasticnet’ is only supported by the ‘saga’ solver. If ‘none’ (not supported by the liblinear solver), no regularization is applied.\n\n2. C : float, default=1.0\nInverse of regularization strength; must be a positive float. Like in support vector machines, smaller values specify stronger regularization.\n\n3. tol : float, default=1e-4\nTolerance for stopping criteria.\n\n4. solver : {‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’}, default=’lbfgs’\nAlgorithm to use in the optimization problem.\nFor small datasets, ‘liblinear’ is a good choice, whereas ‘sag’ and ‘saga’ are faster for large ones.\nFor multiclass problems, only ‘newton-cg’, ‘sag’, ‘saga’ and ‘lbfgs’ handle multinomial loss; ‘liblinear’ is limited to one-versus-rest schemes.\n\n‘newton-cg’, ‘lbfgs’, ‘sag’ and ‘saga’ handle L2 or no penalty.\n‘liblinear’ and ‘saga’ also handle L1 penalty.\n‘saga’ also supports ‘elasticnet’ penalty.\n‘liblinear’ does not support setting penalty='none'.\n5. random_state : int, RandomState instance, default=None\nUsed when solver == ‘sag’, ‘saga’ or ‘liblinear’ to shuffle the data.\n\n6. max_iter : int, default=100\nMaximum number of iterations taken for the solvers to converge.\n\n7. multi_class : {‘auto’, ‘ovr’, ‘multinomial’}, default=’auto’\nIf the option chosen is ‘ovr’, then a binary problem is fit for each label. For ‘multinomial’ the loss minimised is the multinomial loss fit across the entire probability distribution, even when the data is binary. ‘multinomial’ is unavailable when solver=’liblinear’. ‘auto’ selects ‘ovr’ if the data is binary, or if solver=’liblinear’, and otherwise selects ‘multinomial’.\n\n8. verbose : int, default=0\nFor the liblinear and lbfgs solvers set verbose to any positive number for verbosity.\n\n9. n_jobs : int, default=None\nNumber of CPU cores used when parallelizing over classes if multi_class=’ovr’”. This parameter is ignored when the solver is set to ‘liblinear’ regardless of whether ‘multi_class’ is specified or not. None means 1 unless in a joblib.parallel_backend context. 
-1 means using all processors", "_____no_output_____" ] ], [ [ "# Build Model here\nmodel=make_pipeline(MinMaxScaler(),PolynomialFeatures(),LogisticRegression(random_state=42))\nmodel.fit(X_train,y_train)", "_____no_output_____" ] ], [ [ "#### Model Accuracy\nscore() method return the mean accuracy on the given test data and labels.\n\nIn multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.", "_____no_output_____" ] ], [ [ "print(\"Accuracy score {:.2f} %\\n\".format(model.score(X_test,y_test)*100))", "Accuracy score 79.22 %\n\n" ], [ "#prediction on testing set\nprediction=model.predict(X_test)", "_____no_output_____" ] ], [ [ "#### Confusion Matrix\n\nA confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.", "_____no_output_____" ] ], [ [ "\ncf_matrix=confusion_matrix(y_test,prediction)\nplt.figure(figsize=(7,6))\nsns.heatmap(cf_matrix,annot=True,fmt=\"d\")\n", "_____no_output_____" ] ], [ [ "#### Classification Report\n\nA Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.\n\n* **where**:\n - Precision:- Accuracy of positive predictions.\n - Recall:- Fraction of positives that were correctly identified.\n - f1-score:- percent of positive predictions were correct\n - support:- Support is the number of actual occurrences of the class in the specified dataset.", "_____no_output_____" ] ], [ [ "print(classification_report(y_test,model.predict(X_test)))", " precision recall f1-score support\n\n 0 0.83 0.83 0.83 96\n 1 0.72 0.72 0.72 58\n\n accuracy 0.79 154\n macro avg 0.78 0.78 0.78 154\nweighted avg 0.79 0.79 0.79 154\n\n" ] ], [ [ "#### Creator: Akshar Nerkar , Github: [Profile](https://github.com/Akshar777)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7c550d1dcc6c972483c63d6480e5ce40239d266
65,865
ipynb
Jupyter Notebook
examples/example.ipynb
ECRL/ECNet
85bd81862e705440bd8dc5fa843465bfd4101048
[ "MIT" ]
5
2020-05-13T02:03:11.000Z
2021-02-05T19:01:48.000Z
examples/example.ipynb
TJKessler/ECNet
e7fe3e1674a3dd66f46f61337374ca8778031e34
[ "MIT" ]
5
2017-09-12T07:17:29.000Z
2017-09-13T21:00:50.000Z
examples/example.ipynb
ecrl/ecnet
85bd81862e705440bd8dc5fa843465bfd4101048
[ "MIT" ]
2
2021-06-04T05:07:06.000Z
2022-03-27T17:31:43.000Z
210.43131
33,156
0.900144
[ [ [ "from ecnet.datasets import load_cn\nfrom ecnet.tasks.feature_selection import select_rfr\nfrom ecnet.tasks.parameter_tuning import tune_batch_size, tune_model_architecture,\\\n tune_training_parameters\nfrom ecnet import ECNet\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import median_absolute_error, r2_score\nimport torch\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom copy import deepcopy\nfrom math import sqrt", "_____no_output_____" ], [ "dataset = load_cn(as_dataset=True, backend='alvadesc')\nprint(type(dataset), dataset.desc_vals.shape, dataset.target_vals.shape)", "<class 'ecnet.datasets.structs.QSPRDatasetFromFile'> torch.Size([460, 5305]) torch.Size([460, 1])\n" ], [ "index_train, index_test = train_test_split([i for i in range(len(dataset))],\n test_size=0.2, random_state=42)\ndataset_train = deepcopy(dataset)\ndataset_train.set_index(index_train)\ndataset_test = deepcopy(dataset)\ndataset_test.set_index(index_test)\nprint(dataset_train.desc_vals.shape, dataset_test.desc_vals.shape)", "torch.Size([368, 5305]) torch.Size([92, 5305])\n" ], [ "desc_idx, desc_imp = select_rfr(dataset_train, total_importance=0.95,\n n_estimators=100, n_jobs=4)\ndataset_train.set_desc_index(desc_idx)\ndataset_test.set_desc_index(desc_idx)\ndesc_names = [dataset.desc_names[i] for i in desc_idx]\nprint(dataset_train.desc_vals.shape, dataset_test.desc_vals.shape)\nprint(desc_names[:5], len(desc_names))", "torch.Size([368, 326]) torch.Size([92, 326])\n['SpMaxA_EA(ed)', 'CIC1', 'S3K', 'SssCH2', 'PHI'] 326\n" ], [ "model = ECNet(dataset_train.desc_vals.shape[1], 1, 128, 2)\ntrain_loss, valid_loss = model.fit(\n dataset=dataset_train, valid_size=0.2,verbose=5,\n patience=50, epochs=300, random_state=24\n)", "Epoch: 0 | Train loss: 1431.029512781675 | Valid loss: 9223372036854775807\nEpoch: 5 | Train loss: 256.55278923397975 | Valid loss: 312.9687194824219\nEpoch: 10 | Train loss: 182.27286529541016 | Valid loss: 230.17494201660156\nEpoch: 15 | Train loss: 153.28179630616896 | Valid loss: 207.3500518798828\nEpoch: 20 | Train loss: 159.1366594794656 | Valid loss: 207.48974609375\nEpoch: 25 | Train loss: 140.81340639283056 | Valid loss: 239.57949829101562\nEpoch: 30 | Train loss: 98.12742137260177 | Valid loss: 148.23655700683594\nEpoch: 35 | Train loss: 153.3734633517103 | Valid loss: 200.86534118652344\nEpoch: 40 | Train loss: 109.15588352955929 | Valid loss: 141.3617706298828\nEpoch: 45 | Train loss: 88.7531884122057 | Valid loss: 135.99325561523438\nEpoch: 50 | Train loss: 95.22778156825474 | Valid loss: 199.7128143310547\nEpoch: 55 | Train loss: 95.3749063712399 | Valid loss: 131.777099609375\nEpoch: 60 | Train loss: 76.55440091762414 | Valid loss: 117.02298736572266\nEpoch: 65 | Train loss: 69.95369306551356 | Valid loss: 120.0013198852539\nEpoch: 70 | Train loss: 59.19819425725613 | Valid loss: 103.91657257080078\nEpoch: 75 | Train loss: 85.16970467080876 | Valid loss: 107.87355041503906\nEpoch: 80 | Train loss: 58.32269123622349 | Valid loss: 111.8017349243164\nEpoch: 85 | Train loss: 61.898352344019884 | Valid loss: 138.05780029296875\nEpoch: 90 | Train loss: 59.89323991009978 | Valid loss: 120.66303253173828\nEpoch: 95 | Train loss: 48.192738643308886 | Valid loss: 106.33235931396484\nEpoch: 100 | Train loss: 55.81611524309431 | Valid loss: 121.11176300048828\nEpoch: 105 | Train loss: 52.97307249315742 | Valid loss: 104.79387664794922\nEpoch: 110 | Train loss: 67.76363598570532 | Valid loss: 
108.07382202148438\nEpoch: 115 | Train loss: 66.85969994992627 | Valid loss: 133.162841796875\nEpoch: 120 | Train loss: 42.65270303220165 | Valid loss: 97.49714660644531\nEpoch: 125 | Train loss: 45.72234181644154 | Valid loss: 113.35257720947266\nEpoch: 130 | Train loss: 46.83465768204255 | Valid loss: 94.48330688476562\nEpoch: 135 | Train loss: 43.752784391649726 | Valid loss: 118.69684600830078\nEpoch: 140 | Train loss: 35.358051896906225 | Valid loss: 97.295166015625\nEpoch: 145 | Train loss: 53.58933078999422 | Valid loss: 129.30355834960938\nEpoch: 150 | Train loss: 40.554945770575074 | Valid loss: 104.6352767944336\nEpoch: 155 | Train loss: 35.993613930786545 | Valid loss: 105.04428100585938\nEpoch: 160 | Train loss: 60.37953196415285 | Valid loss: 120.87259674072266\nEpoch: 165 | Train loss: 38.273053928297394 | Valid loss: 110.79714965820312\nEpoch: 170 | Train loss: 46.848397053828855 | Valid loss: 103.561767578125\nEpoch: 175 | Train loss: 36.76983536830565 | Valid loss: 113.00218963623047\nEpoch: 180 | Train loss: 28.529781373990637 | Valid loss: 100.6767349243164\nEpoch: 185 | Train loss: 43.24229708820784 | Valid loss: 107.31328582763672\nEpoch: 190 | Train loss: 27.72144299461728 | Valid loss: 103.23924255371094\nEpoch: 195 | Train loss: 28.648454134156104 | Valid loss: 115.97016143798828\nEpoch: 200 | Train loss: 26.045289526180344 | Valid loss: 115.27000427246094\nEpoch: 205 | Train loss: 25.658692664840594 | Valid loss: 104.42704772949219\nEpoch: 210 | Train loss: 39.044153979035464 | Valid loss: 144.37440490722656\nEpoch: 215 | Train loss: 23.528638100137517 | Valid loss: 103.07978820800781\nEpoch: 220 | Train loss: 29.17349538997728 | Valid loss: 104.57456970214844\nEpoch: 225 | Train loss: 75.41639725042849 | Valid loss: 113.8974609375\n" ], [ "train_loss = [sqrt(l) for l in train_loss][5:]\nvalid_loss = [sqrt(l) for l in valid_loss][5:]\nepoch = [i for i in range(len(train_loss))]\nplt.clf()\nplt.xlabel('Epochs')\nplt.ylabel('Sqrt(Loss)')\nplt.plot(epoch, train_loss, color='blue', label='Training Loss')\nplt.plot(epoch, valid_loss, color='red', label='Validation Loss')\nplt.legend(loc='upper right')\nplt.show()", "_____no_output_____" ], [ "y_hat_train = model(dataset_train.desc_vals).detach().numpy()\ny_train = dataset_train.target_vals\ntrain_mae = median_absolute_error(y_hat_train, y_train)\ntrain_r2 = r2_score(y_hat_train, y_train)\ny_hat_test = model(dataset_test.desc_vals).detach().numpy()\ny_test = dataset_test.target_vals\ntest_mae = median_absolute_error(y_hat_test, y_test)\ntest_r2 = r2_score(y_hat_test, y_test)\nprint('Training median absolute error: {}'.format(train_mae))\nprint('Training r-squared coefficient: {}'.format(train_r2))\nprint('Testing median absolute error: {}'.format(test_mae))\nprint('Testing r-squared coefficient: {}'.format(test_r2))", "Training median absolute error: 3.772597312927246\nTraining r-squared coefficient: 0.8910070089699159\nTesting median absolute error: 5.318617820739746\nTesting r-squared coefficient: 0.779898764385632\n" ], [ "plt.clf()\nplt.xlabel('Experimental CN Value')\nplt.ylabel('Predicted CN Value')\nplt.scatter(y_train, y_hat_train, color='blue', label='Training Set')\nplt.scatter(y_test, y_hat_test, color='red', label='Testing Set')\nplt.legend(loc='upper left')\nplt.show()", "_____no_output_____" ], [ "test_maes = []\ntest_r2s = []\nfor _ in range(25):\n model = ECNet(dataset_train.desc_vals.shape[1], 1, 128, 2)\n model.fit(dataset=dataset_train, valid_size=0.2, patience=50, epochs=300, random_state=24)\n 
y_hat_test = model(dataset_test.desc_vals).detach().numpy()\n y_test = dataset_test.target_vals\n test_maes.append(median_absolute_error(y_hat_test, y_test))\n test_r2s.append(r2_score(y_hat_test, y_test))\nprint('Median median absolute error: {}'.format(np.median(test_maes)))\nprint('Median r-squared score: {}'.format(np.median(test_r2s)))", "Median median absolute error: 5.252781867980957\nMedian r-squared score: 0.8107531026041015\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c559a7a8555615769207a53a5e23e416b7ce3f
4,787
ipynb
Jupyter Notebook
notebooks/01_create_datasets.ipynb
charlesm93/ML_for_ocean_pCO2_interpolation
f81d48c92920d53d98b2bc934c21ae187b49c706
[ "FTL", "RSA-MD" ]
1
2021-09-01T17:13:04.000Z
2021-09-01T17:13:04.000Z
notebooks/01_create_datasets.ipynb
charlesm93/ML_for_ocean_pCO2_interpolation
f81d48c92920d53d98b2bc934c21ae187b49c706
[ "FTL", "RSA-MD" ]
null
null
null
notebooks/01_create_datasets.ipynb
charlesm93/ML_for_ocean_pCO2_interpolation
f81d48c92920d53d98b2bc934c21ae187b49c706
[ "FTL", "RSA-MD" ]
1
2021-11-16T16:30:23.000Z
2021-11-16T16:30:23.000Z
26.743017
207
0.546062
[ [ [ "### Description", "_____no_output_____" ], [ "This file takes the raw xarray files (which can be found at https://figshare.com/collections/Large_ensemble_pCO2_testbed/4568555), applies feature transformations, and saves it into a pandas dataframe.", "_____no_output_____" ], [ "### Inputs", "_____no_output_____" ] ], [ [ "# =========================================\n# For accessing directories\n# =========================================\nroot_dir = \"/local/data/artemis/workspace/jfs2167/recon_eval\" # Set this to the path of the project\nensemble_dir_head = \"/local/data/artemis/simulations/LET\" # Set this to where you have placed the raw data\n\ndata_output_dir = f\"{root_dir}/data/processed\"\nreference_output_dir = f\"{root_dir}/references\"\nxco2_path = f\"{ensemble_dir_head}/CESM/member_001/XCO2_1D_mon_CESM001_native_198201-201701.nc\" # Forcing is the same across members so only reference it once", "_____no_output_____" ] ], [ [ "### Modules", "_____no_output_____" ] ], [ [ "# standard imports\nimport os\nimport datetime\nfrom pathlib import Path\nfrom collections import defaultdict\nimport scipy\nimport random\nimport numpy as np\nimport xarray as xr\nimport pandas as pd\nimport joblib\nimport pickle\n\n# machine learning libraries\nfrom sklearn.model_selection import train_test_split\n\n# Python file with supporting functions\nimport pre ", "Using TensorFlow backend.\n" ] ], [ [ "### Predefined values", "_____no_output_____" ] ], [ [ "# Loading references\npath_LET = f\"{reference_output_dir}/members_LET_dict.pickle\"\npath_seeds = f\"{reference_output_dir}/random_seeds.npy\"\npath_loc = f\"{reference_output_dir}/members_seed_loc_dict.pickle\"\nwith open(path_LET,'rb') as handle:\n mems_dict = pickle.load(handle)\n \nrandom_seeds = np.load(path_seeds) \n \nwith open(path_loc,'rb') as handle:\n seed_loc_dict = pickle.load(handle)", "_____no_output_____" ], [ "# =========================================\n# Setting the date range to unify the date type\n# =========================================\n\n# Define date range\ndate_range_start = '1982-01-01T00:00:00.000000000'\ndate_range_end = '2017-01-31T00:00:00.000000000'\n\n# create date vector\ndates = pd.date_range(start=date_range_start, \n end=date_range_end,freq='MS') + np.timedelta64(14, 'D')\n\n# Select the start and end\ndate_start = dates[0]\ndate_end = dates[420]", "_____no_output_____" ] ], [ [ "### Loop to load in data, clean it, and save it", "_____no_output_____" ] ], [ [ "# ensemble_list = ['CanESM2', 'CESM', 'GFDL', 'MPI']\nensemble_list = []\n\nfor ens, mem_list in mems_dict.items():\n for member in mem_list:\n # This function loads in the data, cleans it, and creates a pandas data frame\n df = pre.create_inputs(ensemble_dir_head, ens, member, dates, xco2_path=xco2_path)\n \n # Save the pandas data frame to my workspace\n pre.save_clean_data(df, data_output_dir, ens, member)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
e7c5647fd7436d681453d403b3f3a882444d8f25
76,163
ipynb
Jupyter Notebook
Geek Univesity/Cap07.ipynb
otaviosanluz/learning-Python
52a6cf8516eb4c1fff85500ca02cc40c484839c2
[ "MIT" ]
1
2020-12-09T03:01:26.000Z
2020-12-09T03:01:26.000Z
Geek Univesity/Cap07.ipynb
otaviosanluz/learning-Python
52a6cf8516eb4c1fff85500ca02cc40c484839c2
[ "MIT" ]
null
null
null
Geek Univesity/Cap07.ipynb
otaviosanluz/learning-Python
52a6cf8516eb4c1fff85500ca02cc40c484839c2
[ "MIT" ]
null
null
null
29.440665
419
0.472841
[ [ [ "# Capítulo 07 : Coleções em Python", "_____no_output_____" ], [ "## Capítulo 07b", "_____no_output_____" ] ], [ [ "#1.\n#a. \nA = [1, 0, 5, -2, -5, 7] #Lista A\nprint(type(A))\nprint(A)\n\n#b. \nsoma = A[0] + A[1] + A[5] #Somando os índices 0, 1 e 5\nprint(f'A soma dos índices 0, 1 e 5 é {soma}.')\n\n#c. \nA[4] = 100 #Modificando valor do índice 4\nprint(A)\n\n#d.\nprint(A[0])\nprint(A[1])\nprint(A[2])\nprint(A[3])\nprint(A[4])\nprint(A[5])", "<class 'list'>\n[1, 0, 5, -2, -5, 7]\nA soma dos índices 0, 1 e 5 é 8.\n[1, 0, 5, -2, 100, 7]\n1\n0\n5\n-2\n100\n7\n" ], [ "#2. \nvalores = [int(input()) for i in range(0,6)] #Lista com valores inseridos pelo usuário dentro de um range até 6\nprint(valores)\n", "10\n1\n2\n3\n4\n5\n[10, 1, 2, 3, 4, 5]\n" ], [ "#3.\nconj = [float(input()) for i in range(10)] #Criando um vetor tipo float com 10 elementos\nprint(conj)\nquadrado = []\n\nfor i in conj: #Para cada item do conj\n quadrado1 = i**2 #Quadrado de cada item\n quadrado.append(quadrado1) #Criando uma lista com o quadrado de cada item\nprint(quadrado)\n\n\n", "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]\n[1.0, 4.0, 9.0, 16.0, 25.0, 36.0, 49.0, 64.0, 81.0, 100.0]\n" ], [ "# 4.\nvetor = [int(input()) for i in range(8)] #Criando vetor com 8 posições\nprint(vetor)\n\nx = vetor[3] #O valor 'x' é o índice 3 do vetor\ny = vetor[7] #O valor 'y' é o índice 7 do vetor\n\nsoma = x + y #Soma de x e y\nprint(f'X + Y = {soma}')", "1\n2\n3\n4\n5\n6\n7\n8\n[1, 2, 3, 4, 5, 6, 7, 8]\nX + Y = 12\n" ], [ "#5.\nvetor = [int(input()) for i in range(10)] #Vetor com 10 posições\nprint(vetor)\npar = []\ncont = 0\nfor elemento in vetor:\n if elemento % 2 == 0:\n par.append(elemento)\n cont = cont + 1\n\nprint(f'Valores pares do vetor: {par}')\nprint(f'O vetor possui {cont} valores pares.')", "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nValores pares do vetor: [2, 4, 6, 8, 10]\nO vetor possui 5 valores pares.\n" ], [ "## 6.\nvetor = [int(input()) for i in range(10)] #Vetor com 10 posições\nprint(vetor)\nprint(max(vetor)) #Valor máximo do vetor\nprint(min(vetor)) #Valor mínimo do vetor", "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n10\n1\n" ], [ "#7.\nvetor = [int(input()) for i in range(10)] #Vetor com 10 posições\nprint(vetor)\nmaximo = max(vetor)\nprint(f'O valor máximo do vetor é {maximo}.')\nposiçao = vetor.index(maximo)\nprint(f'A posição do valor máximo do vetor é {posiçao}.')\n", "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nO valor máximo do vetor é 10.\nA posição do valor máximo do vetor é 9.\n" ], [ "#8. \nvalor = [int(input()) for item in range(6)] #Vetor com 6 valores\nvalor.reverse() #Valores na forma inversa\nprint(valor)", "1\n2\n3\n4\n5\n6\n[6, 5, 4, 3, 2, 1]\n" ], [ "#9. \nvetor = []\nfor item in range(6): #Para cada item até 6\n valor = int(input('Digite um número par: '))\n while valor % 2 != 0: #Enquanto o valor for diferente de par\n valor = int(input('O número informado não é válido! Digite um número par: '))\n if valor % 2 == 0: #Se o valor for par\n vetor.append(valor) #Lista de valores pares\nprint(vetor)\nvetor.reverse() #Revertendo a lista de valores pares\nprint(vetor)\n\n", "Digite um número par: 10\nDigite um número par: 12\nDigite um número par: 36\nDigite um número par: 44\nDigite um número par: 46\nDigite um número par: 8\n[10, 12, 36, 44, 46, 8]\n[8, 46, 44, 36, 12, 10]\n" ], [ "#10. 
\nvetor = []\nfor item in range(6): #Para cada item até 6\n valor = int(input('Digite um número par: '))\n while valor % 2 != 0: #Enquanto o valor for diferente de par\n valor = int(input('O número informado não é válido! Digite um número par: '))\n if valor % 2 == 0: #Se o valor for par\n vetor.append(valor) #Lista de valores pares\nprint(vetor)", "Digite um número par: 11\nO número informado não é válido! Digite um número par: 8\nDigite um número par: 9\nO número informado não é válido! Digite um número par: 2\nDigite um número par: 4\nDigite um número par: 6\nDigite um número par: 12\nDigite um número par: 14\n[8, 2, 4, 6, 12, 14]\n" ], [ "#11.\nvetor = []\nfor item in range(15):\n nota = float(input('Informe a nota do aluno.'))\n while nota < 1:\n nota = float(input('Nota inválida. Informe novamente.'))\n while nota > 100:\n nota = float(input('Nota inválida. Informe novamente.'))\n if nota >= 1 and nota <= 100:\n vetor.append(nota)\nprint(vetor)\nsoma = sum(vetor)\nmedia = soma / 15\nprint(f'A média geral da turma é {media}.')\n", "Informe a nota do aluno.10\nInforme a nota do aluno.8\nInforme a nota do aluno.12\nInforme a nota do aluno.35\nInforme a nota do aluno.11\nInforme a nota do aluno.2\nInforme a nota do aluno.4\nInforme a nota do aluno.2\nInforme a nota do aluno.8\nInforme a nota do aluno.9\nInforme a nota do aluno.0\nNota inválida. Informe novamente.12\nInforme a nota do aluno.45\nInforme a nota do aluno.23\nInforme a nota do aluno.12\nInforme a nota do aluno.11\n[10.0, 8.0, 12.0, 35.0, 11.0, 2.0, 4.0, 2.0, 8.0, 9.0, 12.0, 45.0, 23.0, 12.0, 11.0]\nA média geral da turma é 13.6.\n" ], [ "#12.\nvetortotal = []\nvetorneg = []\nvetorpos = []\ncont = 0\nfor item in range(10): #Para cada item em um range até 10\n valor = float(input('Informe um valor: ')) #Valor tipo real\n vetortotal.append(valor) #Vetor com o valor real informado\n if valor < 0: #Se o valor for negativo\n cont = cont + 1\n vetorneg.append(valor) #Vetor dos valores negativos\n elif valor > 0: #Se o valor for positivo\n vetorpos.append(valor) #Vetor dos valores positivos\nsoma = sum(vetorpos) #Soma dos valores do vetor positivo\nprint(f'Vetor: {vetortotal}')\nprint(f'Números positivos: {vetorpos}. Soma dos número positivos: {soma}')\nprint(f'Número negativos: {vetorneg}. O vetor possui {cont} número(s) negativo(s).')\n\n", "Informe um valor: 1\nInforme um valor: 2\nInforme um valor: 3\nInforme um valor: 4\nInforme um valor: 5\nInforme um valor: 6\nInforme um valor: 7\nInforme um valor: 8\nInforme um valor: 9\nInforme um valor: 1\nVetor: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 1.0]\nNúmeros positivos: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 1.0]. Soma dos número positivos: 46.0\nNúmero negativos: []. 
O vetor possui 0 número(s) negativo(s).\n" ], [ "#13.\nvetor = [int(input()) for item in range(5)]\nmaior = max(vetor)\nmenor = min(vetor)\nposiçao = vetor.index(maior)\nprint(f'O maior valor do vetor é {maior} e sua posição é a {posiçao}.')", "12\n11\n10\n9\n8\nO maior valor do vetor é 12 e sua posição é a 0.\n" ], [ "#14.\nvetor = [int(input()) for item in range(10)]\n\nfrom collections import Counter #Importando método counter\n\niguais = Counter(vetor) #Conta quantas vezes aparece determinado valor\nprint(f'Valores / Repetições: {iguais}')\n", "1\n2\n3\n4\n5\n6\n7\n8\n9\n11\nValores / Repetições: Counter({1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 11: 1})\n" ], [ "#15.\nvetor = [int(input()) for item in range(20)]\n\na = sorted(vetor) #Organiza o vetor em ordem crescente\nprint(a)\nb= sorted(set(vetor)) #Organiza os vetores em ordem crescente e elimina os valores repetidos\nprint(b)", "12\n1\n2\n3\n4\n5\n5\n6\n7\n8\n9\n0\n0\n0\n9\n8\n7\n6\n7\n4\n[0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 9, 12]\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12]\n" ], [ "#16. \nvetor = []\nfor item in range(5):\n valor = float(input('Informe um valor'))\n vetor.append(valor)\nprint(vetor)\n\ncodigo = int(input('Informe um código (1 ou 2): '))\nif codigo != 1 and codigo != 2:\n print('Código inválido!')\nif codigo == 1:\n print(vetor)\nelif codigo == 2:\n vetor.reverse()\n print(vetor)", "Informe um valor1\nInforme um valor2\nInforme um valor3\nInforme um valor4\nInforme um valor5\n[1.0, 2.0, 3.0, 4.0, 5.0]\nInforme um código (1 ou 2): 1\n[1.0, 2.0, 3.0, 4.0, 5.0]\n" ], [ "#17.\nvetor = []\nfor elemento in range(10):\n valor = int(input('Informe um valor.'))\n if valor < 0: #Se o valor informado for menor que 0\n vetor.append(0) #Adicionar o valor 0 no vetor\n else: \n vetor.append(valor) #Adicionar o valor informado no vetor\nprint(vetor)", "Informe um valor.1\nInforme um valor.2\nInforme um valor.3\nInforme um valor.4\nInforme um valor.5\nInforme um valor.6\nInforme um valor.7\nInforme um valor.8\nInforme um valor.9\nInforme um valor.10\n[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n" ], [ "#18.\nvetor = []\ncont = 0\nx = int(input('Informe o valor de x.'))\nfor elemento in range(10):\n valor = int(input('Informe um valor.'))\n if valor % x == 0:\n vetor.append(valor)\n cont = cont + 1\nprint(f'O número {x} possui {cont} múltiplo(s): {vetor}')", "Informe o valor de x.10\nInforme um valor.0\nInforme um valor.1\nInforme um valor.2\nInforme um valor.3\nInforme um valor.4\nInforme um valor.5\nInforme um valor.6\nInforme um valor.7\nInforme um valor.8\nInforme um valor.0\nO número 10 possui 2 múltiplo(s): [0, 0]\n" ], [ "#19.\nvetor = []\ncont = 0 #Indice\nfor item in range(50):\n elemento = ((cont + 5) * cont) % (cont + 1) \n vetor.append(elemento)\n cont = cont +1 #Indice + 1\nprint(vetor)", "[0, 0, 2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46]\n" ], [ "#20. FALTOU A IMPRESSÃO DE DOIS ELEMENTOS DOS VETORES POR LINHA\nvetor = []\nimpar = []\nfor item in range(10):\n valor = int(input('Informe um valor no intervalo [0,50]: '))\n while valor < 0:\n valor = int(input('Valor inválido! Informe um valor no intervalo [0,50]: '))\n while valor > 50:\n valor = int(input('Valor inválido. 
Informe um valor no intervalo [0,50]: '))\n vetor.append(valor)\n if valor % 2 != 0:\n impar.append(valor)\nprint(vetor)\nprint(impar)\n\n", "Informe um valor no intervalo [0,50]: 11\nInforme um valor no intervalo [0,50]: -20\nValor inválido! Informe um valor no intervalo [0,50]: 1\nInforme um valor no intervalo [0,50]: 2\nInforme um valor no intervalo [0,50]: 33\nInforme um valor no intervalo [0,50]: 555\nValor inválido. Informe um valor no intervalo [0,50]: 34\nInforme um valor no intervalo [0,50]: 12\nInforme um valor no intervalo [0,50]: 1\nInforme um valor no intervalo [0,50]: 2\nInforme um valor no intervalo [0,50]: 3\nInforme um valor no intervalo [0,50]: 4\n[11, 1, 2, 33, 34, 12, 1, 2, 3, 4]\n[11, 1, 33, 1, 3]\n" ], [ "#21.\na = []\nb = []\nc = []\nfor elemento in range(10):\n valora = int(input('Informe um valor para o vetor A.'))\n valorb = int(input('Informe um valor para o vetor B.'))\n valorc = valora - valorb\n c.append(valorc)\nprint(a.append(valora))\nprint(a.append(valorb))\nprint(f'A - B: {c}')", "Informe um valor para o vetor A.0\nInforme um valor para o vetor B.2\nInforme um valor para o vetor A.4\nInforme um valor para o vetor B.5\nInforme um valor para o vetor A.7\nInforme um valor para o vetor B.2\nInforme um valor para o vetor A.4\nInforme um valor para o vetor B.2\nInforme um valor para o vetor A.8\nInforme um valor para o vetor B.1\nInforme um valor para o vetor A.9\nInforme um valor para o vetor B.3\nInforme um valor para o vetor A.0\nInforme um valor para o vetor B.6\nInforme um valor para o vetor A.9\nInforme um valor para o vetor B.-10\nInforme um valor para o vetor A.-11\nInforme um valor para o vetor B.1\nInforme um valor para o vetor A.2\nInforme um valor para o vetor B.3\nNone\nNone\nA - B: [-2, -1, 5, 2, 7, 6, -6, 19, -12, -1]\n" ], [ "#22.\na = []\nb = []\nc = []\npos = 0 #Indice\nfor elemento in range(10):\n valora = int(input('Informe um valor para o vetor A.')) #Valores do vetor A\n valorb = int(input('Informe um valor para o vetor B.')) #Valores do vetor B\n if pos % 2 == 0: #Se o resto da divisao indice/2 = 0\n c.append(valora) \n else:\n c.append(valorb)\n pos = pos + 1\n a.append(valora)\n b.append(valorb)\n\nprint(f'Vetor A: {a}')\nprint(f'Vetor B: {b}')\nprint(f'Vetor C: {c}')\n", "Informe um valor para o vetor A.1\nInforme um valor para o vetor B.2\nInforme um valor para o vetor A.3\nInforme um valor para o vetor B.4\nInforme um valor para o vetor A.5\nInforme um valor para o vetor B.6\nInforme um valor para o vetor A.7\nInforme um valor para o vetor B.8\nInforme um valor para o vetor A.9\nInforme um valor para o vetor B.1\nInforme um valor para o vetor A.2\nInforme um valor para o vetor B.3\nInforme um valor para o vetor A.4\nInforme um valor para o vetor B.5\nInforme um valor para o vetor A.6\nInforme um valor para o vetor B.7\nInforme um valor para o vetor A.8\nInforme um valor para o vetor B.9\nInforme um valor para o vetor A.1\nInforme um valor para o vetor B.2\nVetor A: [1, 3, 5, 7, 9, 2, 4, 6, 8, 1]\nVetor B: [2, 4, 6, 8, 1, 3, 5, 7, 9, 2]\nVetor C: [1, 4, 5, 8, 9, 3, 4, 7, 8, 2]\n" ], [ "#23.\na = []\nb = []\nvetorescalar = []\nfor elemento in range(5):\n valora = float(input('Informe um valor para o vetor A.')) #Valores do vetor A\n valorb = float(input('Informe um valor para o vetor B.')) #Valores do vetor B\n escalar = valora * valorb #Escalar = x1*y1\n vetorescalar.append(escalar) #Criando vetor a partir dos valores do escalar\n a.append(valora)\n b.append(valorb)\nprint(a)\nprint(b)\nprint(vetorescalar)\nprint(sum(vetorescalar)) 
#Vetor escalar de A e B\n", "Informe um valor para o vetor A.1\nInforme um valor para o vetor B.2\nInforme um valor para o vetor A.3\nInforme um valor para o vetor B.4\nInforme um valor para o vetor A.5\nInforme um valor para o vetor B.6\nInforme um valor para o vetor A.7\nInforme um valor para o vetor B.8\nInforme um valor para o vetor A.9\nInforme um valor para o vetor B.1\n[1.0, 3.0, 5.0, 7.0, 9.0]\n[2.0, 4.0, 6.0, 8.0, 1.0]\n[2.0, 12.0, 30.0, 56.0, 9.0]\n109.0\n" ], [ "#25.\nelemento = 0\nvetor = []\nwhile len(vetor) < 101: #Enquanto o tamanho do vetor for menor que 101\n if elemento % 7 != 0: #Se o resto da divisão por 7 for diferente de 0\n vetor.append(elemento)\n elif elemento % 10 == 7: #Se o resto da divisão por 10 for igual a 7\n vetor.append(elemento)\n elemento = elemento + 1\nprint(vetor)", "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 41, 43, 44, 45, 46, 47, 48, 50, 51, 52, 53, 54, 55, 57, 58, 59, 60, 61, 62, 64, 65, 66, 67, 68, 69, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 106, 107, 108, 109, 110, 111, 113, 114, 115]\n" ], [ "#26.\nn = 10 #número de elementos do vetor\nsomav = 0\nvetor = []\nfor elemento in range(10):\n v = int(input())\n somav = somav + v #soma dos elementos do vetor\n media = somav / n #média do vetor\n vetor.append(v) #vetor v\nprint(vetor)\nprint(media)\n\nsomatorio2 = 0\nfor elemento in vetor: #para cada elemento do vetor\n somatorio = (elemento - media)**2 #aplicação da parte final da formula DV\n somatorio2 = somatorio2 + somatorio #aplicação da parte final da formula DV\nprint(somatorio2)\n\na = 1 / (n - 1) #Primeira parte da formula DP\nDV = (a * somatorio2)**(1/2) #calculo final DV\nprint(f'O desvio padrão do vetor v é {DV}.')", "12\n14\n16\n18\n19\n13\n15\n17\n19\n23\n[12, 14, 16, 18, 19, 13, 15, 17, 19, 23]\n16.6\n98.39999999999999\nO desvio padrão do vetor v é 3.306559138036598.\n" ] ], [ [ "## Capítulo 07a", "_____no_output_____" ] ], [ [ "#Exemplo.\ngalera = list()\ndados = list()\nfor c in range(3):\n dados.append(str(input('Nome: ')))\n dados.append(int(input('Idade: ')))\n galera.append(dados[:])\n dados.clear()\n\nprint(galera)", "Nome: otavio\nIdade: 19\nNome: gabriel\nIdade: 2\nNome: miguel\nIdade: 45\n[['otavio', 19], ['gabriel', 2], ['miguel', 45]]\n" ], [ "#Crie um programa que cria uma matriz de dimensão 3x3 e preencha com valores lidos pelo teclado. No final, mostre a matriz na tela, com a formatação correta. 
\nmatriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\nfor linha in range(3):\n for coluna in range(3):\n matriz[linha][coluna] = int(input(f'Digite um valor para [{linha}, {coluna}]: '))\nprint('-=' * 30) \n\nfor linha in range(3):\n for coluna in range(3):\n print(f'[{matriz[linha][coluna]:^5}]', end='')\n print()", "Digite um valor para [0, 0]: 1\nDigite um valor para [0, 1]: 2\nDigite um valor para [0, 2]: 3\nDigite um valor para [1, 0]: 4\nDigite um valor para [1, 1]: 5\nDigite um valor para [1, 2]: 6\nDigite um valor para [2, 0]: 7\nDigite um valor para [2, 1]: 8\nDigite um valor para [2, 2]: 9\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n[ 1 ][ 2 ][ 3 ]\n[ 4 ][ 5 ][ 6 ]\n[ 7 ][ 8 ][ 9 ]\n" ], [ "#Aprimore o desafio anterior, mostrando no final: soma de todos os valores digitados; soma dos valores da terceira coluna; maior valor da segunda linha.\n\n#Variáveis\nmatriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\nspar = 0 #Soma dos pares\nmaior = 0 #Maior valor\nscol = 0 #Soma da terceira coluna\n\n#Preenchendo a matriz\nfor linha in range(3):\n for coluna in range(3):\n matriz[linha][coluna] = int(input(f'Digite um valor para [{linha}, {coluna}]: '))\nprint('-=' * 30) \n\n#Organizando a matriz\nfor linha in range(3):\n for coluna in range(3):\n print(f'[{matriz[linha][coluna]:^5}]', end='')\n if matriz[linha][coluna] % 2 == 0: #Soma dos pares\n spar = spar + matriz[linha][coluna]\n\n print()\nprint('-=' * 30)\nprint(f'A soma dos pares é {spar}.')\n\nfor linha in range(3): #Soma da terceira coluna\n scol = scol + matriz[linha][2]\nprint(f'A soma dos valores da terceira coluna é {scol}.')\n\nfor coluna in range(3):\n if coluna == 0:\n maior = matriz[1][coluna]\n elif matriz [1][coluna] > maior:\n maior = matriz[1][coluna]\nprint(f'O maior valor da segunda linha é {maior}.')", "Digite um valor para [0, 0]: 1\nDigite um valor para [0, 1]: 2\nDigite um valor para [0, 2]: 3\nDigite um valor para [1, 0]: 4\nDigite um valor para [1, 1]: 5\nDigite um valor para [1, 2]: 6\nDigite um valor para [2, 0]: 7\nDigite um valor para [2, 1]: 8\nDigite um valor para [2, 2]: 9\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n[ 1 ][ 2 ][ 3 ]\n[ 4 ][ 5 ][ 6 ]\n[ 7 ][ 8 ][ 9 ]\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\nA soma dos pares é 20.\nA soma dos valores da terceira coluna é 18.\nO maior valor da segunda linha é 6.\n" ], [ "#1.\nmatriz = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n\nmaior = [] #variável maior que 10\nfor l in range(4):\n for c in range(4):\n matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]:'))\n if matriz[l][c] > 10: #se os valores forem maior que 10\n maior.append(matriz[l][c])\n maior.sort() #organizando a lista em ordem crescente\n\n#Organizando a matriz\nfor l in range(4):\n for c in range(4):\n print(f'[{matriz[l][c]:^5}]', end='')\n print()\n\nprint(f'Valores maiores que 10: {maior}')", "Digite um valor para [0,0]:1\nDigite um valor para [0,1]:2\nDigite um valor para [0,2]:3\nDigite um valor para [0,3]:4\nDigite um valor para [1,0]:5\nDigite um valor para [1,1]:6\nDigite um valor para [1,2]:7\nDigite um valor para [1,3]:8\nDigite um valor para [2,0]:9\nDigite um valor para [2,1]:0\nDigite um valor para [2,2]:1\nDigite um valor para [2,3]:2\nDigite um valor para [3,0]:3\nDigite um valor para [3,1]:4\nDigite um valor para [3,2]:5\nDigite um valor para [3,3]:6\n[ 1 ][ 2 ][ 3 ][ 4 ]\n[ 5 ][ 6 ][ 7 ][ 8 ]\n[ 9 ][ 0 ][ 1 ][ 2 ]\n[ 3 ][ 4 ][ 5 ][ 6 ]\nValores maiores que 10: []\n" ], [ "#2.\nmatriz = [[0, 0, 0, 0, 0], [0, 0, 0, 
0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n\nfor l in range(5):\n for c in range(5):\n if l == c:\n matriz[l][c] = 1\n else:\n matriz[l][c] = 0\n\nfor l in range(5):\n for c in range(5):\n print(f'[{matriz[l][c]:^5}]', end='')\n print()", "[ 1 ][ 0 ][ 0 ][ 0 ][ 0 ]\n[ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]\n[ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]\n[ 0 ][ 0 ][ 0 ][ 1 ][ 0 ]\n[ 0 ][ 0 ][ 0 ][ 0 ][ 1 ]\n" ], [ "#3.\nmatriz = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n\nfor l in range(4):\n for c in range(4):\n matriz[l][c] = l * c\n\nfor l in range(4):\n for c in range(4):\n print(f'[{matriz[l][c]:^5}]', end='')\n print()", "[ 0 ][ 0 ][ 0 ][ 0 ]\n[ 0 ][ 1 ][ 2 ][ 3 ]\n[ 0 ][ 2 ][ 4 ][ 6 ]\n[ 0 ][ 3 ][ 6 ][ 9 ]\n" ], [ "#4.\nmatriz = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n\nmaior = 0\nposiçao = (0,0)\nfor l in range(4):\n for c in range(4):\n matriz[l][c] = (int(input(f'Digite um valor para [{l},{c}]:')))\n if matriz[l][c] > maior:\n maior = matriz[l][c]\n posiçao = (l,c)\n\nfor l in range(4):\n for c in range(4):\n print(f'[{matriz[l][c]:^5}]', end='')\n print()\n\nprint('-=' * 30)\nprint(f'O maior valor é {maior} e encontra-se na posição {posiçao}.')", "Digite um valor para [0,0]:1\nDigite um valor para [0,1]:2\nDigite um valor para [0,2]:3\nDigite um valor para [0,3]:4\nDigite um valor para [1,0]:5\nDigite um valor para [1,1]:6\nDigite um valor para [1,2]:7\nDigite um valor para [1,3]:8\nDigite um valor para [2,0]:9\nDigite um valor para [2,1]:0\nDigite um valor para [2,2]:11\nDigite um valor para [2,3]:12\nDigite um valor para [3,0]:13\nDigite um valor para [3,1]:14\nDigite um valor para [3,2]:15\nDigite um valor para [3,3]:16\n[ 1 ][ 2 ][ 3 ][ 4 ]\n[ 5 ][ 6 ][ 7 ][ 8 ]\n[ 9 ][ 0 ][ 11 ][ 12 ]\n[ 13 ][ 14 ][ 15 ][ 16 ]\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\nO maior valor é 16 e encontra-se na posição (3, 3).\n" ], [ "#5.Leia uma matriz 5x5. Leia também um valor X. 
O programa deverá fazer uma busca desse valor na matriz e, ao final, escrever a localização(linha,coluna) ou uma mensagem de 'não encontrado'.\nmatriz = []\nx = int(input('Informe um valor para X: '))\nposiçao = []\nfor i in range(5):\n linha = []\n for j in range(5):\n valor = (int(input(f'Valor da posição:[{i},{j}]')))\n linha.append(valor)\n if valor == x:\n posiçao.append((i,j))\n print(linha)\n matriz.append(linha)\nprint(posiçao)", "Informe um valor para X: 1\nValor da posição:[0,0]2\nValor da posição:[0,1]3\nValor da posição:[0,2]4\nValor da posição:[0,3]5\nValor da posição:[0,4]6\n[2, 3, 4, 5, 6]\nValor da posição:[1,0]7\nValor da posição:[1,1]8\nValor da posição:[1,2]9\nValor da posição:[1,3]0\nValor da posição:[1,4]1\n[7, 8, 9, 0, 1]\nValor da posição:[2,0]2\nValor da posição:[2,1]3\nValor da posição:[2,2]4\nValor da posição:[2,3]5\nValor da posição:[2,4]6\n[2, 3, 4, 5, 6]\nValor da posição:[3,0]7\nValor da posição:[3,1]8\nValor da posição:[3,2]9\nValor da posição:[3,3]0\nValor da posição:[3,4]1\n[7, 8, 9, 0, 1]\nValor da posição:[4,0]2\nValor da posição:[4,1]3\nValor da posição:[4,2]4\nValor da posição:[4,3]5\nValor da posição:[4,4]6\n[2, 3, 4, 5, 6]\n[(1, 4), (3, 4)]\n" ], [ "#Exemplo Dicionários\n\npessoas = {'nome':'Gustavo', 'sexo':'M', 'idade':22}\nprint(pessoas)\nprint(f'O {pessoas[\"nome\"]} tem {pessoas[\"idade\"]} anos.')\nprint(pessoas.keys())\nprint(pessoas.values())\nprint(pessoas.items())\n\nfor k in pessoas.keys():\n print(k)\n\nfor v in pessoas.values():\n print(v)\n\nfor k,v in pessoas.items():\n print(f'{k} = {v}')\n\ndel pessoas['sexo']\nprint(pessoas)\n\npessoas['nome'] = 'Leandro'\nprint(pessoas)\n\npessoas['peso'] = 98.5\nprint(pessoas)", "{'nome': 'Gustavo', 'sexo': 'M', 'idade': 22}\nO Gustavo tem 22 anos.\ndict_keys(['nome', 'sexo', 'idade'])\ndict_values(['Gustavo', 'M', 22])\ndict_items([('nome', 'Gustavo'), ('sexo', 'M'), ('idade', 22)])\nnome\nsexo\nidade\nGustavo\nM\n22\nnome = Gustavo\nsexo = M\nidade = 22\n{'nome': 'Gustavo', 'idade': 22}\n{'nome': 'Leandro', 'idade': 22}\n{'nome': 'Leandro', 'idade': 22, 'peso': 98.5}\n" ], [ "#Criando dicionário dentro de uma lista\n\nbrasil = []\nestado1 = {'uf':'Rio de Janeiro', 'sigla':'RJ'}\nestado2 = {'uf': 'São Paulo', 'sigla':'SP'}\nbrasil.append(estado1)\nbrasil.append(estado2)\n\nprint(estado1)\nprint(estado2)\nprint(brasil)\nprint(brasil[0])\nprint(brasil[1]['uf'])", "{'uf': 'Rio de Janeiro', 'sigla': 'RJ'}\n{'uf': 'São Paulo', 'sigla': 'SP'}\n[{'uf': 'Rio de Janeiro', 'sigla': 'RJ'}, {'uf': 'São Paulo', 'sigla': 'SP'}]\n{'uf': 'Rio de Janeiro', 'sigla': 'RJ'}\nSão Paulo\n" ], [ "#Exemplo\n\nestado = dict()\nbrasil = list()\n\nfor c in range(3):\n estado['uf'] = str(input('Unidade Federativa:'))\n estado['sigla'] = str(input('Sigla:'))\n brasil.append(estado.copy())\nprint(brasil)\n\nfor e in brasil:\n for v in e.values():\n print(v, end=' ')\n print()", "Unidade Federativa:RS\nSigla:RS\nUnidade Federativa:São Paulo\nSigla:SP\nUnidade Federativa:Rio de Janeiro\nSigla:RJ\n[{'uf': 'RS', 'sigla': 'RS'}, {'uf': 'São Paulo', 'sigla': 'SP'}, {'uf': 'Rio de Janeiro', 'sigla': 'RJ'}]\nRS RS \nSão Paulo SP \nRio de Janeiro RJ \n" ], [ "#Faça um programa que leia 5 valores numéricos e guarde-os em uma lista. 
No final, mostre qual foi o maior e o menor valor digitado e as susas respectivas posições na lista\n\nlistanum = []\nmaior = 0\nmenor = 0\nposiçaomaior = []\nposiçaomenor = []\nfor c in range(5):\n valor = (int(input(f'Informe um valor para a posição {c}:')))\n listanum.append(valor)\n if c == 0:\n maior = menor = listanum[c]\n else:\n if listanum[c] > maior:\n maior = listanum[c]\n if listanum[c] < menor:\n menor = listanum[c]\n\nfor i,v in enumerate(listanum):\n if v == maior:\n posiçaomaior.append(i)\nfor i,v in enumerate(listanum):\n if v == menor:\n posiçaomenor.append(i)\n\n\nprint(f'Você digitou os valores {listanum}.')\nprint(f'O número {maior} foi o maior valor encontrado na(s) posição(ões): {posiçaomaior}.')\nprint(f'O número {menor} foi o menor valor encontrado na(s) posição(ões): {posiçaomenor}.')", "Informe um valor para a posição 0:1\nInforme um valor para a posição 1:2\nInforme um valor para a posição 2:3\nInforme um valor para a posição 3:4\nInforme um valor para a posição 4:5\nVocê digitou os valores [1, 2, 3, 4, 5].\nO número 5 foi o maior valor encontrado na(s) posição(ões): [4].\nO número 1 foi o menor valor encontrado na(s) posição(ões): [0].\n" ], [ "#Crie um programa onde o usuário possa digitar vários valores numéricos e cadastre-se em uma lista. Caso o número já exista lá dentro, ele não será adicionado. No final, serão exibidos todos os valores únicos digitados, em ordem crescente.\n\nnumeros = []\nwhile True:\n n = int(input('Digite um valor:'))\n if n not in numeros:\n numeros.append(n)\n print('Valor adicionado!')\n else:\n print('valor duplicado! Não será adicionado.')\n r = str(input('Quer continuar? [S/N]'))\n if r in 'Nn':\n break\nnumeros.sort()\nprint(f'Valores digitados: {numeros}')", "Digite um valor:10\nValor adicionado!\nQuer continuar? [S/N]s\nDigite um valor:-1\nValor adicionado!\nQuer continuar? [S/N]S\nDigite um valor:0\nValor adicionado!\nQuer continuar? [S/N]n\nValores digitados: [-1, 0, 10]\n" ], [ "#Crie um programa onde o usuário possa digitar cinco valores numéricos e cadastre-se em uma lista, já na posição correta de inserção(sem usar o sort()). No final, mostre a lista ordenada na tela.\n\nlista = []\nfor c in range(5):\n n = int(input('Digite um valor:'))\n if c == 0:\n lista.append(n)\n elif n > lista[-1]: #Se o n for maior que o último elemento da lista\n lista.append(n)\n else:\n pos = 0\n while pos < len(lista):\n if n <= lista[pos]:\n lista.insert(pos, n)\n break\n pos = pos + 1\nprint(f'Valores digitados em ordem crescente: {lista}')", "Digite um valor:1\nDigite um valor:2\nDigite um valor:3\nDigite um valor:4\nDigite um valor:5\nValores digitados em ordem crescente: [1, 2, 3, 4, 5]\n" ], [ "# Crie um programa que vai ler vários números e colocar em uma lista. Depois disso, mostre: (a) Quantos números foram digitados (b) A lista de valores, ordenada de forma decrescente (c) Se o valor 5 foi digitado e esta ou não na lista\n\nvalores = []\nwhile True:\n valores.append(int(input('Digite um valor:')))\n resp = str(input('Quer continuar? [S/N]'))\n if resp in 'Nn':\n break\nprint(f'Você digitou {len(valores)} elementos.')\nvalores.sort(reverse=True)#Ordem decrescente\nprint(f'Valores em ordem decrescente: {valores}.')\n\nif 5 in valores:\n print('O valor 5 faz parte da lista.')\nelse:\n print('O valor 5 não faz parte da lista.')", "Digite um valor:12\nQuer continuar? [S/N]s\nDigite um valor:11\nQuer continuar? 
[S/N]n\nVocê digitou 2 elementos.\nValores em ordem decrescente: [12, 11].\nO valor 5 não faz parte da lista.\n" ], [ "#Crie um programa que vai ler vários números e colocar em uma lista. Depois disso, crie duas listas extras que vão conter apenas os valores pares e os valores ímpares digitados, respectivamente. Ao final, mostre o conteúdo das três listas geradas.\n\nlista = []\nwhile True:\n lista.append(int(input('Informe um valor:')))\n resp = str(input('Quer continuar? [S/N]'))\n if resp in 'Nn':\n break\n\nlistapar = []\nlistaimpar = []\nfor valor in lista:\n if valor % 2 == 0:\n listapar.append(valor)\n else:\n listaimpar.append(valor)\n\nprint(lista)\nprint(f'Lista com valores pares: {listapar}')\nprint(f'Lista com valor ímpares: {listaimpar}')", "Informe um valor:19\nQuer continuar? [S/N]s\nInforme um valor:11\nQuer continuar? [S/N]s\nInforme um valor:16\nQuer continuar? [S/N]s\nInforme um valor:10\nQuer continuar? [S/N]s\nInforme um valor:0\nQuer continuar? [S/N]n\n[19, 11, 16, 10, 0]\nLista com valores pares: [16, 10, 0]\nLista com valor ímpares: [19, 11]\n" ], [ "#Crie um programa onde o usuário digite uma expressão qualquer que use parênteses. Seu aplicativo deverá analisar se a expressão passada está com os parênteses abertos e fechados na ordem correta.\n\nexp = str(input('Digite a expressão: '))\npilha = []\nfor simbolo in exp:\n if simbolo == '(':\n pilha.append('(')\n elif simbolo == ')':\n if len(pilha) > 0:\n pilha.pop()\n else:\n pilha.append(')')\n break\n\nif len(pilha) == 0:\n print('Sua expressão esta válida!')\nelse:\n print('Sua expressão esta errada!')", "Digite a expressão: (oi)\nSua expressão esta válida!\n" ], [ "#Faça um programa que leia nome e peso de várias pessoas, guardando tudo em uma lista. No final mostre: (a)Quantas pessoas foram cadastradas. (b) Uma listagem com as pessoas mais pesadas. (c) Uma listagem com as pessoas mais leves.\n\nlista = []\nprincipal = []\npessoas = 0\nmaior = 0\nmenor = 0\nwhile True:\n nome = str(input('Informe o nome da pessoa:'))\n peso = float(input( f'Informe o peso de {nome}: '))\n lista.append(nome)\n lista.append(peso)\n if len(principal) == 0:\n maior = menor = lista[1]\n else:\n if lista[1] > maior:\n maior = lista[1]\n if lista[1] < menor:\n menor = lista[1]\n pessoas = pessoas + 1\n principal.append(lista[:])\n lista.clear()\n resp = str(input('Quer continuar? [S/N]'))\n if resp in 'Nn':\n break\n\nprint(pessoas)\nprint(principal)\nprint(f'Maior peso: {maior}kg')\nfor p in principal:\n if p[1] == maior:\n print(f'{p[0]}')\nprint(f'Menor peso: {menor}kg')\nfor p in principal:\n if p[1] == menor:\n print(f'{p[0]}')", "Informe o nome da pessoa:otavio\nInforme o peso de otavio: 98\nQuer continuar? [S/N]s\nInforme o nome da pessoa:miguel\nInforme o peso de miguel: 80\nQuer continuar? [S/N]n\n2\n[['otavio', 98.0], ['miguel', 80.0]]\nMaior peso: 98.0kg\notavio\nMenor peso: 80.0kg\nmiguel\n" ], [ "#Faça um programa que leia nome e média de um aluno, guardando também a situação em um dicionário. No final, mostre o conteúdo da estrutura na tela.\n\naluno = {}\naluno['nome'] = str(input('Informe o nome do aluno:'))\naluno['média'] = float(input('Informe a média do aluno: '))\nif aluno['média'] >= 7:\n aluno['situação'] = 'Aprovado'\nelse:\n aluno['situação'] = 'Reprovado'\n\nprint(aluno)", "Informe o nome do aluno:otavio\nInforme a média do aluno: 10\n{'nome': 'otavio', 'média': 10.0, 'situação': 'Aprovado'}\n" ], [ "#Crie um programa onde 4 jogadores joguem um dado e tenham resultados aleatórios. 
Guarde esses resultados em um dicionário. No final, coloque esse dicionário em ordem, sabendo que o vencedor tirou o maior número no dado.\nfrom random import randint #método para gerar números aleatórios\nfrom operator import itemgetter\njogo = {'Jogador1':randint(1, 6),\n 'Jogador2':randint(1, 6),\n 'Jogador3':randint(1, 6),\n 'Jogador4':randint(1, 6)}\nranking = []\nprint('Valores sorteados:')\nfor k,v in jogo.items():\n print(f'{k} tirou {v} no dado.')\nranking = sorted(jogo.items(), key=itemgetter(1), reverse=True)\nprint(ranking)\n\n#reverse=True ordena na ordem decrescente", "Valores sorteados:\nJogador1 tirou 6 no dado.\nJogador2 tirou 5 no dado.\nJogador3 tirou 3 no dado.\nJogador4 tirou 6 no dado.\n[('Jogador1', 6), ('Jogador4', 6), ('Jogador2', 5), ('Jogador3', 3)]\n" ], [ "#Crie um programa que leia, nome, ano de nascimento e carteira de trabalho e cadastre-os (com idade) em um dicionário. Se, por acaso, a CTPS for diferente de zero, o dicionário receberá também o ano de contratação e o salário. Calcule e acrescente, além da idade, com quantos anos a pessoa vai se aposentar.\n\nfrom datetime import datetime #Importando o ano do computador\n\ndados = {}\ndados['nome'] = str(input('Nome:'))\nnasc = int(input('Ano de nascimento: '))\ndados['idade'] = datetime.now().year - nasc #ano atual - ano de nascimento\ndados['ctps'] = int(input('Carteira de Trabalho (0 não tem): '))\n\nif dados['ctps'] != 0:\n dados['contratação'] = int(input('Ano de Contratação: '))\n dados['salário'] = float(input('Salário: R$'))\n dados['aposentadoria'] = dados['idade'] + ((dados['contratação'] + 35) - datetime.now().year)\nprint(dados)", "Nome:otavio\nAno de nascimento: 1990\nCarteira de Trabalho (0 não tem): 0\n{'nome': 'otavio', 'idade': 30, 'ctps': 0}\n" ], [ "#Crie um programa que gerencie o aproveitamento de um jogador de futebol. O programa vai ler o nome do jogador e quantas partidas ele jogou. Depois vai ler a quantidade de gols feitos em cada partida. No final, tudo isso será guardado em um dicionário, incluindo o total de gols feitos durante o campeonato.\n\njogador = {}\njogador['nome'] =str(input('Nome do jogador:'))\ntot = int(input(f'Quantas partidas {jogador[\"nome\"]} jogou: '))\n\npartidas = []\nfor c in range(tot):\n partidas.append(int(input(f'Quantos gols na partida {c+1}:')))\n\njogador['gols'] = partidas[:]\njogador['total'] = sum(partidas)\nprint(jogador)", "Nome do jogador:otavio\nQuantas partidas otavio jogou: 5\nQuantos gols na partida 1:2\nQuantos gols na partida 2:0\nQuantos gols na partida 3:1\nQuantos gols na partida 4:3\nQuantos gols na partida 5:0\n{'nome': 'otavio', 'gols': [2, 0, 1, 3, 0], 'total': 6}\n" ], [ "#Crie um programa que leia nome, sexo e idade de várias pessoas, guardando os dados de cada pessoa em um dicionário e todos os dicionários em uma lista. No final, mostre: (a)Quantas pessoas cadastradas. (b)A média de idade. (c)Uma lista com mulheres. (d)Uma lista com idade acima da média. \ngalera = []\npessoa = {}\nsoma = media = 0\nwhile True:\n pessoa.clear\n pessoa['nome'] = str(input('Nome:'))\n while True:\n pessoa['sexo'] = str(input('Sexo: [M/F]')).upper()[0]\n if pessoa['sexo'] in 'MmFf':\n break\n print('ERRO! Por favor, digite apenas M ou F.')\n pessoa['idade'] = int(input('Idade:'))\n soma = soma + pessoa['idade'] \n galera.append(pessoa.copy())\n while True:\n resp = str(input('Quer continuar? [S/N]')).upper()[0]\n if resp in 'SN':\n break\n print('ERRO! 
Responda apenas S ou N.')\n if resp == 'N':\n break\nprint(galera)\nprint(f'Ao todo temos {len(galera)} pessoas cadastradas.')\nmedia = soma / len(galera)\nprint(f'A média de idade é de {media} anos.')\nprint(f'As mulheres cadastradas foram', end='')\nfor p in galera:\n if p['sexo'] == 'F':\n print(f'{p[\"nome\"]}', end='')\nprint()\nprint('Lista das pessoas que estão acima da média: ')\nfor p in galera:\n if p['idade'] >= media:\n print(' ')\n for k,v in p.items():\n print(f'{k} = {v}', end='')\n print()", "Nome:otavio\nSexo: [M/F]m\nIdade:30\nQuer continuar? [S/N]s\nNome:miguel\nSexo: [M/F]m\nIdade:45\nQuer continuar? [S/N]s\nNome:ana\nSexo: [M/F]f\nIdade:10\nQuer continuar? [S/N]n\n[{'nome': 'otavio', 'sexo': 'M', 'idade': 30}, {'nome': 'miguel', 'sexo': 'M', 'idade': 45}, {'nome': 'ana', 'sexo': 'F', 'idade': 10}]\nAo todo temos 3 pessoas cadastradas.\nA média de idade é de 28.333333333333332 anos.\nAs mulheres cadastradas foramana\nLista das pessoas que estão acima da média: \n \nnome = otaviosexo = Midade = 30\n \nnome = miguelsexo = Midade = 45\n" ], [ "#6. \n\nmatriz1 = []\nmatriz2 = []\nmatriz3 = []\n\nfor i in range(4):\n linha1 = []\n linha2 = []\n linha3 = []\n for j in range(4):\n valor1 = (int(input(f'Matriz 1 posição [{i+1},{j+1}:]')))\n linha1.append(valor1)\n valor2 = (int(input(f'Matriz 2 posição [{i+1},{j+1}:]')))\n linha2.append(valor2)\n if valor1 == valor2:\n linha3.append(valor1)\n elif valor1 > valor2:\n linha3.append(valor1)\n elif valor1 < valor2:\n linha3.append(valor2)\n matriz1.append(linha1[:])\n matriz2.append(linha2[:])\n matriz3.append(linha3[:])\nprint(matriz1)\nprint(matriz2)\nprint(matriz3)", "Matriz 1 posição [1,1:]1\nMatriz 2 posição [1,1:]2\nMatriz 1 posição [1,2:]3\nMatriz 2 posição [1,2:]4\nMatriz 1 posição [1,3:]5\nMatriz 2 posição [1,3:]6\nMatriz 1 posição [1,4:]7\nMatriz 2 posição [1,4:]8\nMatriz 1 posição [2,1:]9\nMatriz 2 posição [2,1:]1\nMatriz 1 posição [2,2:]2\nMatriz 2 posição [2,2:]3\nMatriz 1 posição [2,3:]4\nMatriz 2 posição [2,3:]5\nMatriz 1 posição [2,4:]6\nMatriz 2 posição [2,4:]7\nMatriz 1 posição [3,1:]8\nMatriz 2 posição [3,1:]9\nMatriz 1 posição [3,2:]1\nMatriz 2 posição [3,2:]2\nMatriz 1 posição [3,3:]3\nMatriz 2 posição [3,3:]4\nMatriz 1 posição [3,4:]5\nMatriz 2 posição [3,4:]6\nMatriz 1 posição [4,1:]7\nMatriz 2 posição [4,1:]8\nMatriz 1 posição [4,2:]9\nMatriz 2 posição [4,2:]1\nMatriz 1 posição [4,3:]2\nMatriz 2 posição [4,3:]3\nMatriz 1 posição [4,4:]4\nMatriz 2 posição [4,4:]5\n[[1, 3, 5, 7], [9, 2, 4, 6], [8, 1, 3, 5], [7, 9, 2, 4]]\n[[2, 4, 6, 8], [1, 3, 5, 7], [9, 2, 4, 6], [8, 1, 3, 5]]\n[[2, 4, 6, 8], [9, 3, 5, 7], [9, 2, 4, 6], [8, 9, 3, 5]]\n" ], [ "#7. \n\nmatriz = []\nfor i in range(10):\n linha = []\n for j in range(10):\n if i < j:\n valor = (2*i) + (7*j) - 2\n linha.append(valor)\n elif i == j:\n valor = (3*(i**2)) - 1\n linha.append(valor)\n elif i > j:\n valor = (4*(i**3)) - (5*(j**2)) + 1\n linha.append(valor)\n print(linha)\n matriz.append(linha[:])", "[-1, 5, 12, 19, 26, 33, 40, 47, 54, 61]\n[5, 2, 14, 21, 28, 35, 42, 49, 56, 63]\n[33, 28, 11, 23, 30, 37, 44, 51, 58, 65]\n[109, 104, 89, 26, 32, 39, 46, 53, 60, 67]\n[257, 252, 237, 212, 47, 41, 48, 55, 62, 69]\n[501, 496, 481, 456, 421, 74, 50, 57, 64, 71]\n[865, 860, 845, 820, 785, 740, 107, 59, 66, 73]\n[1373, 1368, 1353, 1328, 1293, 1248, 1193, 146, 68, 75]\n[2049, 2044, 2029, 2004, 1969, 1924, 1869, 1804, 191, 77]\n[2917, 2912, 2897, 2872, 2837, 2792, 2737, 2672, 2597, 242]\n" ], [ "#8. 
\n\nmatriz = []\nsoma = []\nfor i in range(3):\n linha = []\n for j in range(3):\n valor = int(input(f'Informe um valor para a posição [{i+1},{j+1}]'))\n linha.append(valor)\n if j > i:\n soma.append(valor)\n matriz.append(linha[:]) \nprint(matriz)\ns = sum(soma)\nprint(f'A soma dos valores acima da diagonal principal é {s}.')", "Informe um valor para a posição [1,1]0\nInforme um valor para a posição [1,2]1\nInforme um valor para a posição [1,3]2\nInforme um valor para a posição [2,1]3\nInforme um valor para a posição [2,2]4\nInforme um valor para a posição [2,3]5\nInforme um valor para a posição [3,1]6\nInforme um valor para a posição [3,2]7\nInforme um valor para a posição [3,3]8\n[[0, 1, 2], [3, 4, 5], [6, 7, 8]]\nA soma dos valores acima da diagonal principal é 8.\n" ], [ "#9.\n\nmatriz = []\nsoma = []\nfor i in range(3):\n linha = []\n for j in range(3):\n valor = int(input(f'Informe um valor para a posição [{i+1}.{j+1}]'))\n linha.append(valor)\n if i > j:\n soma.append(valor)\n matriz.append(linha[:])\nprint(matriz)\ns = sum(soma)\nprint(f'A soma dos valores abaixo da diagonal principal é {s}.')", "Informe um valor para a posição [1.1]1\nInforme um valor para a posição [1.2]2\nInforme um valor para a posição [1.3]3\nInforme um valor para a posição [2.1]4\nInforme um valor para a posição [2.2]5\nInforme um valor para a posição [2.3]0\nInforme um valor para a posição [3.1]-10\nInforme um valor para a posição [3.2]-9\nInforme um valor para a posição [3.3]1\n[[1, 2, 3], [4, 5, 0], [-10, -9, 1]]\nA soma dos valores abaixo da diagonal principal é -15.\n" ], [ "#10.\n\nmatriz = []\nsoma = []\nfor i in range(3):\n linha = []\n for j in range(3):\n valor = int(input(f'Informe um valor para a posição [{i+1},{j+1}]'))\n linha.append(valor)\n if i == j:\n soma.append(valor)\n matriz.append(linha[:])\nprint(matriz)\ns = sum(soma)\nprint(f'A soma dos valores da diagonal principal é {s}.')", "Informe um valor para a posição [1,1]0\nInforme um valor para a posição [1,2]2\nInforme um valor para a posição [1,3]9\nInforme um valor para a posição [2,1]1\nInforme um valor para a posição [2,2]8\nInforme um valor para a posição [2,3]3\nInforme um valor para a posição [3,1]7\nInforme um valor para a posição [3,2]4\nInforme um valor para a posição [3,3]6\n[[0, 2, 9], [1, 8, 3], [7, 4, 6]]\nA soma dos valores da diagonal principal é 14.\n" ], [ "#13.\n\nmatriz = []\nfor i in range(4):\n linha = []\n for j in range(4):\n valor = int(input(f'Informe um valor para a posição [{i+1},{j+1}]:'))\n linha.append(valor)\n matriz.append(linha[:])\nprint(matriz)", "Informe um valor para a posição [1,1]:1\nInforme um valor para a posição [1,2]:2\nInforme um valor para a posição [1,3]:3\nInforme um valor para a posição [1,4]:4\nInforme um valor para a posição [2,1]:5\nInforme um valor para a posição [2,2]:6\nInforme um valor para a posição [2,3]:7\nInforme um valor para a posição [2,4]:8\nInforme um valor para a posição [3,1]:9\nInforme um valor para a posição [3,2]:1\nInforme um valor para a posição [3,3]:2\nInforme um valor para a posição [3,4]:3\nInforme um valor para a posição [4,1]:4\nInforme um valor para a posição [4,2]:5\nInforme um valor para a posição [4,3]:6\nInforme um valor para a posição [4,4]:7\n[[1, 2, 3, 4], [5, 6, 7, 8], [9, 1, 2, 3], [4, 5, 6, 7]]\n" ], [ "#14. 
\n\nfrom random import randint\n\nbingo = []\nfor i in range(5):\n linha = []\n for j in range(5):\n valor = int(randint(1,99))\n linha.append(valor)\n bingo.append(linha[:])\nprint(bingo)", "[[70, 45, 12, 43, 91], [13, 55, 78, 47, 58], [7, 4, 85, 25, 86], [38, 2, 97, 77, 54], [31, 92, 50, 1, 81]]\n" ], [ "#18.\n\nmatriz = []\nvetor = []\ncont1 = 0\ncont2 = 0\ncont3 = 0\nfor i in range(3):\n linha = []\n for j in range(3):\n valor = int(input(f'Informe um valor para a posição [{i+1},{j+1}]:'))\n linha.append(valor)\n if j == 0:\n cont1 = cont1 + valor\n elif j == 1:\n cont2 = cont2 + valor\n elif j == 2:\n cont3 = cont3 + valor\n matriz.append(linha)\nvetor.append(cont1)\nvetor.append(cont2)\nvetor.append(cont3)\nprint(vetor)\nprint(matriz)", "Informe um valor para a posição [1,1]:1\nInforme um valor para a posição [1,2]:2\nInforme um valor para a posição [1,3]:3\nInforme um valor para a posição [2,1]:4\nInforme um valor para a posição [2,2]:5\nInforme um valor para a posição [2,3]:6\nInforme um valor para a posição [3,1]:7\nInforme um valor para a posição [3,2]:8\nInforme um valor para a posição [3,3]:9\n[12, 15, 18]\n[[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n" ], [ "#19. FALTA LETRA C\n\nmatriz = []\nmaiornota = 0\nmedia = 0\nsomanotas = 0\nfor i in range(1, 6):\n linha = []\n for j in range(1, 5):\n if j == 1:\n matricula = int(input(f'Número de matrícula do aluno {i}:'))\n linha.append(matricula)\n if j == 2:\n prova = int(input(f'Média das provas do aluno {i}:'))\n linha.append(prova)\n if j == 3:\n trabalho = int(input(f'Média dos trabalhos do aluno {i}:'))\n linha.append(trabalho)\n if j == 4:\n notafinal = prova + trabalho\n print(f'Nota final do aluno {i}: {notafinal}')\n somanotas = somanotas + notafinal\n if notafinal > maiornota:\n maiornota = notafinal \n linha.append(notafinal)\n print('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\n matriz.append(linha[:])\nprint(f'A maior nota é {maiornota}.')\nmedia = somanotas / 5\nprint(f'Média aritmética das notas finais: {media}.')\nprint(matriz)", "Número de matrícula do aluno 1:1\nMédia das provas do aluno 1:3\nMédia dos trabalhos do aluno 1:2\nNota final do aluno 1: 5\n=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\nNúmero de matrícula do aluno 2:2\nMédia das provas do aluno 2:18\nMédia dos trabalhos do aluno 2:3\nNota final do aluno 2: 21\n=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\nNúmero de matrícula do aluno 3:3\nMédia das provas do aluno 3:4\nMédia dos trabalhos do aluno 3:6\nNota final do aluno 3: 10\n=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\nNúmero de matrícula do aluno 4:9\nMédia das provas do aluno 4:5\nMédia dos trabalhos do aluno 4:6\nNota final do aluno 4: 11\n=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\nNúmero de matrícula do aluno 5:7\nMédia das provas do aluno 5:3\nMédia dos trabalhos do aluno 5:8\nNota final do aluno 5: 11\n=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\nA maior nota é 21.\nMédia aritmética das notas finais: 11.6.\n[[1, 3, 2, 5], [2, 18, 3, 21], [3, 4, 6, 10], [9, 5, 6, 11], [7, 3, 8, 11]]\n" ], [ "#20.\n\nmatriz = []\nsoma1 = 0\nsoma2 = 0\nsoma3 = 0\nsoma4 = 0\nfor i in range(1, 4):\n linha = []\n for j in range(1, 7):\n valor = int(input(f'Valor para a posição [{i},{j}]:'))\n if j % 2 != 0:\n soma1 = soma1 + valor\n elif j == 2:\n soma2 = soma2 + valor\n elif j == 4:\n soma2 = soma2 + valor\n linha.append(valor)\n matriz.append(linha[:])\nmedia = soma2 / 6\nprint(f'Soma dos elementos das colunas ímpares: {soma1}')\nprint(f'Média dos elementos da segunda e quarta coluna: 
{media}')\nprint(matriz)", "Valor para a posição [1,1]:1\nValor para a posição [1,2]:2\nValor para a posição [1,3]:3\nValor para a posição [1,4]:4\nValor para a posição [1,5]:5\nValor para a posição [1,6]:6\nValor para a posição [2,1]:7\nValor para a posição [2,2]:8\nValor para a posição [2,3]:9\nValor para a posição [2,4]:0\nValor para a posição [2,5]:1\nValor para a posição [2,6]:2\nValor para a posição [3,1]:3\nValor para a posição [3,2]:4\nValor para a posição [3,3]:5\nValor para a posição [3,4]:6\nValor para a posição [3,5]:7\nValor para a posição [3,6]:8\nSoma dos elementos das colunas ímpares: 41\nMédia dos elementos da segunda e quarta coluna: 4.0\n[[1, 2, 3, 4, 5, 6], [7, 8, 9, 0, 1, 2], [3, 4, 5, 6, 7, 8]]\n" ], [ "#21.\n\nmatriz1 = []\nmatriz2 = []\nmatrizsoma = []\nmatrizsub = []\nfor i in range(1, 3):\n linha1 = []\n linha2 = []\n linhasoma = []\n linhasub = []\n for j in range(1, 3):\n valor1 = float(input(f'Matriz 1 posição [{i},{j}]:'))\n valor2 = float(input(f'Matriz 2 posição [{i},{j}]:'))\n linha1.append(valor1)\n linha2.append(valor2)\n soma = valor1 + valor2\n linhasoma.append(soma)\n sub = valor1 - valor2\n linhasub.append(sub)\n matriz1.append(linha1[:])\n matriz2.append(linha2[:])\n matrizsoma.append(linhasoma)\n matrizsub.append(linhasub)\nprint('-=-=-=-=-=-=-=-=-=-=')\nprint('Matriz 1')\nprint(matriz1)\nprint('-=-=-=-=-=-=-=-=-=-=')\nprint('Matriz 2')\nprint(matriz2)\nprint('-=-=-=-=-=-=-=-=-=-=')\nprint('Matriz 1 + Matriz 2')\nprint(matrizsoma)\nprint('-=-=-=-=-=-=-=-=-=-=')\nprint('Matriz 1 - Matriz 2')\nprint(matrizsub)", "Matriz 1 posição [1,1]:1\nMatriz 2 posição [1,1]:2\nMatriz 1 posição [1,2]:3\nMatriz 2 posição [1,2]:4\nMatriz 1 posição [2,1]:5\nMatriz 2 posição [2,1]:6\nMatriz 1 posição [2,2]:7\nMatriz 2 posição [2,2]:8\n-=-=-=-=-=-=-=-=-=-=\nMatriz 1\n[[1.0, 3.0], [5.0, 7.0]]\n-=-=-=-=-=-=-=-=-=-=\nMatriz 2\n[[2.0, 4.0], [6.0, 8.0]]\n-=-=-=-=-=-=-=-=-=-=\nMatriz 1 + Matriz 2\n[[3.0, 7.0], [11.0, 15.0]]\n-=-=-=-=-=-=-=-=-=-=\nMatriz 1 - Matriz 2\n[[-1.0, -1.0], [-1.0, -1.0]]\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c56c3361e9999f105908ab1034e41c4a01dcef
6,250
ipynb
Jupyter Notebook
BoundingBoxBeautiful.ipynb
escudero/BoundingBoxBeautiful
6326bb9ad29535a8168f06146f4ad6ebf603dde8
[ "MIT" ]
null
null
null
BoundingBoxBeautiful.ipynb
escudero/BoundingBoxBeautiful
6326bb9ad29535a8168f06146f4ad6ebf603dde8
[ "MIT" ]
null
null
null
BoundingBoxBeautiful.ipynb
escudero/BoundingBoxBeautiful
6326bb9ad29535a8168f06146f4ad6ebf603dde8
[ "MIT" ]
null
null
null
46.296296
1,358
0.5432
[ [ [ "# Creating a more elegant bounding box", "_____no_output_____" ] ], [ [ "import cv2\nimport numpy as np\n\nfrom google.colab.patches import cv2_imshow", "_____no_output_____" ], [ "def draw_bbox(img, coord1, coord2, color1=(6, 253, 2), color2=(4,230,1), r=5, d=5, thickness=1):\n x1, y1 = coord1\n x2, y2 = coord2\n\n if color2 is not None:\n # Top left\n cv2.line(img, (x1 + r, y1), (x1 + r + d, y1), color2, thickness+1)\n cv2.line(img, (x1, y1 + r), (x1, y1 + r + d), color2, thickness+1)\n cv2.ellipse(img, (x1 + r, y1 + r), (r, r), 180, 0, 90, color2, thickness+1)\n\n # Top right\n cv2.line(img, (x2 - r, y1), (x2 - r - d, y1), color2, thickness+1)\n cv2.line(img, (x2, y1 + r), (x2, y1 + r + d), color2, thickness+1)\n cv2.ellipse(img, (x2 - r, y1 + r), (r, r), 270, 0, 90, color2, thickness+1)\n\n # Bottom left\n cv2.line(img, (x1 + r, y2), (x1 + r + d, y2), color2, thickness+1)\n cv2.line(img, (x1, y2 - r), (x1, y2 - r - d), color2, thickness+1)\n cv2.ellipse(img, (x1 + r, y2 - r), (r, r), 90, 0, 90, color2, thickness+1)\n\n # Bottom right\n cv2.line(img, (x2 - r, y2), (x2 - r - d, y2), color2, thickness+1)\n cv2.line(img, (x2, y2 - r), (x2, y2 - r - d), color2, thickness+1)\n cv2.ellipse(img, (x2 - r, y2 - r), (r, r), 0, 0, 90, color2, thickness+1)\n\n # Top left\n cv2.line(img, (x1 + r, y1), (x1 + r + d, y1), color1, thickness)\n cv2.line(img, (x1, y1 + r), (x1, y1 + r + d), color1, thickness)\n cv2.ellipse(img, (x1 + r, y1 + r), (r, r), 180, 0, 90, color1, thickness)\n\n # Top right\n cv2.line(img, (x2 - r, y1), (x2 - r - d, y1), color1, thickness)\n cv2.line(img, (x2, y1 + r), (x2, y1 + r + d), color1, thickness)\n cv2.ellipse(img, (x2 - r, y1 + r), (r, r), 270, 0, 90, color1, thickness)\n\n # Bottom left\n cv2.line(img, (x1 + r, y2), (x1 + r + d, y2), color1, thickness)\n cv2.line(img, (x1, y2 - r), (x1, y2 - r - d), color1, thickness)\n cv2.ellipse(img, (x1 + r, y2 - r), (r, r), 90, 0, 90, color1, thickness)\n\n # Bottom right\n cv2.line(img, (x2 - r, y2), (x2 - r - d, y2), color1, thickness)\n cv2.line(img, (x2, y2 - r), (x2, y2 - r - d), color1, thickness)\n cv2.ellipse(img, (x2 - r, y2 - r), (r, r), 0, 0, 90, color1, thickness)\n\n return img", "_____no_output_____" ], [ "img = np.ones((140, 105, 3), np.uint8)*255\n\nimg = draw_bbox(img, (5,5), (100,30), color1=(6, 253, 2), color2=(4,230,1), r=5, d=4, thickness=1)\n\nimg = draw_bbox(img, (5,40), (100, 65), color1=(6, 253, 2), color2=None, r=5, d=4, thickness=1)\n\nimg = draw_bbox(img, (5,75), (100,100), color1=(255,186,143), color2=(205,80,42), r=5, d=4, thickness=1)\n\nimg = draw_bbox(img, (5,110), (100,135), color1=(109,114,210), color2=(10,2,122), r=5, d=4, thickness=1)\n\ncv2_imshow(img)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
e7c57d9df674e0f37c21fcc82092f0dbcc5eb43f
24,163
ipynb
Jupyter Notebook
chapter4.ipynb
hangulu/ctci
c40e0bb768cb3b7c3b3c3c053ae2d26a026b07e5
[ "Apache-2.0" ]
2
2020-12-23T01:18:35.000Z
2022-03-10T06:25:31.000Z
chapter4.ipynb
LEL-15/ctci
c40e0bb768cb3b7c3b3c3c053ae2d26a026b07e5
[ "Apache-2.0" ]
null
null
null
chapter4.ipynb
LEL-15/ctci
c40e0bb768cb3b7c3b3c3c053ae2d26a026b07e5
[ "Apache-2.0" ]
3
2020-12-23T01:20:23.000Z
2021-11-13T18:10:14.000Z
34.716954
345
0.530439
[ [ [ "## Chapter 4: Trees and Graphs", "_____no_output_____" ] ], [ [ "# Adjancency list graph\nclass Graph():\n def __init__(self):\n self.nodes = []\n def add(node):\n self.nodes.append(node)\nclass Node():\n def __init__(self, name, adjacent=[],marked=False):\n self.name = name\n self.adjacent = adjacent\n self.marked = marked", "_____no_output_____" ] ], [ [ "#### 4.1 Route Between Nodes\nGiven a directed graph, design an algorithm to find out whether there is a route between two nodes.", "_____no_output_____" ] ], [ [ "# Bidirectional breadth-first search.\ndef route(node1, node2):\n # Create the queue\n queue1 = []\n queue2 = []\n node1.marked = True\n node2.marked = True\n queue1.append(node1)\n queue2.append(node2)\n \n while (len(queue1) > 0) and (len(queue2) > 0):\n n1 = queue1.pop(0)\n n2 = queue2.pop(0)\n if n1 == n2:\n return True\n queue1 = check_neighbor(node1, queue1)\n queue2 = check_neighbor(node2, queue2)\n\ndef check_neighbor(node, queue):\n for neighbor in node.adjacent:\n if (neighbor.marked == False):\n neighbor.marked = True\n queue.append(neighbor)\n return queue", "_____no_output_____" ], [ "test = []\nwhile test:\n print(True)", "_____no_output_____" ] ], [ [ "#### 4.2 Minimal Tree\nGiven a sorted (increasing order) array with unique integer elements, write an algorithm to create a binary search tree with minimal height.", "_____no_output_____" ] ], [ [ "class Node():\n def __init__(self, data, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n\ndef minimal(array):\n start = 0\n end = len(array) - 1\n if end < start:\n return None\n mid = int((start + end) / 2)\n # Recursively place the nodes\n return Node(mid, minimal(array, start, mid - 1), minimal(array, mid + 1, end))", "_____no_output_____" ] ], [ [ "#### 4.3 List of Depths\nGiven a binary tree, design an algorithm which creates a linked list of all the nodes at each depth (e.g., if you have a tree with depth D, you'll have D linked lists).", "_____no_output_____" ] ], [ [ "class LinkedList():\n def __init__(self, data):\n self.data = data\n self.next = None\n def insert(node):\n self.next = node\n\ndef list_of_depths(root, full_list, level):\n if root is None:\n return\n lnkedlst = LinkedList(None)\n if len(full_list) == level:\n lnkedlst = LinkedList(None)\n full_list.append(lnkedlst)\n else:\n lnkedlst = full_list[level]\n lnkedlst.insert(root)\n list_of_depths(root.left, full_list, level + 1)\n list_of_depths(root.right, full_list, level + 1)\n\ndef levels_linked(root):\n full_list = []\n list_of_depths(root, full_list, 0)\n return full_list", "_____no_output_____" ] ], [ [ "#### 4.4 Check Balanced\nImplement a function to check if a binary tree is balanced. For the purposes of this question, a balanced tree is defined to be a tree such that the heights of the two subtrees of any node never differ by more than one.", "_____no_output_____" ] ], [ [ "def check_height(root):\n # Define an error code to return if a sub-tree is not balanced\n min_int = -sys.maxsize - 1\n # The height of the null tree is -1\n if root is None:\n return -1\n # Check the height of the left sub-tree. If it's min-int, it is unbalanced\n left_height = check_height(root.left)\n if left_height == min_int:\n return min_int\n # Check the height of the right sub-tree\n right_height = check_height(root.right)\n if right_height == min_int:\n return min_int\n # Calculate the height difference. If it's more than one, return min_int (the error code). 
Else, return the height of the balanced sub-tree + 1\n height_diff = left_height - right_height\n if abs(height_diff) > 1:\n return min_int\n else:\n return max(left_height, right_height) + 1\n\ndef is_balanced(root):\n return check_height(root) != (-sys.maxsize - 1)", "_____no_output_____" ] ], [ [ "#### 4.5 Validate BST\nImplement a function to check if a binary tree is a binary search tree.", "_____no_output_____" ] ], [ [ "# A binary tree is a binary search tree if the left child is lower than the parent node which is lower than the right node (pre-order traversal)\ndef validate_bst(root):\n to_visit = [root]\n while len(to_visit) > 0:\n node = to_visit.pop(0)\n if node.left.data is not None:\n if node.data <= node.left.data:\n return False\n else:\n to_visit.append(node.left)\n if node.right.data is not None:\n if node.data > node.right.data:\n return False\n to_visit.append(node.right)\n return True", "_____no_output_____" ], [ "# The above method only works for checking the node and it's children, without checking against the values in the rest of the tree. Below corrects for that.\ndef val_bst(root):\n # Call the recursive function\n return validate_helper(root, None, None)\n\ndef validate_helper(node, mini, maxi):\n # Return True if the node is None\n if node is None:\n return True\n # A given node cannot be greater than the recorded maximum, or less than the recorded minimum\n # If the minimum has been set and the node is less than or equal to the minimum, return False.\n # If the maximum has been set and the node is greater than or equal to the maximum, return False\n if (mini is not None and node.data <= mini) or (maxi is not None and node.data > maxi):\n return False\n # If either sub-tree is False, return False\n if (not validate_helper(node.left, mini, node.data)) or (not validate_helper(node.right, node.data, maxi)):\n return False\n return True", "_____no_output_____" ] ], [ [ "#### 4.6 Successor\nWrite an algorithm to find the \"next\" node (i.e., in-order successor) of a given node in a binary search tree. You may assume that each node has a link to its parent.", "_____no_output_____" ] ], [ [ "def successor(node):\n if node is None:\n return None\n # If the node has a right sub-tree, return the leftmost node in that sub-tree\n if node.right is not None:\n return leftmost(node.right)\n else:\n q = node\n # Find node's parent\n x = q.parent # Not implemented in my Node class, but an assumption from the question\n # Iterate until node is not the left child of its parent (left -> current -> right)\n while x is not None and x.left != q:\n q = x\n x = x.parent\n return x\n\ndef leftmost(node):\n if node is None:\n return None\n while node.left is not None:\n node = node.left\n return node", "_____no_output_____" ] ], [ [ "#### 4.7 Build Order\nYou are given a list of projects and a list of dependencies (which is a list of pairs of projects, where the second project is dependent on the first project). All of a project'sdependencies must be built before the project is. Find a build order that will allow the projects to be built. 
If there is no valid build order, return an error.", "_____no_output_____" ] ], [ [ "# Topological sort using DFS\n\n# Store the possible states of nodes\nclass State():\n BLANK = 0\n PARTIAL = 1\n COMPLETED = 2\n\n# Create a class to store each vertex\nclass Vertex():\n # Store the vertex's data, state and adjacent vertices\n def __init__(self, key):\n self.id = key\n self.adj = set()\n self.state = State.BLANK\n # Add an edge if it does not already exist\n def add_edge(self, proj):\n self.adj.add(proj)\n def get_edges(self):\n return self.adj\n def get_id(self):\n return self.id\n def set_state(self, state):\n self.state = state\n def get_state(self):\n return self.state\n\n# Create a class to store the entire graph\nclass Graph():\n # Store a dict of vertices and the number of them\n def __init__(self):\n self.vertices = dict() # key = id, value = vertex\n self.num = 0\n # Add vertices by using the dictionary to map from the id to the Vertex object\n def add_vertex(self, key):\n self.num += 1\n self.vertices[key] = Vertex(key)\n # Retrieve vertices by their keys\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n def __contains__(self, data):\n return data in self.vertices\n # Add an edge to the vertices list if it doesn't exist there\n def add_edge(self, end1, end2):\n if end1 not in self.vertices:\n self.add_vertex(end1)\n if end2 not in self.vertices:\n self.add_vertex(end2)\n # Connect the first end to the second end\n self.vertices[end1].add_edge(self.vertices[end2])\n def get_vertices(self):\n return self.vertices.keys()\n # Create an iterable for the graph\n def __iter__(self):\n return iter(self.vertices.values())\n # Reset all the states to BLANK\n def reset_states(self):\n for proj in iter(self):\n proj.set_state(State.BLANK)\n \ndef populate_result(graph):\n result = []\n for proj in iter(graph):\n if not dfs(result, proj):\n return None\n return result\n\n# Recursive DFS\ndef dfs(result, proj):\n if proj.get_state() == State.PARTIAL:\n return False\n # If the state of the current project is BLANK, visit\n if proj.get_state() == State.BLANK:\n proj.set_state(State.PARTIAL)\n # For every vertex in proj's adjacency list, perform DFS\n for adj in proj.get_edges():\n if not dfs(result, adj):\n return False\n proj.set_state(State.COMPLETED)\n # Insert the project id to the result list\n result.insert(0, proj.get_id())\n return True\n\ndef build_order(projects, dependencies):\n graph = Graph()\n for proj in projects:\n graph.add_vertex(proj)\n for to, fro in dependencies:\n graph.add_edge(fro, to)\n return populate_result(graph)", "_____no_output_____" ], [ "projects = [ \"a\", \"b\", \"c\", \"d\", \"e\", \"f\" ]\ndependencies = [ (\"d\", \"a\"), (\"b\", \"f\"), (\"d\", \"b\"), (\"a\", \"f\"), (\"c\", \"d\") ]\nprint(build_order(projects, dependencies))", "['f', 'e', 'b', 'a', 'd', 'c']\n" ] ], [ [ "#### 4.8 First Common Ancestor\nDesign an algorithm and write code to find the first common ancestor of two nodes in a binary tree. Avoid storing additional nodes in a data structure. 
NOTE: This is not necessarily a binary search tree.", "_____no_output_____" ] ], [ [ "def common_ancestor(root, node1, node2):\n # Check if both nodes are in the tree\n if (not covers(root, node1)) or (not covers(root, node2)):\n return None\n return ancestor_helper(root, node1, node2)\n\ndef ancestor_helper(root, node1, node2):\n if root is None or root == node1 or root == node2:\n return root\n node1_on_left = covers(root.left, node1)\n node2_on_left = covers(root.left, node2)\n # Check if nodes are on the same side\n if not (node1_on_left and node2_on_left):\n return root\n # Find the sub-tree of the child_node\n child_side = root.left if node1_on_left else root.right\n return ancestor_helper(child_side, node1, node2)\n\n# The tree covers the node if the node is somewhere in the stree's sub-trees\ndef covers(root, node1):\n if root is None:\n return False\n if root == node1:\n return True\n return covers(root.left, node1) or covers(root.right, node2)", "_____no_output_____" ] ], [ [ "#### 4.9 BST Sequences\nA binary search tree was created by traversing through an array from left to right and inserting each element. Given a binary search tree with distinct elements, print all possible arrays that could have led to this tree.", "_____no_output_____" ] ], [ [ "def bst_sequences(root):\n result = []\n if root is None:\n result.append([])\n return result\n prefix = []\n prefix.append(root.data)\n # Recurse on the left and right sub-trees\n left_seq = bst_sequences(root.left)\n right_seq = bst_sequences(root.right)\n # Weave together the lists\n for left in left_seq:\n for right in right_seq:\n weaved = []\n weave_lists(left, right, weaved, prefix)\n result.extend(weaved)\n return result\n\ndef weave_lists(first, second, results, prefix):\n if len(first) == 0 or len(second) == 0:\n result = prefix.copy()\n result.extend(first)\n result.extend(second)\n results.append(result)\n return\n head_first = first.pop(0)\n prefix.append(head_first)\n weave_lists(first, second, results, prefix)\n prefix.pop()\n first.insert(0, head_first)\n \n head_second = second.pop()\n prefix.append(head_second)\n weave_lists(first, second, results, prefix)\n prefix.pop()\n second.insert(0, head_second)", "_____no_output_____" ] ], [ [ "#### 4.10 Check Subtree\nT1 and T2 are two very large binary trees, with T1 much bigger than T2. Create an algorithm to determine if T2 is a subtree of T1.\nA tree T2 is a subtree of T1 if there exists a node n in T1 such that the subtree of n is identical to T2.\nThat is, if you cut off the tree at node n, the two trees would be identical.", "_____no_output_____" ] ], [ [ "# The following approach converts the trees to strings based on pre-order traversal (node -> left -> right). If one string is a sub-string of the other, it is a sub-tree\ndef contains_tree(node1, node2):\n string1 = \"\"\n string2 = \"\"\n get_order(node1, string1)\n get_order(node2, string2)\n return string2 in string1\n\ndef get_order(node, string):\n if node is None:\n # Add a null indicator\n string += \"X\"\n return\n string += (node.data + \" \") # Add the root\n # Add left and right\n get_order(node.left, string)\n get_order(node.right, string)", "_____no_output_____" ] ], [ [ "#### 4.11 Random Node\nYou are implementing a binary tree class from scratch which, in addition to insert, find, and delete, has a method getRandomNode() which returns a random node from the tree. All nodes should be equally likely to be chosen. 
Design and implement an algorithm for getRandomNode, and explain how you would implement the rest of the methods.", "_____no_output_____" ] ], [ [ "import random", "_____no_output_____" ], [ "# Create a tree class that stores the size of the tree\nclass Tree():\n def __init__(self, root=None):\n self.root = root\n self.size = 0 if self.root is None else self.root.size()\n \n def get_random():\n if root is None:\n return None\n # The index is a random number between 0 and the size of the tree\n index = random.randint(0, self.size())\n return root.get_node(index)\n \n # Insert a value into the tree\n def insert_in_order(value):\n if root is None:\n root = RandomNode(value)\n else:\n root.insert_in_order(value)\n\n# A class for each node of the tree. Stores data, left, right, and the size\nclass RandomNode():\n def __init__(self, data=None):\n self.data = data\n self.left = None\n self.right = None\n self.size = 0\n \n # Increment the size of the left until the index\n def get_node(index):\n left_size = 0 if self.left is None else self.left.size()\n if index < self.left_size:\n return self.left.get_node(index)\n elif index == self.left_size:\n return self\n else:\n return self.right.get_node(index - (left_size + 1))\n \n def insert_in_order(value):\n if value <= self.data:\n if self.left is None:\n self.left = RandomNode(value)\n else:\n self.left.insert_in_order(value)\n else:\n if self.right is None:\n self.right = RandomNode(value)\n else:\n self.right.insert_in_order(value)\n self.size += 1\n \n def size():\n return self.size\n \n def find(value):\n if value == self.data:\n return self\n elif value <= self.data:\n return self.left.find(value) if self.left is not None else None\n elif value > self.data:\n return self.right.find(value) if self.right is not None else None\n return None", "_____no_output_____" ] ], [ [ "#### 4.12 Paths with Sum\nYou are given a binary tree in which each node contains an integer value (which might be positive or negative). Design an algorithm to count the number of paths that sum to a given value. The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes).", "_____no_output_____" ] ], [ [ "def count_sum_paths(node, target_sum, running_sum, path_count):\n if node is None:\n return 0\n running_sum += node.data\n cur_sum = running_sum - target_sum\n if cur_sum in path_count:\n total_paths = path_count[cur_sum]\n else:\n total_paths = 0\n if running_sum == target_sum:\n total_paths += 1\n increment_hash(path_count, running_sum, 1)\n total_paths += count_sum_paths(node.left, target_sum, running_sum, path_count)\n total_paths += count_sum_paths(node.right, target_sum, running_sum, path_count)\n increment_hash(path_count, running_sum, -1)\n return total_paths\n\ndef increment_hash(path_count, key, delta):\n new_count = (key + delta) if key in path_count else (0 + delta)\n if new_count == 0:\n del path_count[key]\n else:\n path_count[key] = new_count", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
e7c581095a2dee162107efa88b87d647f07189ee
584
ipynb
Jupyter Notebook
laboratory/pgfgantt/pgfgantt_creator.ipynb
matt-ketk/study-hall
6a6837278daefb336643aca7b203c41cab5debcb
[ "MIT" ]
null
null
null
laboratory/pgfgantt/pgfgantt_creator.ipynb
matt-ketk/study-hall
6a6837278daefb336643aca7b203c41cab5debcb
[ "MIT" ]
null
null
null
laboratory/pgfgantt/pgfgantt_creator.ipynb
matt-ketk/study-hall
6a6837278daefb336643aca7b203c41cab5debcb
[ "MIT" ]
null
null
null
16.685714
34
0.525685
[ [ [ "# pgfgantt creator", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
e7c5a3b78c4fa13f9311f33947b82afc72bfeaf0
221,121
ipynb
Jupyter Notebook
ipinsights-tutorial.ipynb
avoca-dorable/aws_ipinsights
d798fce35942718f08fe7a708c3ab1367e5e3de7
[ "MIT" ]
null
null
null
ipinsights-tutorial.ipynb
avoca-dorable/aws_ipinsights
d798fce35942718f08fe7a708c3ab1367e5e3de7
[ "MIT" ]
null
null
null
ipinsights-tutorial.ipynb
avoca-dorable/aws_ipinsights
d798fce35942718f08fe7a708c3ab1367e5e3de7
[ "MIT" ]
null
null
null
58.220379
19,408
0.638203
[ [ [ "# An Introduction to the Amazon SageMaker IP Insights Algorithm\n#### Unsupervised anomaly detection for susicipous IP addresses\n\n-------\n1. [Introduction](#Introduction)\n2. [Setup](#Setup)\n3. [Training](#Training)\n4. [Inference](#Inference)\n5. [Epilogue](#Epilogue)\n\n## Introduction\n-------\n\nThe Amazon SageMaker IP Insights algorithm uses statistical modeling and neural networks to capture associations between online resources (such as account IDs or hostnames) and IPv4 addresses. Under the hood, it learns vector representations for online resources and IP addresses. This essentially means that if the vector representing an IP address and an online resource are close together, then it is likey for that IP address to access that online resource, even if it has never accessed it before.\n\nIn this notebook, we use the Amazon SageMaker IP Insights algorithm to train a model on synthetic data. We then use this model to perform inference on the data and show how to discover anomalies. After running this notebook, you should be able to:\n\n- obtain, transform, and store data for use in Amazon SageMaker,\n- create an AWS SageMaker training job to produce an IP Insights model,\n- use the model to perform inference with an Amazon SageMaker endpoint.\n\nIf you would like to know more, please check out the [SageMaker IP Inisghts Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/ip-insights.html). \n\n## Setup\n------\n*This notebook was tested in Amazon SageMaker Studio on a ml.t3.medium instance with Python 3 (Data Science) kernel.*\n\nOur first step is to setup our AWS credentials so that AWS SageMaker can store and access training data and model artifacts.\n\n### Select Amazon S3 Bucket\nWe first need to specify the locations where we will store our training data and trained model artifacts. ***This is the only cell of this notebook that you will need to edit.*** In particular, we need the following data:\n\n- `bucket` - An S3 bucket accessible by this account.\n- `prefix` - The location in the bucket where this notebook's input and output data will be stored. (The default value is sufficient.)", "_____no_output_____" ], [ "## A few notes if you are running the script as a test/learning with AWS free tier:\n \n1. check the doc here to make sure you only used the services (especially instance) covered by AWS free tier\n2. Dont repeat the data generation process, as S3 charged by the number of read/write.\n3. You can start with a much smaller set of users by set NUM_USERS = 100", "_____no_output_____" ] ], [ [ "!python --version", "Python 3.6.13\n" ], [ "bucket = sagemaker.Session().default_bucket()\nprefix = \"sagemaker/ipinsights-tutorial-bwx\"\nexecution_role = sagemaker.get_execution_role()\nregion = boto3.Session().region_name\n\nboto3.Session().client(\"s3\").head_bucket(Bucket=bucket)\ns3://{bucket}/{prefix}\")", "_____no_output_____" ], [ "import boto3\nimport botocore\nimport os\nimport sagemaker\n\n\nbucket = sagemaker.Session().default_bucket()\nprefix = \"sagemaker/ipinsights-tutorial-bwx\"\nexecution_role = sagemaker.get_execution_role()\nregion = boto3.Session().region_name\n\n# check if the bucket exists\ntry:\n boto3.Session().client(\"s3\").head_bucket(Bucket=bucket)\nexcept botocore.exceptions.ParamValidationError as e:\n print(\n \"Hey! You either forgot to specify your S3 bucket or you gave your bucket an invalid name!\"\n )\nexcept botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"403\":\n print(f\"Hey! 
You don't have permission to access the bucket, {bucket}.\")\n elif e.response[\"Error\"][\"Code\"] == \"404\":\n print(f\"Hey! Your bucket, {bucket}, doesn't exist!\")\n else:\n raise\nelse:\n print(f\"Training input/output will be stored in: s3://{bucket}/{prefix}\")", "Training input/output will be stored in: s3://sagemaker-us-east-1-017681292549/sagemaker/ipinsights-tutorial-bwx\n" ] ], [ [ "Next we download the modules necessary for synthetic data generation they do not exist.", "_____no_output_____" ] ], [ [ "from os import path\n\ntools_bucket = f\"jumpstart-cache-prod-{region}\" # Bucket containing the data generation module.\ntools_prefix = \"1p-algorithms-assets/ip-insights\" # Prefix for the data generation module\ns3 = boto3.client(\"s3\")\n\ndata_generation_file = \"generate_data.py\" # Synthetic data generation module\nscript_parameters_file = \"ip2asn-v4-u32.tsv.gz\"\n\nif not path.exists(data_generation_file):\n print(\"downloaded {} to S3 bucket {}\".format(data_generation_file, tools_prefix))\n s3.download_file(tools_bucket, f\"{tools_prefix}/{data_generation_file}\", data_generation_file)\n\nif not path.exists(script_parameters_file):\n \n s3.download_file(\n tools_bucket, f\"{tools_prefix}/{script_parameters_file}\", script_parameters_file\n )\n print(\"downloaded {} to S3 bucket {}\".format(script_parameters_file, tools_prefix))", "_____no_output_____" ] ], [ [ "### Dataset\n\nApache Web Server (\"httpd\") is the most popular web server used on the internet. And luckily for us, it logs all requests processed by the server - by default. If a web page requires HTTP authentication, the Apache Web Server will log the IP address and authenticated user name for each requested resource. \n\nThe [access logs](https://httpd.apache.org/docs/2.4/logs.html) are typically on the server under the file `/var/log/httpd/access_log`. From the example log output below, we see which IP addresses each user has connected with:\n\n```\n192.168.1.100 - user1 [15/Oct/2018:18:58:32 +0000] \"GET /login_success?userId=1 HTTP/1.1\" 200 476 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\"\n192.168.1.102 - user2 [15/Oct/2018:18:58:35 +0000] \"GET /login_success?userId=2 HTTP/1.1\" 200 - \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\"\n...\n```\n\nIf we want to train an algorithm to detect suspicious activity, this dataset is ideal for SageMaker IP Insights.\n\nFirst, we determine the resource we want to be analyzing (such as a login page or access to a protected file). Then, we construct a dataset containing the history of all past user interactions with the resource. We extract out each 'access event' from the log and store the corresponding user name and IP address in a headerless CSV file with two columns. The first column will contain the user identifier string, and the second will contain the IPv4 address in decimal-dot notation. \n\n```\nuser1, 192.168.1.100\nuser2, 193.168.1.102\n...\n```\n\nAs a side note, the dataset should include all access events. That means some `<user_name, ip_address>` pairs will be repeated. \n\n#### User Activity Simulation\nFor this example, we are going to simulate our own web-traffic logs. We mock up a toy website example and simulate users logging into the website from mobile devices. \n\nThe details of the simulation are explained in the script [here](./generate_data.py). 
\n\n", "_____no_output_____" ] ], [ [ "from generate_data import generate_dataset\n\n# We simulate traffic for 10,000 users. This should yield about 3 million log lines (~700 MB).\nNUM_USERS = 10000\nlog_file = \"ipinsights_web_traffic.log\"\ngenerate_dataset(NUM_USERS, log_file)\n\n# Visualize a few log lines\n!head $log_file", "_____no_output_____" ] ], [ [ "### Prepare the dataset\nNow that we have our logs, we need to transform them into a format that IP Insights can use. As we mentioned above, we need to:\n1. Choose the resource which we want to analyze users' history for\n2. Extract our users' usage history of IP addresses\n3. In addition, we want to separate our dataset into a training and test set. This will allow us to check for overfitting by evaluating our model on 'unseen' login events.\n\nFor the rest of the notebook, we assume that the Apache Access Logs are in the Common Log Format as defined by the [Apache documentation](https://httpd.apache.org/docs/2.4/logs.html#accesslog). We start with reading the logs into a Pandas DataFrame for easy data exploration and pre-processing.", "_____no_output_____" ] ], [ [ "log_file = \"ipinsights_web_traffic.log\"\n", "_____no_output_____" ], [ "import pandas as pd\n\ndf = pd.read_csv(\n log_file,\n sep=\" \",\n na_values=\"-\",\n header=None,\n names=[\n \"ip_address\",\n \"rcf_id\",\n \"user\",\n \"timestamp\",\n \"time_zone\",\n \"request\",\n \"status\",\n \"size\",\n \"referer\",\n \"user_agent\",\n ],\n)\ndf.head()", "_____no_output_____" ] ], [ [ "We convert the log timestamp strings into Python datetimes so that we can sort and compare the data more easily. ", "_____no_output_____" ] ], [ [ "# Convert time stamps to DateTime objects\ndf[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"], format=\"[%d/%b/%Y:%H:%M:%S\")", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "We also verify the time zones of all of the time stamps. If the log contains more than one time zone, we would need to standardize the timestamps.", "_____no_output_____" ] ], [ [ "# Check if they are all in the same timezone\nnum_time_zones = len(df[\"time_zone\"].unique())\nnum_time_zones", "_____no_output_____" ] ], [ [ "As we see above, there is only one value in the entire `time_zone` column. Therefore, all of the timestamps are in the same time zone, and we do not need to standardize them. We can skip the next cell and go to [1. Selecting a Resource](#1.-Select-Resource).\n\nIf there is more than one time_zone in your dataset, then we parse the timezone offset and update the corresponding datetime object. \n\n**Note:** The next cell takes about 5-10 minutes to run.", "_____no_output_____" ] ], [ [ "from datetime import datetime", "_____no_output_____" ], [ "# from datetime import datetime\nimport pytz\n\n\ndef apply_timezone(row):\n tz = row[1]\n tz_offset = int(tz[:3]) * 60 # Hour offset\n tz_offset += int(tz[3:5]) # Minutes offset\n return row[0].replace(tzinfo=pytz.FixedOffset(tz_offset))\n\n\nif num_time_zones > 1:\n df[\"timestamp\"] = df[[\"timestamp\", \"time_zone\"]].apply(apply_timezone, axis=1)", "_____no_output_____" ] ], [ [ "#### 1. Select Resource\nOur goal is to train an IP Insights algorithm to analyze the history of user logins such that we can predict how suspicious a login event is. \n\nIn our simulated web server, the server logs a `GET` request to the `/login_success` page everytime a user successfully logs in. We filter our Apache logs for `GET` requests for `/login_success`. 
We also filter for requests that have a `status == 200`, to ensure that the page request was well formed. \n\n**Note:** Every web server handles logins differently. For your dataset, determine which resource you will need to be analyzing to correctly frame this problem. Depending on your use case, you may need to do more data exploration and preprocessing.", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ], [ "df = df[(df[\"request\"].str.startswith(\"GET /login_success\")) & (df[\"status\"] == 200)]", "_____no_output_____" ] ], [ [ "#### 2. Extract Users and IP address\nNow that our DataFrame only includes log events for the resource we want to analyze, we extract the relevant fields to construct an IP Insights dataset.\n\nIP Insights takes in a headerless CSV file with two columns: an entity (username) ID string and the IPv4 address in decimal-dot notation. Fortunately, the Apache Web Server Access Logs output IP addresses and authenticated usernames in their own columns.\n\n**Note:** Each website handles user authentication differently. If the Access Log does not output an authenticated user, you could explore the website's query strings or work with your website developers on another solution.", "_____no_output_____" ] ], [ [ "df = df[[\"user\", \"ip_address\", \"timestamp\"]]", "_____no_output_____" ] ], [ [ "#### 3. Create training and test dataset\nAs part of training a model, we want to evaluate how it generalizes to data it has never seen before.\n\nTypically, you create a test set by reserving a random percentage of your dataset and evaluating the model after training. However, for machine learning models that make future predictions on historical data, we want to use out-of-time testing. Instead of randomly sampling our dataset, we split our dataset into two contiguous time windows. The first window is the training set, and the second is the test set. \n\nWe first look at the time range of our dataset to select a date to use as the partition between the training and test set.", "_____no_output_____" ] ], [ [ "df[\"timestamp\"].describe()", "/usr/local/lib/python3.6/site-packages/ipykernel_launcher.py:1: FutureWarning: Treating datetime data as categorical rather than numeric in `.describe` is deprecated and will be removed in a future version of pandas. Specify `datetime_is_numeric=True` to silence this warning and adopt the future behavior now.\n  \"\"\"Entry point for launching an IPython kernel.\n" ] ], [ [ "We have login events for 10 days. Let's take the first week (7 days) of data as training and then use the last 3 days for the test set.", "_____no_output_____" ] ], [ [ "time_partition = (\n    datetime(2018, 11, 11, tzinfo=pytz.FixedOffset(0))\n    if num_time_zones > 1\n    else datetime(2018, 11, 11)\n)\n\ntrain_df = df[df[\"timestamp\"] <= time_partition]\ntest_df = df[df[\"timestamp\"] > time_partition]", "_____no_output_____" ] ], [ [ "Now that we have our training dataset, we shuffle it. \n\nShuffling improves the model's performance since SageMaker IP Insights uses stochastic gradient descent. This ensures that login events for the same user are less likely to occur in the same mini batch. 
This allows the model to improve its performance in between predictions of the same user, which will improve training convergence.", "_____no_output_____" ] ], [ [ "# Shuffle train data\ntrain_df = train_df.sample(frac=1)\ntrain_df.head()", "_____no_output_____" ], [ "# Check the shape of the training and test sets\nprint(train_df.shape, test_df.shape)", "(2107898, 3) (904736, 3)\n" ] ], [ [ "### Store Data on S3", "_____no_output_____" ], [ "Now that we have simulated (or scraped) our dataset, we have to prepare it and upload it to S3.\n\nWe will be doing local inference, therefore we don't need to upload our test dataset.", "_____no_output_____" ] ], [ [ "# Output dataset as headerless CSV\ntrain_data = train_df.to_csv(index=False, header=False, columns=[\"user\", \"ip_address\"])", "_____no_output_____" ], [ "train_data_file = \"train.csv\"\nkey = os.path.join(prefix, \"train\", train_data_file)\ns3_train_data = f\"s3://{bucket}/{key}\"\n\nprint(f\"Uploading data to: {s3_train_data}\")\nboto3.resource(\"s3\").Bucket(bucket).Object(key).put(Body=train_data)\n\n# Configure SageMaker IP Insights Input Channels\ninput_data = {\n    \"train\": sagemaker.session.s3_input(\n        s3_train_data, distribution=\"FullyReplicated\", content_type=\"text/csv\"\n    )\n}", "Uploading data to: s3://sagemaker-us-east-1-017681292549/sagemaker/ipinsights-tutorial-bwx/train/train.csv\n" ] ], [ [ "## Training\n---\nOnce the data is preprocessed and available in the necessary format, the next step is to train our model on the data. There are a number of parameters required by the SageMaker IP Insights algorithm to configure the model and define the computational environment in which training will take place. The first of these is to point to a container image which holds the algorithm's training and hosting code:", "_____no_output_____" ] ], [ [ "from sagemaker.amazon.amazon_estimator import get_image_uri\n\nimage = get_image_uri(boto3.Session().region_name, \"ipinsights\")", "The method get_image_uri has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\nDefaulting to the only supported framework/algorithm version: 1. Ignoring framework/algorithm version: 1.\n" ] ], [ [ "Then, we need to determine the training cluster to use. The IP Insights algorithm supports both CPU and GPU training. We recommend using GPU machines as they will train faster. However, when the size of your dataset increases, it can become more economical to use multiple CPU machines running with distributed training. See [Recommended Instance Types](https://docs.aws.amazon.com/sagemaker/latest/dg/ip-insights.html#ip-insights-instances) for more details. \n\n### Training Job Configuration\n- **train_instance_type**: the instance type to train on. We recommend `p3.2xlarge` for single GPU, `p3.8xlarge` for multi-GPU, and `m5.2xlarge` if using distributed training with CPU;\n- **train_instance_count**: the number of worker nodes in the training cluster.\n\nWe also need to configure SageMaker IP Insights-specific hyperparameters:\n\n### Model Hyperparameters\n- **num_entity_vectors**: the total number of embeddings to train. We use an internal hashing mechanism to map the entity ID strings to an embedding index; therefore, using an embedding size larger than the total number of possible values helps reduce the number of hash collisions. We recommend this value to be 2x the total number of unique entities (i.e., user names) in your dataset;\n- **vector_dim**: the size of the entity and IP embedding vectors. 
The larger the value, the more information can be encoded using these representations, but vector representations that are too large may cause the model to overfit, especially for small training datasets;\n- **num_ip_encoder_layers**: the number of layers in the IP encoder network. The larger the number of layers, the higher the model's capacity to capture patterns among IP addresses. However, a large number of layers increases the chance of overfitting. `num_ip_encoder_layers=1` is a good value to start experimenting with;\n- **random_negative_sampling_rate**: the number of randomly generated negative samples to produce per positive sample; `random_negative_sampling_rate=1` is a good value to start experimenting with;\n    - Random negative samples are produced by drawing each octet from a uniform distribution over [0, 255];\n- **shuffled_negative_sampling_rate**: the number of shuffled negative samples to produce per positive sample; `shuffled_negative_sampling_rate=1` is a good value to start experimenting with;\n    - Shuffled negative samples are produced by shuffling the accounts within a batch;\n\n### Training Hyperparameters\n- **epochs**: the number of epochs to train. Increase this value if you continue to see the accuracy and cross entropy improving over the last few epochs;\n- **mini_batch_size**: the number of examples in each mini-batch. A smaller number improves convergence with stochastic gradient descent, but a larger number is necessary when using shuffled negative sampling to avoid sampling the wrong account for a negative sample;\n- **learning_rate**: the learning rate for the Adam optimizer (try values in the range [0.001, 0.1]). A learning rate that is too large may cause the model to diverge, since training would be likely to overshoot a minimum; a learning rate that is too small slows down convergence;\n- **weight_decay**: the L2 regularization coefficient. Regularization is required to prevent the model from overfitting the training data, but too large a value will prevent the model from learning anything;\n\nFor more details, see [Amazon SageMaker IP Insights (Hyperparameters)](https://docs.aws.amazon.com/sagemaker/latest/dg/ip-insights-hyperparameters.html). Additionally, good values for most of these hyperparameters can be found automatically using SageMaker Automatic Model Tuning; see [Amazon SageMaker IP Insights (Model Tuning)](https://docs.aws.amazon.com/sagemaker/latest/dg/ip-insights-tuning.html) for more details. 
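As a rough worked example of how the sampling hyperparameters interact: with `mini_batch_size=1000`, `random_negative_sampling_rate=5`, and `shuffled_negative_sampling_rate=1`, each batch of 1,000 real login events is augmented with roughly 5,000 random and 1,000 shuffled negative samples, so the model sees about 7,000 labeled examples per optimization step.

The next cell sets these hyperparameters by hand, but SageMaker Automatic Model Tuning can also search for good values. The following sketch is only an illustration and is not executed in this notebook: it assumes the `ip_insights` estimator defined in the next cell, a `validation` channel added to `input_data`, and the `validation:discriminator_auc` objective metric described in the IP Insights model-tuning documentation linked above; the parameter ranges are arbitrary starting points.

```python
from sagemaker.tuner import ContinuousParameter, HyperparameterTuner, IntegerParameter

# Illustrative search space over a few of the hyperparameters described above
hyperparameter_ranges = {
    "vector_dim": IntegerParameter(64, 512),
    "learning_rate": ContinuousParameter(0.001, 0.1),
    "random_negative_sampling_rate": IntegerParameter(0, 10),
}

tuner = HyperparameterTuner(
    estimator=ip_insights,  # the Estimator configured in the next cell
    objective_metric_name="validation:discriminator_auc",
    objective_type="Maximize",
    hyperparameter_ranges=hyperparameter_ranges,
    max_jobs=6,
    max_parallel_jobs=2,
)

# tuner.fit(input_data)  # requires both a "train" and a "validation" channel
```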
", "_____no_output_____" ] ], [ [ "# Set up the estimator with training job configuration\nip_insights = sagemaker.estimator.Estimator(\n image,\n execution_role,\n instance_count=1,\n #instance_type=\"ml.p3.2xlarge\",\n instance_type = 'ml.m5.xlarge',\n output_path=f\"s3://{bucket}/{prefix}/output\",\n sagemaker_session=sagemaker.Session(),\n)\n\n# Configure algorithm-specific hyperparameters\nip_insights.set_hyperparameters(\n num_entity_vectors=\"20000\",\n random_negative_sampling_rate=\"5\",\n vector_dim=\"128\",\n mini_batch_size=\"1000\",\n epochs=\"5\",\n learning_rate=\"0.01\",\n)\n\n# Start the training job (should take about ~1.5 minute / epoch to complete)\nip_insights.fit(input_data)", "2021-12-30 01:57:12 Starting - Starting the training job...\n2021-12-30 01:57:36 Starting - Launching requested ML instancesProfilerReport-1640829432: InProgress\n......\n2021-12-30 01:58:36 Starting - Preparing the instances for training......\n2021-12-30 01:59:38 Downloading - Downloading input data...\n2021-12-30 01:59:56 Training - Downloading the training image.....\n2021-12-30 02:00:57 Training - Training image download completed. Training in progress.\u001b[34mDocker entrypoint called with argument(s): train\u001b[0m\n\u001b[34mRunning default environment configuration script\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] Reading default configuration from /opt/amazon/lib/python3.7/site-packages/algorithm/resources/default-input.json: {'batch_metrics_publish_interval': '1000', 'epochs': '10', 'learning_rate': '0.001', 'mini_batch_size': '5000', 'num_entity_vectors': '100000', 'num_ip_encoder_layers': '1', 'random_negative_sampling_rate': '1', 'shuffled_negative_sampling_rate': '1', 'vector_dim': '128', 'weight_decay': '0.00001', '_kvstore': 'auto_gpu', '_log_level': 'info', '_num_gpus': 'auto', '_num_kv_servers': 'auto', '_tuning_objective_metric': ''}\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] Merging with provided configuration from /opt/ml/input/config/hyperparameters.json: {'vector_dim': '128', 'random_negative_sampling_rate': '5', 'num_entity_vectors': '20000', 'epochs': '5', 'learning_rate': '0.01', 'mini_batch_size': '1000'}\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] Final configuration: {'batch_metrics_publish_interval': '1000', 'epochs': '5', 'learning_rate': '0.01', 'mini_batch_size': '1000', 'num_entity_vectors': '20000', 'num_ip_encoder_layers': '1', 'random_negative_sampling_rate': '5', 'shuffled_negative_sampling_rate': '1', 'vector_dim': '128', 'weight_decay': '0.00001', '_kvstore': 'auto_gpu', '_log_level': 'info', '_num_gpus': 'auto', '_num_kv_servers': 'auto', '_tuning_objective_metric': ''}\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 WARNING 140515720472384] Loggers have already been setup.\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] nvidia-smi: took 0.057 seconds to run.\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] nvidia-smi identified 0 GPUs.\u001b[0m\n\u001b[34mProcess 1 is a worker.\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] Using default worker.\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] Loaded iterator creator application/x-ndarray for content type ('application/x-ndarray', '1.0')\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] Loaded iterator creator text/csv for content type ('text/csv', '1.0')\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] Checkpoint loading and saving are 
disabled.\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] Number of GPUs being used: 0\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829658.5182247, \"EndTime\": 1640829658.5206978, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\"}, \"Metrics\": {\"initialize.time\": {\"sum\": 2.297639846801758, \"count\": 1, \"min\": 2.297639846801758, \"max\": 2.297639846801758}}}\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829658.5207944, \"EndTime\": 1640829658.520819, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\", \"Meta\": \"init_train_data_iter\"}, \"Metrics\": {\"Total Records Seen\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Total Batches Seen\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Max Records Seen Between Resets\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Max Batches Seen Between Resets\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Reset Count\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Number of Records Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Number of Batches Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}}}\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] Create Store: local\u001b[0m\n\u001b[34m[02:00:58] /opt/brazil-pkg-cache/packages/AIAlgorithmsMXNet/AIAlgorithmsMXNet-1.3.x_Cuda_10.1.x.3320.0/AL2_x86_64/generic-flavor/src/src/kvstore/./kvstore_local.h:306: Warning: non-default weights detected during kvstore pull. This call has been ignored. Please make sure to use kv.row_sparse_pull() or module.prepare() with row_ids.\u001b[0m\n\u001b[34m[02:00:58] /opt/brazil-pkg-cache/packages/AIAlgorithmsMXNet/AIAlgorithmsMXNet-1.3.x_Cuda_10.1.x.3320.0/AL2_x86_64/generic-flavor/src/src/operator/././../common/utils.h:450: Optimizer with lazy_update = True detected. Be aware that lazy update with row_sparse gradient is different from standard update, and may lead to different empirical results. 
See https://mxnet.incubator.apache.org/api/python/optimization/optimization.html for more details.\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=0 train binary_classification_accuracy <score>=0.497\u001b[0m\n\u001b[34m[12/30/2021 02:00:58 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=0 train binary_classification_cross_entropy <loss>=0.6931654663085938\u001b[0m\n\u001b[34m[12/30/2021 02:01:06 INFO 140515720472384] Epoch[0] Batch [1000]#011Speed: 133941.46 samples/sec#011binary_classification_accuracy=0.927374#011binary_classification_cross_entropy=0.198942\u001b[0m\n\u001b[34m[12/30/2021 02:01:06 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=1000 train binary_classification_accuracy <score>=0.9273736263736264\u001b[0m\n\u001b[34m[12/30/2021 02:01:06 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=1000 train binary_classification_cross_entropy <loss>=0.19894166549697861\u001b[0m\n\u001b[34m[12/30/2021 02:01:13 INFO 140515720472384] Epoch[0] Batch [2000]#011Speed: 141583.24 samples/sec#011binary_classification_accuracy=0.949645#011binary_classification_cross_entropy=0.148631\u001b[0m\n\u001b[34m[12/30/2021 02:01:13 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=2000 train binary_classification_accuracy <score>=0.9496446776611694\u001b[0m\n\u001b[34m[12/30/2021 02:01:13 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=2000 train binary_classification_cross_entropy <loss>=0.14863135603867073\u001b[0m\n\u001b[34m[12/30/2021 02:01:20 INFO 140515720472384] Epoch[0] Batch [3000]#011Speed: 141008.86 samples/sec#011binary_classification_accuracy=0.959285#011binary_classification_cross_entropy=0.125976\u001b[0m\n\u001b[34m[12/30/2021 02:01:20 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=3000 train binary_classification_accuracy <score>=0.9592845718093969\u001b[0m\n\u001b[34m[12/30/2021 02:01:20 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=3000 train binary_classification_cross_entropy <loss>=0.1259763848591709\u001b[0m\n\u001b[34m[12/30/2021 02:01:27 INFO 140515720472384] Epoch[0] Batch [4000]#011Speed: 140482.40 samples/sec#011binary_classification_accuracy=0.964791#011binary_classification_cross_entropy=0.112997\u001b[0m\n\u001b[34m[12/30/2021 02:01:27 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=4000 train binary_classification_accuracy <score>=0.9647908022994252\u001b[0m\n\u001b[34m[12/30/2021 02:01:27 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=4000 train binary_classification_cross_entropy <loss>=0.11299730495404256\u001b[0m\n\u001b[34m[12/30/2021 02:01:34 INFO 140515720472384] Epoch[0] Batch [5000]#011Speed: 141370.50 samples/sec#011binary_classification_accuracy=0.968326#011binary_classification_cross_entropy=0.104571\u001b[0m\n\u001b[34m[12/30/2021 02:01:34 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=5000 train binary_classification_accuracy <score>=0.9683259348130374\u001b[0m\n\u001b[34m[12/30/2021 02:01:34 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=5000 train binary_classification_cross_entropy <loss>=0.10457097097731333\u001b[0m\n\u001b[34m[12/30/2021 02:01:41 INFO 140515720472384] Epoch[0] Batch [6000]#011Speed: 140469.37 samples/sec#011binary_classification_accuracy=0.970816#011binary_classification_cross_entropy=0.098655\u001b[0m\n\u001b[34m[12/30/2021 02:01:41 INFO 140515720472384] 
#quality_metric: host=algo-1, epoch=0, batch=6000 train binary_classification_accuracy <score>=0.9708161973004499\u001b[0m\n\u001b[34m[12/30/2021 02:01:41 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=6000 train binary_classification_cross_entropy <loss>=0.09865453168325197\u001b[0m\n\u001b[34m[12/30/2021 02:01:48 INFO 140515720472384] Epoch[0] Batch [7000]#011Speed: 139782.67 samples/sec#011binary_classification_accuracy=0.972723#011binary_classification_cross_entropy=0.094058\u001b[0m\n\u001b[34m[12/30/2021 02:01:48 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=7000 train binary_classification_accuracy <score>=0.9727233252392515\u001b[0m\n\u001b[34m[12/30/2021 02:01:48 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=7000 train binary_classification_cross_entropy <loss>=0.09405811343460727\u001b[0m\n\u001b[34m[12/30/2021 02:01:55 INFO 140515720472384] Epoch[0] Batch [8000]#011Speed: 140263.31 samples/sec#011binary_classification_accuracy=0.974187#011binary_classification_cross_entropy=0.090596\u001b[0m\n\u001b[34m[12/30/2021 02:01:55 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=8000 train binary_classification_accuracy <score>=0.9741866016747907\u001b[0m\n\u001b[34m[12/30/2021 02:01:55 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=8000 train binary_classification_cross_entropy <loss>=0.09059624196588807\u001b[0m\n\u001b[34m[12/30/2021 02:02:03 INFO 140515720472384] Epoch[0] Batch [9000]#011Speed: 139964.99 samples/sec#011binary_classification_accuracy=0.975349#011binary_classification_cross_entropy=0.087753\u001b[0m\n\u001b[34m[12/30/2021 02:02:03 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=9000 train binary_classification_accuracy <score>=0.9753492945228308\u001b[0m\n\u001b[34m[12/30/2021 02:02:03 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=9000 train binary_classification_cross_entropy <loss>=0.08775326167614032\u001b[0m\n\u001b[34m[12/30/2021 02:02:10 INFO 140515720472384] Epoch[0] Batch [10000]#011Speed: 137405.33 samples/sec#011binary_classification_accuracy=0.976317#011binary_classification_cross_entropy=0.085435\u001b[0m\n\u001b[34m[12/30/2021 02:02:10 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=10000 train binary_classification_accuracy <score>=0.9763169683031697\u001b[0m\n\u001b[34m[12/30/2021 02:02:10 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=10000 train binary_classification_cross_entropy <loss>=0.08543506357543719\u001b[0m\n\u001b[34m[12/30/2021 02:02:17 INFO 140515720472384] Epoch[0] Batch [11000]#011Speed: 140676.57 samples/sec#011binary_classification_accuracy=0.977119#011binary_classification_cross_entropy=0.083455\u001b[0m\n\u001b[34m[12/30/2021 02:02:17 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=11000 train binary_classification_accuracy <score>=0.9771192618852832\u001b[0m\n\u001b[34m[12/30/2021 02:02:17 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=11000 train binary_classification_cross_entropy <loss>=0.0834549344122361\u001b[0m\n\u001b[34m[12/30/2021 02:02:24 INFO 140515720472384] Epoch[0] Batch [12000]#011Speed: 140612.77 samples/sec#011binary_classification_accuracy=0.977806#011binary_classification_cross_entropy=0.081764\u001b[0m\n\u001b[34m[12/30/2021 02:02:24 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=12000 train binary_classification_accuracy <score>=0.9778056828597617\u001b[0m\n\u001b[34m[12/30/2021 
02:02:24 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=12000 train binary_classification_cross_entropy <loss>=0.08176383952364982\u001b[0m\n\u001b[34m[12/30/2021 02:02:31 INFO 140515720472384] Epoch[0] Batch [13000]#011Speed: 141415.17 samples/sec#011binary_classification_accuracy=0.978406#011binary_classification_cross_entropy=0.080288\u001b[0m\n\u001b[34m[12/30/2021 02:02:31 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=13000 train binary_classification_accuracy <score>=0.9784055072686716\u001b[0m\n\u001b[34m[12/30/2021 02:02:31 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=13000 train binary_classification_cross_entropy <loss>=0.08028770379101018\u001b[0m\n\u001b[34m[12/30/2021 02:02:38 INFO 140515720472384] Epoch[0] Batch [14000]#011Speed: 141183.18 samples/sec#011binary_classification_accuracy=0.978931#011binary_classification_cross_entropy=0.078995\u001b[0m\n\u001b[34m[12/30/2021 02:02:38 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=14000 train binary_classification_accuracy <score>=0.9789310763516892\u001b[0m\n\u001b[34m[12/30/2021 02:02:38 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, batch=14000 train binary_classification_cross_entropy <loss>=0.07899502835799248\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 INFO 140515720472384] Epoch[0] Train-binary_classification_accuracy=0.979284\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 INFO 140515720472384] Epoch[0] Train-binary_classification_cross_entropy=0.078152\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 INFO 140515720472384] Epoch[0] Time cost=105.479\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, train binary_classification_accuracy <score>=0.9792840878286798\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 INFO 140515720472384] #quality_metric: host=algo-1, epoch=0, train binary_classification_cross_entropy <loss>=0.07815158404086334\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829658.5207527, \"EndTime\": 1640829764.0200803, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\"}, \"Metrics\": {\"epochs\": {\"sum\": 5.0, \"count\": 1, \"min\": 5, \"max\": 5}, \"update.time\": {\"sum\": 105499.11952018738, \"count\": 1, \"min\": 105499.11952018738, \"max\": 105499.11952018738}}}\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 INFO 140515720472384] #progress_metric: host=algo-1, completed 20.0 % of epochs\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829658.5209367, \"EndTime\": 1640829764.0203376, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\", \"epoch\": 0, \"Meta\": \"training_data_iter\"}, \"Metrics\": {\"Total Records Seen\": {\"sum\": 14755286.0, \"count\": 1, \"min\": 14755286, \"max\": 14755286}, \"Total Batches Seen\": {\"sum\": 14756.0, \"count\": 1, \"min\": 14756, \"max\": 14756}, \"Max Records Seen Between Resets\": {\"sum\": 14755286.0, \"count\": 1, \"min\": 14755286, \"max\": 14755286}, \"Max Batches Seen Between Resets\": {\"sum\": 14756.0, \"count\": 1, \"min\": 14756, \"max\": 14756}, \"Reset Count\": {\"sum\": 2.0, \"count\": 1, \"min\": 2, \"max\": 2}, \"Number of Records Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Number of Batches Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}}}\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 INFO 140515720472384] #throughput_metric: host=algo-1, train throughput=139861.20214086538 
records/second\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 WARNING 140515720472384] Already bound, ignoring bind()\u001b[0m\n\u001b[34m/opt/amazon/lib/python3.7/site-packages/mxnet/module/base_module.py:502: UserWarning: Parameters already initialized and force_init=False. init_params call ignored.\n allow_missing=allow_missing, force_init=force_init)\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 WARNING 140515720472384] optimizer already initialized, ignoring...\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=0 train binary_classification_accuracy <score>=0.986\u001b[0m\n\u001b[34m[12/30/2021 02:02:44 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=0 train binary_classification_cross_entropy <loss>=0.06770654296875\u001b[0m\n\u001b[34m[12/30/2021 02:02:51 INFO 140515720472384] Epoch[1] Batch [1000]#011Speed: 137869.01 samples/sec#011binary_classification_accuracy=0.985843#011binary_classification_cross_entropy=0.061067\u001b[0m\n\u001b[34m[12/30/2021 02:02:51 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=1000 train binary_classification_accuracy <score>=0.9858431568431568\u001b[0m\n\u001b[34m[12/30/2021 02:02:51 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=1000 train binary_classification_cross_entropy <loss>=0.06106659011764603\u001b[0m\n\u001b[34m[12/30/2021 02:02:58 INFO 140515720472384] Epoch[1] Batch [2000]#011Speed: 138088.05 samples/sec#011binary_classification_accuracy=0.985929#011binary_classification_cross_entropy=0.060214\u001b[0m\n\u001b[34m[12/30/2021 02:02:58 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=2000 train binary_classification_accuracy <score>=0.9859290354822589\u001b[0m\n\u001b[34m[12/30/2021 02:02:58 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=2000 train binary_classification_cross_entropy <loss>=0.060213889249261174\u001b[0m\n\u001b[34m[12/30/2021 02:03:05 INFO 140515720472384] Epoch[1] Batch [3000]#011Speed: 134430.95 samples/sec#011binary_classification_accuracy=0.985973#011binary_classification_cross_entropy=0.059813\u001b[0m\n\u001b[34m[12/30/2021 02:03:05 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=3000 train binary_classification_accuracy <score>=0.9859733422192603\u001b[0m\n\u001b[34m[12/30/2021 02:03:05 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=3000 train binary_classification_cross_entropy <loss>=0.05981333435856871\u001b[0m\n\u001b[34m[12/30/2021 02:03:13 INFO 140515720472384] Epoch[1] Batch [4000]#011Speed: 138264.45 samples/sec#011binary_classification_accuracy=0.986013#011binary_classification_cross_entropy=0.059712\u001b[0m\n\u001b[34m[12/30/2021 02:03:13 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=4000 train binary_classification_accuracy <score>=0.9860134966258436\u001b[0m\n\u001b[34m[12/30/2021 02:03:13 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=4000 train binary_classification_cross_entropy <loss>=0.059712011716271066\u001b[0m\n\u001b[34m[12/30/2021 02:03:20 INFO 140515720472384] Epoch[1] Batch [5000]#011Speed: 139038.87 samples/sec#011binary_classification_accuracy=0.986063#011binary_classification_cross_entropy=0.059499\u001b[0m\n\u001b[34m[12/30/2021 02:03:20 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=5000 train binary_classification_accuracy <score>=0.9860631873625275\u001b[0m\n\u001b[34m[12/30/2021 02:03:20 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, 
batch=5000 train binary_classification_cross_entropy <loss>=0.05949896123680537\u001b[0m\n\u001b[34m[12/30/2021 02:03:27 INFO 140515720472384] Epoch[1] Batch [6000]#011Speed: 140046.69 samples/sec#011binary_classification_accuracy=0.986035#011binary_classification_cross_entropy=0.059461\u001b[0m\n\u001b[34m[12/30/2021 02:03:27 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=6000 train binary_classification_accuracy <score>=0.9860353274454258\u001b[0m\n\u001b[34m[12/30/2021 02:03:27 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=6000 train binary_classification_cross_entropy <loss>=0.05946066460484684\u001b[0m\n\u001b[34m[12/30/2021 02:03:34 INFO 140515720472384] Epoch[1] Batch [7000]#011Speed: 139521.35 samples/sec#011binary_classification_accuracy=0.986090#011binary_classification_cross_entropy=0.059331\u001b[0m\n\u001b[34m[12/30/2021 02:03:34 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=7000 train binary_classification_accuracy <score>=0.9860897014712184\u001b[0m\n\u001b[34m[12/30/2021 02:03:34 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=7000 train binary_classification_cross_entropy <loss>=0.0593311418572284\u001b[0m\n\u001b[34m[12/30/2021 02:03:41 INFO 140515720472384] Epoch[1] Batch [8000]#011Speed: 139234.53 samples/sec#011binary_classification_accuracy=0.986091#011binary_classification_cross_entropy=0.059386\u001b[0m\n\u001b[34m[12/30/2021 02:03:41 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=8000 train binary_classification_accuracy <score>=0.9860908636420448\u001b[0m\n\u001b[34m[12/30/2021 02:03:41 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=8000 train binary_classification_cross_entropy <loss>=0.05938634492730874\u001b[0m\n\u001b[34m[12/30/2021 02:03:49 INFO 140515720472384] Epoch[1] Batch [9000]#011Speed: 138099.91 samples/sec#011binary_classification_accuracy=0.986147#011binary_classification_cross_entropy=0.059270\u001b[0m\n\u001b[34m[12/30/2021 02:03:49 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=9000 train binary_classification_accuracy <score>=0.9861468725697144\u001b[0m\n\u001b[34m[12/30/2021 02:03:49 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=9000 train binary_classification_cross_entropy <loss>=0.05926991367901634\u001b[0m\n\u001b[34m[12/30/2021 02:03:56 INFO 140515720472384] Epoch[1] Batch [10000]#011Speed: 139991.25 samples/sec#011binary_classification_accuracy=0.986175#011binary_classification_cross_entropy=0.059210\u001b[0m\n\u001b[34m[12/30/2021 02:03:56 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=10000 train binary_classification_accuracy <score>=0.9861750824917508\u001b[0m\n\u001b[34m[12/30/2021 02:03:56 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=10000 train binary_classification_cross_entropy <loss>=0.05921009378687833\u001b[0m\n\u001b[34m[12/30/2021 02:04:03 INFO 140515720472384] Epoch[1] Batch [11000]#011Speed: 137535.00 samples/sec#011binary_classification_accuracy=0.986201#011binary_classification_cross_entropy=0.059122\u001b[0m\n\u001b[34m[12/30/2021 02:04:03 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=11000 train binary_classification_accuracy <score>=0.9862011635305882\u001b[0m\n\u001b[34m[12/30/2021 02:04:03 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=11000 train binary_classification_cross_entropy <loss>=0.05912183562990384\u001b[0m\n\u001b[34m[12/30/2021 02:04:10 INFO 140515720472384] 
Epoch[1] Batch [12000]#011Speed: 136475.24 samples/sec#011binary_classification_accuracy=0.986238#011binary_classification_cross_entropy=0.059012\u001b[0m\n\u001b[34m[12/30/2021 02:04:10 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=12000 train binary_classification_accuracy <score>=0.9862378135155404\u001b[0m\n\u001b[34m[12/30/2021 02:04:10 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=12000 train binary_classification_cross_entropy <loss>=0.059012383505102295\u001b[0m\n\u001b[34m[12/30/2021 02:04:18 INFO 140515720472384] Epoch[1] Batch [13000]#011Speed: 137785.26 samples/sec#011binary_classification_accuracy=0.986268#011binary_classification_cross_entropy=0.058932\u001b[0m\n\u001b[34m[12/30/2021 02:04:18 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=13000 train binary_classification_accuracy <score>=0.9862682870548419\u001b[0m\n\u001b[34m[12/30/2021 02:04:18 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=13000 train binary_classification_cross_entropy <loss>=0.05893236765990981\u001b[0m\n\u001b[34m[12/30/2021 02:04:25 INFO 140515720472384] Epoch[1] Batch [14000]#011Speed: 138831.45 samples/sec#011binary_classification_accuracy=0.986298#011binary_classification_cross_entropy=0.058849\u001b[0m\n\u001b[34m[12/30/2021 02:04:25 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=14000 train binary_classification_accuracy <score>=0.9862976215984572\u001b[0m\n\u001b[34m[12/30/2021 02:04:25 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, batch=14000 train binary_classification_cross_entropy <loss>=0.058848739905882186\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 INFO 140515720472384] Epoch[1] Train-binary_classification_accuracy=0.986302\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 INFO 140515720472384] Epoch[1] Train-binary_classification_cross_entropy=0.058862\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 INFO 140515720472384] Epoch[1] Time cost=106.744\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, train binary_classification_accuracy <score>=0.9863016400108431\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 INFO 140515720472384] #quality_metric: host=algo-1, epoch=1, train binary_classification_cross_entropy <loss>=0.058862226865194\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829764.020166, \"EndTime\": 1640829870.767687, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\"}, \"Metrics\": {\"update.time\": {\"sum\": 106747.10631370544, \"count\": 1, \"min\": 106747.10631370544, \"max\": 106747.10631370544}}}\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 INFO 140515720472384] #progress_metric: host=algo-1, completed 40.0 % of epochs\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829764.0205572, \"EndTime\": 1640829870.7679281, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\", \"epoch\": 1, \"Meta\": \"training_data_iter\"}, \"Metrics\": {\"Total Records Seen\": {\"sum\": 29510572.0, \"count\": 1, \"min\": 29510572, \"max\": 29510572}, \"Total Batches Seen\": {\"sum\": 29512.0, \"count\": 1, \"min\": 29512, \"max\": 29512}, \"Max Records Seen Between Resets\": {\"sum\": 14755286.0, \"count\": 1, \"min\": 14755286, \"max\": 14755286}, \"Max Batches Seen Between Resets\": {\"sum\": 14756.0, \"count\": 1, \"min\": 14756, \"max\": 14756}, \"Reset Count\": {\"sum\": 4.0, \"count\": 1, \"min\": 4, \"max\": 4}, \"Number of Records Since Last Reset\": {\"sum\": 
0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Number of Batches Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}}}\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 INFO 140515720472384] #throughput_metric: host=algo-1, train throughput=138226.0970247211 records/second\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 WARNING 140515720472384] Already bound, ignoring bind()\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 WARNING 140515720472384] optimizer already initialized, ignoring...\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=0 train binary_classification_accuracy <score>=0.988\u001b[0m\n\u001b[34m[12/30/2021 02:04:30 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=0 train binary_classification_cross_entropy <loss>=0.07839033508300781\u001b[0m\n\u001b[34m[12/30/2021 02:04:37 INFO 140515720472384] Epoch[2] Batch [1000]#011Speed: 141477.97 samples/sec#011binary_classification_accuracy=0.986459#011binary_classification_cross_entropy=0.057827\u001b[0m\n\u001b[34m[12/30/2021 02:04:37 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=1000 train binary_classification_accuracy <score>=0.9864585414585415\u001b[0m\n\u001b[34m[12/30/2021 02:04:37 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=1000 train binary_classification_cross_entropy <loss>=0.057826903831946865\u001b[0m\n\u001b[34m[12/30/2021 02:04:44 INFO 140515720472384] Epoch[2] Batch [2000]#011Speed: 140878.21 samples/sec#011binary_classification_accuracy=0.986550#011binary_classification_cross_entropy=0.057289\u001b[0m\n\u001b[34m[12/30/2021 02:04:44 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=2000 train binary_classification_accuracy <score>=0.9865502248875562\u001b[0m\n\u001b[34m[12/30/2021 02:04:44 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=2000 train binary_classification_cross_entropy <loss>=0.05728900220643157\u001b[0m\n\u001b[34m[12/30/2021 02:04:52 INFO 140515720472384] Epoch[2] Batch [3000]#011Speed: 141401.02 samples/sec#011binary_classification_accuracy=0.986597#011binary_classification_cross_entropy=0.057021\u001b[0m\n\u001b[34m[12/30/2021 02:04:52 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=3000 train binary_classification_accuracy <score>=0.9865968010663112\u001b[0m\n\u001b[34m[12/30/2021 02:04:52 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=3000 train binary_classification_cross_entropy <loss>=0.05702117189587215\u001b[0m\n\u001b[34m[12/30/2021 02:04:59 INFO 140515720472384] Epoch[2] Batch [4000]#011Speed: 142068.18 samples/sec#011binary_classification_accuracy=0.986629#011binary_classification_cross_entropy=0.057059\u001b[0m\n\u001b[34m[12/30/2021 02:04:59 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=4000 train binary_classification_accuracy <score>=0.9866288427893026\u001b[0m\n\u001b[34m[12/30/2021 02:04:59 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=4000 train binary_classification_cross_entropy <loss>=0.05705870486151961\u001b[0m\n\u001b[34m[12/30/2021 02:05:06 INFO 140515720472384] Epoch[2] Batch [5000]#011Speed: 139169.14 samples/sec#011binary_classification_accuracy=0.986668#011binary_classification_cross_entropy=0.056867\u001b[0m\n\u001b[34m[12/30/2021 02:05:06 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=5000 train binary_classification_accuracy <score>=0.9866680663867227\u001b[0m\n\u001b[34m[12/30/2021 02:05:06 INFO 140515720472384] 
#quality_metric: host=algo-1, epoch=2, batch=5000 train binary_classification_cross_entropy <loss>=0.05686684497130725\u001b[0m\n\u001b[34m[12/30/2021 02:05:13 INFO 140515720472384] Epoch[2] Batch [6000]#011Speed: 142028.36 samples/sec#011binary_classification_accuracy=0.986652#011binary_classification_cross_entropy=0.056919\u001b[0m\n\u001b[34m[12/30/2021 02:05:13 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=6000 train binary_classification_accuracy <score>=0.9866522246292284\u001b[0m\n\u001b[34m[12/30/2021 02:05:13 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=6000 train binary_classification_cross_entropy <loss>=0.05691928373736315\u001b[0m\n\u001b[34m[12/30/2021 02:05:20 INFO 140515720472384] Epoch[2] Batch [7000]#011Speed: 141145.98 samples/sec#011binary_classification_accuracy=0.986655#011binary_classification_cross_entropy=0.056879\u001b[0m\n\u001b[34m[12/30/2021 02:05:20 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=7000 train binary_classification_accuracy <score>=0.9866546207684617\u001b[0m\n\u001b[34m[12/30/2021 02:05:20 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=7000 train binary_classification_cross_entropy <loss>=0.05687941269642318\u001b[0m\n\u001b[34m[12/30/2021 02:05:27 INFO 140515720472384] Epoch[2] Batch [8000]#011Speed: 142364.00 samples/sec#011binary_classification_accuracy=0.986653#011binary_classification_cross_entropy=0.056985\u001b[0m\n\u001b[34m[12/30/2021 02:05:27 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=8000 train binary_classification_accuracy <score>=0.9866527934008249\u001b[0m\n\u001b[34m[12/30/2021 02:05:27 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=8000 train binary_classification_cross_entropy <loss>=0.05698486019459207\u001b[0m\n\u001b[34m[12/30/2021 02:05:34 INFO 140515720472384] Epoch[2] Batch [9000]#011Speed: 143501.99 samples/sec#011binary_classification_accuracy=0.986676#011binary_classification_cross_entropy=0.056935\u001b[0m\n\u001b[34m[12/30/2021 02:05:34 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=9000 train binary_classification_accuracy <score>=0.9866764803910677\u001b[0m\n\u001b[34m[12/30/2021 02:05:34 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=9000 train binary_classification_cross_entropy <loss>=0.05693531916690077\u001b[0m\n\u001b[34m[12/30/2021 02:05:41 INFO 140515720472384] Epoch[2] Batch [10000]#011Speed: 142939.11 samples/sec#011binary_classification_accuracy=0.986684#011binary_classification_cross_entropy=0.056919\u001b[0m\n\u001b[34m[12/30/2021 02:05:41 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=10000 train binary_classification_accuracy <score>=0.9866839316068393\u001b[0m\n\u001b[34m[12/30/2021 02:05:41 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=10000 train binary_classification_cross_entropy <loss>=0.05691869504837236\u001b[0m\n\u001b[34m[12/30/2021 02:05:48 INFO 140515720472384] Epoch[2] Batch [11000]#011Speed: 141513.56 samples/sec#011binary_classification_accuracy=0.986686#011binary_classification_cross_entropy=0.056875\u001b[0m\n\u001b[34m[12/30/2021 02:05:48 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=11000 train binary_classification_accuracy <score>=0.9866860285428597\u001b[0m\n\u001b[34m[12/30/2021 02:05:48 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=11000 train binary_classification_cross_entropy 
<loss>=0.05687489840512102\u001b[0m\n\u001b[34m[12/30/2021 02:05:55 INFO 140515720472384] Epoch[2] Batch [12000]#011Speed: 142461.80 samples/sec#011binary_classification_accuracy=0.986704#011binary_classification_cross_entropy=0.056792\u001b[0m\n\u001b[34m[12/30/2021 02:05:55 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=12000 train binary_classification_accuracy <score>=0.9867041079910007\u001b[0m\n\u001b[34m[12/30/2021 02:05:55 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=12000 train binary_classification_cross_entropy <loss>=0.05679224156288393\u001b[0m\n\u001b[34m[12/30/2021 02:06:02 INFO 140515720472384] Epoch[2] Batch [13000]#011Speed: 141830.86 samples/sec#011binary_classification_accuracy=0.986717#011binary_classification_cross_entropy=0.056760\u001b[0m\n\u001b[34m[12/30/2021 02:06:02 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=13000 train binary_classification_accuracy <score>=0.9867174063533575\u001b[0m\n\u001b[34m[12/30/2021 02:06:02 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=13000 train binary_classification_cross_entropy <loss>=0.05676031053166199\u001b[0m\n\u001b[34m[12/30/2021 02:06:09 INFO 140515720472384] Epoch[2] Batch [14000]#011Speed: 138937.32 samples/sec#011binary_classification_accuracy=0.986730#011binary_classification_cross_entropy=0.056719\u001b[0m\n\u001b[34m[12/30/2021 02:06:09 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=14000 train binary_classification_accuracy <score>=0.9867301621312763\u001b[0m\n\u001b[34m[12/30/2021 02:06:09 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, batch=14000 train binary_classification_cross_entropy <loss>=0.05671881425671046\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 INFO 140515720472384] Epoch[2] Train-binary_classification_accuracy=0.986727\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 INFO 140515720472384] Epoch[2] Train-binary_classification_cross_entropy=0.056709\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 INFO 140515720472384] Epoch[2] Time cost=104.254\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, train binary_classification_accuracy <score>=0.9867270262943887\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 INFO 140515720472384] #quality_metric: host=algo-1, epoch=2, train binary_classification_cross_entropy <loss>=0.05670896640693828\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829870.767744, \"EndTime\": 1640829975.0252588, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\"}, \"Metrics\": {\"update.time\": {\"sum\": 104257.09104537964, \"count\": 1, \"min\": 104257.09104537964, \"max\": 104257.09104537964}}}\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 INFO 140515720472384] #progress_metric: host=algo-1, completed 60.0 % of epochs\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829870.768142, \"EndTime\": 1640829975.02551, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\", \"epoch\": 2, \"Meta\": \"training_data_iter\"}, \"Metrics\": {\"Total Records Seen\": {\"sum\": 44265858.0, \"count\": 1, \"min\": 44265858, \"max\": 44265858}, \"Total Batches Seen\": {\"sum\": 44268.0, \"count\": 1, \"min\": 44268, \"max\": 44268}, \"Max Records Seen Between Resets\": {\"sum\": 14755286.0, \"count\": 1, \"min\": 14755286, \"max\": 14755286}, \"Max Batches Seen Between Resets\": {\"sum\": 14756.0, \"count\": 1, \"min\": 14756, \"max\": 14756}, \"Reset Count\": {\"sum\": 6.0, 
\"count\": 1, \"min\": 6, \"max\": 6}, \"Number of Records Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Number of Batches Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}}}\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 INFO 140515720472384] #throughput_metric: host=algo-1, train throughput=141527.3668676963 records/second\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 WARNING 140515720472384] Already bound, ignoring bind()\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 WARNING 140515720472384] optimizer already initialized, ignoring...\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=0 train binary_classification_accuracy <score>=0.985\u001b[0m\n\u001b[34m[12/30/2021 02:06:15 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=0 train binary_classification_cross_entropy <loss>=0.06285255432128906\u001b[0m\n\u001b[34m[12/30/2021 02:06:22 INFO 140515720472384] Epoch[3] Batch [1000]#011Speed: 138178.71 samples/sec#011binary_classification_accuracy=0.986863#011binary_classification_cross_entropy=0.056253\u001b[0m\n\u001b[34m[12/30/2021 02:06:22 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=1000 train binary_classification_accuracy <score>=0.9868631368631369\u001b[0m\n\u001b[34m[12/30/2021 02:06:22 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=1000 train binary_classification_cross_entropy <loss>=0.056252914421089165\u001b[0m\n\u001b[34m[12/30/2021 02:06:29 INFO 140515720472384] Epoch[3] Batch [2000]#011Speed: 139114.04 samples/sec#011binary_classification_accuracy=0.986953#011binary_classification_cross_entropy=0.055874\u001b[0m\n\u001b[34m[12/30/2021 02:06:29 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=2000 train binary_classification_accuracy <score>=0.9869530234882559\u001b[0m\n\u001b[34m[12/30/2021 02:06:29 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=2000 train binary_classification_cross_entropy <loss>=0.05587372519432575\u001b[0m\n\u001b[34m[12/30/2021 02:06:36 INFO 140515720472384] Epoch[3] Batch [3000]#011Speed: 138902.99 samples/sec#011binary_classification_accuracy=0.986919#011binary_classification_cross_entropy=0.055635\u001b[0m\n\u001b[34m[12/30/2021 02:06:36 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=3000 train binary_classification_accuracy <score>=0.9869193602132622\u001b[0m\n\u001b[34m[12/30/2021 02:06:36 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=3000 train binary_classification_cross_entropy <loss>=0.05563474729258948\u001b[0m\n\u001b[34m[12/30/2021 02:06:43 INFO 140515720472384] Epoch[3] Batch [4000]#011Speed: 139388.36 samples/sec#011binary_classification_accuracy=0.986915#011binary_classification_cross_entropy=0.055746\u001b[0m\n\u001b[34m[12/30/2021 02:06:43 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=4000 train binary_classification_accuracy <score>=0.9869150212446888\u001b[0m\n\u001b[34m[12/30/2021 02:06:43 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=4000 train binary_classification_cross_entropy <loss>=0.05574583171099849\u001b[0m\n\u001b[34m[12/30/2021 02:06:51 INFO 140515720472384] Epoch[3] Batch [5000]#011Speed: 139210.37 samples/sec#011binary_classification_accuracy=0.986921#011binary_classification_cross_entropy=0.055666\u001b[0m\n\u001b[34m[12/30/2021 02:06:51 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=5000 train binary_classification_accuracy 
<score>=0.9869214157168567\u001b[0m\n\u001b[34m[12/30/2021 02:06:51 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=5000 train binary_classification_cross_entropy <loss>=0.05566637403186477\u001b[0m\n\u001b[34m[12/30/2021 02:06:58 INFO 140515720472384] Epoch[3] Batch [6000]#011Speed: 139251.37 samples/sec#011binary_classification_accuracy=0.986899#011binary_classification_cross_entropy=0.055709\u001b[0m\n\u001b[34m[12/30/2021 02:06:58 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=6000 train binary_classification_accuracy <score>=0.9868988501916347\u001b[0m\n\u001b[34m[12/30/2021 02:06:58 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=6000 train binary_classification_cross_entropy <loss>=0.05570902763440438\u001b[0m\n\u001b[34m[12/30/2021 02:07:05 INFO 140515720472384] Epoch[3] Batch [7000]#011Speed: 135635.32 samples/sec#011binary_classification_accuracy=0.986920#011binary_classification_cross_entropy=0.055675\u001b[0m\n\u001b[34m[12/30/2021 02:07:05 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=7000 train binary_classification_accuracy <score>=0.9869197257534638\u001b[0m\n\u001b[34m[12/30/2021 02:07:05 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=7000 train binary_classification_cross_entropy <loss>=0.055674678436059166\u001b[0m\n\u001b[34m[12/30/2021 02:07:12 INFO 140515720472384] Epoch[3] Batch [8000]#011Speed: 139145.19 samples/sec#011binary_classification_accuracy=0.986905#011binary_classification_cross_entropy=0.055763\u001b[0m\n\u001b[34m[12/30/2021 02:07:12 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=8000 train binary_classification_accuracy <score>=0.986904511936008\u001b[0m\n\u001b[34m[12/30/2021 02:07:12 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=8000 train binary_classification_cross_entropy <loss>=0.05576308773237681\u001b[0m\n\u001b[34m[12/30/2021 02:07:19 INFO 140515720472384] Epoch[3] Batch [9000]#011Speed: 139395.14 samples/sec#011binary_classification_accuracy=0.986909#011binary_classification_cross_entropy=0.055737\u001b[0m\n\u001b[34m[12/30/2021 02:07:19 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=9000 train binary_classification_accuracy <score>=0.9869087879124542\u001b[0m\n\u001b[34m[12/30/2021 02:07:19 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=9000 train binary_classification_cross_entropy <loss>=0.05573662098809463\u001b[0m\n\u001b[34m[12/30/2021 02:07:27 INFO 140515720472384] Epoch[3] Batch [10000]#011Speed: 138785.02 samples/sec#011binary_classification_accuracy=0.986902#011binary_classification_cross_entropy=0.055714\u001b[0m\n\u001b[34m[12/30/2021 02:07:27 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=10000 train binary_classification_accuracy <score>=0.9869022097790221\u001b[0m\n\u001b[34m[12/30/2021 02:07:27 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=10000 train binary_classification_cross_entropy <loss>=0.055714041816605386\u001b[0m\n\u001b[34m[12/30/2021 02:07:34 INFO 140515720472384] Epoch[3] Batch [11000]#011Speed: 140220.52 samples/sec#011binary_classification_accuracy=0.986906#011binary_classification_cross_entropy=0.055653\u001b[0m\n\u001b[34m[12/30/2021 02:07:34 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=11000 train binary_classification_accuracy <score>=0.986906099445505\u001b[0m\n\u001b[34m[12/30/2021 02:07:34 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=11000 train 
binary_classification_cross_entropy <loss>=0.05565311696409973\u001b[0m\n\u001b[34m[12/30/2021 02:07:41 INFO 140515720472384] Epoch[3] Batch [12000]#011Speed: 139171.91 samples/sec#011binary_classification_accuracy=0.986919#011binary_classification_cross_entropy=0.055594\u001b[0m\n\u001b[34m[12/30/2021 02:07:41 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=12000 train binary_classification_accuracy <score>=0.9869189234230481\u001b[0m\n\u001b[34m[12/30/2021 02:07:41 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=12000 train binary_classification_cross_entropy <loss>=0.05559404292510714\u001b[0m\n\u001b[34m[12/30/2021 02:07:48 INFO 140515720472384] Epoch[3] Batch [13000]#011Speed: 138648.90 samples/sec#011binary_classification_accuracy=0.986931#011binary_classification_cross_entropy=0.055534\u001b[0m\n\u001b[34m[12/30/2021 02:07:48 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=13000 train binary_classification_accuracy <score>=0.9869309283901239\u001b[0m\n\u001b[34m[12/30/2021 02:07:48 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=13000 train binary_classification_cross_entropy <loss>=0.0555342212483641\u001b[0m\n\u001b[34m[12/30/2021 02:07:55 INFO 140515720472384] Epoch[3] Batch [14000]#011Speed: 139019.88 samples/sec#011binary_classification_accuracy=0.986932#011binary_classification_cross_entropy=0.055522\u001b[0m\n\u001b[34m[12/30/2021 02:07:55 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=14000 train binary_classification_accuracy <score>=0.9869316477394472\u001b[0m\n\u001b[34m[12/30/2021 02:07:55 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, batch=14000 train binary_classification_cross_entropy <loss>=0.05552226170437071\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 INFO 140515720472384] Epoch[3] Train-binary_classification_accuracy=0.986934\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 INFO 140515720472384] Epoch[3] Train-binary_classification_cross_entropy=0.055513\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 INFO 140515720472384] Epoch[3] Time cost=106.276\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, train binary_classification_accuracy <score>=0.9869339251829764\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 INFO 140515720472384] #quality_metric: host=algo-1, epoch=3, train binary_classification_cross_entropy <loss>=0.05551349886480338\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829975.0253398, \"EndTime\": 1640830081.3048966, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\"}, \"Metrics\": {\"update.time\": {\"sum\": 106279.15692329407, \"count\": 1, \"min\": 106279.15692329407, \"max\": 106279.15692329407}}}\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 INFO 140515720472384] #progress_metric: host=algo-1, completed 80.0 % of epochs\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640829975.0257154, \"EndTime\": 1640830081.305113, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\", \"epoch\": 3, \"Meta\": \"training_data_iter\"}, \"Metrics\": {\"Total Records Seen\": {\"sum\": 59021144.0, \"count\": 1, \"min\": 59021144, \"max\": 59021144}, \"Total Batches Seen\": {\"sum\": 59024.0, \"count\": 1, \"min\": 59024, \"max\": 59024}, \"Max Records Seen Between Resets\": {\"sum\": 14755286.0, \"count\": 1, \"min\": 14755286, \"max\": 14755286}, \"Max Batches Seen Between Resets\": {\"sum\": 14756.0, \"count\": 1, \"min\": 14756, \"max\": 
14756}, \"Reset Count\": {\"sum\": 8.0, \"count\": 1, \"min\": 8, \"max\": 8}, \"Number of Records Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Number of Batches Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}}}\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 INFO 140515720472384] #throughput_metric: host=algo-1, train throughput=138834.73698362266 records/second\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 WARNING 140515720472384] Already bound, ignoring bind()\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 WARNING 140515720472384] optimizer already initialized, ignoring...\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=0 train binary_classification_accuracy <score>=0.989\u001b[0m\n\u001b[34m[12/30/2021 02:08:01 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=0 train binary_classification_cross_entropy <loss>=0.03831301116943359\u001b[0m\n\u001b[34m[12/30/2021 02:08:08 INFO 140515720472384] Epoch[4] Batch [1000]#011Speed: 138378.57 samples/sec#011binary_classification_accuracy=0.986613#011binary_classification_cross_entropy=0.055956\u001b[0m\n\u001b[34m[12/30/2021 02:08:08 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=1000 train binary_classification_accuracy <score>=0.9866133866133866\u001b[0m\n\u001b[34m[12/30/2021 02:08:08 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=1000 train binary_classification_cross_entropy <loss>=0.055955806751232164\u001b[0m\n\u001b[34m[12/30/2021 02:08:15 INFO 140515720472384] Epoch[4] Batch [2000]#011Speed: 142199.71 samples/sec#011binary_classification_accuracy=0.986809#011binary_classification_cross_entropy=0.055404\u001b[0m\n\u001b[34m[12/30/2021 02:08:15 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=2000 train binary_classification_accuracy <score>=0.986808595702149\u001b[0m\n\u001b[34m[12/30/2021 02:08:15 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=2000 train binary_classification_cross_entropy <loss>=0.05540410045764853\u001b[0m\n\u001b[34m[12/30/2021 02:08:22 INFO 140515720472384] Epoch[4] Batch [3000]#011Speed: 141240.72 samples/sec#011binary_classification_accuracy=0.986862#011binary_classification_cross_entropy=0.055138\u001b[0m\n\u001b[34m[12/30/2021 02:08:22 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=3000 train binary_classification_accuracy <score>=0.9868617127624125\u001b[0m\n\u001b[34m[12/30/2021 02:08:22 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=3000 train binary_classification_cross_entropy <loss>=0.05513837132458685\u001b[0m\n\u001b[34m[12/30/2021 02:08:29 INFO 140515720472384] Epoch[4] Batch [4000]#011Speed: 140339.25 samples/sec#011binary_classification_accuracy=0.986910#011binary_classification_cross_entropy=0.055135\u001b[0m\n\u001b[34m[12/30/2021 02:08:29 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=4000 train binary_classification_accuracy <score>=0.9869095226193452\u001b[0m\n\u001b[34m[12/30/2021 02:08:29 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=4000 train binary_classification_cross_entropy <loss>=0.05513538383072956\u001b[0m\n\u001b[34m[12/30/2021 02:08:36 INFO 140515720472384] Epoch[4] Batch [5000]#011Speed: 141590.68 samples/sec#011binary_classification_accuracy=0.986940#011binary_classification_cross_entropy=0.055006\u001b[0m\n\u001b[34m[12/30/2021 02:08:36 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=5000 
train binary_classification_accuracy <score>=0.9869398120375925\u001b[0m\n\u001b[34m[12/30/2021 02:08:36 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=5000 train binary_classification_cross_entropy <loss>=0.05500639757931745\u001b[0m\n\u001b[34m[12/30/2021 02:08:43 INFO 140515720472384] Epoch[4] Batch [6000]#011Speed: 141670.67 samples/sec#011binary_classification_accuracy=0.986956#011binary_classification_cross_entropy=0.055012\u001b[0m\n\u001b[34m[12/30/2021 02:08:43 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=6000 train binary_classification_accuracy <score>=0.9869560073321113\u001b[0m\n\u001b[34m[12/30/2021 02:08:43 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=6000 train binary_classification_cross_entropy <loss>=0.05501184173775323\u001b[0m\n\u001b[34m[12/30/2021 02:08:51 INFO 140515720472384] Epoch[4] Batch [7000]#011Speed: 140456.50 samples/sec#011binary_classification_accuracy=0.986955#011binary_classification_cross_entropy=0.054981\u001b[0m\n\u001b[34m[12/30/2021 02:08:51 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=7000 train binary_classification_accuracy <score>=0.986955434937866\u001b[0m\n\u001b[34m[12/30/2021 02:08:51 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=7000 train binary_classification_cross_entropy <loss>=0.05498078312102156\u001b[0m\n\u001b[34m[12/30/2021 02:08:58 INFO 140515720472384] Epoch[4] Batch [8000]#011Speed: 141328.62 samples/sec#011binary_classification_accuracy=0.986965#011binary_classification_cross_entropy=0.055031\u001b[0m\n\u001b[34m[12/30/2021 02:08:58 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=8000 train binary_classification_accuracy <score>=0.9869648793900763\u001b[0m\n\u001b[34m[12/30/2021 02:08:58 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=8000 train binary_classification_cross_entropy <loss>=0.05503128272255515\u001b[0m\n\u001b[34m[12/30/2021 02:09:05 INFO 140515720472384] Epoch[4] Batch [9000]#011Speed: 139350.05 samples/sec#011binary_classification_accuracy=0.986979#011binary_classification_cross_entropy=0.055005\u001b[0m\n\u001b[34m[12/30/2021 02:09:05 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=9000 train binary_classification_accuracy <score>=0.9869786690367737\u001b[0m\n\u001b[34m[12/30/2021 02:09:05 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=9000 train binary_classification_cross_entropy <loss>=0.05500530243990037\u001b[0m\n\u001b[34m[12/30/2021 02:09:12 INFO 140515720472384] Epoch[4] Batch [10000]#011Speed: 141372.27 samples/sec#011binary_classification_accuracy=0.986984#011binary_classification_cross_entropy=0.054993\u001b[0m\n\u001b[34m[12/30/2021 02:09:12 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=10000 train binary_classification_accuracy <score>=0.986984201579842\u001b[0m\n\u001b[34m[12/30/2021 02:09:12 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=10000 train binary_classification_cross_entropy <loss>=0.05499327250492953\u001b[0m\n\u001b[34m[12/30/2021 02:09:19 INFO 140515720472384] Epoch[4] Batch [11000]#011Speed: 141220.58 samples/sec#011binary_classification_accuracy=0.986964#011binary_classification_cross_entropy=0.055004\u001b[0m\n\u001b[34m[12/30/2021 02:09:19 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=11000 train binary_classification_accuracy <score>=0.9869635487682938\u001b[0m\n\u001b[34m[12/30/2021 02:09:19 INFO 140515720472384] #quality_metric: 
host=algo-1, epoch=4, batch=11000 train binary_classification_cross_entropy <loss>=0.05500396949132457\u001b[0m\n\u001b[34m[12/30/2021 02:09:26 INFO 140515720472384] Epoch[4] Batch [12000]#011Speed: 141963.93 samples/sec#011binary_classification_accuracy=0.986985#011binary_classification_cross_entropy=0.054923\u001b[0m\n\u001b[34m[12/30/2021 02:09:26 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=12000 train binary_classification_accuracy <score>=0.9869846679443379\u001b[0m\n\u001b[34m[12/30/2021 02:09:26 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=12000 train binary_classification_cross_entropy <loss>=0.05492287140196139\u001b[0m\n\u001b[34m[12/30/2021 02:09:33 INFO 140515720472384] Epoch[4] Batch [13000]#011Speed: 142224.19 samples/sec#011binary_classification_accuracy=0.986989#011binary_classification_cross_entropy=0.054910\u001b[0m\n\u001b[34m[12/30/2021 02:09:33 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=13000 train binary_classification_accuracy <score>=0.9869893854318899\u001b[0m\n\u001b[34m[12/30/2021 02:09:33 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=13000 train binary_classification_cross_entropy <loss>=0.054910142276151555\u001b[0m\n\u001b[34m[12/30/2021 02:09:40 INFO 140515720472384] Epoch[4] Batch [14000]#011Speed: 141572.00 samples/sec#011binary_classification_accuracy=0.986991#011binary_classification_cross_entropy=0.054892\u001b[0m\n\u001b[34m[12/30/2021 02:09:40 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=14000 train binary_classification_accuracy <score>=0.9869911434897507\u001b[0m\n\u001b[34m[12/30/2021 02:09:40 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, batch=14000 train binary_classification_cross_entropy <loss>=0.05489249316692931\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 INFO 140515720472384] Epoch[4] Train-binary_classification_accuracy=0.986995\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 INFO 140515720472384] Epoch[4] Train-binary_classification_cross_entropy=0.054866\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 INFO 140515720472384] Epoch[4] Time cost=104.585\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, train binary_classification_accuracy <score>=0.9869951883979399\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 INFO 140515720472384] #quality_metric: host=algo-1, epoch=4, train binary_classification_cross_entropy <loss>=0.054866424535535224\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640830081.3049538, \"EndTime\": 1640830185.8938043, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\"}, \"Metrics\": {\"update.time\": {\"sum\": 104588.45448493958, \"count\": 1, \"min\": 104588.45448493958, \"max\": 104588.45448493958}}}\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 INFO 140515720472384] #progress_metric: host=algo-1, completed 100.0 % of epochs\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640830081.3053226, \"EndTime\": 1640830185.8940654, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\", \"epoch\": 4, \"Meta\": \"training_data_iter\"}, \"Metrics\": {\"Total Records Seen\": {\"sum\": 73776430.0, \"count\": 1, \"min\": 73776430, \"max\": 73776430}, \"Total Batches Seen\": {\"sum\": 73780.0, \"count\": 1, \"min\": 73780, \"max\": 73780}, \"Max Records Seen Between Resets\": {\"sum\": 14755286.0, \"count\": 1, \"min\": 14755286, \"max\": 14755286}, \"Max Batches Seen Between Resets\": {\"sum\": 14756.0, 
\"count\": 1, \"min\": 14756, \"max\": 14756}, \"Reset Count\": {\"sum\": 10.0, \"count\": 1, \"min\": 10, \"max\": 10}, \"Number of Records Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}, \"Number of Batches Since Last Reset\": {\"sum\": 0.0, \"count\": 1, \"min\": 0, \"max\": 0}}}\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 INFO 140515720472384] #throughput_metric: host=algo-1, train throughput=141078.91831818267 records/second\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 WARNING 140515720472384] wait_for_all_workers will not sync workers since the kv store is not running distributed\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640830185.8938808, \"EndTime\": 1640830185.8945844, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\"}, \"Metrics\": {\"finalize.time\": {\"sum\": 0.1914501190185547, \"count\": 1, \"min\": 0.1914501190185547, \"max\": 0.1914501190185547}}}\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 INFO 140515720472384] Saved checkpoint to \"/tmp/tmpbntot631/state-0001.params\"\u001b[0m\n\u001b[34m[12/30/2021 02:09:45 INFO 140515720472384] Test data is not provided.\u001b[0m\n\u001b[34m#metrics {\"StartTime\": 1640830185.8946366, \"EndTime\": 1640830185.931485, \"Dimensions\": {\"Algorithm\": \"ipinsights\", \"Host\": \"algo-1\", \"Operation\": \"training\"}, \"Metrics\": {\"setuptime\": {\"sum\": 66.99132919311523, \"count\": 1, \"min\": 66.99132919311523, \"max\": 66.99132919311523}, \"totaltime\": {\"sum\": 527582.435131073, \"count\": 1, \"min\": 527582.435131073, \"max\": 527582.435131073}}}\u001b[0m\n\n2021-12-30 02:09:59 Uploading - Uploading generated training model\n2021-12-30 02:09:59 Completed - Training job completed\nTraining seconds: 619\nBillable seconds: 619\n" ] ], [ [ "If you see the message\n\n > Completed - Training job completed\n\nat the bottom of the output logs then that means training successfully completed and the output of the SageMaker IP Insights model was stored in the specified output path. You can also view information about and the status of a training job using the AWS SageMaker console. Just click on the \"Jobs\" tab and select training job matching the training job name, below:", "_____no_output_____" ] ], [ [ "print(f\"Training job name: {ip_insights.latest_training_job.job_name}\")", "Training job name: ipinsights-2021-12-30-01-57-12-007\n" ] ], [ [ "## Inference\n-----\n\nNow that we have trained a SageMaker IP Insights model, we can deploy the model to an endpoint to start performing inference on data. In this case, that means providing it a `<user, IP address>` pair and predicting their compatability scores.\n\nWe can create an inference endpoint using the SageMaker Python SDK `deploy()`function from the job we defined above. We specify the instance type where inference will be performed, as well as the initial number of instnaces to spin up. We recommend using the `ml.m5` instance as it provides the most memory at the lowest cost. Verify how large your model is in S3 and pick the instance type with the appropriate amount of memory.", "_____no_output_____" ] ], [ [ "# predictor = ip_insights.deploy(initial_instance_count=1, instance_type=\"ml.m5.xlarge\")\npredictor = ip_insights.deploy(initial_instance_count=1, instance_type=\"ml.m5.xlarge\")\n", "-------!" ] ], [ [ "Congratulations, you now have a SageMaker IP Insights inference endpoint! You could start integrating this endpoint with your production services to start querying incoming requests for abnormal behavior. 
\n\nYou can confirm the endpoint configuration and status by navigating to the \"Endpoints\" tab in the AWS SageMaker console and selecting the endpoint matching the endpoint name below:", "_____no_output_____" ] ], [ [ "print(f\"Endpoint name: {predictor.endpoint}\")", "_____no_output_____" ] ], [ [ "### Data Serialization/Deserialization\nWe can pass data in a variety of formats to our inference endpoint. In this example, we will pass CSV-formatted data. Other available formats are JSON-formatted and JSON Lines-formatted. We make use of the SageMaker Python SDK utilities: `csv_serializer` and `json_deserializer` when configuring the inference endpoint.", "_____no_output_____" ] ], [ [ "from sagemaker.predictor import csv_serializer, json_deserializer\n\npredictor.serializer = csv_serializer\npredictor.deserializer = json_deserializer", "_____no_output_____" ] ], [ [ "Now that the predictor is configured, it is as easy as passing in a matrix of inference data.\nWe can take a few samples from the simulated dataset above, so we can see what the output looks like.", "_____no_output_____" ] ], [ [ "inference_data = [(data[0], data[1]) for data in train_df[:5].values]\npredictor.predict(\n inference_data, initial_args={\"ContentType\": \"text/csv\", \"Accept\": \"application/json\"}\n)", "The csv_serializer has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\nThe json_deserializer has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n" ] ], [ [ "By default, the predictor will only output the `dot_product` between the learned IP address and the online resource (in this case, the user ID). The dot product summarizes the compatibility between the IP address and online resource. The larger the value, the more the algorithm thinks the IP address is likely to be used by the user. This compatibility score is sufficient for most applications, as we can define a threshold for what we consider an anomalous score.\n\nHowever, more advanced users may want to inspect the learned embeddings and use them in further applications. We can configure the predictor to provide the learned embeddings by specifying the `verbose=True` parameter in the Accept header. You should see that each 'prediction' object contains three keys: `ip_embedding`, `entity_embedding`, and `dot_product`. ", "_____no_output_____" ] ], [ [ "predictor.predict(\n inference_data,\n initial_args={\"ContentType\": \"text/csv\", \"Accept\": \"application/json; verbose=True\"},\n)", "The csv_serializer has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\nThe json_deserializer has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n" ] ], [ [ "## Compute Anomaly Scores\n----\nThe `dot_product` output of the model provides a good measure of how compatible an IP address and online resource are. However, the range of the dot_product is unbounded. This means that to consider an event anomalous we need to define a threshold, such that when we score an event, if the dot_product is above the threshold we can flag the behavior as anomalous. However, picking a threshold can be more of an art, and a good threshold depends on the specifics of your problem and dataset. \n\nIn the following section, we show how to pick a simple threshold by comparing the score distributions between known normal and malicious traffic:\n1. 
We construct a test set of 'Normal' traffic;\n2. Inject 'Malicious' traffic into the dataset;\n3. Plot the distribution of dot_product scores for the model on 'Normal' traffic and the 'Malicious' traffic.\n4. Select a threshold value that separates the normal distribution from the malicious traffic distribution. This value is based on your false-positive tolerance.\n\n### 1. Construct 'Normal' Traffic Dataset\n\nWe previously [created a test set](#3.-Create-training-and-test-dataset) from our simulated Apache access logs dataset. We use this test dataset as the 'Normal' traffic in the test case. ", "_____no_output_____" ] ], [ [ "test_df.head()", "_____no_output_____" ] ], [ [ "### 2. Inject Malicious Traffic\nIf we had a dataset with enough real malicious activity, we would use that to determine a good threshold. Those are hard to come by. So instead, we simulate malicious web traffic that mimics a realistic attack scenario. \n\nWe take a set of user accounts from the test set and randomly generate IP addresses. The users should not have used these IP addresses during training. This simulates an attacker logging in to a user account without knowledge of their IP history.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom generate_data import draw_ip\n\n\ndef score_ip_insights(predictor, df):\n def get_score(result):\n \"\"\"Return the negative of the dot product of the predictions from the model.\"\"\"\n return [-prediction[\"dot_product\"] for prediction in result[\"predictions\"]]\n\n df = df[[\"user\", \"ip_address\"]]\n result = predictor.predict(df.values) # what is df.values \n return get_score(result)\n\n\ndef create_test_case(train_df, test_df, num_samples, attack_freq):\n \"\"\"Creates a test case from provided train and test data frames.\n\n This generates test cases for accounts that are both in the training and testing data sets.\n\n :param train_df: (pandas.DataFrame with columns ['user', 'ip_address']) training DataFrame\n :param test_df: (pandas.DataFrame with columns ['user', 'ip_address']) testing DataFrame\n :param num_samples: (int) number of test samples to use\n :param attack_freq: (float) the ratio of negative_samples:positive_samples to generate for the test case\n :return: DataFrame with both good and bad traffic, with labels\n \"\"\"\n # Get all possible accounts. 
The IP Insights model can only make predictions on users it has seen in training\n # Therefore, filter the test dataset for unseen accounts, as their results will not mean anything.\n valid_accounts = set(train_df[\"user\"])\n valid_test_df = test_df[test_df[\"user\"].isin(valid_accounts)]\n\n good_traffic = valid_test_df.sample(num_samples, replace=False)\n good_traffic = good_traffic[[\"user\", \"ip_address\"]]\n good_traffic[\"label\"] = 0\n\n # Generate malicious traffic\n num_bad_traffic = int(num_samples * attack_freq)\n bad_traffic_accounts = np.random.choice(\n list(valid_accounts), size=num_bad_traffic, replace=True\n )\n bad_traffic_ips = [draw_ip() for i in range(num_bad_traffic)]\n bad_traffic = pd.DataFrame({\"user\": bad_traffic_accounts, \"ip_address\": bad_traffic_ips})\n bad_traffic[\"label\"] = 1\n\n # All traffic labels are: 0 for good traffic; 1 for bad traffic.\n all_traffic = good_traffic.append(bad_traffic)\n\n return all_traffic", "Loaded ASN List: 827696 ASNs.\n" ], [ "NUM_SAMPLES = 100000\ntest_case = create_test_case(train_df, test_df, num_samples=NUM_SAMPLES, attack_freq=1)\ntest_case.head()", "_____no_output_____" ], [ "test_case['label'].value_counts()", "_____no_output_____" ], [ "test_case_scores = score_ip_insights(predictor, test_case)", "The csv_serializer has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\nThe json_deserializer has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n" ] ], [ [ "### 3. Plot Distribution\n\nNow, we plot the distribution of scores. Looking at this distribution will inform us of where we can set a good threshold, based on our risk tolerance. ", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nn, x = np.histogram(test_case_scores[:NUM_SAMPLES], bins=100, density=True)\nplt.plot(x[1:], n)\n\nn, x = np.histogram(test_case_scores[NUM_SAMPLES:], bins=100, density=True)\nplt.plot(x[1:], n)\n\nplt.legend([\"Normal\", \"Random IP\"])\nplt.xlabel(\"IP Insights Score\")\nplt.ylabel(\"Frequency\")\n\nplt.figure()", "_____no_output_____" ] ], [ [ "### 4. Selecting a Good Threshold\n\nAs we see in the figure above, there is a clear separation between normal traffic and random traffic. \nWe could select a threshold depending on the application.\n\n- If we were working with low-impact decisions, such as whether to ask for another factor of authentication during login, we could use a `threshold = 0.0`. This would result in catching more true-positives, at the cost of more false-positives. \n\n- If our decision system were more sensitive to false positives, we could choose a larger threshold, such as `threshold = 10.0`. That way if we were sending the flagged cases to manual investigation, we would have a higher confidence that the activity was suspicious. 
", "_____no_output_____" ] ], [ [ "threshold = 0.0\n\nflagged_cases = test_case[np.array(test_case_scores) > threshold]\n\nnum_flagged_cases = len(flagged_cases)\nnum_true_positives = len(flagged_cases[flagged_cases[\"label\"] == 1])\nnum_false_positives = len(flagged_cases[flagged_cases[\"label\"] == 0])\nnum_all_positives = len(test_case.loc[test_case[\"label\"] == 1])\n\nprint(f\"When threshold is set to: {threshold}\")\nprint(f\"Total of {num_flagged_cases} flagged cases\")\nprint(f\"Total of {num_true_positives} flagged cases are true positives\")\nprint(f\"True Positive Rate: {num_true_positives / float(num_flagged_cases)}\")\nprint(f\"Recall: {num_true_positives / float(num_all_positives)}\")\nprint(f\"Precision: {num_true_positives / float(num_flagged_cases)}\")", "When threshold is set to: 0.0\nTotal of 102539 flagged cases\nTotal of 98149 flagged cases are true positives\nTrue Positive Rate: 0.9571870215235179\nRecall: 0.98149\nPrecision: 0.9571870215235179\n" ] ], [ [ "## Epilogue\n----\n\nIn this notebook, we have shown how to configure the basic training, deployment, and usage of the Amazon SageMaker IP Insights algorithm. All SageMaker algorithms come with support for two additional services that make optimizing and using the algorithm that much easier: Automatic Model Tuning and the Batch Transform service. \n\n\n### Amazon SageMaker Automatic Model Tuning\nThe results above were based on using the default hyperparameters of the SageMaker IP Insights algorithm. If we want to improve the model's performance even more, we can use [Amazon SageMaker Automatic Model Tuning](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning.html) to automate the process of finding the hyperparameters. \n\n#### Validation Dataset\nPreviously, we separated our dataset into a training and test set to validate the performance of a single IP Insights model. However, when we do model tuning, we train many IP Insights models in parallel. If we were to use the same test dataset to select the best model, we bias our model selection such that we don't know if we selected the best model in general, or just the best model for that particular dataset. \n\nTherefore, we need to separate our test set into a validation dataset and a test dataset. The validation dataset is used for model selection. Then, once we pick the model with the best performance, we evaluate the winner on a test set just as before. \n\n#### Validation Metrics\nFor SageMaker Automatic Model Tuning to work, we need an objective metric which determines the performance of the model we want to optimize. Because SageMaker IP Insights is an unsupervised algorithm, we do not have a clearly defined metric for performance (such as the percentage of fraudulent events discovered). \n\nWe allow the user to provide a validation set of sample data (same format as the training data above) through the `validation` channel. We then fix the negative sampling strategy to use `random_negative_sampling_rate=1` and `shuffled_negative_sampling_rate=0` and generate a validation dataset by assigning corresponding labels to the real and simulated data. We then calculate the model's `discriminator_auc` metric. We do this by taking the model's predicted labels and the 'true' simulated labels and computing the Area Under the ROC Curve (AUC) of the model's performance.\n\nWe set up the `HyperParameterTuner` to maximize the `discriminator_auc` on the validation dataset. We also need to set the search space for the hyperparameters. 
We give recommended ranges for the hyperparmaeters in the [Amazon SageMaker IP Insights (Hyperparameters)](https://docs.aws.amazon.com/sagemaker/latest/dg/ip-insights-hyperparameters.html) documentation. \n", "_____no_output_____" ] ], [ [ "test_df[\"timestamp\"].describe()", "/usr/local/lib/python3.6/site-packages/ipykernel_launcher.py:1: FutureWarning: Treating datetime data as categorical rather than numeric in `.describe` is deprecated and will be removed in a future version of pandas. Specify `datetime_is_numeric=True` to silence this warning and adopt the future behavior now.\n \"\"\"Entry point for launching an IPython kernel.\n" ] ], [ [ "The test set we constructed above spans 3 days. We reserve the first day as the validation set and the subsequent two days for the test set. ", "_____no_output_____" ] ], [ [ "time_partition = (\n datetime(2018, 11, 13, tzinfo=pytz.FixedOffset(0))\n if num_time_zones > 1\n else datetime(2018, 11, 13)\n)\n\nvalidation_df = test_df[test_df[\"timestamp\"] < time_partition]\ntest_df = test_df[test_df[\"timestamp\"] >= time_partition]\n\nvalid_data = validation_df.to_csv(index=False, header=False, columns=[\"user\", \"ip_address\"])", "_____no_output_____" ] ], [ [ "We then upload the validation data to S3 and specify it as the validation channel. ", "_____no_output_____" ] ], [ [ "# Upload data to S3 key\nvalidation_data_file = \"valid.csv\"\nkey = os.path.join(prefix, \"validation\", validation_data_file)\nboto3.resource(\"s3\").Bucket(bucket).Object(key).put(Body=valid_data)\ns3_valid_data = f\"s3://{bucket}/{key}\"\n\nprint(f\"Validation data has been uploaded to: {s3_valid_data}\")\n\n# Configure SageMaker IP Insights Input Channels\ninput_data = {\"train\": s3_train_data, \"validation\": s3_valid_data}", "Validation data has been uploaded to: s3://sagemaker-us-east-1-017681292549/sagemaker/ipinsights-tutorial-bwx/validation/valid.csv\n" ], [ "from sagemaker.tuner import HyperparameterTuner, IntegerParameter\n\n# Configure HyperparameterTuner\nip_insights_tuner = HyperparameterTuner(\n estimator=ip_insights, # previously-configured Estimator object\n objective_metric_name=\"validation:discriminator_auc\",\n hyperparameter_ranges={\"vector_dim\": IntegerParameter(64, 1024)},\n max_jobs=4,\n max_parallel_jobs=2,\n)\n\n# Start hyperparameter tuning job\nip_insights_tuner.fit(input_data, include_cls_metadata=False)", "............................................................................................................................................................................................................................................................" ], [ "# Wait for all the jobs to finish\nip_insights_tuner.wait()\n\n# Visualize training job results\nip_insights_tuner.analytics().dataframe()", "_____no_output_____" ], [ "# Visualize training job results\nip_insights_tuner.analytics().dataframe()", "_____no_output_____" ], [ "# Deploy best model\ntuned_predictor = ip_insights_tuner.deploy(\n initial_instance_count=1,\n instance_type=\"ml.m4.xlarge\",\n serializer=csv_serializer,\n deserializer=json_deserializer,\n)", "\n2021-12-30 03:04:33 Starting - Preparing the instances for training\n2021-12-30 03:04:33 Downloading - Downloading input data\n2021-12-30 03:04:33 Training - Training image download completed. 
Training in progress.\n2021-12-30 03:04:33 Uploading - Uploading generated training model\n2021-12-30 03:04:33 Completed - Training job completed\n-----" ], [ "# Make a prediction against the SageMaker endpoint\ntuned_predictor.predict(\n inference_data, initial_args={\"ContentType\": \"text/csv\", \"Accept\": \"application/json\"}\n)", "_____no_output_____" ] ], [ [ "We should have the best performing model from the training job! Now we can determine thresholds and make predictions just like we did with the inference endpoint [above](#Inference).", "_____no_output_____" ], [ "### Batch Transform\nLet's say we want to score all of the login events at the end of the day and aggregate flagged cases for investigators to look at in the morning. If we store the daily login events in S3, we can use IP Insights with [Amazon SageMaker Batch Transform](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html) to run inference and store the IP Insights scores back in S3 for future analysis.\n\nBelow, we take the training job from before and evaluate it on the validation data we put in S3.", "_____no_output_____" ] ], [ [ "transformer = ip_insights.transformer(instance_count=1, instance_type=\"ml.m4.xlarge\")\n\ntransformer.transform(s3_valid_data, content_type=\"text/csv\", split_type=\"Line\")", "_____no_output_____" ], [ "# Wait for Transform Job to finish\ntransformer.wait()", "_____no_output_____" ], [ "print(f\"Batch Transform output is at: {transformer.output_path}\")", "_____no_output_____" ] ], [ [ "### Stop and Delete the Endpoint\nIf you are done with this model, then we should delete the endpoint before we close the notebook. Or else you will continue to pay for the endpoint while it is running. \n\nTo do so execute the cell below. Alternately, you can navigate to the \"Endpoints\" tab in the SageMaker console, select the endpoint with the name stored in the variable endpoint_name, and select \"Delete\" from the \"Actions\" dropdown menu.", "_____no_output_____" ] ], [ [ "ip_insights_tuner.delete_endpoint()\nsagemaker.Session().delete_endpoint(predictor.endpoint)", "The function delete_endpoint is a no-op in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\nThe endpoint attribute has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
e7c5b521d18f1fb818f7df0a418ede9a44c5f545
115,335
ipynb
Jupyter Notebook
analysis/Keystrokes vs Quality.ipynb
JahanviNShah/inmt
730faf6dcd24536bbffd0ec463c61d3b0a7819cd
[ "MIT" ]
39
2020-04-14T09:41:41.000Z
2022-02-21T14:32:06.000Z
analysis/Keystrokes vs Quality.ipynb
JahanviNShah/inmt
730faf6dcd24536bbffd0ec463c61d3b0a7819cd
[ "MIT" ]
17
2020-05-11T23:44:57.000Z
2021-02-17T10:53:15.000Z
analysis/Keystrokes vs Quality.ipynb
JahanviNShah/inmt
730faf6dcd24536bbffd0ec463c61d3b0a7819cd
[ "MIT" ]
20
2020-04-14T11:22:43.000Z
2021-11-25T03:57:21.000Z
82.97482
31,804
0.706715
[ [ [ "import pandas as pd\nimport re\nimport ast\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import levene\nimport matplotlib.pyplot as plt\n\nsns.set(color_codes=True)", "_____no_output_____" ], [ "story_quality = pd.read_csv('./data/story_quality.csv')\nstory_quality.head()", "_____no_output_____" ], [ "keystrokes = pd.read_csv('data/mt.dockeystroke_complete.csv')\nkeystrokes.head()", "_____no_output_____" ], [ "# def break_user(x):\n# xs1 = x.split('|')\n \n# if len(xs1) > 1:\n# return xs1[0]\n \n# def breakdown_level(x):\n# xs1 = x.split('|')\n# if len(xs1) > 1:\n# xs2 = xs1[1].split(' ')\n# xs2 = list(filter(None, xs2))\n# if len(xs2) >= 3:\n# if re.match(r\"L\\d\", xs2[-1]):\n# return xs2[-1]\n# return ''\n\n# def breakdown_type(x):\n# xs1 = x.split('|')\n# if len(xs1) > 1:\n# xs2 = xs1[1].split(' ')\n# xs2 = list(filter(None, xs2))\n# if len(xs2) >= 3:\n# if re.match(r\"L\\d\", xs2[-1]):\n# return xs2[-2]\n# return ''\n\n# def breakdown_story(x):\n# xs1 = x.split('|')\n# if len(xs1) > 1:\n# xs2 = xs1[1].split(' ')\n# xs2 = list(filter(None, xs2))\n# if len(xs2) >= 3:\n# if re.match(r\"L\\d\", xs2[-1]):\n# return ' '.join(xs2[:-2])\n# return ''\n\n# def breakdown_direction(x):\n# xs1 = x.split('|')\n# if len(xs1) > 1:\n# return xs1[2].strip()", "_____no_output_____" ], [ "# keystrokes['user'] = keystrokes['translatedSet'].apply(lambda x: break_user(x))\n# keystrokes['type'] = keystrokes['translatedSet'].apply(lambda x: breakdown_type(x))\n# keystrokes['level'] = keystrokes['translatedSet'].apply(lambda x: breakdown_level(x))\n# keystrokes['story'] = keystrokes['translatedSet'].apply(lambda x: breakdown_story(x))\n# keystrokes['direction'] = keystrokes['translatedSet'].apply(lambda x: breakdown_direction(x))\n\nkeystrokes['total_keys'] = keystrokes['keystrokeseries'].apply(lambda x: len(ast.literal_eval(x)))\nkeystrokes['total_time'] = keystrokes['keystrokeseries'].apply(lambda x: ast.literal_eval(x)[-1][1])", "_____no_output_____" ], [ "keystrokes.head()", "_____no_output_____" ], [ "keystrokes = keystrokes.replace(np.nan, '', regex=True)\nkeystrokes = keystrokes[keystrokes['translatedSet'].str.contains(\"Test User\")]", "_____no_output_____" ], [ "len(keystrokes)", "_____no_output_____" ], [ "quakey = pd.merge(keystrokes, story_quality, how='inner', left_on=['translatedSet'], right_on = ['translatedSet'])", "_____no_output_____" ], [ "# import seaborn as sns\n# sns.set(color_codes=True)\n# ax = sns.lmplot(x=\"bleu-4\", y=\"total_time\", hue=\"type\", data=quakey, order=3)\n# ax", "_____no_output_____" ], [ "sns.set(style=\"whitegrid\")\nax = sns.boxplot(x=\"type\", y=\"total_time\", data=quakey)\nax", "_____no_output_____" ], [ "quant_thres = 0.99\nnew_quakey = quakey[(quakey[\"total_time\"] < quakey[\"total_time\"].quantile(quant_thres)) & (quakey[\"total_keys\"] < quakey[\"total_keys\"].quantile(quant_thres))]", "_____no_output_____" ], [ "sns.set(style=\"whitegrid\")\nax = sns.boxplot(x=\"type\", y=\"total_keys\", data=new_quakey)\nax", "_____no_output_____" ], [ "sns.set(style=\"whitegrid\")\nax = sns.boxplot(x=\"type\", y=\"total_keys\", data=new_quakey)\nax", "_____no_output_____" ], [ "new_quakey", "_____no_output_____" ], [ "col = \"bleu-4\"\nlevel = \"L4\"\nx = \"total_keys\"\ny = col\n# y = \"total_keys\"\nax = sns.lmplot(x=x, y=y, hue=\"type\", hue_order=[\"MT\", \"PE\", \"BL\"], data=new_quakey[(new_quakey['level'] == level) & (new_quakey[col] != 0)])\n# ax.set(ylim=(0,6000000))\n# 
ax.set(ylim=(0,8000))\nax.set(ylim=(0,1))\nax.set(xlim=(0,None))", "_____no_output_____" ], [ "new_quakey = new_quakey.replace(np.nan, '', regex=True)", "_____no_output_____" ], [ "# new_quakey[(new_quakey['tgt'] != '') & (new_quakey['bleu-4'] == 0)]\nnew_quakey.head()", "_____no_output_____" ], [ "story_quality[story_quality['tgt'] == '']", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c5bb0b7b5d4f05a647c75de16785616ada7eae
52,496
ipynb
Jupyter Notebook
archive/sentiment_paraphraser.ipynb
puzzler10/travis_attack
14f9d4c467ee160f829c46ca568eade90a528d8a
[ "Apache-2.0" ]
1
2022-02-18T05:13:00.000Z
2022-02-18T05:13:00.000Z
archive/sentiment_paraphraser.ipynb
puzzler10/travis_attack
14f9d4c467ee160f829c46ca568eade90a528d8a
[ "Apache-2.0" ]
null
null
null
archive/sentiment_paraphraser.ipynb
puzzler10/travis_attack
14f9d4c467ee160f829c46ca568eade90a528d8a
[ "Apache-2.0" ]
null
null
null
42.61039
418
0.51842
[ [ [ "GLUE sets: model will be trained on eval set, so you shouldn't also test on the eval set. The problem is that the labels are withheld for the test set. \nStart with SNLI. MultiNLI is a later option too. As is rotten_tomatoes. \n* Victim model performance on dataset train, valid, test set. (done, written code to measure it)\n* Create new paraphrased valid + test datasets (done a preliminary version on the valid set) \n* Measure victim model performance on paraphrased datasets (done. on vanilla valid set is about 87% accuracy. generating 16 paraphrases (i.e. not many) and evaluating performance on all of them, we get ~75% accuracy)\n* Get document embeddings of original and paraphrased and compare (done)\n * https://github.com/UKPLab/sentence-transformers\n* Write a simple way to measure paraphrase quality (done) \n* Construct reward function \n", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import os\nimport torch \nfrom torch.utils.data import DataLoader\nfrom datasets import load_dataset, load_metric\nimport datasets, transformers\nfrom transformers import pipeline, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer\nfrom pprint import pprint\nimport numpy as np, pandas as pd\nimport scipy\nfrom utils import * # local script \nimport pyarrow\nfrom sentence_transformers import SentenceTransformer, util\nfrom IPython.core.debugger import set_trace\nfrom GPUtil import showUtilization\nimport seaborn as sns\nfrom itertools import repeat\nfrom collections import defaultdict\nfrom IPython.display import Markdown\n\npath_cache = './cache/'\npath_results = \"./results/\"\n\nseed = 420\ntorch.manual_seed(seed)\nnp.random.seed(seed)\ntorch.cuda.manual_seed(seed)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \ndevicenum = torch.cuda.current_device() if device.type == 'cuda' else -1\nn_wkrs = 4 * torch.cuda.device_count()\nbatch_size = 64\npd.set_option(\"display.max_colwidth\", 400)", "_____no_output_____" ], [ "# Paraphrase model (para)\npara_name = \"tuner007/pegasus_paraphrase\"\npara_tokenizer = AutoTokenizer.from_pretrained(para_name)\npara_model = AutoModelForSeq2SeqLM.from_pretrained(para_name).to(device)", "_____no_output_____" ], [ "# Victim Model (VM)\nvm_name = \"textattack/distilbert-base-cased-snli\"\nvm_tokenizer = AutoTokenizer.from_pretrained(vm_name)\nvm_model = AutoModelForSequenceClassification.from_pretrained(vm_name).to(device)\nvm_idx2lbl = vm_model.config.id2label\nvm_lbl2idx = vm_model.config.label2id\nvm_num_labels = vm_model.num_labels", "_____no_output_____" ], [ "# Semantic Similarity model \nembedding_model = SentenceTransformer('paraphrase-distilroberta-base-v1')", "_____no_output_____" ], [ "dataset = load_dataset(\"snli\")\ntrain,valid,test = dataset['train'],dataset['validation'],dataset['test']\n\nlabel_cname = 'label'\nremove_minus1_labels = lambda x: x[label_cname] != -1\ntrain = train.filter(remove_minus1_labels)\nvalid = valid.filter(remove_minus1_labels)\ntest = test.filter(remove_minus1_labels)\n\n# make sure that all datasets have the same number of labels as what the victim model predicts\nassert train.features[label_cname].num_classes == vm_num_labels\nassert valid.features[label_cname].num_classes == vm_num_labels\nassert test.features[ label_cname].num_classes == vm_num_labels\n\ntrain_dl = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=n_wkrs)\nvalid_dl = DataLoader(valid, batch_size=batch_size, shuffle=True, 
num_workers=n_wkrs)\ntest_dl = DataLoader( test, batch_size=batch_size, shuffle=True, num_workers=n_wkrs)", "Reusing dataset snli (/data/tproth/.cache/huggingface/datasets/snli/plain_text/1.0.0/1f60b67533b65ae0275561ff7828aad5ee4282d0e6f844fd148d05d3c6ea251b)\n" ], [ "def get_paraphrases(input_text,num_return_sequences,num_beams, num_beam_groups=1,diversity_penalty=0):\n batch = para_tokenizer(input_text,truncation=True,padding='longest', return_tensors=\"pt\").to(device)\n translated = para_model.generate(**batch,num_beams=num_beams, num_return_sequences=num_return_sequences, \n temperature=1.5, num_beam_groups=num_beam_groups, diversity_penalty=diversity_penalty)\n tgt_text = para_tokenizer.batch_decode(translated, skip_special_tokens=True)\n return tgt_text\n\ndef gen_dataset_paraphrases(x, cname_input, cname_output, n_seed_seqs=32): \n \"\"\" x: one row of a dataset. \n cname_input: column to generate paraphrases for \n cname_output: column name to give output of paraphrases \n n_seed_seqs: rough indicator of how many paraphrases to return. \n For now, keep at 4,8,16,32,64 etc\"\"\"\n # TODO: figure out how to batch this. \n if n_seed_seqs % 4 != 0: raise ValueError(\"keep n_seed_seqs divisible by 4 for now\")\n n = n_seed_seqs/2\n #low diversity (ld) paraphrases \n ld_l = get_paraphrases(x[cname_input],num_return_sequences=int(n),\n num_beams=int(n))\n #high diversity (hd) paraphrases. We can use num_beam_groups and diversity_penalty as hyperparameters. \n hd_l = get_paraphrases(x[cname_input],num_return_sequences=int(n),\n num_beams=int(n), num_beam_groups=int(n),diversity_penalty=50002.5)\n l = ld_l + hd_l \n x[cname_output] = l #TODO: change to list(set(l)) \n return x \n", "_____no_output_____" ], [ "# Generate paraphrase dataset\nn_seed_seqs = 48\ndate = '20210629'\nfname = path_cache + 'valid_small_'+ date + '_' + str(n_seed_seqs)\nif os.path.exists(fname): # simple caching\n valid_small = datasets.load_from_disk(fname)\nelse:\n valid_small = valid.shard(20, 0, contiguous=True)\n valid_small = valid_small.map(lambda x: gen_dataset_paraphrases(x, n_seed_seqs=n_seed_seqs,\n cname_input='hypothesis', cname_output='hypothesis_paraphrases'),\n batched=False)\n valid_small.save_to_disk(fname)\n \n ", "_____no_output_____" ], [ "# Create a new version of paraphrase dataset by repeating all other fields to be same \n# length as number of paraphrases. \ndef create_paraphrase_dataset(batch, l_cname): \n \"\"\"Repeat the other fields to be the same length as the number of paraphrases.\n l_cname: column name that contains the list of paraphrases\"\"\" \n return_d = defaultdict(list) \n for o in zip(*batch.values()):\n d = dict(zip(batch.keys(), o))\n n_paraphrases = len(d[l_cname])\n for k,v in d.items(): \n return_d[k] += v if k == l_cname else [v for o in range(n_paraphrases)]\n return return_d \n\nfname = path_cache + 'valid_small_paraphrases_' + date + '_'+ str(n_seed_seqs)\nif os.path.exists(fname): \n valid_small_paraphrases = datasets.load_from_disk(fname)\nelse:\n # Need to call this with batched=True to work. 
\n valid_small_paraphrases = valid_small.map(lambda x: create_paraphrase_dataset(x,\n l_cname='hypothesis_paraphrases'), \n batched=True)\n valid_small_paraphrases.save_to_disk(fname)\n", "_____no_output_____" ], [ "# Generate results dataframe \ndef get_vm_scores(): \n \"\"\"very hacky procedure to generate victim model scores \"\"\"\n # Get preds and accuracy on the paraphrase dataset\n print(\"Getting victim model scores.\")\n some_dl = DataLoader(valid_small_paraphrases, batch_size=batch_size, shuffle=False, \n num_workers=n_wkrs, pin_memory=True)\n dl = some_dl\n metric = load_metric('accuracy')\n para_probs_l,orig_probs_l = [], []\n assert vm_model.training == False # checks that model is in eval mode \n #monitor = Monitor(2) # track GPU usage and memory\n with torch.no_grad():\n for i, data in enumerate(dl): \n if i % 50 == 0 : print(i, \"out of\", len(dl))\n labels,premise = data['label'].to(device),data[\"premise\"]\n paraphrases,orig = data[\"hypothesis_paraphrases\"],data[\"hypothesis\"]\n\n # predictions for original\n inputs = vm_tokenizer(premise,orig,padding=True,truncation=True, return_tensors=\"pt\")\n inputs.to(device)\n outputs = vm_model(**inputs, labels=labels)\n probs = outputs.logits.softmax(1)\n preds = probs.argmax(1)\n orig_probs_l.append(probs.cpu()) \n\n # predictions for paraphrases\n inputs = vm_tokenizer(premise,paraphrases, padding=True,truncation=True, return_tensors=\"pt\")\n inputs.to(device)\n outputs = vm_model(**inputs, labels=labels)\n probs = outputs.logits.softmax(1)\n preds = probs.argmax(1)\n para_probs_l.append(probs.cpu())\n metric.add_batch(predictions=preds, references=labels)\n\n orig_probs_t, para_probs_t = torch.cat(orig_probs_l),torch.cat(para_probs_l)\n #monitor.stop()\n return para_probs_t, orig_probs_t\n\ndef generate_sim_scores(): \n \"\"\"Function to just loop and generate sim scores for each input\"\"\"\n print(\"Getting similarity scores\")\n sim_score_l = []\n for i, data in enumerate(valid_small): \n if i % 50 == 0 : print(i, \"out of\", len(valid_small))\n orig, para = data['hypothesis'], data['hypothesis_paraphrases']\n orig_emb,para_emb = embedding_model.encode(orig),embedding_model.encode(para)\n cos_sim = util.cos_sim(orig_emb,para_emb)[0]\n sim_score_l.append(cos_sim)\n sim_score_t = torch.cat(sim_score_l)\n return sim_score_t\n\nfname = path_cache + 'results_df_'+ date + \"_\" + str(n_seed_seqs) + \".csv\"\nif os.path.exists(fname):\n results_df = pd.read_csv(fname)\nelse: \n sim_score_t = generate_sim_scores()\n para_probs_t, orig_probs_t = get_vm_scores()\n vm_para_scores = torch.tensor([r[idx] for idx,r in zip(valid_small_paraphrases['label'],para_probs_t)])\n vm_orig_scores = torch.tensor([r[idx] for idx,r in zip(valid_small_paraphrases['label'],orig_probs_t)])\n \n results_df = pd.DataFrame({'premise': valid_small_paraphrases['premise'],\n 'orig': valid_small_paraphrases['hypothesis'],\n 'para': valid_small_paraphrases['hypothesis_paraphrases'],\n 'sim_score': sim_score_t,\n 'label_true': valid_small_paraphrases['label'], \n 'label_vm_orig': orig_probs_t.argmax(1),\n 'label_vm_para': para_probs_t.argmax(1),\n 'vm_orig_truelabel': vm_orig_scores, \n 'vm_para_truelabel': vm_para_scores,\n 'vm_truelabel_change': vm_orig_scores - vm_para_scores,\n 'vm_orig_class0': orig_probs_t[:,0], \n 'vm_orig_class1': orig_probs_t[:,1], \n 'vm_orig_class2': orig_probs_t[:,2], \n 'vm_para_class0': para_probs_t[:,0], \n 'vm_para_class1': para_probs_t[:,1], \n 'vm_para_class2': para_probs_t[:,2] \n })\n 
results_df['vm_truelabel_change_X_sim_score'] = results_df['vm_truelabel_change'] * results_df['sim_score']\n results_df.to_csv(fname, index_label = 'idx')", "_____no_output_____" ] ], [ [ "### Permutation method to detect label flips", "_____no_output_____" ], [ "Take each example $Ex$ in the filtered set and generate paraphrases (e.g. 16) of it (or it might work better with a simple token-replacement strategy). Run each through the victim model (might be better with a different model, but still trained on dataset) and record predictions. Then tally up the label predictions (or maybe take average of the probabilities). Each prediction is a vote for the true label. \n\nIdea is that if $Ex$ changes ground truth label to class 4, then most of the paraphrases of $Ex$ will be of class 4 too. If $Ex$ is truly adversarial, then most of the paraphrases of $Ex$ are likely to be of the original class (or at least of other classes). So in other words: \n* if `is_adversarial = 1` then we expect most votes to be for other classes to `label_vm_para`. This means we expect more variance in the voting. If we take model confidence for the class of `label_vm_para` and work out entropy/variance, we expect it to be high. \n* if `is_adversarial = 0` then we expect most votes to be for the same class as `label_vm_para`. This means we expect less variance in the voting. If we take model confidence for the class of `label_vm_para` and work out entropy/variance, we expect it to be low. \n\nVariations \n\n* Instead of generating further paraphrases for all label flippers, try the checklist tests on the input. e.g. replace number/proper noun\n* Try systematic perturbations\n* Record probability of the true class or the predicted class and put it into a distribution. Calculate entropy of it (STRIP style). The idea is that there is some reliable difference in these probabilities between ground-truth flips and otherwise and that entropy can be used as a rough measurement to distinguish between it. \n* Can try the above while keeping track of sentence embeddings + attention layers ", "_____no_output_____" ] ], [ [ "# Read in manually labelled data. This is to track results. \nfname = path_cache + 'results_df_48_20210514_labelled_subset.csv'\ndset_advlbl = load_dataset('csv', data_files=fname)['train'].train_test_split(test_size=0.25)\ntrain_advlbl,test_advlbl = dset_advlbl['train'],dset_advlbl['test']\n\n# # as pandas df\n# df_advlbl = pd.read_csv(fname)\n# train_advlbl,_,test_advlbl = create_train_valid_test(df_advlbl, frac_train=0.75, frac_valid = 0.001)\n# # To join with the original. 
(might be some issues with the idx/row-number col)\n# # x = pd.merge(results_df, df_advlbl, on =['idx', 'premise','orig', 'para'])", "Using custom data configuration default-ebc62bd8d2fb84e0\nReusing dataset csv (/data/tproth/.cache/huggingface/datasets/csv/default-ebc62bd8d2fb84e0/0.0.0/2dc6629a9ff6b5697d82c25b73731dd440507a69cbce8b425db50b751e8fcfd0)\n" ] ], [ [ "#### Paraphrases of paraphrases ", "_____no_output_____" ], [ "nlp dataset -> gen_paraphrases (returns dataset) -> create_paraphrase_dataset -> get vm labels -> save in data frame ", "_____no_output_____" ] ], [ [ "n = 48\ncols_to_drop = ['is_adversarial','label_true','label_vm_orig','orig','sim_score']\ndef paraphrase_and_return_dict(x, n_seed_seqs=16): \n x['perms'] = get_paraphrases(x['para'], num_return_sequences=n, num_beams=n, \n num_beam_groups=8, diversity_penalty=100000.0)\n return x \ntrain_advlbl_perms = train_advlbl.map(lambda x: paraphrase_and_return_dict(x, n_seed_seqs=n),\n batched=False, remove_columns = cols_to_drop)\ntrain_advlbl_expanded = train_advlbl_perms.map(lambda x: create_paraphrase_dataset(x, l_cname='perms'),\n batched=True)", "_____no_output_____" ], [ "# Get victim model predictions for each prediction \nadvlbl_expanded_dl = DataLoader(train_advlbl_expanded, batch_size=batch_size, shuffle=False, \n num_workers=n_wkrs, pin_memory=True)\ndl = advlbl_expanded_dl\nprobs_l = []\nassert vm_model.training == False # checks that model is in eval mode \nwith torch.no_grad():\n for i, data in enumerate(dl): \n if i % 50 == 0 : print(i, \"out of\", len(dl))\n premise,perms = data[\"premise\"],data[\"perms\"]\n # predictions for original\n inputs = vm_tokenizer(premise,perms,padding=True,truncation=True, return_tensors=\"pt\")\n inputs.to(device)\n outputs = vm_model(**inputs)\n probs = outputs.logits.softmax(1)\n # preds = probs.argmax(1)\n probs_l.append(probs.cpu()) \n\nprobs_t = torch.cat(probs_l)\npreds_t = torch.argmax(probs_t,1)", "0 out of 149\n50 out of 149\n100 out of 149\n" ], [ "# Bring back to original\ntrain_advlbl_expanded = train_advlbl_expanded.add_column('vm_label', preds_t.tolist())\ntrain_advlbl_expanded = train_advlbl_expanded.add_column('vm_prob0', probs_t[:,0].tolist())\ntrain_advlbl_expanded = train_advlbl_expanded.add_column('vm_prob1', probs_t[:,1].tolist())\ntrain_advlbl_expanded = train_advlbl_expanded.add_column('vm_prob2', probs_t[:,2].tolist())", "_____no_output_____" ], [ "\n# Make into pandas_df \nadvlbl_df = pd.DataFrame(train_advlbl_expanded) \nadvlbl_df.vm_label = advlbl_df.vm_label.astype('category')\n\n# Count \"votes\" of each set of permutations \nvotes_df = advlbl_df.groupby(['idx'])['vm_label'].describe()\nvotes_df = votes_df.rename(columns={'count':'votes','unique': \"n_cats_with_votes\",\n \"top\": 'top_cat', 'freq': 'top_cat_votes'})", "_____no_output_____" ], [ "# Get entropy and variance from each set of permutations, then choose only the values\n# that correspond to the predicted label of the paraphrase\ndef get_entropy(x, bins=10): \n \"\"\"Return shannon entropy of a vector. 
Used in pandas summary functions\"\"\"\n # the bins parameters affects the entropy quite a bit (it introduces zeros)\n hist,_ = np.histogram(x, bins=bins) \n hist = hist/sum(hist) # turn into PMF (not strictly required for scipy entropy, but easier to interpret)\n return scipy.stats.entropy(hist)\ngrp = advlbl_df.groupby(['idx'])[['vm_prob0','vm_prob1','vm_prob2']]\nentropy_df = grp.agg(func = get_entropy)\nvar_df = grp.agg(func = 'var')\nentropy_df.columns = [o + \"_entropy\" for o in entropy_df.columns]\nvar_df.columns = [o + \"_var\" for o in var_df.columns]", "_____no_output_____" ], [ "label_df = advlbl_df[['idx','label_vm_para']].drop_duplicates()\ndef choose_col_of_df_from_label_column(df, labeldf, name='entropy'): \n \"\"\"Picks columns of df corresponding to the predicted vm label of the paraphrase. \n Works only if probs of classes are the first columns of df in order.\"\"\"\n df = df.merge(labeldf,left_index=True, right_on='idx')\n v = df['label_vm_para'].values\n # See https://stackoverflow.com/a/61234228/5381490\n df[name+'_label_vm_para'] = np.take_along_axis(df.values, v[:,None] ,axis=1)\n return df \nentropy_df = choose_col_of_df_from_label_column(entropy_df, label_df, name='entropy')\nvar_df = choose_col_of_df_from_label_column(var_df, label_df, name='var')", "_____no_output_____" ], [ "# Change original labelled set to a pandas data frame and merge it in \ntrain_advlbl_df,test_advlbl_df = pd.DataFrame(dset_advlbl['train']),pd.DataFrame(dset_advlbl['test'])\ntrain_advlbl_df = pd.merge(train_advlbl_df, votes_df, left_on ='idx', right_index=True)\ntrain_advlbl_df = pd.merge(train_advlbl_df, entropy_df[['idx','entropy_label_vm_para']], \n left_on ='idx', right_on='idx')\ntrain_advlbl_df = pd.merge(train_advlbl_df, var_df[['idx', 'var_label_vm_para']], \n left_on ='idx', right_on='idx')", "_____no_output_____" ], [ "# Calculate label flip percentage and measure success\ntrain_advlbl_df['label_flip'] = train_advlbl_df['top_cat'] != train_advlbl_df['label_vm_para'] \ndef permutation_success(x,y): \n result = None\n if x == 1 and y == True: result = True\n elif x == 0 and y == False: result = True\n elif x == -1 or x == -2: result = \"To be determined\"\n else: result = False\n return result\nv1,v2 = train_advlbl_df['is_adversarial'].values, train_advlbl_df['label_flip'].values\ntrain_advlbl_df['permutation_success'] = list(map(permutation_success, v1,v2))\n\npd.crosstab(index=train_advlbl_df['label_flip'], \n columns=train_advlbl_df['is_adversarial'],\n margins=True)", "_____no_output_____" ], [ "train_advlbl_df.label_flip.value_counts()", "_____no_output_____" ], [ "advlbl_df", "_____no_output_____" ], [ "#### Exploring the method via reporting ####\n\n## Set up parameters \nidx = train_advlbl_df.sample()[['idx']].values[0][0] #sample an index randomly from the table\nmain_tbl = train_advlbl_df.query(\"idx==@idx\")\ndef getval(cname): return main_tbl.loc[:,cname].values[0]\nprem,hyp,para,sim_score = getval('premise'),getval('orig'),getval('para'),getval('sim_score') \nlabel_true,label_vm_orig,label_vm_para = getval('label_true'),getval('label_vm_orig'),getval('label_vm_para')\nadvlbl = getval('is_adversarial')\nd_advlbl2str = {\n 1: \"is a **successful** adversarial example\",\n 0: \"is **unsuccessful**: it flips the true label\",\n -1: \"contains a hypothesis paraphrase that **doesn't make sense** or is nonsensical.\", \n -2: \"is **excluded**: the original label might be wrong\"\n}\nadvstr = d_advlbl2str[advlbl]\nperm_samples = 
advlbl_df.query(\"idx==@idx\").sample(5).to_markdown()\nncats,top_cat,top_cat_votes = getval('n_cats_with_votes'),getval('top_cat'),getval('top_cat_votes')\n\nlabel_flip = top_cat != label_vm_para\nlabel_flip_to_orig_label = top_cat == label_vm_orig\nlabel_flip_to_diff_label = top_cat != label_vm_para and top_cat != label_vm_orig\n\nresults_msg = \"\"\nif not label_flip: results_msg += \"This does not flip the predicted label. \\n\"\nif label_flip_to_orig_label: results_msg += \"This flips the label to the vm predicted label (\" +\\\n str(label_vm_orig) + \") of the original hypothesis. \\n\"\nif label_flip_to_diff_label: results_msg += \"This flips the predicted label but to a different class to the vm prediction of the original hypothesis.\\n\"\n\nresults_msg += \"\\n\"\nif advlbl == 1: \n results_msg += \"If the theory is correct we expected a label flip for an adversarial example.\\n \"\n if label_flip: results_msg += \"The label flip occured, so this was **successful**.\\n\"\n else: results_msg += \"The label flip did not occur, so this was **unsuccessful**.\\n\" \nelif advlbl == 0: \n results_msg += \"If the theory is correct we expect the label does not flip for an unadversarial example.\\n \"\n if label_flip: results_msg += \"The label flip occured, so this was **unsuccessful**.\\n\"\n else: results_msg += \"The label flip did not occur, so this was **successful**.\\n\" \nelif advlbl == -1: \n results_msg += \"The original paraphrase didn't make sense, so we should figure out how to detect this.\\n \"\nelse: \n results_msg += \"The SNLI example was wrong or strange: disregard this example.\\n\"\n\n## Insert into template \nMarkdown(f\"\"\"\nExample with idx **{idx}** \n\n{main_tbl.to_markdown(index=True)} \n\n\n* **Premise**: `{prem}` \n* **Hypothesis (original)**: `{hyp}` (True label **{label_true}**, Victim Model (VM) label **{label_vm_orig}**) \n* **Hypothesis paraphrase**: `{para}` (VM label **{label_vm_para}**) \n\nThis example {advstr}. \n\nWe generate {n} further *permutations* of the hypothesis paraphrase and get VM votes and confidence for \neach of them. The label of the hypothesis paraphrase was **{label_vm_para}**. \nHere are five of these permutations (randomly chosen): \n\n{perm_samples}\n\n**Voting strategy results** \n\nWe get {ncats} categories with votes. The most voted for category is **label {top_cat}** with {top_cat_votes}\nvotes. The paraphrase initially had label **{label_vm_para}**.\n\n{results_msg}\n\n\nNow we look at the variance and entropy of the predicted probabilities of each class. \nWe are interested in class **{label_vm_para}** as it is the label of the hypothesis paraphrase. 
\n\n*Entropy* \n\n{entropy_df.query(\"idx==@idx\").round(2).to_markdown(index=True)}\n\n*Variance* \n\n{var_df.query(\"idx==@idx\").round(2).to_markdown(index=True)}\n\n\n\n\n\"\"\")\n", "_____no_output_____" ], [ "# # calculates performance of victim model on a dataloader\n\n# dl = valid_dl\n# metric = load_metric('accuracy')\n# for i, data in enumerate(dl): \n# if i % 10 == 0 : print(i, \"out of\", len(dl)) \n# labels,premise,hypothesis = data['label'].to(device),data[\"premise\"],data[\"hypothesis\"]\n# inputs = vm_tokenizer(premise,hypothesis, padding=True,truncation=True, return_tensors=\"pt\")\n# inputs.to(device)\n# outputs = vm_model(**inputs, labels=labels)\n# probs = outputs.logits.softmax(1)\n# preds = probs.argmax(1)\n# metric.add_batch(predictions=preds, references=labels)\n\n# metric.compute()\n", "_____no_output_____" ], [ "# # Score semantic similarity with cross encoders\n\n# from sentence_transformers.cross_encoder import CrossEncoder\n# cross_encoder= CrossEncoder('cross-encoder/quora-distilroberta-base')\n# i =11\n# data = valid_small[i]\n# orig, para = data['hypothesis'], data['hypothesis_paraphrases']\n# orig_rep = [orig for i in range(len(para))]\n# pairs = list(zip(orig_rep,para))\n# scores = cross_encoder.predict(pairs)\n# results_df = pd.DataFrame({'pairs':pairs, 'para': para,'score': cos_sim})\n# print(orig)\n# results_df.sort_values('score', ascending=False)", "_____no_output_____" ], [ "# # with sentence transformers\n\n# valid_small_dl = DataLoader(valid_small, batch_size=4, shuffle=False, \n# num_workers=n_wkrs, pin_memory=True)\n# sim_score_l = []\n# for i, data in enumerate(valid_small_dl): \n# pass\n# orig, para = data['hypothesis'], data['hypothesis_paraphrases']\n# orig_emb,para_emb = embedding_model.encode(orig),embedding_model.encode(para)\n# # cos_sim = util.cos_sim(orig_emb,para_emb)[0]\n# # results_df = pd.DataFrame({'para': para,'score': cos_sim})\n# # print(orig)\n# # results_df.sort_values('score', ascending=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c5d3da232de4511ca650b528be4ab5364df5ac
22,257
ipynb
Jupyter Notebook
Python/ML_basic/1.Pythonic Code/1-3.Basic Linear Algebra.ipynb
statKim/TIL
3297d09023d97653773b35160794d3324b95c111
[ "MIT" ]
null
null
null
Python/ML_basic/1.Pythonic Code/1-3.Basic Linear Algebra.ipynb
statKim/TIL
3297d09023d97653773b35160794d3324b95c111
[ "MIT" ]
null
null
null
Python/ML_basic/1.Pythonic Code/1-3.Basic Linear Algebra.ipynb
statKim/TIL
3297d09023d97653773b35160794d3324b95c111
[ "MIT" ]
null
null
null
32.539474
1,866
0.558521
[ [ [ "# Basic Linear Algebra\n- **Pythonic Code**로 짜보기\n> https://github.com/TEAMLAB-Lecture/AI-python-connect/tree/master/lab_assignments/lab_1", "_____no_output_____" ], [ "### Problem #1 - vector_size_check", "_____no_output_____" ] ], [ [ "# 정답 코드\ndef vector_size_check(*vector_variables): # input값의 개수가 그때그때 다를 수 있게 asterisk 사용 => input값을 tuple로 묶음\n return all(len(vector_variables[0]) == x # all([True,True]) : True, all([True,False]) : False\n for x in [len(vector) for vector in vector_variables[1:]])", "_____no_output_____" ], [ "# 실행결과\nprint(vector_size_check([1,2,3], [2,3,4], [5,6,7]))\nprint(vector_size_check([1,3], [2,4], [6,7]))\nprint(vector_size_check([1,3,4], [4], [6,7]))", "True\nTrue\nFalse\n" ] ], [ [ "### Problem #2 - vector_addition", "_____no_output_____" ] ], [ [ "# 정답 코드\ndef vector_addition(*vector_variables):\n if vector_size_check(*vector_variables) == False:\n raise ArithmeticError\n return [sum(elements) for elements in zip(*vector_variables)]", "_____no_output_____" ], [ "# 실행결과\nprint(vector_addition([1,3], [2,4], [6,7]))\nprint(vector_addition([1,5], [10,4], [4,7]))\nprint(vector_addition([1,3,4], [4], [6,7]))", "[9, 14]\n[15, 16]\n" ] ], [ [ "### Problem #3 - vector_subtraction", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\ndef vector_subtraction(*vector_variables):\n if vector_size_check(*vector_variables) == False:\n raise ArithmeticError\n return [elements[0] - sum(elements[1:]) for elements in zip(*vector_variables)]", "_____no_output_____" ], [ "# 정답 코드\ndef vector_subtraction(*vector_variables):\n if vector_size_check(*vector_variables) == False:\n raise ArithmeticError\n return [elements[0]*2 - sum(elements) for elements in zip(*vector_variables)]", "_____no_output_____" ], [ "# 실행결과\nprint(vector_subtraction([1,3], [2,4]))\nprint(vector_subtraction([1,5], [10,4], [4,7]))", "[-1, -1]\n[-13, -6]\n" ] ], [ [ "### Problem #4 - scalar_vector_product", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\ndef scalar_vector_product(alpha, vector_variable):\n return [alpha*vec for vec in vector_variable]", "_____no_output_____" ], [ "# 실행결과\nprint(scalar_vector_product(5, [1,2,3]))\nprint(scalar_vector_product(3, [2,2]))\nprint(scalar_vector_product(4, [1]))", "[5, 10, 15]\n[6, 6]\n[4]\n" ] ], [ [ "### Problem #5 - matrix_size_check", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\ndef matrix_size_check(*matrix_variables):\n return (all(len(matrix_variables[0]) == xdim \n for xdim in [len(x) for x in matrix_variables]) and\n all(len(matrix_variables[0][0]) == ydim \n for ydim in set([len(y) for matrix in matrix_variables for y in matrix])))", "_____no_output_____" ], [ "# 정답 코드\n# 각 행렬의 x, y dimension으로 이루어진 set의 length가 1이면 길이가 같은거임!!\ndef matrix_size_check(*matrix_variables):\n return (all([len(set(len(matrix[0]) for matrix in matrix_variables)) == 1]) and\n all([len(matrix_variables[0]) == len(matrix) for matrix in matrix_variables]))", "_____no_output_____" ], [ "# 실행결과\nmatrix_x = [[2, 2], [2, 2], [2, 2]]\nmatrix_y = [[2, 5], [2, 1]]\nmatrix_z = [[2, 4], [5, 3]]\nmatrix_w = [[2, 5], [1, 1], [2, 2]]\n\nprint(matrix_size_check(matrix_x, matrix_y, matrix_z))\nprint(matrix_size_check(matrix_y, matrix_z))\nprint(matrix_size_check(matrix_x, matrix_w))", "False\nTrue\nTrue\n" ] ], [ [ "### Problem #6 - is_matrix_equal", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\n# 각 원소별로 같은 list에 넣은 다음 set을 취해 중복제거 => length=1\n# 이 과정을 모든 matrix의 위치에서 반복해서 전체 length list를 만들고 set을 취해 중복제거한 length가 1이면 같은 matrix\ndef is_matrix_equal(*matrix_variables):\n return len(set([len(set(elements)) for 
row in zip(*matrix_variables) for elements in zip(*row)])) == 1", "_____no_output_____" ], [ "# 정답 코드\ndef is_matrix_equal(*matrix_variables):\n # print([matrix for matrix in zip(*matrix_variables)])\n return all( [all([len(set(row)) == 1 for row in zip(*matrix)]) # 각 위치에 해당하는 것들을 모아서 set 취한 후 len이 1인지 아닌지 check\n for matrix in zip(*matrix_variables)] )", "_____no_output_____" ], [ "# 실행결과\nmatrix_x = [[2, 2], [2, 2]]\nmatrix_y = [[2, 5], [2, 1]]\n\nprint(is_matrix_equal(matrix_x, matrix_y, matrix_y, matrix_y))\nprint(is_matrix_equal(matrix_x, matrix_x))", "False\nTrue\n" ] ], [ [ "### Problem #7 - matrix_addition", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\ndef matrix_addition(*matrix_variables):\n if matrix_size_check(*matrix_variables) == False:\n raise ArithmeticError\n return [ [sum(element) for element in zip(*row)]\n for row in zip(*matrix_variables)]", "_____no_output_____" ], [ "# 실행결과\nmatrix_x = [[2, 2], [2, 2]]\nmatrix_y = [[2, 5], [2, 1]]\nmatrix_z = [[2, 4], [5, 3]]\n\nprint(matrix_addition(matrix_x, matrix_y)) # Expected value: [[4, 7], [4, 3]]\nprint(matrix_addition(matrix_x, matrix_y, matrix_z)) # Expected value: [[6, 11], [9, 6]]", "[[4, 7], [4, 3]]\n[[6, 11], [9, 6]]\n" ] ], [ [ "### Problem #8 - matrix_subtraction", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\ndef matrix_subtraction(*matrix_variables):\n if matrix_size_check(*matrix_variables) == False:\n raise ArithmeticError\n return [ [element[0] - sum(element[1:]) for element in zip(*row)]\n for row in zip(*matrix_variables)]", "_____no_output_____" ], [ "# 정답 코드\ndef matrix_subtraction(*matrix_variables):\n if matrix_size_check(*matrix_variables) == False:\n raise ArithmeticError\n return [ [2*element[0] - sum(element) for element in zip(*row)]\n for row in zip(*matrix_variables)]", "_____no_output_____" ], [ "# 실행결과\nmatrix_x = [[2, 2], [2, 2]]\nmatrix_y = [[2, 5], [2, 1]]\nmatrix_z = [[2, 4], [5, 3]]\n\nprint(matrix_subtraction(matrix_x, matrix_y)) # Expected value: [[0, -3], [0, 1]]\nprint(matrix_subtraction(matrix_x, matrix_y, matrix_z)) # Expected value: [[-2, -7], [-5, -2]]", "[[0, -3], [0, 1]]\n[[-2, -7], [-5, -2]]\n" ] ], [ [ "### Problem #9 - matrix_transpose", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\ndef matrix_transpose(matrix_variable):\n return [[*new_row] for new_row in zip(*matrix_variable)]", "_____no_output_____" ], [ "# 정답 코드\ndef matrix_transpose(matrix_variable):\n return [ [element for element in row] for row in zip(*matrix_variable)]", "_____no_output_____" ], [ "# 실행결과\nmatrix_w = [[2, 5], [1, 1], [2, 2]]\nmatrix_transpose(matrix_w)", "_____no_output_____" ] ], [ [ "### Problem #10 - scalar_matrix_product", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\ndef scalar_matrix_product(alpha, matrix_variable):\n return [ [alpha*element for element in row] for row in matrix_variable]", "_____no_output_____" ], [ "# 정답 코드\ndef scalar_matrix_product(alpha, matrix_variable):\n return [ scalar_vector_product(alpha, row) for row in matrix_variable]", "_____no_output_____" ], [ "# 실행결과\nmatrix_x = [[2, 2], [2, 2], [2, 2]]\nmatrix_y = [[2, 5], [2, 1]]\nmatrix_z = [[2, 4], [5, 3]]\nmatrix_w = [[2, 5], [1, 1], [2, 2]]\n\nprint(scalar_matrix_product(3, matrix_x)) #Expected value: [[6, 6], [6, 6], [6, 6]]\nprint(scalar_matrix_product(2, matrix_y)) #Expected value: [[4, 10], [4, 2]]\nprint(scalar_matrix_product(4, matrix_z)) #Expected value: [[8, 16], [20, 12]]\nprint(scalar_matrix_product(3, matrix_w)) #Expected value: [[6, 15], [3, 3], [6, 6]]", "[[6, 6], [6, 6], [6, 6]]\n[[4, 10], [4, 2]]\n[[8, 16], [20, 
12]]\n[[6, 15], [3, 3], [6, 6]]\n" ] ], [ [ "### Problem #11 - is_product_availability_matrix", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\ndef is_product_availability_matrix(matrix_a, matrix_b):\n return len(matrix_a[0]) == len(matrix_b)", "_____no_output_____" ], [ "# 정답 코드\ndef is_product_availability_matrix(matrix_a, matrix_b):\n return len([column_vector for column_vector in zip(*matrix_a)]) == len(matrix_b)", "_____no_output_____" ], [ "# 실행결과\nmatrix_x= [[2, 5], [1, 1]]\nmatrix_y = [[1, 1, 2], [2, 1, 1]]\nmatrix_z = [[2, 4], [5, 3], [1, 3]]\n\nprint(is_product_availability_matrix(matrix_y, matrix_z)) # Expected value: True\nprint(is_product_availability_matrix(matrix_z, matrix_x)) # Expected value: True\nprint(is_product_availability_matrix(matrix_z, matrix_w)) # Expected value: False //matrix_w가없습니다\nprint(is_product_availability_matrix(matrix_x, matrix_x)) # Expected value: True", "True\nTrue\nFalse\nTrue\n" ] ], [ [ "### Problem #12 - matrix_product", "_____no_output_____" ] ], [ [ "# 내가 짠 코드\ndef matrix_product(matrix_a, matrix_b):\n if is_product_availability_matrix(matrix_a, matrix_b) == False:\n raise ArithmeticError\n return [ [ sum( [element[0] * element[1] for element in zip(column, row)] ) for column in zip(*matrix_b) ]\n for row in matrix_a]", "_____no_output_____" ], [ "# 정답 코드\ndef matrix_product(matrix_a, matrix_b):\n if is_product_availability_matrix(matrix_a, matrix_b) == False:\n raise ArithmeticError\n return [ [sum(a*b for a,b in zip(row_a, column_b))\n for column_b in zip(*matrix_b) ]\n for row_a in matrix_a]", "_____no_output_____" ], [ "# 실행결과\nmatrix_x= [[2, 5], [1, 1]]\nmatrix_y = [[1, 1, 2], [2, 1, 1]]\nmatrix_z = [[2, 4], [5, 3], [1, 3]]\n\nprint(matrix_product(matrix_y, matrix_z)) # Expected value: [[9, 13], [10, 14]]\nprint(matrix_product(matrix_z, matrix_x)) # Expected value: [[8, 14], [13, 28], [5, 8]]\nprint(matrix_product(matrix_x, matrix_x)) # Expected value: [[9, 15], [3, 6]]\nprint(matrix_product(matrix_z, matrix_w)) # Expected value: False", "[[9, 13], [10, 14]]\n[[8, 14], [13, 28], [5, 8]]\n[[9, 15], [3, 6]]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7c5df6072117ac268f543aa017725cba680a25a
23,830
ipynb
Jupyter Notebook
site/en/tutorials/images/transfer_learning_with_hub.ipynb
miried/tensorflow-docs
408b987c4419956eb1d03569327d437eb49e0d05
[ "Apache-2.0" ]
1
2020-10-13T08:16:15.000Z
2020-10-13T08:16:15.000Z
site/en/tutorials/images/transfer_learning_with_hub.ipynb
miried/tensorflow-docs
408b987c4419956eb1d03569327d437eb49e0d05
[ "Apache-2.0" ]
null
null
null
site/en/tutorials/images/transfer_learning_with_hub.ipynb
miried/tensorflow-docs
408b987c4419956eb1d03569327d437eb49e0d05
[ "Apache-2.0" ]
null
null
null
27.581019
280
0.508057
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Transfer learning with TensorFlow Hub\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/images/transfer_learning_with_hub\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/images/transfer_learning_with_hub.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/images/transfer_learning_with_hub.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/images/transfer_learning_with_hub.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n <td>\n <a href=\"https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4\"><img src=\"https://www.tensorflow.org/images/hub_logo_32px.png\" />See TF Hub model</a>\n </td>\n</table>", "_____no_output_____" ], [ "[TensorFlow Hub](https://tfhub.dev/) is a repository of pre-trained TensorFlow models.\n\nThis tutorial demonstrates how to:\n\n1. Use models from TensorFlow Hub with `tf.keras`\n1. Use an image classification model from TensorFlow Hub\n1. Do simple transfer learning to fine-tune a model for your own image classes", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "import numpy as np\nimport time\n\nimport PIL.Image as Image\nimport matplotlib.pylab as plt\n\nimport tensorflow as tf\nimport tensorflow_hub as hub", "_____no_output_____" ] ], [ [ "## An ImageNet classifier\n\nYou'll start by using a pretrained classifer model to take an image and predict what it's an image of - no training required!", "_____no_output_____" ], [ "### Download the classifier\n\nUse `hub.KerasLayer` to load a [MobileNetV2 model](https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/2) from TensorFlow Hub. 
Any [compatible image classifier model](https://tfhub.dev/s?q=tf2&module-type=image-classification) from tfhub.dev will work here.", "_____no_output_____" ] ], [ [ "classifier_model =\"https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4\" #@param {type:\"string\"}", "_____no_output_____" ], [ "IMAGE_SHAPE = (224, 224)\n\nclassifier = tf.keras.Sequential([\n hub.KerasLayer(classifier_model, input_shape=IMAGE_SHAPE+(3,))\n])", "_____no_output_____" ] ], [ [ "### Run it on a single image", "_____no_output_____" ], [ "Download a single image to try the model on.", "_____no_output_____" ] ], [ [ "grace_hopper = tf.keras.utils.get_file('image.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg')\ngrace_hopper = Image.open(grace_hopper).resize(IMAGE_SHAPE)\ngrace_hopper", "_____no_output_____" ], [ "grace_hopper = np.array(grace_hopper)/255.0\ngrace_hopper.shape", "_____no_output_____" ] ], [ [ "Add a batch dimension, and pass the image to the model.", "_____no_output_____" ] ], [ [ "result = classifier.predict(grace_hopper[np.newaxis, ...])\nresult.shape", "_____no_output_____" ] ], [ [ "The result is a 1001 element vector of logits, rating the probability of each class for the image.\n\nSo the top class ID can be found with argmax:", "_____no_output_____" ] ], [ [ "predicted_class = np.argmax(result[0], axis=-1)\npredicted_class", "_____no_output_____" ] ], [ [ "### Decode the predictions\n\nTake the predicted class ID and fetch the `ImageNet` labels to decode the predictions", "_____no_output_____" ] ], [ [ "labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')\nimagenet_labels = np.array(open(labels_path).read().splitlines())", "_____no_output_____" ], [ "plt.imshow(grace_hopper)\nplt.axis('off')\npredicted_class_name = imagenet_labels[predicted_class]\n_ = plt.title(\"Prediction: \" + predicted_class_name.title())", "_____no_output_____" ] ], [ [ "## Simple transfer learning", "_____no_output_____" ], [ "But what if you want to train a classifier for a dataset with different classes? You can also use a model from TFHub to train a custom image classier by retraining the top layer of the model to recognize the classes in our dataset.", "_____no_output_____" ], [ "### Dataset\n\n For this example you will use the TensorFlow flowers dataset:", "_____no_output_____" ] ], [ [ "data_root = tf.keras.utils.get_file(\n 'flower_photos','https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',\n untar=True)", "_____no_output_____" ] ], [ [ "The simplest way to load this data into our model is using `tf.keras.preprocessing.image.ImageDataGenerator`,\n\nTensorFlow Hub's conventions for image models is to expect float inputs in the `[0, 1]` range. 
Use the `ImageDataGenerator`'s `rescale` parameter to achieve this.", "_____no_output_____" ] ], [ [ "image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)\nimage_data = image_generator.flow_from_directory(str(data_root), target_size=IMAGE_SHAPE)", "_____no_output_____" ] ], [ [ "The resulting object is an iterator that returns `image_batch, label_batch` pairs.", "_____no_output_____" ] ], [ [ "for image_batch, label_batch in image_data:\n print(\"Image batch shape: \", image_batch.shape)\n print(\"Label batch shape: \", label_batch.shape)\n break", "_____no_output_____" ] ], [ [ "### Run the classifier on a batch of images", "_____no_output_____" ], [ "Now run the classifier on the image batch.", "_____no_output_____" ] ], [ [ "result_batch = classifier.predict(image_batch)\nresult_batch.shape", "_____no_output_____" ], [ "predicted_class_names = imagenet_labels[np.argmax(result_batch, axis=-1)]\npredicted_class_names", "_____no_output_____" ] ], [ [ "Now check how these predictions line up with the images:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,9))\nplt.subplots_adjust(hspace=0.5)\nfor n in range(30):\n plt.subplot(6,5,n+1)\n plt.imshow(image_batch[n])\n plt.title(predicted_class_names[n])\n plt.axis('off')\n_ = plt.suptitle(\"ImageNet predictions\")", "_____no_output_____" ] ], [ [ "See the `LICENSE.txt` file for image attributions.\n\nThe results are far from perfect, but reasonable considering that these are not the classes the model was trained for (except \"daisy\").", "_____no_output_____" ], [ "### Download the headless model\n\nTensorFlow Hub also distributes models without the top classification layer. These can be used to easily do transfer learning.\n\nAny [compatible image feature vector model](https://tfhub.dev/s?module-type=image-feature-vector&q=tf2) from tfhub.dev will work here.", "_____no_output_____" ] ], [ [ "feature_extractor_model = \"https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4\" #@param {type:\"string\"}", "_____no_output_____" ] ], [ [ "Create the feature extractor. Use `trainable=False` to freeze the variables in the feature extractor layer, so that the training only modifies the new classifier layer.", "_____no_output_____" ] ], [ [ "feature_extractor_layer = hub.KerasLayer(\n feature_extractor_model, input_shape=(224, 224, 3), trainable=False)", "_____no_output_____" ] ], [ [ "It returns a 1280-length vector for each image:", "_____no_output_____" ] ], [ [ "feature_batch = feature_extractor_layer(image_batch)\nprint(feature_batch.shape)", "_____no_output_____" ] ], [ [ "### Attach a classification head\n\nNow wrap the hub layer in a `tf.keras.Sequential` model, and add a new classification layer.", "_____no_output_____" ] ], [ [ "model = tf.keras.Sequential([\n feature_extractor_layer,\n tf.keras.layers.Dense(image_data.num_classes)\n])\n\nmodel.summary()", "_____no_output_____" ], [ "predictions = model(image_batch)", "_____no_output_____" ], [ "predictions.shape", "_____no_output_____" ] ], [ [ "### Train the model\n\nUse compile to configure the training process:", "_____no_output_____" ] ], [ [ "model.compile(\n optimizer=tf.keras.optimizers.Adam(),\n loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),\n metrics=['acc'])", "_____no_output_____" ] ], [ [ "Now use the `.fit` method to train the model.\n\nTo keep this example short train just 2 epochs. 
To visualize the training progress, use a custom callback to log the loss and accuracy of each batch individually, instead of the epoch average.", "_____no_output_____" ] ], [ [ "class CollectBatchStats(tf.keras.callbacks.Callback):\n def __init__(self):\n self.batch_losses = []\n self.batch_acc = []\n\n def on_train_batch_end(self, batch, logs=None):\n self.batch_losses.append(logs['loss'])\n self.batch_acc.append(logs['acc'])\n self.model.reset_metrics()", "_____no_output_____" ], [ "steps_per_epoch = np.ceil(image_data.samples/image_data.batch_size)\n\nbatch_stats_callback = CollectBatchStats()\n\nhistory = model.fit(image_data, epochs=2,\n steps_per_epoch=steps_per_epoch,\n callbacks=[batch_stats_callback])", "_____no_output_____" ] ], [ [ "Now after, even just a few training iterations, we can already see that the model is making progress on the task.", "_____no_output_____" ] ], [ [ "plt.figure()\nplt.ylabel(\"Loss\")\nplt.xlabel(\"Training Steps\")\nplt.ylim([0,2])\nplt.plot(batch_stats_callback.batch_losses)", "_____no_output_____" ], [ "plt.figure()\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"Training Steps\")\nplt.ylim([0,1])\nplt.plot(batch_stats_callback.batch_acc)", "_____no_output_____" ] ], [ [ "### Check the predictions\n\nTo redo the plot from before, first get the ordered list of class names:", "_____no_output_____" ] ], [ [ "class_names = sorted(image_data.class_indices.items(), key=lambda pair:pair[1])\nclass_names = np.array([key.title() for key, value in class_names])\nclass_names", "_____no_output_____" ] ], [ [ "Run the image batch through the model and convert the indices to class names.", "_____no_output_____" ] ], [ [ "predicted_batch = model.predict(image_batch)\npredicted_id = np.argmax(predicted_batch, axis=-1)\npredicted_label_batch = class_names[predicted_id]", "_____no_output_____" ] ], [ [ "Plot the result", "_____no_output_____" ] ], [ [ "label_id = np.argmax(label_batch, axis=-1)", "_____no_output_____" ], [ "plt.figure(figsize=(10,9))\nplt.subplots_adjust(hspace=0.5)\nfor n in range(30):\n plt.subplot(6,5,n+1)\n plt.imshow(image_batch[n])\n color = \"green\" if predicted_id[n] == label_id[n] else \"red\"\n plt.title(predicted_label_batch[n].title(), color=color)\n plt.axis('off')\n_ = plt.suptitle(\"Model predictions (green: correct, red: incorrect)\")", "_____no_output_____" ] ], [ [ "## Export your model\n\nNow that you've trained the model, export it as a SavedModel for use later on.", "_____no_output_____" ] ], [ [ "t = time.time()\n\nexport_path = \"/tmp/saved_models/{}\".format(int(t))\nmodel.save(export_path)\n\nexport_path", "_____no_output_____" ] ], [ [ "Now confirm that we can reload it, and it still gives the same results:", "_____no_output_____" ] ], [ [ "reloaded = tf.keras.models.load_model(export_path)", "_____no_output_____" ], [ "result_batch = model.predict(image_batch)\nreloaded_result_batch = reloaded.predict(image_batch)", "_____no_output_____" ], [ "abs(reloaded_result_batch - result_batch).max()", "_____no_output_____" ] ], [ [ "This SavedModel can be loaded for inference later, or converted to [TFLite](https://www.tensorflow.org/lite/convert/) or [TFjs](https://github.com/tensorflow/tfjs-converter).\n", "_____no_output_____" ], [ "## Learn more\n\nCheck out more [tutorials](https://www.tensorflow.org/hub/tutorials) for using image models from TensorFlow Hub.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
e7c5e77c539211a2313663753022b409b559127e
877,776
ipynb
Jupyter Notebook
notebooks/1.1.first_whole_analysis.ipynb
TLouf/multiling-twitter
9a39b5b70da53ca717cb74480697f3756a95b8e4
[ "RSA-MD" ]
1
2021-05-09T15:42:04.000Z
2021-05-09T15:42:04.000Z
notebooks/1.1.first_whole_analysis.ipynb
TLouf/multiling-twitter
9a39b5b70da53ca717cb74480697f3756a95b8e4
[ "RSA-MD" ]
3
2020-10-21T09:04:03.000Z
2021-06-02T02:05:13.000Z
notebooks/1.1.first_whole_analysis.ipynb
TLouf/multiling-twitter
9a39b5b70da53ca717cb74480697f3756a95b8e4
[ "RSA-MD" ]
null
null
null
306.16533
334,611
0.924158
[ [ [ "any function that's passed to a multiprocessing function must be defined globally, even the callback function", "_____no_output_____" ], [ "size decompressed = 3.7 * compressed", "_____no_output_____" ], [ "# Config", "_____no_output_____" ] ], [ [ "# Reload all src modules every time before executing the Python code typed\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import os\nimport sys\nimport json\nimport cProfile\nimport pandas as pd\nimport geopandas as geopd\nimport numpy as np\nimport multiprocessing as mp\ntry:\n import cld3\nexcept ModuleNotFoundError:\n pass\nimport pycld2\nfrom shapely.geometry import MultiPolygon\nfrom shapely.geometry import Polygon\nfrom shapely.geometry import Point\nimport matplotlib.cm as csshm\nimport matplotlib.pyplot as plt\nimport descartes\nimport datetime\nimport src.utils.geometry as geo\nimport src.utils.places_to_cells as places_to_cells\nimport src.utils.join_and_count as join_and_count\nimport src.utils.make_config as make_config\nimport src.data.shp_extract as shp_extract\nimport src.data.text_process as text_process\nimport src.data.access as data_access\nimport src.data.user_filters as ufilters\nimport src.data.user_agg as uagg\nimport src.data.metrics as metrics\nimport src.data.process as data_process\nimport src.data.cells_results as cells_results\nimport src.visualization.grid_viz as grid_viz\nimport src.visualization.helpers as helpers_viz\nfrom dotenv import load_dotenv\nload_dotenv()\n\npd.reset_option(\"display.max_rows\")", "_____no_output_____" ], [ "data_dir_path = os.environ['DATA_DIR']\ntweets_files_format = 'tweets_{}_{}_{}.json.gz'\nplaces_files_format = 'places_{}_{}_{}.json.gz'\nssh_domain = os.environ['IFISC_DOMAIN']\nssh_username = os.environ['IFISC_USERNAME']\nfig_dir = os.path.join('..', 'reports', 'figures')\nproject_data_dir = os.path.join('..', 'data')\nexternal_data_dir = os.path.join(project_data_dir, 'external')\ninterim_data_dir = os.path.join(project_data_dir, 'interim')\nprocessed_data_dir = os.path.join(project_data_dir, 'processed')\ncell_data_path_format = os.path.join(\n processed_data_dir, '{0}', '{0}_cc={1}_r={2}_cell_size={3}m.{4}')\n\nlatlon_proj = 'epsg:4326'\nLANGS_DICT = dict([(lang[1],lang[0].lower().capitalize())\n for lang in pycld2.LANGUAGES])\n\ncountry_codes = ('BE', 'BO', 'CA', 'CH', 'EE', 'ES', 'FR', 'HK', 'ID', 'LT', \n 'LV', 'MY', 'PE', 'RO', 'SG', 'TN', 'UA')\nwith open(os.path.join(external_data_dir, 'countries.json')) as f:\n countries_study_data = json.load(f)\nwith open(os.path.join(external_data_dir, 'langs_agg.json')) as f:\n langs_agg_dict = json.load(f)\n\n# Country-specific parameters\ncc = 'BE'\nregion = None #'New York City'\n# region = 'Quebec'\n# region = 'Cataluña'\narea_dict = make_config.area_dict(countries_study_data, cc, region=region)\ncountry_name = area_dict['readable'] \ncc_fig_dir = os.path.join(fig_dir, cc)\nif not os.path.exists(cc_fig_dir):\n os.makedirs(os.path.join(cc_fig_dir, 'count'))\n os.makedirs(os.path.join(cc_fig_dir, 'prop'))\nxy_proj = area_dict['xy_proj']\ncc_timezone = area_dict['timezone']\nplot_langs_list = area_dict['local_langs']\nmin_poly_area = area_dict.get('min_poly_area')\nmax_place_area = area_dict.get('max_place_area') or 1e9\nvalid_uids_path = os.path.join(interim_data_dir, f'valid_uids_{cc}_{country_name}.csv')", "_____no_output_____" ] ], [ [ "# Getting the data", "_____no_output_____" ], [ "## Places, area and grid", "_____no_output_____" ] ], [ [ "shapefile_dict = make_config.shapefile_dict(area_dict, cc, 
region=region)\n \nshapefile_path = os.path.join(\n external_data_dir, shapefile_dict['name'], shapefile_dict['name'])\nshape_df = geopd.read_file(shapefile_path)\nshape_df = geo.extract_shape(\n shape_df, shapefile_dict, xy_proj=xy_proj, min_area=min_poly_area)\nshape_df", "_____no_output_____" ] ], [ [ "Places can be a point too -> treat them like tweets with coords in this case", "_____no_output_____" ] ], [ [ "places_files_paths = [\n os.path.join(data_dir_path, places_files_format.format(2015, 2018, cc)),\n os.path.join(data_dir_path, places_files_format.format(2019, 2019, cc))]\nall_raw_places_df = []\nfor file in places_files_paths:\n raw_places_df = data_access.return_json(file,\n ssh_domain=ssh_domain, ssh_username=ssh_username, compression='gzip')\n all_raw_places_df.append(\n raw_places_df[['id', 'bounding_box', 'name', 'place_type']])\n# We drop the duplicate places (based on their ID)\nplaces_df = pd.concat(all_raw_places_df).drop_duplicates(subset='id')\nplaces_geodf, places_in_xy = geo.make_places_geodf(places_df, shape_df,\n xy_proj=xy_proj)\nplaces_geodf.head()", "_____no_output_____" ], [ "from matplotlib.patches import Patch\n # plt.rc('text', usetex=True)\n # plt.rc('font', family='serif')\nshape_df = geopd.read_file(shapefile_path)\nshape_df = geo.extract_shape(\n shape_df, shapefile_dict, xy_proj=xy_proj, min_area=min_poly_area)\nmercator_proj = 'epsg:3857'\nfig, ax = plt.subplots(1, figsize=(10, 6))\n\nxlabel = 'position (km)'\nylabel = 'position (km)'\nshape_df_mercator = shape_df.to_crs(mercator_proj)\narea_df_bounds = shape_df_mercator.geometry.iloc[0].bounds\n# We translate the whole geometries so that the origin (x,y) = (0,0) is\n# located at the bottom left corner of the shape's bounding box.\nx_off = -area_df_bounds[0]\ny_off = -area_df_bounds[1]\nshape_df_mercator.geometry = shape_df_mercator.translate(xoff=x_off, yoff=y_off)\n\nshape_df_bounds = shape_df.geometry.iloc[0].bounds\n# We translate the whole geometries so that the origin (x,y) = (0,0) is\n# located at the bottom left corner of the shape's bounding box.\nx_off = -(shape_df_bounds[2] + shape_df_bounds[0]) / 2 + (area_df_bounds[2] - area_df_bounds[0]) / 2 + 100e3\ny_off = -(shape_df_bounds[3] + shape_df_bounds[1]) / 2 + (area_df_bounds[3] - area_df_bounds[1]) / 2 - 100e3\nshape_df.geometry = shape_df.translate(xoff=x_off, yoff=y_off)\n# The order here is important, the area's boundaries will be drawn on top\n# of the choropleth, and the cells with null values will be in null_color\nshape_df_mercator.plot(ax=ax, color='#587cf3', edgecolor='black')\nshape_df.plot(ax=ax, color='#0833c1', edgecolor='black')\n\nxticks_km = ax.get_xticks() / 1000\nax.set_xticklabels([f'{t:.0f}' for t in xticks_km])\nyticks_km = ax.get_yticks() / 1000\nax.set_yticklabels([f'{t:.0f}' for t in yticks_km])\n\nplt.xlabel(xlabel)\nplt.ylabel(ylabel)\nhandles = [Patch(facecolor='#587cf3', label='EPSG:3857'), \n Patch(facecolor='#0833c1', label='EPSG:3067')]\nax.legend(handles=handles, bbox_to_anchor=(1.05, 1), loc=2)\nplt.savefig('mercator_finland.pdf', bbox_inches='tight')\nplt.show()\nplt.close()", "_____no_output_____" ], [ "cell_size = 10000\ncells_df, cells_in_area_df, Nx, Ny = geo.create_grid(\n shape_df, cell_size, xy_proj=xy_proj, intersect=True)\ngrid_test_df = cells_in_area_df.copy()\ngrid_test_df['metric'] = 1\n# save_path = os.path.join(cc_fig_dir, f'grid_cc={cc}_cell_size={cell_size}m.pdf')\nsave_path = None\nplot_kwargs = dict(alpha=0.7, edgecolor='w', linewidths=0.5, cmap='plasma')\nax = 
grid_viz.plot_grid(grid_test_df, shape_df, metric_col='metric', show=True, \n save_path=save_path, xy_proj=xy_proj, **plot_kwargs)", "_____no_output_____" ], [ "cells_df, cells_in_area_df, Nx, Ny = geo.create_grid(\n shape_df, cell_size, xy_proj=xy_proj, intersect=True, places_geodf=places_geodf)", "_____no_output_____" ], [ "import mplleaflet\ncells_in_shape_df.to_crs(latlon_proj).plot(edgecolor='w', figsize=(6,10))\nmplleaflet.display()", "/home/thomaslouf/Documents/code/multiling-twitter/.venv/lib/python3.6/site-packages/IPython/core/display.py:701: UserWarning:\n\nConsider using IPython.display.IFrame instead\n\n" ], [ "tweets_files_paths = [\n os.path.join(data_dir_path, tweets_files_format.format(2015, 2019, cc))]\n# os.path.join(data_dir_path, tweets_files_format.format(2019, 2019, cc))]\n\ntweets_access_res = None", "_____no_output_____" ] ], [ [ "## Reading the data", "_____no_output_____" ] ], [ [ "def profile_pre_process(tweets_file_path, chunk_start, chunk_size):\n cProfile.runctx(\n '''data_access.read_data(\n tweets_file_path, chunk_start, chunk_size, dfs_to_join=[places_geodf])''', \n globals(), locals())\n\ntweets_access_res = []\ndef collect_tweets_access_res(res):\n global tweets_access_res\n if res.shape[0] > 0:\n tweets_access_res.append(res)\n \npool = mp.Pool(8)\nfor file_path in tweets_files_paths:\n for chunk_start, chunk_size in data_access.chunkify(\n file_path, size=1e9, ssh_domain=ssh_domain, \n ssh_username=ssh_username):\n args = (file_path, chunk_start, chunk_size)\n kwargs = {'cols': ['text', 'id', 'lang', 'place_id', 'coordinates', \n 'uid', 'created_at', 'source'],\n 'dfs_to_join': [places_geodf]}\n pool.apply_async(\n data_access.read_data, args, kwargs, callback=collect_tweets_access_res)\npool.close()\npool.join()\n\ntweets_access_res = data_process.post_multi(tweets_access_res)", "1000MB read, 846686 tweets unpacked.\n0 tweets remaining after filters.\n" ], [ "tweeted_months = None\ntweets_pb_months = None\nfirst_day = datetime.datetime(year=2015, month=1, day=1)\nfor res in tweets_access_res:\n tweets_df = res.copy()\n tweets_df = tweets_df.loc[tweets_df['created_at'] > first_day]\n tweets_df['month'] = tweets_df['created_at'].dt.to_period('M')\n has_gps = tweets_df['coordinates'].notnull()\n geometry = tweets_df.loc[has_gps, 'coordinates'].apply(\n lambda x: Point(x['coordinates']))\n tweets_coords = geopd.GeoSeries(geometry, crs=latlon_proj, \n index=tweets_df.loc[has_gps].index)\n tweets_df = tweets_df.join(places_geodf, on='place_id', how='left')\n coords_in_place = tweets_coords.within(\n geopd.GeoSeries(tweets_df.loc[has_gps, 'geometry']))\n \n tweeted_months = join_and_count.increment_counts(\n tweeted_months, tweets_df, ['month'])\n tweets_pb_months = join_and_count.increment_counts(tweets_pb_months, \n tweets_df.loc[has_gps].loc[~coords_in_place], ['month'])", "_____no_output_____" ], [ "months_counts = tweeted_months.join(tweets_pb_months, rsuffix='_pb', how='left')\nmonths_counts['prop'] = months_counts['count_pb'] / months_counts['count']\nax = months_counts['prop'].plot.bar()\nticks = np.arange(0,60,5)\ntick_labels = ax.get_xticklabels()\n_ = ax.set_xticks(ticks)\n_ = ax.set_xticklabels([tick_labels[i] for i in ticks])\n_ = ax.set_ylabel('proportion')\n_ = ax.set_title('Proportion of tweets with coords outside of place')", "_____no_output_____" ] ], [ [ "## Filtering out users", "_____no_output_____" ], [ "Filters: user-based imply a loop over all the raw_tweets_df, and must be applied before getting tweets_lang_df and even tweets_loc_df, 
because these don't interest us at all.", "_____no_output_____" ], [ "This filter requires us to loop over all files and aggregate the results to get the valid UIDs out", "_____no_output_____" ] ], [ [ "if tweets_access_res is None:\n def get_df_fun(arg0):\n return data_access.read_json_wrapper(*arg0)\nelse:\n def get_df_fun(arg0):\n return arg0\n\ndef chunk_users_months(df_access, get_df_fun, places_geodf,\n cols=None, ref_year=2015):\n raw_tweets_df = get_df_fun(df_access)\n raw_tweets_df = data_access.filter_df(\n raw_tweets_df, cols=cols, dfs_to_join=[places_geodf])\n months_counts = uagg.users_months(raw_tweets_df, ref_year=ref_year)\n return months_counts\n\nusers_months_res = []\ndef collect_users_months_res(res):\n global users_months_res\n if res.shape[0] > 0:\n users_months_res.append(res)\n\npool = mp.Pool(8)\nfor df_access in data_access.yield_tweets_access(\n tweets_files_paths, tweets_res=tweets_access_res):\n args = (df_access, get_df_fun, places_geodf)\n kwargs = {'cols': ['id', 'uid', 'created_at']}\n pool.apply_async(\n chunk_users_months, args, kwargs, \n callback=collect_users_months_res, error_callback=print)\npool.close()\npool.join()\n\ntweeted_months_users = join_and_count.init_counts(['uid', 'month'])\nfor res in users_months_res:\n tweeted_months_users = join_and_count.increment_join(tweeted_months_users, \n res)\n \ntweeted_months_users = tweeted_months_users['count']\ntotal_nr_users = len(tweeted_months_users.index.levels[0])\nprint(f'In total, there are {total_nr_users} distinct users in the whole dataset.')", "1000MB read, 846686 tweets unpacked.\n846158 tweets remaining after filters.\nThere are 53374 distinct users in this chunk.\n1000MB read, 852716 tweets unpacked.\n1000MB read, 844912 tweets unpacked.\n851955 tweets remaining after filters.\nThere are 58630 distinct users in this chunk.\n844554 tweets remaining after filters.\n1000MB read, 841957 tweets unpacked.\nThere are 50863 distinct users in this chunk.\n841447 tweets remaining after filters.\nThere are 47048 distinct users in this chunk.\n1000MB read, 839053 tweets unpacked.\n838673 tweets remaining after filters.\nThere are 54232 distinct users in this chunk.\n1000MB read, 846221 tweets unpacked.\n115.6MB read, 94907 tweets unpacked.\n94823 tweets remaining after filters.\nThere are 10020 distinct users in this chunk.\n845824 tweets remaining after filters.\n1000MB read, 810121 tweets unpacked.\nThere are 52818 distinct users in this chunk.\n1000MB read, 831641 tweets unpacked.\n809591 tweets remaining after filters.\n831108 tweets remaining after filters.\nThere are 46235 distinct users in this chunk.\nThere are 57018 distinct users in this chunk.\n1000MB read, 813545 tweets unpacked.\n1000MB read, 819327 tweets unpacked.\n813030 tweets remaining after filters.\n818656 tweets remaining after filters.\nThere are 46563 distinct users in this chunk.\n798.3MB read, 649307 tweets unpacked.\nThere are 51456 distinct users in this chunk.\n648928 tweets remaining after filters.\nThere are 40558 distinct users in this chunk.\nIn total, there are 335094 distinct users in the whole dataset.\n" ], [ "local_uids = ufilters.consec_months(tweeted_months_users)\nbot_uids = ufilters.bot_activity(tweeted_months_users)\n# We have local_uids: index of uids with a column full of True, and bot_uids:\n# index of uids with a column full of False. When we multiply them, the uids\n# in local_uids which are not in bot_uids are assigned NaN, and the ones which \n# are in bot_uids are assigned False. 
When we convert to the boolean type,\n# the NaNs turn to True.\nvalid_uids = (local_uids * bot_uids).astype('bool').rename('valid')\nvalid_uids = valid_uids.loc[valid_uids]\nprint(f'This leaves us with {len(valid_uids)} valid users in the whole dataset.')", "There are 66972 users with at least 3 months of activity in the dataset.\nThere are 36284 users considered local in the dataset, as they have been active for 3 consecutive months in this area at least once.\n0 users have been found to be bots because of their excessive activity, tweeting more than 3 times per minute.\nThis leaves us with 36284 valid users in the whole dataset.\n" ] ], [ [ "Then we have to loop over all files once again to apply the speed filter, which is expensive, thus done last (we thus benefit from having some users already filtered out, so smaller tweets dataframes)", "_____no_output_____" ] ], [ [ "if tweets_access_res is None:\n def get_df_fun(arg0):\n return data_access.read_json_wrapper(*arg0)\nelse:\n def get_df_fun(arg0):\n return arg0\n\ndef speed_filter(df_access, get_df_fun, valid_uids, places_in_xy, max_distance,\n cols=None):\n tweets_df = get_df_fun(df_access)\n tweets_df = data_access.filter_df(\n tweets_df, cols=cols, dfs_to_join=[places_in_xy, valid_uids])\n too_fast_uids = ufilters.too_fast(tweets_df, places_in_xy, max_distance)\n return too_fast_uids\n\narea_bounds = shape_df.to_crs(xy_proj).geometry.iloc[0].bounds\n# Get an upper limit of the distance that can be travelled inside the area\nmax_distance = np.sqrt((area_bounds[0]-area_bounds[2])**2 \n + (area_bounds[1]-area_bounds[3])**2)\ncols = ['uid', 'created_at', 'place_id', 'coordinates']\n\ntoo_fast_uids_list = []\ndef collect_too_fast_uids_list(res):\n global too_fast_uids_list\n if res.shape[0] > 0:\n too_fast_uids_list.append(res)\n \npool = mp.Pool(8)\nfor df_access in data_access.yield_tweets_access(\n tweets_files_paths, tweets_res=tweets_access_res):\n args = (df_access, get_df_fun,\n valid_uids, places_geodf, max_distance)\n kwargs = {'cols': cols}\n pool.apply_async(\n speed_filter, args, kwargs, callback=collect_too_fast_uids_list,\n error_callback=print)\npool.close()\npool.join()\n\ntoo_fast_uids_series = pd.Series([])\ntoo_fast_uids_series.index.name = 'uid'\nfor too_fast_uids in too_fast_uids_list:\n too_fast_uids_series = (too_fast_uids_series * too_fast_uids).fillna(False)\nprint(f'In total, there are {len(too_fast_uids_series)} too fast users left to '\n 'filter out in the whole dataset.')\n\nvalid_uids = (valid_uids * too_fast_uids_series).astype('bool').rename('valid')\nvalid_uids = valid_uids.loc[valid_uids]\nprint(f'This leaves us with {len(valid_uids)} valid users in the whole dataset.')\nvalid_uids.to_csv(valid_uids_path, header=True)", "_____no_output_____" ] ], [ [ "## Processing", "_____no_output_____" ], [ "We don't filter out tweets with a useless place (one too large) here, because these tweets can still be useful for language detection. So this filter is only applied later on. 
Similarly, we keep tweets with insufficient text to make a reliable language detection, because they can still be useful for residence attribution.", "_____no_output_____" ] ], [ [ "valid_uids = pd.read_csv(valid_uids_path, index_col='uid', header=0)\n\nif tweets_access_res is None:\n def get_df_fun(arg0):\n return data_access.read_json_wrapper(*arg0)\nelse:\n def get_df_fun(arg0):\n return arg0\n \ntweets_process_res = []\ndef collect_tweets_process_res(res):\n global tweets_process_res\n if res.shape[0] > 0:\n tweets_process_res.append(res)\n\ndef access_and_process(df_access, get_df_fun, valid_uids, places_geodf, \n langs_agg_dict, text_col='text', min_nr_words=4, \n cld='pycld2', latlon_proj='epsg:4326'):\n tweets_loc_df = get_df_fun(df_access)\n cols = ['text', 'id', 'lang', 'place_id', 'coordinates', 'uid',\n 'created_at', 'source']\n tweets_loc_df = data_access.filter_df(\n tweets_loc_df, cols=cols, dfs_to_join=[places_geodf, valid_uids])\n tweets_lang_df = data_process.process(\n tweets_loc_df, places_geodf, langs_agg_dict,\n min_nr_words=min_nr_words, cld=cld)\n return tweets_lang_df\n\npool = mp.Pool(8)\nfor df_access in data_access.yield_tweets_access(\n tweets_files_paths, tweets_res=tweets_access_res):\n args = (df_access, get_df_fun,\n valid_uids, places_geodf, langs_agg_dict)\n kwargs = {'min_nr_words': 4, 'cld': 'pycld2'}\n pool.apply_async(\n access_and_process, args, kwargs, callback=collect_tweets_process_res,\n error_callback=print)\npool.close()\npool.join()\n \ntweets_process_res = data_process.post_multi(tweets_process_res)", "/home/thomaslouf/Documents/code/multiling-twitter/.venv/lib/python3.6/site-packages/pandas/core/indexing.py:1418: FutureWarning:\n\n\nPassing list-likes to .loc or [] with any missing label will raise\nKeyError in the future, you can use .reindex() as an alternative.\n\nSee the documentation here:\nhttps://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike\n\n" ] ], [ [ "# Study at the tweet level", "_____no_output_____" ], [ "## Make tweet counts data", "_____no_output_____" ] ], [ [ "tweet_level_label = 'tweets in {}'\nplot_langs_dict = make_config.langs_dict(area_dict, tweet_level_label)", "_____no_output_____" ] ], [ [ "Why sjoin so slow? It tests on every cell, even though it's exclusive: if one cell matches no other will. 
Solution: loop over cells, ordered by the counts obtained from places, and stop at first match, will greatly reduce the number of 'within' operations -> update: doesn't seem possible, deleting from spatial index is extremely slow", "_____no_output_____" ] ], [ [ "def get_langs_counts(tweets_lang_df, max_place_area, cells_in_area_df):\n tweets_df = tweets_lang_df.copy()\n relevant_area_mask = tweets_df['area'] < max_place_area\n tweets_df = tweets_df.loc[relevant_area_mask]\n # The following mask accounts for both tweets with GPS coordinates and\n # tweets within places which are a point.\n has_gps = tweets_df['area'] == 0\n # Here the tweets with coordinates outside the grid are out, because of the\n # inner join\n tweets_cells_df = geopd.sjoin(tweets_df.loc[has_gps], cells_in_area_df,\n op='within', rsuffix='cell', how='inner')\n nr_out_tweets = len(tweets_df.loc[has_gps]) - len(tweets_cells_df)\n print(f'{nr_out_tweets} tweets have been found outside of the grid and'\n ' filtered out as a result.')\n tweets_places_df = tweets_df.loc[~has_gps]\n return tweets_cells_df, tweets_places_df\n \nwith mp.Pool(8) as pool:\n map_parameters = [(res, max_place_area, cells_in_area_df) \n for res in tweets_process_res]\n print('entering the loop')\n tweets_pre_cell_res = (\n pool.starmap_async(get_langs_counts, map_parameters).get())\n\ncells_langs_counts = None\nplaces_langs_counts = None\n\nfor res in tweets_pre_cell_res:\n tweets_cells_df = res[0]\n tweets_places_df = res[1]\n groupby_cols = ['cld_lang', 'cell_id']\n cells_langs_counts = join_and_count.increment_counts(\n cells_langs_counts, tweets_cells_df, groupby_cols)\n groupby_cols = ['cld_lang', 'place_id']\n places_langs_counts = join_and_count.increment_counts(\n places_langs_counts, tweets_places_df, groupby_cols)\n\nplaces_langs_counts = places_langs_counts['count']\nplaces_counts = (places_langs_counts.groupby('place_id')\n .sum()\n .rename('total_count')\n .to_frame())\ncells_langs_counts = cells_langs_counts['count']\ncells_counts = (cells_langs_counts.groupby('cell_id')\n .sum()\n .rename('total_count')\n .to_frame())", "entering the loop\n109 tweets have been found outside of the grid and filtered out as a result.\n123 tweets have been found outside of the grid and filtered out as a result.\n119 tweets have been found outside of the grid and filtered out as a result.\n149 tweets have been found outside of the grid and filtered out as a result.\n120 tweets have been found outside of the grid and filtered out as a result.\n129 tweets have been found outside of the grid and filtered out as a result.\n180 tweets have been found outside of the grid and filtered out as a result.\n127 tweets have been found outside of the grid and filtered out as a result.\n148 tweets have been found outside of the grid and filtered out as a result.\n103 tweets have been found outside of the grid and filtered out as a result.\n100 tweets have been found outside of the grid and filtered out as a result.\n111 tweets have been found outside of the grid and filtered out as a result.\n96 tweets have been found outside of the grid and filtered out as a result.\n101 tweets have been found outside of the grid and filtered out as a result.\n95 tweets have been found outside of the grid and filtered out as a result.\n83 tweets have been found outside of the grid and filtered out as a result.\n80 tweets have been found outside of the grid and filtered out as a result.\n59 tweets have been found outside of the grid and filtered out as a result.\n110 tweets have been 
found outside of the grid and filtered out as a result.\n168 tweets have been found outside of the grid and filtered out as a result.\n161 tweets have been found outside of the grid and filtered out as a result.\n97 tweets have been found outside of the grid and filtered out as a result.\n116 tweets have been found outside of the grid and filtered out as a result.\n160 tweets have been found outside of the grid and filtered out as a result.\n130 tweets have been found outside of the grid and filtered out as a result.\n197 tweets have been found outside of the grid and filtered out as a result.\n220 tweets have been found outside of the grid and filtered out as a result.\n123 tweets have been found outside of the grid and filtered out as a result.\n134 tweets have been found outside of the grid and filtered out as a result.\n143 tweets have been found outside of the grid and filtered out as a result.\n128 tweets have been found outside of the grid and filtered out as a result.\n110 tweets have been found outside of the grid and filtered out as a result.\n147 tweets have been found outside of the grid and filtered out as a result.\n181 tweets have been found outside of the grid and filtered out as a result.\n166 tweets have been found outside of the grid and filtered out as a result.\n195 tweets have been found outside of the grid and filtered out as a result.\n171 tweets have been found outside of the grid and filtered out as a result.\n156 tweets have been found outside of the grid and filtered out as a result.\n124 tweets have been found outside of the grid and filtered out as a result.\n148 tweets have been found outside of the grid and filtered out as a result.\n163 tweets have been found outside of the grid and filtered out as a result.\n119 tweets have been found outside of the grid and filtered out as a result.\n133 tweets have been found outside of the grid and filtered out as a result.\n97 tweets have been found outside of the grid and filtered out as a result.\n192 tweets have been found outside of the grid and filtered out as a result.\n128 tweets have been found outside of the grid and filtered out as a result.\n104 tweets have been found outside of the grid and filtered out as a result.\n131 tweets have been found outside of the grid and filtered out as a result.\n115 tweets have been found outside of the grid and filtered out as a result.\n138 tweets have been found outside of the grid and filtered out as a result.\n135 tweets have been found outside of the grid and filtered out as a result.\n144 tweets have been found outside of the grid and filtered out as a result.\n130 tweets have been found outside of the grid and filtered out as a result.\n115 tweets have been found outside of the grid and filtered out as a result.\n130 tweets have been found outside of the grid and filtered out as a result.\n101 tweets have been found outside of the grid and filtered out as a result.\n108 tweets have been found outside of the grid and filtered out as a result.\n149 tweets have been found outside of the grid and filtered out as a result.\n168 tweets have been found outside of the grid and filtered out as a result.\n125 tweets have been found outside of the grid and filtered out as a result.\n132 tweets have been found outside of the grid and filtered out as a result.\n140 tweets have been found outside of the grid and filtered out as a result.\n104 tweets have been found outside of the grid and filtered out as a result.\n144 tweets have been found outside of the grid and filtered out as 
a result.\n134 tweets have been found outside of the grid and filtered out as a result.\n130 tweets have been found outside of the grid and filtered out as a result.\n165 tweets have been found outside of the grid and filtered out as a result.\n122 tweets have been found outside of the grid and filtered out as a result.\n129 tweets have been found outside of the grid and filtered out as a result.\n157 tweets have been found outside of the grid and filtered out as a result.\n114 tweets have been found outside of the grid and filtered out as a result.\n107 tweets have been found outside of the grid and filtered out as a result.\n125 tweets have been found outside of the grid and filtered out as a result.\n169 tweets have been found outside of the grid and filtered out as a result.\n225 tweets have been found outside of the grid and filtered out as a result.\n383 tweets have been found outside of the grid and filtered out as a result.\n387 tweets have been found outside of the grid and filtered out as a result.\n163 tweets have been found outside of the grid and filtered out as a result.\n271 tweets have been found outside of the grid and filtered out as a result.\n216 tweets have been found outside of the grid and filtered out as a result.\n239 tweets have been found outside of the grid and filtered out as a result.\n251 tweets have been found outside of the grid and filtered out as a result.\n244 tweets have been found outside of the grid and filtered out as a result.\n170 tweets have been found outside of the grid and filtered out as a result.\n244 tweets have been found outside of the grid and filtered out as a result.\n178 tweets have been found outside of the grid and filtered out as a result.\n169 tweets have been found outside of the grid and filtered out as a result.\n223 tweets have been found outside of the grid and filtered out as a result.\n215 tweets have been found outside of the grid and filtered out as a result.\n225 tweets have been found outside of the grid and filtered out as a result.\n210 tweets have been found outside of the grid and filtered out as a result.\n220 tweets have been found outside of the grid and filtered out as a result.\n195 tweets have been found outside of the grid and filtered out as a result.\n125 tweets have been found outside of the grid and filtered out as a result.\n157 tweets have been found outside of the grid and filtered out as a result.\n168 tweets have been found outside of the grid and filtered out as a result.\n107 tweets have been found outside of the grid and filtered out as a result.\n173 tweets have been found outside of the grid and filtered out as a result.\n137 tweets have been found outside of the grid and filtered out as a result.\n119 tweets have been found outside of the grid and filtered out as a result.\n108 tweets have been found outside of the grid and filtered out as a result.\n136 tweets have been found outside of the grid and filtered out as a result.\n160 tweets have been found outside of the grid and filtered out as a result.\n103 tweets have been found outside of the grid and filtered out as a result.\n114 tweets have been found outside of the grid and filtered out as a result.\n149 tweets have been found outside of the grid and filtered out as a result.\n95 tweets have been found outside of the grid and filtered out as a result.\n101 tweets have been found outside of the grid and filtered out as a result.\n108 tweets have been found outside of the grid and filtered out as a result.\n92 tweets have been found outside 
of the grid and filtered out as a result.\n84 tweets have been found outside of the grid and filtered out as a result.\n107 tweets have been found outside of the grid and filtered out as a result.\n110 tweets have been found outside of the grid and filtered out as a result.\n145 tweets have been found outside of the grid and filtered out as a result.\n96 tweets have been found outside of the grid and filtered out as a result.\n144 tweets have been found outside of the grid and filtered out as a result.\n111 tweets have been found outside of the grid and filtered out as a result.\n218 tweets have been found outside of the grid and filtered out as a result.\n187 tweets have been found outside of the grid and filtered out as a result.\n157 tweets have been found outside of the grid and filtered out as a result.\n169 tweets have been found outside of the grid and filtered out as a result.\n140 tweets have been found outside of the grid and filtered out as a result.\n102 tweets have been found outside of the grid and filtered out as a result.\n130 tweets have been found outside of the grid and filtered out as a result.\n130 tweets have been found outside of the grid and filtered out as a result.\n106 tweets have been found outside of the grid and filtered out as a result.\n101 tweets have been found outside of the grid and filtered out as a result.\n105 tweets have been found outside of the grid and filtered out as a result.\n89 tweets have been found outside of the grid and filtered out as a result.\n97 tweets have been found outside of the grid and filtered out as a result.\n107 tweets have been found outside of the grid and filtered out as a result.\n117 tweets have been found outside of the grid and filtered out as a result.\n112 tweets have been found outside of the grid and filtered out as a result.\n106 tweets have been found outside of the grid and filtered out as a result.\n108 tweets have been found outside of the grid and filtered out as a result.\n114 tweets have been found outside of the grid and filtered out as a result.\n106 tweets have been found outside of the grid and filtered out as a result.\n88 tweets have been found outside of the grid and filtered out as a result.\n107 tweets have been found outside of the grid and filtered out as a result.\n109 tweets have been found outside of the grid and filtered out as a result.\n128 tweets have been found outside of the grid and filtered out as a result.\n133 tweets have been found outside of the grid and filtered out as a result.\n199 tweets have been found outside of the grid and filtered out as a result.\n221 tweets have been found outside of the grid and filtered out as a result.\n161 tweets have been found outside of the grid and filtered out as a result.\n165 tweets have been found outside of the grid and filtered out as a result.\n193 tweets have been found outside of the grid and filtered out as a result.\n196 tweets have been found outside of the grid and filtered out as a result.\n185 tweets have been found outside of the grid and filtered out as a result.\n311 tweets have been found outside of the grid and filtered out as a result.\n149 tweets have been found outside of the grid and filtered out as a result.\n158 tweets have been found outside of the grid and filtered out as a result.\n181 tweets have been found outside of the grid and filtered out as a result.\n185 tweets have been found outside of the grid and filtered out as a result.\n300 tweets have been found outside of the grid and filtered out as a result.\n236 
tweets have been found outside of the grid and filtered out as a result.\n250 tweets have been found outside of the grid and filtered out as a result.\n314 tweets have been found outside of the grid and filtered out as a result.\n237 tweets have been found outside of the grid and filtered out as a result.\n447 tweets have been found outside of the grid and filtered out as a result.\n301 tweets have been found outside of the grid and filtered out as a result.\n266 tweets have been found outside of the grid and filtered out as a result.\n240 tweets have been found outside of the grid and filtered out as a result.\n262 tweets have been found outside of the grid and filtered out as a result.\n182 tweets have been found outside of the grid and filtered out as a result.\n90 tweets have been found outside of the grid and filtered out as a result.\n131 tweets have been found outside of the grid and filtered out as a result.\n116 tweets have been found outside of the grid and filtered out as a result.\n82 tweets have been found outside of the grid and filtered out as a result.\n151 tweets have been found outside of the grid and filtered out as a result.\n121 tweets have been found outside of the grid and filtered out as a result.\n126 tweets have been found outside of the grid and filtered out as a result.\n139 tweets have been found outside of the grid and filtered out as a result.\n100 tweets have been found outside of the grid and filtered out as a result.\n105 tweets have been found outside of the grid and filtered out as a result.\n129 tweets have been found outside of the grid and filtered out as a result.\n160 tweets have been found outside of the grid and filtered out as a result.\n121 tweets have been found outside of the grid and filtered out as a result.\n141 tweets have been found outside of the grid and filtered out as a result.\n120 tweets have been found outside of the grid and filtered out as a result.\n21 tweets have been found outside of the grid and filtered out as a result.\n19 tweets have been found outside of the grid and filtered out as a result.\n95 tweets have been found outside of the grid and filtered out as a result.\n218 tweets have been found outside of the grid and filtered out as a result.\n77 tweets have been found outside of the grid and filtered out as a result.\n217 tweets have been found outside of the grid and filtered out as a result.\n168 tweets have been found outside of the grid and filtered out as a result.\n214 tweets have been found outside of the grid and filtered out as a result.\n267 tweets have been found outside of the grid and filtered out as a result.\n211 tweets have been found outside of the grid and filtered out as a result.\n193 tweets have been found outside of the grid and filtered out as a result.\n245 tweets have been found outside of the grid and filtered out as a result.\n196 tweets have been found outside of the grid and filtered out as a result.\n169 tweets have been found outside of the grid and filtered out as a result.\n156 tweets have been found outside of the grid and filtered out as a result.\n129 tweets have been found outside of the grid and filtered out as a result.\n81 tweets have been found outside of the grid and filtered out as a result.\n54 tweets have been found outside of the grid and filtered out as a result.\n87 tweets have been found outside of the grid and filtered out as a result.\n68 tweets have been found outside of the grid and filtered out as a result.\n74 tweets have been found outside of the grid and filtered 
out as a result.\n76 tweets have been found outside of the grid and filtered out as a result.\n51 tweets have been found outside of the grid and filtered out as a result.\n55 tweets have been found outside of the grid and filtered out as a result.\n35 tweets have been found outside of the grid and filtered out as a result.\n66 tweets have been found outside of the grid and filtered out as a result.\n66 tweets have been found outside of the grid and filtered out as a result.\n75 tweets have been found outside of the grid and filtered out as a result.\n64 tweets have been found outside of the grid and filtered out as a result.\n53 tweets have been found outside of the grid and filtered out as a result.\n66 tweets have been found outside of the grid and filtered out as a result.\n81 tweets have been found outside of the grid and filtered out as a result.\n54 tweets have been found outside of the grid and filtered out as a result.\n74 tweets have been found outside of the grid and filtered out as a result.\n60 tweets have been found outside of the grid and filtered out as a result.\n67 tweets have been found outside of the grid and filtered out as a result.\n91 tweets have been found outside of the grid and filtered out as a result.\n53 tweets have been found outside of the grid and filtered out as a result.\n78 tweets have been found outside of the grid and filtered out as a result.\n235 tweets have been found outside of the grid and filtered out as a result.\n69 tweets have been found outside of the grid and filtered out as a result.\n77 tweets have been found outside of the grid and filtered out as a result.\n56 tweets have been found outside of the grid and filtered out as a result.\n76 tweets have been found outside of the grid and filtered out as a result.\n62 tweets have been found outside of the grid and filtered out as a result.\n90 tweets have been found outside of the grid and filtered out as a result.\n108 tweets have been found outside of the grid and filtered out as a result.\n98 tweets have been found outside of the grid and filtered out as a result.\n75 tweets have been found outside of the grid and filtered out as a result.\n66 tweets have been found outside of the grid and filtered out as a result.\n65 tweets have been found outside of the grid and filtered out as a result.\n57 tweets have been found outside of the grid and filtered out as a result.\n41 tweets have been found outside of the grid and filtered out as a result.\n42 tweets have been found outside of the grid and filtered out as a result.\n36 tweets have been found outside of the grid and filtered out as a result.\n37 tweets have been found outside of the grid and filtered out as a result.\n44 tweets have been found outside of the grid and filtered out as a result.\n69 tweets have been found outside of the grid and filtered out as a result.\n49 tweets have been found outside of the grid and filtered out as a result.\n" ] ], [ [ "Places -> cells", "_____no_output_____" ] ], [ [ "# We count the number of users speaking a local language in each cell and place \n# of residence.\nlocal_langs = [lang for lang in plot_langs_dict]\nplaces_local_counts = places_langs_counts.reset_index(level='cld_lang')\nlocal_langs_mask = places_local_counts['cld_lang'].isin(local_langs)\nplaces_local_counts = (places_local_counts.loc[local_langs_mask]\n .groupby('place_id')['count']\n .sum()\n .rename('local_count'))\nplaces_counts = places_counts.join(places_local_counts, how='left')\n\ncells_local_counts = 
cells_langs_counts.reset_index(level='cld_lang')\nlocal_langs_mask = cells_local_counts['cld_lang'].isin(local_langs)\ncells_local_counts = (cells_local_counts.loc[local_langs_mask]\n .groupby('cell_id')['count']\n .sum()\n .rename('local_count'))\ncells_counts = cells_counts.join(cells_local_counts, how='left')\n\ncell_plot_df = places_to_cells.get_counts(\n places_counts, places_langs_counts, places_geodf,\n cells_in_area_df, plot_langs_dict)\n\n# We add the counts from the tweets with coordinates\ncell_plot_df = join_and_count.increment_join(\n cell_plot_df, cells_counts['total_count'], count_col='total_count')\ncell_plot_df = join_and_count.increment_join(\n cell_plot_df, cells_counts['local_count'], count_col='local_count')\ncell_plot_df = cell_plot_df.loc[cell_plot_df['total_count'] > 0]\n\nfor plot_lang, lang_dict in plot_langs_dict.items():\n lang_count_col = lang_dict['count_col']\n cells_lang_counts = cells_langs_counts.xs(plot_lang).rename(lang_count_col)\n cell_plot_df = join_and_count.increment_join(\n cell_plot_df, cells_lang_counts, count_col=lang_count_col)\n \n level_lang_label = tweet_level_label.format(lang_dict['readable'])\n sum_lang = cell_plot_df[lang_count_col].sum()\n print(f'There are {sum_lang:.0f} {level_lang_label}.')\n \ncell_plot_df['cell_id'] = cell_plot_df.index\ncell_data_path = cell_data_path_format.format('tweets', cc, cell_size)\ncell_plot_df.to_file(cell_data_path, driver='GeoJSON')", "There are 9010159 tweets in Spanish.\nThere are 5035352 tweets in Catalan.\n" ] ], [ [ "## Plots", "_____no_output_____" ] ], [ [ "# cell_size = 20000\ncell_data_path = cell_data_path_format.format('tweets', cc, cell_size)\ncell_plot_df = geopd.read_file(cell_data_path)\ncell_plot_df.index = cell_plot_df['cell_id']\ncell_plot_df, plot_langs_dict = metrics.calc_by_cell(cell_plot_df, plot_langs_dict)", "_____no_output_____" ], [ "for plot_lang, plot_dict in plot_langs_dict.items():\n count_lang_col = plot_dict['count_col']\n readable_lang = plot_dict['readable']\n save_path = os.path.join(cc_fig_dir, 'count',\n f'tweet_counts_cc={cc}_lang={plot_lang}_cell_size={cell_size}m.pdf')\n plot_title = f'Distribution of {readable_lang} speakers in {country_name}'\n cbar_label = plot_dict['count_label']\n plot_kwargs = dict(edgecolor='w', linewidths=0.2, cmap='Purples')\n ax_count = grid_viz.plot_grid(\n cell_plot_df, shape_df, metric_col=count_lang_col, save_path=save_path, \n show=False, log_scale=True, title=plot_title, cbar_label=cbar_label,\n xy_proj=xy_proj, **plot_kwargs)\n \n prop_lang_col = plot_dict['prop_col']\n save_path = os.path.join(cc_fig_dir, 'prop',\n f'tweets_prop_cc={cc}_lang={plot_lang}_cell_size={cell_size}m.pdf')\n plot_title = '{} predominance in {}'.format(readable_lang, country_name)\n cbar_label = plot_dict['prop_label']\n # Avoid sequential colormaps starting or ending with white, as white is \n # reserved for an absence of data\n plot_kwargs = dict(edgecolor='w', linewidths=0.2, cmap='plasma')\n ax_prop = grid_viz.plot_grid(\n cell_plot_df, shape_df, metric_col=prop_lang_col, save_path=save_path, \n title=plot_title, cbar_label=cbar_label, vmin=0, vmax=1, xy_proj=xy_proj, \n **plot_kwargs)", "_____no_output_____" ], [ "save_path = os.path.join(cc_fig_dir, \n f'tweets_prop_cc={cc}_cell_size={cell_size}m.html')\nprop_dict = {'name': 'prop', 'readable': 'proportion', 'vmin': 0, 'vmax': 1}\nfig = grid_viz.plot_interactive(\n cell_plot_df, shape_df, plot_langs_dict, prop_dict,\n save_path=save_path, plotly_renderer='iframe_connected', show=True)", 
"_____no_output_____" ] ], [ [ "# Study at the user level", "_____no_output_____" ], [ "Users who have tagged their tweets with gps coordinates seem to do it regularly, as the median of the proportion of tweets they geo tag is at more than 75% on the first chunk -> it's worth it to try and get their cell of residence", "_____no_output_____" ] ], [ [ "a = tweets_process_res[0].copy()\na['has_gps'] = a['area'] == 0\ngps_uids = a.loc[a['has_gps'], 'uid'].unique()\na = a.loc[a['uid'].isin(gps_uids)].groupby(['uid', 'has_gps']).size().rename('count').to_frame()\na = a.join(a.groupby('uid')['count'].sum().rename('sum'))\nb = a.reset_index()\nb = b.loc[b['has_gps']]\nb['ratio'] = b['count'] / b['sum']\nb['ratio'].describe()", "_____no_output_____" ] ], [ [ "If there's one or more cells where a user tweeted in proportion more than relevant_th of the time, we take among these cells the one where they tweeted the most outside work hours. Otherwise, we take the relevant place where they tweeted the most outside work hours, or we default to the place where they tweeted the most.", "_____no_output_____" ] ], [ [ "user_level_label = '{}-speaking users'\nlang_relevant_prop = 0.1\nlang_relevant_count = 5\ncell_relevant_th = 0.1\nplot_langs_dict = make_config.langs_dict(area_dict, user_level_label)", "_____no_output_____" ] ], [ [ "If valid_uids is already generated, we only loop once over the tweets df and do the whole processing in one go on each file, thus keeping very little in memory", "_____no_output_____" ] ], [ [ "valid_uids = pd.read_csv(valid_uids_path, index_col='uid', header=0)\ncells_df_list = [cells_in_area_df]\n\nif tweets_access_res is None:\n def get_df_fun(arg0):\n return data_access.read_json_wrapper(*arg0)\nelse:\n def get_df_fun(arg0):\n return arg0\n \nuser_agg_res = []\ndef collect_user_agg_res(res):\n global user_agg_res\n user_agg_res.append(res)\n \npool = mp.Pool(8)\nfor df_access in data_access.yield_tweets_access(tweets_files_paths):\n args = (df_access, get_df_fun, valid_uids, places_geodf, langs_agg_dict,\n cells_df_list, max_place_area, cc_timezone)\n kwargs = {'min_nr_words': 4, 'cld': 'pycld2'}\n pool.apply_async(\n uagg.get_lang_loc_habits, args, kwargs, callback=collect_user_agg_res,\n error_callback=print)\npool.close()\npool.join()\n\nuser_langs_counts = join_and_count.init_counts(['uid', 'cld_lang'])\nuser_cells_habits = join_and_count.init_counts(['uid', 'cell_id', \n 'isin_workhour'])\nuser_places_habits = join_and_count.init_counts(['uid', 'place_id', \n 'isin_workhour'])\nfor lang_res, cell_res, place_res in user_agg_res:\n user_langs_counts = join_and_count.increment_join(user_langs_counts, \n lang_res)\n user_cells_habits = join_and_count.increment_join(user_cells_habits, \n cell_res[0])\n user_places_habits = join_and_count.increment_join(user_places_habits, \n place_res)", "1000MB read, 846686 tweets unpacked.\n487136 tweets remaining after filters.\n1000MB read, 852716 tweets unpacked.\n477614 tweets remaining after filters.\n1000MB read, 844912 tweets unpacked.\nstarting lang detect\n471237 tweets remaining after filters.\nstarting lang detect\n1000MB read, 841957 tweets unpacked.\nstarting lang detect\n493881 tweets remaining after filters.\nstarting lang detect\nchunk lang detect done\n1000MB read, 839053 tweets unpacked.\nchunk lang detect done\nchunk lang detect done\n475712 tweets remaining after filters.\n1000MB read, 846221 tweets unpacked.\n479240 tweets remaining after filters.\n1000MB read, 831641 tweets unpacked.\nchunk lang detect done\n447460 
tweets remaining after filters.\nstarting lang detect\nstarting lang detect\nstarting lang detect\n1000MB read, 819327 tweets unpacked.\n464618 tweets remaining after filters.\n115.6MB read, 94907 tweets unpacked.\n48148 tweets remaining after filters.\nstarting lang detect\nchunk lang detect done\nchunk lang detect done\n1000MB read, 810121 tweets unpacked.\nchunk lang detect done\nchunk lang detect done\nstarting lang detect\n457673 tweets remaining after filters.\n1000MB read, 813545 tweets unpacked.\nstarting lang detect\n440606 tweets remaining after filters.\nstarting lang detect\nchunk lang detect done\nchunk lang detect done\n798.3MB read, 649307 tweets unpacked.\n372235 tweets remaining after filters.\nchunk lang detect done\nstarting lang detect\nchunk lang detect done\n" ] ], [ [ "## Language(s) attribution", "_____no_output_____" ], [ "very few users are actually filtered out by language attribution: not more worth it to generate user_langs_counts, user_cells_habits and user_places_habits inside of tweets_lang_df loop, so as to drop tweets_langs_df, and only return these user level, lightweight DFs", "_____no_output_____" ], [ " Here we get rid of users whose language we couldn't identify", "_____no_output_____" ] ], [ [ "# Residence attribution is the longest to run, and by a long shot, so we'll start\n# with language to filter out uids in tweets_df before doing it\ngroupby_cols = ['uid', 'cld_lang']\nuser_langs_counts = None\nfor res in tweets_process_res:\n tweets_lang_df = res.copy()\n # Here we don't filter out based on max_place_area, because these tweets\n # are still useful for language attribution.\n tweets_lang_df = tweets_lang_df.loc[tweets_lang_df['cld_lang'].notnull()]\n user_langs_counts = join_and_count.increment_counts(\n user_langs_counts, tweets_lang_df, groupby_cols)", "_____no_output_____" ], [ "user_langs_agg = uagg.get_lang_grp(user_langs_counts, area_dict,\n lang_relevant_prop=lang_relevant_prop,\n lang_relevant_count=lang_relevant_count,\n fig_dir=fig_dir, show_fig=True)", "We were able to attribute at least one language to 33779 users\n" ] ], [ [ "Attribute users to a group: mono, bi, tri, ... 
lingual\n\nProblem: need more tweets to detect multilingualism, eg users with only three tweets in the dataset are very unlikely to be detected as multilinguals", "_____no_output_____" ] ], [ [ "users_ling_grp = uagg.get_ling_grp(\n user_langs_agg, area_dict, lang_relevant_prop=lang_relevant_prop,\n lang_relevant_count=lang_relevant_count, fig_dir=fig_dir, show_fig=True)", "_____no_output_____" ] ], [ [ "## Pre-residence attribution", "_____no_output_____" ] ], [ [ "with mp.Pool(8) as pool:\n map_parameters = [(res, cells_in_area_df,\n max_place_area, cc_timezone) \n for res in tweets_process_res]\n print('entering the loop')\n tweets_pre_resid_res = (\n pool.starmap_async(data_process.prep_resid_attr, map_parameters).get())\n \nuser_places_habits = None\nuser_cells_habits = None\nfor res in tweets_pre_resid_res:\n # We first count the number of times a user has tweeted in each place inside\n # and outside work hours.\n tweets_places_df = res[1]\n groupby_cols = ['uid', 'place_id', 'isin_workhour']\n user_places_habits = join_and_count.increment_counts(\n user_places_habits, tweets_places_df, groupby_cols)\n # Then we do the same thing except in each cell, using the tweets with\n # coordinates.\n tweets_cells_df = res[0]\n groupby_cols = ['uid', 'cell_id', 'isin_workhour']\n user_cells_habits = join_and_count.increment_counts(\n user_cells_habits, tweets_cells_df, groupby_cols)", "_____no_output_____" ] ], [ [ "Here we took number of speakers, whether they're multilingual or monolingual, if they speak a language, they count as one in that language's count", "_____no_output_____" ], [ "## Residence attribution", "_____no_output_____" ] ], [ [ "user_home_cell, user_only_place = uagg.get_residence(\n user_cells_habits, user_places_habits, place_relevant_th=cell_relevant_th,\n cell_relevant_th=cell_relevant_th)", "_____no_output_____" ] ], [ [ "## Generate cell data", "_____no_output_____" ] ], [ [ "cell_plot_df = data_process.from_users_area_and_lang(\n cells_in_area_df, places_geodf, user_only_place,\n user_home_cell, user_langs_agg, users_ling_grp,\n plot_langs_dict, multiling_grps, cell_data_path_format)", "There are 9012 German-speaking users.\nThere are 7927 French-speaking users.\nThere are 1887 Italian-speaking users.\n" ] ], [ [ "GeoJSON should always be in lat, lon, WGS84 to be read by external programs, so in plotly for instance we need to make sure we come back to latlon_proj", "_____no_output_____" ], [ "## Plots", "_____no_output_____" ] ], [ [ "cell_size = 10000\ncell_data_path = cell_data_path_format.format(\n 'users_cell_data', cc, cell_size, 'geojson')\ncell_plot_df = geopd.read_file(cell_data_path)\ncell_plot_df.index = cell_plot_df['cell_id']\ncell_plot_df, plot_langs_dict = metrics.calc_by_cell(cell_plot_df, \n plot_langs_dict)", "_____no_output_____" ], [ "prop_dict = {'name': 'prop', 'readable': 'Proportion', 'log_scale': False, \n 'vmin': 0, 'vmax': 1, 'total_count_col': 'local_count'}\nmetric = prop_dict['name']\nsave_path_format = os.path.join(\n cc_fig_dir, metric, \n f'users_{metric}_cc={cc}_grp={{grp}}_cell_size={cell_size}m.pdf')\nax = helpers_viz.metric_grid(\n cell_plot_df, prop_dict, shape_df, plot_langs_dict, country_name, \n cmap='plasma', save_path_format=save_path_format, xy_proj=xy_proj, \n min_count=0, null_color='k')", "_____no_output_____" ], [ "save_path = os.path.join(cc_fig_dir, \n f'users_prop_cc={cc}_cell_size={cell_size}m.html')\nprop_dict = {'name': 'prop', 'readable': 'Proportion', 'log_scale': False, \n 'vmin': 0, 'vmax': 1, 'total_count_col': 
'local_count'}\nfig = grid_viz.plot_interactive(\n cell_plot_df, shape_df, plot_langs_dict, prop_dict,\n save_path=save_path, plotly_renderer='iframe_connected', show=True)", "_____no_output_____" ] ], [ [ "# Generate cell data files in loops", "_____no_output_____" ], [ "In all the above, the cell size and cc are supposed constant, defined in config. Here we first assume the cell size is not constant, then the cc", "_____no_output_____" ] ], [ [ "import sys\nimport logging\nimport logging.config\nimport traceback\nimport IPython\n\n# logger = logging.getLogger(__name__)\n# load config from file\nlogging.config.fileConfig('logging.ini', disable_existing_loggers=False)\n\ndef showtraceback(self):\n traceback_lines = traceback.format_exception(*sys.exc_info())\n del traceback_lines[1]\n message = ''.join(traceback_lines)\n logging.error(message)\n# sys.stderr.write(message)\nIPython.core.interactiveshell.InteractiveShell.showtraceback = showtraceback", "_____no_output_____" ], [ "tweets_files_format = 'tweets_{}_{}_{}.json.gz'\nplaces_files_format = 'places_{}_{}_{}.json.gz'\nsource_data_dir = os.environ['DATA_DIR']\nfig_dir = os.path.join('..', 'reports', 'figures')\nproject_data_dir = os.path.join('..', 'data')\nexternal_data_dir = os.path.join(project_data_dir, 'external')\ninterim_data_dir = os.path.join(project_data_dir, 'interim')\nprocessed_data_dir = os.path.join(project_data_dir, 'processed')\ncell_data_path_format = os.path.join(\n processed_data_dir, '{0}', '{0}_cc={1}_r={2}_cell_size={3}m.{4}')\nnull_reply_id = 'e39d05b72f25767869d44391919434896bb055772d7969f74472032b03bc18418911f3b0e6dd47ff8f3b2323728225286c3cb36914d28dc7db40bdd786159c0a'\nwith open(os.path.join(external_data_dir, 'countries.json')) as f:\n countries_study_data = json.load(f)\nwith open(os.path.join(external_data_dir, 'langs_agg.json')) as f:\n langs_agg_dict = json.load(f)", "_____no_output_____" ] ], [ [ "## Countries loop ", "_____no_output_____" ] ], [ [ "cc = 'PY'\nregions = ()\n# regions = ('New York City', 'Puerto Rico')\n# regions = ('Catalonia', 'Balearic islands', 'Galicia', 'Valencian Community', \n# 'Basque country')\n# regions = ('Louisiana', 'Texas', 'New Mexico', 'Arizona', 'Nevada',\n# 'California')\nvalid_uids_path_format = os.path.join(interim_data_dir, 'valid_uids_{}_{}.csv')\n\nareas_dict = {'cc': cc, 'regions': {}}\nif not regions:\n areas_dict['regions'] = {cc: countries_study_data[cc]}\nfor r in regions:\n areas_dict['regions'][r] = countries_study_data[cc]['regions'][r]\n\ncell_sizes_list = [30000, 40000]\ndata_years = [(2015, 2019)]\ntweets_files_paths = [\n os.path.join(source_data_dir,\n tweets_files_format.format(year_from, year_to, cc))\n for year_from, year_to in data_years]\nplaces_files_paths = [\n os.path.join(source_data_dir,\n places_files_format.format(year_from, year_to, cc))\n for year_from, year_to in data_years]\nlang_relevant_prop = 0.1\nlang_relevant_count = 5\ncell_relevant_th = 0.1\n\ndef get_df_fun(arg0):\n return data_access.read_json_wrapper(*arg0)\n\nareas_dict = geo.init_cc(\n areas_dict, cell_sizes_list, places_files_paths, project_data_dir)", "reading of places data files is done\nplaces dataframe for PY generated\ncells dataframes for PY generated\n" ], [ "filters_pass_res = []\ndef collect_filters_pass_res(res):\n global filters_pass_res\n filters_pass_res.append(res)\n\nareas_dict = ufilters.get_valid_uids(\n areas_dict, get_df_fun, collect_filters_pass_res,\n filters_pass_res, tweets_files_paths, cpus=8)\nfor region, region_dict in 
areas_dict['regions'].items():\n valid_uids = region_dict['valid_uids']\n valid_uids_path = valid_uids_path_format.format(areas_dict['cc'], \n region_dict['readable'])\n valid_uids.to_csv(valid_uids_path, header=True)", "2020-04-30 14:28:47,520 - src.data.user_filters - INFO - - starting on chunk 0\n2020-04-30 14:28:56,632 - src.data.user_filters - INFO - - starting on chunk 1\n2020-04-30 14:29:05,689 - src.data.user_filters - INFO - - starting on chunk 2\n2020-04-30 14:29:15,060 - src.data.user_filters - INFO - - starting on chunk 3\n2020-04-30 14:29:24,542 - src.data.user_filters - INFO - - starting on chunk 4\n2020-04-30 14:29:34,210 - src.data.user_filters - INFO - - starting on chunk 5\n2020-04-30 14:29:39,310 - src.data.access - INFO - 1000MB read, 856450 tweets unpacked.\n2020-04-30 14:29:44,016 - src.data.user_filters - INFO - - starting on chunk 6\n2020-04-30 14:29:45,657 - src.data.access - INFO - 830503 tweets remaining after filters.\nThere are 43347 distinct users in this chunk.\n2020-04-30 14:29:54,297 - src.data.access - INFO - 1000MB read, 860004 tweets unpacked.\n2020-04-30 14:29:54,570 - src.data.user_filters - INFO - - starting on chunk 7\n2020-04-30 14:30:00,481 - src.data.access - INFO - 833795 tweets remaining after filters.\nThere are 45903 distinct users in this chunk.\n2020-04-30 14:30:05,207 - src.data.user_filters - INFO - - starting on chunk 8\n2020-04-30 14:30:15,839 - src.data.user_filters - INFO - - starting on chunk 9\n2020-04-30 14:30:18,746 - src.data.access - INFO - 1000MB read, 855918 tweets unpacked.\n2020-04-30 14:30:23,952 - src.data.user_filters - INFO - - starting on chunk 10\n2020-04-30 14:30:25,876 - src.data.access - INFO - 839844 tweets remaining after filters.\nThere are 50945 distinct users in this chunk.\n2020-04-30 14:30:32,944 - src.data.user_filters - INFO - - starting on chunk 11\n2020-04-30 14:30:38,910 - src.data.user_filters - INFO - 928 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:30:42,067 - src.data.user_filters - INFO - - starting on chunk 12\n2020-04-30 14:30:43,744 - src.data.access - INFO - 1000MB read, 860149 tweets unpacked.\n2020-04-30 14:30:48,685 - src.data.access - INFO - 1000MB read, 852304 tweets unpacked.\n2020-04-30 14:30:50,981 - src.data.access - INFO - 830914 tweets remaining after filters.\n2020-04-30 14:30:52,732 - src.data.user_filters - INFO - - starting on chunk 13\n2020-04-30 14:30:54,662 - src.data.access - INFO - 829686 tweets remaining after filters.\n2020-04-30 14:30:55,920 - src.data.user_filters - INFO - 910 users have been found in this chunk with a speed exceeding 1008 km/h.\nThere are 51157 distinct users in this chunk.\nThere are 40656 distinct users in this chunk.\n2020-04-30 14:31:03,307 - src.data.user_filters - INFO - - starting on chunk 14\n2020-04-30 14:31:12,800 - src.data.user_filters - INFO - - starting on chunk 15\n2020-04-30 14:31:15,162 - src.data.access - INFO - 1000MB read, 851517 tweets unpacked.\n2020-04-30 14:31:22,288 - src.data.user_filters - INFO - - starting on chunk 16\n2020-04-30 14:31:22,781 - src.data.access - INFO - 830631 tweets remaining after filters.\nThere are 39905 distinct users in this chunk.\n2020-04-30 14:31:31,985 - src.data.user_filters - INFO - - starting on chunk 17\n2020-04-30 14:31:34,223 - src.data.access - INFO - 1000MB read, 857154 tweets unpacked.\n2020-04-30 14:31:37,094 - src.data.user_filters - INFO - 1016 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:31:38,734 - 
src.data.user_filters - INFO - 570 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:31:41,638 - src.data.access - INFO - 836223 tweets remaining after filters.\n2020-04-30 14:31:43,435 - src.data.user_filters - INFO - - starting on chunk 18\n2020-04-30 14:31:46,161 - src.data.user_filters - INFO - 966 users have been found in this chunk with a speed exceeding 1008 km/h.\nThere are 42144 distinct users in this chunk.\n2020-04-30 14:31:47,548 - src.data.user_filters - INFO - - starting on chunk 19\n2020-04-30 14:31:57,085 - src.data.access - INFO - 1000MB read, 853436 tweets unpacked.\n2020-04-30 14:31:58,871 - src.data.user_filters - INFO - - starting on chunk 20\n2020-04-30 14:32:06,950 - src.data.access - INFO - 828636 tweets remaining after filters.\n2020-04-30 14:32:08,623 - src.data.user_filters - INFO - - starting on chunk 21\n2020-04-30 14:32:11,316 - src.data.user_filters - INFO - 1090 users have been found in this chunk with a speed exceeding 1008 km/h.\nThere are 42005 distinct users in this chunk.\n2020-04-30 14:32:20,022 - src.data.user_filters - INFO - - starting on chunk 22\n2020-04-30 14:32:20,106 - src.data.user_filters - INFO - - starting on chunk 23\n2020-04-30 14:32:32,619 - src.data.user_filters - INFO - 1138 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:32:39,230 - src.data.access - INFO - 1000MB read, 845672 tweets unpacked.\n2020-04-30 14:32:45,322 - src.data.access - INFO - 816596 tweets remaining after filters.\nThere are 40337 distinct users in this chunk.\n2020-04-30 14:32:52,182 - src.data.user_filters - INFO - 1224 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:32:57,379 - src.data.access - INFO - 1000MB read, 848179 tweets unpacked.\n2020-04-30 14:33:03,180 - src.data.access - INFO - 825441 tweets remaining after filters.\nThere are 43081 distinct users in this chunk.\n2020-04-30 14:33:27,443 - src.data.user_filters - INFO - 1209 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:33:45,620 - src.data.user_filters - INFO - 1171 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:33:47,327 - src.data.access - INFO - 1000MB read, 850081 tweets unpacked.\n2020-04-30 14:33:52,430 - src.data.access - INFO - 832340 tweets remaining after filters.\n2020-04-30 14:33:55,396 - src.data.access - INFO - 1000MB read, 835652 tweets unpacked.\nThere are 46516 distinct users in this chunk.\n2020-04-30 14:34:01,165 - src.data.access - INFO - 812334 tweets remaining after filters.\nThere are 49149 distinct users in this chunk.\n2020-04-30 14:34:18,265 - src.data.access - INFO - 1000MB read, 844306 tweets unpacked.\n2020-04-30 14:34:24,983 - src.data.access - INFO - 817267 tweets remaining after filters.\nThere are 42611 distinct users in this chunk.\n2020-04-30 14:34:38,944 - src.data.user_filters - INFO - 1088 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:34:40,076 - src.data.user_filters - INFO - 1136 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:34:50,196 - src.data.access - INFO - 1000MB read, 839777 tweets unpacked.\n2020-04-30 14:34:56,445 - src.data.access - INFO - 813159 tweets remaining after filters.\nThere are 48657 distinct users in this chunk.\n2020-04-30 14:35:10,347 - src.data.user_filters - INFO - 1090 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:35:24,970 - 
src.data.access - INFO - 1000MB read, 848350 tweets unpacked.\n2020-04-30 14:35:30,752 - src.data.access - INFO - 836221 tweets remaining after filters.\nThere are 46390 distinct users in this chunk.\n2020-04-30 14:35:41,498 - src.data.access - INFO - 1000MB read, 823809 tweets unpacked.\n2020-04-30 14:35:44,887 - src.data.user_filters - INFO - 1141 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:35:47,049 - src.data.access - INFO - 806398 tweets remaining after filters.\nThere are 41537 distinct users in this chunk.\n2020-04-30 14:36:05,754 - src.data.access - INFO - 1000MB read, 968962 tweets unpacked.\n2020-04-30 14:36:08,152 - src.data.access - INFO - 960231 tweets remaining after filters.\nThere are 45214 distinct users in this chunk.\n2020-04-30 14:36:21,048 - src.data.access - INFO - 1000MB read, 823591 tweets unpacked.\n2020-04-30 14:36:27,092 - src.data.access - INFO - 811465 tweets remaining after filters.\n2020-04-30 14:36:29,918 - src.data.user_filters - INFO - 1179 users have been found in this chunk with a speed exceeding 1008 km/h.\nThere are 42623 distinct users in this chunk.\n2020-04-30 14:36:32,634 - src.data.user_filters - INFO - 928 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:36:48,021 - src.data.access - INFO - 1000MB read, 970879 tweets unpacked.\n2020-04-30 14:36:48,488 - src.data.access - INFO - 1000MB read, 821340 tweets unpacked.\n2020-04-30 14:36:50,324 - src.data.access - INFO - 964187 tweets remaining after filters.\n2020-04-30 14:36:54,381 - src.data.access - INFO - 806306 tweets remaining after filters.\n2020-04-30 14:36:54,679 - src.data.user_filters - INFO - 1893 users have been found in this chunk with a speed exceeding 1008 km/h.\nThere are 47310 distinct users in this chunk.\nThere are 45347 distinct users in this chunk.\n2020-04-30 14:37:03,954 - src.data.access - INFO - 8.958MB read, 8868 tweets unpacked.\n2020-04-30 14:37:03,996 - src.data.access - INFO - 8827 tweets remaining after filters.\nThere are 2307 distinct users in this chunk.\n2020-04-30 14:37:04,674 - src.data.user_filters - INFO - 31 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:37:07,612 - src.data.user_filters - INFO - 1301 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:37:29,694 - src.data.access - INFO - 300.8MB read, 246438 tweets unpacked.\n2020-04-30 14:37:30,735 - src.data.access - INFO - 243306 tweets remaining after filters.\nThere are 20733 distinct users in this chunk.\n2020-04-30 14:37:31,968 - src.data.user_filters - INFO - 2112 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:37:32,987 - src.data.user_filters - INFO - 1573 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:37:39,602 - src.data.access - INFO - 1000MB read, 977109 tweets unpacked.\n2020-04-30 14:37:40,825 - src.data.user_filters - INFO - 550 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:37:42,382 - src.data.access - INFO - 972956 tweets remaining after filters.\nThere are 42931 distinct users in this chunk.\n2020-04-30 14:37:51,559 - src.data.access - INFO - 1000MB read, 823703 tweets unpacked.\n2020-04-30 14:37:55,980 - src.data.access - INFO - 810612 tweets remaining after filters.\nThere are 43537 distinct users in this chunk.\n2020-04-30 14:38:20,139 - src.data.user_filters - INFO - 2016 users have been found in this chunk with a speed exceeding 
1008 km/h.\n2020-04-30 14:38:32,870 - src.data.user_filters - INFO - 1525 users have been found in this chunk with a speed exceeding 1008 km/h.\n2020-04-30 14:39:50,740 - src.data.user_filters - INFO - There are 408490 distinct users in the whole dataset in Belgium.\n2020-04-30 14:39:51,528 - src.data.user_filters - INFO - There are 106421 users with at least 3 months of activity in the dataset.\n2020-04-30 14:39:51,864 - src.data.user_filters - INFO - There are 65164 users considered local in the dataset, as they have been active for 3 consecutive months in this area at least once.\n2020-04-30 14:39:53,487 - src.data.user_filters - INFO - 0 users have been found to be bots because of their excessive activity, tweeting more than 3 times per minute.\n2020-04-30 14:39:53,568 - src.data.user_filters - INFO - There are 18115 too fast users to filter out in the whole dataset in Belgium.\n2020-04-30 14:39:53,689 - src.data.user_filters - INFO - This leaves us with 62807 valid users in the whole dataset in Belgium.\n" ], [ "for region, region_dict in areas_dict['regions'].items():\n valid_uids_path = valid_uids_path_format.format(areas_dict['cc'], \n region_dict['readable'])\n valid_uids = pd.read_csv(valid_uids_path, index_col='uid', header=0)\n areas_dict['regions'][region]['valid_uids'] = valid_uids\n \nuser_agg_res = []\ndef collect_user_agg_res(res):\n global user_agg_res\n user_agg_res.append(res)\n\ncells_results.from_scratch(\n areas_dict, \n tweets_files_paths, get_df_fun, collect_user_agg_res, \n user_agg_res, langs_agg_dict, cell_data_path_format, null_reply_id,\n lang_relevant_prop=0.1, lang_relevant_count=5, cell_relevant_th=0.1,\n place_relevant_th=0.1, fig_dir=fig_dir)", "2020-06-04 11:38:43,882 - src.data.cells_results - INFO - starting on chunk 0\n2020-06-04 11:38:53,009 - src.data.cells_results - INFO - starting on chunk 1\n2020-06-04 11:39:02,469 - src.data.cells_results - INFO - starting on chunk 2\n2020-06-04 11:39:11,418 - src.data.cells_results - INFO - starting on chunk 3\n2020-06-04 11:39:20,707 - src.data.cells_results - INFO - starting on chunk 4\n2020-06-04 11:39:27,000 - src.data.access - INFO - 1000MB read, 1045895 tweets unpacked.\n2020-06-04 11:39:30,043 - src.data.cells_results - INFO - starting on chunk 5\n2020-06-04 11:39:36,508 - src.data.access - INFO - 685997 tweets remaining after filters.\n2020-06-04 11:39:39,542 - src.data.cells_results - INFO - starting on chunk 6\n2020-06-04 11:39:48,955 - src.data.cells_results - INFO - starting on chunk 7\n2020-06-04 11:39:55,285 - src.data.access - INFO - 1000MB read, 1028962 tweets unpacked.\n2020-06-04 11:39:58,725 - src.data.cells_results - INFO - starting on chunk 8\n2020-06-04 11:40:04,441 - src.data.access - INFO - 689652 tweets remaining after filters.\n2020-06-04 11:40:09,551 - src.data.cells_results - INFO - starting on chunk 9\n2020-06-04 11:40:12,718 - src.data.access - INFO - 1000MB read, 1047956 tweets unpacked.\n2020-06-04 11:40:19,169 - src.data.access - INFO - 1000MB read, 1037459 tweets unpacked.\n2020-06-04 11:40:19,395 - src.data.cells_results - INFO - starting on chunk 10\n2020-06-04 11:40:20,772 - src.data.access - INFO - 736129 tweets remaining after filters.\n2020-06-04 11:40:23,005 - src.data.access - INFO - 399167 tweets remaining after filters.\nstarting lang detect\n2020-06-04 11:40:29,337 - src.data.cells_results - INFO - starting on chunk 11\n2020-06-04 11:40:39,673 - src.data.cells_results - INFO - starting on chunk 12\n2020-06-04 11:40:48,276 - src.data.access - INFO - 1000MB read, 
1034588 tweets unpacked.\nstarting lang detect\n2020-06-04 11:40:49,590 - src.data.cells_results - INFO - starting on chunk 13\n2020-06-04 11:40:55,487 - src.data.access - INFO - 806720 tweets remaining after filters.\n2020-06-04 11:40:59,996 - src.data.cells_results - INFO - starting on chunk 14\n2020-06-04 11:41:09,973 - src.data.cells_results - INFO - starting on chunk 15\nchunk lang detect done\nchunk lang detect done\n2020-06-04 11:41:12,302 - src.data.cells_results - INFO - starting on chunk 16\n2020-06-04 11:41:14,339 - src.data.access - INFO - 1000MB read, 1029088 tweets unpacked.\nstarting lang detect\n2020-06-04 11:41:21,664 - src.data.access - INFO - 1000MB read, 1027359 tweets unpacked.\nstarting lang detect\n2020-06-04 11:41:28,232 - src.data.access - INFO - 786160 tweets remaining after filters.\n2020-06-04 11:41:32,904 - src.data.access - INFO - 787053 tweets remaining after filters.\n2020-06-04 11:41:37,174 - src.data.access - INFO - 1000MB read, 1023918 tweets unpacked.\nstarting lang detect\n2020-06-04 11:41:42,815 - src.data.access - INFO - 506928 tweets remaining after filters.\nchunk lang detect done\nstarting lang detect\nchunk lang detect done\nchunk lang detect done\nstarting lang detect\nstarting lang detect\nchunk lang detect done\nchunk lang detect done\nchunk lang detect done\n2020-06-04 11:44:08,137 - src.data.access - INFO - 1000MB read, 1029333 tweets unpacked.\n2020-06-04 11:44:16,766 - src.data.access - INFO - 817965 tweets remaining after filters.\nstarting lang detect\n2020-06-04 11:45:03,610 - src.data.access - INFO - 1000MB read, 1031636 tweets unpacked.\n2020-06-04 11:45:14,222 - src.data.access - INFO - 789319 tweets remaining after filters.\n2020-06-04 11:45:42,423 - src.data.access - INFO - 1000MB read, 1024005 tweets unpacked.\nchunk lang detect done\n2020-06-04 11:45:50,486 - src.data.access - INFO - 782099 tweets remaining after filters.\nstarting lang detect\n2020-06-04 11:46:11,575 - src.data.access - INFO - 1000MB read, 1007675 tweets unpacked.\n2020-06-04 11:46:18,973 - src.data.access - INFO - 806602 tweets remaining after filters.\nstarting lang detect\n2020-06-04 11:46:32,714 - src.data.access - INFO - 1000MB read, 999877 tweets unpacked.\n2020-06-04 11:46:37,413 - src.data.access - INFO - 787906 tweets remaining after filters.\nstarting lang detect\nchunk lang detect done\nstarting lang detect\nchunk lang detect done\nchunk lang detect done\n2020-06-04 11:47:30,941 - src.data.access - INFO - 1000MB read, 1001811 tweets unpacked.\n2020-06-04 11:47:37,340 - src.data.access - INFO - 792710 tweets remaining after filters.\n2020-06-04 11:47:46,093 - src.data.access - INFO - 1000MB read, 1001733 tweets unpacked.\n2020-06-04 11:47:52,697 - src.data.access - INFO - 791140 tweets remaining after filters.\nchunk lang detect done\nstarting lang detect\n2020-06-04 11:48:11,068 - src.data.access - INFO - 1000MB read, 1001287 tweets unpacked.\nstarting lang detect\n2020-06-04 11:48:16,768 - src.data.access - INFO - 816335 tweets remaining after filters.\nstarting lang detect\nchunk lang detect done\nchunk lang detect done\n2020-06-04 11:49:26,586 - src.data.access - INFO - 220.2MB read, 221086 tweets unpacked.\n2020-06-04 11:49:27,570 - src.data.access - INFO - 166977 tweets remaining after filters.\nstarting lang detect\nchunk lang detect done\nchunk lang detect done\nWe were able to attribute at least one language to 48204 users\n2020-06-04 11:51:14,318 - src.data.cells_results - INFO - lang attribution done\n2020-06-04 11:51:26,374 - 
src.data.cells_results - INFO - There are 45737 Spanish-speaking users.\n2020-06-04 11:51:26,376 - src.data.cells_results - INFO - There are 3986 Guarani-speaking users.\n2020-06-04 11:51:26,377 - src.data.cells_results - INFO - There are 4571 Portuguese-speaking users.\n2020-06-04 11:51:26,378 - src.data.cells_results - INFO - saving at ../data/processed/users_cell_data/users_cell_data_cc=PY_r=Paraguay_cell_size=30000m.geojson.\n2020-06-04 11:51:26,391 - fiona._env - ERROR - ../data/processed/users_cell_data/users_cell_data_cc=PY_r=Paraguay_cell_size=30000m.geojson: No such file or directory\n2020-06-04 11:51:26,393 - fiona._env - WARNING - driver GeoJSON does not support creation option ENCODING\n2020-06-04 11:51:37,003 - src.data.cells_results - INFO - There are 45743 Spanish-speaking users.\n2020-06-04 11:51:37,006 - src.data.cells_results - INFO - There are 3986 Guarani-speaking users.\n2020-06-04 11:51:37,007 - src.data.cells_results - INFO - There are 4571 Portuguese-speaking users.\n2020-06-04 11:51:37,007 - src.data.cells_results - INFO - saving at ../data/processed/users_cell_data/users_cell_data_cc=PY_r=Paraguay_cell_size=40000m.geojson.\n2020-06-04 11:51:37,019 - fiona._env - ERROR - ../data/processed/users_cell_data/users_cell_data_cc=PY_r=Paraguay_cell_size=40000m.geojson: No such file or directory\n2020-06-04 11:51:37,020 - fiona._env - WARNING - driver GeoJSON does not support creation option ENCODING\n" ] ] ]
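The residence-attribution rule stated in the notebook above — if one or more grid cells hold more than `relevant_th` of a user's tweets, keep the one with the most tweets outside work hours, otherwise fall back to Twitter places — is delegated to `uagg.get_residence`, whose code is not shown. The following is a minimal pandas sketch of that rule for a single user, written only from the prose description; the function name, column layout and fallback are assumptions, not the project's actual API.

```python
# Hedged sketch of the residence rule described above, for one user.
# Input mimics user_cells_habits: one row per (cell_id, isin_workhour).
import pandas as pd

def attribute_home_cell(cell_habits, relevant_th=0.1):
    """cell_habits columns: cell_id, isin_workhour, count."""
    total = cell_habits['count'].sum()
    per_cell = cell_habits.groupby('cell_id')['count'].sum()
    # Cells holding more than `relevant_th` of this user's tweets.
    relevant = per_cell[per_cell / total > relevant_th]
    if relevant.empty:
        return None  # fall back to place-based attribution (not sketched here)
    # Among those cells, pick the one with the most tweets outside work hours.
    outside = (cell_habits.loc[~cell_habits['isin_workhour']]
               .groupby('cell_id')['count'].sum()
               .reindex(relevant.index, fill_value=0))
    return outside.idxmax()

habits = pd.DataFrame({'cell_id': [12, 12, 34, 56],
                       'isin_workhour': [True, False, True, False],
                       'count': [40, 60, 5, 20]})
print(attribute_home_cell(habits))  # expected: 12
```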
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7c5f8a5082d9987c29a04ec2a461f0d40cc24c8
21,269
ipynb
Jupyter Notebook
approaches/gbdt/feature_engineering.ipynb
gtsli/grocery_e-receipt
29717d45fccd39b8ce4bdc976293e7e936e419e8
[ "MIT" ]
2
2019-04-05T02:01:13.000Z
2020-10-23T05:20:48.000Z
approaches/gbdt/feature_engineering.ipynb
gtsli/grocery_e-receipt
29717d45fccd39b8ce4bdc976293e7e936e419e8
[ "MIT" ]
null
null
null
approaches/gbdt/feature_engineering.ipynb
gtsli/grocery_e-receipt
29717d45fccd39b8ce4bdc976293e7e936e419e8
[ "MIT" ]
null
null
null
27.37323
92
0.365085
[ [ [ "import pandas as pd\nimport numpy\nimport json\nimport string", "_____no_output_____" ], [ "path_name = \"../../synthetic_data/data/final_branded_train_labels.json\"\nwith open(path_name) as f:\n data = json.load(f)", "_____no_output_____" ], [ "y = [item[0] for item in data]\nx = [item[1] for item in data]", "_____no_output_____" ], [ "colors = [\n\"BG\", \"BEIGE\",\n\"BK\", \"BLACK\", \"BLCK\", \"BLK\",\n\"BL\", \"BLUE\", \"BLU\",\n\"BN\", \"BROWN\", \"BRWN\",\n\"BZ\", \"BRONZE\", \"BRNZ\",\n\"CH\", \"CHARCOAL\", \"CHRCL\",\n\"CL\", \"CLEAR\", \"CLR\",\n\"DK\", \"DARK\", \"DRK\"\n\"GD\", \"GOLD\", \"GLD\",\n\"GN\", \"GREEN\", \"GRN\",\n\"GY\", \"GRAY\", \"GRY\",\n\"GT\", \"GRANITE\", \"GRNT\",\n\"LT\", \"LIGHT\", \"LGHT\",\n\"OR\", \"ORANGE\", \"ORNG\",\n\"PK\", \"PINK\", \"PNK\",\n\"RD\", \"RED\",\n\"TL\", \"TRANSLUCENT\", \"TRNSLCNT\",\n\"TN\", \"TAN\",\n\"TP\", \"TRANSPARENT\", \"TRNSPRNT\",\n\"VT\", \"VIOLET\", \"VLT\"\n\"WT\", \"WHITE\", \"WHT\"\n\"YL\", \"YELLOW\", \"YLLW\", \"YLW\"]\n\nunits = [\n 'PACK',\n 'PCK',\n 'PK'\n 'OZ',\n 'OUNCE',\n 'CT',\n 'COUNT',\n 'LB',\n 'LBS']", "_____no_output_____" ], [ "with open('../../synthetic_data/data/abbreviated_brands.json') as json_file: \n data = json.load(json_file)\n abbreviated_brands = [x[1] for x in data]\n \nwith open('../../synthetic_data/data/abbreviated_descriptors.json') as json_file: \n data = json.load(json_file)\n abbreviated_descriptors = [x[1] for x in data]", "_____no_output_____" ], [ "def num_vowels(x):\n num_vowels=0\n for char in x:\n if char in \"aeiouAEIOU\":\n num_vowels += 1\n return int(num_vowels)", "_____no_output_____" ], [ "def num_words(x):\n return int(len(x.split()))", "_____no_output_____" ], [ "def num_chars(x):\n return int(len(x))", "_____no_output_____" ], [ "def num_digits(x):\n num_digits=0\n for char in x:\n if char.isnumeric():\n num_digits += 1\n return int(num_digits)", "_____no_output_____" ], [ "def num_symbols(x):\n num_symbols=0\n for char in x:\n if not char.isnumeric() and not char.isspace() and not char.isalpha():\n num_symbols += 1\n return int(num_symbols)", "_____no_output_____" ], [ "def num_letters(x, letter):\n num_letters=0\n for char in x:\n if char == letter:\n num_letters += 1\n return int(num_letters)", "_____no_output_____" ], [ "def is_plural(x):\n if x[len(x) - 1] == 'S':\n return 1\n return 0", "_____no_output_____" ], [ "def has_char_repeat(x):\n for i in range(1, len(x) - 1):\n if x[i - 1] == x[i]:\n return 1\n return 0", "_____no_output_____" ], [ "def has_color(x):\n words = x.split()\n for word in words:\n if word in colors:\n return 1\n return 0", "_____no_output_____" ], [ "def has_brand(x):\n for abbr_brand in abbreviated_brands:\n if abbr_brand in x:\n return 1\n return 0", "_____no_output_____" ], [ "def has_descriptor(x):\n for abbr_descriptor in abbreviated_descriptors:\n if abbr_descriptor in x:\n return 1\n return 0", "_____no_output_____" ], [ "def has_unit(x):\n words = x.split()\n for word in words:\n if word in units:\n return 1\n return 0", "_____no_output_____" ], [ "df = pd.DataFrame(data={\"x\":x, \"y\":y})\ndf.head()", "_____no_output_____" ], [ "df[\"num_vowels\"] = df.apply(lambda row: num_vowels(row.x), axis=1)", "_____no_output_____" ], [ "df[\"num_words\"] = df.apply(lambda row: num_words(row.x), axis=1)", "_____no_output_____" ], [ "df[\"num_chars\"] = df.apply(lambda row: num_chars(row.x), axis=1)", "_____no_output_____" ], [ "df[\"num_digits\"] = df.apply(lambda row: num_digits(row.x), axis=1)", "_____no_output_____" ], [ 
"df[\"num_symbols\"] = df.apply(lambda row: num_symbols(row.x), axis=1)", "_____no_output_____" ], [ "%%time\nfor letter in string.ascii_uppercase:\n df[\"num_\" + letter] = df.apply(lambda row: num_letters(row.x, letter), axis=1)", "Wall time: 7min\n" ], [ "df[\"is_plural\"] = df.apply(lambda row: is_plural(row.x), axis=1)", "_____no_output_____" ], [ "df[\"has_char_repeat\"] = df.apply(lambda row: has_char_repeat(row.x), axis=1)", "_____no_output_____" ], [ "df[\"has_color\"] = df.apply(lambda row: has_color(row.x), axis=1)", "_____no_output_____" ], [ "df[\"has_brand\"] = df.apply(lambda row: has_brand(row.x), axis=1)", "_____no_output_____" ], [ "df[\"has_descriptor\"] = df.apply(lambda row: has_descriptor(row.x), axis=1)", "_____no_output_____" ], [ "df[\"has_unit\"] = df.apply(lambda row: has_unit(row.x), axis=1)", "_____no_output_____" ], [ "df.head(10)", "_____no_output_____" ], [ "len(df.columns.values)", "_____no_output_____" ], [ "len(df)", "_____no_output_____" ], [ "df.to_csv(\"featured_df.csv\", index=False, encoding=\"utf-8\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c60726ed999c8e10f4b71fdfe84f97e74abe29
838,019
ipynb
Jupyter Notebook
plots/ttl_daily_specialDays.ipynb
leedtan/SparklesSunshinePuppies
ab208c627081ae2c2654a3620d2061629d5636f4
[ "MIT" ]
null
null
null
plots/ttl_daily_specialDays.ipynb
leedtan/SparklesSunshinePuppies
ab208c627081ae2c2654a3620d2061629d5636f4
[ "MIT" ]
null
null
null
plots/ttl_daily_specialDays.ipynb
leedtan/SparklesSunshinePuppies
ab208c627081ae2c2654a3620d2061629d5636f4
[ "MIT" ]
null
null
null
1,252.644245
421,010
0.942068
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style=\"whitegrid\")\nfrom sklearn.kernel_ridge import KernelRidge\nfrom scipy.interpolate import UnivariateSpline as spline\n%matplotlib inline\n\ndf = pd.read_csv('ttl_daily.csv', names=['date', 'cnt']).ix[1:,:]\ndf['date'] = pd.to_datetime(df['date'])\ndf = df.sort_values(by='date')\ndf = df.reset_index().drop('index', 1)", "_____no_output_____" ], [ "print(df.shape)\ndf.head()", "(3652, 2)\n" ], [ "df['m_d'] = df.date.apply(lambda x: x.strftime('%m-%d')) \ndf['d'] = df.date.apply(lambda x: x.strftime('%d'))\ndf.head()", "_____no_output_____" ], [ "vals = df['cnt'].values\ndates = df['date'].values", "_____no_output_____" ], [ "sns.hls_palette(8, l=.3, s=.7)", "_____no_output_____" ], [ "special_days = ['01-01', '02-14', '04-01', '07-14', '10-31', '12-24', '12-25']\nfestivals = ['New Year', 'Valentines', 'April Fool', 'Independence', 'Halloween', 'Xmas Eve', 'Xmas']\ncolors = sns.color_palette(\"hls\", 7)", "_____no_output_____" ], [ "df_sp = df[df.m_d.isin(special_days)]\ndf_sp.head()", "_____no_output_____" ], [ "df_1st = df[df.d == '01']\ndf_1st.head()", "_____no_output_____" ], [ "def smooth(x, y, nb):\n y_smooth = np.zeros(x.shape[0])\n for i in range(len(x)):\n if i-nb < 0:\n y_smooth[i] = np.mean(y[:i+11])\n elif i+nb+1 > len(y):\n y_smooth[i] = np.mean(y[i-nb:])\n else:\n y_smooth[i] = np.mean(y[i-nb:i+nb+1])\n return y_smooth", "_____no_output_____" ], [ "x = df.index.values\ny = np.array(vals)\ndays = x.reshape([-1,1])\ny_smooth_avg = smooth(x, y, 10)\n\nx_sp = df_sp.index.values\ny_sp = df_sp.cnt.values\ndays_sp = x_sp.reshape([-1, 1])\n\nx_1st = df_1st.index.values\ny_1st = df_1st.cnt.values\ndays_1st = x_1st.reshape([-1, 1])", "_____no_output_____" ], [ "print(y_sp.shape, days_sp.shape, len(y_sp), len(days_sp))", "(70,) (70, 1) 70 70\n" ], [ "x_sp[1]", "_____no_output_____" ], [ "def smooth(x, y, nb):\n y_smooth = np.zeros(x.shape[0])\n for i in range(len(x)):\n if i-nb < 0:\n y_smooth[i] = np.mean(y[:i+11])\n elif i+nb+1 > len(y):\n y_smooth[i] = np.mean(y[i-nb:])\n else:\n y_smooth[i] = np.mean(y[i-nb:i+nb+1])\n return y_smooth", "_____no_output_____" ], [ "plt.figure(figsize=(20, 10))\n\n\nplt.scatter(days, vals, s=20, alpha=.5, c='skyblue', label= 'Crime by Day')\nplt.plot(days, y_smooth_avg, c='steelblue', alpha=.9, linewidth=3, label='Smoothed Crime Signal')\n\n#\nplt.scatter(days_sp, y_sp, c=colors, s=80, label='Festival')\n# plt.scatter(days_1st, y_1st, c='red', s=30, alpha=0.5, label='First Day of Month')\n\nfont = {'family': 'Helvetica Neue', #'serif',\n# 'color': 'darkred', #'darkred',\n 'weight': 'normal',\n 'size': 14}\n\nfor idx in range(len(y_sp)):\n plt.text(x_sp[idx]+5, y_sp[idx]+15, festivals[idx % len(festivals)], fontdict=font)\n\n\nplt.xlim(xmin=0, xmax=len(y))\nplt.ylim(ymin=300, ymax=1850)\nplt.xticks(np.arange(0, len(y)+1, 365).tolist(), np.arange(2006, 2017).tolist())\nplt.ylabel('number of crimes per day', fontsize = 20)\nplt.xlabel('Time, Graphed by Days', fontsize = 20)\nplt.title('NYC Crime with Festivals', fontsize = 30)\nplt.legend(fontsize = 15, loc=0)\nplt.show()", "/Users/Viola/anaconda/lib/python3.5/site-packages/matplotlib/font_manager.py:1288: UserWarning: findfont: Font family ['Helvetica Neue'] not found. 
Falling back to Bitstream Vera Sans\n (prop.get_family(), self.defaultFamily[fontext]))\n" ], [ "plt.figure(figsize=(20, 10))\n\n\nplt.scatter(days, vals, s=20, alpha=.5, c='skyblue', label= 'Crime by Day')\nplt.plot(days, y_smooth_avg, c='steelblue', alpha=.9, linewidth=3, label='Smoothed Crime Signal')\n\n#\n# plt.scatter(days_sp, y_sp, c='yellow', s=100, label='Festival')\nplt.scatter(days_1st, y_1st, c='red', s=30, alpha=0.5, label='First Day of Month')\n\nplt.xlim(xmin=0, xmax=len(y))\nplt.ylim(ymin=300, ymax=1850)\nplt.xticks(np.arange(0, len(y)+1, 365).tolist(), np.arange(2006, 2017).tolist())\nplt.ylabel('number of crimes per day', fontsize = 20)\nplt.xlabel('Time, Graphed by Days', fontsize = 20)\nplt.title('NYC Crime with First Day of Month', fontsize = 30)\nplt.legend(fontsize = 15, loc=0)\nplt.show()", "_____no_output_____" ], [ "x = np.arange(len(vals))\ny = vals\n\nfrom scipy import interpolate\ntck = interpolate.splrep(x, y, s=0)\nxnew = np.arange(0, 2*np.pi, np.pi/50)\nynew = interpolate.splev(xnew, tck, der=0)\n\n\nplt.figure(figsize=(25, 10))\nplt.plot(x, y, 'x', xnew, ynew, xnew, np.sin(xnew), x, y, 'r')\n# plt.legend(['Linear', 'Cubic Spline', 'True', 'LSQUnivariateSpline'])\nplt.legend(['Cubic Spline'])\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c60804cf79aa61868f332ffa1f436ae8237cb9
4,497
ipynb
Jupyter Notebook
examples/notebooks/tutorial.ipynb
Singlesnail/vedo
c61ad3aca5c926d4b41b8a468aefe8fc02f242ab
[ "CC0-1.0" ]
null
null
null
examples/notebooks/tutorial.ipynb
Singlesnail/vedo
c61ad3aca5c926d4b41b8a468aefe8fc02f242ab
[ "CC0-1.0" ]
null
null
null
examples/notebooks/tutorial.ipynb
Singlesnail/vedo
c61ad3aca5c926d4b41b8a468aefe8fc02f242ab
[ "CC0-1.0" ]
null
null
null
32.352518
98
0.498999
[ [ [ "###########################################################\n# Quick tutorial. Check out more examples in directories:\n#\texamples/basic \n#\texamples/advanced\n#\texamples/volumetric\n#\texamples/simulations\n#\texamples/other\n\nfrom vedo import *\n############################################################\n# Caveat: in notebooks the rendering backend is K3D, so\n# some of the features are disabled wrt the python scripts.\n# Uncomment:\nembedWindow(False) #to pop an external VTK rendering window\n# In a VTK rendering window press:\n# q, to return to the notebook (window becomes unresponsive)\n# type interactive() to return to window interaction\n# type closeWindow() to close the rendering window\n############################################################\n\n# Declare an instance of the class Plotter\nvp = Plotter()\n\n# Load a vtk file as a Mesh(vtkActor) and visualize it.\n# (The actual mesh corresponds to the outer shape of\n# an embryonic mouse limb at about 11 days of gestation).\n# Choose a tomato color for the internal surface of the mesh.\nvp.load(datadir+\"270.vtk\").c(\"aqua\")\nvp.show() # picks what is automatically stored in python list vp.actors\n\n######## Press now q in window to return to the script #########\n\nvp.close() # (only necessary for the vtk backend) ", "_____no_output_____" ], [ "#########################################################################################\n# Load 3 meshes assigning each a different color,\n# by default use their file names as legend entries.\n# No need to use any variables, as meshes are stored internally in list vp.actors:\nvp = Plotter(title=\"Three limb shapes\")\nvp.load(datadir+\"250.vtk\").color([1, 0.4, 0]).alpha(0.3)\nvp.load(datadir+\"270.vtk\").color([1, 0.6, 0]).alpha(0.3)\nvp.load(datadir+\"290.vtk\").color([1, 0.8, 0]).alpha(0.3)\nprint(\"Loaded Mesh(vtkActor) objects: \", len(vp.actors))\nvp.show()\nvp.close()", "Loaded Mesh(vtkActor) objects: 3\n" ], [ "#########################################################################################\n# Draw a spline through a set of points:\nfrom random import gauss, uniform as u\nvp = Plotter()\n\npts = [(u(0, 2), u(0, 2), u(0, 2) + i) for i in range(8)] # build python list of points\nvp += Points(pts, r=5) # add the vertex mesh to the internal list of objects to be shown\n\nfor i in range(10):\n sp = Spline(pts, smooth=i/10.0, degree=2).color(i)\n sp.legend(\"smoothing \" + str(i/10.0))\n vp += sp\nvp.show(axes=1) # render the internal list of objects in vp.actors\nvp.close()", "_____no_output_____" ], [ "#########################################################################################\n# Increase the number of points in a mesh using subdivide()\nvp = Plotter()\nm1 = vp.load(datadir+\"beethoven.ply\")\n\nm2 = m1.clone().subdivide() # make a copy and increase the nr of points of the mesh\nvp.show(m1, m2.addPos([10,0,0]))\nvp.close()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
e7c6139907318b31d71c53a2f51894ab29901a2a
145,016
ipynb
Jupyter Notebook
tutorials/Tutorial5_Evaluation.ipynb
arthurbarros/haystack
886f5ba90ed15aecd10c509a8e57334eefcf69c2
[ "Apache-2.0" ]
null
null
null
tutorials/Tutorial5_Evaluation.ipynb
arthurbarros/haystack
886f5ba90ed15aecd10c509a8e57334eefcf69c2
[ "Apache-2.0" ]
null
null
null
tutorials/Tutorial5_Evaluation.ipynb
arthurbarros/haystack
886f5ba90ed15aecd10c509a8e57334eefcf69c2
[ "Apache-2.0" ]
null
null
null
46.839793
252
0.587556
[ [ [ "# Evalutaion\nTo be able to make a statement about the performance of a question-asnwering system, it is important to evalute it. Furthermore, evaluation allows to determine which parts of the system can be improved.", "_____no_output_____" ], [ "## Start an Elasticsearch server\nYou can start Elasticsearch on your local machine instance using Docker. If Docker is not readily available in your environment (eg., in Colab notebooks), then you can manually download and execute Elasticsearch from source.", "_____no_output_____" ] ], [ [ "# Recommended: Start Elasticsearch using Docker\n#! docker run -d -p 9200:9200 -e \"discovery.type=single-node\" elasticsearch:7.6.2", "_____no_output_____" ], [ "# In Colab / No Docker environments: Start Elasticsearch from source\n! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz -q\n! tar -xzf elasticsearch-7.6.2-linux-x86_64.tar.gz\n! chown -R daemon:daemon elasticsearch-7.6.2\n\nimport os\nfrom subprocess import Popen, PIPE, STDOUT\nes_server = Popen(['elasticsearch-7.6.2/bin/elasticsearch'],\n stdout=PIPE, stderr=STDOUT,\n preexec_fn=lambda: os.setuid(1) # as daemon\n )\n# wait until ES has started\n! sleep 30", "_____no_output_____" ], [ "# install haystack\n! pip install git+git://github.com/deepset-ai/haystack.git@fix_tutorial_5", "_____no_output_____" ], [ "from farm.utils import initialize_device_settings\n\ndevice, n_gpu = initialize_device_settings(use_cuda=True)", "06/05/2020 16:11:23 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, automatic mixed precision training: None\n" ], [ "\nfrom haystack.indexing.io import fetch_archive_from_http\n\n# Download evaluation data, which is a subset of Natural Questions development set containing 50 documents\ndoc_dir = \"../data/nq\"\ns3_url = \"https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/nq_dev_subset.json.zip\"\nfetch_archive_from_http(url=s3_url, output_dir=doc_dir)", "06/05/2020 16:11:26 - INFO - haystack.indexing.io - Fetching from https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/nq_dev_subset.json.zip to `../data/nq`\n100%|██████████| 621983/621983 [00:01<00:00, 477723.47B/s]\n" ], [ "# Connect to Elasticsearch\nfrom haystack.database.elasticsearch import ElasticsearchDocumentStore\n\ndocument_store = ElasticsearchDocumentStore(host=\"localhost\", username=\"\", password=\"\", create_index=False)", "_____no_output_____" ], [ "# Add evaluation data to Elasticsearch database\ndocument_store.add_eval_data(\"../data/nq/nq_dev_subset.json\")", "06/05/2020 16:11:30 - INFO - elasticsearch - POST http://localhost:9200/_bulk [status:200 request:1.613s]\n06/05/2020 16:11:31 - INFO - elasticsearch - POST http://localhost:9200/_bulk [status:200 request:0.453s]\n" ] ], [ [ "## Initialize components of QA-System", "_____no_output_____" ] ], [ [ "# Initialize Retriever\nfrom haystack.retriever.elasticsearch import ElasticsearchRetriever\n\nretriever = ElasticsearchRetriever(document_store=document_store)", "_____no_output_____" ], [ "# Initialize Reader\nfrom haystack.reader.farm import FARMReader\n\nreader = FARMReader(\"deepset/roberta-base-squad2\")", "06/05/2020 16:11:31 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, automatic mixed precision training: None\n06/05/2020 16:11:31 - INFO - farm.infer - Could not find `deepset/roberta-base-squad2` locally. 
Try to download from model hub ...\n06/05/2020 16:11:32 - INFO - filelock - Lock 140574308859240 acquired on /root/.cache/torch/transformers/f7d4b9379a9c487fa03ccf3d8e00058faa9d664cf01fc03409138246f48760da.c6288e0f84ec797ba5c525c923a5bbc479b47c761aded9734a5f6a473b044c8d.lock\n" ], [ "# Initialize Finder which sticks together Reader and Retriever\nfrom haystack.finder import Finder\n\nfinder = Finder(reader, retriever)", "_____no_output_____" ] ], [ [ "## Evaluation of Retriever", "_____no_output_____" ] ], [ [ "# Evaluate Retriever on its own\nretriever_eval_results = retriever.eval()\n\n## Retriever Recall is the proportion of questions for which the correct document containing the answer is\n## among the correct documents\nprint(\"Retriever Recall:\", retriever_eval_results[\"recall\"])\n## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank\nprint(\"Retriever Mean Avg Precision:\", retriever_eval_results[\"map\"])", "06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/feedback/_search?scroll=5m&size=1000 [status:200 request:0.170s]\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.069s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.022s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.021s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.019s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.027s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.026s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.024s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.014s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 
16:12:46 - INFO - elasticsearch - 
POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.016s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.019s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.018s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/_search/scroll [status:200 request:0.017s]\n06/05/2020 16:12:47 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.007s]\n06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - For 54 out of 54 questions (100.00%), the answer was in the top-10 candidate passages selected by the retriever.\n" ] ], [ [ "## Evaluation of Reader", "_____no_output_____" ] ], [ [ "# Evaluate Reader on its own\nreader_eval_results = reader.eval(document_store=document_store, device=device)\n\n# Evaluation of Reader can also be done directly on a SQuAD-formatted file\n# without passing the data to Elasticsearch\n#reader_eval_results = reader.eval_on_file(\"../data/natural_questions\", \"dev_subset.json\", device=device)\n\n## Reader Top-N-Recall is the proportion of predicted answers that overlap with their corresponding correct answer\nprint(\"Reader Top-N-Recall:\", reader_eval_results[\"top_n_recall\"])\n## Reader Exact Match is the proportion of questions where the predicted answer is exactly the same as the correct answer\nprint(\"Reader Exact 
Match:\", reader_eval_results[\"EM\"])\n## Reader F1-Score is the average overlap between the predicted answers and the correct answers\nprint(\"Reader F1-Score:\", reader_eval_results[\"f1\"])", "06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/feedback/_search?scroll=5m&size=1000 [status:200 request:0.022s]\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/_search/scroll [status:200 request:0.005s]\n06/05/2020 16:12:47 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.003s]\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search?scroll=5m&size=1000 [status:200 request:0.039s]\n06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/_search/scroll [status:200 request:0.010s]\n06/05/2020 16:12:47 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.003s]\nEvaluating: 100%|██████████| 78/78 [00:31<00:00, 2.50it/s]\n" ] ], [ [ "## Evaluation of Finder", "_____no_output_____" ] ], [ [ "# Evaluate combination of Reader and Retriever through Finder\nfinder_eval_results = finder.eval()\n\nprint(\"\\n___Retriever Metrics in Finder___\")\nprint(\"Retriever Recall:\", finder_eval_results[\"retriever_recall\"])\nprint(\"Retriever Mean Avg Precision:\", finder_eval_results[\"retriever_map\"])\n\n# Reader is only evaluated with those questions, where the correct document is among the retrieved ones\nprint(\"\\n___Reader Metrics in Finder___\")\nprint(\"Reader Top-1 accuracy:\", finder_eval_results[\"reader_top1_accuracy\"])\nprint(\"Reader Top-1 accuracy (has answer):\", finder_eval_results[\"reader_top1_accuracy_has_answer\"])\nprint(\"Reader Top-k accuracy:\", finder_eval_results[\"reader_top_k_accuracy\"])\nprint(\"Reader Top-k accuracy (has answer):\", finder_eval_results[\"reader_topk_accuracy_has_answer\"])\nprint(\"Reader Top-1 EM:\", finder_eval_results[\"reader_top1_em\"])\nprint(\"Reader Top-1 EM (has answer):\", finder_eval_results[\"reader_top1_em_has_answer\"])\nprint(\"Reader Top-k EM:\", finder_eval_results[\"reader_topk_em\"])\nprint(\"Reader Top-k EM (has answer):\", finder_eval_results[\"reader_topk_em_has_answer\"])\nprint(\"Reader Top-1 F1:\", finder_eval_results[\"reader_top1_f1\"])\nprint(\"Reader Top-1 F1 (has answer):\", finder_eval_results[\"reader_top1_f1_has_answer\"])\nprint(\"Reader Top-k F1:\", finder_eval_results[\"reader_topk_f1\"])\nprint(\"Reader Top-k F1 (has answer):\", finder_eval_results[\"reader_topk_f1_has_answer\"])\nprint(\"Reader Top-1 no-answer accuracy:\", finder_eval_results[\"reader_top1_no_answer_accuracy\"])\nprint(\"Reader Top-k no-answer accuracy:\", finder_eval_results[\"reader_topk_no_answer_accuracy\"])\n\n# Time measurements\nprint(\"\\n___Time Measurements___\")\nprint(\"Total retrieve time:\", finder_eval_results[\"total_retrieve_time\"])\nprint(\"Avg retrieve time per question:\", finder_eval_results[\"avg_retrieve_time\"])\nprint(\"Total reader timer:\", finder_eval_results[\"total_reader_time\"])\nprint(\"Avg read time per question:\", finder_eval_results[\"avg_reader_time\"])\nprint(\"Total Finder time:\", finder_eval_results[\"total_finder_time\"])", "06/05/2020 16:13:44 - INFO - elasticsearch - POST http://localhost:9200/feedback/_search?scroll=5m&size=1000 [status:200 request:0.014s]\n06/05/2020 16:13:44 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n06/05/2020 16:13:44 - INFO - 
haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/_search/scroll [status:200 request:0.004s]\n06/05/2020 16:13:46 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.002s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.88 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.11 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.92 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.46 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.81 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.82 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.35 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.96 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.40 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.37 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.04 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.12 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.03 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.30 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.59 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.48 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.48 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.99 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.86 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.76 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.90 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.93 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.86 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.77 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.83 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.18 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.20 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.23 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.88 Batches/s]\nInferencing Samples: 
100%|██████████| 1/1 [00:00<00:00, 2.13 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.13 
Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.51 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.96 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.30 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.68 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.26 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.95 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.02 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.33 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.30 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.56 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.76 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.81 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:04<00:00, 2.23 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.44 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.40 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.45 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.97 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.72 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.45 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.95 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.73 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.84 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.09 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.74 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.71 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.00 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.44 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.32 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 7.27 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.69 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.07 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.19 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.61 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 8.17 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.97 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.12 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.56 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.34 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.61 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.82 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.51 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.92 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.13 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.08 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.80 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 14.27 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.96 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 2.00 Batches/s]\nInferencing Samples: 100%|██████████| 
2/2 [00:00<00:00, 2.48 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 6.39 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.80 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 1.65 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.36 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.05 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.59 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.28 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.67 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.76 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.54 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.03 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.98 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.60 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.46 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.55 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.43 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.28 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.88 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.50 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.92 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.59 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.64 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.49 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.17 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.29 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.27 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.62 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.86 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.03 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.30 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:04<00:00, 2.18 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.13 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.11 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.18 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.75 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.42 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.76 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.37 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.97 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.02 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.14 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:04<00:00, 1.99 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.94 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.67 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.04 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.24 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.25 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.88 Batches/s]\nInferencing 
Samples: 100%|██████████| 1/1 [00:00<00:00, 1.75 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.95 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.30 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.90 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.40 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.85 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.27 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 7.79 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.29 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:04<00:00, 2.08 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:03<00:00, 1.44 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.48 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.72 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.26 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.50 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.00 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.01 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.52 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:02<00:00, 1.47 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.70 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.27 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.63 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.68 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.52 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.44 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.75 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.30 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.03 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.68 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 9.13 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.93 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.51 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.50 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.74 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.52 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.15 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.01 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.38 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.64 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.20 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 7.68 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.68 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.46 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.58 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.55 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.66 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.63 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.89 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.30 
Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.56 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.08 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.08 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.22 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.11 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.28 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.72 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.51 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 6.03 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 3.01 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.69 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.76 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.37 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.33 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.43 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.33 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.28 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.18 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.42 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.54 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 13.85 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.41 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.32 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.73 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.77 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.04 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.53 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.13 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.30 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.53 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.44 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.01 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.69 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.38 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.80 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.80 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.78 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.29 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.05 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.76 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.22 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.75 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.52 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.89 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.85 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.03 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.97 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.20 Batches/s]\nInferencing Samples: 100%|██████████| 
5/5 [00:01<00:00, 2.53 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.92 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.24 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.93 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.84 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.72 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.29 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.87 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.81 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.93 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.69 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.24 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 6.80 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.60 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.96 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.18 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.89 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.33 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.51 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 6.69 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.47 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.59 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.76 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.66 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.39 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.02 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.42 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.23 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.22 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.48 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 7.05 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.73 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 7.53 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.54 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.09 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.86 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 6.37 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.95 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.41 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.29 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.21 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.26 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.26 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.65 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:04<00:00, 1.08 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.77 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.22 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:03<00:00, 1.32 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:05<00:00, 1.61 Batches/s]\nInferencing 
Samples: 100%|██████████| 3/3 [00:01<00:00, 2.57 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.89 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.61 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 23.65 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.93 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.57 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.45 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.06 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.53 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.49 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.68 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.26 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.59 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.54 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.10 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.69 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.75 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.57 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.19 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.45 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.49 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.07 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.26 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 10.10 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.64 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.56 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.95 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.11 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.47 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.20 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.47 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.40 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.01 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.12 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.64 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.72 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.25 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.05 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:02<00:00, 1.41s/ Batches]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.15 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.92 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:04<00:00, 1.93 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.87 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.70 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.88 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.38 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.29 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.07 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.05 
Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.63 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.57 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.10 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.76 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.70 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.96 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.43 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.36 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:03<00:00, 1.08s/ Batches]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.24 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.22 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.12 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:03<00:00, 1.34 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:04<00:00, 1.99 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.99 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.97 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.85 Batches/s]\nInferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.98 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.92 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.63 Batches/s]\nInferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.02 Batches/s]\nInferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.55 Batches/s]\nInferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.56 Batches/s]\nInferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.05 Batches/s]\n06/05/2020 16:25:44 - INFO - haystack.finder - 37 out of 54 questions were correctly answered (68.52%).\n06/05/2020 16:25:44 - INFO - haystack.finder - 0 questions could not be answered due to the retriever.\n06/05/2020 16:25:44 - INFO - haystack.finder - 17 questions could not be answered due to the reader.\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e7c62072808ca9296ad88c8f5b5805f435fc2286
956,930
ipynb
Jupyter Notebook
notebooks/clean_demo_video.ipynb
manodg/beepose
1a40a4609fcf6492bfdc719bc574fcfce60e19f4
[ "BSD-3-Clause" ]
7
2020-02-17T22:32:18.000Z
2020-08-28T01:26:36.000Z
notebooks/clean_demo_video.ipynb
manodg/beepose
1a40a4609fcf6492bfdc719bc574fcfce60e19f4
[ "BSD-3-Clause" ]
7
2020-02-18T00:09:42.000Z
2020-10-29T22:36:04.000Z
notebooks/clean_demo_video.ipynb
manodg/beepose
1a40a4609fcf6492bfdc719bc574fcfce60e19f4
[ "BSD-3-Clause" ]
6
2020-02-18T17:04:34.000Z
2022-03-24T03:04:44.000Z
893.492063
908,972
0.94063
[ [ [ "import os\nimport sys\nsys.path.append('..')\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\nimport tensorflow as tf\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.3\nsession = tf.Session(config=config)", "_____no_output_____" ], [ "%matplotlib inline\nimport cv2\nimport time\nimport pylab\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\nimport matplotlib.pyplot as plt\nimport glob,os\nfrom IPython.display import clear_output\nfrom keras.models import load_model\n#from src.inference.inference import inference\n#from src.train.train_stages import\nfrom beepose.models.train_model import get_testing_model_new", "Using TensorFlow backend.\n" ], [ "import keras.backend as K\ndef eucl_loss(x, y):\n return K.sum(K.square(x - y)) / 10 / 2\n", "_____no_output_____" ], [ "video = cv2.VideoCapture('/mnt/storage/Gurabo/videos/Gurabo/mp4/C02_170621110000.mp4')#\n#video = cv2.VideoCapture('/home/irodriguez/JANELIA/src/Data/Videos/new_box/mp4/126_04_R_180907110000AM.mp4')#\n#video = cv2.VideoCapture('/mnt/storage/Gurabo/videos/Gurabo/mp4/166_01_R_170818010000.mp4')#'/mnt/storage/Gurabo/videos/Gurabo/mp4/C02_170610090000.mp4')", "_____no_output_____" ], [ "t,im=video.read()\nFPS = 20\nstart_frame =1000\nvideo.set(cv2.CAP_PROP_POS_MSEC,start_frame*1000.0/FPS)\nim = cv2.cvtColor(im,cv2.COLOR_BGR2RGB)", "_____no_output_____" ], [ "plt.rcParams['figure.figsize'] = (20,15)\nplt.imshow(im)", "_____no_output_____" ], [ "colors = [[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \\\n [255, 0, 0],[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]\nparams = { 'scale_search':[1], 'thre1':{0:0.4,1:0.45,2:0.4,3:0.4,4:0.4,5:0.4,5:0.09,6:0.09,7:0.01}, 'thre2': 0.08, 'thre3': 0.4, 'min_num': 4, 'mid_num': 10, 'crop_ratio': 2.5, 'bbox_ratio': 0.25} \nmodel_params = {'boxsize': 368, 'padValue': 128, 'np': '12', 'stride': 8} ", "_____no_output_____" ], [ "pathmodel='../models/pose/complete_5p_2.best_day.h5'\nmodel = load_model(pathmodel)", "WARNING:tensorflow:From /home/irodriguez/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n" ], [ "#custom_objects={'eucl_loss': eucl_loss}\nmodel = load_model('/home/malvarez/shrimppose/beepose/data/raw/bee/inference_model.h5')#load_model\n\n#('../src/train/testing_new_augmenter/complete_model_2_8_16.h5',custom_objects=custom_objects)#", "_____no_output_____" ], [ "keras_weights_file = pathmodel#\"training/weights_logs/5p_2_new/weights_2.best.h5\"#\"training/weights_logs/5p_2_stages/weights_2.best.h5\"\nnp1=12\nnp2=6\nstages=2\n# authors of original model don't use\n# vgg normalization (subtracting mean) on input images\n#model = get_testing_model_new(np1=np1,np2=np2,stages=stages)\n#model.load_weights(keras_weights_file)", "_____no_output_____" ], [ "import beepose", "_____no_output_____" ], [ "beepose", "_____no_output_____" ], [ "from beepose.inference.inference import inference \nimport matplotlib.pyplot as plt \nplt.rcParams['figure.figsize'] = (10,15)\nresize_factor=4\ntim=[]\nprof=[]\ndet,mapi 
=[],[]\nFPS=20\nstart_frame=30000\nend_frame=31000\nnp1=np1\nnp2=np2\n\nmapIdx=[[0,1],[2,3],[4,5],[6,7]]\nlimbseq=[[1,3],[3,2],[2,4],[2,5],[1,2]]\nnumparts=5\n#mapIdx = [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11],[12,13]]\n#limbseq = [[1,3],[3,2],[2,1],[1,4],[1,5],[6,2],[7,2],[2,8]]\n\n#video = cv2.VideoCapture('../Data/Videos/Hurricane/1_01_R_170808020000.mp4')\n#video = cv2.VideoCapture('/mnt/storage/Gurabo/videos/Gurabo/mp4/C02_170610110000.mp4')\n #../Data/Videos/C02_170622120000.mp4')\nvideo.set(cv2.CAP_PROP_POS_MSEC,start_frame*1000.0/FPS)\nshow = True\nsave = False\nframe_detections={}\ntry:\n for idx in range(start_frame,end_frame): \n #print(idx)\n t,im = video.read()\n \n im = cv2.cvtColor(im,cv2.COLOR_RGB2BGR)\n im2=cv2.resize(im,(im.shape[1]//resize_factor,im.shape[0]//resize_factor))\n \n tic1 = time.time()\n canvas,mappings,parts = inference(im2, model,params, model_params,show=show,np1=np2,np2=np1,resize=resize_factor,\n numparts=numparts,mapIdx=mapIdx,limbSeq=limbseq,distance_tolerance=300)\n frame_detections[idx]={}\n frame_detections[idx]['mapping']=mappings\n frame_detections[idx]['parts']=parts\n if show:\n canvas2 =cv2.resize(canvas,(im.shape[1],im.shape[0]))\n pylab.imshow(cv2.cvtColor(canvas2,cv2.COLOR_BGR2RGB))\n plt.title(idx)\n pylab.show()\n #break\n if save:\n cv2.imwrite('results/night/camera_%05d.jpg'%idx,canvas2) \n clear_output(wait=True) \n toc1 = time.time()\n tim.append(toc1-tic1)\nexcept KeyboardInterrupt:\n # Release the Video Device\n # Message to be displayed after releasing the device\n print (\"Released Video Resource\")", "_____no_output_____" ], [ "images = glob.glob('../../src/keras_openpose_bee/dataset/POLLEN/*jpg')\nimages.sort()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt \nplt.rcParams['figure.figsize'] = (10,15)\nresize_factor=4\ntim=[]\nprof=[]\ndet,mapi =[],[]\nFPS=20\nstart_frame=123\nend_frame=500\nnp1=16\nnp2=8\nmapIdx = [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11],[12,13]]\nlimbseq = [[1,3],[3,2],[2,1],[1,4],[1,5],[2,6],[2,7],[2,8]]\n#video = cv2.VideoCapture('../Data/Videos/Hurricane/1_01_R_170808020000.mp4')\n#video = cv2.VideoCapture('/mnt/storage/Gurabo/videos/Gurabo/mp4/C02_170610110000.mp4')\n #../Data/Videos/C02_170622120000.mp4')\nvideo.set(cv2.CAP_PROP_POS_MSEC,start_frame*1000.0/FPS)\nshow = True\nsave = True\nframe_detections={}\ntry:\n for idx in range(len(images)): \n #print(idx)\n #t,im = video.read()\n im = cv2.imread(images[idx])\n #im = cv2.cvtColor(im,cv2.COLOR_RGB2GRAY)\n im2=cv2.resize(im,(im.shape[1]//resize_factor,im.shape[0]//resize_factor))\n \n tic1 = time.time()\n canvas,mappings,parts = inference(im2, model,params, model_params,show=show,np1=np2,np2=np1,resize=resize_factor,\n numparts=8,mapIdx=mapIdx,limbSeq=limbseq,distance_tolerance=250)\n frame_detections[idx]={}\n frame_detections[idx]['mapping']=mappings\n frame_detections[idx]['parts']=parts\n if show:\n canvas2 =cv2.resize(canvas,(im.shape[1],im.shape[0]))\n pylab.imshow(cv2.cvtColor(canvas2,cv2.COLOR_BGR2RGB))\n pylab.show()\n #break\n if save:\n filename=os.path.join('results/',images[idx].split('/')[-1])\n cv2.imwrite(filename,canvas2) \n clear_output(wait=True) \n toc1 = time.time()\n tim.append(toc1-tic1)\n plt.title(images[idx])\nexcept KeyboardInterrupt:\n # Release the Video Device\n # Message to be displayed after releasing the device\n print (\"Released Video Resource\")", "_____no_output_____" ], [ "import glob,os,sys\nsys.path.append('..')\nimport cv2\nimport math\nfrom beepose.utils import util \nimport numpy as np\nimport json 
\nfrom scipy.ndimage.filters import gaussian_filter\n\nFPS=20\n\n# Color constant\ncolors= [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \\\n [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \\\n [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]\n\n \n\ndef inference(input_image,model, params, model_params,show=True,np1=19,np2=38,resize=1,distance_tolerance=310,numparts=5,\n mapIdx=[[0,1],[2,3],[4,5],[6,7],[8,9]],\n limbSeq=[[1,3],[3,2],[2,4],[2,5],[1,2]]):\n \"\"\"\n This function uses the model to generate the heatmaps and pafs then use them to produce the poses. \n \n inputs: \n \n - input_image : An image\n - model : A trained keras model \n - params : Parameters used for adapting the image to match training\n - model_params : Parameters for padding the images after resizing\n - show : Boolean to generate a canvas with the poses on there. \n - np1 : Number of channels for pafs. \n - np2 : Number of channels for heatmaps. \n - resize: Resize factor of the image. \n - distance_tolerance: Maximum distance between two parts. \n - numparts: Number of parts\n - mapIdx: configuration for the pafs 0 based \n - limbSeq: configuration of the poses. It should match with the pafs configuration. 1 based\n \n Outputs : \n - canvas: if Show, generates an image with the pose. \n - mapping : How the parts are connected. \n - parts : Detections for each of the parts considered. \n \n model_params['boxsize'] \n model_params['stride']\n model_params['padValue']\n params['scale_search']\n params['thre1']\n params['thre2']\n \"\"\"\n profiling ={}\n oriImg = input_image#cv2.cvtColor(input_image,cv2.COLOR_BGR2RGB)#cv2.imread(input_image) # B,G,R order\n canvas = oriImg.copy()#cv2.imread(input_image) \n multiplier = [x * model_params['boxsize'] / oriImg.shape[0] for x in params['scale_search']]\n\n heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], np1))\n paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], np2))\n \n\n scale =1\n imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)\n imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_params['stride'],\n model_params['padValue'])\n input_img = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,0,1,2)) # required shape (1, width, height, channels)\n output_blobs = model.predict(input_img)\n \n # extract outputs, resize, and remove padding\n heatmap = np.squeeze(output_blobs[1]) # output 1 is heatmaps\n heatmap = cv2.resize(heatmap, (0, 0), fx=model_params['stride'], fy=model_params['stride'],interpolation=cv2.INTER_CUBIC)\n heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3],:]\n heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)\n paf = np.squeeze(output_blobs[0]) # output 0 is PAFs\n paf = cv2.resize(paf, (0, 0), fx=model_params['stride'], fy=model_params['stride'],\n interpolation=cv2.INTER_CUBIC)\n paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]\n paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)\n \n heatmap_avg = heatmap[...] +heatmap_avg #/ len(multiplier)\n paf_avg = paf[...] 
+paf_avg# / len(multiplier)\n \n\n all_peaks = []\n peak_counter = 0\n threshold_detection = params['thre1']\n for part in range(numparts):\n map_ori = heatmap_avg[:, :, part]\n map = map_ori#gaussian_filter(map_ori, sigma=3)\n\n map_left = np.zeros(map.shape)\n map_left[1:, :] = map[:-1, :]\n map_right = np.zeros(map.shape)\n map_right[:-1, :] = map[1:, :]\n map_up = np.zeros(map.shape)\n map_up[:, 1:] = map[:, :-1]\n map_down = np.zeros(map.shape)\n map_down[:, :-1] = map[:, 1:]\n \n peaks_binary = np.logical_and.reduce(\n (map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > threshold_detection[part]))\n peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse\n if part in [1,5,6,7]:\n peaks = list(non_max_suppression_op(peaks))\n peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]\n id = range(peak_counter, peak_counter + len(peaks))\n peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]\n\n all_peaks.append(peaks_with_score_and_id)\n \n peak_counter += len(peaks)\n connection_all = []\n special_k = []\n mid_num = 20\n for k in range(len(mapIdx)):\n score_mid = paf_avg[:, :, [x for x in mapIdx[k]]]\n #print(len(all_peaks))\n candA = all_peaks[limbSeq[k][0] - 1]\n candB = all_peaks[limbSeq[k][1] - 1]\n nA = len(candA)\n nB = len(candB)\n indexA, indexB = limbSeq[k]\n if (nA != 0 and nB != 0):\n connection_candidate = []\n for i in range(nA):\n for j in range(nB):\n vec = np.subtract(candB[j][:2], candA[i][:2])\n norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])\n # failure case when 2 body parts overlaps\n if norm == 0:\n continue\n if norm >distance_tolerance//resize:\n continue \n \n vec = np.divide(vec, norm)\n\n startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \\\n np.linspace(candA[i][1], candB[j][1], num=mid_num)))\n\n vec_x = np.array(\n [score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \\\n for I in range(len(startend))])\n vec_y = np.array(\n [score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \\\n for I in range(len(startend))])\n\n score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])\n score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(\n 0.5 * oriImg.shape[0] / norm - 1, 0)\n criterion1 = len(np.nonzero(score_midpts > params['thre2'])[0]) > 0.7 * len(\n score_midpts)\n criterion2 = score_with_dist_prior > 0\n if criterion1 and criterion2:\n connection_candidate.append([i, j, score_with_dist_prior,\n score_with_dist_prior + candA[i][2] + candB[j][2]])\n\n connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)\n connection = np.zeros((0, 5))\n for c in range(len(connection_candidate)):\n i, j, s = connection_candidate[c][0:3]\n if (i not in connection[:, 3] and j not in connection[:, 4]):\n connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])\n if (len(connection) >= min(nA, nB)):\n break\n\n connection_all.append(connection)\n else:\n special_k.append(k)\n connection_all.append([])\n # last number in each row is the total parts number of that animal\n # the second last number in each row is the score of the overall configuration\n subset = -1 * np.ones((0, 20))\n candidate = np.array([item for sublist in all_peaks for item in sublist])\n for k in range(len(mapIdx)):\n if k not in special_k:\n partAs = connection_all[k][:, 0]\n partBs = connection_all[k][:, 1]\n indexA, indexB = np.array(limbSeq[k]) - 1\n\n for i in 
range(len(connection_all[k])): # = 1:size(temp,1)\n found = 0\n subset_idx = [-1, -1]\n for j in range(len(subset)): # 1:size(subset,1):\n if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:\n subset_idx[found] = j\n found += 1\n\n if found == 1:\n j = subset_idx[0]\n if (subset[j][indexB] != partBs[i]):\n subset[j][indexB] = partBs[i]\n subset[j][-1] += 1\n subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]\n elif found == 2: # if found 2 and disjoint, merge them\n j1, j2 = subset_idx\n membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]\n if len(np.nonzero(membership == 2)[0]) == 0: # merge\n subset[j1][:-2] += (subset[j2][:-2] + 1)\n subset[j1][-2:] += subset[j2][-2:]\n subset[j1][-2] += connection_all[k][i][2]\n subset = np.delete(subset, j2, 0)\n else: # as like found == 1\n subset[j1][indexB] = partBs[i]\n subset[j1][-1] += 1\n subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]\n\n # if find no partA in the subset, create a new subset\n elif not found and k < numparts:\n row = -1 * np.ones(20)\n row[indexA] = partAs[i]\n row[indexB] = partBs[i]\n row[-1] = 2\n row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + \\\n connection_all[k][i][2]\n subset = np.vstack([subset, row])\n # delete some rows of subset which has few parts occur\n #deleteIdx = [];\n #for i in range(len(subset)):\n # if subset[i][-1] < 2 or subset[i][-2] / subset[i][-1] < 0.4:\n # deleteIdx.append(i)\n #subset = np.delete(subset, deleteIdx, axis=0)\n temp_parts ={}\n parts={}\n for i in range(numparts):#17\n temp_parts[i]=[]\n for j in range(len(all_peaks[i])):\n a=all_peaks[i][j][0]*resize\n b=all_peaks[i][j][1]*resize\n c=all_peaks[i][j][2]\n temp_parts[i].append([a,b,c])\n parts[i]=temp_parts[i]\n mappings=[]\n for i in range(numparts):#17\n for n in range(len(subset)):\n kind=limbSeq[i]\n index = subset[n][np.array(kind) - 1]\n if -1 in index:\n continue\n Y = candidate[index.astype(int), 0]\n X = candidate[index.astype(int), 1]\n S = candidate[index.astype(int), 2]\n mX = np.mean(X)\n mY = np.mean(Y)\n length = ((X[0]*resize - X[1]*resize) ** 2 + (Y[0]*resize - Y[1]*resize) ** 2) ** 0.5\n angle = math.degrees(math.atan2(X[0]*resize - X[1]*resize, Y[0]*resize - Y[1]*resize))\n mappings.append([[int(Y[0])*resize,int(X[0])*resize],[int(Y[1])*resize,int(X[1])*resize],np.array(S).mean(),length,angle,kind])\n if show:\n #canvas = cv2.imread(input_image) # B,G,R order\n size=1\n thick=-1\n for i in range(numparts):#17\n if i > 4 and i<7:\n size=4\n thick =1\n if i>6:\n size=4\n thick =3\n for j in range(len(all_peaks[i])):\n \n cv2.circle(canvas, all_peaks[i][j][0:2], size, colors[i], thickness=thick)\n\n stickwidth = 10//(resize-1) #4\n\n for i in range(numparts):#17\n for n in range(len(subset)):\n index = subset[n][np.array(limbSeq[i]) - 1]\n if -1 in index:\n continue\n cur_canvas = canvas.copy()\n Y = candidate[index.astype(int), 0]\n X = candidate[index.astype(int), 1]\n mX = np.mean(X)\n mY = np.mean(Y)\n length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5\n angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))\n polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0,\n 360, 1)\n cv2.fillConvexPoly(cur_canvas, polygon, colors[i])\n canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)\n \n return canvas,mappings,parts\n\n\n", "_____no_output_____" ], [ "import glob,os,sys\nsys.path.append('..')\nimport cv2\nimport math\nfrom beepose.utils 
import util \nimport numpy as np\nimport json \nfrom scipy.ndimage.filters import gaussian_filter\n\nFPS=20\n\n# Color constant\ncolors= [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \\\n [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \\\n [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]\n\n \n\ndef inference(input_image,model, params, model_params,show=True,np1=19,np2=38,resize=1,distance_tolerance=310,numparts=5,\n mapIdx=[[0,1],[2,3],[4,5],[6,7],[8,9]],\n limbSeq=[[1,3],[3,2],[2,4],[2,5],[1,2]]):\n \"\"\"\n This function uses the model to generate the heatmaps and pafs then use them to produce the poses. \n \n inputs: \n \n - input_image : An image\n - model : A trained keras model \n - params : Parameters used for adapting the image to match training\n - model_params : Parameters for padding the images after resizing\n - show : Boolean to generate a canvas with the poses on there. \n - np1 : Number of channels for pafs. \n - np2 : Number of channels for heatmaps. \n - resize: Resize factor of the image. \n - distance_tolerance: Maximum distance between two parts. \n - numparts: Number of parts\n - mapIdx: configuration for the pafs 0 based \n - limbSeq: configuration of the poses. It should match with the pafs configuration. 1 based\n \n Outputs : \n - canvas: if Show, generates an image with the pose. \n - mapping : How the parts are connected. \n - parts : Detections for each of the parts considered. \n \n model_params['boxsize'] \n model_params['stride']\n model_params['padValue']\n params['scale_search']\n params['thre1']\n params['thre2']\n \"\"\"\n profiling ={}\n oriImg = input_image#cv2.cvtColor(input_image,cv2.COLOR_BGR2RGB)#cv2.imread(input_image) # B,G,R order\n canvas = oriImg.copy()#cv2.imread(input_image) \n multiplier = [x * model_params['boxsize'] / oriImg.shape[0] for x in params['scale_search']]\n\n heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], np1))\n paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], np2))\n \n\n scale =1\n imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)\n imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_params['stride'],\n model_params['padValue'])\n input_img = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,0,1,2)) # required shape (1, width, height, channels)\n output_blobs = model.predict(input_img)\n \n # extract outputs, resize, and remove padding\n heatmap = np.squeeze(output_blobs[1]) # output 1 is heatmaps\n heatmap = cv2.resize(heatmap, (0, 0), fx=model_params['stride'], fy=model_params['stride'],interpolation=cv2.INTER_CUBIC)\n heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3],:]\n heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)\n paf = np.squeeze(output_blobs[0]) # output 0 is PAFs\n paf = cv2.resize(paf, (0, 0), fx=model_params['stride'], fy=model_params['stride'],\n interpolation=cv2.INTER_CUBIC)\n paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]\n paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)\n \n heatmap_avg = heatmap[...] +heatmap_avg #/ len(multiplier)\n paf_avg = paf[...] 
+paf_avg# / len(multiplier)\n \n\n all_peaks = []\n peak_counter = 0\n threshold_detection = params['thre1']\n for part in range(numparts):\n map_ori = heatmap_avg[:, :, part]\n map = gaussian_filter(map_ori, sigma=3)\n\n map_left = np.zeros(map.shape)\n map_left[1:, :] = map[:-1, :]\n map_right = np.zeros(map.shape)\n map_right[:-1, :] = map[1:, :]\n map_up = np.zeros(map.shape)\n map_up[:, 1:] = map[:, :-1]\n map_down = np.zeros(map.shape)\n map_down[:, :-1] = map[:, 1:]\n \n peaks_binary = np.logical_and.reduce(\n (map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > threshold_detection[part]))\n peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse\n if part in [1,5,6,7]:\n peaks = list(non_max_suppression_op(peaks))\n peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]\n id = range(peak_counter, peak_counter + len(peaks))\n peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]\n\n all_peaks.append(peaks_with_score_and_id)\n \n peak_counter += len(peaks)\n connection_all = []\n special_k = []\n mid_num = 20\n for k in range(len(mapIdx)):\n score_mid = paf_avg[:, :, [x for x in mapIdx[k]]]\n #print(len(all_peaks))\n candA = all_peaks[limbSeq[k][0] - 1]\n candB = all_peaks[limbSeq[k][1] - 1]\n nA = len(candA)\n nB = len(candB)\n indexA, indexB = limbSeq[k]\n if (nA != 0 and nB != 0):\n connection_candidate = []\n for i in range(nA):\n for j in range(nB):\n vec = np.subtract(candB[j][:2], candA[i][:2])\n norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])\n # failure case when 2 body parts overlaps\n if norm == 0:\n continue\n if norm >distance_tolerance//resize:\n continue \n \n vec = np.divide(vec, norm)\n\n startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \\\n np.linspace(candA[i][1], candB[j][1], num=mid_num)))\n\n vec_x = np.array(\n [score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \\\n for I in range(len(startend))])\n vec_y = np.array(\n [score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \\\n for I in range(len(startend))])\n\n score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])\n score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(\n 0.5 * oriImg.shape[0] / norm - 1, 0)\n criterion1 = len(np.nonzero(score_midpts > params['thre2'])[0]) > 0.7 * len(\n score_midpts)\n criterion2 = score_with_dist_prior > 0\n if criterion1 and criterion2:\n connection_candidate.append([i, j, score_with_dist_prior,\n score_with_dist_prior + candA[i][2] + candB[j][2]])\n\n connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)\n connection = np.zeros((0, 5))\n for c in range(len(connection_candidate)):\n i, j, s = connection_candidate[c][0:3]\n if (i not in connection[:, 3] and j not in connection[:, 4]):\n connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])\n if (len(connection) >= min(nA, nB)):\n break\n\n connection_all.append(connection)\n else:\n special_k.append(k)\n connection_all.append([])\n # last number in each row is the total parts number of that animal\n # the second last number in each row is the score of the overall configuration\n subset = -1 * np.ones((0, 20))\n candidate = np.array([item for sublist in all_peaks for item in sublist])\n for k in range(len(mapIdx)):\n if k not in special_k:\n partAs = connection_all[k][:, 0]\n partBs = connection_all[k][:, 1]\n indexA, indexB = np.array(limbSeq[k]) - 1\n\n for i in 
range(len(connection_all[k])): # = 1:size(temp,1)\n found = 0\n subset_idx = [-1, -1]\n for j in range(len(subset)): # 1:size(subset,1):\n if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:\n subset_idx[found] = j\n found += 1\n\n if found == 1:\n j = subset_idx[0]\n if (subset[j][indexB] != partBs[i]):\n subset[j][indexB] = partBs[i]\n subset[j][-1] += 1\n subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]\n elif found == 2: # if found 2 and disjoint, merge them\n j1, j2 = subset_idx\n membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]\n if len(np.nonzero(membership == 2)[0]) == 0: # merge\n subset[j1][:-2] += (subset[j2][:-2] + 1)\n subset[j1][-2:] += subset[j2][-2:]\n subset[j1][-2] += connection_all[k][i][2]\n subset = np.delete(subset, j2, 0)\n else: # as like found == 1\n subset[j1][indexB] = partBs[i]\n subset[j1][-1] += 1\n subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]\n\n # if find no partA in the subset, create a new subset\n elif not found and k < numparts:\n row = -1 * np.ones(20)\n row[indexA] = partAs[i]\n row[indexB] = partBs[i]\n row[-1] = 2\n row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + \\\n connection_all[k][i][2]\n subset = np.vstack([subset, row])\n # delete some rows of subset which has few parts occur\n #deleteIdx = [];\n #for i in range(len(subset)):\n # if subset[i][-1] < 2 or subset[i][-2] / subset[i][-1] < 0.4:\n # deleteIdx.append(i)\n #subset = np.delete(subset, deleteIdx, axis=0)\n temp_parts ={}\n parts={}\n for i in range(numparts):#17\n temp_parts[i]=[]\n for j in range(len(all_peaks[i])):\n a=all_peaks[i][j][0]*resize\n b=all_peaks[i][j][1]*resize\n c=all_peaks[i][j][2]\n temp_parts[i].append([a,b,c])\n parts[i]=temp_parts[i]\n mappings=[]\n for i in range(numparts):#17\n for n in range(len(subset)):\n kind=limbSeq[i]\n index = subset[n][np.array(kind) - 1]\n if -1 in index:\n continue\n Y = candidate[index.astype(int), 0]\n X = candidate[index.astype(int), 1]\n S = candidate[index.astype(int), 2]\n mX = np.mean(X)\n mY = np.mean(Y)\n length = ((X[0]*resize - X[1]*resize) ** 2 + (Y[0]*resize - Y[1]*resize) ** 2) ** 0.5\n angle = math.degrees(math.atan2(X[0]*resize - X[1]*resize, Y[0]*resize - Y[1]*resize))\n mappings.append([[int(Y[0])*resize,int(X[0])*resize],[int(Y[1])*resize,int(X[1])*resize],np.array(S).mean(),length,angle,kind])\n if show:\n #canvas = cv2.imread(input_image) # B,G,R order\n size=1\n thick=-1\n for i in range(numparts):#17\n if i > 4 and i<7:\n size=4\n thick =1\n if i>6:\n size=4\n thick =3\n for j in range(len(all_peaks[i])):\n \n cv2.circle(canvas, all_peaks[i][j][0:2], size, colors[i], thickness=thick)\n\n stickwidth = 10//(resize-1) #4\n\n for i in range(numparts):#17\n for n in range(len(subset)):\n index = subset[n][np.array(limbSeq[i]) - 1]\n if -1 in index:\n continue\n cur_canvas = canvas.copy()\n Y = candidate[index.astype(int), 0]\n X = candidate[index.astype(int), 1]\n mX = np.mean(X)\n mY = np.mean(Y)\n length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5\n angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))\n polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0,\n 360, 1)\n cv2.fillConvexPoly(cur_canvas, polygon, colors[i])\n canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)\n \n return canvas,mappings,parts\n\n\n\nimport numpy as np\n\nimport numpy as np\n\ndef boxes2peaks(boxes,size=15):\n dets=[]\n for b in boxes:\n 
dets.append((b[0]+size,b[1]+size))\n return dets\ndef peaks2boxes(parts,size=15):\n boxes=[]\n for p in parts:\n boxes.append([p[0]-size,p[1]-size,p[0]+size,p[1]+size])\n return np.array(boxes)\n\n\ndef non_max_suppression_op(peaks,overlap=0.6,size=15):\n boxes= non_max_suppression_fast(peaks2boxes(peaks,size),overlap)\n dets = boxes2peaks(boxes,size)\n return dets\n \n# Malisiewicz et al.\ndef non_max_suppression_fast(boxes, overlapThresh):\n\t# if there are no boxes, return an empty list\n\tif len(boxes) == 0:\n\t\treturn []\n \n\t# if the bounding boxes integers, convert them to floats --\n\t# this is important since we'll be doing a bunch of divisions\n\tif boxes.dtype.kind == \"i\":\n\t\tboxes = boxes.astype(\"float\")\n \n\t# initialize the list of picked indexes\t\n\tpick = []\n \n\t# grab the coordinates of the bounding boxes\n\tx1 = boxes[:,0]\n\ty1 = boxes[:,1]\n\tx2 = boxes[:,2]\n\ty2 = boxes[:,3]\n \n\t# compute the area of the bounding boxes and sort the bounding\n\t# boxes by the bottom-right y-coordinate of the bounding box\n\tarea = (x2 - x1 + 1) * (y2 - y1 + 1)\n\tidxs = np.argsort(y2)\n \n\t# keep looping while some indexes still remain in the indexes\n\t# list\n\twhile len(idxs) > 0:\n\t\t# grab the last index in the indexes list and add the\n\t\t# index value to the list of picked indexes\n\t\tlast = len(idxs) - 1\n\t\ti = idxs[last]\n\t\tpick.append(i)\n \n\t\t# find the largest (x, y) coordinates for the start of\n\t\t# the bounding box and the smallest (x, y) coordinates\n\t\t# for the end of the bounding box\n\t\txx1 = np.maximum(x1[i], x1[idxs[:last]])\n\t\tyy1 = np.maximum(y1[i], y1[idxs[:last]])\n\t\txx2 = np.minimum(x2[i], x2[idxs[:last]])\n\t\tyy2 = np.minimum(y2[i], y2[idxs[:last]])\n \n\t\t# compute the width and height of the bounding box\n\t\tw = np.maximum(0, xx2 - xx1 + 1)\n\t\th = np.maximum(0, yy2 - yy1 + 1)\n \n\t\t# compute the ratio of overlap\n\t\toverlap = (w * h) / area[idxs[:last]]\n \n\t\t# delete all indexes from the index list that have\n\t\tidxs = np.delete(idxs, np.concatenate(([last],\n\t\t\tnp.where(overlap > overlapThresh)[0])))\n \n\t# return only the bounding boxes that were picked using the\n\t# integer data type\n\treturn boxes[pick].astype(\"int\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c6410bbe03a12fe25ac6dc1aa2d9570d29966d
17,687
ipynb
Jupyter Notebook
3-TIPOS-DE-DADOS-EXE.ipynb
jmarcellopereira/Minicurso-SECITEC-2019
d62a0d6dd1a6a02fe5775cfe6601b178768f8711
[ "MIT" ]
null
null
null
3-TIPOS-DE-DADOS-EXE.ipynb
jmarcellopereira/Minicurso-SECITEC-2019
d62a0d6dd1a6a02fe5775cfe6601b178768f8711
[ "MIT" ]
null
null
null
3-TIPOS-DE-DADOS-EXE.ipynb
jmarcellopereira/Minicurso-SECITEC-2019
d62a0d6dd1a6a02fe5775cfe6601b178768f8711
[ "MIT" ]
null
null
null
18.309524
290
0.470402
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7c64436f523a6100d9da7ec92839c66b3376116
47,770
ipynb
Jupyter Notebook
notepads/ML Pipeline Preparation.ipynb
ranjeetraj2005/Disaster_Response_System
5b4f10c43ae99afc823807968785c4a307ade335
[ "MIT" ]
null
null
null
notepads/ML Pipeline Preparation.ipynb
ranjeetraj2005/Disaster_Response_System
5b4f10c43ae99afc823807968785c4a307ade335
[ "MIT" ]
null
null
null
notepads/ML Pipeline Preparation.ipynb
ranjeetraj2005/Disaster_Response_System
5b4f10c43ae99afc823807968785c4a307ade335
[ "MIT" ]
null
null
null
39.446738
337
0.404291
[ [ [ "# ML Pipeline Preparation\nFollow the instructions below to help you create your ML pipeline.\n### 1. Import libraries and load data from database.\n- Import Python libraries\n- Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)\n- Define feature and target variables X and Y", "_____no_output_____" ] ], [ [ "# import libraries\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport re\nimport nltk\nimport string\nimport numpy as np\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score", "_____no_output_____" ], [ "nltk.download(['punkt', 'wordnet', 'stopwords'])", "[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "nltk.download(['averaged_perceptron_tagger'])", "[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] /root/nltk_data...\n[nltk_data] Unzipping taggers/averaged_perceptron_tagger.zip.\n" ], [ "# load data from database\nengine = create_engine('sqlite:///DisasterResponse.db')\nprint(engine)\ndf = pd.read_sql_table('labeled_messages', engine)\nX = df['message']\nY = df.drop(['message', 'genre', 'id', 'original'], axis=1) ", "Engine(sqlite:///DisasterResponse.db)\n" ] ], [ [ "### 2. Write a tokenization function to process your text data", "_____no_output_____" ] ], [ [ "stop_words = nltk.corpus.stopwords.words(\"english\")\nlemmatizer = nltk.stem.wordnet.WordNetLemmatizer()\nremove_punc_table = str.maketrans('', '', string.punctuation)", "_____no_output_____" ], [ "def tokenize(text):\n # normalize case and remove punctuation\n text = text.translate(remove_punc_table).lower()\n \n # tokenize text\n tokens = nltk.word_tokenize(text)\n \n # lemmatize and remove stop words\n #return [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]\n return [lemmatizer.lemmatize(word).lower().strip() for word in tokens if word not in stop_words]\n", "_____no_output_____" ] ], [ [ "### 3. Build a machine learning pipeline\nThis machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.", "_____no_output_____" ] ], [ [ "forest_clf = RandomForestClassifier(n_estimators=10)\npipeline = Pipeline([\n ('tfidf', TfidfVectorizer(tokenizer=tokenize)),\n ('forest', MultiOutputClassifier(forest_clf))\n ])", "_____no_output_____" ] ], [ [ "### 4. 
Train pipeline\n- Split data into train and test sets\n- Train pipeline", "_____no_output_____" ] ], [ [ "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)", "_____no_output_____" ], [ "#X_train", "_____no_output_____" ], [ "pipeline.fit(X_train, Y_train)", "_____no_output_____" ] ], [ [ "### 5. Test your model\nReport the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.", "_____no_output_____" ] ], [ [ "Y_pred = pipeline.predict(X_test)", "_____no_output_____" ], [ "for i, col in enumerate(Y_test):\n print(col)\n print(classification_report(Y_test[col], Y_pred[:, i]))", "related\n precision recall f1-score support\n\n 0.0 0.31 0.14 0.20 2096\n 1.0 0.76 0.90 0.82 6497\n\n accuracy 0.71 8593\n macro avg 0.54 0.52 0.51 8593\nweighted avg 0.65 0.71 0.67 8593\n\nrequest\n precision recall f1-score support\n\n 0.0 0.84 0.98 0.90 7117\n 1.0 0.41 0.08 0.14 1476\n\n accuracy 0.82 8593\n macro avg 0.62 0.53 0.52 8593\nweighted avg 0.76 0.82 0.77 8593\n\noffer\n precision recall f1-score support\n\n 0.0 0.99 1.00 1.00 8547\n 1.0 0.00 0.00 0.00 46\n\n accuracy 0.99 8593\n macro avg 0.50 0.50 0.50 8593\nweighted avg 0.99 0.99 0.99 8593\n\naid_related\n precision recall f1-score support\n\n 0.0 0.61 0.81 0.69 5105\n 1.0 0.46 0.23 0.31 3488\n\n accuracy 0.58 8593\n macro avg 0.53 0.52 0.50 8593\nweighted avg 0.55 0.58 0.54 8593\n\nmedical_help\n precision recall f1-score support\n\n 0.0 0.92 0.99 0.96 7924\n 1.0 0.08 0.01 0.01 669\n\n accuracy 0.92 8593\n macro avg 0.50 0.50 0.48 8593\nweighted avg 0.86 0.92 0.88 8593\n\nmedical_products\n precision recall f1-score support\n\n 0.0 0.95 1.00 0.97 8161\n 1.0 0.03 0.00 0.00 432\n\n accuracy 0.95 8593\n macro avg 0.49 0.50 0.49 8593\nweighted avg 0.90 0.95 0.92 8593\n\nsearch_and_rescue\n precision recall f1-score support\n\n 0.0 0.97 1.00 0.99 8348\n 1.0 0.00 0.00 0.00 245\n\n accuracy 0.97 8593\n macro avg 0.49 0.50 0.49 8593\nweighted avg 0.94 0.97 0.96 8593\n\nsecurity\n precision recall f1-score support\n\n 0.0 0.98 1.00 0.99 8426\n 1.0 0.00 0.00 0.00 167\n\n accuracy 0.98 8593\n macro avg 0.49 0.50 0.50 8593\nweighted avg 0.96 0.98 0.97 8593\n\nmilitary\n precision recall f1-score support\n\n 0.0 0.97 1.00 0.98 8328\n 1.0 0.00 0.00 0.00 265\n\n accuracy 0.97 8593\n macro avg 0.48 0.50 0.49 8593\nweighted avg 0.94 0.97 0.95 8593\n\nchild_alone\n precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 8593\n\n accuracy 1.00 8593\n macro avg 1.00 1.00 1.00 8593\nweighted avg 1.00 1.00 1.00 8593\n\nwater\n precision recall f1-score support\n\n 0.0 0.94 1.00 0.96 8038\n 1.0 0.09 0.01 0.01 555\n\n accuracy 0.93 8593\n macro avg 0.51 0.50 0.49 8593\nweighted avg 0.88 0.93 0.90 8593\n\nfood\n" ] ], [ [ "### 6. Improve your model\nUse grid search to find better parameters. ", "_____no_output_____" ] ], [ [ "'''\nparameters = {\n 'tfidf__ngram_range': ((1, 1), (1, 2)),\n 'tfidf__max_df': (0.8, 1.0),\n 'tfidf__max_features': (None, 10000),\n 'forest__estimator__n_estimators': [50, 100],\n 'forest__estimator__min_samples_split': [2, 4]\n}\n'''\n\nparameters = {\n 'tfidf__ngram_range': ((1, 1), (1, 2))\n}\n\ncv = GridSearchCV(pipeline, parameters, cv=3, n_jobs=-1, verbose= 10)", "_____no_output_____" ], [ "cv.fit(X_train, Y_train)", "Fitting 3 folds for each of 2 candidates, totalling 6 fits\n[CV] tfidf__ngram_range=(1, 1) .......................................\n[CV] ........... 
tfidf__ngram_range=(1, 1), score=0.139, total= 48.2s\n[CV] tfidf__ngram_range=(1, 1) .......................................\n" ] ], [ [ "### 7. Test your model\nShow the accuracy, precision, and recall of the tuned model. \n\nSince this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!", "_____no_output_____" ] ], [ [ "def evaluate_model(model, X_test, Y_test):\n Y_pred = model.predict(X_test)\n print(classification_report(Y_test, Y_pred, target_names=category_names))\n# print('Accuracy: ', accuracy_score(Y_test, Y_pred))\n# print('Precision: ', precision_score(Y_test, Y_pred, average='weighted'))\n# print('Recall: ', recall_score(Y_test, Y_pred, average='weighted'))", "_____no_output_____" ], [ "print('Accuracy: ', accuracy_score(Y_test, Y_pred))\nprint('Precision: ', precision_score(Y_test, Y_pred, average='weighted'))\nprint('Recall: ', recall_score(Y_test, Y_pred, average='weighted'))", "Accuracy: 0.144652624229\nPrecision: 0.400912141504\nRecall: 0.277450871544\n" ] ], [ [ "### 8. Try improving your model further. Here are a few ideas:\n* try other machine learning algorithms\n* add other features besides the TF-IDF", "_____no_output_____" ] ], [ [ "from sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier,AdaBoostClassifier\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import make_scorer, accuracy_score, f1_score, fbeta_score, classification_report\nfrom scipy.stats import hmean\nfrom scipy.stats.mstats import gmean", "_____no_output_____" ], [ "def tokenize_text(text):\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, \"urlplaceholder\")\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "_____no_output_____" ], [ "class StartingVerbExtractor(BaseEstimator, TransformerMixin):\n\n def starting_verb(self, text):\n sentence_list = nltk.sent_tokenize(text)\n for sentence in sentence_list:\n pos_tags = nltk.pos_tag(tokenize_text(sentence))\n if len(pos_tags) == 0:\n print('pos_tags:', pos_tags)\n first_word, first_tag = pos_tags[0]\n if first_tag in ['VB', 'VBP'] or first_word == 'RT':\n return True\n return False\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X_tagged = pd.Series(X).apply(self.starting_verb)\n return pd.DataFrame(X_tagged)", "_____no_output_____" ], [ "def new_model_pipeline():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n\n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n\n return pipeline", 
"_____no_output_____" ], [ "def multioutput_fscore(y_true,y_pred,beta=1):\n score_list = []\n if isinstance(y_pred, pd.DataFrame) == True:\n y_pred = y_pred.values\n if isinstance(y_true, pd.DataFrame) == True:\n y_true = y_true.values\n for column in range(0,y_true.shape[1]):\n score = fbeta_score(y_true[:,column],y_pred[:,column],beta,average='weighted')\n score_list.append(score)\n f1score_numpy = np.asarray(score_list)\n f1score_numpy = f1score_numpy[f1score_numpy<1]\n f1score = gmean(f1score_numpy)\n return f1score", "_____no_output_____" ], [ "model = new_model_pipeline()\n\nparameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n# 'features__text_pipeline__vect__max_df': (0.75, 1.0),\n# 'features__text_pipeline__vect__max_features': (None, 5000),\n# 'features__text_pipeline__tfidf__use_idf': (True, False),\n# 'clf__n_estimators': [10, 100],\n# 'clf__learning_rate': [0.01, 0.1],\n# 'features__transformer_weights': (\n# {'text_pipeline': 1, 'starting_verb': 0.5},\n# {'text_pipeline': 0.5, 'starting_verb': 1},\n# {'text_pipeline': 0.8, 'starting_verb': 1},\n# )\n}\n\nscorer = make_scorer(multioutput_fscore,greater_is_better = True)\n\ncv = GridSearchCV(model, param_grid=parameters, scoring = scorer,verbose = 2, n_jobs = -1)\n\ncv.fit(X_train, Y_train)", "Fitting 5 folds for each of 2 candidates, totalling 10 fits\n[CV] features__text_pipeline__vect__ngram_range=(1, 1) ...............\n[CV] features__text_pipeline__vect__ngram_range=(1, 1), total= 1.9min\n[CV] features__text_pipeline__vect__ngram_range=(1, 1) ...............\n" ] ], [ [ "### 9. Export your model as a pickle file", "_____no_output_____" ] ], [ [ "import joblib\njoblib.dump(cv.best_estimator_, 'disaster_model.pkl')", "_____no_output_____" ] ], [ [ "### 10. Use this notebook to complete `train.py`\nUse the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7c646f1a9e5d15f98faa3756a379d35da020d76
24,567
ipynb
Jupyter Notebook
VGG16/TensorFlow/.ipynb_checkpoints/vgg16_tensorflow-checkpoint.ipynb
user-ZJ/deep-learning
3da122dde31c883980dfe3056d38668b88c6e445
[ "Apache-2.0" ]
12
2018-10-19T03:17:42.000Z
2022-03-23T07:35:04.000Z
VGG16/TensorFlow/.ipynb_checkpoints/vgg16_tensorflow-checkpoint.ipynb
user-ZJ/deep-learning
3da122dde31c883980dfe3056d38668b88c6e445
[ "Apache-2.0" ]
null
null
null
VGG16/TensorFlow/.ipynb_checkpoints/vgg16_tensorflow-checkpoint.ipynb
user-ZJ/deep-learning
3da122dde31c883980dfe3056d38668b88c6e445
[ "Apache-2.0" ]
4
2018-10-19T03:17:44.000Z
2021-01-19T07:32:55.000Z
36.395556
245
0.499328
[ [ [ "# TensorFlow实现VGG16", "_____no_output_____" ], [ "## 导入需要使用的库", "_____no_output_____" ] ], [ [ "import inspect\nimport os\n\nimport numpy as np\nimport tensorflow as tf", "D:\\anaconda\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ] ], [ [ "## 定义卷积层", "_____no_output_____" ] ], [ [ "'''Convolution op wrapper, use RELU activation after convolution\n Args:\n layer_name: e.g. conv1, pool1...\n x: input tensor, [batch_size, height, width, channels]\n out_channels: number of output channels (or comvolutional kernels)\n kernel_size: the size of convolutional kernel, VGG paper used: [3,3]\n stride: A list of ints. 1-D of length 4. VGG paper used: [1, 1, 1, 1]\n is_pretrain: if load pretrained parameters, freeze all conv layers. \n Depending on different situations, you can just set part of conv layers to be freezed.\n the parameters of freezed layers will not change when training.\n Returns:\n 4D tensor\n'''\ndef conv_layer(layer_name, x, out_channels, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=True):\n in_channels = x.get_shape()[-1]\n with tf.variable_scope(layer_name):\n w = tf.get_variable(name='weights',\n trainable=is_pretrain,\n shape=[kernel_size[0], kernel_size[1], in_channels, out_channels],\n initializer=tf.contrib.layers.xavier_initializer()) # default is uniform distribution initialization\n b = tf.get_variable(name='biases',\n trainable=is_pretrain,\n shape=[out_channels],\n initializer=tf.constant_initializer(0.0))\n \n x = tf.nn.conv2d(x, w, stride, padding='SAME', name='conv')\n x = tf.nn.bias_add(x, b, name='bias_add')\n\n \n x = tf.nn.relu(x, name='relu')\n \n return x\n ", "_____no_output_____" ] ], [ [ "## 定义池化层", "_____no_output_____" ] ], [ [ "'''Pooling op\n Args:\n x: input tensor\n kernel: pooling kernel, VGG paper used [1,2,2,1], the size of kernel is 2X2\n stride: stride size, VGG paper used [1,2,2,1]\n padding:\n is_max_pool: boolen\n if True: use max pooling\n else: use avg pooling\n'''\ndef pool(layer_name, x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True):\n if is_max_pool:\n x = tf.nn.max_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)\n else:\n x = tf.nn.avg_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)\n return x", "_____no_output_____" ] ], [ [ "## 定义全连接层\n", "_____no_output_____" ] ], [ [ "'''Wrapper for fully connected layers with RELU activation as default\n Args:\n layer_name: e.g. 
'FC1', 'FC2'\n x: input feature map\n out_nodes: number of neurons for current FC layer\n'''\ndef fc_layer(layer_name, x, out_nodes,keep_prob=0.8):\n shape = x.get_shape()\n # 处理没有预先做flatten的输入\n if len(shape) == 4:\n size = shape[1].value * shape[2].value * shape[3].value\n else:\n size = shape[-1].value\n\n with tf.variable_scope(layer_name):\n w = tf.get_variable('weights',\n shape=[size, out_nodes],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable('biases',\n shape=[out_nodes],\n initializer=tf.constant_initializer(0.0))\n \n flat_x = tf.reshape(x, [-1, size]) # flatten into 1D\n \n x = tf.nn.bias_add(tf.matmul(flat_x, w), b)\n x = tf.nn.relu(x)\n x = tf.nn.dropout(x, keep_prob)\n \n return x\n ", "_____no_output_____" ] ], [ [ "## 定义VGG16网络", "_____no_output_____" ] ], [ [ "def vgg16_net(x, n_classes, is_pretrain=True):\n with tf.name_scope('VGG16'):\n x = conv_layer('conv1_1', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n x = conv_layer('conv1_2', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n with tf.name_scope('pool1'):\n x = pool('pool1', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)\n \n x = conv_layer('conv2_1', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n x = conv_layer('conv2_2', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n with tf.name_scope('pool2'):\n x = pool('pool2', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)\n \n x = conv_layer('conv3_1', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n x = conv_layer('conv3_2', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n x = conv_layer('conv3_3', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n with tf.name_scope('pool3'):\n x = pool('pool3', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)\n \n x = conv_layer('conv4_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n x = conv_layer('conv4_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n x = conv_layer('conv4_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n with tf.name_scope('pool4'):\n x = pool('pool4', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)\n \n x = conv_layer('conv5_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n x = conv_layer('conv5_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n x = conv_layer('conv5_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)\n with tf.name_scope('pool5'):\n x = pool('pool5', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)\n \n x = fc_layer('fc6', x, out_nodes=4096)\n assert x.get_shape().as_list()[1:] == [4096]\n\n x = fc_layer('fc7', x, out_nodes=4096)\n \n fc8 = fc_layer('fc8', x, out_nodes=n_classes)\n # softmax = tf.nn.softmax(fc8)\n \n \n return x\n \n ", "_____no_output_____" ] ], [ [ "# 定义损失函数\n采用交叉熵计算损失", "_____no_output_____" ] ], [ [ "'''Compute loss\n Args:\n logits: logits tensor, [batch_size, n_classes]\n labels: one-hot labels\n'''\ndef loss(logits, labels):\n \n with tf.name_scope('loss') as scope:\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels,name='cross-entropy')\n loss = tf.reduce_mean(cross_entropy, name='loss')\n tf.summary.scalar(scope+'/loss', loss)\n return loss", "_____no_output_____" ] ], [ [ "# 定义准确率", "_____no_output_____" ] ], [ [ "'''\n Evaluate the quality of the logits 
at predicting the label.\n Args:\n logits: Logits tensor, float - [batch_size, NUM_CLASSES].\n labels: Labels tensor,\n'''\ndef accuracy(logits, labels):\n with tf.name_scope('accuracy') as scope:\n correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))\n correct = tf.cast(correct, tf.float32)\n accuracy = tf.reduce_mean(correct)*100.0\n tf.summary.scalar(scope+'/accuracy', accuracy)\n return accuracy", "_____no_output_____" ] ], [ [ "# 定义优化函数", "_____no_output_____" ] ], [ [ "def optimize(loss, learning_rate, global_step):\n '''optimization, use Gradient Descent as default\n '''\n with tf.name_scope('optimizer'):\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n #optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss, global_step=global_step)\n return train_op", "_____no_output_____" ] ], [ [ "# 定义加载模型函数", "_____no_output_____" ] ], [ [ "def load_with_skip(data_path, session, skip_layer):\n data_dict = np.load(data_path, encoding='latin1').item()\n for key in data_dict:\n if key not in skip_layer:\n with tf.variable_scope(key, reuse=True):\n for subkey, data in zip(('weights', 'biases'), data_dict[key]):\n session.run(tf.get_variable(subkey).assign(data))", "_____no_output_____" ] ], [ [ "# 定义训练图片读取函数", "_____no_output_____" ] ], [ [ "def read_cifar10(data_dir, is_train, batch_size, shuffle):\n \"\"\"Read CIFAR10\n \n Args:\n data_dir: the directory of CIFAR10\n is_train: boolen\n batch_size:\n shuffle: \n Returns:\n label: 1D tensor, tf.int32\n image: 4D tensor, [batch_size, height, width, 3], tf.float32\n \n \"\"\"\n img_width = 32\n img_height = 32\n img_depth = 3\n label_bytes = 1\n image_bytes = img_width*img_height*img_depth\n \n \n with tf.name_scope('input'):\n \n if is_train:\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' %ii)\n for ii in np.arange(1, 6)]\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n \n filename_queue = tf.train.string_input_producer(filenames)\n \n reader = tf.FixedLengthRecordReader(label_bytes + image_bytes)\n \n key, value = reader.read(filename_queue)\n \n record_bytes = tf.decode_raw(value, tf.uint8)\n \n label = tf.slice(record_bytes, [0], [label_bytes]) \n label = tf.cast(label, tf.int32)\n \n image_raw = tf.slice(record_bytes, [label_bytes], [image_bytes]) \n image_raw = tf.reshape(image_raw, [img_depth, img_height, img_width]) \n image = tf.transpose(image_raw, (1,2,0)) # convert from D/H/W to H/W/D \n image = tf.cast(image, tf.float32)\n\n \n# # data argumentation\n\n# image = tf.random_crop(image, [24, 24, 3])# randomly crop the image size to 24 x 24\n# image = tf.image.random_flip_left_right(image)\n# image = tf.image.random_brightness(image, max_delta=63)\n# image = tf.image.random_contrast(image,lower=0.2,upper=1.8)\n\n\n \n image = tf.image.per_image_standardization(image) #substract off the mean and divide by the variance \n\n\n if shuffle:\n images, label_batch = tf.train.shuffle_batch(\n [image, label], \n batch_size = batch_size,\n num_threads= 64,\n capacity = 20000,\n min_after_dequeue = 3000)\n else:\n images, label_batch = tf.train.batch(\n [image, label],\n batch_size = batch_size,\n num_threads = 64,\n capacity= 2000)\n ## ONE-HOT \n n_classes = 10\n label_batch = tf.one_hot(label_batch, depth= n_classes)\n label_batch = tf.cast(label_batch, dtype=tf.int32)\n label_batch = tf.reshape(label_batch, [batch_size, n_classes])\n \n return images, label_batch", "_____no_output_____" ] ], [ [ "# 定义训练函数", "_____no_output_____" ] ], 
[ [ "IMG_W = 32\nIMG_H = 32\nN_CLASSES = 10\nBATCH_SIZE = 32\nlearning_rate = 0.01\nMAX_STEP = 10 # it took me about one hour to complete the training.\nIS_PRETRAIN = False", "_____no_output_____" ], [ "image_size = 224 # 输入图像尺寸\nimages = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))\nvgg16_net(images,keep_prob)\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)", "_____no_output_____" ], [ "def train():\n pre_trained_weights = './/vgg16_pretrain//vgg16.npy'\n data_dir = './/data//cifar-10-batches-bin//'\n train_log_dir = './/logs//train//'\n val_log_dir = './/logs//val//'\n \n with tf.name_scope('input'):\n tra_image_batch, tra_label_batch = read_cifar10(data_dir=data_dir,\n is_train=True,\n batch_size= BATCH_SIZE,\n shuffle=True)\n val_image_batch, val_label_batch = read_cifar10(data_dir=data_dir,\n is_train=False,\n batch_size= BATCH_SIZE,\n shuffle=False)\n \n x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])\n y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])\n \n \n logits = vgg16_net(x, N_CLASSES, IS_PRETRAIN)\n loss_1 = loss(logits, y_)\n accuracy = accuracy(logits, y_)\n \n my_global_step = tf.Variable(0, name='global_step', trainable=False) \n train_op = optimize(loss_1, learning_rate, my_global_step)\n \n saver = tf.train.Saver(tf.global_variables())\n summary_op = tf.summary.merge_all()\n \n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n \n print(x.shape())\n print(y_.shape())\n \n if(IS_PRETRAIN):\n load_with_skip(pre_trained_weights, sess, ['fc6','fc7','fc8']) \n \n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)\n val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)\n \n try:\n for step in np.arange(MAX_STEP):\n if coord.should_stop():\n break\n \n tra_images,tra_labels = sess.run([tra_image_batch, tra_label_batch])\n _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],\n feed_dict={x:tra_images, y_:tra_labels}) \n if step % 50 == 0 or (step + 1) == MAX_STEP: \n print ('Step: %d, loss: %.4f, accuracy: %.4f%%' % (step, tra_loss, tra_acc))\n summary_str = sess.run(summary_op)\n tra_summary_writer.add_summary(summary_str, step)\n \n if step % 200 == 0 or (step + 1) == MAX_STEP:\n val_images, val_labels = sess.run([val_image_batch, val_label_batch])\n val_loss, val_acc = sess.run([loss, accuracy],\n feed_dict={x:val_images,y_:val_labels})\n print('** Step %d, val loss = %.2f, val accuracy = %.2f%% **' %(step, val_loss, val_acc))\n \n summary_str = sess.run(summary_op)\n val_summary_writer.add_summary(summary_str, step)\n \n if step % 2000 == 0 or (step + 1) == MAX_STEP:\n checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n \n except tf.errors.OutOfRangeError:\n print('Done training -- epoch limit reached')\n finally:\n coord.request_stop()\n \n coord.join(threads)\n \n ", "_____no_output_____" ], [ "train()", "_____no_output_____" ] ], [ [ "## VGG16使用", "_____no_output_____" ] ], [ [ "def time_tensorflow_run(session, target, feed, info_string):\n num_steps_burn_in = 10 # 预热轮数\n total_duration = 0.0 # 总时间\n total_duration_squared = 0.0 # 总时间的平方和用以计算方差\n for i in range(num_batches + num_steps_burn_in):\n start_time = time.time()\n _ = session.run(target,feed_dict=feed)\n duration = time.time() - start_time\n if i >= 
num_steps_burn_in: # 只考虑预热轮数之后的时间\n if not i % 10:\n print('%s:step %d,duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))\n total_duration += duration\n total_duration_squared += duration * duration\n mn = total_duration / num_batches # 平均每个batch的时间\n vr = total_duration_squared / num_batches - mn * mn # 方差\n sd = math.sqrt(vr) # 标准差\n print('%s: %s across %d steps, %.3f +/- %.3f sec/batch' % (datetime.now(), info_string, num_batches, mn, sd))", "_____no_output_____" ], [ "def run_benchmark():\n with tf.Graph().as_default():\n '''定义图片尺寸224,利用tf.random_normal函数生成标准差为0.1的正态分布的随机数来构建224x224的随机图片'''\n image_size = 224 # 输入图像尺寸\n images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))\n #构建keep_prob的placeholder\n keep_prob = tf.placeholder(tf.float32)\n prediction,softmax,fc8,p = vgg16_net(images,keep_prob)\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n #设置keep_prob为1.0,运用time_tensorflow_run来评测forward运算随机\n time_tensorflow_run(sess, prediction,{keep_prob:1.0}, \"Forward\")\n # 用以模拟训练的过程\n objective = tf.nn.l2_loss(fc8) # 给一个loss\n grad = tf.gradients(objective, p) # 相对于loss的 所有模型参数的梯度\n #评测backward运算时间\n time_tensorflow_run(sess, grad, {keep_prob:0.5},\"Forward-backward\")", "_____no_output_____" ], [ "batch_size = 32\nnum_batches = 100\nrun_benchmark()", "_____no_output_____" ] ], [ [ "## 其他参数", "_____no_output_____" ] ], [ [ "# Construct model\npred = conv_net(x, weights, biases, keep_prob)\n \n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n \n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n# Initializing the variables\ninit = tf.global_variables_initializer()\nsaver=tf.train.Saver()", "_____no_output_____" ] ], [ [ "https://blog.csdn.net/roguesir/article/details/77051250\nhttps://blog.csdn.net/zhangwei15hh/article/details/78417789\nhttps://blog.csdn.net/v1_vivian/article/details/77898652", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7c6516d26cf7e6ec799303f360c7ad671265ed5
1,375
ipynb
Jupyter Notebook
docs/paper/paper_demo.ipynb
yizhouzhao/GenMotion
67e73d06155d888eabda1187aa6ed3bbd796a814
[ "MIT" ]
32
2021-11-15T07:20:19.000Z
2022-03-15T11:54:19.000Z
docs/paper/paper_demo.ipynb
yizhouzhao/GenMotion
67e73d06155d888eabda1187aa6ed3bbd796a814
[ "MIT" ]
null
null
null
docs/paper/paper_demo.ipynb
yizhouzhao/GenMotion
67e73d06155d888eabda1187aa6ed3bbd796a814
[ "MIT" ]
3
2021-12-05T22:04:27.000Z
2022-03-05T16:30:57.000Z
29.891304
131
0.644364
[ [ [ "# 1. Load hyper parameters\nopt = vars(HDM05Params()) # from algorithm.encoder_recurrent_decoder.params import HDM05Params\n# 2. Load dataset\ndataset = HDM05Dataset(data_path, opt) # from dataset.hdm05.hdm05_data_utils import HDM05Dataset\n# 3. Load model/architecture\nmodel = EncoderRecurrentDecoder(opt) # from algorithm.encoder_recurrent_decoder.models import EncoderRecurrentDecoder\n# 4. Train model \ntrainer = HDM05Trainer(dataset, model, opt, device) # from algorithm.encoder_recurrent_decoder.trainer import HDM05Trainer\n# 5. Sample animation\nsampler = HDM05Sampler(save_path, opt, device) # from algorithm.encoder_recurrent_decoder.sampler import HDM05Sampler\nsampler.sample(input_motion) \n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
e7c68fa6d46902554b59d56ba9381d7bfb4537f1
1,037,113
ipynb
Jupyter Notebook
Autoencoders Faces Colorization in PyTorch.ipynb
shinw97/Faces-Colourisation-using-Autoencoders-in-PyTorch-and-TensorFlow-Keras
65cf2cbb6c127c1812f46778d57837cdbea0cad6
[ "MIT" ]
3
2021-05-30T10:27:42.000Z
2022-02-06T15:15:51.000Z
Autoencoders Faces Colorization in PyTorch.ipynb
shinw97/Faces-Colourisation-using-Autoencoders-in-PyTorch-and-TensorFlow-Keras
65cf2cbb6c127c1812f46778d57837cdbea0cad6
[ "MIT" ]
null
null
null
Autoencoders Faces Colorization in PyTorch.ipynb
shinw97/Faces-Colourisation-using-Autoencoders-in-PyTorch-and-TensorFlow-Keras
65cf2cbb6c127c1812f46778d57837cdbea0cad6
[ "MIT" ]
null
null
null
1,037,113
1,037,113
0.9515
[ [ [ "!nvidia-smi", "Wed Aug 5 06:02:13 2020 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 450.57 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |\n| N/A 44C P0 27W / 250W | 0MiB / 16280MiB | 0% Default |\n| | | ERR! |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ], [ "VERSION = \"1.5\" #@param [\"1.5\" , \"20200325\", \"nightly\"]\n!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py\n!python pytorch-xla-env-setup.py --version $VERSION", " % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 5115 100 5115 0 0 31000 0 --:--:-- --:--:-- --:--:-- 31000\nUpdating... This may take around 2 minutes.\nUninstalling torch-1.6.0+cu101:\n" ], [ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\nimport torch.optim as optim\nimport numpy as np\nimport cv2\nimport math\nfrom google.colab.patches import cv2_imshow", "_____no_output_____" ], [ "class Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n self.layers_dict = nn.ModuleDict({\n 'conv2D_1' : nn.Conv2d(3, 64, 3, padding=1),\n 'conv2D_1_same' : nn.Conv2d(64, 64, 3, padding=1),\n 'conv2D_2' : nn.Conv2d(64, 128, 3, padding=1),\n 'conv2D_2_same' : nn.Conv2d(128, 128, 3, padding=1),\n 'conv2D_3' : nn.Conv2d(128, 256, 3, padding=1),\n 'conv2D_3_same_1' : nn.Conv2d(256, 256, 3, padding=1),\n 'conv2D_3_same_2' : nn.Conv2d(256, 256, 3, padding=1),\n 'conv2D_4' : nn.Conv2d(256, 512, 3, padding=1),\n 'conv2D_4_same_1' : nn.Conv2d(512, 512, 3, padding=1),\n 'conv2D_4_same_2' : nn.Conv2d(512, 512, 3, padding=1),\n 'conv2D_4_same_3' : nn.Conv2d(512, 512, 3, padding=1),\n 'conv2D_4_same_4' : nn.Conv2d(512, 512, 3, padding=1),\n 'conv2D_4_same_5' : nn.Conv2d(512, 512, 3, padding=1),\n 'maxPool2D' : nn.MaxPool2d(2, 2),\n 'batchNorm_1' : nn.BatchNorm2d(64),\n 'batchNorm_2' : nn.BatchNorm2d(128),\n 'batchNorm_3' : nn.BatchNorm2d(256),\n 'batchNorm_4' : nn.BatchNorm2d(512),\n })\n\n # for key, value in self.layers_dict.items():\n # self.register_parameter(key, )\n\n # self.conv2D_1 = nn.Conv2d(3, 64, 3, padding=1)\n # self.conv2D_1_same = nn.Conv2d(64, 64, 3, padding=1)\n # self.conv2D_2 = nn.Conv2d(64, 128, 3, padding=1)\n # self.conv2D_2_same = nn.Conv2d(128, 128, 3, padding=1)\n \n # self.conv2D_3 = nn.Conv2d(128, 256, 3, padding=1)\n # self.conv2D_3_same = nn.Conv2d(256, 256, 3, padding=1)\n # self.conv2D_3_same = nn.Conv2d(256, 256, 3, padding=1)\n # self.conv2D_3_same = nn.Conv2d(256, 256, 3, padding=1)\n\n # self.conv2D_4 = nn.Conv2d(256, 512, 3, padding=1)\n # self.conv2D_4_same = nn.Conv2d(512, 512, 3, padding=1)\n # 
self.maxPool2D = nn.MaxPool2d(2, 2)\n # self.output_shape = None\n # self.forward(torch.FloatTensor(np.random.rand(1, 3, 224, 224)))\n \n def forward(self, x):\n x = F.leaky_relu(self.layers_dict['conv2D_1'](x))\n x = F.leaky_relu(self.layers_dict['conv2D_1_same'](x))\n x = self.layers_dict['maxPool2D'](x)\n\n x = F.leaky_relu(self.layers_dict['conv2D_2'](x))\n x = F.leaky_relu(self.layers_dict['conv2D_2_same'](x))\n x = self.layers_dict['maxPool2D'](x)\n\n x = F.leaky_relu(self.layers_dict['conv2D_3'](x))\n\n for i in range(2):\n x = F.leaky_relu(self.layers_dict['conv2D_3_same_{}'.format(i + 1)](x))\n \n x = self.layers_dict['maxPool2D'](x)\n\n x = F.leaky_relu(self.layers_dict['conv2D_4'](x))\n \n for i in range(2):\n x = F.leaky_relu(self.layers_dict['conv2D_4_same_{}'.format(i + 1)](x))\n \n x = self.layers_dict['maxPool2D'](x)\n \n for i in range(3):\n x = F.leaky_relu(self.layers_dict['conv2D_4_same_{}'.format(i + 3)](x))\n \n x = self.layers_dict['maxPool2D'](x)\n \n \n return x", "_____no_output_____" ], [ "# GPU\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nencoder = Encoder().to(device)\n\nprint('VGG-16 Encoder')\nsummary(encoder, (3, 416, 416))", "VGG-16 Encoder\n----------------------------------------------------------------\n Layer (type) Output Shape Param #\n================================================================\n Conv2d-1 [-1, 64, 416, 416] 1,792\n Conv2d-2 [-1, 64, 416, 416] 36,928\n MaxPool2d-3 [-1, 64, 208, 208] 0\n Conv2d-4 [-1, 128, 208, 208] 73,856\n Conv2d-5 [-1, 128, 208, 208] 147,584\n MaxPool2d-6 [-1, 128, 104, 104] 0\n Conv2d-7 [-1, 256, 104, 104] 295,168\n Conv2d-8 [-1, 256, 104, 104] 590,080\n Conv2d-9 [-1, 256, 104, 104] 590,080\n MaxPool2d-10 [-1, 256, 52, 52] 0\n Conv2d-11 [-1, 512, 52, 52] 1,180,160\n Conv2d-12 [-1, 512, 52, 52] 2,359,808\n Conv2d-13 [-1, 512, 52, 52] 2,359,808\n MaxPool2d-14 [-1, 512, 26, 26] 0\n Conv2d-15 [-1, 512, 26, 26] 2,359,808\n Conv2d-16 [-1, 512, 26, 26] 2,359,808\n Conv2d-17 [-1, 512, 26, 26] 2,359,808\n MaxPool2d-18 [-1, 512, 13, 13] 0\n================================================================\nTotal params: 14,714,688\nTrainable params: 14,714,688\nNon-trainable params: 0\n----------------------------------------------------------------\nInput size (MB): 1.98\nForward/backward pass size (MB): 396.75\nParams size (MB): 56.13\nEstimated Total Size (MB): 454.87\n----------------------------------------------------------------\n" ], [ "class Decoder(nn.Module):\n def __init__(self):\n super(Decoder, self).__init__()\n self.layers_dict = nn.ModuleDict({\n 'conv2DTrans_1_same_1' : nn.ConvTranspose2d(512, 512, 3, padding=1),\n 'conv2DTrans_1_same_2' : nn.ConvTranspose2d(512, 512, 3, padding=1),\n 'conv2DTrans_1_same_3' : nn.ConvTranspose2d(512, 512, 3, padding=1),\n 'conv2DTrans_1_same_4' : nn.ConvTranspose2d(512, 512, 3, padding=1),\n 'conv2DTrans_1_same_5' : nn.ConvTranspose2d(512, 512, 3, padding=1),\n 'conv2DTrans_1' : nn.ConvTranspose2d(512, 256, 3, padding=1),\n 'conv2DTrans_2_same_1' : nn.ConvTranspose2d(256, 256, 3, padding=1),\n 'conv2DTrans_2_same_2' : nn.ConvTranspose2d(256, 256, 3, padding=1),\n 'conv2DTrans_2' : nn.ConvTranspose2d(256, 128, 3, padding=1),\n 'conv2DTrans_3_same' : nn.ConvTranspose2d(128, 128, 3, padding=1),\n 'conv2DTrans_3' : nn.ConvTranspose2d(128, 64, 3, padding=1),\n 'conv2DTrans_4_same' : nn.ConvTranspose2d(64, 64, 3, padding=1),\n 'conv2DTrans_4' : nn.ConvTranspose2d(64, 3, 3, padding=1),\n 'upSample' : nn.Upsample(scale_factor=2),\n 'batchNorm_4' : 
nn.BatchNorm2d(64),\n 'batchNorm_3' : nn.BatchNorm2d(128),\n 'batchNorm_2' : nn.BatchNorm2d(256),\n 'batchNorm_1' : nn.BatchNorm2d(512),\n })\n # self.conv2DTrans_1_same = nn.ConvTranspose2d(512, 512, 3, padding=1)\n # self.conv2DTrans_1 = nn.ConvTranspose2d(512, 256, 3, padding=1)\n # self.conv2DTrans_2_same = nn.ConvTranspose2d(256, 256, 3, padding=1)\n # self.conv2DTrans_2 = nn.ConvTranspose2d(256, 128, 3, padding=1)\n # self.conv2DTrans_3_same = nn.ConvTranspose2d(128, 128, 3, padding=1)\n # self.conv2DTrans_3 = nn.ConvTranspose2d(128, 64, 3, padding=1)\n # self.conv2DTrans_4_same = nn.ConvTranspose2d(64, 64, 3, padding=1)\n # self.conv2DTrans_4 = nn.ConvTranspose2d(64, 3, 3, padding=1)\n # self.upSample = nn.Upsample(scale_factor=2)\n \n def forward(self, x):\n\n x = self.layers_dict['upSample'](x)\n \n\n for i in range(3):\n x = F.leaky_relu(self.layers_dict['conv2DTrans_1_same_{}'.format(i + 1)](x))\n \n \n x = self.layers_dict['upSample'](x)\n\n for i in range(2):\n x = F.leaky_relu(self.layers_dict['conv2DTrans_1_same_{}'.format(i + 4)](x))\n \n x = F.leaky_relu(self.layers_dict['conv2DTrans_1'](x))\n \n x = self.layers_dict['upSample'](x)\n for i in range(2):\n x = F.leaky_relu(self.layers_dict['conv2DTrans_2_same_{}'.format(i + 1)](x))\n \n\n x = F.leaky_relu(self.layers_dict['conv2DTrans_2'](x))\n x = self.layers_dict['upSample'](x)\n\n x = F.leaky_relu(self.layers_dict['conv2DTrans_3_same'](x))\n x = F.leaky_relu(self.layers_dict['conv2DTrans_3'](x))\n \n x = self.layers_dict['upSample'](x)\n\n x = F.leaky_relu(self.layers_dict['conv2DTrans_4_same'](x))\n x = torch.sigmoid(self.layers_dict['conv2DTrans_4'](x))\n return x", "_____no_output_____" ], [ "decoder = Decoder().to(device)\n\nprint('VGG-16 Decoder')\nsummary(decoder, (512, 13, 13))", "VGG-16 Decoder\n----------------------------------------------------------------\n Layer (type) Output Shape Param #\n================================================================\n Upsample-1 [-1, 512, 26, 26] 0\n ConvTranspose2d-2 [-1, 512, 26, 26] 2,359,808\n ConvTranspose2d-3 [-1, 512, 26, 26] 2,359,808\n ConvTranspose2d-4 [-1, 512, 26, 26] 2,359,808\n Upsample-5 [-1, 512, 52, 52] 0\n ConvTranspose2d-6 [-1, 512, 52, 52] 2,359,808\n ConvTranspose2d-7 [-1, 512, 52, 52] 2,359,808\n ConvTranspose2d-8 [-1, 256, 52, 52] 1,179,904\n Upsample-9 [-1, 256, 104, 104] 0\n ConvTranspose2d-10 [-1, 256, 104, 104] 590,080\n ConvTranspose2d-11 [-1, 256, 104, 104] 590,080\n ConvTranspose2d-12 [-1, 128, 104, 104] 295,040\n Upsample-13 [-1, 128, 208, 208] 0\n ConvTranspose2d-14 [-1, 128, 208, 208] 147,584\n ConvTranspose2d-15 [-1, 64, 208, 208] 73,792\n Upsample-16 [-1, 64, 416, 416] 0\n ConvTranspose2d-17 [-1, 64, 416, 416] 36,928\n ConvTranspose2d-18 [-1, 3, 416, 416] 1,731\n================================================================\nTotal params: 14,714,179\nTrainable params: 14,714,179\nNon-trainable params: 0\n----------------------------------------------------------------\nInput size (MB): 0.33\nForward/backward pass size (MB): 400.05\nParams size (MB): 56.13\nEstimated Total Size (MB): 456.51\n----------------------------------------------------------------\n" ], [ "class Autoencoder(nn.Module):\n def __init__(self, encoder, decoder):\n super(Autoencoder, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n \n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x", "_____no_output_____" ], [ "autoencoder = Autoencoder(encoder=encoder, decoder=decoder).to(device)\n\nprint('Combined 
Autoencoder')\nsummary(autoencoder, (3, 416, 416))", "Combined Autoencoder\n----------------------------------------------------------------\n Layer (type) Output Shape Param #\n================================================================\n Conv2d-1 [-1, 64, 416, 416] 1,792\n Conv2d-2 [-1, 64, 416, 416] 36,928\n MaxPool2d-3 [-1, 64, 208, 208] 0\n Conv2d-4 [-1, 128, 208, 208] 73,856\n Conv2d-5 [-1, 128, 208, 208] 147,584\n MaxPool2d-6 [-1, 128, 104, 104] 0\n Conv2d-7 [-1, 256, 104, 104] 295,168\n Conv2d-8 [-1, 256, 104, 104] 590,080\n Conv2d-9 [-1, 256, 104, 104] 590,080\n MaxPool2d-10 [-1, 256, 52, 52] 0\n Conv2d-11 [-1, 512, 52, 52] 1,180,160\n Conv2d-12 [-1, 512, 52, 52] 2,359,808\n Conv2d-13 [-1, 512, 52, 52] 2,359,808\n MaxPool2d-14 [-1, 512, 26, 26] 0\n Conv2d-15 [-1, 512, 26, 26] 2,359,808\n Conv2d-16 [-1, 512, 26, 26] 2,359,808\n Conv2d-17 [-1, 512, 26, 26] 2,359,808\n MaxPool2d-18 [-1, 512, 13, 13] 0\n Encoder-19 [-1, 512, 13, 13] 0\n Upsample-20 [-1, 512, 26, 26] 0\n ConvTranspose2d-21 [-1, 512, 26, 26] 2,359,808\n ConvTranspose2d-22 [-1, 512, 26, 26] 2,359,808\n ConvTranspose2d-23 [-1, 512, 26, 26] 2,359,808\n Upsample-24 [-1, 512, 52, 52] 0\n ConvTranspose2d-25 [-1, 512, 52, 52] 2,359,808\n ConvTranspose2d-26 [-1, 512, 52, 52] 2,359,808\n ConvTranspose2d-27 [-1, 256, 52, 52] 1,179,904\n Upsample-28 [-1, 256, 104, 104] 0\n ConvTranspose2d-29 [-1, 256, 104, 104] 590,080\n ConvTranspose2d-30 [-1, 256, 104, 104] 590,080\n ConvTranspose2d-31 [-1, 128, 104, 104] 295,040\n Upsample-32 [-1, 128, 208, 208] 0\n ConvTranspose2d-33 [-1, 128, 208, 208] 147,584\n ConvTranspose2d-34 [-1, 64, 208, 208] 73,792\n Upsample-35 [-1, 64, 416, 416] 0\n ConvTranspose2d-36 [-1, 64, 416, 416] 36,928\n ConvTranspose2d-37 [-1, 3, 416, 416] 1,731\n Decoder-38 [-1, 3, 416, 416] 0\n================================================================\nTotal params: 29,428,867\nTrainable params: 29,428,867\nNon-trainable params: 0\n----------------------------------------------------------------\nInput size (MB): 1.98\nForward/backward pass size (MB): 801.43\nParams size (MB): 112.26\nEstimated Total Size (MB): 915.67\n----------------------------------------------------------------\n" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "# Load pre-trained weights (if available)\n\nWEIGHT_PATH = \"<path to weights.pth>\"\nautoencoder.load_state_dict(torch.load(WEIGHT_PATH))", "_____no_output_____" ], [ "TRAINING_IMAGES_DIR = \"<path to img_align_celeba.zip>\"", "_____no_output_____" ], [ "!unzip {TRAINING_IMAGES_DIR} -d /content/", "_____no_output_____" ], [ "import os\nimport random\n\nimg_dir = os.listdir('img_align_celeba')\n\nrandom.Random(2).shuffle(img_dir)\n\nprint(\"Total Images:\", len(img_dir))\n\nval_portion = math.ceil(len(img_dir) * 0.1)\n\nval_img_dir = img_dir[-val_portion:]\nimg_dir = img_dir[:-val_portion]\n\nprint(\"Training set:\", len(img_dir))\nprint(\"Validation Set:\", len(val_img_dir))", "Total Images: 202599\nTraining set: 
182339\nValidation Set: 20260\n" ], [ "def generate_Xy(l_image, img_dims):\n X, y = [], []\n for img_d in l_image:\n coloured = cv2.resize(cv2.imread(img_d), img_dims[:2])\n gray_coloured = cv2.cvtColor(cv2.cvtColor(coloured, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)\n X.append(gray_coloured / 255)\n y.append(coloured / 255)\n return np.asarray(X, dtype=np.float32), np.asarray(y, dtype=np.float32)", "_____no_output_____" ], [ "def set_generator(batch_size, imgs, img_dims):\n pointer = 0\n end_of_epoch = False\n while True:\n l_image = []\n for i in range(batch_size):\n if pointer == len(imgs):\n end_of_epoch = True\n break\n l_image.append('img_align_celeba/' + imgs[pointer])\n pointer += 1\n \n if l_image != []:\n # print(l_image)\n X, y = generate_Xy(l_image, img_dims)\n yield (X, y)\n \n if end_of_epoch:\n break", "_____no_output_____" ], [ "IMG_DIM = (224, 224)\nBATCH_SIZE = 16\nEPOCHS = 1\nlast_batch_size = len(img_dir) % BATCH_SIZE\nn_batch = math.ceil(len(img_dir) / BATCH_SIZE)\ntrain_loss_trace = []\nval_loss_trace = []", "_____no_output_____" ], [ "loss_func = nn.MSELoss()\noptimizer = optim.Adam(autoencoder.parameters(), lr=0.0001)", "_____no_output_____" ], [ "def test_on_image(val_img_dir, autoencoder, device, n_image=1, img_dim = (224, 224)):\n for img in val_img_dir[:n_image]:\n coloured = cv2.resize(cv2.imread('img_align_celeba/' + img), img_dim)\n gray_coloured = cv2.cvtColor(cv2.cvtColor(coloured, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)\n test_image = gray_coloured.astype(np.float32) / 255\n test_image = np.reshape(test_image, (1, img_dim[0], img_dim[1], 3))\n test_image = np.transpose(test_image, (0,3,1,2))\n test_image = torch.from_numpy(test_image).to(device)\n with torch.no_grad():\n autoencoder.eval()\n y_hat = autoencoder(test_image)\n y_hat = y_hat.cpu().data.numpy()\n y_hat = np.transpose(y_hat, (0,2,3,1))\n # y_hat = np.rollaxis(y_hat, 3, 1)\n y_hat = y_hat * 255\n test_result = np.concatenate((gray_coloured, coloured, np.reshape(y_hat, (img_dim[0], img_dim[1], 3))), axis=1)\n test_result = cv2.resize(test_result, (224 * 3, 224))\n cv2_imshow(test_result)", "_____no_output_____" ], [ "from IPython.display import clear_output, display\nimport matplotlib.pyplot as plt\n\nfor e in range(EPOCHS):\n running_loss = []\n # dh = display('epoch start.',display_id=True)\n for i, data in enumerate(set_generator(BATCH_SIZE, img_dir, IMG_DIM)):\n # get the inputs\n X_train, y_train = data\n \n # X_train, y_train = np.rollaxis(X_train, 3, 1), np.rollaxis(y_train, 3, 1)\n X_train, y_train = np.transpose(X_train, (0,3,1,2)), np.transpose(y_train, (0,3,1,2))\n \n X_train, y_train = torch.from_numpy(X_train).to(device), torch.from_numpy(y_train).to(device)\n \n # zero the parameter gradients\n optimizer.zero_grad()\n autoencoder.train()\n # forward + backward + optimize\n y_hat = autoencoder(X_train)\n loss = loss_func(y_hat, y_train)\n loss.backward()\n # xm.optimizer_step(optimizer, barrier=True)\n optimizer.step()\n\n # print statistics\n # running_loss.append(loss.item())\n\n # dh.update('[epoch: {}, batch: {}] loss: {} av.loss: {}'.format((e + 1), i + 1, str(loss.item()).ljust(20), np.mean(running_loss)))\n print('[epoch: {}, batch: {} of {}] loss: {}'.format((e + 1), i + 1, n_batch, str(loss.item()).ljust(20)))\n\n if i % 10 == 0:\n clear_output(wait=True)\n test_on_image(val_img_dir[512:], autoencoder, device, img_dim=IMG_DIM)\n print()\n # plt.plot(running_loss)\n # plt.title(\"Train Loss\")\n # plt.ylabel(\"Loss\")\n # plt.xlabel(\"Batch Iteration\")\n # plt.show()\n # 
print()\n\nprint('Finished Training')", "_____no_output_____" ], [ "test_on_image(val_img_dir[967:], autoencoder, device, img_dim=IMG_DIM)", "_____no_output_____" ], [ "torch.save(autoencoder.state_dict(), WEIGHT_PATH)", "_____no_output_____" ], [ "def colourize(i_dir, autoencoder, input_shape):\n # coloured = cv2.resize(cv2.imread('img_align_celeba/' + val_img_dir[93]), (224, 224))\n # coloured = cv2.resize(cv2.imread('black-and-white-portrait-tips-3107.jpg'), (224, 224))\n coloured = cv2.resize(cv2.imread(i_dir), (input_shape[0], input_shape[1]))\n\n gray_coloured = cv2.cvtColor(cv2.cvtColor(coloured, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)\n test_image = gray_coloured.astype(np.float32) / 255\n test_image = np.reshape(test_image, (1, input_shape[0], input_shape[1], 3))\n test_image = np.transpose(test_image, (0,3,1,2))\n test_image = torch.from_numpy(test_image).to(device)\n \n with torch.no_grad():\n autoencoder.eval()\n y_hat = autoencoder(test_image)\n y_hat = y_hat.cpu().data.numpy()\n y_hat = np.transpose(y_hat, (0,2,3,1))\n # y_hat = np.rollaxis(y_hat, 3, 1)\n y_hat = y_hat * 255\n output = np.reshape(y_hat, (input_shape[0], input_shape[1], 3))\n\n blended = cv2.addWeighted(gray_coloured.astype(np.float32),0.5, output, 0.7, 0)\n\n hsv = cv2.cvtColor(blended, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n # h += value # 4\n s += 0.1 # 5\n # v += value # 6\n final_hsv = cv2.merge((h, s, v))\n enhanced = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n\n kernel = np.array([[0,-1,0], [-1,5,-1], [0,-1,0]])\n enhanced = cv2.filter2D(enhanced, -1, kernel)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n output = cv2.resize(output, (input_shape[0], input_shape[1]))\n cv2.rectangle(output, (0,0),(100,20),(0,0,0),cv2.FILLED)\n cv2.putText(output, 'Net. Output', (3, 15), font, 0.5, (255,255,255), 1, cv2.LINE_AA)\n \n cv2.rectangle(gray_coloured, (0,0),(100,20),(0,0,0), cv2.FILLED)\n cv2.putText(gray_coloured, 'Gray Image', (3, 15), font, 0.5, (255,255,255), 1, cv2.LINE_AA)\n\n cv2.rectangle(coloured, (0,0),(90,20),(0,0,0), cv2.FILLED)\n cv2.putText(coloured, 'Ori. Image', (3, 15), font, 0.5, (255,255,255), 1, cv2.LINE_AA)\n\n\n cv2.rectangle(blended, (0,0),(70,20),(0,0,0),cv2.FILLED)\n cv2.putText(blended, 'Blended', (3, 15), font, 0.5, (255,255,255), 1, cv2.LINE_AA)\n\n cv2.rectangle(enhanced, (0,0),(82,20),(0,0,0),cv2.FILLED)\n cv2.putText(enhanced, 'Enhanced', (3, 15), font, 0.5, (255,255,255), 1, cv2.LINE_AA)\n \n blank_ver_line = np.array([[[255] * 3] * 50] * 224)\n print(blank_ver_line.shape)\n\n test_result = np.concatenate((gray_coloured, output, blended, enhanced, blank_ver_line, coloured), axis=1)\n cv2_imshow(cv2.resize(test_result, (1120,224)))", "_____no_output_____" ], [ "for i_dir in random.sample(val_img_dir, 1):\n colourize('img_align_celeba/' + i_dir, autoencoder, (224, 224, 3))", "(224, 50, 3)\n" ], [ "colourize('<path to image>', autoencoder, (224, 224, 3))", "(224, 50, 3)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7c6905800445f33bb18f1c9093f0b4b69a5f54e
50,592
ipynb
Jupyter Notebook
HMM Tagger.ipynb
DeepanshKhurana/udacityproject-hmm-tagger-nlp
5426617123f2eb246840e3b7ccc7cbba59ace54c
[ "MIT" ]
null
null
null
HMM Tagger.ipynb
DeepanshKhurana/udacityproject-hmm-tagger-nlp
5426617123f2eb246840e3b7ccc7cbba59ace54c
[ "MIT" ]
null
null
null
HMM Tagger.ipynb
DeepanshKhurana/udacityproject-hmm-tagger-nlp
5426617123f2eb246840e3b7ccc7cbba59ace54c
[ "MIT" ]
null
null
null
42.657673
660
0.589401
[ [ [ "# Project: Part of Speech Tagging with Hidden Markov Models \n---\n### Introduction\n\nPart of speech tagging is the process of determining the syntactic category of a word from the words in its surrounding context. It is often used to help disambiguate natural language phrases because it can be done quickly with high accuracy. Tagging can be used for many NLP tasks like determining correct pronunciation during speech synthesis (for example, _dis_-count as a noun vs dis-_count_ as a verb), for information retrieval, and for word sense disambiguation.\n\nIn this notebook, you'll use the [Pomegranate](http://pomegranate.readthedocs.io/) library to build a hidden Markov model for part of speech tagging using a \"universal\" tagset. Hidden Markov models have been able to achieve [>96% tag accuracy with larger tagsets on realistic text corpora](http://www.coli.uni-saarland.de/~thorsten/publications/Brants-ANLP00.pdf). Hidden Markov models have also been used for speech recognition and speech generation, machine translation, gene recognition for bioinformatics, and human gesture recognition for computer vision, and more. \n\n![](_post-hmm.png)\n\nThe notebook already contains some code to get you started. You only need to add some new functionality in the areas indicated to complete the project; you will not need to modify the included code beyond what is requested. Sections that begin with **'IMPLEMENTATION'** in the header indicate that you must provide code in the block that follows. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n**Note:** Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You must then **export the notebook** by running the last cell in the notebook, or by using the menu above and navigating to **File -> Download as -> HTML (.html)** Your submissions should include both the `html` and `ipynb` files.\n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n**Note:** Code and Markdown cells can be executed using the `Shift + Enter` keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.\n</div>", "_____no_output_____" ], [ "### The Road Ahead\nYou must complete Steps 1-3 below to pass the project. 
The section on Step 4 includes references & resources you can use to further explore HMM taggers.\n\n- [Step 1](#Step-1:-Read-and-preprocess-the-dataset): Review the provided interface to load and access the text corpus\n- [Step 2](#Step-2:-Build-a-Most-Frequent-Class-tagger): Build a Most Frequent Class tagger to use as a baseline\n- [Step 3](#Step-3:-Build-an-HMM-tagger): Build an HMM Part of Speech tagger and compare to the MFC baseline\n- [Step 4](#Step-4:-[Optional]-Improving-model-performance): (Optional) Improve the HMM tagger", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n**Note:** Make sure you have selected a **Python 3** kernel in Workspaces or the hmm-tagger conda environment if you are running the Jupyter server on your own machine.\n</div>", "_____no_output_____" ] ], [ [ "# Jupyter \"magic methods\" -- only need to be run once per kernel restart\n%load_ext autoreload\n%aimport helpers, tests\n%autoreload 1", "_____no_output_____" ], [ "# import python modules -- this cell needs to be run again if you make changes to any of the files\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom IPython.core.display import HTML\nfrom itertools import chain\nfrom collections import Counter, defaultdict\nfrom helpers import show_model, Dataset\nfrom pomegranate import State, HiddenMarkovModel, DiscreteDistribution", "_____no_output_____" ] ], [ [ "## Step 1: Read and preprocess the dataset\n---\nWe'll start by reading in a text corpus and splitting it into a training and testing dataset. The data set is a copy of the [Brown corpus](https://en.wikipedia.org/wiki/Brown_Corpus) (originally from the [NLTK](https://www.nltk.org/) library) that has already been pre-processed to only include the [universal tagset](https://arxiv.org/pdf/1104.2086.pdf). You should expect to get slightly higher accuracy using this simplified tagset than the same model would achieve on a larger tagset like the full [Penn treebank tagset](https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html), but the process you'll follow would be the same.\n\nThe `Dataset` class provided in helpers.py will read and parse the corpus. You can generate your own datasets compatible with the reader by writing them to the following format. The dataset is stored in plaintext as a collection of words and corresponding tags. Each sentence starts with a unique identifier on the first line, followed by one tab-separated word/tag pair on each following line. Sentences are separated by a single blank line.\n\nExample from the Brown corpus. 
\n```\nb100-38532\nPerhaps\tADV\nit\tPRON\nwas\tVERB\nright\tADJ\n;\t.\n;\t.\n\nb100-35577\n...\n```", "_____no_output_____" ] ], [ [ "data = Dataset(\"tags-universal.txt\", \"brown-universal.txt\", train_test_split=0.8)\n\nprint(\"There are {} sentences in the corpus.\".format(len(data)))\nprint(\"There are {} sentences in the training set.\".format(len(data.training_set)))\nprint(\"There are {} sentences in the testing set.\".format(len(data.testing_set)))\n\nassert len(data) == len(data.training_set) + len(data.testing_set), \\\n \"The number of sentences in the training set + testing set should sum to the number of sentences in the corpus\"", "There are 57340 sentences in the corpus.\nThere are 45872 sentences in the training set.\nThere are 11468 sentences in the testing set.\n" ] ], [ [ "### The Dataset Interface\n\nYou can access (mostly) immutable references to the dataset through a simple interface provided through the `Dataset` class, which represents an iterable collection of sentences along with easy access to partitions of the data for training & testing. Review the reference below, then run and review the next few cells to make sure you understand the interface before moving on to the next step.\n\n```\nDataset-only Attributes:\n training_set - reference to a Subset object containing the samples for training\n testing_set - reference to a Subset object containing the samples for testing\n\nDataset & Subset Attributes:\n sentences - a dictionary with an entry {sentence_key: Sentence()} for each sentence in the corpus\n keys - an immutable ordered (not sorted) collection of the sentence_keys for the corpus\n vocab - an immutable collection of the unique words in the corpus\n tagset - an immutable collection of the unique tags in the corpus\n X - returns an array of words grouped by sentences ((w11, w12, w13, ...), (w21, w22, w23, ...), ...)\n Y - returns an array of tags grouped by sentences ((t11, t12, t13, ...), (t21, t22, t23, ...), ...)\n N - returns the number of distinct samples (individual words or tags) in the dataset\n\nMethods:\n stream() - returns an flat iterable over all (word, tag) pairs across all sentences in the corpus\n __iter__() - returns an iterable over the data as (sentence_key, Sentence()) pairs\n __len__() - returns the nubmer of sentences in the dataset\n```\n\nFor example, consider a Subset, `subset`, of the sentences `{\"s0\": Sentence((\"See\", \"Spot\", \"run\"), (\"VERB\", \"NOUN\", \"VERB\")), \"s1\": Sentence((\"Spot\", \"ran\"), (\"NOUN\", \"VERB\"))}`. The subset will have these attributes:\n\n```\nsubset.keys == {\"s1\", \"s0\"} # unordered\nsubset.vocab == {\"See\", \"run\", \"ran\", \"Spot\"} # unordered\nsubset.tagset == {\"VERB\", \"NOUN\"} # unordered\nsubset.X == ((\"Spot\", \"ran\"), (\"See\", \"Spot\", \"run\")) # order matches .keys\nsubset.Y == ((\"NOUN\", \"VERB\"), (\"VERB\", \"NOUN\", \"VERB\")) # order matches .keys\nsubset.N == 7 # there are a total of seven observations over all sentences\nlen(subset) == 2 # because there are two sentences\n```\n\n<div class=\"alert alert-block alert-info\">\n**Note:** The `Dataset` class is _convenient_, but it is **not** efficient. It is not suitable for huge datasets because it stores multiple redundant copies of the same data.\n</div>", "_____no_output_____" ], [ "#### Sentences\n\n`Dataset.sentences` is a dictionary of all sentences in the training corpus, each keyed to a unique sentence identifier. 
Each `Sentence` is itself an object with two attributes: a tuple of the words in the sentence named `words` and a tuple of the tag corresponding to each word named `tags`.", "_____no_output_____" ] ], [ [ "key = 'b100-38532'\nprint(\"Sentence: {}\".format(key))\nprint(\"words:\\n\\t{!s}\".format(data.sentences[key].words))\nprint(\"tags:\\n\\t{!s}\".format(data.sentences[key].tags))", "Sentence: b100-38532\nwords:\n\t('Perhaps', 'it', 'was', 'right', ';', ';')\ntags:\n\t('ADV', 'PRON', 'VERB', 'ADJ', '.', '.')\n" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n**Note:** The underlying iterable sequence is **unordered** over the sentences in the corpus; it is not guaranteed to return the sentences in a consistent order between calls. Use `Dataset.stream()`, `Dataset.keys`, `Dataset.X`, or `Dataset.Y` attributes if you need ordered access to the data.\n</div>\n\n#### Counting Unique Elements\n\nYou can access the list of unique words (the dataset vocabulary) via `Dataset.vocab` and the unique list of tags via `Dataset.tagset`.", "_____no_output_____" ] ], [ [ "print(\"There are a total of {} samples of {} unique words in the corpus.\"\n .format(data.N, len(data.vocab)))\nprint(\"There are {} samples of {} unique words in the training set.\"\n .format(data.training_set.N, len(data.training_set.vocab)))\nprint(\"There are {} samples of {} unique words in the testing set.\"\n .format(data.testing_set.N, len(data.testing_set.vocab)))\nprint(\"There are {} words in the test set that are missing in the training set.\"\n .format(len(data.testing_set.vocab - data.training_set.vocab)))\n\nassert data.N == data.training_set.N + data.testing_set.N, \\\n \"The number of training + test samples should sum to the total number of samples\"", "There are a total of 1161192 samples of 56057 unique words in the corpus.\nThere are 928458 samples of 50536 unique words in the training set.\nThere are 232734 samples of 25112 unique words in the testing set.\nThere are 5521 words in the test set that are missing in the training set.\n" ] ], [ [ "#### Accessing word and tag Sequences\nThe `Dataset.X` and `Dataset.Y` attributes provide access to ordered collections of matching word and tag sequences for each sentence in the dataset.", "_____no_output_____" ] ], [ [ "# accessing words with Dataset.X and tags with Dataset.Y \nfor i in range(2): \n print(\"Sentence {}:\".format(i + 1), data.X[i])\n print()\n print(\"Labels {}:\".format(i + 1), data.Y[i])\n print()", "Sentence 1: ('Mr.', 'Podger', 'had', 'thanked', 'him', 'gravely', ',', 'and', 'now', 'he', 'made', 'use', 'of', 'the', 'advice', '.')\n\nLabels 1: ('NOUN', 'NOUN', 'VERB', 'VERB', 'PRON', 'ADV', '.', 'CONJ', 'ADV', 'PRON', 'VERB', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\nSentence 2: ('But', 'there', 'seemed', 'to', 'be', 'some', 'difference', 'of', 'opinion', 'as', 'to', 'how', 'far', 'the', 'board', 'should', 'go', ',', 'and', 'whose', 'advice', 'it', 'should', 'follow', '.')\n\nLabels 2: ('CONJ', 'PRT', 'VERB', 'PRT', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'ADP', 'ADV', 'ADV', 'DET', 'NOUN', 'VERB', 'VERB', '.', 'CONJ', 'DET', 'NOUN', 'PRON', 'VERB', 'VERB', '.')\n\n" ] ], [ [ "#### Accessing (word, tag) Samples\nThe `Dataset.stream()` method returns an iterator that chains together every pair of (word, tag) entries across all sentences in the entire corpus.", "_____no_output_____" ] ], [ [ "# use Dataset.stream() (word, tag) samples for the entire corpus\nprint(\"\\nStream (word, tag) pairs:\\n\")\nfor i, pair in enumerate(data.stream()):\n 
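    # each pair yielded by stream() is a single (word, tag) tuple, drawn in corpus order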
print(\"\\t\", pair)\n if i > 5: break", "\nStream (word, tag) pairs:\n\n\t ('Mr.', 'NOUN')\n\t ('Podger', 'NOUN')\n\t ('had', 'VERB')\n\t ('thanked', 'VERB')\n\t ('him', 'PRON')\n\t ('gravely', 'ADV')\n\t (',', '.')\n" ] ], [ [ "\nFor both our baseline tagger and the HMM model we'll build, we need to estimate the frequency of tags & words from the frequency counts of observations in the training corpus. In the next several cells you will complete functions to compute the counts of several sets of counts. ", "_____no_output_____" ], [ "## Step 2: Build a Most Frequent Class tagger\n---\n\nPerhaps the simplest tagger (and a good baseline for tagger performance) is to simply choose the tag most frequently assigned to each word. This \"most frequent class\" tagger inspects each observed word in the sequence and assigns it the label that was most often assigned to that word in the corpus.", "_____no_output_____" ], [ "### IMPLEMENTATION: Pair Counts\n\nComplete the function below that computes the joint frequency counts for two input sequences.", "_____no_output_____" ] ], [ [ "def pair_counts(sequences_A, sequences_B):\n \"\"\"Return a dictionary keyed to each unique value in the first sequence list\n that counts the number of occurrences of the corresponding value from the\n second sequences list.\n \n For example, if sequences_A is tags and sequences_B is the corresponding\n words, then if 1244 sequences contain the word \"time\" tagged as a NOUN, then\n you should return a dictionary such that pair_counts[NOUN][time] == 1244\n \"\"\"\n \n pairs = defaultdict(lambda: defaultdict(int))\n for (tag, word) in zip(sequences_A, sequences_B):\n pairs[tag][word]+=1\n \n return pairs\n\n\ntags = [t for i, (w, t) in enumerate(data.stream())]\nwords = [w for i, (w, t) in enumerate(data.stream())]\n\n# Calculate C(t_i, w_i)\nemission_counts = pair_counts(tags, words)\nprint(emission_counts.keys())\n\nassert len(emission_counts) == 12, \\\n \"Uh oh. There should be 12 tags in your dictionary.\"\nassert max(emission_counts[\"NOUN\"], key=emission_counts[\"NOUN\"].get) == 'time', \\\n \"Hmmm...'time' is expected to be the most common NOUN.\"\nHTML('<div class=\"alert alert-block alert-success\">Your emission counts look good!</div>')", "dict_keys(['NOUN', 'VERB', 'PRON', 'ADV', '.', 'CONJ', 'ADP', 'DET', 'PRT', 'ADJ', 'X', 'NUM'])\n" ] ], [ [ "### IMPLEMENTATION: Most Frequent Class Tagger\n\nUse the `pair_counts()` function and the training dataset to find the most frequent class label for each word in the training data, and populate the `mfc_table` below. 
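For reference, a minimal sketch of that lookup might look like the snippet below (a sketch only; it assumes a `word_counts` mapping from each training word to its per-tag counts, e.g. the result of `pair_counts(words, tags)` built in the next cell):\n\n```python\n# illustrative sketch: keep the tag observed most often with each training word\nmfc_table = {word: max(tag_counts, key=tag_counts.get) for word, tag_counts in word_counts.items()}\n```\n\n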
The table keys should be words, and the values should be the appropriate tag string.\n\nThe `MFCTagger` class is provided to mock the interface of Pomegranite HMM models so that they can be used interchangeably.", "_____no_output_____" ] ], [ [ "# Create a lookup table mfc_table where mfc_table[word] contains the tag label most frequently assigned to that word\nfrom collections import namedtuple\n\nFakeState = namedtuple(\"FakeState\", \"name\")\n\nclass MFCTagger:\n # NOTE: You should not need to modify this class or any of its methods\n missing = FakeState(name=\"<MISSING>\")\n \n def __init__(self, table):\n self.table = defaultdict(lambda: MFCTagger.missing)\n self.table.update({word: FakeState(name=tag) for word, tag in table.items()})\n \n def viterbi(self, seq):\n \"\"\"This method simplifies predictions by matching the Pomegranate viterbi() interface\"\"\"\n return 0., list(enumerate([\"<start>\"] + [self.table[w] for w in seq] + [\"<end>\"]))\n\n\n# TODO: calculate the frequency of each tag being assigned to each word (hint: similar, but not\n# the same as the emission probabilities) and use it to fill the mfc_table\n\n\ntags = [t for i, (w, t) in enumerate(data.training_set.stream())]\nwords = [w for i, (w, t) in enumerate(data.training_set.stream())]\n\nword_counts = pair_counts(words, tags)\n\nmfc_table = {}\n\nfor w, t in word_counts.items():\n mfc_table[w] = max(t.keys(), key=lambda key: t[key])\n \n \n#dict((word, max(tags.keys(), key=lambda key: tags[key])) for word, tags in word_counts.items())\n\n# DO NOT MODIFY BELOW THIS LINE\nmfc_model = MFCTagger(mfc_table) # Create a Most Frequent Class tagger instance\n\nassert len(mfc_table) == len(data.training_set.vocab), \"\"\nassert all(k in data.training_set.vocab for k in mfc_table.keys()), \"\"\nassert sum(int(k not in mfc_table) for k in data.testing_set.vocab) == 5521, \"\"\nHTML('<div class=\"alert alert-block alert-success\">Your MFC tagger has all the correct words!</div>')", "_____no_output_____" ] ], [ [ "### Making Predictions with a Model\nThe helper functions provided below interface with Pomegranate network models & the mocked MFCTagger to take advantage of the [missing value](http://pomegranate.readthedocs.io/en/latest/nan.html) functionality in Pomegranate through a simple sequence decoding function. Run these functions, then run the next cell to see some of the predictions made by the MFC tagger.", "_____no_output_____" ] ], [ [ "def replace_unknown(sequence):\n \"\"\"Return a copy of the input sequence where each unknown word is replaced\n by the literal string value 'nan'. 
Pomegranate will ignore these values\n during computation.\n \"\"\"\n return [w if w in data.training_set.vocab else 'nan' for w in sequence]\n\ndef simplify_decoding(X, model):\n \"\"\"X should be a 1-D sequence of observations for the model to predict\"\"\"\n _, state_path = model.viterbi(replace_unknown(X))\n return [state[1].name for state in state_path[1:-1]] # do not show the start/end state predictions", "_____no_output_____" ] ], [ [ "### Example Decoding Sequences with MFC Tagger", "_____no_output_____" ] ], [ [ "for key in data.testing_set.keys[:3]:\n print(\"Sentence Key: {}\\n\".format(key))\n print(\"Predicted labels:\\n-----------------\")\n print(simplify_decoding(data.sentences[key].words, mfc_model))\n print()\n print(\"Actual labels:\\n--------------\")\n print(data.sentences[key].tags)\n print(\"\\n\")", "Sentence Key: b100-28144\n\nPredicted labels:\n-----------------\n['CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.']\n\nActual labels:\n--------------\n('CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.')\n\n\nSentence Key: b100-23146\n\nPredicted labels:\n-----------------\n['PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.']\n\nActual labels:\n--------------\n('PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\n\nSentence Key: b100-35462\n\nPredicted labels:\n-----------------\n['DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', '<MISSING>', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADV', 'NOUN', '.']\n\nActual labels:\n--------------\n('DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.')\n\n\n" ] ], [ [ "### Evaluating Model Accuracy\n\nThe function below will evaluate the accuracy of the MFC tagger on the collection of all sentences from a text corpus. ", "_____no_output_____" ] ], [ [ "def accuracy(X, Y, model):\n \"\"\"Calculate the prediction accuracy by using the model to decode each sequence\n in the input X and comparing the prediction with the true labels in Y.\n \n The X should be an array whose first dimension is the number of sentences to test,\n and each element of the array should be an iterable of the words in the sequence.\n The arrays X and Y should have the exact same shape.\n \n X = [(\"See\", \"Spot\", \"run\"), (\"Run\", \"Spot\", \"run\", \"fast\"), ...]\n Y = [(), (), ...]\n \"\"\"\n correct = total_predictions = 0\n for observations, actual_tags in zip(X, Y):\n \n # The model.viterbi call in simplify_decoding will return None if the HMM\n # raises an error (for example, if a test sentence contains a word that\n # is out of vocabulary for the training set). 
Any exception counts the\n # full sentence as an error (which makes this a conservative estimate).\n try:\n most_likely_tags = simplify_decoding(observations, model)\n correct += sum(p == t for p, t in zip(most_likely_tags, actual_tags))\n except:\n pass\n total_predictions += len(observations)\n return correct / total_predictions", "_____no_output_____" ] ], [ [ "#### Evaluate the accuracy of the MFC tagger\nRun the next cell to evaluate the accuracy of the tagger on the training and test corpus.", "_____no_output_____" ] ], [ [ "mfc_training_acc = accuracy(data.training_set.X, data.training_set.Y, mfc_model)\nprint(\"training accuracy mfc_model: {:.2f}%\".format(100 * mfc_training_acc))\n\nmfc_testing_acc = accuracy(data.testing_set.X, data.testing_set.Y, mfc_model)\nprint(\"testing accuracy mfc_model: {:.2f}%\".format(100 * mfc_testing_acc))\n\nassert mfc_training_acc >= 0.955, \"Uh oh. Your MFC accuracy on the training set doesn't look right.\"\nassert mfc_testing_acc >= 0.925, \"Uh oh. Your MFC accuracy on the testing set doesn't look right.\"\nHTML('<div class=\"alert alert-block alert-success\">Your MFC tagger accuracy looks correct!</div>')", "training accuracy mfc_model: 95.72%\ntesting accuracy mfc_model: 93.01%\n" ] ], [ [ "## Step 3: Build an HMM tagger\n---\nThe HMM tagger has one hidden state for each possible tag, and parameterized by two distributions: the emission probabilties giving the conditional probability of observing a given **word** from each hidden state, and the transition probabilities giving the conditional probability of moving between **tags** during the sequence.\n\nWe will also estimate the starting probability distribution (the probability of each **tag** being the first tag in a sequence), and the terminal probability distribution (the probability of each **tag** being the last tag in a sequence).\n\nThe maximum likelihood estimate of these distributions can be calculated from the frequency counts as described in the following sections where you'll implement functions to count the frequencies, and finally build the model. The HMM model will make predictions according to the formula:\n\n$$t_i^n = \\underset{t_i^n}{\\mathrm{argmax}} \\prod_{i=1}^n P(w_i|t_i) P(t_i|t_{i-1})$$\n\nRefer to Speech & Language Processing [Chapter 10](https://web.stanford.edu/~jurafsky/slp3/10.pdf) for more information.", "_____no_output_____" ], [ "### IMPLEMENTATION: Unigram Counts\n\nComplete the function below to estimate the co-occurrence frequency of each symbol over all of the input sequences. The unigram probabilities in our HMM model are estimated from the formula below, where N is the total number of samples in the input. (You only need to compute the counts for now.)\n\n$$P(tag_1) = \\frac{C(tag_1)}{N}$$", "_____no_output_____" ] ], [ [ "def unigram_counts(sequences):\n \"\"\"Return a dictionary keyed to each unique value in the input sequence list that\n counts the number of occurrences of the value in the sequences list. 
The sequences\n    collection should be a 2-dimensional array.\n    \n    For example, if the tag NOUN appears 275558 times over all the input sequences,\n    then you should return a dictionary such that your_unigram_counts[NOUN] == 275558.\n    \"\"\"\n    # TODO: Finish this function!\n    counts = {}\n    for i, sentence in enumerate(sequences):\n        for x, y in enumerate(sentence):\n            counts[y] = counts[y]+1 if y in counts else 1\n    return counts\n\n# TODO: call unigram_counts with a list of tag sequences from the training set\ntag_unigrams = unigram_counts(data.training_set.Y)\n\nassert set(tag_unigrams.keys()) == data.training_set.tagset, \\\n    \"Uh oh. It looks like your tag counts doesn't include all the tags!\"\nassert min(tag_unigrams, key=tag_unigrams.get) == 'X', \\\n    \"Hmmm...'X' is expected to be the least common class\"\nassert max(tag_unigrams, key=tag_unigrams.get) == 'NOUN', \\\n    \"Hmmm...'NOUN' is expected to be the most common class\"\nHTML('<div class=\"alert alert-block alert-success\">Your tag unigrams look good!</div>')", "_____no_output_____" ] ], [ [ "### IMPLEMENTATION: Bigram Counts\n\nComplete the function below to estimate the co-occurrence frequency of each pair of symbols in each of the input sequences. These counts are used in the HMM model to estimate the bigram probability of two tags from the frequency counts according to the formula: $$P(tag_2|tag_1) = \\frac{C(tag_1, tag_2)}{C(tag_1)}$$\n", "_____no_output_____" ] ], [ [ "def bigram_counts(sequences):\n    \"\"\"Return a dictionary keyed to each unique PAIR of values in the input sequences\n    list that counts the number of occurrences of the pair in the sequences list. The input\n    should be a 2-dimensional array.\n    \n    For example, if the pair of tags (NOUN, VERB) appears 61582 times, then you should\n    return a dictionary such that your_bigram_counts[(NOUN, VERB)] == 61582\n    \"\"\"\n    counts = {}\n    for i, sentence in enumerate(sequences):\n        for y in range(len(sentence) - 1):\n            counts[(sentence[y], sentence[y+1])] = counts[(sentence[y], sentence[y+1])] + 1 if (sentence[y], sentence[y+1]) in counts else 1\n    return counts\n\n# TODO: call bigram_counts with a list of tag sequences from the training set\ntag_bigrams = bigram_counts(data.training_set.Y)\n\nassert len(tag_bigrams) == 144, \\\n    \"Uh oh. 
There should be 144 pairs of bigrams (12 tags x 12 tags)\"\nassert min(tag_bigrams, key=tag_bigrams.get) in [('X', 'NUM'), ('PRON', 'X')], \\\n \"Hmmm...The least common bigram should be one of ('X', 'NUM') or ('PRON', 'X').\"\nassert max(tag_bigrams, key=tag_bigrams.get) in [('DET', 'NOUN')], \\\n \"Hmmm...('DET', 'NOUN') is expected to be the most common bigram.\"\nHTML('<div class=\"alert alert-block alert-success\">Your tag bigrams look good!</div>')", "_____no_output_____" ] ], [ [ "### IMPLEMENTATION: Sequence Starting Counts\nComplete the code below to estimate the bigram probabilities of a sequence starting with each tag.", "_____no_output_____" ] ], [ [ "def starting_counts(sequences):\n \"\"\"Return a dictionary keyed to each unique value in the input sequences list\n that counts the number of occurrences where that value is at the beginning of\n a sequence.\n \n For example, if 8093 sequences start with NOUN, then you should return a\n dictionary such that your_starting_counts[NOUN] == 8093\n \"\"\"\n counts = {}\n \n for i, sentence in enumerate(sequences):\n counts[sentence[0]] = counts[sentence[0]] + 1 if sentence[0] in counts else 1\n return counts\n\n# TODO: Calculate the count of each tag starting a sequence\ntag_starts = starting_counts(data.training_set.Y)\n\nassert len(tag_starts) == 12, \"Uh oh. There should be 12 tags in your dictionary.\"\nassert min(tag_starts, key=tag_starts.get) == 'X', \"Hmmm...'X' is expected to be the least common starting bigram.\"\nassert max(tag_starts, key=tag_starts.get) == 'DET', \"Hmmm...'DET' is expected to be the most common starting bigram.\"\nHTML('<div class=\"alert alert-block alert-success\">Your starting tag counts look good!</div>')", "_____no_output_____" ] ], [ [ "### IMPLEMENTATION: Sequence Ending Counts\nComplete the function below to estimate the bigram probabilities of a sequence ending with each tag.", "_____no_output_____" ] ], [ [ "def ending_counts(sequences):\n \"\"\"Return a dictionary keyed to each unique value in the input sequences list\n that counts the number of occurrences where that value is at the end of\n a sequence.\n \n For example, if 18 sequences end with DET, then you should return a\n dictionary such that your_starting_counts[DET] == 18\n \"\"\"\n counts = {}\n for i, sentence in enumerate(sequences):\n index = len(sentence) -1\n counts[sentence[index]] = counts[sentence[index]] + 1 if sentence[index] in counts else 1\n return counts\n\n# TODO: Calculate the count of each tag ending a sequence\ntag_ends = ending_counts(data.training_set.Y)\n\nassert len(tag_ends) == 12, \"Uh oh. There should be 12 tags in your dictionary.\"\nassert min(tag_ends, key=tag_ends.get) in ['X', 'CONJ'], \"Hmmm...'X' or 'CONJ' should be the least common ending bigram.\"\nassert max(tag_ends, key=tag_ends.get) == '.', \"Hmmm...'.' 
is expected to be the most common ending bigram.\"\nHTML('<div class=\"alert alert-block alert-success\">Your ending tag counts look good!</div>')", "_____no_output_____" ] ], [ [ "### IMPLEMENTATION: Basic HMM Tagger\nUse the tag unigrams and bigrams calculated above to construct a hidden Markov tagger.\n\n- Add one state per tag\n  - The emission distribution at each state should be estimated with the formula: $P(w|t) = \\frac{C(t, w)}{C(t)}$\n- Add an edge from the starting state `basic_model.start` to each tag\n  - The transition probability should be estimated with the formula: $P(t|start) = \\frac{C(start, t)}{C(start)}$\n- Add an edge from each tag to the end state `basic_model.end`\n  - The transition probability should be estimated with the formula: $P(end|t) = \\frac{C(t, end)}{C(t)}$\n- Add an edge between _every_ pair of tags\n  - The transition probability should be estimated with the formula: $P(t_2|t_1) = \\frac{C(t_1, t_2)}{C(t_1)}$", "_____no_output_____" ] ], [ [ "basic_model = HiddenMarkovModel(name=\"base-hmm-tagger\")\n\n# TODO: create states with emission probability distributions P(word | tag) and add to the model\n# (Hint: you may need to loop & create/add new states)\n\n\nstates = []\nfor tag in data.training_set.tagset:\n    tag_distribution = {word: emission_counts[tag][word]/tag_unigrams[tag] for word in set(emission_counts[tag])}\n    tag_emissions = DiscreteDistribution(tag_distribution)\n    tag_state = State(tag_emissions, name=tag)\n    states.append(tag_state)\nbasic_model.add_states(states)\n\n# TODO: add edges between states for the observed transition frequencies P(tag_i | tag_i-1)\n# (Hint: you may need to loop & add transitions)\n\nfor state in states:\n    basic_model.add_transition(basic_model.start, state, tag_starts[state.name]/sum(tag_starts.values()))\n\nfor state in states:\n    basic_model.add_transition(state, basic_model.end, tag_ends[state.name]/tag_unigrams[state.name])\n\nfor state1 in states:\n    for state2 in states:\n        basic_model.add_transition(state1, state2, tag_bigrams[(state1.name,state2.name)]/tag_unigrams[state1.name])\n\n# NOTE: YOU SHOULD NOT NEED TO MODIFY ANYTHING BELOW THIS LINE\n# finalize the model\nbasic_model.bake()\n\nassert all(tag in set(s.name for s in basic_model.states) for tag in data.training_set.tagset), \\\n    \"Every state in your network should use the name of the associated tag, which must be one of the training set tags.\"\nassert basic_model.edge_count() == 168, \\\n    (\"Your network should have an edge from the start node to each state, one edge between every \" +\n    \"pair of tags (states), and an edge from each state to the end node.\")\nHTML('<div class=\"alert alert-block alert-success\">Your HMM network topology looks good!</div>')", "_____no_output_____" ], [ "hmm_training_acc = accuracy(data.training_set.X, data.training_set.Y, basic_model)\nprint(\"training accuracy basic hmm model: {:.2f}%\".format(100 * hmm_training_acc))\n\nhmm_testing_acc = accuracy(data.testing_set.X, data.testing_set.Y, basic_model)\nprint(\"testing accuracy basic hmm model: {:.2f}%\".format(100 * hmm_testing_acc))\n\nassert hmm_training_acc > 0.97, \"Uh oh. Your HMM accuracy on the training set doesn't look right.\"\nassert hmm_testing_acc > 0.955, \"Uh oh. Your HMM accuracy on the testing set doesn't look right.\"\nHTML('<div class=\"alert alert-block alert-success\">Your HMM tagger accuracy looks correct! 
Congratulations, you\\'ve finished the project.</div>')", "training accuracy basic hmm model: 97.54%\ntesting accuracy basic hmm model: 96.16%\n" ] ], [ [ "### Example Decoding Sequences with the HMM Tagger", "_____no_output_____" ] ], [ [ "for key in data.testing_set.keys[:3]:\n print(\"Sentence Key: {}\\n\".format(key))\n print(\"Predicted labels:\\n-----------------\")\n print(simplify_decoding(data.sentences[key].words, basic_model))\n print()\n print(\"Actual labels:\\n--------------\")\n print(data.sentences[key].tags)\n print(\"\\n\")", "Sentence Key: b100-28144\n\nPredicted labels:\n-----------------\n['CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.']\n\nActual labels:\n--------------\n('CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.')\n\n\nSentence Key: b100-23146\n\nPredicted labels:\n-----------------\n['PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.']\n\nActual labels:\n--------------\n('PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\n\nSentence Key: b100-35462\n\nPredicted labels:\n-----------------\n['DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.']\n\nActual labels:\n--------------\n('DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.')\n\n\n" ] ], [ [ "\n## Finishing the project\n---\n\n<div class=\"alert alert-block alert-info\">\n**Note:** **SAVE YOUR NOTEBOOK**, then run the next cell to generate an HTML copy. You will zip & submit both this file and the HTML copy for review.\n</div>", "_____no_output_____" ] ], [ [ "!!jupyter nbconvert *.ipynb", "_____no_output_____" ] ], [ [ "## Step 4: [Optional] Improving model performance\n---\nThere are additional enhancements that can be incorporated into your tagger that improve performance on larger tagsets where the data sparsity problem is more significant. The data sparsity problem arises because the same amount of data split over more tags means there will be fewer samples in each tag, and there will be more missing data tags that have zero occurrences in the data. The techniques in this section are optional.\n\n- [Laplace Smoothing](https://en.wikipedia.org/wiki/Additive_smoothing) (pseudocounts)\n Laplace smoothing is a technique where you add a small, non-zero value to all observed counts to offset for unobserved values.\n\n- Backoff Smoothing\n Another smoothing technique is to interpolate between n-grams for missing data. This method is more effective than Laplace smoothing at combatting the data sparsity problem. 
Refer to chapters 4, 9, and 10 of the [Speech & Language Processing](https://web.stanford.edu/~jurafsky/slp3/) book for more information.\n\n- Extending to Trigrams\n HMM taggers have achieved better than 96% accuracy on this dataset with the full Penn treebank tagset using an architecture described in [this](http://www.coli.uni-saarland.de/~thorsten/publications/Brants-ANLP00.pdf) paper. Altering your HMM to achieve the same performance would require implementing deleted interpolation (described in the paper), incorporating trigram probabilities in your frequency tables, and re-implementing the Viterbi algorithm to consider three consecutive states instead of two.\n\n### Obtain the Brown Corpus with a Larger Tagset\nRun the code below to download a copy of the brown corpus with the full NLTK tagset. You will need to research the available tagset information in the NLTK docs and determine the best way to extract the subset of NLTK tags you want to explore. If you write the following the format specified in Step 1, then you can reload the data using all of the code above for comparison.\n\nRefer to [Chapter 5](http://www.nltk.org/book/ch05.html) of the NLTK book for more information on the available tagsets.", "_____no_output_____" ] ], [ [ "import nltk\nfrom nltk import pos_tag, word_tokenize\nfrom nltk.corpus import brown\n\nnltk.download('brown')\ntraining_corpus = nltk.corpus.brown\ntraining_corpus.tagged_sents()[0]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]