| column | dtype | min | max |
| --- | --- | --- | --- |
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 6 | 14.9M |
| ext | stringclasses | 1 value | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 6 | 260 |
| max_stars_repo_name | stringlengths | 6 | 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 41 |
| max_stars_repo_licenses | sequence | | |
| max_stars_count | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 6 | 260 |
| max_issues_repo_name | stringlengths | 6 | 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 41 |
| max_issues_repo_licenses | sequence | | |
| max_issues_count | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 6 | 260 |
| max_forks_repo_name | stringlengths | 6 | 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 41 |
| max_forks_repo_licenses | sequence | | |
| max_forks_count | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 |
| avg_line_length | float64 | 2 | 1.04M |
| max_line_length | int64 | 2 | 11.2M |
| alphanum_fraction | float64 | 0 | 1 |
| cells | sequence | | |
| cell_types | sequence | | |
| cell_type_groups | sequence | | |
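Each row that follows is one Jupyter notebook file together with its GitHub star, issue, and fork metadata. As a rough illustrative sketch only (not part of the dump itself), a locally saved shard of this data could be inspected with pandas to reproduce the per-column statistics above; the file name `notebooks_shard.parquet` is a placeholder assumption.

```python
import pandas as pd

# Placeholder path: assumes one shard of this dump was saved locally as Parquet.
df = pd.read_parquet("notebooks_shard.parquet")

# Column names and dtypes, mirroring the schema table above.
print(df.dtypes)

# A few of the per-column statistics the viewer reports:
print(df["size"].min(), df["size"].max())            # file size range
print(df["hexsha"].str.len().agg(["min", "max"]))    # fixed 40-character SHA-1 hashes
print(df["alphanum_fraction"].describe())            # bounded between 0 and 1
```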
hexsha: e7a15b8e286589d1edd4114d57b22ae9bfc2d08a
size: 21,556
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: tasks/extract_text/notebooks/text_preprocessing_jordi.ipynb
max_stars_repo_name: jordiplanascuchi/policy-data-analyzer
max_stars_repo_head_hexsha: 8d6bc37cb6d039196c0edb74e0be70e3bd657317
max_stars_repo_licenses: [ "FTL", "RSA-MD" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tasks/extract_text/notebooks/text_preprocessing_jordi.ipynb
max_issues_repo_name: jordiplanascuchi/policy-data-analyzer
max_issues_repo_head_hexsha: 8d6bc37cb6d039196c0edb74e0be70e3bd657317
max_issues_repo_licenses: [ "FTL", "RSA-MD" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tasks/extract_text/notebooks/text_preprocessing_jordi.ipynb
max_forks_repo_name: jordiplanascuchi/policy-data-analyzer
max_forks_repo_head_hexsha: 8d6bc37cb6d039196c0edb74e0be70e3bd657317
max_forks_repo_licenses: [ "FTL", "RSA-MD" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 30.750357
max_line_length: 732
alphanum_fraction: 0.583735
[ [ [ "# Short instruction to make sure we are working in the right environment\n!conda info", "_____no_output_____" ], [ "#import libraries\nimport os, io\nimport tempfile\nimport time\nimport re\nimport pdf2image\nfrom pdf2image import convert_from_path\nfrom pdf2image import convert_from_bytes\n\nfrom PyPDF2 import PdfFileReader\n\nfrom IPython.display import display, Image\nimport pytesseract\nimport tesserocr\nimport pdftotext\n\nimport pikepdf\nfrom pikepdf import Pdf, Page\nimport pdfminer\nimport pdfplumber\n\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.converter import PDFPageAggregator\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams, LTTextBox, LTTextLine\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfpage import PDFTextExtractionNotAllowed\n\n# to import from google drive\nimport pickle\nimport os.path\nfrom googleapiclient import discovery\nfrom googleapiclient.discovery import build\nfrom googleapiclient import errors\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom googleapiclient.http import MediaIoBaseDownload\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\nfrom PIL import Image", "_____no_output_____" ], [ "# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/drive']\n\n\"\"\"Shows basic usage of the Drive v3 API.\nPrints the names and ids of the first 10 files the user has access to.\n\"\"\"\ncreds = None\n# The file token.pickle stores the user's access and refresh tokens, and is\n# created automatically when the authorization flow completes for the first\n# time.\nif os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n# If there are no (valid) credentials available, let the user log in.\nif not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'client_secret.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\nservice = build('drive', 'v3', credentials=creds)\n\n# Call the Drive v3 API\nresults = service.files().list(\n pageSize=10, fields=\"nextPageToken, files(id, name)\").execute()\nitems = results.get('files', [])\n\nif not items:\n print('No files found.')\nelse:\n print('Files:')\n for item in items:\n print(u'{0} ({1})'.format(item['name'], item['id']))\n", "Files:\nCopy of Text_extraction_example.ipynb (1ce-FVwyzcNuIXJYNQ1qybKpMSJ6Yn-5V)\nWork_summary_jordi_201122.xlsx (1KCWqsZ3aUAARRX-6fYcV5rTmxRQulsWp)\nWork reccoring (1cAorJ-zPaorVAI7KmwDtC5pd0v7w-PS2)\nOCR.ipynb (1hWljx3wduIk1xJGdkQil2lID7H_Tkvd8)\nNormativaForestal_Chile.pdf (1PJVk7utukeA1Y0riPo_53gAcCCqQ4D3H)\nPrograma_comision_forestal_mex.pdf (10jCcgdxWFoO0AaQXwWk7RsA0RD77n4la)\nCopy of 11255.pdf (1HdCavlCXP-tAlaAmjPOm-i7efxUsiIsw)\nCopy of Nº 004-2020-P-CE-PJ.pdf (1Y5jdAcakQfIP4A5H06c6lQoNBCFk9-me)\nCopy of 3000.pdf (1rwaPoFock58hHlWIot3KEnUTuEl1p6n7)\nDocument de prova per pdf.pdf (1Z5-HSv2Ktox4UGLMM9MtJgJFCwZ5c70Z)\n" ], [ "# 'application/vnd.google-apps.spreadsheet' and parents in '{}'\".format(folder_id)\n# \"mimeType = 'application/vnd.google-apps.pdf' and parents in 
'{}'\".format(folder_id)\nfolder_id = \"1JU2YWwn88_0hwP5EYuF5FB2LLTYVZKND\"\nfiletype = \"application/pdf\"\nquery = f\"'{folder_id}' in parents\"\nquery = f\"mimeType='{filetype}'\"\nquery = f\"'{folder_id}' in parents and mimeType='{filetype}'\"\npage_token = None\nwhile True:\n response = service.files().list(q = query,\n spaces=\"drive\",\n fields=\"nextPageToken, files(id, name, mimeType)\",\n pageToken=page_token).execute()\n for file in response.get('files', []):\n # Process change\n print('Found file: %s (%s)' % (file.get('name'), file.get('id')))\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break", "Found file: PDF_995_dummy.pdf (1BewQJdPcBesbYpS0Y-SgsmxlhSBVo363)\nFound file: dummy.pdf (1wls-EM-h4eOeMJTWLrT21DjUGgsvSKGe)\nFound file: Programa_comision_forestal_mex.pdf (10jCcgdxWFoO0AaQXwWk7RsA0RD77n4la)\nFound file: Copy of 11255.pdf (1HdCavlCXP-tAlaAmjPOm-i7efxUsiIsw)\nFound file: Copy of Nº 004-2020-P-CE-PJ.pdf (1Y5jdAcakQfIP4A5H06c6lQoNBCFk9-me)\nFound file: Copy of 3000.pdf (1rwaPoFock58hHlWIot3KEnUTuEl1p6n7)\nFound file: Document de prova per pdf.pdf (1Z5-HSv2Ktox4UGLMM9MtJgJFCwZ5c70Z)\nFound file: Decreto_ejecutivo_57.pdf (1egVmcj6Ssiupf1mX3BEDBUkYBx7r1tfU)\nFound file: Decreto_ejecutivo.pdf (1_qryhaX0h8obAyLsYzf-u_ph0WVTrL1X)\nFound file: ORDENANZA REGULADORA DE TALA Y PODA DE NEJAPA.pdf (1QMPmYxavfGy2FiWc9yOuxRyfyEu66aj0)\n" ], [ "file_id = ['1wls-EM-h4eOeMJTWLrT21DjUGgsvSKGe', '1BewQJdPcBesbYpS0Y-SgsmxlhSBVo363', '10jCcgdxWFoO0AaQXwWk7RsA0RD77n4la', '1egVmcj6Ssiupf1mX3BEDBUkYBx7r1tfU', '1Z5-HSv2Ktox4UGLMM9MtJgJFCwZ5c70Z', '1HdCavlCXP-tAlaAmjPOm-i7efxUsiIsw', '1Y5jdAcakQfIP4A5H06c6lQoNBCFk9-me', '1rwaPoFock58hHlWIot3KEnUTuEl1p6n7']\nfilename = \"Decreto_ejecutivo_57.pdf\"\n\nrequest = service.files().get_media(fileId=file_id[1])\n\nfh = io.BytesIO()\ndownloader = MediaIoBaseDownload(fh, request)\ndone = False\nwhile done is False:\n status, done = downloader.next_chunk()\n print(\"Download %d%%.\" % int(status.progress() * 100))", "Download 100%.\n" ], [ "# pdf = PdfFileReader(fh)\n# for page in range(0, pdf.getNumPages()):\n# pdf_page = pdf.getPage(page) #Retrieve the content of each page\n# pdf_content = pdf_page.extractText()\n# if pdf_content == \"\":\n# print(\"no text\")", "_____no_output_____" ], [ "# pdf_content", "_____no_output_____" ], [ "pdf = Pdf.open('../tests/resources/fourpages.pdf')\nextracted_text = \"\"\nparser = PDFParser(fh)\ndocument = PDFDocument(parser)\nif not document.is_extractable:\n raise PDFTextExtractionNotAllowed\nsio = io.StringIO()\nrsrcmgr = PDFResourceManager()\nlaparams = LAParams()\ndevice = PDFPageAggregator(rsrcmgr, sio, laparams=laparams)\ninterpreter = PDFPageInterpreter(rsrcmgr, device)\n\n\nfor page in PDFPage.create_pages(document):\n interpreter.process_page(page)\ntext = sio.getvalue()\n \n# layout = device.get_result()\n# for lt_obj in layout:\n# if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n# extracted_text += lt_obj.get_text()\n# else:\n# print(\"no text to extract\")", "_____no_output_____" ], [ "file_id = '1egVmcj6Ssiupf1mX3BEDBUkYBx7r1tfU'\nfilename = \"Decreto_ejecutivo_57.pdf\"\ndef open_file_from_Gdrive(file_id):\n request = service.files().get_media(fileId=file_id)\n\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n print(\"Download %d%%.\" % int(status.progress() * 100))\n scan = convert_from_bytes(fh.getvalue())\n fh.close()\n \ndef 
check_file_content(bytesIO):\n bytesIO.getValue()", "_____no_output_____" ], [ "extracted_text", "_____no_output_____" ], [ "fh.read()", "_____no_output_____" ], [ " gradient = np.linspace(0, 1, 256)\n gradient = np.vstack((gradient, gradient))\n image_data = cmap(gradient, bytes=True)\n image = Image.fromarray(image_data, 'RGBA')\n\n # ostream = io.FileIO('../cmaps/' + cmap_name + '.png', 'wb')\n # image.save(ostream, format='PNG')\n # ostream.close()\n\n ostream = io.BytesIO()\n image.save(ostream, format='PNG')\n cbar_png_bytes = ostream.getvalue()\n ostream.close()\n\n cbar_png_data = base64.b64encode(cbar_png_bytes)\n cbar_png_bytes = cbar_png_data.decode('unicode_escape')\n\n return cbar_png_bytes ", "_____no_output_____" ], [ "# Mouting drive Google Drive\nfrom google.colab import drive\ndrive.mount('/content/drive/')", "_____no_output_____" ], [ "filename = \"Decreto_ejecutivo_57.pdf\" \n# inp_path = \"/content/drive/MyDrive/WRI-LatinAmerica-Talent/Preprocessing/raw/\"\n# out_path = \"/content/drive/MyDrive/WRI-LatinAmerica-Talent/Preprocessing/OCR/outputs\"\n# filepaths = [os.path.join(inp_path, file) for file in os.listdir(inp_path)]\n# filepaths", "_____no_output_____" ] ], [ [ "# Checking whether the files are scanned images or true pdfs", "_____no_output_____" ] ], [ [ "def is_image(file_path):\n with open(file_path, \"rb\") as f:\n return pdftotext.PDF(f)\n \n", "_____no_output_____" ], [ "print(is_image(filename))", "_____no_output_____" ] ], [ [ "# Converting pdf to image files and improving quality", "_____no_output_____" ] ], [ [ "def get_image1(file_path):\n \"\"\"Get image out of pdf file_path. Splits pdf file into PIL images of each of its pages.\n \"\"\"\n return convert_from_path(file_path, 500)\n\n# Performance tips according to pdf2image: \n# Using an output folder is significantly faster if you are using an SSD. Otherwise i/o usually becomes the bottleneck.\n# Using multiple threads can give you some gains but avoid more than 4 as this will cause i/o bottleneck (even on my NVMe SSD!).", "_____no_output_____" ], [ "pages = get_image1(filepaths[0])\ndisplay(pages[0])", "_____no_output_____" ] ], [ [ "What can we do here to improve image quality? 
It already seems pretty good!", "_____no_output_____" ], [ "# Evaluating extraction time from each method and saving text to disk", "_____no_output_____" ] ], [ [ "def export_ocr(text, file, extract, out=out_path):\n \"\"\" Export ocr output text using extract method to file at out\n \"\"\"\n filename = f'{os.path.splitext(os.path.basename(file))[0]}_{extract}.txt'\n with open(os.path.join(out, filename), 'w') as the_file:\n the_file.write(text)\n\ndef wrap_pagenum(page_text, page_num):\n \"\"\" Wrap page_text with page_num tag\n \"\"\"\n return f\"<p n={page_num}>\" + page_text + \"</p>\"", "_____no_output_____" ], [ "# pytesseract extraction\nstart_time = time.time()\nfor file in filepaths:\n pages = get_image1(file)\n text = \"\"\n for pageNum, imgBlob in enumerate(pages):\n page_text = pytesseract.image_to_string(imgBlob, lang=\"spa\")\n text += wrap_pagenum(page_text, pageNum)\n export_ocr(text, file, \"pytesseract\") # write extracted text to disk\nprint(\"--- %s seconds ---\" % (time.time() - start_time))", "_____no_output_____" ], [ "# tesserocr extraction\nstart_time = time.time()\nfor file in filepaths:\n pages = get_image1(file)\n text = \"\"\n for pageNum, imgBlob in enumerate(pages):\n page_text = tesserocr.image_to_text(imgBlob, lang=\"spa\")\n text += wrap_pagenum(page_text, pageNum)\n export_ocr(text, file, \"tesserocr\") # write extracted text to disk\nprint(\"--- %s seconds ---\" % (time.time() - start_time))", "_____no_output_____" ], [ "# tesserocr extraction using the PyTessBaseAPI\nstart_time = time.time()\nfor file in filepaths:\n pages = get_image1(file)\n text = \"\"\n with tesserocr.PyTessBaseAPI(lang=\"spa\") as api:\n for pageNum, imgBlob in enumerate(pages):\n api.SetImage(imgBlob)\n page_text = api.GetUTF8Text()\n text += wrap_pagenum(page_text, pageNum)\n export_ocr(text, file, \"tesserocr_pytess\") # write extracted text to disk\nprint(\"--- %s seconds ---\" % (time.time() - start_time))", "_____no_output_____" ] ], [ [ "It seems that the pytesseract package provides the fastest extraction and by looking at the extracted text it doesn't seem to exist any difference in the output of all the tested methods.", "_____no_output_____" ] ], [ [ "# comparison between text extracted by the different methods\nos.listdir(out_path)", "_____no_output_____" ], [ "# TODO: perform a more programatical comparison between extracted texts", "_____no_output_____" ] ], [ [ "# Let's look at the extracted text", "_____no_output_____" ] ], [ [ "with open(os.path.join(out_path, 'Decreto_ejecutivo_57_pytesseract.txt')) as text:\n extracted_text = text.read()\nextracted_text", "_____no_output_____" ], [ "# Replace \\x0c (page break) by \\n\n# Match 1 or more occurrences of \\n if preceeded by one occurrence of \\n OR \n# Match 1 or more occurrences of \\s (whitespace) if preceeded by one occurrence of \\n OR \n# Match one occurrence of \\n if it isn't followed by \\n\nprint(re.sub(\"(?<=\\n)\\n+|(?<=\\n)\\s+|\\n(?!\\n)\", \" \", extracted_text.replace(\"\\x0c\", \"\\n\")))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
hexsha: e7a169ec897198d34e134800bdfc7a644b6f084a
size: 278,643
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: content/sections/section3/notebook/cs109a_section_3.ipynb
max_stars_repo_name: lingcog/2019-CS109A
max_stars_repo_head_hexsha: f1eaa62976fe989c3ad3f3ab4b8dd5d71574a2c3
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: content/sections/section3/notebook/cs109a_section_3.ipynb
max_issues_repo_name: lingcog/2019-CS109A
max_issues_repo_head_hexsha: f1eaa62976fe989c3ad3f3ab4b8dd5d71574a2c3
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: content/sections/section3/notebook/cs109a_section_3.ipynb
max_forks_repo_name: lingcog/2019-CS109A
max_forks_repo_head_hexsha: f1eaa62976fe989c3ad3f3ab4b8dd5d71574a2c3
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 132.560894
max_line_length: 46,068
alphanum_fraction: 0.814523
[ [ [ "# <img style=\"float: left; padding-right: 10px; width: 45px\" src=\"https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png\"> CS109A Introduction to Data Science \n\n## Standard Section 3: Multiple Linear Regression and Polynomial Regression \n\n**Harvard University**<br/>\n**Fall 2019**<br/>\n**Instructors**: Pavlos Protopapas, Kevin Rader, and Chris Tanner<br/>\n**Section Leaders**: Marios Mattheakis, Abhimanyu (Abhi) Vasishth, Robbert (Rob) Struyven<br/>\n\n<hr style='height:2px'>", "_____no_output_____" ] ], [ [ "#RUN THIS CELL \nimport requests\nfrom IPython.core.display import HTML\nstyles = requests.get(\"http://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css\").text\nHTML(styles)", "_____no_output_____" ] ], [ [ "For this section, our goal is to get you familiarized with Multiple Linear Regression. We have learned how to model data with kNN Regression and Simple Linear Regression and our goal now is to dive deep into Linear Regression.\n\nSpecifically, we will: \n \n- Load in the titanic dataset from seaborn\n- Learn a few ways to plot **distributions** of variables using seaborn\n- Learn about different **kinds of variables** including continuous, categorical and ordinal\n- Perform single and multiple linear regression\n- Learn about **interaction** terms\n- Understand how to **interpret coefficients** in linear regression\n- Look at **polynomial** regression\n- Understand the **assumptions** being made in a linear regression model\n- (Extra): look at some cool plots to raise your EDA game", "_____no_output_____" ], [ "![meme](../fig/meme.png)", "_____no_output_____" ] ], [ [ "# Data and Stats packages\nimport numpy as np\nimport pandas as pd\n\n# Visualization packages\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()", "_____no_output_____" ] ], [ [ "# Extending Linear Regression\n\n## Working with the Titanic Dataset from Seaborn\n\nFor our dataset, we'll be using the passenger list from the Titanic, which famously sank in 1912. Let's have a look at the data. 
Some descriptions of the data are at https://www.kaggle.com/c/titanic/data, and here's [how seaborn preprocessed it](https://github.com/mwaskom/seaborn-data/blob/master/process/titanic.py).\n\nThe task is to build a regression model to **predict the fare**, based on different attributes.\n\nLet's keep a subset of the data, which includes the following variables: \n\n- age\n- sex\n- class\n- embark_town\n- alone\n- **fare** (the response variable)", "_____no_output_____" ] ], [ [ "# Load the dataset from seaborn \ntitanic = sns.load_dataset(\"titanic\")\ntitanic.head()", "_____no_output_____" ], [ "# checking for null values\nchosen_vars = ['age', 'sex', 'class', 'embark_town', 'alone', 'fare']\ntitanic = titanic[chosen_vars]\ntitanic.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 6 columns):\nage 714 non-null float64\nsex 891 non-null object\nclass 891 non-null category\nembark_town 889 non-null object\nalone 891 non-null bool\nfare 891 non-null float64\ndtypes: bool(1), category(1), float64(2), object(2)\nmemory usage: 29.8+ KB\n" ] ], [ [ "**Exercise**: check the datatypes of each column and display the statistics (min, max, mean and any others) for all the numerical columns of the dataset.", "_____no_output_____" ] ], [ [ "## your code here\n\n", "_____no_output_____" ], [ "# %load 'solutions/sol1.py'\nprint(titanic.dtypes)\ntitanic.describe()", "age float64\nsex object\nclass category\nembark_town object\nalone bool\nfare float64\ndtype: object\n" ] ], [ [ "**Exercise**: drop all the non-null *rows* in the dataset. Is this always a good idea?", "_____no_output_____" ] ], [ [ "## your code here\n", "_____no_output_____" ], [ "# %load 'solutions/sol2.py'\ntitanic = titanic.dropna(axis=0)\ntitanic.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 712 entries, 0 to 890\nData columns (total 6 columns):\nage 712 non-null float64\nsex 712 non-null object\nclass 712 non-null category\nembark_town 712 non-null object\nalone 712 non-null bool\nfare 712 non-null float64\ndtypes: bool(1), category(1), float64(2), object(2)\nmemory usage: 29.3+ KB\n" ] ], [ [ "Now let us visualize the response variable. A good visualization of the distribution of a variable will enable us to answer three kinds of questions:\n\n- What values are central or typical? (e.g., mean, median, modes)\n- What is the typical spread of values around those central values? 
(e.g., variance/stdev, skewness)\n- What are unusual or exceptional values (e.g., outliers)", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, 3, figsize=(24, 6))\nax = ax.ravel()\n\nsns.distplot(titanic['fare'], ax=ax[0])\n# use seaborn to draw distributions\nax[0].set_title('Seaborn distplot')\nax[0].set_ylabel('Normalized frequencies')\n\nsns.violinplot(x='fare', data=titanic, ax=ax[1])\nax[1].set_title('Seaborn violin plot')\nax[1].set_ylabel('Frequencies')\n\nsns.boxplot(x='fare', data=titanic, ax=ax[2])\nax[2].set_title('Seaborn box plot')\nax[2].set_ylabel('Frequencies')\nfig.suptitle('Distribution of count');", "_____no_output_____" ] ], [ [ "How do we interpret these plots?", "_____no_output_____" ], [ "## Train-Test Split", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\ntitanic_train, titanic_test = train_test_split(titanic, train_size=0.7, random_state=99)\ntitanic_train = titanic_train.copy()\ntitanic_test = titanic_test.copy()\nprint(titanic_train.shape, titanic_test.shape)", "(498, 6) (214, 6)\n" ] ], [ [ "## Simple one-variable OLS", "_____no_output_____" ], [ "**Exercise**: You've done this before: make a simple model using the OLS package from the statsmodels library predicting **fare** using **age** using the training data. Name your model `model_1` and display the summary", "_____no_output_____" ] ], [ [ "from statsmodels.api import OLS\nimport statsmodels.api as sm", "_____no_output_____" ], [ "# Your code here\n\n", "_____no_output_____" ], [ "# %load 'solutions/sol3.py'\nage_ca = sm.add_constant(titanic_train['age'])\nmodel_1 = OLS(titanic_train['fare'], age_ca).fit()\nmodel_1.summary()", "_____no_output_____" ] ], [ [ "## Dealing with different kinds of variables", "_____no_output_____" ], [ "In general, you should be able to distinguish between three kinds of variables: \n\n1. Continuous variables: such as `fare` or `age`\n2. Categorical variables: such as `sex` or `alone`. There is no inherent ordering between the different values that these variables can take on. These are sometimes called nominal variables. Read more [here](https://stats.idre.ucla.edu/other/mult-pkg/whatstat/what-is-the-difference-between-categorical-ordinal-and-interval-variables/). \n3. Ordinal variables: such as `class` (first > second > third). There is some inherent ordering of the values in the variables, but the values are not continuous either. \n\n*Note*: While there is some inherent ordering in `class`, we will be treating it like a categorical variable.", "_____no_output_____" ] ], [ [ "titanic_orig = titanic_train.copy()", "_____no_output_____" ] ], [ [ "Let us now examine the `sex` column and see the value counts.", "_____no_output_____" ] ], [ [ "titanic_train['sex'].value_counts()", "_____no_output_____" ] ], [ [ "**Exercise**: Create a column `sex_male` that is 1 if the passenger is male, 0 if female. The value counts indicate that these are the two options in this particular dataset. Ensure that the datatype is `int`.", "_____no_output_____" ] ], [ [ "# your code here\n\n", "_____no_output_____" ], [ "# %load 'solutions/sol4.py'\n# functions that help us create a dummy variable\nstratify\ntitanic_train['sex_male'].value_counts()", "_____no_output_____" ] ], [ [ "Do we need a `sex_female` column, or a `sex_others` column? 
Why or why not?\n\nNow, let us look at `class` in greater detail.", "_____no_output_____" ] ], [ [ "titanic_train['class_Second'] = (titanic_train['class'] == 'Second').astype(int)\ntitanic_train['class_Third'] = 1 * (titanic_train['class'] == 'Third') # just another way to do it", "_____no_output_____" ], [ "titanic_train.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 498 entries, 278 to 805\nData columns (total 9 columns):\nage 498 non-null float64\nsex 498 non-null object\nclass 498 non-null category\nembark_town 498 non-null object\nalone 498 non-null bool\nfare 498 non-null float64\nsex_male 498 non-null int64\nclass_Second 498 non-null int64\nclass_Third 498 non-null int64\ndtypes: bool(1), category(1), float64(2), int64(3), object(2)\nmemory usage: 32.2+ KB\n" ], [ "# This function automates the above:\ntitanic_train_copy = pd.get_dummies(titanic_train, columns=['sex', 'class'], drop_first=True)\ntitanic_train_copy.head()", "_____no_output_____" ] ], [ [ "## Linear Regression with More Variables", "_____no_output_____" ], [ "**Exercise**: Fit a linear regression including the new sex and class variables. Name this model `model_2`. Don't forget the constant!", "_____no_output_____" ] ], [ [ "# your code here", "_____no_output_____" ], [ "# %load 'solutions/sol5.py'\nmodel_2 = sm.OLS(titanic_train['fare'], \n sm.add_constant(titanic_train[['age', 'sex_male', 'class_Second', 'class_Third']])).fit()\nmodel_2.summary()", "_____no_output_____" ] ], [ [ "### Interpreting These Results", "_____no_output_____" ], [ "1. Which of the predictors do you think are important? Why?\n2. All else equal, what does being male do to the fare?\n\n### Going back to the example from class\n\n![male_female](../fig/male_female.png)\n\n3. What is the interpretation of $\\beta_0$ and $\\beta_1$?", "_____no_output_____" ], [ "## Exploring Interactions", "_____no_output_____" ] ], [ [ "sns.lmplot(x=\"age\", y=\"fare\", hue=\"sex\", data=titanic_train, size=6)", "/anaconda3/envs/109a/lib/python3.7/site-packages/seaborn/regression.py:546: UserWarning: The `size` paramter has been renamed to `height`; please update your code.\n warnings.warn(msg, UserWarning)\n" ] ], [ [ "The slopes seem to be different for male and female. What does that indicate?\n\nLet us now try to add an interaction effect into our model.", "_____no_output_____" ] ], [ [ "# It seemed like gender interacted with age and class. Can we put that in our model?\ntitanic_train['sex_male_X_age'] = titanic_train['age'] * titanic_train['sex_male']\n\nmodel_3 = sm.OLS(\n titanic_train['fare'],\n sm.add_constant(titanic_train[['age', 'sex_male', 'class_Second', 'class_Third', 'sex_male_X_age']])\n).fit()\nmodel_3.summary()", "_____no_output_____" ] ], [ [ "**What happened to the `age` and `male` terms?**", "_____no_output_____" ] ], [ [ "# It seemed like gender interacted with age and class. Can we put that in our model?\ntitanic_train['sex_male_X_class_Second'] = titanic_train['age'] * titanic_train['class_Second']\ntitanic_train['sex_male_X_class_Third'] = titanic_train['age'] * titanic_train['class_Third']\n\nmodel_4 = sm.OLS(\n titanic_train['fare'],\n sm.add_constant(titanic_train[['age', 'sex_male', 'class_Second', 'class_Third', 'sex_male_X_age', \n 'sex_male_X_class_Second', 'sex_male_X_class_Third']])\n).fit()\nmodel_4.summary()", "_____no_output_____" ] ], [ [ "## Polynomial Regression \n\n![poly](../fig/poly.png)", "_____no_output_____" ], [ "Perhaps we now believe that the fare also depends on the square of age. 
How would we include this term in our model?", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(12,6))\nax.plot(titanic_train['age'], titanic_train['fare'], 'o')\nx = np.linspace(0,80,100)\nax.plot(x, x, '-', label=r'$y=x$')\nax.plot(x, 0.04*x**2, '-', label=r'$y=c x^2$')\nax.set_title('Plotting Age (x) vs Fare (y)')\nax.set_xlabel('Age (x)')\nax.set_ylabel('Fare (y)')\nax.legend();", "_____no_output_____" ] ], [ [ "**Exercise**: Create a model that predicts fare from all the predictors in `model_4` + the square of age. Show the summary of this model. Call it `model_5`. Remember to use the training data, `titanic_train`.", "_____no_output_____" ] ], [ [ "# your code here\n\n", "_____no_output_____" ], [ "# %load 'solutions/sol6.py'\ntitanic_train['age^2'] = titanic_train['age'] **2\nmodel_5 = sm.OLS(\n titanic_train['fare'],\n sm.add_constant(titanic_train[['age', 'sex_male', 'class_Second', 'class_Third', 'sex_male_X_age', \n 'sex_male_X_class_Second', 'sex_male_X_class_Third', 'age^2']])\n).fit()\nmodel_5.summary()", "_____no_output_____" ] ], [ [ "## Looking at All Our Models: Model Selection", "_____no_output_____" ], [ "What has happened to the $R^2$ as we added more features? Does this mean that the model is better? (What if we kept adding more predictors and interaction terms? **In general, how should we choose a model?** We will spend a lot more time on model selection and learn about ways to do so as the course progresses.", "_____no_output_____" ] ], [ [ "models = [model_1, model_2, model_3, model_4, model_5]\nfig, ax = plt.subplots(figsize=(12,6))\nax.plot([model.df_model for model in models], [model.rsquared for model in models], 'x-')\nax.set_xlabel(\"Model degrees of freedom\")\nax.set_title('Model degrees of freedom vs training $R^2$')\nax.set_ylabel(\"$R^2$\");", "_____no_output_____" ] ], [ [ "**What about the test data?**\n\nWe added a lot of columns to our training data and must add the same to our test data in order to calculate $R^2$ scores.", "_____no_output_____" ] ], [ [ "# Added features for model 1\n# Nothing new to be added\n\n# Added features for model 2\ntitanic_test = pd.get_dummies(titanic_test, columns=['sex', 'class'], drop_first=True)\n\n# Added features for model 3\ntitanic_test['sex_male_X_age'] = titanic_test['age'] * titanic_test['sex_male']\n\n# Added features for model 4\ntitanic_test['sex_male_X_class_Second'] = titanic_test['age'] * titanic_test['class_Second']\ntitanic_test['sex_male_X_class_Third'] = titanic_test['age'] * titanic_test['class_Third']\n\n# Added features for model 5\ntitanic_test['age^2'] = titanic_test['age'] **2", "_____no_output_____" ] ], [ [ "**Calculating R^2 scores**", "_____no_output_____" ] ], [ [ "from sklearn.metrics import r2_score\n\nr2_scores = []\ny_preds = []\ny_true = titanic_test['fare']\n\n# model 1\ny_preds.append(model_1.predict(sm.add_constant(titanic_test['age'])))\n\n# model 2\ny_preds.append(model_2.predict(sm.add_constant(titanic_test[['age', 'sex_male', 'class_Second', 'class_Third']])))\n\n# model 3\ny_preds.append(model_3.predict(sm.add_constant(titanic_test[['age', 'sex_male', 'class_Second', 'class_Third', \n 'sex_male_X_age']])))\n\n# model 4\ny_preds.append(model_4.predict(sm.add_constant(titanic_test[['age', 'sex_male', 'class_Second', 'class_Third', \n 'sex_male_X_age', 'sex_male_X_class_Second', \n 'sex_male_X_class_Third']])))\n\n# model 5\ny_preds.append(model_5.predict(sm.add_constant(titanic_test[['age', 'sex_male', 'class_Second', \n 'class_Third', 'sex_male_X_age', \n 
'sex_male_X_class_Second', \n 'sex_male_X_class_Third', 'age^2']])))\n\nfor y_pred in y_preds:\n r2_scores.append(r2_score(y_true, y_pred))\n \nmodels = [model_1, model_2, model_3, model_4, model_5]\nfig, ax = plt.subplots(figsize=(12,6))\nax.plot([model.df_model for model in models], r2_scores, 'x-')\nax.set_xlabel(\"Model degrees of freedom\")\nax.set_title('Model degrees of freedom vs test $R^2$')\nax.set_ylabel(\"$R^2$\");", "/anaconda3/envs/109a/lib/python3.7/site-packages/numpy/core/fromnumeric.py:2389: FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.\n return ptp(axis=axis, out=out, **kwargs)\n" ] ], [ [ "## Regression Assumptions. Should We Even Regress Linearly?", "_____no_output_____" ], [ "![linear regression](../fig/linear_regression.png)", "_____no_output_____" ], [ "**Question**: What are the assumptions of a linear regression model? \n\nWe find that the answer to this question can be found on closer examimation of $\\epsilon$. What is $\\epsilon$? It is assumed that $\\epsilon$ is normally distributed with a mean of 0 and variance $\\sigma^2$. But what does this tell us?\n\n1. Assumption 1: Constant variance of $\\epsilon$ errors. This means that if we plot our **residuals**, which are the differences between the true $Y$ and our predicted $\\hat{Y}$, they should look like they have constant variance and a mean of 0. We will show this in our plots.\n2. Assumption 2: Independence of $\\epsilon$ errors. This again comes from the distribution of $\\epsilon$ that we decide beforehand.\n3. Assumption 3: Linearity. This is an implicit assumption as we claim that Y can be modeled through a linear combination of the predictors. **Important Note:** Even though our predictors, for instance $X_2$, can be created by squaring or cubing another variable, we still use them in a linear equation as shown above, which is why polynomial regression is still a linear model.\n4. Assumption 4: Normality. We assume that the $\\epsilon$ is normally distributed, and we can show this in a histogram of the residuals.\n\n**Exercise**: Calculate the residuals for model 5, our most recent model. Optionally, plot and histogram these residuals and check the assumptions of the model.", "_____no_output_____" ] ], [ [ "# your code here", "_____no_output_____" ], [ "# %load 'solutions/sol7.py'\n# %load 'solutions/sol7.py'\npredictors = sm.add_constant(titanic_train[['age', 'sex_male', 'class_Second', 'class_Third', 'sex_male_X_age', \n 'sex_male_X_class_Second', 'sex_male_X_class_Third', 'age^2']])\ny_hat = model_5.predict(predictors)\nresiduals = titanic_train['fare'] - y_hat\n\n# plotting\nfig, ax = plt.subplots(ncols=2, figsize=(16,5))\nax = ax.ravel()\nax[0].set_title('Plot of Residuals')\nax[0].scatter(y_hat, residuals, alpha=0.2)\nax[0].set_xlabel(r'$\\hat{y}$')\nax[0].set_xlabel('residuals')\n\nax[1].set_title('Histogram of Residuals')\nax[1].hist(residuals, alpha=0.7)\nax[1].set_xlabel('residuals')\nax[1].set_ylabel('frequency');\n\n# Mean of residuals\nprint('Mean of residuals: {}'.format(np.mean(residuals)))", "Mean of residuals: 4.784570776163707e-13\n" ] ], [ [ "**What can you say about the assumptions of the model?**", "_____no_output_____" ], [ "----------------\n### End of Standard Section\n---------------", "_____no_output_____" ], [ "## Extra: Visual exploration of predictors' correlations\n\nThe dataset for this problem contains 10 simulated predictors and a response variable. 
", "_____no_output_____" ] ], [ [ "# read in the data \ndata = pd.read_csv('../data/dataset3.txt')\ndata.head()", "_____no_output_____" ], [ "# this effect can be replicated using the scatter_matrix function in pandas plotting\nsns.pairplot(data);", "_____no_output_____" ] ], [ [ "Predictors x1, x2, x3 seem to be perfectly correlated while predictors x4, x5, x6, x7 seem correlated.", "_____no_output_____" ] ], [ [ "data.corr()", "_____no_output_____" ], [ "sns.heatmap(data.corr())", "_____no_output_____" ] ], [ [ "## Extra: A Handy Matplotlib Guide", "_____no_output_____" ], [ "![](https://i.imgur.com/XTzSuoR.png)\nsource: http://matplotlib.org/faq/usage_faq.html\n\nSee also [this](http://matplotlib.org/faq/usage_faq.html) matplotlib tutorial.", "_____no_output_____" ], [ "![violin plot](../fig/violin.png)\n\nSee also [this](https://mode.com/blog/violin-plot-examples) violin plot tutorial.", "_____no_output_____" ], [ "---", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
hexsha: e7a16e50967a0f50baf437f373a478e93f4984ae
size: 3,253
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Get Stats.ipynb
max_stars_repo_name: jrterven/lip_reading_dataset
max_stars_repo_head_hexsha: 85845b7948e4ea26ae27a0cc3e0b15ae68198fb4
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2019-10-06T14:49:12.000Z
max_stars_repo_stars_event_max_datetime: 2019-12-09T17:13:20.000Z
max_issues_repo_path: Get Stats.ipynb
max_issues_repo_name: jrterven/lip_reading_dataset
max_issues_repo_head_hexsha: 85845b7948e4ea26ae27a0cc3e0b15ae68198fb4
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Get Stats.ipynb
max_forks_repo_name: jrterven/lip_reading_dataset
max_forks_repo_head_hexsha: 85845b7948e4ea26ae27a0cc3e0b15ae68198fb4
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 22.590278
max_line_length: 223
alphanum_fraction: 0.503535
[ [ [ "import glob\nimport os\nfrom collections import Counter\n\nresults_dir = '/datasets/Our_dataset/results'\n\n# load all the .txt files recursively\nall_annotations = glob.glob(results_dir + '/**/*.txt', recursive=True)\nprint(len(all_annotations), 'files')\n", "9182 files\n" ] ], [ [ "## Count all the words", "_____no_output_____" ] ], [ [ "wordcounter = Counter({})\nwords_per_video = []\nfor ann_idx, ann_file in enumerate(all_annotations):\n file = open(ann_file, \"r\")\n words = file.read().split()\n file.close()\n \n current_wordcounter = Counter(words)\n wordcounter += current_wordcounter\n \n words_per_video.append(len(words))\n", "_____no_output_____" ] ], [ [ "## Some stats", "_____no_output_____" ] ], [ [ "print(\"Number of words:\", len(wordcounter))\n\nprint(\"10 most common words:\")\nprint(wordcounter.most_common(10))\n\nprint(\"Max words in a video:\", max(words_per_video))\nprint(\"Min words in a video:\", min(words_per_video))", "Number of words: 9873\n10 most common words:\n[('que', 3723), ('de', 3050), ('la', 2163), ('y', 1988), ('a', 1865), ('el', 1594), ('no', 1589), ('en', 1459), ('un', 1178), ('es', 1169)]\nMax words in a video: 25\nMin words in a video: 1\n" ], [ "words_per_video_counter = Counter(words_per_video)", "_____no_output_____" ], [ "print(words_per_video_counter)", "Counter({11: 762, 12: 746, 9: 662, 10: 650, 13: 643, 8: 602, 5: 601, 4: 592, 7: 570, 6: 549, 14: 524, 3: 513, 2: 438, 15: 380, 1: 329, 16: 267, 17: 179, 18: 92, 19: 35, 20: 21, 21: 15, 22: 8, 23: 2, 25: 1, 24: 1})\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
hexsha: e7a17c5bf52e4ac47ecc0bda44d06086265a9fd0
size: 82,312
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: models/Linear_ensemble/hyperparameter tuning/linear model_new_classification-seq only.ipynb
max_stars_repo_name: jingyi7777/CasRx_guide_efficiency
max_stars_repo_head_hexsha: c9e900e4c4a73215f09852bd621b30e8dcb039e8
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: models/Linear_ensemble/hyperparameter tuning/linear model_new_classification-seq only.ipynb
max_issues_repo_name: jingyi7777/CasRx_guide_efficiency
max_issues_repo_head_hexsha: c9e900e4c4a73215f09852bd621b30e8dcb039e8
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: models/Linear_ensemble/hyperparameter tuning/linear model_new_classification-seq only.ipynb
max_forks_repo_name: jingyi7777/CasRx_guide_efficiency
max_forks_repo_head_hexsha: c9e900e4c4a73215f09852bd621b30e8dcb039e8
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 73.822422
max_line_length: 1,558
alphanum_fraction: 0.638133
[ [ [ "import matplotlib.pyplot as plt\n\nimport pandas as pd\nimport numpy as np\nimport statistics\nimport math\nfrom sklearn.model_selection import train_test_split\nimport random\nimport sklearn\nfrom sklearn import ensemble\nfrom itertools import chain\nfrom typing import TextIO\nimport re\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.model_selection import PredefinedSplit\n\nfrom sklearn import linear_model\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.ensemble import RandomForestClassifier\n#from sklearn import svm\n#from sklearn.svm import SVC\n#from sklearn.svm import LinearSVC\nfrom sklearn.ensemble import GradientBoostingClassifier\n#from sklearn.ensemble import VotingRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve, confusion_matrix", "_____no_output_____" ], [ "# data\ngenes = ['RPS14', 'CDC5L', 'POLR2I', 'RPS7', 'XAB2', 'RPS19BP1', 'RPL23A', 'SUPT6H', 'PRPF31', 'U2AF1', 'PSMD7',\n 'Hsp10', 'RPS13', 'PHB', 'RPS9', 'EIF5B', 'RPS6', 'RPS11', 'SUPT5H', 'SNRPD2', 'RPL37', 'RPSA', 'COPS6',\n 'DDX51', 'EIF4A3', 'KARS', 'RPL5', 'RPL32', 'SF3A1', 'RPS3A', 'SF3B3', 'POLR2D', 'RPS15A', 'RPL31', 'PRPF19',\n 'SF3B2', 'RPS4X', 'CSE1L', 'RPL6', 'COPZ1', 'PSMB2', 'RPL7', 'PHB2', 'ARCN1', 'RPA2', 'NUP98', 'RPS3', 'EEF2',\n 'USP39', 'PSMD1', 'NUP93', 'AQR', 'RPL34', 'PSMA1', 'RPS27A']\n\ngenes_filter_1 = ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B', 'RPL31',\n 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H', 'EEF2', 'RPS11',\n 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51', 'RPL7', 'RPS9', 'KARS',\n 'SF3A1', 'RPL32', 'PSMB2', 'RPS7', 'EIF4A3', 'U2AF1', 'PSMA1',\n 'PHB', 'POLR2D', 'RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2',\n 'SUPT5H', 'RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1',\n 'RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L', 'RPL5',\n 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\n\ngene_split_index = {}\nfor i in range(len(genes_filter_1)):\n gene = genes_filter_1[i]\n gene_split_index[gene]= math.floor(i/6)\n\nbase_positions = {\n 'A': 0,\n 'T': 1,\n 'C': 2,\n 'G': 3,\n 0: 'A',\n 1: 'T',\n 2: 'C',\n 3: 'G',\n}\n", "_____no_output_____" ], [ "def create_gene_splits_filter1_kfold_noval(gene_strings, values_to_split: list, kfold, split):\n # use number [0, 1, 2, 3, 4,...] 
as index\n genes_filter_1 = ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B', 'RPL31',\n 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H', 'EEF2', 'RPS11',\n 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51', 'RPL7', 'RPS9', 'KARS',\n 'SF3A1', 'RPL32', 'PSMB2', 'RPS7', 'EIF4A3', 'U2AF1', 'PSMA1',\n 'PHB', 'POLR2D', 'RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2',\n 'SUPT5H', 'RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1',\n 'RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L', 'RPL5',\n 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\n assert split >= 0 and split < kfold\n if kfold == 9:\n #val_genes = genes_filter_1[split * 6: (split + 1) * 6]\n if split != 8:\n test_genes = genes_filter_1[((split + 1) * 6): (split + 2) * 6]\n else:\n test_genes = genes_filter_1[0:6]\n #print('val:', val_genes)\n print('test:', test_genes)\n\n #val_ids = list(chain(*[np.where(gene_strings == g)[0] for g in val_genes]))\n test_ids = list(chain(*[np.where(gene_strings == g)[0] for g in test_genes]))\n train_ids = list((set(range(len(gene_strings))) - set(test_ids)))\n\n train = [[arr[i] for i in train_ids] for arr in values_to_split]\n #val = [[arr[i] for i in val_ids] for arr in values_to_split]\n test = [[arr[i] for i in test_ids] for arr in values_to_split]\n\n return train, test\n\ndef normalize(a: np.ndarray):\n \"\"\"\n :param a: numpy array of size N x D, where N is number of examples, D is number of features\n :return: a, normalized so that all feature columns are now between 0 and 1\n \"\"\"\n a_normed, norms = sklearn.preprocessing.normalize(a, norm='max', axis=0, return_norm=True)\n print(\"Norms:\", norms)\n return a_normed\n\ndef one_hot_encode_sequence(seq, pad_to_len=-1):\n output_len = len(seq)\n if pad_to_len > 0:\n assert pad_to_len >= output_len\n output_len = pad_to_len\n\n encoded_seq = np.zeros((output_len, 4), dtype=np.float32)\n for i, base in enumerate(seq):\n encoded_seq[i][base_positions[base]] = 1\n return encoded_seq", "_____no_output_____" ], [ "dataset_filtered_csv_path = '../../../data/integrated_guide_feature_filtered_f24_mismatch3_all_flanks.csv'\n\n#dataset\ndataframe = pd.read_csv(dataset_filtered_csv_path)\ndataframe = dataframe[dataframe['gene'].isin(genes_filter_1)] #filter out 1 gene\n\nnum_examples = len(dataframe['gene'].values)\nencoded_guides = [one_hot_encode_sequence(guide).flatten() for guide in dataframe['guide'].values]\n\n# guide seq only classification\n\nclasses = dataframe['binary_relative_ratio_075f'].values\n\noutputs = classes.astype(np.float32)\n \nall_cols = [encoded_guides, outputs]\n\n# group label to split\ngroups = dataframe['gene'].values\n\n# predefined split index\nfor g in gene_split_index.keys():\n dataframe.loc[dataframe['gene']== g,'predefined split index']= gene_split_index[g]\nps = PredefinedSplit(dataframe['predefined split index'].values)\nprint(ps.get_n_splits())", "9\n" ], [ "len(all_cols[1])", "_____no_output_____" ] ], [ [ "## hp tuning", "_____no_output_____" ] ], [ [ "# LogisticRegression, L1\nlogreg = LogisticRegression(penalty='l1',solver='saga',random_state=0,max_iter=10000)\ngrid = {'C': np.logspace(-5, 5, 11)}\n\n#predefined splits\n#gs = GridSearchCV(logreg, grid, cv=ps.split(),scoring='accuracy')\ngs = GridSearchCV(logreg, grid, cv=ps.split(),scoring=['roc_auc','average_precision'],refit='roc_auc')\ngs.fit(all_cols[0], all_cols[1])\nprint(gs.best_params_)\nprint(gs.best_score_) #best cv score\ndf_gridsearch = pd.DataFrame(gs.cv_results_)\n\ndf_gridsearch.to_csv('model_hp_results/guideonly_gene20_075f_classi_LogisticRegression_L1_hp.csv')", 
"{'C': 0.1}\n0.7214640628190946\n" ], [ "# LogisticRegression, L2\nlogreg = LogisticRegression(penalty='l2',solver='saga',random_state=0,max_iter=10000)\ngrid = {'C': np.logspace(-5, 5, 11)}\n\n#predefined splits\ngs = GridSearchCV(logreg, grid, cv=ps.split(),scoring=['roc_auc','average_precision'],refit='roc_auc')\ngs.fit(all_cols[0], all_cols[1])\nprint(gs.best_params_)\nprint(gs.best_score_) #best cv score\ndf_gridsearch = pd.DataFrame(gs.cv_results_)\n\ndf_gridsearch.to_csv('model_hp_results/guideonly_gene20_075f_classi_LogisticRegression_L2_hp.csv')", "{'C': 0.01}\n0.7214300947964725\n" ], [ "# LogisticRegression, elasticnet\nlogreg = LogisticRegression(penalty='elasticnet',solver='saga',random_state=0,max_iter=10000)\ngrid = {'C': np.logspace(-4, 4, 9),'l1_ratio':np.linspace(0.1, 1, num=10)}\n\ngs = GridSearchCV(logreg, grid, cv=ps.split(),scoring=['roc_auc','average_precision'],refit='roc_auc')\ngs.fit(all_cols[0], all_cols[1])\nprint(gs.best_params_)\nprint(gs.best_score_) #best cv score\ndf_gridsearch = pd.DataFrame(gs.cv_results_)\n\ndf_gridsearch.to_csv('model_hp_results/guideonly_gene20_075f_classi_LogisticRegression_elasticnet_hp.csv')", "{'C': 0.01, 'l1_ratio': 0.2}\n0.7214917118945797\n" ], [ "# https://www.programcreek.com/python/example/91158/sklearn.model_selection.GroupKFold\n#random forest\nclf = RandomForestClassifier(random_state=0)\ngrid = {'n_estimators':[100,200,400,800,1000,1200,1500],'max_features':['auto','sqrt','log2']}\ngs = GridSearchCV(clf, grid, cv=GroupKFold(n_splits=5))\ngs.fit(all_cols[0], all_cols[1], groups=groups)\n", "_____no_output_____" ], [ "#GradientBoostingClassifier\ngb = ensemble.GradientBoostingClassifier(random_state=0)\ngrid = {'learning_rate':np.logspace(-2, 0, 3),'n_estimators':[100,200,400,800,1000,1200,1500],'max_depth':[2,3,4,8],'max_features':['auto','sqrt','log2']}\ngs = GridSearchCV(gb, grid, cv=GroupKFold(n_splits=5))\ngs.fit(all_cols[0], all_cols[1], groups=groups)\nprint(gs.best_score_) #best cv score\nprint(gs.best_params_)\ndf_gridsearch = pd.DataFrame(gs.cv_results_)\n\ndf_gridsearch.to_csv('linearmodel_hp_results/classi_gb_hp.csv')", "_____no_output_____" ] ], [ [ "## Test models", "_____no_output_____" ] ], [ [ "def classification_analysis(model_name, split, y_pred,y_true):\n test_df = pd.DataFrame(list(zip(list(y_pred), list(y_true))),\n columns =['predicted_value', 'true_binary_label'])\n \n thres_list = [0.8, 0.9,0.95]\n tp_thres = []\n #print('thres_stats')\n for thres in thres_list:\n df_pre_good = test_df[test_df['predicted_value']>thres]\n true_good_label = df_pre_good['true_binary_label'].values\n num_real_gg = np.count_nonzero(true_good_label)\n if len(true_good_label)>0:\n gg_ratio = num_real_gg/len(true_good_label)\n tp_thres.append(gg_ratio)\n #print('true good guide percent '+str(gg_ratio))\n else:\n tp_thres.append('na')\n \n outputs = np.array(y_pred)\n labels = np.array(y_true)\n #plt.clf()\n #fig.suptitle('AUC and PRC')\n score = roc_auc_score(labels, outputs)\n fpr, tpr, _ = roc_curve(labels, outputs)\n #print('AUROC '+str(score))\n average_precision = average_precision_score(labels, outputs)\n precision, recall, thres_prc = precision_recall_curve(labels, outputs)\n #print('AUPRC '+str(average_precision))\n #plt.savefig(fname='results/linear_models/'+str(model_name)+'precision-recall_'+str(split)+'.png',dpi=600,bbox_inches='tight')\n return score,average_precision,tp_thres\n ", "_____no_output_____" ], [ "#LogisticRegression, little regularization\nlogreg = 
LogisticRegression(penalty='l1',solver='saga',random_state=0,max_iter=10000,C=100000000)\nauroc_l = []\nauprc_l = []\ntp_80 = []\ntp_90 = []\nfor s in range(9):\n #tr, val, te = create_gene_splits_kfold(dataframe['gene'].values, all_cols, 11, s)\n tr, te = create_gene_splits_filter1_kfold_noval(dataframe['gene'].values, all_cols, 9, s)\n # training input and output\n d_input = tr[0]\n d_output = tr[1]\n logreg.fit(d_input, d_output) #fit models\n #test set\n xt = te[0] \n #pred = logreg.predict(xt)\n pred = logreg.predict_proba(xt)\n pred = pred[:,1]\n auroc,auprc,tp_thres = classification_analysis('LogisticRegression-L1', s,pred,te[1])\n auroc_l.append(auroc)\n auprc_l.append(auprc)\n if tp_thres[0]!= 'na':\n tp_80.append(tp_thres[0])\n if tp_thres[1]!= 'na':\n tp_90.append(tp_thres[1])\n \nauroc_mean = statistics.mean(auroc_l)\nauroc_sd = statistics.stdev(auroc_l)\nprint('auroc_mean: '+str(auroc_mean))\nprint('auroc_sd: '+str(auroc_sd))\nauprc_mean = statistics.mean(auprc_l)\nauprc_sd = statistics.stdev(auprc_l)\nprint('auprc_mean: '+str(auprc_mean))\nprint('auprc_sd: '+str(auprc_sd))\n \ntp_80_mean = statistics.mean(tp_80)\ntp_80_sd = statistics.stdev(tp_80)\nprint('tp_80_mean: '+str(tp_80_mean))\nprint('tp_80_sd: '+str(tp_80_sd))\ntp_90_mean = statistics.mean(tp_90)\ntp_90_sd = statistics.stdev(tp_90)\nprint('tp_90_mean: '+str(tp_90_mean))\nprint('tp_90_sd: '+str(tp_90_sd))", "test: ['RPL31', 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H']\ntest: ['EEF2', 'RPS11', 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51']\ntest: ['RPL7', 'RPS9', 'KARS', 'SF3A1', 'RPL32', 'PSMB2']\ntest: ['RPS7', 'EIF4A3', 'U2AF1', 'PSMA1', 'PHB', 'POLR2D']\ntest: ['RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2', 'SUPT5H']\ntest: ['RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1']\ntest: ['RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L']\ntest: ['RPL5', 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\ntest: ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B']\nauroc_mean: 0.7213890843218597\nauroc_sd: 0.01830150463668443\nauprc_mean: 0.3253363736827444\nauprc_sd: 0.02084537819584787\ntp_80_mean: 0.3\ntp_80_sd: 0.4760952285695233\n" ], [ "# LogisticRegression, L1\nlogreg = LogisticRegression(penalty='l1',solver='saga',random_state=0,max_iter=10000,C=0.1)\nauroc_l = []\nauprc_l = []\ntp_80 = []\ntp_90 = []\nfor s in range(9):\n #tr, val, te = create_gene_splits_kfold(dataframe['gene'].values, all_cols, 11, s)\n tr, te = create_gene_splits_filter1_kfold_noval(dataframe['gene'].values, all_cols, 9, s)\n # training input and output\n d_input = tr[0]\n d_output = tr[1]\n logreg.fit(d_input, d_output) #fit models\n #test set\n xt = te[0] \n #pred = logreg.predict(xt)\n pred = logreg.predict_proba(xt)\n pred = pred[:,1]\n auroc,auprc,tp_thres = classification_analysis('LogisticRegression-L1', s,pred,te[1])\n auroc_l.append(auroc)\n auprc_l.append(auprc)\n if tp_thres[0]!= 'na':\n tp_80.append(tp_thres[0])\n if tp_thres[1]!= 'na':\n tp_90.append(tp_thres[1])\n \nauroc_mean = statistics.mean(auroc_l)\nauroc_sd = statistics.stdev(auroc_l)\nprint('auroc_mean: '+str(auroc_mean))\nprint('auroc_sd: '+str(auroc_sd))\nauprc_mean = statistics.mean(auprc_l)\nauprc_sd = statistics.stdev(auprc_l)\nprint('auprc_mean: '+str(auprc_mean))\nprint('auprc_sd: '+str(auprc_sd))\n \ntp_80_mean = statistics.mean(tp_80)\ntp_80_sd = statistics.stdev(tp_80)\nprint('tp_80_mean: '+str(tp_80_mean))\nprint('tp_80_sd: '+str(tp_80_sd))\ntp_90_mean = statistics.mean(tp_90)\ntp_90_sd = statistics.stdev(tp_90)\nprint('tp_90_mean: '+str(tp_90_mean))\nprint('tp_90_sd: 
'+str(tp_90_sd))", "test: ['RPL31', 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H']\ntest: ['EEF2', 'RPS11', 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51']\ntest: ['RPL7', 'RPS9', 'KARS', 'SF3A1', 'RPL32', 'PSMB2']\ntest: ['RPS7', 'EIF4A3', 'U2AF1', 'PSMA1', 'PHB', 'POLR2D']\ntest: ['RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2', 'SUPT5H']\ntest: ['RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1']\ntest: ['RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L']\ntest: ['RPL5', 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\ntest: ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B']\nauroc_mean: 0.7214640628190947\nauroc_sd: 0.018183248177440187\nauprc_mean: 0.32520716564837393\nauprc_sd: 0.020724462125722667\ntp_80_mean: 0.3125\ntp_80_sd: 0.4732423621500228\n" ], [ "# LogisticRegression, L2\nlogreg = LogisticRegression(penalty='l2',solver='saga',random_state=0,max_iter=10000,C=0.01)\nauroc_l = []\nauprc_l = []\ntp_80 = []\ntp_90 = []\nfor s in range(9):\n #tr, val, te = create_gene_splits_kfold(dataframe['gene'].values, all_cols, 11, s)\n tr, te = create_gene_splits_filter1_kfold_noval(dataframe['gene'].values, all_cols, 9, s)\n # training input and output\n d_input = tr[0]\n d_output = tr[1]\n logreg.fit(d_input, d_output) #fit models\n #test set\n xt = te[0] \n #pred = logreg.predict(xt)\n pred = logreg.predict_proba(xt)\n pred = pred[:,1]\n auroc,auprc,tp_thres = classification_analysis('LogisticRegression-L2', s,pred,te[1])\n auroc_l.append(auroc)\n auprc_l.append(auprc)\n if tp_thres[0]!= 'na':\n tp_80.append(tp_thres[0])\n if tp_thres[1]!= 'na':\n tp_90.append(tp_thres[1])\n \nauroc_mean = statistics.mean(auroc_l)\nauroc_sd = statistics.stdev(auroc_l)\nprint('auroc_mean: '+str(auroc_mean))\nprint('auroc_sd: '+str(auroc_sd))\nauprc_mean = statistics.mean(auprc_l)\nauprc_sd = statistics.stdev(auprc_l)\nprint('auprc_mean: '+str(auprc_mean))\nprint('auprc_sd: '+str(auprc_sd))\n \ntp_80_mean = statistics.mean(tp_80)\ntp_80_sd = statistics.stdev(tp_80)\nprint('tp_80_mean: '+str(tp_80_mean))\nprint('tp_80_sd: '+str(tp_80_sd))\ntp_90_mean = statistics.mean(tp_90)\ntp_90_sd = statistics.stdev(tp_90)\nprint('tp_90_mean: '+str(tp_90_mean))\nprint('tp_90_sd: '+str(tp_90_sd))", "test: ['RPL31', 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H']\ntest: ['EEF2', 'RPS11', 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51']\ntest: ['RPL7', 'RPS9', 'KARS', 'SF3A1', 'RPL32', 'PSMB2']\ntest: ['RPS7', 'EIF4A3', 'U2AF1', 'PSMA1', 'PHB', 'POLR2D']\ntest: ['RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2', 'SUPT5H']\ntest: ['RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1']\ntest: ['RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L']\ntest: ['RPL5', 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\ntest: ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B']\nauroc_mean: 0.7214300947964725\nauroc_sd: 0.0182392049171858\nauprc_mean: 0.3252897453354815\nauprc_sd: 0.02076884919984052\ntp_80_mean: 0.3333333333333333\ntp_80_sd: 0.5773502691896258\n" ], [ "# LogisticRegression, elasticnet\nlogreg = LogisticRegression(penalty='elasticnet',solver='saga',random_state=0,max_iter=10000,l1_ratio=0.50,C=0.1)\nauroc_l = []\nauprc_l = []\ntp_80 = []\ntp_90 = []\nfor s in range(9):\n #tr, val, te = create_gene_splits_kfold(dataframe['gene'].values, all_cols, 11, s)\n tr, te = create_gene_splits_filter1_kfold_noval(dataframe['gene'].values, all_cols, 9, s)\n # training input and output\n d_input = tr[0]\n d_output = tr[1]\n logreg.fit(d_input, d_output) #fit models\n #test set\n xt = te[0] \n #pred = logreg.predict(xt)\n pred = logreg.predict_proba(xt)\n pred = 
pred[:,1]\n auroc,auprc,tp_thres = classification_analysis('LogisticRegression-elasticnet', s,pred,te[1])\n auroc_l.append(auroc)\n auprc_l.append(auprc)\n if tp_thres[0]!= 'na':\n tp_80.append(tp_thres[0])\n if tp_thres[1]!= 'na':\n tp_90.append(tp_thres[1])\n \nauroc_mean = statistics.mean(auroc_l)\nauroc_sd = statistics.stdev(auroc_l)\nprint('auroc_mean: '+str(auroc_mean))\nprint('auroc_sd: '+str(auroc_sd))\nauprc_mean = statistics.mean(auprc_l)\nauprc_sd = statistics.stdev(auprc_l)\nprint('auprc_mean: '+str(auprc_mean))\nprint('auprc_sd: '+str(auprc_sd))\n \ntp_80_mean = statistics.mean(tp_80)\ntp_80_sd = statistics.stdev(tp_80)\nprint('tp_80_mean: '+str(tp_80_mean))\nprint('tp_80_sd: '+str(tp_80_sd))\ntp_90_mean = statistics.mean(tp_90)\ntp_90_sd = statistics.stdev(tp_90)\nprint('tp_90_mean: '+str(tp_90_mean))\nprint('tp_90_sd: '+str(tp_90_sd))", "test: ['RPL31', 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H']\ntest: ['EEF2', 'RPS11', 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51']\ntest: ['RPL7', 'RPS9', 'KARS', 'SF3A1', 'RPL32', 'PSMB2']\ntest: ['RPS7', 'EIF4A3', 'U2AF1', 'PSMA1', 'PHB', 'POLR2D']\ntest: ['RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2', 'SUPT5H']\ntest: ['RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1']\ntest: ['RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L']\ntest: ['RPL5', 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\ntest: ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B']\nauroc_mean: 0.721435369649852\nauroc_sd: 0.018237601384323694\nauprc_mean: 0.3252827677556211\nauprc_sd: 0.02078180094189064\ntp_80_mean: 0.3125\ntp_80_sd: 0.4732423621500228\n" ], [ "#SVM, linear\n\n\nclf = svm.SVC(kernel='linear',probability=True,random_state=0,C=0.001)\n\n#clf = LinearSVC(dual= False, random_state=0, max_iter=10000,C=1,penalty='l2')\n\nauroc_l = []\nauprc_l = []\ntp_80 = []\ntp_90 = []\nfor s in range(9):\n #tr, val, te = create_gene_splits_kfold(dataframe['gene'].values, all_cols, 11, s)\n tr, te = create_gene_splits_filter1_kfold_noval(dataframe['gene'].values, all_cols, 9, s)\n # training input and output\n d_input = tr[0]\n d_output = tr[1]\n clf.fit(d_input, d_output) #fit models\n #test set\n xt = te[0] \n pred = clf.predict_proba(xt)\n pred = pred[:,1]\n #pred = clf.predict(xt)\n auroc,auprc,tp_thres = classification_analysis('svm', s,pred,te[1])\n auroc_l.append(auroc)\n auprc_l.append(auprc)\n if tp_thres[0]!= 'na':\n tp_80.append(tp_thres[0])\n if tp_thres[1]!= 'na':\n tp_90.append(tp_thres[1])\n \nauroc_mean = statistics.mean(auroc_l)\nauroc_sd = statistics.stdev(auroc_l)\nprint('auroc_mean: '+str(auroc_mean))\nprint('auroc_sd: '+str(auroc_sd))\nauprc_mean = statistics.mean(auprc_l)\nauprc_sd = statistics.stdev(auprc_l)\nprint('auprc_mean: '+str(auprc_mean))\nprint('auprc_sd: '+str(auprc_sd)) \ntp_80_mean = statistics.mean(tp_80)\ntp_80_sd = statistics.stdev(tp_80)\n#print('tp_80_mean: '+str(tp_80_mean))\n#print('tp_80_sd: '+str(tp_80_sd))\ntp_90_mean = statistics.mean(tp_90)\ntp_90_sd = statistics.stdev(tp_90)\n#print('tp_90_mean: '+str(tp_90_mean))\n#print('tp_90_sd: '+str(tp_90_sd))", "test: ['RPL31', 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H']\ntest: ['EEF2', 'RPS11', 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51']\n" ], [ "# random forest\n#clf = RandomForestClassifier(n_estimators=32,min_samples_split=2, min_samples_leaf=2, max_features='auto',random_state=0)\nclf = RandomForestClassifier(n_estimators=1500,max_features='auto',random_state=0)\nauroc_l = []\nauprc_l = []\ntp_80 = []\ntp_90 = []\nfor s in range(9):\n #tr, val, te = 
create_gene_splits_kfold(dataframe['gene'].values, all_cols, 11, s)\n #tr, val, te = create_gene_splits_filter1_kfold(dataframe['gene'].values, all_cols, 9, args.split)\n tr, te = create_gene_splits_filter1_kfold_noval(dataframe['gene'].values, all_cols, 9, s)\n # training input and output\n d_input = tr[0]\n d_output = tr[1]\n clf.fit(d_input, d_output) #fit models\n #test set\n xt = te[0] \n #pred = logreg.predict(xt)\n pred = clf.predict_proba(xt)\n pred = pred[:,1]\n auroc,auprc,tp_thres = classification_analysis('random forest', s,pred,te[1])\n auroc_l.append(auroc)\n auprc_l.append(auprc)\n if tp_thres[0]!= 'na':\n tp_80.append(tp_thres[0])\n if tp_thres[1]!= 'na':\n tp_90.append(tp_thres[1])\n \nauroc_mean = statistics.mean(auroc_l)\nauroc_sd = statistics.stdev(auroc_l)\nprint('auroc_mean: '+str(auroc_mean))\nprint('auroc_sd: '+str(auroc_sd))\nauprc_mean = statistics.mean(auprc_l)\nauprc_sd = statistics.stdev(auprc_l)\nprint('auprc_mean: '+str(auprc_mean))\nprint('auprc_sd: '+str(auprc_sd))\n \ntp_80_mean = statistics.mean(tp_80)\ntp_80_sd = statistics.stdev(tp_80)\nprint('tp_80_mean: '+str(tp_80_mean))\nprint('tp_80_sd: '+str(tp_80_sd))\ntp_90_mean = statistics.mean(tp_90)\ntp_90_sd = statistics.stdev(tp_90)\nprint('tp_90_mean: '+str(tp_90_mean))\nprint('tp_90_sd: '+str(tp_90_sd))", "test: ['RPL31', 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H']\ntest: ['EEF2', 'RPS11', 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51']\ntest: ['RPL7', 'RPS9', 'KARS', 'SF3A1', 'RPL32', 'PSMB2']\ntest: ['RPS7', 'EIF4A3', 'U2AF1', 'PSMA1', 'PHB', 'POLR2D']\ntest: ['RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2', 'SUPT5H']\ntest: ['RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1']\ntest: ['RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L']\ntest: ['RPL5', 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\ntest: ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B']\nauroc_mean: 0.788279464458613\nauroc_sd: 0.01428358067005656\nauprc_mean: 0.4395069525801463\nauprc_sd: 0.023567763312645224\n" ], [ "#GradientBoostingClassifier\nclf = ensemble.GradientBoostingClassifier(random_state=0,max_depth=4,\n max_features='auto', n_estimators=1500)\n\nauroc_l = []\nauprc_l = []\ntp_80 = []\ntp_90 = []\n#for s in range(11):\nfor s in range(9):\n #tr, val, te = create_gene_splits_kfold(dataframe['gene'].values, all_cols, 11, s)\n #tr, val, te = create_gene_splits_filter1_kfold(dataframe['gene'].values, all_cols, 9, args.split)\n tr, te = create_gene_splits_filter1_kfold_noval(dataframe['gene'].values, all_cols, 9, s)\n # training input and output\n d_input = tr[0]\n d_output = tr[1]\n clf.fit(d_input, d_output) #fit models\n #test set\n xt = te[0] \n pred = clf.predict_proba(xt)\n pred = pred[:,1]\n auroc,auprc,tp_thres = classification_analysis('GradientBoostingClassifier_hpnew', s,pred,te[1])\n auroc_l.append(auroc)\n auprc_l.append(auprc)\n if tp_thres[0]!= 'na':\n tp_80.append(tp_thres[0])\n if tp_thres[1]!= 'na':\n tp_90.append(tp_thres[1])\n \nauroc_mean = statistics.mean(auroc_l)\nauroc_sd = statistics.stdev(auroc_l)\nprint('auroc_mean: '+str(auroc_mean))\nprint('auroc_sd: '+str(auroc_sd))\nauprc_mean = statistics.mean(auprc_l)\nauprc_sd = statistics.stdev(auprc_l)\nprint('auprc_mean: '+str(auprc_mean))\nprint('auprc_sd: '+str(auprc_sd))\n \ntp_80_mean = statistics.mean(tp_80)\ntp_80_sd = statistics.stdev(tp_80)\nprint('tp_80_mean: '+str(tp_80_mean))\nprint('tp_80_sd: '+str(tp_80_sd))\ntp_90_mean = statistics.mean(tp_90)\ntp_90_sd = statistics.stdev(tp_90)\nprint('tp_90_mean: '+str(tp_90_mean))\nprint('tp_90_sd: '+str(tp_90_sd))", 
"test: ['RPL31', 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H']\ntest: ['EEF2', 'RPS11', 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51']\ntest: ['RPL7', 'RPS9', 'KARS', 'SF3A1', 'RPL32', 'PSMB2']\ntest: ['RPS7', 'EIF4A3', 'U2AF1', 'PSMA1', 'PHB', 'POLR2D']\ntest: ['RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2', 'SUPT5H']\ntest: ['RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1']\ntest: ['RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L']\ntest: ['RPL5', 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\ntest: ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B']\nauroc_mean: 0.8419599405820187\nauroc_sd: 0.01700381890902823\nauprc_mean: 0.5374780612257355\nauprc_sd: 0.0307901999759122\ntp_80_mean: 0.813415325970473\ntp_80_sd: 0.060331083529782635\ntp_90_mean: 0.8539377289377289\ntp_90_sd: 0.14257472730230836\n" ], [ "print(auroc_l)\nprint(auprc_l)\nprint(tp_80)\nprint(tp_90)", "[0.8584233873947144, 0.8178238022371683, 0.8167461147806936, 0.835408453266539, 0.8520322898053144, 0.8454747683961216, 0.8636994996978711, 0.8343741102400096, 0.8536570394197361]\n[0.566563028022413, 0.4848862901265541, 0.4920029175065841, 0.5422250027559219, 0.556203199805487, 0.5660378943500952, 0.5615582396576324, 0.5413416546584848, 0.5264843241484469]\n[0.8656716417910447, 0.676056338028169, 0.7966101694915254, 0.8414634146341463, 0.8536585365853658, 0.8666666666666667, 0.8387096774193549, 0.8041237113402062, 0.7777777777777778]\n[0.9285714285714286, 0.8461538461538461, 0.5833333333333334, 0.9285714285714286, 0.875, 1.0, 1.0, 0.8571428571428571, 0.6666666666666666]\n" ], [ "#GradientBoostingClassifier, hp2\nclf = ensemble.GradientBoostingClassifier(random_state=0,max_depth=4,\n max_features='sqrt', n_estimators=1800)\n\nauroc_l = []\nauprc_l = []\ntp_80 = []\ntp_90 = []\n\nfor s in range(9):\n tr, te = create_gene_splits_filter1_kfold_noval(dataframe['gene'].values, all_cols, 9, s)\n # training input and output\n d_input = tr[0]\n d_output = tr[1]\n clf.fit(d_input, d_output) #fit models\n #test set\n xt = te[0] \n pred = clf.predict_proba(xt)\n pred = pred[:,1]\n auroc,auprc,tp_thres = classification_analysis('GradientBoostingClassifier_hpnew', s,pred,te[1])\n auroc_l.append(auroc)\n auprc_l.append(auprc)\n if tp_thres[0]!= 'na':\n tp_80.append(tp_thres[0])\n if tp_thres[1]!= 'na':\n tp_90.append(tp_thres[1])\n \nauroc_mean = statistics.mean(auroc_l)\nauroc_sd = statistics.stdev(auroc_l)\nprint('auroc_mean: '+str(auroc_mean))\nprint('auroc_sd: '+str(auroc_sd))\nauprc_mean = statistics.mean(auprc_l)\nauprc_sd = statistics.stdev(auprc_l)\nprint('auprc_mean: '+str(auprc_mean))\nprint('auprc_sd: '+str(auprc_sd))\n \ntp_80_mean = statistics.mean(tp_80)\ntp_80_sd = statistics.stdev(tp_80)\nprint('tp_80_mean: '+str(tp_80_mean))\nprint('tp_80_sd: '+str(tp_80_sd))\ntp_90_mean = statistics.mean(tp_90)\ntp_90_sd = statistics.stdev(tp_90)\nprint('tp_90_mean: '+str(tp_90_mean))\nprint('tp_90_sd: '+str(tp_90_sd))", "test: ['RPL31', 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H']\ntest: ['EEF2', 'RPS11', 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51']\ntest: ['RPL7', 'RPS9', 'KARS', 'SF3A1', 'RPL32', 'PSMB2']\ntest: ['RPS7', 'EIF4A3', 'U2AF1', 'PSMA1', 'PHB', 'POLR2D']\ntest: ['RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2', 'SUPT5H']\ntest: ['RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1']\ntest: ['RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L']\ntest: ['RPL5', 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\ntest: ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B']\nauroc_mean: 0.8402434698783054\nauroc_sd: 0.017096114410535924\nauprc_mean: 
0.5326705713945947\nauprc_sd: 0.029089488378007556\ntp_80_mean: 0.8210448665312134\ntp_80_sd: 0.08843235627451937\ntp_90_mean: 0.8753086419753087\ntp_90_sd: 0.19982845866986979\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a181a10159c9db9c2cdcca0d333f4ce8616c6a
71,752
ipynb
Jupyter Notebook
Classification/Apples_Banana_Lemon and Guava Classification.ipynb
vivekrathi14/Machine-Learning-with-Scikit-Learn-Python-3.x
16828cf26e4956166ec5c4cee7755a527ad27322
[ "MIT" ]
39
2019-10-22T11:09:52.000Z
2022-03-01T11:30:26.000Z
Classification/Apples_Banana_Lemon and Guava Classification.ipynb
gvbc42/scikit-learn-unsupervised_learning
beb5b58eae9290a417495b689e719fe03702c380
[ "MIT" ]
2
2020-09-26T05:31:29.000Z
2020-09-26T06:16:01.000Z
Classification/Apples_Banana_Lemon and Guava Classification.ipynb
gvbc42/scikit-learn-unsupervised_learning
beb5b58eae9290a417495b689e719fe03702c380
[ "MIT" ]
16
2019-11-24T10:40:14.000Z
2022-02-27T04:57:53.000Z
73.971134
13,044
0.767031
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pathlib import Path ", "_____no_output_____" ], [ "from sklearn.utils import Bunch # Array, Target, Target names and so on", "_____no_output_____" ], [ "from skimage.io import imread # to read my images\nfrom skimage.transform import resize\nimport skimage as sk", "_____no_output_____" ], [ "def generate_data(Location , dim =(64,64)):\n \"\"\"We are Generating the data with different Dimensions of data set and we are bring them into 24*24\"\"\"\n \n img_dir = Path(Location)\n print(img_dir)\n folder_dir = [directory for directory in img_dir.iterdir()]\n print(folder_dir)\n Categories = [fo.name for fo in folder_dir]\n print(Categories)\n \n images = []\n image_data = []\n target = []\n \n desc = \"Image Generation\"\n \n for i, direc in enumerate(folder_dir):\n #print(i,direc)\n for files in direc.iterdir():\n #print(files)\n img = imread(files) \n img_resize = resize(img,dim)\n images.append(img_resize)\n image_data.append(img_resize.flatten())\n target.append(i)\n\n # Coverting list data into array Formate\n flat_data = np.array(image_data)\n target = np.array(target)\n images = np.array(images)\n return Bunch(data = flat_data,\n target= target,\n target_names = Categories,\n images =images,\n desc = desc)\n \n ", "_____no_output_____" ], [ "Dataset = generate_data(\"Datasets/Fruits\")", "Datasets\\Fruits\n[WindowsPath('Datasets/Fruits/Apples'), WindowsPath('Datasets/Fruits/Banana'), WindowsPath('Datasets/Fruits/Guava'), WindowsPath('Datasets/Fruits/Lemons')]\n['Apples', 'Banana', 'Guava', 'Lemons']\n" ], [ "Dataset.keys()", "_____no_output_____" ], [ "Dataset.data.shape", "_____no_output_____" ], [ "Dataset.target.shape", "_____no_output_____" ], [ "Dataset.target", "_____no_output_____" ], [ "sns.distplot(Dataset.data)", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "x_train,x_test,y_train,y_test= train_test_split(Dataset.data,Dataset.target,test_size=0.20)", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression", "_____no_output_____" ], [ "Lr = LogisticRegression(max_iter=4000)", "_____no_output_____" ], [ "Lr.fit(x_train,y_train)", "_____no_output_____" ], [ "yhat = Lr.predict(x_test)\nyhat", "_____no_output_____" ], [ "pd.DataFrame({\"Actual\":y_test,\n \"Predications\":yhat,\n \"Same Data\":y_test==yhat})", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix,classification_report,accuracy_score,plot_confusion_matrix", "_____no_output_____" ], [ "accuracy_score(y_test,yhat)", "_____no_output_____" ], [ "plot_confusion_matrix(Lr,x_test,y_test)", "_____no_output_____" ], [ "cm = confusion_matrix(y_test,yhat)\ncm", "_____no_output_____" ], [ "sns.heatmap(cm,annot=True)", "_____no_output_____" ], [ "print(classification_report(y_test,yhat))", " precision recall f1-score support\n\n 0 0.91 1.00 0.95 10\n 1 1.00 0.75 0.86 8\n 2 0.90 0.75 0.82 12\n 3 0.79 1.00 0.88 11\n\n accuracy 0.88 41\n macro avg 0.90 0.88 0.88 41\nweighted avg 0.89 0.88 0.88 41\n\n" ], [ "from sklearn.neighbors import KNeighborsClassifier", "_____no_output_____" ], [ "knn = KNeighborsClassifier(n_neighbors=2)", "_____no_output_____" ], [ "knn.fit(x_train,y_train)", "_____no_output_____" ], [ "knn_yhat = knn.predict(x_test)", "_____no_output_____" ], [ "D = pd.DataFrame({\"Logistics_New_predication\":yhat,\n \"Knn_New_predications\":knn_yhat,\n \"Actual\":y_test})", "_____no_output_____" ], [ 
"plot_confusion_matrix(knn,x_test,knn_yhat)", "_____no_output_____" ], [ "cm= confusion_matrix(y_test,knn_yhat)", "_____no_output_____" ], [ "sns.heatmap(cm,annot=True)", "_____no_output_____" ], [ "print(classification_report(y_test,knn_yhat))", " precision recall f1-score support\n\n 0 0.91 1.00 0.95 10\n 1 1.00 0.88 0.93 8\n 2 0.62 0.83 0.71 12\n 3 0.71 0.45 0.56 11\n\n accuracy 0.78 41\n macro avg 0.81 0.79 0.79 41\nweighted avg 0.79 0.78 0.77 41\n\n" ] ], [ [ "### Task:\n1. plot AUC or ROC ", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
e7a19a292aa9f0c213c5d0376b8c093285fbdef4
16,607
ipynb
Jupyter Notebook
recitations/RC3_robustOptimalExperiments.ipynb
1ozturkbe/ROdemos
1e8f41904658a34b88848ecc45bdaa529b992f2a
[ "MIT" ]
null
null
null
recitations/RC3_robustOptimalExperiments.ipynb
1ozturkbe/ROdemos
1e8f41904658a34b88848ecc45bdaa529b992f2a
[ "MIT" ]
null
null
null
recitations/RC3_robustOptimalExperiments.ipynb
1ozturkbe/ROdemos
1e8f41904658a34b88848ecc45bdaa529b992f2a
[ "MIT" ]
null
null
null
42.256997
426
0.565846
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7a19b5500e3aaa773f79566d1b3e6aa82230b25
5,033
ipynb
Jupyter Notebook
jnotebook/test utils sparse functions.ipynb
edervishaj/spotify-recsys-challenge
4077201ac7e4ed9da433bd10a92c183614182437
[ "Apache-2.0" ]
3
2018-10-12T20:19:57.000Z
2019-12-11T01:11:38.000Z
jnotebook/test utils sparse functions.ipynb
kiminh/spotify-recsys-challenge
5e7844a77ce3c26658400f161d2d74d682f30e69
[ "Apache-2.0" ]
null
null
null
jnotebook/test utils sparse functions.ipynb
kiminh/spotify-recsys-challenge
5e7844a77ce3c26658400f161d2d74d682f30e69
[ "Apache-2.0" ]
4
2018-10-27T20:30:18.000Z
2020-10-14T07:43:27.000Z
23.193548
68
0.405126
[ [ [ "# Test functions", "_____no_output_____" ] ], [ [ "from utils.sparse import *", "_____no_output_____" ] ], [ [ "# Function list \n1. inplace_set_rows_zero_where_sum (X, op, cut) \n2. inplace_set_cols_zero_where_sum (X, op, cut)\n3. inplace_set_rows_zero (X, target_rows)\n4. inplace_set_cols_zero (X, target_cols)\n5. inplace_row_scale (X, scale)\n6. inplace_col_scale (X, scale) \n7. sum_cols (X)\n8. sum_rows (X)", "_____no_output_____" ] ], [ [ "m = sp.random(4,5,0.5).tocsr()\nm.data = np.ones(m.data.shape[0])\nprint(m.todense())\ninplace_row_scale(m,np.array([1,2,3,4]))\nprint (m.todense())", "[[0. 1. 1. 0. 0.]\n [1. 1. 0. 0. 0.]\n [0. 0. 1. 1. 1.]\n [1. 1. 1. 0. 0.]]\n[[0. 1. 1. 0. 0.]\n [2. 2. 0. 0. 0.]\n [0. 0. 3. 3. 3.]\n [4. 4. 4. 0. 0.]]\n" ], [ "m = sp.random(4,5,0.5).tocsc()\nm.data = np.ones(m.data.shape[0])\nprint(m.todense())\ninplace_col_scale(m,np.array([1,2,3,4,5]))\nprint (m.todense())", "[[1. 0. 0. 0. 0.]\n [0. 0. 1. 1. 1.]\n [0. 1. 0. 1. 1.]\n [1. 1. 0. 0. 1.]]\n[[1. 0. 0. 0. 0.]\n [0. 0. 3. 4. 5.]\n [0. 2. 0. 4. 5.]\n [1. 2. 0. 0. 5.]]\n" ], [ "m = sp.random(4,5,0.5).tocsr()\nm.data = np.ones(m.data.shape[0])\nprint(m.todense())\ninplace_set_rows_zero(m,np.array([1,3]))\nprint (m.todense())", "[[0. 0. 0. 0. 1.]\n [1. 0. 1. 1. 0.]\n [0. 0. 0. 1. 1.]\n [1. 1. 1. 0. 1.]]\n[[0. 0. 0. 0. 1.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 1. 1.]\n [0. 0. 0. 0. 0.]]\n" ], [ "m = sp.random(4,5,0.5).tocsr()\nm.data = np.ones(m.data.shape[0])\nprint(m.todense())\ninplace_set_cols_zero(m,np.array([1,3]))\nprint (m.todense())", "[[0. 0. 1. 0. 1.]\n [1. 1. 0. 1. 0.]\n [0. 1. 0. 1. 0.]\n [0. 0. 1. 1. 1.]]\n[[0. 0. 1. 0. 1.]\n [1. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 1. 0. 1.]]\n" ], [ "m = sp.random(4,5,0.5).tocsr()\nprint (sum_rows(m))\ninplace_set_rows_zero_where_sum(m, '>', 1.5)\nprint (m.todense())", "[2.06284216 0.17633125 0.86179057 1.79197257]\n[[0. 0. 0. 0. 0. ]\n [0. 0. 0. 0. 0.17633125]\n [0.01446204 0. 0.39882688 0. 0.44850165]\n [0. 0. 0. 0. 0. ]]\n" ], [ "m = sp.random(4,5,0.5).tocsr()\nprint (sum_cols(m))\ninplace_set_cols_zero_where_sum(m, '>', 1.5)\nprint (m.todense())", "[1.96108189 1.12923879 0. 1.93997106 0.40970854]\n[[0. 0.69020914 0. 0. 0.40970854]\n [0. 0. 0. 0. 0. ]\n [0. 0. 0. 0. 0. ]\n [0. 0.43902965 0. 0. 0. ]]\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
e7a1a1baaf301a2a413b43fdabb34dd825fed362
460,259
ipynb
Jupyter Notebook
regerssao_linear_simples.ipynb
RafaelJNascimento/regerssao_linear_simples
5426679aaf2af1302c4ffb110008693bb7a9d3cd
[ "MIT" ]
2
2020-08-06T14:10:45.000Z
2020-08-07T01:23:18.000Z
regerssao_linear_simples.ipynb
RafaelJNascimento/regerssao_linear_simples
5426679aaf2af1302c4ffb110008693bb7a9d3cd
[ "MIT" ]
null
null
null
regerssao_linear_simples.ipynb
RafaelJNascimento/regerssao_linear_simples
5426679aaf2af1302c4ffb110008693bb7a9d3cd
[ "MIT" ]
1
2020-08-06T02:41:21.000Z
2020-08-06T02:41:21.000Z
3,967.75
425,296
0.624416
[ [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n\n#Código original do Rafael para carregar o dataset \n\ndef load(dataset):\n with open(dataset) as ds:\n d = []\n for l in ds.readlines():\n l = l.split()\n d.append([float(value) for value in l])\n return np.array(d)\n\ndef plot(x, y, b):\n # plota os pontos atuais \n # s = np.abs(y) esse valor pode ser útil para personalizar o Scatter Plot no futuro \n\n plt.scatter(x, y, c = x, cmap='plasma', marker = \"o\", s = 10)\n plt.plot(x, b, color='red')\n \n plt.axvline(0, c=(.5, .5, .5), ls = '--')\n plt.axhline(0, c=(.5, .5, .5), ls = '--')\n \n plt.colorbar()\n\n #legenda e título do gráfico\n plt.title('Modelos de regressão Linear')\n plt.xlabel('velocidade do vento – m/s')\n plt.ylabel('variável de saída: potência gerada – kWatts')\n plt.savefig('img/update.jpg', dpi=300)\n plt.show()\n \n \n\ndef main():\n base = load('aerogerador.dat')\n # separa as colunas do vetor\n x, y = base[:,0], base[:,1]\n n = len(y)\n\n # valor de beta_1 e beta_0\n beta_1 = (np.sum([x[i]*y[i] for i in range(0, len(x))]) - (1/n) * np.sum(y) * np.sum(x)) / (np.sum(x**2) - (1/n) * np.sum(x)**2)\n\n beta_0 = np.mean(y) - beta_1 * np.mean(x)\n\n # cria a equacao da reta\n y_chap = beta_0 + beta_1 * x\n\n\n print('Beta 1: {}\\nBeta 0: {}\\n'.format(beta_1, beta_0))\n print('\\n')\n\n plot(x,y,y_chap)\n\n\nif __name__ == \"__main__\":\n main() \n\n", "Beta 1: 56.443855448050236\nBeta 0: -217.69027909512027\n\n\n\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
e7a1a288638e8eba1e679126bcb4e8f70415878d
35,460
ipynb
Jupyter Notebook
inflearn_machine_learning/pandas/pandas_pivot_crosstab.ipynb
Junhojuno/TIL
c252b62b94dc519ccd528c2cd8b638e85adee89c
[ "MIT" ]
null
null
null
inflearn_machine_learning/pandas/pandas_pivot_crosstab.ipynb
Junhojuno/TIL
c252b62b94dc519ccd528c2cd8b638e85adee89c
[ "MIT" ]
null
null
null
inflearn_machine_learning/pandas/pandas_pivot_crosstab.ipynb
Junhojuno/TIL
c252b62b94dc519ccd528c2cd8b638e85adee89c
[ "MIT" ]
3
2018-05-23T03:33:41.000Z
2018-07-09T14:34:15.000Z
32.80296
115
0.295149
[ [ [ "## Pivot table\n- excel에서 보던 것\n- index축은 groupby와 동일\n- column에 추가로 labeling값을 추가하여,\n- Value에 numeric type 값을 aggregation하는 형태", "_____no_output_____" ] ], [ [ "import dateutil\n\ndf_phone = pd.read_csv(\"code/ch5/data/phone_data.csv\")\ndf_phone['date'] = df_phone['date'].apply(dateutil.parser.parse, dayfirst=True)\ndf_phone.tail()", "_____no_output_____" ], [ "df_phone.pivot_table(['duration'], index=['month','item'], columns=['network'], fill_value=0, aggfunc='sum')", "_____no_output_____" ] ], [ [ "## Crosstab\n- 두 컬럼의 교차 빈도, 비율, 덧셈 등을 구할 때 사용\n- Pivot table의 특수한 형태\n- User-Item Rating Matrix 등을 만들 때 사용가능", "_____no_output_____" ] ], [ [ "df_movie = pd.read_csv(\"code/ch5/data/movie_rating.csv\")\ndf_movie.tail()", "_____no_output_____" ], [ "# 평론가의 영화별 평점\npd.crosstab(values=df_movie.rating, index=df_movie.critic, columns=df_movie.title, aggfunc='first').fillna(0)", "_____no_output_____" ], [ "# 이걸 groupby로 만들어보자.1\ndf_movie.groupby(['critic','title'])['rating'].first().unstack().fillna(0)", "_____no_output_____" ], [ "# 이걸 groupby로 만들어보자.2\ndf_movie.groupby(['critic','title']).agg({'rating' : 'first'}).unstack().fillna(0)", "_____no_output_____" ], [ "# 이걸 pivot table로 만들어보자\ndf_movie.pivot_table(values='rating', index='critic', columns='title', aggfunc='first', fill_value=0)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e7a1a58b21d3e573b49efb63eabb374852a19b6e
3,249
ipynb
Jupyter Notebook
01 Graph Theory/Python/notebook/kleinnijenhuis_tree_groups.ipynb
Sultanow/collatz
d8a5137af508be19da371fff787c114f1b5185c3
[ "CC-BY-4.0" ]
2
2021-04-01T15:12:10.000Z
2021-04-01T15:54:55.000Z
01 Graph Theory/Python/notebook/kleinnijenhuis_tree_groups.ipynb
Sultanow/collatz
d8a5137af508be19da371fff787c114f1b5185c3
[ "CC-BY-4.0" ]
null
null
null
01 Graph Theory/Python/notebook/kleinnijenhuis_tree_groups.ipynb
Sultanow/collatz
d8a5137af508be19da371fff787c114f1b5185c3
[ "CC-BY-4.0" ]
1
2021-05-06T20:44:07.000Z
2021-05-06T20:44:07.000Z
26.85124
79
0.461373
[ [ [ "from typing import Union, List\nimport networkx as nx\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nfrom networkx.drawing.nx_agraph import graphviz_layout\nfrom abc import ABC, abstractmethod\nimport numpy as np", "_____no_output_____" ], [ "def down(n: int) -> int:\n if n % 24 == 5:\n return (n-1)/4\n elif n % 96 == 85:\n return (n-5)/16\n else:\n return -1\n\ndef right_prune1(n: int) -> Union[int, int]:\n d = down(n)\n mod_case = -1\n if (2*d)%18 == 4 and (d%18) in [11,17] :\n return 1, (8*d-1)/3\n elif (4*d)%18 == 4 and (d%18) in [11,13]:\n return 2, (16*d-1)/3\n elif (2*d)%18 == 16 and (d%18) in [11,17] or d%18 == 5:\n return 3, (32*d-1)/3\n elif (4*d)%18 == 16 and (d%18) in [1,13] or d%18 == 7:\n return 4, (64*d-1)/3\n else:\n return 0, -1\n\ndef right_prune2(n: int) -> Union[int, int]:\n dd = down(down(n))\n mod_case = -1\n if (2*d)%18 == 4 and (d%18) in [11,17] :\n return 1, (32*dd-1)/3\n elif (4*d)%18 == 4 and (d%18) in [11,13]:\n return 2, (64*dd-1)/3\n elif (2*d)%18 == 16 and (d%18) in [11,17] or d%18 == 5:\n return 3, (128*dd-1)/3\n elif (4*d)%18 == 16 and (d%18) in [1,13] or d%18 == 7:\n return 4, (256*dd-1)/3\n elif (4*d)%18 == 16 and (d%18) in [1,13] or d%18 == 7:\n return 4, (512*dd-1)/3\n elif (4*d)%18 == 16 and (d%18) in [1,13] or d%18 == 7:\n return 4, (1024*dd-1)/3\n else:\n return 0, -1", "_____no_output_____" ], [ "#print(down(53))#13\n#print(down(1109))#277\n#print(down(181))#11\n\n#case 2 never happens\nprint(right_prune1(85))#53\nprint(right_prune1(341))#1813\nprint(right_prune1(181))#29\nprint(right_prune1(1813))#1205\nprint(right_prune1(53))#1205\n", "(3, 53.0)\n(4, 1813.0)\n(1, 29.0)\n(3, 1205.0)\n(4, 277.0)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
e7a1a7d5e74f2f4f4e6c56684f3978266bab1a08
12,693
ipynb
Jupyter Notebook
legacy/MNIST/lab.ipynb
MaybeS/mnist
d0aeafce97d7308dc84adbb6ad8e547776db0cd5
[ "MIT" ]
8
2020-07-17T00:30:20.000Z
2021-06-15T07:14:55.000Z
legacy/MNIST/lab.ipynb
MaybeS/mnist
d0aeafce97d7308dc84adbb6ad8e547776db0cd5
[ "MIT" ]
null
null
null
legacy/MNIST/lab.ipynb
MaybeS/mnist
d0aeafce97d7308dc84adbb6ad8e547776db0cd5
[ "MIT" ]
2
2019-07-02T04:20:21.000Z
2019-07-16T06:51:13.000Z
39.419255
92
0.530214
[ [ [ "## MNIST Simple DEMO", "_____no_output_____" ] ], [ [ "import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms", "_____no_output_____" ], [ "class Arguments:\n batch = 64\n test_batch = 512\n epochs = 10\n lr = .01\n momentum = .5\n seed = 42\n log_interval = 100", "_____no_output_____" ], [ "args = Arguments()", "_____no_output_____" ], [ "class Network(nn.Module):\n def __init__(self):\n super(Network, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4*4*50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4*4*50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)", "_____no_output_____" ], [ "def train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss, correct = 0, 0\n\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))", "_____no_output_____" ], [ "torch.manual_seed(args.seed)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nkwargs = {\n 'num_workers': 1,\n 'pin_memory': True\n} if device.type == 'cuda' else {}\n\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch, shuffle=True, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch, shuffle=True, **kwargs)\n\nmodel = Network().to(device)\noptimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\nfor epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n test(args, model, device, test_loader)\n\ntorch.save(model.state_dict(), \"mnist_cnn.pt\")", "Train Epoch: 1 [0/60000 (0%)]\tLoss: 2.309220\nTrain Epoch: 1 [6400/60000 (11%)]\tLoss: 0.545335\nTrain Epoch: 1 [12800/60000 (21%)]\tLoss: 0.417650\nTrain Epoch: 1 [19200/60000 (32%)]\tLoss: 0.353491\nTrain Epoch: 1 [25600/60000 (43%)]\tLoss: 0.306972\nTrain Epoch: 1 [32000/60000 (53%)]\tLoss: 0.133229\nTrain Epoch: 1 [38400/60000 (64%)]\tLoss: 0.188936\nTrain Epoch: 1 [44800/60000 (75%)]\tLoss: 0.070623\nTrain Epoch: 1 [51200/60000 (85%)]\tLoss: 0.258176\nTrain Epoch: 1 [57600/60000 (96%)]\tLoss: 0.040762\n\nTest set: Average loss: 0.1040, Accuracy: 9675/10000 (97%)\n\nTrain Epoch: 2 [0/60000 (0%)]\tLoss: 0.235796\nTrain Epoch: 2 [6400/60000 (11%)]\tLoss: 0.049525\nTrain Epoch: 2 [12800/60000 (21%)]\tLoss: 0.077299\nTrain Epoch: 2 [19200/60000 (32%)]\tLoss: 0.058649\nTrain Epoch: 2 [25600/60000 (43%)]\tLoss: 0.162579\nTrain Epoch: 2 [32000/60000 (53%)]\tLoss: 0.043902\nTrain Epoch: 2 [38400/60000 (64%)]\tLoss: 0.037764\nTrain Epoch: 2 [44800/60000 (75%)]\tLoss: 0.007759\nTrain Epoch: 2 [51200/60000 (85%)]\tLoss: 0.125971\nTrain Epoch: 2 [57600/60000 (96%)]\tLoss: 0.033037\n\nTest set: Average loss: 0.0616, Accuracy: 9805/10000 (98%)\n\nTrain Epoch: 3 [0/60000 (0%)]\tLoss: 0.081351\nTrain Epoch: 3 [6400/60000 (11%)]\tLoss: 0.088761\nTrain Epoch: 3 [12800/60000 (21%)]\tLoss: 0.095073\nTrain Epoch: 3 [19200/60000 (32%)]\tLoss: 0.091261\nTrain Epoch: 3 [25600/60000 (43%)]\tLoss: 0.160844\nTrain Epoch: 3 [32000/60000 (53%)]\tLoss: 0.034395\nTrain Epoch: 3 [38400/60000 (64%)]\tLoss: 0.010957\nTrain Epoch: 3 [44800/60000 (75%)]\tLoss: 0.033368\nTrain Epoch: 3 [51200/60000 (85%)]\tLoss: 0.013109\nTrain Epoch: 3 [57600/60000 (96%)]\tLoss: 0.070705\n\nTest set: Average loss: 0.0484, Accuracy: 9847/10000 (98%)\n\nTrain Epoch: 4 [0/60000 (0%)]\tLoss: 0.019743\nTrain Epoch: 4 [6400/60000 (11%)]\tLoss: 0.040987\nTrain Epoch: 4 [12800/60000 (21%)]\tLoss: 0.061202\nTrain Epoch: 4 [19200/60000 (32%)]\tLoss: 0.007646\nTrain Epoch: 4 [25600/60000 (43%)]\tLoss: 0.011820\nTrain Epoch: 4 [32000/60000 (53%)]\tLoss: 0.022924\nTrain Epoch: 4 [38400/60000 (64%)]\tLoss: 0.044619\nTrain Epoch: 4 [44800/60000 (75%)]\tLoss: 0.015211\nTrain Epoch: 4 [51200/60000 (85%)]\tLoss: 0.016549\nTrain Epoch: 4 [57600/60000 (96%)]\tLoss: 0.069062\n\nTest set: Average loss: 0.0358, Accuracy: 9887/10000 (99%)\n\nTrain Epoch: 5 [0/60000 (0%)]\tLoss: 0.036325\nTrain Epoch: 5 [6400/60000 (11%)]\tLoss: 0.068640\nTrain Epoch: 5 [12800/60000 (21%)]\tLoss: 0.010548\nTrain Epoch: 5 
[19200/60000 (32%)]\tLoss: 0.029485\nTrain Epoch: 5 [25600/60000 (43%)]\tLoss: 0.025582\nTrain Epoch: 5 [32000/60000 (53%)]\tLoss: 0.060043\nTrain Epoch: 5 [38400/60000 (64%)]\tLoss: 0.013400\nTrain Epoch: 5 [44800/60000 (75%)]\tLoss: 0.011863\nTrain Epoch: 5 [51200/60000 (85%)]\tLoss: 0.067035\nTrain Epoch: 5 [57600/60000 (96%)]\tLoss: 0.056927\n\nTest set: Average loss: 0.0344, Accuracy: 9884/10000 (99%)\n\nTrain Epoch: 6 [0/60000 (0%)]\tLoss: 0.014376\nTrain Epoch: 6 [6400/60000 (11%)]\tLoss: 0.006622\nTrain Epoch: 6 [12800/60000 (21%)]\tLoss: 0.020543\nTrain Epoch: 6 [19200/60000 (32%)]\tLoss: 0.035187\nTrain Epoch: 6 [25600/60000 (43%)]\tLoss: 0.038597\nTrain Epoch: 6 [32000/60000 (53%)]\tLoss: 0.016477\nTrain Epoch: 6 [38400/60000 (64%)]\tLoss: 0.021265\nTrain Epoch: 6 [44800/60000 (75%)]\tLoss: 0.034409\nTrain Epoch: 6 [51200/60000 (85%)]\tLoss: 0.012662\nTrain Epoch: 6 [57600/60000 (96%)]\tLoss: 0.044574\n\nTest set: Average loss: 0.0375, Accuracy: 9879/10000 (99%)\n\nTrain Epoch: 7 [0/60000 (0%)]\tLoss: 0.011418\nTrain Epoch: 7 [6400/60000 (11%)]\tLoss: 0.008460\nTrain Epoch: 7 [12800/60000 (21%)]\tLoss: 0.024678\nTrain Epoch: 7 [19200/60000 (32%)]\tLoss: 0.021109\nTrain Epoch: 7 [25600/60000 (43%)]\tLoss: 0.044059\nTrain Epoch: 7 [32000/60000 (53%)]\tLoss: 0.012801\nTrain Epoch: 7 [38400/60000 (64%)]\tLoss: 0.002572\nTrain Epoch: 7 [44800/60000 (75%)]\tLoss: 0.008726\nTrain Epoch: 7 [51200/60000 (85%)]\tLoss: 0.032433\nTrain Epoch: 7 [57600/60000 (96%)]\tLoss: 0.086093\n\nTest set: Average loss: 0.0300, Accuracy: 9900/10000 (99%)\n\nTrain Epoch: 8 [0/60000 (0%)]\tLoss: 0.005734\nTrain Epoch: 8 [6400/60000 (11%)]\tLoss: 0.011664\nTrain Epoch: 8 [12800/60000 (21%)]\tLoss: 0.083290\nTrain Epoch: 8 [19200/60000 (32%)]\tLoss: 0.014290\nTrain Epoch: 8 [25600/60000 (43%)]\tLoss: 0.018174\nTrain Epoch: 8 [32000/60000 (53%)]\tLoss: 0.013148\nTrain Epoch: 8 [38400/60000 (64%)]\tLoss: 0.010231\nTrain Epoch: 8 [44800/60000 (75%)]\tLoss: 0.054055\nTrain Epoch: 8 [51200/60000 (85%)]\tLoss: 0.003165\nTrain Epoch: 8 [57600/60000 (96%)]\tLoss: 0.023597\n\nTest set: Average loss: 0.0319, Accuracy: 9884/10000 (99%)\n\nTrain Epoch: 9 [0/60000 (0%)]\tLoss: 0.056386\nTrain Epoch: 9 [6400/60000 (11%)]\tLoss: 0.022121\nTrain Epoch: 9 [12800/60000 (21%)]\tLoss: 0.024276\nTrain Epoch: 9 [19200/60000 (32%)]\tLoss: 0.014277\nTrain Epoch: 9 [25600/60000 (43%)]\tLoss: 0.027978\nTrain Epoch: 9 [32000/60000 (53%)]\tLoss: 0.007992\nTrain Epoch: 9 [38400/60000 (64%)]\tLoss: 0.018210\nTrain Epoch: 9 [44800/60000 (75%)]\tLoss: 0.023663\nTrain Epoch: 9 [51200/60000 (85%)]\tLoss: 0.005544\nTrain Epoch: 9 [57600/60000 (96%)]\tLoss: 0.005737\n\nTest set: Average loss: 0.0281, Accuracy: 9906/10000 (99%)\n\nTrain Epoch: 10 [0/60000 (0%)]\tLoss: 0.011280\nTrain Epoch: 10 [6400/60000 (11%)]\tLoss: 0.029055\nTrain Epoch: 10 [12800/60000 (21%)]\tLoss: 0.007866\nTrain Epoch: 10 [19200/60000 (32%)]\tLoss: 0.053182\nTrain Epoch: 10 [25600/60000 (43%)]\tLoss: 0.002478\nTrain Epoch: 10 [32000/60000 (53%)]\tLoss: 0.001874\nTrain Epoch: 10 [38400/60000 (64%)]\tLoss: 0.041121\nTrain Epoch: 10 [44800/60000 (75%)]\tLoss: 0.004530\nTrain Epoch: 10 [51200/60000 (85%)]\tLoss: 0.038643\nTrain Epoch: 10 [57600/60000 (96%)]\tLoss: 0.008336\n\nTest set: Average loss: 0.0264, Accuracy: 9910/10000 (99%)\n\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
e7a1ae4a383e4b873e7b79fdf4240a05e9fa503a
136,831
ipynb
Jupyter Notebook
notebooks/pa-ws-simple.ipynb
ivandasch/ignite-python-thin-client-benchmark
4a2a174595ab7e3afe17d7f18740d1792d20cf07
[ "Apache-2.0" ]
null
null
null
notebooks/pa-ws-simple.ipynb
ivandasch/ignite-python-thin-client-benchmark
4a2a174595ab7e3afe17d7f18740d1792d20cf07
[ "Apache-2.0" ]
null
null
null
notebooks/pa-ws-simple.ipynb
ivandasch/ignite-python-thin-client-benchmark
4a2a174595ab7e3afe17d7f18740d1792d20cf07
[ "Apache-2.0" ]
null
null
null
592.341991
34,748
0.940094
[ [ [ "import simplejson as json\n\nwith open('0001_bytearray_master.json') as f:\n byte_array_data = json.load(f)\n\nwith open('0002_binary_master.json') as f:\n bin_obj_data = json.load(f)", "_____no_output_____" ], [ "from collections import defaultdict\n\n\ndef prepare_data(data, name_pattern='', max_threshold=102400, min_threshold=0):\n ret = {'simple': defaultdict(dict), 'partition_aware': defaultdict(dict)}\n\n for bench in data['benchmarks']:\n params = bench['param'].split('-')\n name = bench['name']\n if name_pattern not in name:\n continue\n \n batch_sz = int(params[2]) if len(params) == 3 else 0\n value_sz = int(params[1])\n\n if not (min_threshold < value_sz < max_threshold):\n continue\n \n if 'async' in name:\n continue\n \n median = bench['stats']['median'] * 1000000\n ret[params[0]][value_sz] = median\n return ret", "_____no_output_____" ], [ "import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot(pa_data, simple_data, title='',yscale='linear', time_scale=1, ylabel='us'):\n labels = []\n for k in pa_data.keys():\n if k // (1024 * 1024) > 0 or k == 1024 * 1024:\n labels.append(f'{k / (1024 * 1024)}Mb')\n elif k // 1024 > 0:\n labels.append(f'{k / 1024}Kb')\n else:\n labels.append(k)\n\n \n pa_means = [v / time_scale for v in pa_data.values()]\n simple_means = [v / time_scale for v in simple_data.values()]\n \n\n x = np.arange(len(labels))\n width = 0.3\n\n fig, ax = plt.subplots()\n plt.rcParams[\"figure.figsize\"] = (15, 12)\n plt.yscale(yscale)\n \n rects = []\n \n rects.append(ax.bar(x - width * 0.5, simple_means, width, label='simple'))\n rects.append(ax.bar(x + width * 0.5, pa_means, width, label='partition_aware'))\n\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend()\n\n for rect in rects:\n ax.bar_label(rect, padding=3)\n\n\n fig.tight_layout()\n\n plt.show()", "_____no_output_____" ], [ "get_data = prepare_data(byte_array_data, 'get', max_threshold=20 * 1024 * 1024)\n\nplot(get_data['partition_aware'], get_data['simple'], 'Get bytearrays', yscale='log', ylabel='ms', time_scale=1000)", "_____no_output_____" ], [ "put_data = prepare_data(byte_array_data, 'put', max_threshold=20 * 1024 * 1024)\n\nplot(put_data['partition_aware'], put_data['simple'], 'Put bytearrays', yscale='log', ylabel='ms', time_scale=1000)", "_____no_output_____" ], [ "get_data = prepare_data(bin_obj_data, 'get', max_threshold=20 * 1024 * 1024)\n\nplot(get_data['partition_aware'], get_data['simple'], 'Get BinaryObject', yscale='log', ylabel='ms', time_scale=1000)", "_____no_output_____" ], [ "put_data = prepare_data(bin_obj_data, 'put', max_threshold=20 * 1024 * 1024)\n\nplot(put_data['partition_aware'], put_data['simple'], 'Put BinaryObject', yscale='log', ylabel='ms', time_scale=1000)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7a1b65e2816d0ad42e0c09f87ded3054feff90e
86,701
ipynb
Jupyter Notebook
Project 1.ipynb
notnasobe666/BlackHatGang
e6ec842cfaedaa3752003d7dc1d50ef66091fe6d
[ "MIT" ]
null
null
null
Project 1.ipynb
notnasobe666/BlackHatGang
e6ec842cfaedaa3752003d7dc1d50ef66091fe6d
[ "MIT" ]
null
null
null
Project 1.ipynb
notnasobe666/BlackHatGang
e6ec842cfaedaa3752003d7dc1d50ef66091fe6d
[ "MIT" ]
null
null
null
246.309659
46,522
0.732771
[ [ [ "<h1>Project 0: Inaugural project </h1>", "_____no_output_____" ], [ "\n<b>Labor Supply Problem</b>", "_____no_output_____" ], [ "Following labor supply problem is given: \n\n$$\nc^*,l^* = log(c) - v \\frac{l^{1+\\frac{1}{\\epsilon}}}{1+\\frac{1}{\\epsilon}}\n\\\\\nx = m + wl - [\\tau_0wl+\\tau_1 \\max(wl-\\kappa,0)]\\\\\n\nc \\in [0,x]\\\\\nl \\in [0,1]\\\\\n\n$$\n\nWhere: \nc is consumption,\nl is labor supply,\nm is cash-on-hand, \nw is the wage rate, \n$$t_0$$ is the standard labor income tax\n$$t_1$$ is the top bracket labor income tax,\nk is the cut-off of top labor income bracket\nx is total resources\nv scales disutility of labor \nE is the Frisch elasticity of labor supply\nutility is monotonically increasing in consumption, which implies $$c^* = x$$", "_____no_output_____" ], [ "<h2>Question 1</h2>", "_____no_output_____" ] ], [ [ "# All used packages are imported\n\nimport numpy as np\nimport sympy as sm \nfrom scipy import optimize\n\n", "_____no_output_____" ], [ "t0 = sm.symbols('t_0')\nt1 = sm.symbols('t_1')", "_____no_output_____" ], [ "\n\n", "_____no_output_____" ], [ "m = 1 #cash-on-hand\nv = 10 #disutility of labor\ne = 0.3 #elasticity of labor supply\nt0 = 0.4 #standard labor income tax\nt1 = 0.1 #top bracket labor income tax\nk = 0.4 #cut-off for top labor income tax\n\n\n# Defining utility\n\ndef utility(c,v,l,e):\n u = np.log(c) - v*(l**(1+1/e)/(1+1/e))\n return u\n\n# Defining constraint\n\ndef constraint(m,w,l,t0,t1,k):\n x = m + w*l - (t0*w*l + t1*np.max(w*l-k,0))\n return x\n\n\ndef consumption(l,w,e,v,t0,t1,k):\n c = constraint(m,w,l,t0,t1,k)\n return -utility(c,v,l,e)\n\n\ndef optimizer(w,e,v,t0,t1,k,m):\n res = optimize.minimize_scalar(\n consumption, method='bounded',\n bounds=(0,1), args=(w,e,v,t0,t1,k))\n \n labor_star = res.x\n cons_star = constraint(m,w,labor_star,t0,t1,k)\n utility_star = utility(cons_star,v,labor_star,e)\n \n return labor_star,cons_star,utility_star\n\nlabor_star = optimizer(0.5,e,v,t0,t1,k,m)[0]\ncons_star = optimizer(0.5,e,v,t0,t1,k,m)[1]\nu_star = optimizer(0.5,e,v,t0,t1,k,m)[2]\n\nprint('labour supply is:' + str(labor_star))\nprint('consumption is:' + str(cons_star))\nprint('utility:' + str(u_star))", "labour supply is:0.31961536193545265\nconsumption is:1.119903840483863\nutility:0.09677772523865749\n" ] ], [ [ "<h2>Question 2</h2>", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\n\nplt.style.use('grayscale')\n\n# Plot l_star and c_star with w going from 0.5 to 1.5\n# The definitions are defined - the used packages is defined above\nN = 10000\nw_vector = np.linspace(0.5,1.5,num=N)\nc_optimal = np.empty(N)\nl_optimal = np.empty(N)\n\n# a loop is generated to test the range of W \n\nfor i, w in enumerate(w_vector):\n optimization = optimizer(w,e,v,t0,t1,k,m)\n l_optimal[i]=optimization[0]\n c_optimal[i]=optimization[1]\n\nfig = plt.figure(figsize=(10,4))\n\n# Left plot\naxis_left = fig.add_subplot(1,2,1)\naxis_left.plot(w_vector,l_optimal)\naxis_left.set_title('Optimal labor supply given w')\naxis_left.set_xlabel('$w$')\naxis_left.set_ylabel('$l$')\naxis_left.grid(True)\n\n# Right plot \naxis_right = fig.add_subplot(1,2,2)\naxis_right.plot(w_vector,c_optimal)\naxis_right.set_title('Optimal consumption given w')\naxis_right.set_xlabel('$w1$')\naxis_right.set_ylabel('$c$')\naxis_right.grid(True)\n\nplt.show\n", "_____no_output_____" ] ], [ [ "\n<h2>Question 3</h2>", "_____no_output_____" ] ], [ [ "# Calculate the tax revenue\n\ntax_revenue = np.sum( t0 * w_vector * l_optimal + t1 * np.max( w_vector * l_optimal 
- k ,0 ))\nprint('Total tax revenue is: ' + str(tax_revenue))\n", "Total tax revenue is: 1775.3896759006836\n" ] ], [ [ "\n<h2>Question 4</h2>", "_____no_output_____" ] ], [ [ "# How does the tax revenue change when e = 0.1? \n# New epsilon is defined\ne_new = 0.1\nl_optimal_e_new = np.empty(N)\n\n# Same loop is used as above, but only the new labor\n# supply is calculated, as consumption isn't included\n# in the tax revenue formula\nfor i, w in enumerate(w_vector):\n    optimization = optimizer(w,e_new,v,t0,t1,k,m)\n    l_optimal_e_new[i]=optimization[0]\n\n# then the new tax revenue can be calculated\ntax_revenue_e_new = np.sum( t0 * w_vector * l_optimal_e_new + t1 * np.max( w_vector * l_optimal_e_new - k ,0))\nprint('New total tax revenue: '+str(tax_revenue_e_new))\n\n# Thus the difference in tax revenue can be calculated as\nprint('The difference in tax revenue is: '+ str(tax_revenue_e_new-tax_revenue))", "New total tax revenue: 3578.900497991557\nThe difference in tax revenue is: 1803.5108220908735\n" ] ], [ [ "\n<h2>Question 5</h2>", "_____no_output_____" ] ], [ [ "# Optimize the tax system: choose t0, t1 and k that maximize total tax revenue\n# note: the bounds on (t0, t1, k) below are an assumed search range\n\ndef total_revenue(tax_params):\n    t0_, t1_, k_ = tax_params\n    l_opt = np.array([optimizer(w, e, v, t0_, t1_, k_, m)[0] for w in w_vector])\n    return np.sum(t0_ * w_vector * l_opt + t1_ * np.maximum(w_vector * l_opt - k_, 0))\n\n# maximize revenue by minimizing its negative\nres = optimize.minimize(lambda tax: -total_revenue(tax), x0=[t0, t1, k],\n                        bounds=((0, 1), (0, 1), (0, 2)), method='L-BFGS-B')\nt0_optimal, t1_optimal, k_optimal = res.x\n\nprint('Optimal t0 is: ' + str(t0_optimal))\nprint('Optimal t1 is: ' + str(t1_optimal))\nprint('Optimal k is: ' + str(k_optimal))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7a1d2ee5cc040eaa5c4d317eee5419ad449aa0d
20,159
ipynb
Jupyter Notebook
TicTacToe_Agent.ipynb
Chiragchhillar1/ML-TicTacToe
31190477e63f506602837354b07acc3cd384848a
[ "MIT" ]
null
null
null
TicTacToe_Agent.ipynb
Chiragchhillar1/ML-TicTacToe
31190477e63f506602837354b07acc3cd384848a
[ "MIT" ]
null
null
null
TicTacToe_Agent.ipynb
Chiragchhillar1/ML-TicTacToe
31190477e63f506602837354b07acc3cd384848a
[ "MIT" ]
null
null
null
31.998413
1,003
0.53212
[ [ [ "## Tic-Tac-Toe Agent\n​\nIn this notebook, you will learn to build an RL agent (using Q-learning) that learns to play Numerical Tic-Tac-Toe with odd numbers. The environment is playing randomly with the agent, i.e. its strategy is to put an even number randomly in an empty cell. The following is the layout of the notebook:\n - Defining epsilon-greedy strategy\n - Tracking state-action pairs for convergence\n - Define hyperparameters for the Q-learning algorithm\n - Generating episode and applying Q-update equation\n - Checking convergence in Q-values", "_____no_output_____" ], [ "#### Importing libraries\nWrite the code to import Tic-Tac-Toe class from the environment file", "_____no_output_____" ] ], [ [ "# from <TC_Env> import <TicTacToe> - import your class from environment file\nfrom TCGame_Env import TicTacToe\nimport collections\nimport numpy as np\nimport random\nimport pickle\nimport time\nfrom matplotlib import pyplot as plt\nfrom tqdm import tqdm", "_____no_output_____" ], [ "# Function to convert state array into a string to store it as keys in the dictionary\n# states in Q-dictionary will be of form: x-4-5-3-8-x-x-x-x\n# x | 4 | 5\n# ----------\n# 3 | 8 | x\n# ----------\n# x | x | x\n\ndef Q_state(state):\n\n return ('-'.join(str(e) for e in state)).replace('nan','x')", "_____no_output_____" ], [ "# Defining a function which will return valid (all possible actions) actions corresponding to a state\n# Important to avoid errors during deployment.\n\ndef valid_actions(state):\n\n valid_Actions = []\n \n valid_Actions = [i for i in env.action_space(state)[0]] ###### -------please call your environment as env\n return valid_Actions", "_____no_output_____" ], [ "# Defining a function which will add new Q-values to the Q-dictionary. \ndef add_to_dict(state):\n state1 = Q_state(state)\n \n valid_act = valid_actions(state)\n if state1 not in Q_dict.keys():\n for action in valid_act:\n Q_dict[state1][action]=0", "_____no_output_____" ] ], [ [ "#### Epsilon-greedy strategy - Write your code here\n\n(you can build your epsilon-decay function similar to the one given at the end of the notebook)", "_____no_output_____" ] ], [ [ "# Defining epsilon-greedy policy. 
You can choose any function epsilon-decay strategy\ndef epsilon_greedy(state, time):\n max_epsilon = 1.0\n min_epsilon = 0.001\n\n epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-0.000001*time)\n z = np.random.random() \n if z > epsilon:\n action = max(Q_dict[Q_state(state)],key=Q_dict[Q_state(state)].get)\n else:\n action = random.sample(valid_actions(state),1)[0] \n \n return action", "_____no_output_____" ] ], [ [ "#### Tracking the state-action pairs for checking convergence - write your code here", "_____no_output_____" ] ], [ [ "# Initialise Q_dictionary as 'Q_dict' and States_tracked as 'States_track' (for convergence)\nQ_dict = collections.defaultdict(dict)\n\nStates_track = collections.defaultdict(dict) \n \nprint(len(Q_dict))\nprint(len(States_track))", "_____no_output_____" ], [ "# Initialise states to be tracked\ndef initialise_tracking_states():\n sample_q_values = [('x-3-x-x-x-6-x-x-x',(0,1)),\n ('x-1-x-x-x-x-8-x-x',(2,9)),\n ('x-x-x-x-6-x-x-x-5',(2,7)),\n ('x-x-x-x-9-x-6-x-x',(1,7)),\n ('x-5-x-2-x-x-4-7-x',(0,9)),\n ('9-x-5-x-x-x-8-x-4',(1,3)),\n ('2-7-x-x-6-x-x-3-x',(8,5)),\n ('9-x-x-x-x-2-x-x-x',(2,5)),\n ('x-x-7-x-x-x-x-x-2',(1,5)),\n ('5-x-x-x-x-6-x-x-x',(4,9)),\n ('4-x-x-6-x-x-3-1-x',(8,5)),\n ('5-x-8-x-x-6-3-x-x',(3,1)),\n ('x-6-5-x-2-x-x-3-x',(0,7)),\n ('7-x-5-x-2-x-x-x-6',(1,3))]\n \n for q_values in sample_q_values:\n state = q_values[0]\n action = q_values[1]\n States_track[state][action] = []", "_____no_output_____" ], [ "#Defining a function to save the Q-dictionary as a pickle file\n\ndef save_obj(obj, name ):\n with open(name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "_____no_output_____" ], [ "def save_tracking_states():\n for state in States_track.keys():\n for action in States_track[state].keys():\n if state in Q_dict and action in Q_dict[state]:\n States_track[state][action].append(Q_dict[state][action])", "_____no_output_____" ], [ "initialise_tracking_states()", "_____no_output_____" ] ], [ [ "#### Define hyperparameters ---write your code here", "_____no_output_____" ] ], [ [ "EPISODES = 6000000\nLR = 0.20\nGAMMA = 0.8\nthreshold = 2540\ncheckpoint_print_episodes = 600000", "_____no_output_____" ] ], [ [ "### Q-update loop ---write your code here", "_____no_output_____" ] ], [ [ "start_time = time.time()\n\nq_track={}\nq_track['x-3-x-x-x-6-x-x-x']=[]\nq_track['x-1-x-x-x-x-8-x-x']=[]\nq_track['x-x-x-x-6-x-x-x-5']=[]\nq_track['x-x-x-x-9-x-6-x-x']=[]\nq_track['x-5-x-2-x-x-4-7-x']=[]\nq_track['9-x-5-x-x-x-8-x-4']=[]\nq_track['2-7-x-x-6-x-x-3-x']=[]\nq_track['9-x-x-x-x-2-x-x-x']=[]\nq_track['x-x-7-x-x-x-x-x-2']=[]\nq_track['5-x-x-x-x-6-x-x-x']=[]\nq_track['4-x-x-6-x-x-3-1-x']=[]\nq_track['5-x-8-x-x-6-3-x-x']=[]\nq_track['x-6-5-x-2-x-x-3-x']=[]\nq_track['7-x-5-x-2-x-x-x-6']=[] \n \nagent_won_count = 0\nenv_won_count = 0\ntie_count = 0\n\nfor episode in range(EPISODES):\n ##### Start writing your code from the next line\n env = TicTacToe()\n \n ## Initalizing parameter for the episodes\n reward=0\n curr_state = env.state\n add_to_dict(curr_state)\n is_terminal = False\n total_reward = 0\n \n while not(is_terminal):\n curr_action = epsilon_greedy(curr_state, episode)\n \n if Q_state(curr_state) in q_track.keys():\n q_track[Q_state(curr_state)].append(curr_action)\n\n next_state,reward,is_terminal, msg = env.step(curr_state,curr_action) \n\n curr_lookup = Q_state(curr_state)\n next_lookup = Q_state(next_state)\n\n if is_terminal:\n q_value_max = 0\n \n # Tracking the count of games won by agent and environment\n if msg 
== \"Agent Won!\":\n agent_won_count += 1\n elif msg == \"Environment Won!\":\n env_won_count += 1\n else:\n tie_count += 1\n else:\n add_to_dict(next_state)\n max_next = max(Q_dict[next_lookup],key=Q_dict[next_lookup].get)\n q_value_max = Q_dict[next_lookup][max_next]\n\n Q_dict[curr_lookup][curr_action] += LR * ((reward + (GAMMA * (q_value_max))) - Q_dict[curr_lookup][curr_action]) \n curr_state = next_state\n\n total_reward += reward\n\n if (episode + 1) % checkpoint_print_episodes == 0:\n print(\"After playing %d games, Agent Won : %.4f, Environment Won : %.4f, Tie : %.4f\"% (episode + 1, \n agent_won_count / (episode + 1), env_won_count /(episode + 1), tie_count / (episode + 1)))\n\n if ((episode + 1) % threshold) == 0: \n save_tracking_states()\n\n if ((episode + 1) % 1000000) == 0:\n print('Processed %dM episodes'%((episode+1)/1000000))\n \nelapsed_time = time.time() - start_time\nsave_obj(States_track,'States_tracked') \nsave_obj(Q_dict,'Policy')", "_____no_output_____" ] ], [ [ "#### Check the Q-dictionary", "_____no_output_____" ] ], [ [ "Q_dict", "_____no_output_____" ], [ "len(Q_dict)", "_____no_output_____" ], [ "# try checking for one of the states - that which action your agent thinks is the best -----This will not be evaluated\nQ_dict['x-x-5-x-x-x-x-x-4']", "_____no_output_____" ] ], [ [ "#### Check the states tracked for Q-values convergence\n(non-evaluative)", "_____no_output_____" ] ], [ [ "# Write the code for plotting the graphs for state-action pairs tracked", "_____no_output_____" ], [ "plt.figure(0, figsize=(16,7))\nplt.subplot(241)\nt1=States_track['x-3-x-x-x-6-x-x-x'][(0,1)]\nplt.title(\"(s,a)=('x-3-x-x-x-6-x-x-x',(0,1))\")\nplt.plot(np.asarray(range(0, len(t1))),np.asarray(t1))\n\nplt.subplot(242)\nt2=States_track['x-x-x-x-6-x-x-x-5'][(2,7)]\nplt.title(\"(s,a)=('x-x-x-x-6-x-x-x-5',(2,7))\")\nplt.plot(np.asarray(range(0, len(t2))),np.asarray(t2))\n\nplt.subplot(243)\nt3=States_track['5-x-x-x-x-6-x-x-x'][(4,9)]\nplt.title(\"(s,a)=('5-x-x-x-x-6-x-x-x',(4,9))\")\nplt.plot(np.asarray(range(0, len(t3))),np.asarray(t3))\n\nplt.subplot(244)\nt4=States_track['x-5-x-2-x-x-4-7-x'][(0,9)]\nplt.title(\"(s,a)=('x-5-x-2-x-x-4-7-x',(0,9))\")\nplt.plot(np.asarray(range(0, len(t4))),np.asarray(t4))\n\nplt.show()", "_____no_output_____" ] ], [ [ "### Epsilon - decay check", "_____no_output_____" ] ], [ [ "max_epsilon = 1.0\nmin_epsilon = 0.001\ntime = np.arange(0,5000000)\nepsilon = []\nfor i in range(0,5000000):\n epsilon.append(min_epsilon + (max_epsilon - min_epsilon) * np.exp(-0.000001*i))", "_____no_output_____" ], [ "plt.plot(time, epsilon)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e7a1df3a6668dc69b070bbfb74c823080a65b3a5
11,524
ipynb
Jupyter Notebook
notebooks/dev/old_notebooks/mat2text.ipynb
ankitesh97/CBRAIN-CAM
cec46f1e5736aeedde480bdc0754c00bf0e06cf5
[ "MIT" ]
66
2018-05-25T18:36:27.000Z
2022-03-20T15:39:54.000Z
notebooks/dev/old_notebooks/mat2text.ipynb
dhilarj2/CBRAIN-CAM
f4f5d1ec53b4361f3abc60f15de6c88a5a843de3
[ "MIT" ]
17
2019-01-08T12:41:47.000Z
2021-09-05T01:22:04.000Z
notebooks/dev/old_notebooks/mat2text.ipynb
dhilarj2/CBRAIN-CAM
f4f5d1ec53b4361f3abc60f15de6c88a5a843de3
[ "MIT" ]
48
2018-04-04T22:42:12.000Z
2021-03-06T06:21:39.000Z
28.454321
463
0.519177
[ [ [ "import netCDF4 as nc\nimport numpy as np\nimport h5py", "_____no_output_____" ], [ "weight_file = '/export/home/srasp/tmp/015_1year_log_loss.h5'", "_____no_output_____" ], [ "f = h5py.File(weight_file, 'r')", "_____no_output_____" ], [ "layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]; layer_names", "_____no_output_____" ], [ "g_tmp = f[layer_names[0]]; g_tmp", "_____no_output_____" ], [ "wn_tmp = [n.decode('utf8') for n in g_tmp.attrs['weight_names']]; wn_tmp", "_____no_output_____" ], [ "f.close()", "_____no_output_____" ], [ "def read_weights(weight_file):\n weights = []\n biases = []\n with h5py.File(weight_file, 'r') as f:\n layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]\n for l in layer_names:\n g = f[l]\n weights.append(g[l+'/kernel:0'][:])\n biases.append(g[l+'/bias:0'][:])\n return weights, biases", "_____no_output_____" ], [ "w, b = read_weights(weight_file)", "_____no_output_____" ], [ "w[0].shape, b[0].shape", "_____no_output_____" ], [ "nchunk = 64", "_____no_output_____" ], [ "bre = b[0].reshape(1, -1)", "_____no_output_____" ], [ "np.savetxt('/export/home/srasp/tmp/test_bias.txt', bre, fmt='%.6e', delimiter=',')", "_____no_output_____" ], [ "wtrue = np.loadtxt('/export/home/srasp/tmp/results_015/layer1_kernel.txt', delimiter=',')", "_____no_output_____" ], [ "wtrue.shape", "_____no_output_____" ], [ "np.savetxt('/export/home/srasp/tmp/test_weight.txt', w[0].T, fmt='%.6e', delimiter=',')", "_____no_output_____" ], [ "# Convert norm and std files\nnorm_fn = '/scratch/srasp/preprocessed_data/purecrm_ess_train_sample1_norm.nc'", "_____no_output_____" ], [ "import xarray as xr", "_____no_output_____" ], [ "ds = nc.Dataset(norm_fn); ds", "_____no_output_____" ], [ "ds['feature_means'][:]", "_____no_output_____" ], [ "np.savetxt('/export/home/srasp/tmp/test_means.txt', ds['feature_means'][:].reshape(1, -1), fmt='%.6e', delimiter=',')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a1ef98181c0cc22d0ed673ee277509d30e25cb
3,365
ipynb
Jupyter Notebook
2021-06-09-fnmatch.ipynb
phackstock/code-and-tell
f33ce501526dbc438f71259e9196cadcf1d33058
[ "MIT" ]
1
2021-06-18T00:59:17.000Z
2021-06-18T00:59:17.000Z
2021-06-09-fnmatch.ipynb
phackstock/code-and-tell
f33ce501526dbc438f71259e9196cadcf1d33058
[ "MIT" ]
2
2021-05-31T12:18:04.000Z
2021-08-11T11:01:36.000Z
2021-06-09-fnmatch.ipynb
phackstock/code-and-tell
f33ce501526dbc438f71259e9196cadcf1d33058
[ "MIT" ]
5
2021-06-17T15:34:05.000Z
2022-02-18T00:52:55.000Z
26.92
203
0.524517
[ [ [ "# [fnmatch](https://docs.python.org/3/library/fnmatch.html)\n\n1. What is fnmatch and why is it useful?\n1. Why should I use fnmatch and not regex?\n1. Two examples\n\nFnmatch is part of the python standard library. Allows the use of UNIX style wildcards for string matching. Makes it easy to select a single file type out of a list (e.g. *.csv).\n\nWhile regex is much more powerful, fnmatch offers a simple syntax for using wildcards.\n\nIf you want to look for a string that starts with 5 characters, then a space and then 3 numbers between 4 and 7 you'll still need to resort to regex though.\n", "_____no_output_____" ], [ "## Simple example", "_____no_output_____" ] ], [ [ "import fnmatch\n\nFILES = [\"some_picture.png\", \"some_data.csv\", \"another_picture.png\"]\n\n# select only the .png files\nfor file in FILES:\n if fnmatch.fnmatch(file, '*.png'):\n print(file)\n \n# or using the fnmatch shorthand\nprint(fnmatch.filter(FILES, '*.png'))", "some_picture.png\nanother_picture.png\n['some_picture.png', 'another_picture.png']\n" ] ], [ [ "*SIDE NOTE*: The matching is **case insensitive**, if you want to perform a case sensitive match use [`fnmatch.fnmatchcase()`](https://docs.python.org/3/library/fnmatch.html#fnmatch.fnmatchcase)\n\n## Match a list of patterns\n", "_____no_output_____" ] ], [ [ "MODELS = [\"MESSAGEix-GLOBIOM 1.0\",\n \"MESSAGEix-GLOBIOM 1.1\",\n \"REMIND-MAgPIE 2.1-4.2\",\n \"REMIND-MAgPIE 1.7-3.2\",\n \"NIGEM\",\n \"POLES GECO2019\",\n \"COFFEE 1.0\",\n \"COFFEE 2.0\",\n \"TEA\",\n \"GCAM5.2\",\n \"GCAM5.3\"]\n\nMATCH_MODELS = [\"MESSAGEix-GLOBIOM*\", \"REMIND-MAgPIE*\"]\n\nmatch_any = lambda x, patterns: any(fnmatch.fnmatch(x, pattern) for pattern in patterns)\n\nfor m in MODELS:\n if match_any(m, MATCH_MODELS):\n print(m)", "MESSAGEix-GLOBIOM 1.0\nMESSAGEix-GLOBIOM 1.1\nREMIND-MAgPIE 2.1-4.2\nREMIND-MAgPIE 1.7-3.2\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7a1f10085b27c40eb9e999682403c39861da82c
1,456
ipynb
Jupyter Notebook
Python/C6.ipynb
pooja-gera/TheWireUsChallenge
18abb5ff3fd31b7dbfef41b8008f91d3fac029d3
[ "MIT" ]
null
null
null
Python/C6.ipynb
pooja-gera/TheWireUsChallenge
18abb5ff3fd31b7dbfef41b8008f91d3fac029d3
[ "MIT" ]
null
null
null
Python/C6.ipynb
pooja-gera/TheWireUsChallenge
18abb5ff3fd31b7dbfef41b8008f91d3fac029d3
[ "MIT" ]
1
2021-05-21T09:30:41.000Z
2021-05-21T09:30:41.000Z
22.75
92
0.461538
[ [ [ "\nQuestion 6:Write a code in python to display different functions of python module.", "_____no_output_____" ] ], [ [ "#module required\nimport time\nprint(\"I am Iron Man.\")\ntime.sleep(2.4)#this function delays the time\nprint(\"I love you 3000.\") #this statement is printed after 2.4 seconds", "_____no_output_____" ], [ "\nimport time\n\n# seconds passed since epoch\nseconds = 1545925769.9618232\nlocal_time = time.ctime(seconds)\nprint(\"Local time:\", local_time)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
e7a20448d8295919fe4582cb62a0506397c59801
10,579
ipynb
Jupyter Notebook
notebooks/Programacion/3d_formacion_vectores.ipynb
izmfc/MNO_finalproject
5e25ba84708f75e98768e75a681992986efd87fc
[ "RSA-MD" ]
null
null
null
notebooks/Programacion/3d_formacion_vectores.ipynb
izmfc/MNO_finalproject
5e25ba84708f75e98768e75a681992986efd87fc
[ "RSA-MD" ]
61
2020-04-25T01:09:22.000Z
2020-05-29T00:18:46.000Z
notebooks/Programacion/3d_formacion_vectores.ipynb
izmfc/MNO_finalproject
5e25ba84708f75e98768e75a681992986efd87fc
[ "RSA-MD" ]
4
2020-05-01T19:24:45.000Z
2021-01-23T01:28:44.000Z
33.584127
348
0.457227
[ [ [ "# **3.d Formación de vectores**\n\n**Responsable:**\n\nCésar Zamora Martínez\n\n**Infraestructura usada:** \nGoogle Colab, para pruebas\n", "_____no_output_____" ], [ "## 0. Importamos librerias necesarias\n\n**Fuente:** 3c_formacion_matrices.ipynb, 3c_formacion_abc.ipynb, 3c_formacion_delta.ipynb", "_____no_output_____" ] ], [ [ "!curl https://colab.chainer.org/install | sh -", " % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 1580 100 1580 0 0 3550 0 --:--:-- --:--:-- --:--:-- 3542\n+ apt -y -q install cuda-libraries-dev-10-0\nReading package lists...\nBuilding dependency tree...\nReading state information...\ncuda-libraries-dev-10-0 is already the newest version (10.0.130-1).\n0 upgraded, 0 newly installed, 0 to remove and 29 not upgraded.\n+ pip install -q cupy-cuda100 chainer \n\u001b[K |████████████████████████████████| 335.2MB 47kB/s \n\u001b[?25h+ set +ex\nInstallation succeeded!\n" ], [ "import cupy as cp\n\ndef formar_vectores(mu, Sigma):\n '''\n Calcula las cantidades u = \\Sigma^{-1} \\mu y v := \\Sigma^{-1} \\cdot 1 del problema de Markowitz\n\n Args:\n mu (cupy array, vector): valores medios esperados de activos (dimension n)\n Sigma (cupy array, matriz): matriz de covarianzas asociada a activos (dimension n x n)\n\n Return:\n u (cupy array, escalar): vector dado por \\cdot Sigma^-1 \\cdot mu (dimension n)\n v (cupy array, escalar): vector dado por Sigma^-1 \\cdot 1 (dimension n)\n '''\n\n # Vector auxiliar con entradas igual a 1\n n = Sigma.shape[0]\n ones_vector = cp.ones(n)\n\n # Formamos vector \\cdot Sigma^-1 mu y Sigm^-1 1\n # Nota: \n # 1) u= Sigma^-1 \\cdot mu se obtiene resolviendo Sigma u = mu\n # 2) v= Sigma^-1 \\cdot 1 se obtiene resolviendo Sigma v = 1\n\n # Obtiene vectores de interes\n u = cp.linalg.solve(Sigma, mu)\n u = u.transpose()[0] # correcion de expresion de array\n v = cp.linalg.solve(Sigma, ones_vector)\n\n return u , v", "_____no_output_____" ], [ "def formar_abc(mu, Sigma):\n '''\n Calcula las cantidades A, B y C del diagrama de flujo del problema de Markowitz\n\n Args:\n mu (cupy array, vector): valores medios esperados de activos (dimension n)\n Sigma (cupy array, matriz): matriz de covarianzas asociada a activos (dimension n x n)\n\n Return:\n A (cupy array, escalar): escalar dado por mu^t \\cdot Sigma^-1 \\cdot mu\n B (cupy array, escalar): escalar dado por 1^t \\cdot Sigma^-1 \\cdot 1\n C (cupy array, escalar): escalar dado por 1^t \\cdot Sigma^-1 \\cdot mu\n '''\n\n # Vector auxiliar con entradas igual a 1\n n = Sigma.shape[0]\n ones_vector = cp.ones(n)\n\n # Formamos vector \\cdot Sigma^-1 mu y Sigm^-1 1\n # Nota: \n # 1) u= Sigma^-1 \\cdot mu se obtiene resolviendo Sigma u = mu\n # 2) v= Sigma^-1 \\cdot 1 se obtiene resolviendo Sigma v = 1\n\n u, v = formar_vectores(mu, Sigma)\n\n # Obtiene escalares de interes\n A = mu.transpose()@u\n B = ones_vector.transpose()@v\n C = ones_vector.transpose()@u\n\n return A, B, C", "_____no_output_____" ], [ "def delta(A,B,C):\n '''\n Calcula las cantidad Delta = AB-C^2 del diagrama de flujo del problema de Markowitz\n\n Args:\n A (cupy array, escalar): escalar dado por mu^t \\cdot Sigma^-1 \\cdot mu\n B (cupy array, escalar): escalar dado por 1^t \\cdot Sigma^-1 \\cdot 1\n C (cupy array, escalar): escalar dado por 1^t \\cdot Sigma^-1 \\cdot mu\n\n Return:\n Delta (cupy array, escalar): escalar dado \\mu^t \\cdot \\Sigma^{-1} \\cdot \\mu\n '''\n Delta = A*B-C**2\n\n return Delta", "_____no_output_____" ] ], [ [ "## 1. 
Implementación\n\n**Consideraciones:** Esta etapa supone que se conocen $\hat{r}$, $\mu$ y $\Sigma$ asociados a los activos, ello con el objeto de obtener valores escalares que serán relevantes para obtener los pesos del portafolio para el inversionista. Hasta este punto se asume que ya conocemos todos los términos presentes en las expresiones:\n\n$$A = \mu^t \cdot \Sigma^{-1} \cdot \mu $$\n\n$$B = 1^t \cdot \Sigma^{-1} \cdot 1 $$\n\n$$C = 1^t \cdot \Sigma^{-1} \cdot \mu = \mu^t \cdot \Sigma^{-1} \cdot 1 $$\n\nPara con ello poder estimar los multiplicadores de Lagrange asociados al problema:\n\n$$ w_0 = \frac{1}{\Delta} ( \hat{r} \cdot B - C ) $$\n\n$$ w_1 = \frac{1}{\Delta} (A - C \cdot \hat{r}) $$\n\nCon los que se forma la solución del sistema dada por\n\n$$w = w_0 \cdot (\Sigma^{-1} \mu) + w_1 \cdot (\Sigma^{-1} 1) $$\n\nEn seguida se presenta el código correspondiente:", "_____no_output_____" ] ], [ [ "def formar_omegas(r, mu, Sigma):\n '''\n Calcula las cantidades w_0 y w_1 del problema de Markowitz\n (valores de multiplicadores de Lagrange)\n\n Args:\n r (cupy array, escalar): escalar que denota el retorno esperado por el inversionista\n mu (cupy array, vector): valores medios esperados de activos (dimension n)\n Sigma (cupy array, matriz): matriz de covarianzas asociada a activos (dimension n x n)\n\n Return:\n w_0 (cupy array, escalar): escalar dado por \n w_0 = \frac{1}{\Delta} (\hat{r} B - C)\n w_1 (cupy array, escalar): escalar dado por \n w_1 = \frac{1}{\Delta} (A - C \hat{r})\n '''\n # Obtenemos u = Sigma^{-1} \hat{\mu}, v = \Sigma^{-1} 1\n u, v = formar_vectores(mu, Sigma)\n # Escalares relevantes\n A, B, C = formar_abc(mu, Sigma)\n Delta = delta(A,B,C)\n # Formamos w_0 y w_1\n w_0 = (1/Delta)*(r*B-C)\n w_1 = (1/Delta)*(A-C*r)\n\n return w_0, w_1", "_____no_output_____" ] ], [ [ "## 1.1 Valores de prueba", "_____no_output_____" ] ], [ [ "n= 10\n\n# r y mu\nr= 10\nmu=cp.random.rand(n, 1)\n\n# Sigma (S @ S.T garantiza una matriz simétrica, apta como covarianza)\nS=cp.random.rand(n, n)\nSigma=S@S.T\n\n# multiplicadores de Lagrange\nformar_omegas(r,mu,Sigma)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7a20b5884946945613254bfcabf3babedf6e824
482,489
ipynb
Jupyter Notebook
ieee-preprocess-v2-0-top-300.ipynb
tarekoraby/IEEE-CIS-Fraud-Detection
2f1ec955b318c5d98f7af0d5f8df4f969531e728
[ "MIT" ]
null
null
null
ieee-preprocess-v2-0-top-300.ipynb
tarekoraby/IEEE-CIS-Fraud-Detection
2f1ec955b318c5d98f7af0d5f8df4f969531e728
[ "MIT" ]
null
null
null
ieee-preprocess-v2-0-top-300.ipynb
tarekoraby/IEEE-CIS-Fraud-Detection
2f1ec955b318c5d98f7af0d5f8df4f969531e728
[ "MIT" ]
null
null
null
59.981228
180
0.520346
[ [ [ "run_checks = False\nrun_sample = False", "_____no_output_____" ] ], [ [ "### Overview\nThis notebook works on the IEEE-CIS Fraud Detection competition. Here I build a simple XGBoost model based on a balanced dataset.", "_____no_output_____" ], [ "### Lessons:\n\n. keep the categorical variables as single items\n\n. Use a high max_depth for xgboost (maybe 40)\n\n\n### Ideas to try:\n\n. train divergence of expected value (eg. for TransactionAmt and distance based on the non-fraud subset (not all subset as in the case now)\n\n. try using a temporal approach to CV", "_____no_output_____" ] ], [ [ "# all imports necessary for this notebook\n%matplotlib inline\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport gc\nimport copy\nimport missingno as msno \nimport xgboost\nfrom xgboost import XGBClassifier, XGBRegressor\nfrom sklearn.model_selection import StratifiedKFold, cross_validate, train_test_split \nfrom sklearn.metrics import roc_auc_score, r2_score\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))", "/kaggle/input/ieee-fraud-detection/test_identity.csv\n/kaggle/input/ieee-fraud-detection/sample_submission.csv\n/kaggle/input/ieee-fraud-detection/train_identity.csv\n/kaggle/input/ieee-fraud-detection/train_transaction.csv\n/kaggle/input/ieee-fraud-detection/test_transaction.csv\n/kaggle/input/ieee-preprocessed/master_df_top_all.csv\n/kaggle/input/ieee-preprocessed/master_df_top_300.csv\n/kaggle/input/ieee-preprocessed/master_df_top_100.csv\n/kaggle/input/ieee-preprocessed/master_df_top_200.csv\n" ], [ "# Helpers\n \ndef seed_everything(seed=0):\n '''Seed to make all processes deterministic '''\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n \ndef drop_correlated_cols(df, threshold, sample_frac = 1):\n '''Drops one of two dataframe's columns whose pairwise pearson's correlation is above the provided threshold'''\n if sample_frac != 1:\n dataset = df.sample(frac = sample_frac).copy()\n else:\n dataset = df\n \n col_corr = set() # Set of all the names of deleted columns\n corr_matrix = dataset.corr()\n for i in range(len(corr_matrix.columns)):\n if corr_matrix.columns[i] in col_corr:\n continue\n for j in range(i):\n if (corr_matrix.iloc[i, j] >= threshold) and (corr_matrix.columns[j] not in col_corr):\n colname = corr_matrix.columns[i] # getting the name of column\n col_corr.add(colname)\n del dataset\n gc.collect()\n df.drop(columns = col_corr, inplace = True)\n\ndef calc_feature_difference(df, feature_name, indep_features, min_r2 = 0.1, min_r2_improv = 0, frac1 = 0.1, \n max_depth_start = 2, max_depth_step = 4):\n \n from copy import deepcopy\n \n print(\"Feature name %s\" %feature_name)\n #print(\"Indep_features %s\" %indep_features)\n \n is_imrpoving = True\n curr_max_depth = max_depth_start\n best_r2 = float(\"-inf\")\n clf_best = np.nan\n \n while is_imrpoving:\n clf = XGBRegressor(max_depth = curr_max_depth)\n\n rand_sample_indeces = df[df[feature_name].notnull()].sample(frac = frac1).index\n clf.fit(df.loc[rand_sample_indeces, indep_features], df.loc[rand_sample_indeces, feature_name]) \n\n rand_sample_indeces = df[df[feature_name].notnull()].sample(frac = frac1).index\n \n pred_y = clf.predict(df.loc[rand_sample_indeces, indep_features])\n r2Score = r2_score(df.loc[rand_sample_indeces, feature_name], pred_y)\n print(\"%d, R2 score %.4f\" % 
(curr_max_depth, r2Score))\n \n curr_max_depth = curr_max_depth + max_depth_step\n \n if r2Score > best_r2:\n best_r2 = r2Score\n clf_best = deepcopy(clf)\n if r2Score < best_r2 + (best_r2 * min_r2_improv) or (curr_max_depth > max_depth_start * max_depth_step and best_r2 < min_r2 / 2):\n is_imrpoving = False\n\n print(\"The best R2 score of %.4f\" % ( best_r2))\n \n if best_r2 > min_r2:\n pred_feature = clf_best.predict(df.loc[:, indep_features])\n return (df[feature_name] - pred_feature), best_r2\n else:\n return df[feature_name], best_r2", "_____no_output_____" ], [ "seed_everything()\npd.set_option('display.max_columns', 500)", "_____no_output_____" ], [ "master_df = pd.read_csv('/kaggle/input/ieee-preprocessed/master_df_top_300.csv')\nmaster_df.head()", "_____no_output_____" ], [ "cols_cat = {'id_12', 'id_13', 'id_14', 'id_15', 'id_16', 'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22', \n 'id_23', 'id_24', 'id_25', 'id_26', 'id_27', 'id_28', 'id_29', 'id_30', 'id_31', 'id_32', 'id_33', \n 'id_34', 'id_35', 'id_36', 'id_37', 'id_38', 'DeviceType', 'DeviceInfo', 'ProductCD', 'card4', \n 'card6', 'M4','P_emaildomain', 'R_emaildomain', 'card1', 'card2', 'card3', 'card5', 'addr1', \n 'addr2', 'M1', 'M2', 'M3', 'M5', 'M6', 'M7', 'M8', 'M9'}", "_____no_output_____" ], [ "%%time\nindep_features = ['weekday', 'hours', 'TransactionDT', 'ProductCD', 'card1', 'card2', 'card3', 'card4', 'card5'\n , 'card6', 'addr1', 'addr2']\n\nfor feature in indep_features:\n master_df[feature] = master_df[feature].astype('category').cat.codes\n\ncont_cols_list = list(master_df.select_dtypes(include='number').columns)\ncont_features_list = [x for x in cont_cols_list if x not in cols_cat and x not in indep_features and x not in ['TransactionID', 'isFraud', 'TransactionDT', 'is_train_df']]\n\nfor cont_feature in cont_features_list:\n print(cont_feature)\n master_df[cont_feature], best_r2 = calc_feature_difference(master_df, cont_feature, indep_features, frac1= 0.025)\n if best_r2 > 0.9:\n master_df.drop(columns = [cont_feature], inplace = True)\n print(80 * '-')", "TransactionAmt\nFeature name TransactionAmt\n[07:15:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0920\n[07:15:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:15:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0798\nThe best R2 score of 0.0920\n--------------------------------------------------------------------------------\ndist1\nFeature name dist1\n[07:15:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0057\n[07:15:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:15:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0028\nThe best R2 score of 0.0057\n--------------------------------------------------------------------------------\ndist2\nFeature name dist2\n[07:15:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.1044\n[07:15:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:15:29] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0602\n[07:15:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of -0.0602\n--------------------------------------------------------------------------------\nC1\nFeature name C1\n[07:15:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3002\n[07:15:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:15:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7461\n[07:15:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:15:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.5264\nThe best R2 score of 0.7461\n--------------------------------------------------------------------------------\nC2\nFeature name C2\n[07:16:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.2321\n[07:16:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:16:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.6294\n[07:16:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:16:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.1139\nThe best R2 score of 0.6294\n--------------------------------------------------------------------------------\nC4\nFeature name C4\n[07:16:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3506\n[07:16:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:17:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.4649\n[07:17:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:17:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.4466\nThe best R2 score of 0.4649\n--------------------------------------------------------------------------------\nC5\nFeature name C5\n[07:17:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0255\n[07:17:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:17:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0192\nThe best R2 score of 0.0255\n--------------------------------------------------------------------------------\nC6\nFeature name C6\n[07:17:47] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of 
reg:squarederror.\n2, R2 score 0.2527\n[07:17:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:17:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2919\n[07:18:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:18:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.3616\n[07:18:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:18:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.4728\n[07:18:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:18:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n18, R2 score 0.3745\nThe best R2 score of 0.4728\n--------------------------------------------------------------------------------\nC8\nFeature name C8\n[07:19:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3817\n[07:19:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:19:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.5930\n[07:19:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:19:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.7405\n[07:19:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:19:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.8135\n[07:19:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:19:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n18, R2 score 0.9043\n[07:20:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:20:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n22, R2 score 0.7858\nThe best R2 score of 0.9043\n--------------------------------------------------------------------------------\nC9\nFeature name C9\n[07:20:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0286\n[07:20:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:20:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0256\nThe best R2 score of 0.0286\n--------------------------------------------------------------------------------\nC10\nFeature name C10\n[07:21:08] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.4407\n[07:21:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:21:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.8598\n[07:21:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:21:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.7704\nThe best R2 score of 0.8598\n--------------------------------------------------------------------------------\nC11\nFeature name C11\n[07:21:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.2542\n[07:21:50] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:21:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.5071\n[07:22:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:22:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.4696\nThe best R2 score of 0.5071\n--------------------------------------------------------------------------------\nC12\nFeature name C12\n[07:22:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3876\n[07:22:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:22:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.5854\n[07:22:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:22:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.4035\nThe best R2 score of 0.5854\n--------------------------------------------------------------------------------\nC13\nFeature name C13\n[07:23:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0894\n[07:23:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:23:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3133\n[07:23:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:23:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.2588\nThe best R2 score of 0.3133\n--------------------------------------------------------------------------------\nC14\nFeature name C14\n[07:23:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1727\n[07:23:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now 
deprecated in favor of reg:squarederror.\n[07:23:47] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3742\n[07:23:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:23:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.5662\n[07:24:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:24:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.3651\nThe best R2 score of 0.5662\n--------------------------------------------------------------------------------\nD1\nFeature name D1\n[07:24:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0848\n[07:24:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:24:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1039\n[07:24:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:24:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.1027\nThe best R2 score of 0.1039\n--------------------------------------------------------------------------------\nD2\nFeature name D2\n[07:25:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0431\n[07:25:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:25:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0464\n[07:25:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of 0.0464\n--------------------------------------------------------------------------------\nD3\nFeature name D3\n[07:25:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0205\n[07:25:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:25:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0009\nThe best R2 score of 0.0205\n--------------------------------------------------------------------------------\nD4\nFeature name D4\n[07:25:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0740\n[07:25:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:25:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0817\n[07:25:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:25:54] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.0556\nThe best R2 score of 0.0817\n--------------------------------------------------------------------------------\nD5\nFeature name D5\n[07:26:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0338\n[07:26:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:26:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0301\nThe best R2 score of 0.0338\n--------------------------------------------------------------------------------\nD6\nFeature name D6\n[07:26:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0849\n[07:26:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:26:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0704\nThe best R2 score of 0.0849\n--------------------------------------------------------------------------------\nD7\nFeature name D7\n[07:26:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0792\n[07:26:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:26:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0115\nThe best R2 score of 0.0792\n--------------------------------------------------------------------------------\nD8\nFeature name D8\n[07:26:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0755\n[07:26:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:26:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0534\nThe best R2 score of 0.0755\n--------------------------------------------------------------------------------\nD9\nFeature name D9\n[07:26:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[07:26:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:26:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[07:26:22] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:26:22] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\nD10\nFeature name D10\n[07:26:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1127\n[07:26:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now 
deprecated in favor of reg:squarederror.\n[07:26:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1124\nThe best R2 score of 0.1127\n--------------------------------------------------------------------------------\nD11\nFeature name D11\n[07:26:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0524\n[07:26:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:26:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0514\nThe best R2 score of 0.0524\n--------------------------------------------------------------------------------\nD12\nFeature name D12\n[07:26:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0199\n[07:26:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:27:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0358\nThe best R2 score of 0.0199\n--------------------------------------------------------------------------------\nD13\nFeature name D13\n[07:27:02] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0592\n[07:27:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:27:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0045\nThe best R2 score of 0.0592\n--------------------------------------------------------------------------------\nD14\nFeature name D14\n[07:27:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0457\n[07:27:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:27:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0309\nThe best R2 score of 0.0457\n--------------------------------------------------------------------------------\nD15\nFeature name D15\n[07:27:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1118\n[07:27:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:27:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1179\n[07:27:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:27:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.0978\nThe best R2 score of 0.1179\n--------------------------------------------------------------------------------\nV2\nFeature name V2\n[07:27:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0002\n[07:27:48] 
WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:27:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0255\nThe best R2 score of -0.0002\n--------------------------------------------------------------------------------\nV3\nFeature name V3\n[07:27:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0024\n[07:27:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:27:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0102\nThe best R2 score of 0.0024\n--------------------------------------------------------------------------------\nV4\nFeature name V4\n[07:28:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0036\n[07:28:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:28:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0001\nThe best R2 score of 0.0036\n--------------------------------------------------------------------------------\nV5\nFeature name V5\n[07:28:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0029\n[07:28:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:28:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0100\n[07:28:22] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of 0.0100\n--------------------------------------------------------------------------------\nV6\nFeature name V6\n[07:28:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0054\n[07:28:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:28:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0218\nThe best R2 score of 0.0054\n--------------------------------------------------------------------------------\nV7\nFeature name V7\n[07:28:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0003\n[07:28:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:28:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0052\nThe best R2 score of 0.0003\n--------------------------------------------------------------------------------\nV10\nFeature name V10\n[07:28:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0566\n[07:28:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now 
deprecated in favor of reg:squarederror.\n[07:28:47] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0730\n[07:28:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:28:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.0412\nThe best R2 score of 0.0730\n--------------------------------------------------------------------------------\nV12\nFeature name V12\n[07:29:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3424\n[07:29:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:29:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3361\nThe best R2 score of 0.3424\n--------------------------------------------------------------------------------\nV13\nFeature name V13\n[07:29:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3457\n[07:29:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:29:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3468\n[07:29:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:29:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.3314\nThe best R2 score of 0.3468\n--------------------------------------------------------------------------------\nV19\nFeature name V19\n[07:29:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0933\n[07:30:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:30:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1043\n[07:30:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:30:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.0796\nThe best R2 score of 0.1043\n--------------------------------------------------------------------------------\nV20\nFeature name V20\n[07:30:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0804\n[07:30:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:30:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0888\n[07:30:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:30:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.0809\nThe best R2 score of 
0.0888\n--------------------------------------------------------------------------------\nV21\nFeature name V21\n[07:31:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.8833\n[07:31:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:31:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.8940\n[07:31:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:31:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.8853\nThe best R2 score of 0.8940\n--------------------------------------------------------------------------------\nV23\nFeature name V23\n[07:31:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0191\n[07:31:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:31:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.1531\nThe best R2 score of 0.0191\n--------------------------------------------------------------------------------\nV24\nFeature name V24\n[07:31:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0169\n[07:31:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:32:02] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0180\n[07:32:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of 0.0180\n--------------------------------------------------------------------------------\nV25\nFeature name V25\n[07:32:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0913\n[07:32:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:32:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2073\n[07:32:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:32:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.0292\nThe best R2 score of 0.2073\n--------------------------------------------------------------------------------\nV29\nFeature name V29\n[07:32:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1793\n[07:32:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:32:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1957\n[07:33:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in 
favor of reg:squarederror.\n[07:33:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.1852\nThe best R2 score of 0.1957\n--------------------------------------------------------------------------------\nV34\nFeature name V34\n[07:33:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.7948\n[07:33:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:33:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7568\nThe best R2 score of 0.7948\n--------------------------------------------------------------------------------\nV35\nFeature name V35\n[07:33:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3556\n[07:33:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:33:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3482\nThe best R2 score of 0.3556\n--------------------------------------------------------------------------------\nV36\nFeature name V36\n[07:34:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3595\n[07:34:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:34:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3517\nThe best R2 score of 0.3595\n--------------------------------------------------------------------------------\nV37\nFeature name V37\n[07:34:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0326\n[07:34:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:34:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0209\nThe best R2 score of 0.0326\n--------------------------------------------------------------------------------\nV38\nFeature name V38\n[07:34:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0804\n[07:34:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:34:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0800\nThe best R2 score of 0.0804\n--------------------------------------------------------------------------------\nV39\nFeature name V39\n[07:34:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.7541\n[07:34:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:34:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7180\nThe best R2 score of 
0.7541\n--------------------------------------------------------------------------------\nV40\nFeature name V40\n[07:35:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.6474\n[07:35:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:35:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.6398\nThe best R2 score of 0.6474\n--------------------------------------------------------------------------------\nV44\nFeature name V44\n[07:35:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0549\n[07:35:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:35:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2229\n[07:35:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:35:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score -0.0446\nThe best R2 score of 0.2229\n--------------------------------------------------------------------------------\nV45\nFeature name V45\n[07:35:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0066\n[07:36:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:36:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0077\n[07:36:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of 0.0077\n--------------------------------------------------------------------------------\nV47\nFeature name V47\n[07:36:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0234\n[07:36:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:36:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0081\nThe best R2 score of 0.0234\n--------------------------------------------------------------------------------\nV48\nFeature name V48\n[07:36:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1936\n[07:36:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:36:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2121\n[07:36:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[07:36:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.2018\nThe best R2 score of 
0.2121\n--------------------------------------------------------------------------------
R2 score per feature, listed in the order of the max_depth values tried (2, 6, 10, 14, 18), with the best score for that feature at the end of each row. Every fit also printed the same notice: "[hh:mm:ss] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror."
V49: 0.1844, 0.1963, 0.1855; best 0.1963
V51: 0.8429, 0.8608, 0.8385; best 0.8608
V52: 0.7573, 0.7688, 0.7489; best 0.7688
V53: 0.3724, 0.3769, 0.3607; best 0.3769
V54: 0.3852, 0.3797; best 0.3852
V55: 0.0330, -0.0053; best 0.0330
V56: 0.0610, 0.1407, 0.0646; best 0.1407
V60: 0.7122, 0.7488, 0.7173; best 0.7488
V61: 0.0887, 0.1046, 0.1016; best 0.1046
V62: 0.0630, 0.0710, 0.0888, 0.0529; best 0.0888
V64: 0.7319, 0.7874, 0.7459; best 0.7874
V66: 0.0552, 0.0800, 0.0136; best 0.0800
V67: 0.0476, 0.0492; best 0.0492
V69: 0.1823, 0.1969, 0.1905; best 0.1969
V70: 0.1644, 0.1962, 0.1762; best 0.1962
V74: 0.7409, 0.7244; best 0.7409
V75: 0.3092, 0.3103, 0.2904; best 0.3103
V76: 0.3192, 0.3164; best 0.3192
V77: 0.0677, 0.1637, -0.6370; best 0.1637
V78: 0.0651, 0.2534, -0.3201; best 0.2534
V79: 0.8942, 0.8795; best 0.8942
V81: 0.7104, 0.6667; best 0.7104
V82: 0.0991, 0.1028, 0.0879; best 0.1028
V83: 0.0780, 0.0775; best 0.0780
V86: 0.0114, 0.0485; best 0.0485
V87: 0.0294, 0.2333, -0.7488; best 0.2333
V90: 0.1835, 0.2071, 0.1868; best 0.2071
V91: 0.1733, 0.1938, 0.1767; best 0.1938
V94: 0.9111, 0.9116, 0.9116, 0.9061; best 0.9116
V95: 0.6377, 0.9837, 0.9315; best 0.9837
V96: 0.6587, 0.9740, 0.9798, 0.9776; best 0.9798
V97: 0.6661, 0.9909, 0.9785; best 0.9909
V99: 0.1171, 0.2309, 0.2118; best 0.2309
V100: 0.1341, 0.2255, 0.1572; best 0.2255
V102: 0.5246, 0.8036, 0.9270, 0.9832, 0.8002; best 0.9832
V103: 0.6407, 0.9855, 0.8247; best 0.9855
V104: 0.4281, 0.3100; best 0.4281
V105: 0.7554, 0.8688, 0.8674; best 0.8688
V109: 0.0047, -0.0138; best 0.0047
V110: 0.0029, -0.0529; best 0.0029
V112: 0.0010, -0.0008; best 0.0010
V115: 0.0096, 0.0116; best 0.0116
V116: 0.0041, -0.0166; best 0.0041
V117: 0.0414, -0.1067; best 0.0414
V122: -0.0505, -0.0239; best -0.0239
V124: 0.0587, 0.0483; best 0.0587
V126: 0.5813, 0.2682; best 0.5813
V127: 0.7013, 0.5785; best 0.7013
V128: 0.4943, 0.5320, 0.7111, -1.0119; best 0.7111
V129: -0.0111, -0.0114; best -0.0111
V130: 0.0210, -0.0462; best 0.0210
V131: 0.0185, -0.0197; best 0.0185
V132: 0.6261, 0.9612, -0.3090; best 0.9612
V133: 0.5530, 0.8278, 0.9529, 0.2752; best 0.9529
V134: 0.4508, 0.9812, -0.1467; best 0.9812
V135: -0.1941, -0.0019; best -0.0019
V136: -14.7716, 0.1319, -0.3096; best 0.1319
V137: 0.1638, -1.2927; best 0.1638
V139: 0.0415, 0.0432; best 0.0432
V141: 0.0394, -0.0087; best 0.0394
V143: 0.7714, 0.9480, 0.7329; best 0.9480
V144: 0.4921, 0.5504, 0.4726; best 0.5504
V145: 0.4794, 0.4940, 0.3922; best 0.4940
V146: 0.0117, 0.0377; best 0.0377
V149: 0.0958, 0.1098, -0.4213; best 0.1098
V150: 0.4886, 0.4683; best 0.4886
V151: 0.5041, 0.4977; best 0.5041
V152: 0.4205, 0.4091; best 0.4205
V154: 0.1359, 0.1235; best 0.1359
V160: 0.3318, 0.3993, 0.2804; best 0.3993
V161: -0.9572, -0.4251; best -0.4251
V162: -0.1922, -0.3519; best -0.1922
V164: 0.2818, 0.3343, 0.6244, 0.8383, 0.5811; best 0.8383
V165: 0.3743, 0.4322, 0.4774, 0.3527; best 0.4774
V166: 0.4557, 0.5915, 0.3719; best 0.5915
V170: 0.0372, 0.0058; best 0.0372
V171: 0.0757, 0.1169, -0.0741; best 0.1169
V173: 0.0179, -0.0323; best 0.0179
V176: 0.2555, 0.5801, -0.3614; best 0.5801
V177: 0.5450, 0.7905, 0.9822, 0.8925; best 0.9822
V187: -0.0967, -0.1936; best -0.0967
V188: 0.0115, 0.0267; best 0.0267
V189: -0.0003, -0.0410; best -0.0003
V191: -0.0122, -0.2211; best -0.0122
V192: -0.0425, 0.0036; best 0.0036
V194: 0.0388, 0.0031; best 0.0388
V201: 0.0140, -0.0376; best 0.0140
V202: 0.1315, -7.4106; best 0.1315
V203: 0.0924, 0.4865, -0.0054; best 0.4865
V206: -1.0298, -0.1694; best -0.1694
V208: -0.0072, -0.9807; best -0.0072
V209: -0.0330, 0.0011; best 0.0011
V211: -2.4491, 0.1501, 0.5734, 0.0946; best 0.5734
V214: -40.1710, 0.3079, -0.0709; best 0.3079
V218: 0.2363, 0.4939, 0.4540; best 0.4939
V221: 0.0075, -0.3627; best 0.0075
V222: 0.3026, 0.3859, 0.1023; best 0.3859
V223: 0.0291, -0.0613; best 0.0291
V224: -0.3549, 0.0206; best 0.0206
--------------------------------------------------------------------------------\nV225\nFeature name V225\n[08:14:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.8025\n[08:14:33] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:14:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.5157\n[08:14:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of -0.5157\n--------------------------------------------------------------------------------\nV226\nFeature name V226\n[08:14:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3572\n[08:14:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:14:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0067\nThe best R2 score of 0.3572\n--------------------------------------------------------------------------------\nV229\nFeature name V229\n[08:14:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0456\n[08:14:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:14:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0454\nThe best R2 score of 0.0456\n--------------------------------------------------------------------------------\nV230\nFeature name V230\n[08:14:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0829\n[08:14:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:14:50] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0917\nThe best R2 score of 0.0829\n--------------------------------------------------------------------------------\nV234\nFeature name V234\n[08:14:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1841\n[08:14:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:14:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.5568\n[08:14:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:14:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.5633\n[08:15:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:15:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.2440\nThe best R2 score of 0.5633\n--------------------------------------------------------------------------------\nV236\nFeature name V236\n[08:15:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.4358\n[08:15:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now 
deprecated in favor of reg:squarederror.\n[08:15:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2918\nThe best R2 score of 0.4358\n--------------------------------------------------------------------------------\nV243\nFeature name V243\n[08:15:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.1768\n[08:15:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:15:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.4186\nThe best R2 score of -0.1768\n--------------------------------------------------------------------------------\nV244\nFeature name V244\n[08:15:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0129\n[08:15:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:15:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0054\nThe best R2 score of 0.0129\n--------------------------------------------------------------------------------\nV245\nFeature name V245\n[08:15:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0546\n[08:15:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:15:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0063\n[08:15:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of -0.0063\n--------------------------------------------------------------------------------\nV247\nFeature name V247\n[08:15:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.1791\n[08:15:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:15:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0581\n[08:15:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of -0.0581\n--------------------------------------------------------------------------------\nV248\nFeature name V248\n[08:15:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0855\n[08:15:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:15:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -1.0362\nThe best R2 score of 0.0855\n--------------------------------------------------------------------------------\nV249\nFeature name V249\n[08:15:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0032\n[08:15:50] 
WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:15:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.7305\nThe best R2 score of -0.0032\n--------------------------------------------------------------------------------\nV253\nFeature name V253\n[08:15:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0012\n[08:15:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:15:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.7159\nThe best R2 score of -0.0012\n--------------------------------------------------------------------------------\nV254\nFeature name V254\n[08:15:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.5027\n[08:15:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.5741\nThe best R2 score of -0.5027\n--------------------------------------------------------------------------------\nV257\nFeature name V257\n[08:16:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3238\n[08:16:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.5942\n[08:16:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score -0.2051\nThe best R2 score of 0.5942\n--------------------------------------------------------------------------------\nV258\nFeature name V258\n[08:16:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0321\n[08:16:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0781\n[08:16:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score -0.6362\nThe best R2 score of 0.0781\n--------------------------------------------------------------------------------\nV263\nFeature name V263\n[08:16:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1177\n[08:16:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:27] WARNING: /workspace/src/objective/regression_obj.cu:152: 
reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0415\nThe best R2 score of 0.1177\n--------------------------------------------------------------------------------\nV264\nFeature name V264\n[08:16:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -7.5730\n[08:16:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.1877\n[08:16:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of -0.1877\n--------------------------------------------------------------------------------\nV265\nFeature name V265\n[08:16:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -5.1975\n[08:16:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.3002\n[08:16:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of -0.3002\n--------------------------------------------------------------------------------\nV266\nFeature name V266\n[08:16:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0222\n[08:16:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.5373\n[08:16:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score -8.9798\nThe best R2 score of 0.5373\n--------------------------------------------------------------------------------\nV267\nFeature name V267\n[08:16:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.1919\n[08:16:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:16:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -4.2503\nThe best R2 score of -0.1919\n--------------------------------------------------------------------------------\nV269\nFeature name V269\n[08:16:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1138\n[08:16:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:17:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.4789\nThe best R2 score of 0.1138\n--------------------------------------------------------------------------------\nV270\nFeature name 
V270\n[08:17:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0056\n[08:17:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:17:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0458\nThe best R2 score of -0.0056\n--------------------------------------------------------------------------------\nV271\nFeature name V271\n[08:17:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0914\n[08:17:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:17:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -1.1909\nThe best R2 score of -0.0914\n--------------------------------------------------------------------------------\nV272\nFeature name V272\n[08:17:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0023\n[08:17:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:17:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0222\n[08:17:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of 0.0222\n--------------------------------------------------------------------------------\nV273\nFeature name V273\n[08:17:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0045\n[08:17:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:17:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -1.2337\nThe best R2 score of 0.0045\n--------------------------------------------------------------------------------\nV279\nFeature name V279\n[08:17:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.7365\n[08:17:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:17:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7703\n[08:17:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:17:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9881\n[08:17:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:18:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.9437\nThe best R2 score of 0.9881\n--------------------------------------------------------------------------------\nV280\nFeature name V280\n[08:18:32] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.6736\n[08:18:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:18:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9822\n[08:18:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:18:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.8636\nThe best R2 score of 0.9822\n--------------------------------------------------------------------------------\nV281\nFeature name V281\n[08:19:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0159\n[08:19:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:19:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1086\n[08:19:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:19:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score -0.0895\nThe best R2 score of 0.1086\n--------------------------------------------------------------------------------\nV282\nFeature name V282\n[08:19:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0720\n[08:20:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:20:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0814\n[08:20:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:20:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.0764\nThe best R2 score of 0.0814\n--------------------------------------------------------------------------------\nV283\nFeature name V283\n[08:20:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0785\n[08:20:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:20:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1996\n[08:20:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:20:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.1299\nThe best R2 score of 0.1996\n--------------------------------------------------------------------------------\nV285\nFeature name V285\n[08:21:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0924\n[08:21:11] WARNING: /workspace/src/objective/regression_obj.cu:152: 
reg:linear is now deprecated in favor of reg:squarederror.\n[08:21:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2049\n[08:21:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:21:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.2187\n[08:21:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:21:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.0817\nThe best R2 score of 0.2187\n--------------------------------------------------------------------------------\nV286\nFeature name V286\n[08:22:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0094\n[08:22:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:22:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0289\nThe best R2 score of 0.0094\n--------------------------------------------------------------------------------\nV287\nFeature name V287\n[08:22:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0675\n[08:22:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:22:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1704\n[08:22:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:22:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.1054\nThe best R2 score of 0.1704\n--------------------------------------------------------------------------------\nV288\nFeature name V288\n[08:23:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0219\n[08:23:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:23:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0301\n[08:23:22] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of 0.0301\n--------------------------------------------------------------------------------\nV290\nFeature name V290\n[08:23:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1179\n[08:23:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:23:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2683\n[08:23:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:23:46] 
WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.0731\nThe best R2 score of 0.2683\n--------------------------------------------------------------------------------\nV291\nFeature name V291\n[08:24:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3416\n[08:24:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:24:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.6289\n[08:24:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:24:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.6959\n[08:24:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:24:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.7846\n[08:24:50] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:24:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n18, R2 score 0.6818\nThe best R2 score of 0.7846\n--------------------------------------------------------------------------------\nV292\nFeature name V292\n[08:25:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.4965\n[08:25:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:25:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.8694\n[08:25:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:25:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.6871\nThe best R2 score of 0.8694\n--------------------------------------------------------------------------------\nV293\nFeature name V293\n[08:26:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.5416\n[08:26:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:26:22] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9849\n[08:26:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:26:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9100\nThe best R2 score of 0.9849\n--------------------------------------------------------------------------------\nV294\nFeature name V294\n[08:26:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.6528\n[08:27:01] WARNING: /workspace/src/objective/regression_obj.cu:152: 
reg:linear is now deprecated in favor of reg:squarederror.\n[08:27:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9870\n[08:27:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:27:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9673\nThe best R2 score of 0.9870\n--------------------------------------------------------------------------------\nV295\nFeature name V295\n[08:27:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.6777\n[08:27:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:27:47] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9739\n[08:27:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:27:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9878\n[08:28:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:28:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.9764\nThe best R2 score of 0.9878\n--------------------------------------------------------------------------------\nV296\nFeature name V296\n[08:28:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.6256\n[08:28:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:28:50] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7943\n[08:28:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:29:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.7844\nThe best R2 score of 0.7943\n--------------------------------------------------------------------------------\nV298\nFeature name V298\n[08:29:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.6609\n[08:29:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:29:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7495\n[08:29:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:29:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.8282\n[08:29:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:29:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 
0.8503\n[08:30:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:30:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n18, R2 score 0.7274\nThe best R2 score of 0.8503\n--------------------------------------------------------------------------------\nV301\nFeature name V301\n[08:30:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0015\n[08:30:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:31:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0200\nThe best R2 score of 0.0015\n--------------------------------------------------------------------------------\nV302\nFeature name V302\n[08:31:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.8012\n[08:31:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:31:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7998\nThe best R2 score of 0.8012\n--------------------------------------------------------------------------------\nV303\nFeature name V303\n[08:31:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.6599\n[08:31:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:31:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.6962\n[08:31:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:31:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.6281\nThe best R2 score of 0.6962\n--------------------------------------------------------------------------------\nV304\nFeature name V304\n[08:32:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.7264\n[08:32:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:32:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7472\n[08:32:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:32:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.7254\nThe best R2 score of 0.7472\n--------------------------------------------------------------------------------\nV306\nFeature name V306\n[08:32:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.2834\n[08:32:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:32:52] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.1496\nThe best R2 score of 0.2834\n--------------------------------------------------------------------------------\nV307\nFeature name V307\n[08:33:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.4737\n[08:33:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:33:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.6765\n[08:33:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:33:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.6130\nThe best R2 score of 0.6765\n--------------------------------------------------------------------------------\nV308\nFeature name V308\n[08:33:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0127\n[08:33:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:33:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.8979\n[08:33:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:34:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score -1.2454\nThe best R2 score of 0.8979\n--------------------------------------------------------------------------------\nV309\nFeature name V309\n[08:34:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0145\n[08:34:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:34:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.1178\nThe best R2 score of 0.0145\n--------------------------------------------------------------------------------\nV310\nFeature name V310\n[08:34:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0244\n[08:34:47] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:34:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.5758\nThe best R2 score of 0.0244\n--------------------------------------------------------------------------------\nV311\nFeature name V311\n[08:35:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0051\n[08:35:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:35:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0244\nThe best R2 score of 
-0.0051\n--------------------------------------------------------------------------------\nV312\nFeature name V312\n[08:35:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0049\n[08:35:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:35:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.1351\nThe best R2 score of 0.0049\n--------------------------------------------------------------------------------\nV313\nFeature name V313\n[08:35:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0008\n[08:35:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:35:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0141\nThe best R2 score of -0.0008\n--------------------------------------------------------------------------------\nV314\nFeature name V314\n[08:35:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0203\n[08:36:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:36:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0870\n[08:36:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:36:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.0842\nThe best R2 score of 0.0870\n--------------------------------------------------------------------------------\nV315\nFeature name V315\n[08:36:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0303\n[08:36:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:36:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0306\nThe best R2 score of -0.0303\n--------------------------------------------------------------------------------\nV316\nFeature name V316\n[08:36:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1164\n[08:36:50] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:36:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7611\n[08:37:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:37:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9761\n[08:37:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:37:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now 
deprecated in favor of reg:squarederror.\n14, R2 score 0.6066\nThe best R2 score of 0.9761\n--------------------------------------------------------------------------------\nV317\nFeature name V317\n[08:37:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.4905\n[08:37:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:37:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.8781\n[08:38:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:38:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.4361\nThe best R2 score of 0.8781\n--------------------------------------------------------------------------------\nV318\nFeature name V318\n[08:38:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.5865\n[08:38:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:38:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1627\nThe best R2 score of 0.5865\n--------------------------------------------------------------------------------\nV319\nFeature name V319\n[08:38:47] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.5710\n[08:38:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:38:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -2.5461\nThe best R2 score of -0.5710\n--------------------------------------------------------------------------------\nV320\nFeature name V320\n[08:39:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0435\n[08:39:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:39:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0688\n[08:39:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:39:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.4031\n[08:39:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:39:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score -1.5642\nThe best R2 score of 0.4031\n--------------------------------------------------------------------------------\nV322\nFeature name V322\n[08:40:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.7971\n[08:40:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of 
reg:squarederror.\n[08:40:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9743\n[08:40:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:40:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.7105\nThe best R2 score of 0.9743\n--------------------------------------------------------------------------------\nV323\nFeature name V323\n[08:40:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.8190\n[08:40:22] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:40:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.7932\nThe best R2 score of 0.8190\n--------------------------------------------------------------------------------\nV324\nFeature name V324\n[08:40:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.8445\n[08:40:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:40:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9822\n[08:40:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:40:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9818\nThe best R2 score of 0.9822\n--------------------------------------------------------------------------------\nV329\nFeature name V329\n[08:40:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.8342\n[08:40:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:40:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9247\n[08:40:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:40:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.7653\nThe best R2 score of 0.9247\n--------------------------------------------------------------------------------\nV331\nFeature name V331\n[08:40:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -1.5959\n[08:40:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:40:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1643\n[08:41:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[08:41:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score -16.7540\nThe best R2 score of 
0.1643
--------------------------------------------------------------------------------
(Every fit in this sweep emitted the XGBoost warning "reg:linear is now deprecated in favor of reg:squarederror".)
Per-feature validation R2 at each max_depth tried, with the best score last:

V332                           d2=0.1210   d6=0.4019   d10=0.2026                best 0.4019
V333                           d2=0.0032   d6=0.5462   d10=0.2234                best 0.5462
V337                           d2=0.0623   d6=-3.1499                            best 0.0623
id_01                          d2=0.0657   d6=0.0973   d10=-0.0109               best 0.0973
id_02                          d2=0.2925   d6=0.2662                             best 0.2925
id_03                          d2=-0.0354  d6=-0.1405                            best -0.0354
id_05                          d2=0.0579   d6=0.0397                             best 0.0579
id_06                          d2=0.0154   d6=-0.0227                            best 0.0154
id_07                          d2=0.0483   d6=-0.2187                            best 0.0483
id_09                          d2=0.0154   d6=-0.1018                            best 0.0154
id_31_chrome_version_newness   d2=0.0113   d6=-0.0710                            best 0.0113
id_31_safari_version_newness   d2=-0.0087  d6=-0.1185                            best -0.0087
id_33_resolution               d2=0.0185   d6=-0.0057                            best 0.0185
TransactionAmt_decimal         d2=0.1644   d6=0.1886   d10=0.1770                best 0.1886
TransactionAmt_decimal_length  d2=0.3625   d6=0.3555                             best 0.3625
id_18_15.0                     d2=0.1568   d6=0.1608   d10=0.1589                best 0.1608
card3_143.0                    d2=1.0000   d6=0.9697                             best 1.0000
card3_144.0                    d2=0.9816   d6=0.9681                             best 0.9816
card3_150.0                    d2=0.9946   d6=1.0000   d10=1.0000                best 1.0000
card3_185.0                    d2=1.0000   d6=1.0000   d10=1.0000                best 1.0000
card3_infrequent_category      d2=0.7029   d6=0.8709   d10=0.7902                best 0.8709
R_emaildomain_2_com            d2=0.8689   d6=0.8770   d10=0.8677                best 0.8770
id_14_-420.0                   d2=0.2087   d6=0.7268   d10=0.5869                best 0.7268
id_14_-360.0                   d2=0.2540   d6=0.6435   d10=0.6226                best 0.6435
id_14_60.0                     d2=-0.0234  d6=-0.0508                            best -0.0234
M4_M0                          d2=0.1491   d6=0.1541   d10=0.1393                best 0.1541
M4_M1                          d2=0.0413   d6=0.0458                             best 0.0458
card6_credit                   d2=1.0000   d6=1.0000   d10=0.9998                best 1.0000
M8_F                           d2=0.2030   d6=0.2044   d10=0.1949                best 0.2044
hours_1.0                      d2=1.0000   d6=1.0000                             best 1.0000
hours_2.0                      d2=1.0000   d6=1.0000                             best 1.0000
hours_3.0                      d2=1.0000   d6=1.0000   d10=1.0000                best 1.0000
hours_4.0                      d2=1.0000   d6=1.0000                             best 1.0000
hours_5.0                      d2=1.0000   d6=1.0000                             best 1.0000
hours_6.0                      d2=1.0000   d6=1.0000                             best 1.0000
hours_11.0                     d2=1.0000   d6=1.0000   d10=1.0000                best 1.0000
hours_12.0                     d2=1.0000   d6=1.0000                             best 1.0000
hours_13.0                     d2=1.0000   d6=1.0000   d10=1.0000                best 1.0000
hours_14.0                     d2=1.0000   d6=1.0000   d10=1.0000                best 1.0000
hours_15.0                     d2=1.0000   d6=1.0000   d10=1.0000   d14=1.0000   d18=1.0000   d22=1.0000   best 1.0000
hours_16.0                     d2=1.0000   d6=1.0000                             best 1.0000
hours_17.0                     d2=1.0000   d6=1.0000   d10=1.0000   d14=1.0000   best 1.0000
hours_18.0                     d2=1.0000   d6=1.0000                             best 1.0000
hours_19.0                     d2=1.0000   d6=1.0000                             best 1.0000
hours_20.0                     d2=1.0000   d6=1.0000                             best 1.0000
hours_21.0                     d2=1.0000   d6=1.0000                             best 1.0000
hours_22.0                     d2=1.0000   d6=1.0000   d10=1.0000                best 1.0000
card4_american express         d2=1.0000   d6=1.0000                             best 1.0000
card4_discover                 d2=1.0000   d6=1.0000   d10=1.0000                best 1.0000
card4_mastercard               d2=1.0000   d6=1.0000                             best 1.0000
id_13_19.0                     d2=0.1277   d6=0.5467   d10=0.2521                best 0.5467
id_13_49.0                     d2=0.3470   d6=0.3577   d10=0.2748                best 0.3577
id_13_52.0                     d2=0.3194   d6=0.3282   d10=0.3141                best 0.3282
R_emaildomain_anonymous.com    d2=0.1787   d6=0.2009   d10=0.1365                best 0.2009
R_emaildomain_gmail.com        d2=0.3332   d6=0.3532   d10=0.3212                best 0.3532
R_emaildomain_hotmail.com      d2=0.2827   d6=0.2766                             best 0.2827
P_emaildomain_4_com            d2=0.1103   d6=0.1214   d10=0.1085                best 0.1214
id_31_tablet_False             d2=0.9076   d6=0.9236   d10=0.9158                best 0.9236
addr2_60.0                     d2=0.9748   d6=0.9792   d10=0.9787                best 0.9792
addr2_87.0                     d2=0.9994   d6=1.0000   d10=1.0000   d14=0.9997   best 1.0000
id_19_100.0                    d2=0.0939   d6=0.1873   d10=0.0857                best 0.1873
id_19_193.0                    d2=0.0315   d6=0.0510   d10=-0.0291               best 0.0510
id_19_266.0                    d2=0.2601   d6=0.2420                             best 0.2601
id_19_271.0                    d2=0.0413   d6=0.0701   d10=-0.0495               best 0.0701
id_19_312.0                    d2=0.0700   d6=0.0721   d10=0.0251                best 0.0721
id_19_321.0                    d2=0.0127   d6=-0.0481                            best 0.0127
id_19_410.0                    d2=0.1033   d6=0.0919                             best 0.1033
id_19_427.0                    d2=0.1053   d6=0.0490                             best 0.1053
id_19_infrequent_category      d2=0.1600   d6=0.1584                             best 0.1600
M6_F                           d2=0.2614   d6=0.2679   d10=0.2514                best 0.2679
M3_F                           d2=0.0559   d6=0.0494                             best 0.0559
M5_F                           d2=0.1226   d6=0.1234   d10=0.1185                best 0.1234
card1_2884                     d2=1.0000   d6=0.9859                             best 1.0000
card1_6019                     d2=0.9928   d6=0.9937   d10=0.9919                best 0.9937
card1_7508                     d2=0.9870   d6=1.0000   d10=1.0000                best 1.0000
card1_7585                     d2=0.9704   d6=0.9889   d10=0.9840                best 0.9889
card1_7919                     d2=0.9662   d6=0.9926   d10=1.0000   d14=0.9932   best 1.0000
card1_9500                     d2=0.9998   d6=0.9946                             best 0.9998
card1_9633                     d2=0.9576   d6=0.9878   d10=0.9931   d14=0.9930   best 0.9931
card1_10616                    d2=0.9881   d6=0.9912   d10=0.9909                best 0.9912
card1_12544                    d2=0.9996   d6=1.0000   d10=0.9971                best 1.0000
card1_12695                    d2=1.0000   d6=0.9966                             best 1.0000
card1_12839                    d2=0.9916   d6=0.9958   d10=1.0000   d14=1.0000   best 1.0000
card1_15066                    d2=0.9709   d6=1.0000   d10=1.0000                best 1.0000
card1_15885                    d2=0.9852   d6=0.9966   d10=0.9998   d14=0.9966   best 0.9998
card1_16132                    d2=0.9997   d6=0.9944                             best 0.9997
card1_17188                    d2=0.9980   d6=0.9917                             best 0.9980
card1_infrequent_category      d2=0.6181   d6=0.9885   d10=0.9934   d14=0.9932   best 0.9934
id_31_firefox_False            d2=0.8548   d6=0.8661   d10=0.8567                best 0.8661
M2_F                           d2=0.0264   d6=0.0212                             best 0.0264
P_emaildomain_anonymous.com    d2=0.0571   d6=0.0672   d10=0.0538                best 0.0672
P_emaildomain_aol.com          d2=0.0190   d6=0.0192                             best 0.0192
P_emaildomain_bellsouth.net    d2=0.0163   d6=-0.0409                            best 0.0163
P_emaildomain_comcast.net      d2=0.0120   (sweep still in progress where the log continues)
reg:squarederror.\n[09:20:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0088\nThe best R2 score of 0.0120\n--------------------------------------------------------------------------------\nP_emaildomain_cox.net\nFeature name P_emaildomain_cox.net\n[09:20:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0071\n[09:20:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:20:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0044\nThe best R2 score of 0.0071\n--------------------------------------------------------------------------------\nP_emaildomain_gmail.com\nFeature name P_emaildomain_gmail.com\n[09:20:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0282\n[09:20:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:20:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0293\n[09:20:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of 0.0293\n--------------------------------------------------------------------------------\nP_emaildomain_hotmail.com\nFeature name P_emaildomain_hotmail.com\n[09:20:47] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1507\n[09:20:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:20:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1537\n[09:21:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:21:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.1120\nThe best R2 score of 0.1537\n--------------------------------------------------------------------------------\nP_emaildomain_icloud.com\nFeature name P_emaildomain_icloud.com\n[09:21:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0041\n[09:21:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:21:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0115\nThe best R2 score of -0.0041\n--------------------------------------------------------------------------------\nP_emaildomain_live.com\nFeature name P_emaildomain_live.com\n[09:21:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0130\n[09:21:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:21:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 
score -0.0600\nThe best R2 score of -0.0130\n--------------------------------------------------------------------------------\nP_emaildomain_live.com.mx\nFeature name P_emaildomain_live.com.mx\n[09:21:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0000\n[09:22:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:22:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0670\nThe best R2 score of -0.0000\n--------------------------------------------------------------------------------\nP_emaildomain_me.com\nFeature name P_emaildomain_me.com\n[09:22:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0018\n[09:22:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:22:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0377\nThe best R2 score of -0.0018\n--------------------------------------------------------------------------------\nP_emaildomain_msn.com\nFeature name P_emaildomain_msn.com\n[09:22:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0011\n[09:22:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:22:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0202\nThe best R2 score of -0.0011\n--------------------------------------------------------------------------------\nP_emaildomain_optonline.net\nFeature name P_emaildomain_optonline.net\n[09:22:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0083\n[09:22:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:22:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0203\nThe best R2 score of 0.0083\n--------------------------------------------------------------------------------\nP_emaildomain_outlook.com\nFeature name P_emaildomain_outlook.com\n[09:23:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0040\n[09:23:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:23:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0331\nThe best R2 score of 0.0040\n--------------------------------------------------------------------------------\nP_emaildomain_verizon.net\nFeature name P_emaildomain_verizon.net\n[09:23:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0076\n[09:23:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:23:27] WARNING: /workspace/src/objective/regression_obj.cu:152: 
reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0178\n[09:23:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of 0.0178\n--------------------------------------------------------------------------------\nP_emaildomain_yahoo.com\nFeature name P_emaildomain_yahoo.com\n[09:23:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0302\n[09:23:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:23:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0298\nThe best R2 score of 0.0302\n--------------------------------------------------------------------------------\nP_emaildomain_infrequent_category\nFeature name P_emaildomain_infrequent_category\n[09:23:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0460\n[09:23:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:24:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0274\nThe best R2 score of 0.0460\n--------------------------------------------------------------------------------\naddr1_126.0\nFeature name addr1_126.0\n[09:24:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:24:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:24:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_181.0\nFeature name addr1_181.0\n[09:24:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:24:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:24:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:24:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:24:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_184.0\nFeature name addr1_184.0\n[09:24:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:25:02] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:25:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9984\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_191.0\nFeature 
name addr1_191.0\n[09:25:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:25:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:25:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_204.0\nFeature name addr1_204.0\n[09:25:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:25:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:25:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:25:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:25:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\n[09:25:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:26:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_231.0\nFeature name addr1_231.0\n[09:26:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9999\n[09:26:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:26:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9951\nThe best R2 score of 0.9999\n--------------------------------------------------------------------------------\naddr1_264.0\nFeature name addr1_264.0\n[09:26:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:26:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:26:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:26:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:26:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_272.0\nFeature name addr1_272.0\n[09:26:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:27:02] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:27:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 
score 1.0000\n[09:27:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:27:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_299.0\nFeature name addr1_299.0\n[09:27:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:27:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:27:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9995\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_310.0\nFeature name addr1_310.0\n[09:27:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9975\n[09:27:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:27:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:27:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:28:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_315.0\nFeature name addr1_315.0\n[09:28:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:28:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:28:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_325.0\nFeature name addr1_325.0\n[09:28:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:28:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:28:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_330.0\nFeature name addr1_330.0\n[09:28:50] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:28:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:28:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:29:02] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of 
reg:squarederror.\n[09:29:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_337.0\nFeature name addr1_337.0\n[09:29:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:29:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:29:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_387.0\nFeature name addr1_387.0\n[09:29:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:29:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:29:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:29:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:29:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_433.0\nFeature name addr1_433.0\n[09:29:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:30:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:30:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:30:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:30:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9975\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_441.0\nFeature name addr1_441.0\n[09:30:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:30:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:30:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_472.0\nFeature name addr1_472.0\n[09:30:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:30:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:30:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of 
reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_485.0\nFeature name addr1_485.0\n[09:30:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:31:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:31:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:31:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:31:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\naddr1_infrequent_category\nFeature name addr1_infrequent_category\n[09:31:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.7383\n[09:31:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:31:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9953\n[09:31:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:31:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9981\n[09:31:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:31:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.9994\n[09:32:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:32:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n18, R2 score 0.9985\nThe best R2 score of 0.9994\n--------------------------------------------------------------------------------\nProductCD_H\nFeature name ProductCD_H\n[09:32:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:32:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:32:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\nProductCD_R\nFeature name ProductCD_R\n[09:32:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:32:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:32:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 
1.0000\n--------------------------------------------------------------------------------\nM9_F\nFeature name M9_F\n[09:32:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0367\n[09:33:02] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:33:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0374\n[09:33:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nThe best R2 score of 0.0374\n--------------------------------------------------------------------------------\nid_20_222.0\nFeature name id_20_222.0\n[09:33:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.5102\n[09:33:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:33:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.6643\n[09:33:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:33:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.6281\nThe best R2 score of 0.6643\n--------------------------------------------------------------------------------\nid_20_325.0\nFeature name id_20_325.0\n[09:33:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1236\n[09:33:50] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:33:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0817\nThe best R2 score of 0.1236\n--------------------------------------------------------------------------------\nid_20_333.0\nFeature name id_20_333.0\n[09:34:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.2135\n[09:34:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:34:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.4834\n[09:34:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:34:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.4474\nThe best R2 score of 0.4834\n--------------------------------------------------------------------------------\nid_20_401.0\nFeature name id_20_401.0\n[09:34:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0381\n[09:34:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:34:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0108\nThe best R2 score of 
0.0381\n--------------------------------------------------------------------------------\nid_20_500.0\nFeature name id_20_500.0\n[09:34:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.2413\n[09:34:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:34:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3849\n[09:35:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:35:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.1684\nThe best R2 score of 0.3849\n--------------------------------------------------------------------------------\nid_20_507.0\nFeature name id_20_507.0\n[09:35:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.2923\n[09:35:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:35:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2630\nThe best R2 score of 0.2923\n--------------------------------------------------------------------------------\nid_20_533.0\nFeature name id_20_533.0\n[09:35:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.5013\n[09:35:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:35:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.6157\n[09:35:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:35:57] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.5932\nThe best R2 score of 0.6157\n--------------------------------------------------------------------------------\nid_20_549.0\nFeature name id_20_549.0\n[09:36:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3083\n[09:36:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:36:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.5867\n[09:36:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:36:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.4860\nThe best R2 score of 0.5867\n--------------------------------------------------------------------------------\nid_20_595.0\nFeature name id_20_595.0\n[09:36:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1229\n[09:36:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:36:52] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0641\nThe best R2 score of 0.1229\n--------------------------------------------------------------------------------\nid_20_597.0\nFeature name id_20_597.0\n[09:37:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1220\n[09:37:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:37:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3940\n[09:37:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:37:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.2551\nThe best R2 score of 0.3940\n--------------------------------------------------------------------------------\nid_20_612.0\nFeature name id_20_612.0\n[09:37:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0136\n[09:37:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:37:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0493\nThe best R2 score of 0.0136\n--------------------------------------------------------------------------------\nid_20_infrequent_category\nFeature name id_20_infrequent_category\n[09:37:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.2680\n[09:37:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:37:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.4493\n[09:38:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:38:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.4184\nThe best R2 score of 0.4493\n--------------------------------------------------------------------------------\nid_38_F\nFeature name id_38_F\n[09:38:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.8476\n[09:38:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:38:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.8682\n[09:38:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:38:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.8500\nThe best R2 score of 0.8682\n--------------------------------------------------------------------------------\ncard2_111.0\nFeature name card2_111.0\n[09:39:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of 
reg:squarederror.\n2, R2 score 1.0000\n[09:39:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:39:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:39:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:39:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_170.0\nFeature name card2_170.0\n[09:39:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:39:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:39:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_174.0\nFeature name card2_174.0\n[09:39:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:39:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:39:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_215.0\nFeature name card2_215.0\n[09:40:04] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:40:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:40:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_225.0\nFeature name card2_225.0\n[09:40:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:40:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:40:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:40:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:40:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\n[09:40:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:40:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 1.0000\nThe best R2 score of 
1.0000\n--------------------------------------------------------------------------------\ncard2_268.0\nFeature name card2_268.0\n[09:40:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9999\n[09:41:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:41:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9954\nThe best R2 score of 0.9999\n--------------------------------------------------------------------------------\ncard2_321.0\nFeature name card2_321.0\n[09:41:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:41:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:41:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_360.0\nFeature name card2_360.0\n[09:41:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9998\n[09:41:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:41:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:41:47] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:41:50] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9905\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_361.0\nFeature name card2_361.0\n[09:42:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:42:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:42:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:42:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:42:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_481.0\nFeature name card2_481.0\n[09:42:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:42:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:42:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_490.0\nFeature name 
card2_490.0\n[09:42:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:42:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:42:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:42:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:43:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_512.0\nFeature name card2_512.0\n[09:43:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:43:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:43:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:43:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:43:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_514.0\nFeature name card2_514.0\n[09:43:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:43:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:43:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_553.0\nFeature name card2_553.0\n[09:43:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:43:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:43:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:44:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:44:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_555.0\nFeature name card2_555.0\n[09:44:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:44:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:44:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 
score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_567.0\nFeature name card2_567.0\n[09:44:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9795\n[09:44:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:44:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:44:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:44:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9966\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_583.0\nFeature name card2_583.0\n[09:44:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:45:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:45:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard2_infrequent_category\nFeature name card2_infrequent_category\n[09:45:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.6597\n[09:45:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:45:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9944\n[09:45:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:45:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9964\n[09:45:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:45:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.9957\nThe best R2 score of 0.9964\n--------------------------------------------------------------------------------\nM7_F\nFeature name M7_F\n[09:46:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.3285\n[09:46:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:46:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3366\n[09:46:23] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:46:26] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.3268\nThe best R2 score of 
0.3366\n--------------------------------------------------------------------------------\nweekday_1.0\nFeature name weekday_1.0\n[09:46:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:46:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:46:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:46:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:46:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\n[09:47:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:47:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\nweekday_2.0\nFeature name weekday_2.0\n[09:47:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:47:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:47:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\nweekday_3.0\nFeature name weekday_3.0\n[09:47:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:47:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:47:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\nweekday_4.0\nFeature name weekday_4.0\n[09:47:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:47:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:47:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:48:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:48:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\n[09:48:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:48:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\nweekday_5.0\nFeature name weekday_5.0\n[09:48:23] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:48:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:48:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard5_102.0\nFeature name card5_102.0\n[09:48:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:48:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:48:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:48:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:48:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard5_117.0\nFeature name card5_117.0\n[09:49:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:49:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:49:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:49:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:49:18] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9991\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard5_137.0\nFeature name card5_137.0\n[09:49:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:49:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:49:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard5_166.0\nFeature name card5_166.0\n[09:49:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9991\n[09:49:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:49:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:49:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:49:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\n[09:50:03] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:50:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 0.9996\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard5_195.0\nFeature name card5_195.0\n[09:50:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:50:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:50:22] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:50:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:50:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 1.0000\n[09:50:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:50:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n14, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard5_198.0\nFeature name card5_198.0\n[09:50:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9636\n[09:50:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:50:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.8697\nThe best R2 score of 0.9636\n--------------------------------------------------------------------------------\ncard5_219.0\nFeature name card5_219.0\n[09:51:02] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9993\n[09:51:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:51:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9967\nThe best R2 score of 0.9993\n--------------------------------------------------------------------------------\ncard5_224.0\nFeature name card5_224.0\n[09:51:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9998\n[09:51:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:51:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\n[09:51:32] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:51:34] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9997\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard5_226.0\nFeature name card5_226.0\n[09:51:45] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:51:49] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:51:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard5_229.0\nFeature name card5_229.0\n[09:52:02] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:52:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:52:08] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\ncard5_236.0\nFeature name card5_236.0\n[09:52:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 1.0000\n[09:52:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:52:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 1.0000\nThe best R2 score of 1.0000\n--------------------------------------------------------------------------------\nP_emaildomain_1_hotmail\nFeature name P_emaildomain_1_hotmail\n[09:52:33] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1532\n[09:52:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:52:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1491\nThe best R2 score of 0.1532\n--------------------------------------------------------------------------------\nP_emaildomain_1_outlook\nFeature name P_emaildomain_1_outlook\n[09:52:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0036\n[09:52:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:52:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.0216\nThe best R2 score of 0.0036\n--------------------------------------------------------------------------------\nP_emaildomain_1_infrequent_category\nFeature name P_emaildomain_1_infrequent_category\n[09:53:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0419\n[09:53:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:53:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0159\nThe best R2 score of 0.0419\n--------------------------------------------------------------------------------\nid_17_100.0\nFeature name id_17_100.0\n[09:53:21] WARNING: 
/workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.5949\n[09:53:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:53:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.3455\nThe best R2 score of 0.5949\n--------------------------------------------------------------------------------\nid_17_166.0\nFeature name id_17_166.0\n[09:53:36] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.9128\n[09:53:40] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:53:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.9243\n[09:53:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:53:50] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.9141\nThe best R2 score of 0.9243\n--------------------------------------------------------------------------------\nid_32_24.0\nFeature name id_32_24.0\n[09:54:07] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.6282\n[09:54:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:54:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.6462\n[09:54:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:54:22] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.6217\nThe best R2 score of 0.6462\n--------------------------------------------------------------------------------\nDeviceInfo_MacOS\nFeature name DeviceInfo_MacOS\n[09:54:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.1444\n[09:54:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:54:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.1226\nThe best R2 score of 0.1444\n--------------------------------------------------------------------------------\nDeviceInfo_SM-J700M Build/MMB29K\nFeature name DeviceInfo_SM-J700M Build/MMB29K\n[09:54:54] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score -0.0075\n[09:54:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:55:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score -0.1016\nThe best R2 score of -0.0075\n--------------------------------------------------------------------------------\nDeviceInfo_Trident/7.0\nFeature name DeviceInfo_Trident/7.0\n[09:55:09] WARNING: /workspace/src/objective/regression_obj.cu:152: 
reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.0626\n[09:55:13] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:55:16] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.0404\nThe best R2 score of 0.0626\n--------------------------------------------------------------------------------\nDeviceInfo_Windows\nFeature name DeviceInfo_Windows\n[09:55:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.2669\n[09:55:29] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:55:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2696\n[09:55:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:55:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n10, R2 score 0.2296\nThe best R2 score of 0.2696\n--------------------------------------------------------------------------------\nDeviceInfo_infrequent_category\nFeature name DeviceInfo_infrequent_category\n[09:55:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n2, R2 score 0.2541\n[09:55:59] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[09:56:02] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n6, R2 score 0.2495\nThe best R2 score of 0.2541\n--------------------------------------------------------------------------------\nCPU times: user 2h 57s, sys: 41min 6s, total: 2h 42min 3s\nWall time: 2h 41min 31s\n" ], [ "master_df.to_csv('master_df_time_adjusted_top_300.csv', index=False)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7a20d478b6830445ade5192f8161a8ea3d538e3
17,176
ipynb
Jupyter Notebook
F4_profiling/3_call_NMD_targets/1_DESEQ.ipynb
borisz264/NMD_screen_2020
96eb9323e1de32dc2be049e3724839bad4babff1
[ "MIT" ]
null
null
null
F4_profiling/3_call_NMD_targets/1_DESEQ.ipynb
borisz264/NMD_screen_2020
96eb9323e1de32dc2be049e3724839bad4babff1
[ "MIT" ]
null
null
null
F4_profiling/3_call_NMD_targets/1_DESEQ.ipynb
borisz264/NMD_screen_2020
96eb9323e1de32dc2be049e3724839bad4babff1
[ "MIT" ]
null
null
null
43.264484
289
0.618479
[ [ [ "#installation, may not be needed\n#if (!requireNamespace(\"BiocManager\", quietly = TRUE))\n# install.packages(\"BiocManager\")\n\n#BiocManager::install(\"DESeq2\")", "_____no_output_____" ], [ "suppressMessages(library(DESeq2))", "_____no_output_____" ], [ "#UPF1 knockdown vs scramble\ncount_file <- \"colombo_combined_counts.tsv\"\nrow_names <- \"tx_id\"\ncountdata <- read.csv(count_file, row.names=row_names, sep='\\t')[ ,c('scr1_Homo_sapiens_RNA.Seq', 'scr2_Homo_sapiens_RNA.Seq', 'scr3_Homo_sapiens_RNA.Seq', 'UPF1_KD1_Homo_sapiens_RNA.Seq', 'UPF1_KD2_Homo_sapiens_RNA.Seq', 'UPF1_KD3_Homo_sapiens_RNA.Seq')]\ncountdata <- round(countdata)\ncondition <- factor(c(rep(\"ctl\", 3), rep(\"exp\", 3)))\n# Create a coldata frame and instantiate the DESeqDataSet. See ?DESeqDataSetFromMatrix\ncoldata <- data.frame(row.names=colnames(countdata), condition)\ndds <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design=~condition)\n# Run the DESeq pipeline\ndds <- DESeq(dds)\n#filter for at least 30 counts between all datasets\ndds <- dds[ rowSums(counts(dds)) > 30, ]\n# Get differential expression results\nres <- results(dds)\n## Order by adjusted p-value\nres <- res[order(res$padj), ]\n## Merge with normalized count data\n#res <- merge(as.data.frame(res), as.data.frame(counts(dds, normalized=TRUE)), by=\"row.names\", sort=FALSE)\n#names(res)[1] <- \"Transcript\"\n## Write results\nwrite.table(res, file='DESeq_results/UPF1_kd_vs_scr.txt', quote=F, sep='\\t', col.names = NA, row.names = TRUE)\n\n#UPF1 rescue vs knockdown\ncountdata <- read.csv(count_file, row.names=row_names, sep='\\t')[ ,c('UPF1_KD1_Homo_sapiens_RNA.Seq', 'UPF1_KD2_Homo_sapiens_RNA.Seq', 'UPF1_KD3_Homo_sapiens_RNA.Seq', 'UPF1_rescue1_Homo_sapiens_RNA.Seq', 'UPF1_rescue2_Homo_sapiens_RNA.Seq', 'UPF1_rescue3_Homo_sapiens_RNA.Seq')]\ncountdata <- round(countdata)\ncondition <- factor(c(rep(\"ctl\", 3), rep(\"exp\", 3)))\n# Create a coldata frame and instantiate the DESeqDataSet. See ?DESeqDataSetFromMatrix\ncoldata <- data.frame(row.names=colnames(countdata), condition)\ndds <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design=~condition)\n# Run the DESeq pipeline\ndds <- DESeq(dds)\n#filter for at least 30 counts between all datasets\ndds <- dds[ rowSums(counts(dds)) > 30, ]\n# Get differential expression results\nres <- results(dds)\n## Order by adjusted p-value\nres <- res[order(res$padj), ]\n## Merge with normalized count data\n#res <- merge(as.data.frame(res), as.data.frame(counts(dds, normalized=TRUE)), by=\"row.names\", sort=FALSE)\n#names(res)[1] <- \"Transcript\"\n## Write results\nwrite.table(res, file='DESeq_results/UPF1_rescue_vs_kd.txt', quote=F, sep='\\t', col.names = NA, row.names = TRUE)\n\n#SMG6 knockdown vs scramble\ncountdata <- read.csv(count_file, row.names=row_names, sep='\\t')[ ,c('scr1_Homo_sapiens_RNA.Seq', 'scr2_Homo_sapiens_RNA.Seq', 'scr3_Homo_sapiens_RNA.Seq', 'SMG6_KD1_Homo_sapiens_RNA.Seq', 'SMG6_KD2_Homo_sapiens_RNA.Seq', 'SMG6_KD3_Homo_sapiens_RNA.Seq')]\ncountdata <- round(countdata)\ncondition <- factor(c(rep(\"ctl\", 3), rep(\"exp\", 3)))\n# Create a coldata frame and instantiate the DESeqDataSet. 
See ?DESeqDataSetFromMatrix\ncoldata <- data.frame(row.names=colnames(countdata), condition)\ndds <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design=~condition)\n# Run the DESeq pipeline\ndds <- DESeq(dds)\n#filter for at least 30 counts between all datasets\ndds <- dds[ rowSums(counts(dds)) > 30, ]\n# Get differential expression results\nres <- results(dds)\n## Order by adjusted p-value\nres <- res[order(res$padj), ]\n## Merge with normalized count data\n#res <- merge(as.data.frame(res), as.data.frame(counts(dds, normalized=TRUE)), by=\"row.names\", sort=FALSE)\n#names(res)[1] <- \"Transcript\"\n## Write results\nwrite.table(res, file='DESeq_results/SMG6_kd_vs_scr.txt', quote=F, sep='\\t', col.names = NA, row.names = TRUE)\n\n#SMG6 rescue vs knockdown\ncountdata <- read.csv(count_file, row.names=row_names, sep='\\t')[ ,c('SMG6_KD1_Homo_sapiens_RNA.Seq', 'SMG6_KD2_Homo_sapiens_RNA.Seq', 'SMG6_KD3_Homo_sapiens_RNA.Seq', 'SMG6_rescue1_Homo_sapiens_RNA.Seq', 'SMG6_rescue2_Homo_sapiens_RNA.Seq', 'SMG6_rescue3_Homo_sapiens_RNA.Seq')]\ncountdata <- round(countdata)\ncondition <- factor(c(rep(\"ctl\", 3), rep(\"exp\", 3)))\n# Create a coldata frame and instantiate the DESeqDataSet. See ?DESeqDataSetFromMatrix\ncoldata <- data.frame(row.names=colnames(countdata), condition)\ndds <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design=~condition)\n# Run the DESeq pipeline\ndds <- DESeq(dds)\n#filter for at least 30 counts between all datasets\ndds <- dds[ rowSums(counts(dds)) > 30, ]\n# Get differential expression results\nres <- results(dds)\n## Order by adjusted p-value\nres <- res[order(res$padj), ]\n## Merge with normalized count data\n#res <- merge(as.data.frame(res), as.data.frame(counts(dds, normalized=TRUE)), by=\"row.names\", sort=FALSE)\n#names(res)[1] <- \"Transcript\"\n## Write results\nwrite.table(res, file='DESeq_results/SMG6_rescue_vs_kd.txt', quote=F, sep='\\t', col.names = NA, row.names = TRUE)\n\n#SMG7 knockdown vs scramble\ncountdata <- read.csv(count_file, row.names=row_names, sep='\\t')[ ,c('scr4_Homo_sapiens_RNA.Seq', 'scr5_Homo_sapiens_RNA.Seq', 'scr6_Homo_sapiens_RNA.Seq', 'SMG7_KD1_Homo_sapiens_RNA.Seq', 'SMG7_KD2_Homo_sapiens_RNA.Seq', 'SMG7_KD3_Homo_sapiens_RNA.Seq')]\ncountdata <- round(countdata)\ncondition <- factor(c(rep(\"ctl\", 3), rep(\"exp\", 3)))\n# Create a coldata frame and instantiate the DESeqDataSet. 
See ?DESeqDataSetFromMatrix\ncoldata <- data.frame(row.names=colnames(countdata), condition)\ndds <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design=~condition)\n# Run the DESeq pipeline\ndds <- DESeq(dds)\n#filter for at least 30 counts between all datasets\ndds <- dds[ rowSums(counts(dds)) > 30, ]\n# Get differential expression results\nres <- results(dds)\n## Order by adjusted p-value\nres <- res[order(res$padj), ]\n## Merge with normalized count data\n#res <- merge(as.data.frame(res), as.data.frame(counts(dds, normalized=TRUE)), by=\"row.names\", sort=FALSE)\n#names(res)[1] <- \"Transcript\"\n## Write results\nwrite.table(res, file='DESeq_results/SMG7_kd_vs_scr.txt', quote=F, sep='\\t', col.names = NA, row.names = TRUE)\n\n#SMG7 rescue vs knockdown\ncountdata <- read.csv(count_file, row.names=row_names, sep='\\t')[ ,c('SMG7_KD1_Homo_sapiens_RNA.Seq', 'SMG7_KD2_Homo_sapiens_RNA.Seq', 'SMG7_KD3_Homo_sapiens_RNA.Seq', 'SMG7_rescue1_Homo_sapiens_RNA.Seq', 'SMG7_rescue2_Homo_sapiens_RNA.Seq', 'SMG7_rescue3_Homo_sapiens_RNA.Seq')]\ncountdata <- round(countdata)\ncondition <- factor(c(rep(\"ctl\", 3), rep(\"exp\", 3)))\n# Create a coldata frame and instantiate the DESeqDataSet. See ?DESeqDataSetFromMatrix\ncoldata <- data.frame(row.names=colnames(countdata), condition)\ndds <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design=~condition)\n# Run the DESeq pipeline\ndds <- DESeq(dds)\n#filter for at least 30 counts between all datasets\ndds <- dds[ rowSums(counts(dds)) > 30, ]\n# Get differential expression results\nres <- results(dds)\n## Order by adjusted p-value\nres <- res[order(res$padj), ]\n## Merge with normalized count data\n#res <- merge(as.data.frame(res), as.data.frame(counts(dds, normalized=TRUE)), by=\"row.names\", sort=FALSE)\n#names(res)[1] <- \"Transcript\"\n## Write results\nwrite.table(res, file='DESeq_results/SMG7_rescue_vs_kd.txt', quote=F, sep='\\t', col.names = NA, row.names = TRUE)\n\n#SMG6/7 double_knockdown vs scramble\ncountdata <- read.csv(count_file, row.names=row_names, sep='\\t')[ ,c('scr4_Homo_sapiens_RNA.Seq', 'scr5_Homo_sapiens_RNA.Seq', 'scr6_Homo_sapiens_RNA.Seq', 'dKD1_Homo_sapiens_RNA.Seq', 'dKD2_Homo_sapiens_RNA.Seq', 'dKD3_Homo_sapiens_RNA.Seq')]\ncountdata <- round(countdata)\ncondition <- factor(c(rep(\"ctl\", 3), rep(\"exp\", 3)))\n# Create a coldata frame and instantiate the DESeqDataSet. 
See ?DESeqDataSetFromMatrix\ncoldata <- data.frame(row.names=colnames(countdata), condition)\ndds <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design=~condition)\n# Run the DESeq pipeline\ndds <- DESeq(dds)\n#filter for at least 30 counts between all datasets\ndds <- dds[ rowSums(counts(dds)) > 30, ]\n# Get differential expression results\nres <- results(dds)\n## Order by adjusted p-value\nres <- res[order(res$padj), ]\n## Merge with normalized count data\n#res <- merge(as.data.frame(res), as.data.frame(counts(dds, normalized=TRUE)), by=\"row.names\", sort=FALSE)\n#names(res)[1] <- \"Transcript\"\n## Write results\nwrite.table(res, file='DESeq_results/dSMG_kd_vs_scr.txt', quote=F, sep='\\t', col.names = NA, row.names = TRUE)\n\n#SMG6 rescue vs SMG6/7 double knockdown\ncountdata <- read.csv(count_file, row.names=row_names, sep='\\t')[ ,c('dKD1_Homo_sapiens_RNA.Seq', 'dKD2_Homo_sapiens_RNA.Seq', 'dKD3_Homo_sapiens_RNA.Seq', 'dKD_SMG6_rescue1_Homo_sapiens_RNA.Seq', 'dKD_SMG6_rescue2_Homo_sapiens_RNA.Seq', 'dKD_SMG6_rescue3_Homo_sapiens_RNA.Seq')]\ncountdata <- round(countdata)\ncondition <- factor(c(rep(\"ctl\", 3), rep(\"exp\", 3)))\n# Create a coldata frame and instantiate the DESeqDataSet. See ?DESeqDataSetFromMatrix\ncoldata <- data.frame(row.names=colnames(countdata), condition)\ndds <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design=~condition)\n# Run the DESeq pipeline\ndds <- DESeq(dds)\n#filter for at least 30 counts between all datasets\ndds <- dds[ rowSums(counts(dds)) > 30, ]\n# Get differential expression results\nres <- results(dds)\n## Order by adjusted p-value\nres <- res[order(res$padj), ]\n## Merge with normalized count data\n#res <- merge(as.data.frame(res), as.data.frame(counts(dds, normalized=TRUE)), by=\"row.names\", sort=FALSE)\n#names(res)[1] <- \"Transcript\"\n## Write results\nwrite.table(res, file='DESeq_results/SMG6_rescue_vs_dSMG_kd.txt', quote=F, sep='\\t', col.names = NA, row.names = TRUE)\n\n#SMG7 rescue vs SMG6/7 double knockdown\ncountdata <- read.csv(count_file, row.names=row_names, sep='\\t')[ ,c('dKD1_Homo_sapiens_RNA.Seq', 'dKD2_Homo_sapiens_RNA.Seq', 'dKD3_Homo_sapiens_RNA.Seq', 'dKD_SMG7_rescue1_Homo_sapiens_RNA.Seq', 'dKD_SMG7_rescue2_Homo_sapiens_RNA.Seq', 'dKD_SMG7_rescue3_Homo_sapiens_RNA.Seq')]\ncountdata <- round(countdata)\ncondition <- factor(c(rep(\"ctl\", 3), rep(\"exp\", 3)))\n# Create a coldata frame and instantiate the DESeqDataSet. 
See ?DESeqDataSetFromMatrix\ncoldata <- data.frame(row.names=colnames(countdata), condition)\ndds <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design=~condition)\n# Run the DESeq pipeline\ndds <- DESeq(dds)\n#filter for at least 30 counts between all datasets\ndds <- dds[ rowSums(counts(dds)) > 30, ]\n# Get differential expression results\nres <- results(dds)\n## Order by adjusted p-value\nres <- res[order(res$padj), ]\n## Merge with normalized count data\n#res <- merge(as.data.frame(res), as.data.frame(counts(dds, normalized=TRUE)), by=\"row.names\", sort=FALSE)\n#names(res)[1] <- \"Transcript\"\n## Write results\nwrite.table(res, file='DESeq_results/SMG7_rescue_vs_dSMG_kd.txt', quote=F, sep='\\t', col.names = NA, row.names = TRUE)\n", "converting counts to integer mode\n\nestimating size factors\n\nestimating dispersions\n\ngene-wise dispersion estimates\n\nmean-dispersion relationship\n\nfinal dispersion estimates\n\nfitting model and testing\n\nconverting counts to integer mode\n\nestimating size factors\n\nestimating dispersions\n\ngene-wise dispersion estimates\n\nmean-dispersion relationship\n\nfinal dispersion estimates\n\nfitting model and testing\n\nconverting counts to integer mode\n\nestimating size factors\n\nestimating dispersions\n\ngene-wise dispersion estimates\n\nmean-dispersion relationship\n\nfinal dispersion estimates\n\nfitting model and testing\n\nconverting counts to integer mode\n\nestimating size factors\n\nestimating dispersions\n\ngene-wise dispersion estimates\n\nmean-dispersion relationship\n\nfinal dispersion estimates\n\nfitting model and testing\n\nconverting counts to integer mode\n\nestimating size factors\n\nestimating dispersions\n\ngene-wise dispersion estimates\n\nmean-dispersion relationship\n\nfinal dispersion estimates\n\nfitting model and testing\n\nconverting counts to integer mode\n\nestimating size factors\n\nestimating dispersions\n\ngene-wise dispersion estimates\n\nmean-dispersion relationship\n\nfinal dispersion estimates\n\nfitting model and testing\n\nconverting counts to integer mode\n\nestimating size factors\n\nestimating dispersions\n\ngene-wise dispersion estimates\n\nmean-dispersion relationship\n\nfinal dispersion estimates\n\nfitting model and testing\n\nconverting counts to integer mode\n\nestimating size factors\n\nestimating dispersions\n\ngene-wise dispersion estimates\n\nmean-dispersion relationship\n\nfinal dispersion estimates\n\nfitting model and testing\n\nconverting counts to integer mode\n\nestimating size factors\n\nestimating dispersions\n\ngene-wise dispersion estimates\n\nmean-dispersion relationship\n\nfinal dispersion estimates\n\nfitting model and testing\n\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
e7a21ff40a0c667e658184470b1166b42aef3968
51,725
ipynb
Jupyter Notebook
logistic-regression/gradient-descent-logistic-regression.ipynb
appliedecon/data602-lectures
ecc394095270aff9487a68462ad79aa3ddae458b
[ "MIT" ]
3
2021-09-28T00:27:00.000Z
2022-03-19T17:30:27.000Z
logistic-regression/gradient-descent-logistic-regression.ipynb
appliedecon/data602-lectures
ecc394095270aff9487a68462ad79aa3ddae458b
[ "MIT" ]
null
null
null
logistic-regression/gradient-descent-logistic-regression.ipynb
appliedecon/data602-lectures
ecc394095270aff9487a68462ad79aa3ddae458b
[ "MIT" ]
6
2021-09-27T16:16:49.000Z
2022-03-23T21:29:25.000Z
167.394822
21,056
0.889454
[ [ [ "## Load the iris data", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\nfrom sklearn.datasets import load_iris\nfrom numpy.linalg import inv\nimport pandas as pd\nimport numpy as np\n\niris = load_iris()\niris['data'][:5,:]\n\ny = np.where(iris['target'] == 2, 1, 0)\nX = iris['data']\n\nconst = np.ones(shape=y.shape).reshape(-1,1)\n\nmat = np.concatenate( (const, X), axis=1)\nmat[:5,:]", "_____no_output_____" ] ], [ [ "## Recall the algorithm we created for gradient descent for linear regression\nUsing the following cost function:\n$$J(w)=\\frac{1}{2}\\sum(y^{(i)} - \\hat{y}^{(i)})^2$$", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef gradientDescent(x, y, theta, alpha, m, numIterations):\n thetaHistory = list()\n \n xTrans = x.transpose()\n costList = list()\n \n for i in range(0, numIterations):\n # data x feature weights = y_hat\n hypothesis = np.dot(x, theta)\n # how far we are off\n loss = hypothesis - y \n # mse\n cost = np.sum(loss ** 2) / (2 * m)\n costList.append(cost)\n\n # avg gradient per example\n gradient = np.dot(xTrans, loss) / m \n\n # update\n theta = theta - alpha * gradient\n thetaHistory.append(theta)\n \n return thetaHistory, costList", "_____no_output_____" ] ], [ [ "## For Logistic regression we replace with our likehihood function:\n\n$$\nJ(w)=\\sum{[-y^{(i)}log(\\theta(z^{(i)}))-(1-y^{(i)})log(1-\\theta(z^{(i)})]}\n$$\n\n## And add the sigmoid function to bound $y$ between 0 and 1", "_____no_output_____" ] ], [ [ "def gradientDescent(x, y, alpha, numIterations):\n def mle(y,yhat):\n '''\n This replaces the mean squared error\n '''\n return (-y.dot(np.log(yhat)) - ((1-y)).dot(np.log(1-yhat)))\n \n def sigmoid(z):\n '''\n Transforms values to follow the sigmoid function and bound between 0 and 1\n '''\n return 1./(1. + np.exp(-np.clip(z, -250, 250)))\n \n # number of examples in the training data\n m = x.shape[0]\n\n # initialize weights to small random numbers\n theta = np.random.normal(loc=0.0, scale=0.1, size=x.shape[1])\n \n # history of theta values\n thetaHistory = list()\n \n xTrans = x.transpose()\n \n # history of cost values\n costList = list()\n \n for i in range(0, numIterations):\n \n # predicted value based on feature matrix and current weights\n hypothesis = np.dot(x, theta)\n \n # sigmoid transformation so we have bounded values\n hypothesis = sigmoid(hypothesis)\n \n # how far we are off from the actual value\n loss = hypothesis - y \n \n # determine cost based on the log likehilood function\n cost = mle(y, hypothesis)\n costList.append(cost)\n\n # avg gradient per example\n gradient = np.dot(xTrans, loss) / m \n\n # update the weights\n theta = theta - alpha * gradient\n thetaHistory.append(theta)\n \n return thetaHistory, costList", "_____no_output_____" ] ], [ [ "## Let's try it out\n- Run the algorithm, which gives us the weight and cost history. \n- Plot the cost to see if it converges. \n- Make predictions with the last batch of weights. \n- Apply the sigmoid function to the above predictions. \n- Plot the actual vs. predicted values. 
\n- Plot the evolution of the weights for each iteration.", "_____no_output_____" ] ], [ [ "iters = 500000\n\nimport datetime\n\nstart_ts = datetime.datetime.now()\nbetaHistory, costList = gradientDescent(mat, y, alpha=0.01, numIterations=iters)\n \nend_ts = datetime.datetime.now()\n\nprint(f'Completed in {end_ts-start_ts}')\n\n# cost history\nplt.plot(costList)\nplt.title(f'Final cost: {costList[-1]:,.2f}', loc='left')\nplt.show()\n\n# predict history\ngs_betas = betaHistory[iters-1]\ngs_predictions = np.dot(mat, gs_betas)\n\n# we need to apply the sigmoid/activation function to bound the predictions between (0,1)\ngs_predictions = 1./(1+np.exp(-gs_predictions))\n\nplt.plot(y, gs_predictions, 'bo', alpha=0.2)\nplt.xlabel('Actual')\nplt.ylabel('Predicted')\nplt.title('Gradient Descent Regression Fit on Training Data')\nplt.show()\n\nfrom collections import defaultdict\nthetas = defaultdict(list)\n\nfor i in range(len(betaHistory)):\n for j in range(len(betaHistory[i])):\n thetas[j].append(betaHistory[i][j])\n \nthetasD = pd.DataFrame.from_dict(thetas)\nthetasD.plot(legend=False)\nplt.title('Beta Estimates')\nplt.ylabel('Coefficient')\nplt.xlabel('Iteration')\nplt.show()", "Completed in 0:00:17.566409\n" ] ], [ [ "- We can see the loss is decreasing over the iterations. \n- Predictions are bounded between 0 and 1 because of the sigmoid function. \n- Weights update after each iteration and will eventually stabilize.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7a229d82da48a96058b1e9ed277ff92d8a09045
14,466
ipynb
Jupyter Notebook
naversearchCrawlerSelenium.ipynb
JeongCheck/Crawling
e823ecda4d3a70d35976f18186c4538c4943f063
[ "MIT" ]
null
null
null
naversearchCrawlerSelenium.ipynb
JeongCheck/Crawling
e823ecda4d3a70d35976f18186c4538c4943f063
[ "MIT" ]
null
null
null
naversearchCrawlerSelenium.ipynb
JeongCheck/Crawling
e823ecda4d3a70d35976f18186c4538c4943f063
[ "MIT" ]
3
2021-05-18T04:20:20.000Z
2021-05-27T01:32:08.000Z
22.53271
213
0.496613
[ [ [ "# 셀레니움을 이용한 네이버 블로그(검색창) 크롤러\n- 네이버 메인 검색 페이지에서 크롤링한다.", "_____no_output_____" ] ], [ [ "import platform\nprint(platform.architecture())", "('64bit', '')\n" ], [ "!python --version", "Python 3.7.9\r\n" ], [ "pwd", "_____no_output_____" ], [ "# 네이버에서 검색어 입력받아 검색 한 후 블로그 메뉴를 선택하고\n# 오른쪽에 있는 검색옵션 버튼을 눌러서\n# 정렬 방식과 기간을 입력하기\n\n#Step 0. 필요한 모듈과 라이브러리를 로딩하고 검색어를 입력 받습니다.\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport math\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport urllib.request as req\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport tqdm\nfrom tqdm.notebook import tqdm", "_____no_output_____" ], [ "query_txt = '성심당여행대전'\nstart_date= \"20190101\"\nend_date= \"20210501\"", "_____no_output_____" ], [ "os.getenv('HOME')", "_____no_output_____" ], [ "webdriver.__version__", "_____no_output_____" ], [ "#Step 1. 크롬 웹브라우저 실행\npath = os.getenv('HOME')+ '/chromedriver'", "_____no_output_____" ], [ "driver = webdriver.Chrome(path)\n# 사이트 주소는 네이버\nc\ntime.sleep(1)", "_____no_output_____" ], [ "#Step 2. 네이버 검색창에 \"검색어\" 검색\nelement = driver.find_element_by_name(\"query\")\nelement.send_keys(query_txt)\nelement.submit()\ntime.sleep(2)", "_____no_output_____" ], [ "#Step 3. \"블로그\" 카테고리 선택\ndriver.find_element_by_link_text(\"블로그\").click( ) \ntime.sleep(2)", "_____no_output_____" ], [ "#Step 4. 오른쪽의 검색 옵션 버튼 클릭\ndriver.find_element_by_class_name(\"btn_option._search_option_open_btn\").click( )\ntime.sleep(2)", "_____no_output_____" ], [ "driver.find_element_by_class_name(\"txt.txt_option._calendar_select_trigger\").click() # 관련도순 xpath\n# element.find_element_by_css_selector(\"#header > div.header_common > div > div.area_search > form > fieldset > a.button.button_blog\").click() # 관련도순 xpath\n# element.clear()\n# element.send_keys(query_txt) # query_txt는 위에서 입력한 '이재용'\n# element.submit()", "_____no_output_____" ], [ "#Step 1. 크롬 웹브라우저 실행\npath = os.getenv('HOME')+ '/chromedriver'\n\ndriver = webdriver.Chrome(path)\n# 사이트 주소는 네이버\ndriver.get('http://www.naver.com')\ntime.sleep(0.1)\n\n# # login\n# login = {\n# \"id\" : \"iminu95\",\n# \"pw\" : \"95bbkmjamy\"\n# }\n\n# # 아이디와 비밀번호를 입력합니다.\n# time.sleep(0.5) ## 0.5초\n\n# driver.find_element_by_class_name('link_login').click( )\n# time.sleep(1)\n\n# # driver.find_element_by_name('id').send_keys('아이디') # \"아이디라는 값을 보내준다\"\n# driver.find_element_by_name('id').send_keys(login.get(\"id\"))\n# time.sleep(0.5) ## 0.5초\n# driver.find_element_by_name('pw').send_keys(login.get(\"pw\")) \n# time.sleep(0.5) ## 0.5초\n# driver.find_element_by_class_name('btn_global').click( )\n# time.sleep(0.5) ## 0.5초\n\n\n\n\n#Step 2. 네이버 검색창에 \"검색어\" 검색\nelement = driver.find_element_by_name(\"query\")\nelement.send_keys(query_txt)\nelement.submit()\ntime.sleep(0.1)\n\n#Step 3. \"블로그\" 카테고리 선택\ndriver.find_element_by_link_text(\"블로그\").click( ) \ntime.sleep(2)\n\n#Step 4. 오른쪽의 검색 옵션 버튼 클릭\ndriver.find_element_by_class_name(\"btn_option._search_option_open_btn\").click( )\ntime.sleep(2)\n\n\n#Step 6. 
날짜 입력\n# driver.find_element_by_class_name(\"txt.txt_option._calendar_select_trigger\").click() # 관련도순 xpath\n\n# driver.find_element_by_id(\"search_start_date\").send_keys(start_date)\n# driver.find_element_by_id(\"search_end_date\").send_keys(end_date)\n# time.sleep(0.1)\n\n# driver.find_element_by_id(\"periodSearch\").click()\n# time.sleep(0.1)\n\n# searched_post_num = driver.find_element_by_class_name('search_number').text\n# print(searched_post_num)\n\nurl_list = []\ntitle_list = []\n\ntotal_page = 2 \n# total_page = math.ceil(int(searched_post_num.replace(',', '').strip('건')) / 7)\nprint('total_page :', total_page)\n\nfor i in tqdm(range(0, total_page)): # 페이지 번호\n url = f'https://section.blog.naver.com/Search/Post.naver?pageNo={i}&rangeType=sim&orderBy=recentdate&startDate={start_date}&endDate={end_date}&keyword={query_txt}'\n driver.get(url)\n# response = requests.get(url)\n# soup = BeautifulSoup(response.text, 'html.parser')\n# print(soup)\n time.sleep(0.5)\n# area = soup.findAll('div', {'class' : 'list_search_post'}) #.find_all('a', {'class' : 'url'})\n# print(area)\n \n # URL 크롤링 시작\n titles = \"a.sh_blog_title._sp_each_url._sp_each_title\" # #content\n article_raw = driver.find_elements_by_class_name(titles)\n# article_raw = driver.find_elements_by_css_selector('#content > section > div.area_list_search > div:nth-child(1)')\n \n# article_raw = driver.find_elements_by_xpath(f'//*[@id=\"content\"]/section/div[2]/div[{i}]')\n \n# print(article_raw)\n\n # url 크롤링 시작 # 7개 \n for article in article_raw:\n url = article.get_attribute('href') \n print(url)\n url_list.append(url)\n \n # 제목 크롤링 시작 \n for article in article_raw:\n title = article.get_attribute('title') \n title_list.append(title)\n \n print(title)\n \nprint('url갯수: ', len(url_list))\nprint('url갯수: ', len(title_list))\n\n# df = pd.DataFrame({'url':url_list, 'title':title_list})\n\n# # 저장하기\n# df.to_csv(\"./blog_url.csv\")", "total_page : 2\n" ], [ "li = [2, 3, 4, 4, 5, 6, 7, 8]\nlen(li)", "_____no_output_____" ], [ "for i in range(0, 8, 2):\n print(i)", "0\n2\n4\n6\n" ], [ "new = []\nfor i in range(0, len(li)-1, 2):\n new.append([li[i], li[i+1]])", "_____no_output_____" ], [ "new", "_____no_output_____" ], [ "article_raw = driver.find_elements_by_xpath('//*[@id=\"content\"]/section/div[2]/div[1]')\n# article_raw.get_attribute('href')\nfor i in article_raw:\n print(i.get_attribute('href'))", "None\n" ], [ "//*[@id=\"content\"]/section/div[2]\n//*[@id=\"content\"]/section/div[2]\n//*[@id=\"content\"]/section/div[2]\n\n//*[@id=\"content\"]/section/div[2]/div[1]\n//*[@id=\"content\"]/section/div[2]/div[2]\n//*[@id=\"content\"]/section/div[2]/div[3]\n...\n//*[@id=\"content\"]/section/div[2]/div[7]", "_____no_output_____" ] ], [ [ "1 page = 7 posts\n72 page search\n\nsample = https://section.blog.naver.com/Search/Post.naver?pageNo=1&rangeType=PERIOD&orderBy=sim&startDate=2019-01-01&endDate=2021-05-01&keyword=%EC%84%B1%EC%8B%AC%EB%8B%B9%EC%97%AC%ED%96%89%EB%8C%80%EC%A0%84", "_____no_output_____" ] ], [ [ "## 제목 눌러서 블로그 페이지 열기\ndriver.find_element_by_class_name('title').click()\ntime.sleep(1)", "_____no_output_____" ], [ "type(searched_post_num), searched_post_num", "_____no_output_____" ], [ "import re", "_____no_output_____" ], [ "re.sub('^[0-9]', '', searched_post_num)", "_____no_output_____" ], [ "searched_post_num", "_____no_output_____" ], [ "searched_post_num.replace(',', '').replace('건', '')", "_____no_output_____" ], [ "total_page = math.ceil(int(searched_post_num.replace(',', '').strip('건')) / 7)\ntotal_page", 
"_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7a2315fe2368c6c75effb73f8666221654b930d
971,733
ipynb
Jupyter Notebook
U-Net/Demo.ipynb
panecho/CamVid-Segmentation-Pytorch
3e61667cc2e839827e2ca9a0e801f51efe91db3f
[ "MIT" ]
7
2020-09-08T15:09:03.000Z
2021-11-24T22:57:51.000Z
U-Net/Demo.ipynb
panecho/CamVid-Segmentation-Pytorch
3e61667cc2e839827e2ca9a0e801f51efe91db3f
[ "MIT" ]
2
2020-09-08T05:19:08.000Z
2021-11-20T05:28:50.000Z
U-Net/Demo.ipynb
panecho/CamVid-Segmentation-Pytorch
3e61667cc2e839827e2ca9a0e801f51efe91db3f
[ "MIT" ]
6
2021-01-17T18:31:54.000Z
2021-07-29T07:15:51.000Z
971,733
971,733
0.963121
[ [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly&response_type=code\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "%run \"./Unet/train.py\"", "The device being used is: cuda\n\nTraining Started.....\n" ], [ "!cp -r \"./Unet\" \"./drive/My Drive/\"", "_____no_output_____" ], [ "%run \"./Unet/test.py\"", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
e7a23f1e3aecc6a441ce1fa77eaca8d241649b6e
8,116
ipynb
Jupyter Notebook
data_analysis/Mean-Variance-Standard Deviation Calculator.ipynb
alanpirotta/freecodecamp_certif
c768832dc65f5dd9d712e0c7a00eb7323493f350
[ "MIT" ]
null
null
null
data_analysis/Mean-Variance-Standard Deviation Calculator.ipynb
alanpirotta/freecodecamp_certif
c768832dc65f5dd9d712e0c7a00eb7323493f350
[ "MIT" ]
null
null
null
data_analysis/Mean-Variance-Standard Deviation Calculator.ipynb
alanpirotta/freecodecamp_certif
c768832dc65f5dd9d712e0c7a00eb7323493f350
[ "MIT" ]
null
null
null
30.859316
781
0.531419
[ [ [ "import numpy as np", "_____no_output_____" ], [ "list = [0,1,2,3,4,5,6,7,8]", "_____no_output_____" ], [ "len(list)", "_____no_output_____" ], [ "if len(list) < 9:\n try:\n num = int(\"error\")\n except ValueError:\n raise ValueError(\"List must contain nine numbers.\") ", "_____no_output_____" ], [ "calculations={}\na = np.array(list)\na = a.reshape(3,3)\na", "_____no_output_____" ] ], [ [ "{\n 'mean': [axis1, axis2, flattened],\n \n 'variance': [axis1, axis2, flattened],\n \n 'standard deviation': [axis1, axis2, flattened],\n \n 'max': [axis1, axis2, flattened],\n \n 'min': [axis1, axis2, flattened],\n \n 'sum': [axis1, axis2, flattened]\n}", "_____no_output_____" ] ], [ [ "calculations['mean']= [a.mean(axis=0).tolist(), a.mean(axis=1).tolist(), a.mean().tolist()]\ncalculations['mean']", "_____no_output_____" ], [ "calculations['variance']= [a.var(axis=0).tolist(), a.var(axis=1).tolist(), a.var().tolist()]\ncalculations", "_____no_output_____" ], [ "calculations['standard deviation']= [a.std(axis=0).tolist(), a.std(axis=1).tolist(), a.std().tolist()]\ncalculations", "_____no_output_____" ], [ "calculations['max']= [a.max(axis=0).tolist(), a.max(axis=1).tolist(), a.max().tolist()]\ncalculations['min']= [a.min(axis=0).tolist(), a.min(axis=1).tolist(), a.min().tolist()]\ncalculations['sum']= [a.sum(axis=0).tolist(), a.sum(axis=1).tolist(), a.sum().tolist()]\ncalculations", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7a245011aa03b6da684754c1d890db01e377961
6,932
ipynb
Jupyter Notebook
Torrent_To_Google_Drive_Downloader.ipynb
abhibhaw/Torrent-To-Google-Drive-Downloader
112da280ee7f0f718ac4b30365c2df81a7911d72
[ "MIT" ]
null
null
null
Torrent_To_Google_Drive_Downloader.ipynb
abhibhaw/Torrent-To-Google-Drive-Downloader
112da280ee7f0f718ac4b30365c2df81a7911d72
[ "MIT" ]
null
null
null
Torrent_To_Google_Drive_Downloader.ipynb
abhibhaw/Torrent-To-Google-Drive-Downloader
112da280ee7f0f718ac4b30365c2df81a7911d72
[ "MIT" ]
null
null
null
28.883333
276
0.453837
[ [ [ "<a href=\"https://colab.research.google.com/github/abhibhaw/Torrent-To-Google-Drive-Downloader/blob/master/Torrent_To_Google_Drive_Downloader.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Torrent To Google Drive Downloader ", "_____no_output_____" ], [ "**Important Note:** To get more disk space:\n> Go to Runtime -> Change Runtime and give GPU as the Hardware Accelerator. You will get around 384GB to download any torrent you want.", "_____no_output_____" ], [ "### Install libtorrent and Initialize Session", "_____no_output_____" ] ], [ [ "!apt install python3-libtorrent\n\nimport libtorrent as lt\n\nses = lt.session()\nses.listen_on(6881, 6891)\ndownloads = []", "_____no_output_____" ] ], [ [ "### Mount Google Drive\nTo stream files we need to mount Google Drive.", "_____no_output_____" ] ], [ [ "from google.colab import drive\n\ndrive.mount(\"/content/drive\")", "_____no_output_____" ] ], [ [ "### Add From Torrent File\nYou can run this cell to add more files as many times as you want", "_____no_output_____" ] ], [ [ "from google.colab import files\n\nsource = files.upload()\nparams = {\n \"save_path\": \"/content/drive/My Drive/Torrent\",\n \"ti\": lt.torrent_info(list(source.keys())[0]),\n}\ndownloads.append(ses.add_torrent(params))", "_____no_output_____" ] ], [ [ "### Add From Magnet Link\nYou can run this cell to add more files as many times as you want", "_____no_output_____" ] ], [ [ "params = {\"save_path\": \"/content/drive/My Drive/Torrent\"}\n\nwhile True:\n magnet_link = input(\"Enter Magnet Link Or Type Exit: \")\n if magnet_link.lower() == \"exit\":\n break\n downloads.append(\n lt.add_magnet_uri(ses, magnet_link, params)\n )\n", "_____no_output_____" ] ], [ [ "### Start Download\nSource: https://stackoverflow.com/a/5494823/7957705 and [#3 issue](https://github.com/FKLC/Torrent-To-Google-Drive-Downloader/issues/3) which refers to this [stackoverflow question](https://stackoverflow.com/a/6053350/7957705)", "_____no_output_____" ] ], [ [ "import time\nfrom IPython.display import display\nimport ipywidgets as widgets\n\nstate_str = [\n \"queued\",\n \"checking\",\n \"downloading metadata\",\n \"downloading\",\n \"finished\",\n \"seeding\",\n \"allocating\",\n \"checking fastresume\",\n]\n\nlayout = widgets.Layout(width=\"auto\")\nstyle = {\"description_width\": \"initial\"}\ndownload_bars = [\n widgets.FloatSlider(\n step=0.01, disabled=True, layout=layout, style=style\n )\n for _ in downloads\n]\ndisplay(*download_bars)\n\nwhile downloads:\n next_shift = 0\n for index, download in enumerate(downloads[:]):\n bar = download_bars[index + next_shift]\n if not download.is_seed():\n s = download.status()\n\n bar.description = \" \".join(\n [\n download.name(),\n str(s.download_rate / 1000),\n \"kB/s\",\n state_str[s.state],\n ]\n )\n bar.value = s.progress * 100\n else:\n next_shift -= 1\n ses.remove_torrent(download)\n downloads.remove(download)\n bar.close() # Seems to be not working in Colab (see https://github.com/googlecolab/colabtools/issues/726#issue-486731758)\n download_bars.remove(bar)\n print(download.name(), \"complete\")\n time.sleep(1)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7a245f916787c247d81e1704cc49fe13e2bfe93
163,213
ipynb
Jupyter Notebook
scripts/pathways_3_categorization.ipynb
iganna/evo_epigen
21198d2b319f4488dced492d9c3f7a08bb3fc315
[ "MIT" ]
null
null
null
scripts/pathways_3_categorization.ipynb
iganna/evo_epigen
21198d2b319f4488dced492d9c3f7a08bb3fc315
[ "MIT" ]
null
null
null
scripts/pathways_3_categorization.ipynb
iganna/evo_epigen
21198d2b319f4488dced492d9c3f7a08bb3fc315
[ "MIT" ]
1
2019-02-01T17:04:21.000Z
2019-02-01T17:04:21.000Z
63.432958
164
0.487988
[ [ [ "# Analysis of enrichment", "_____no_output_____" ] ], [ [ "import glob\n\nimport json\nimport math\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom functools import reduce\nfrom collections import OrderedDict, defaultdict\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.stats import fisher_exact as fisher\nfrom scipy.stats import chi2_contingency as chisq", "_____no_output_____" ], [ "def ease(n_outliers_path, n_total_path, n_outliers, n_total):\n \"\"\"\n Calculates a contingency table EASE score\n [x y]\n [z k]\n :param n_in_path: number of outliers in the pathway\n :param n_total_path: total number of genes in the pathway\n :param n_outliers: total number of outliers\n :param n_total: total number of genes analysed\n :return:\n \"\"\"\n\n x = max(0, n_outliers_path - 1) # in category, enriched\n y = n_total_path # total, enriched\n z = n_outliers - n_outliers_path # in category, not enriched\n k = n_total - n_total_path # total, not enriched\n \n #if x <= 10:\n _, pvalue = fisher(([[x, y], [z, k]]), alternative='greater')\n #else:\n # _, pvalue, _, _ = chisq(([[x, y], [z, k]]))\n\n return pvalue", "_____no_output_____" ] ], [ [ "## Collecting all pathway names", "_____no_output_____" ] ], [ [ "pathway_tables = glob.glob(\"../pathways/*/gp.csv\")\ndfs = [pd.read_csv(table) for table in pathway_tables]\nfor i, df in enumerate(dfs):\n dfs[i] = df.set_index(\"SYMBOL\")\n dfs[i].sort_index(inplace=True)\n #print(dfs[i].shape)\ndfs[0]\nall_entries = list(pd.concat(dfs, axis=1, sort=True).columns)", "_____no_output_____" ], [ "all_entries[0:10]", "_____no_output_____" ], [ "structures = pd.read_csv(\"../extracted/classification_pathways.csv\", header=0, index_col=\"Pathway\")\nstructures = pd.DataFrame(structures, dtype=bool)", "_____no_output_____" ], [ "del structures[\"DUPLICATE?\"], structures[\"TRUTHFULNESS\"], structures[\"Garbage\"]\nstructures.head()", "_____no_output_____" ], [ "all_2 = set(structures.index)\nset(all_entries) - all_2", "_____no_output_____" ], [ "pathway_types = dict()\nfor pathway in sorted(all_entries):\n x = structures.loc[pathway]\n pathway_types[pathway] = x[x].index[0]", "_____no_output_____" ], [ "reverse_counter = defaultdict(int)\nfor pathway in sorted(all_entries):\n category = pathway_types[pathway]\n reverse_counter[category] += 1\nreverse_counter", "_____no_output_____" ], [ "ALL_PATHS = sum(reverse_counter.values())\nALL_PATHS", "_____no_output_____" ] ], [ [ "# By histone tag:", "_____no_output_____" ] ], [ [ "my_tags = [\"H3K4me3\", \"H3K9ac\", \"H3K27ac\", \"H3K27me3\", \"H3K9me3\"]", "_____no_output_____" ], [ "ENR_COUNTERS = dict()\nfor hg_tag in my_tags:\n files_up_human = glob.glob(f\"../extracted/Human_{hg_tag}_pathways_up*\")\n files_down_human = glob.glob(f\"../extracted/Human_{hg_tag}_pathways_down*\")\n files_up_mouse = glob.glob(f\"../extracted/Mouse_{hg_tag}_pathways_up*\")\n files_down_mouse = glob.glob(f\"../extracted/Mouse_{hg_tag}_pathways_down*\")\n\n files = {\"Human+\": files_up_human[0],\n \"Human-\": files_down_human[0],\n \"Mouse+\": files_up_mouse[0],\n \"Mouse-\": files_down_mouse[0]}\n\n enriched_counter = defaultdict(lambda: defaultdict(int))\n for xtype in files:\n with open(files[xtype], \"r\") as file:\n en_pathways = file.read().strip().split(\"\\n\")\n for pw in en_pathways:\n cat = pathway_types[pw]\n enriched_counter[xtype][cat] += 1\n enriched_counter = pd.DataFrame(enriched_counter).T.fillna(0)\n enriched_counter = 
pd.DataFrame(enriched_counter, dtype=int)\n ENR_COUNTERS[hg_tag] = enriched_counter\nENR_COUNTERS[my_tags[0]]", "_____no_output_____" ] ], [ [ "Calculates a contingency table EASE score \n[x y] \n[z k] \n:param n_in_path: number of outliers in the pathway \n:param n_total_path: total number of genes in the pathway \n:param n_outliers: total number of outliers \n:param n_total: total number of genes analysed \n:return: ", "_____no_output_____" ] ], [ [ "ksi = defaultdict(dict)\nsigns = {\"+\": \"positively\\u00A0enriched\\u00A0(+)\",\n \"-\": \"negatively\\u00A0enriched\\u00A0(-)\"}\nfor hg_tag in my_tags:\n enriched_counter = ENR_COUNTERS[hg_tag]\n for sign in [\"+\", \"-\"]:\n for org in [\"Human\", \"Mouse\"]:\n for category in enriched_counter:\n n1 = enriched_counter[category][f\"{org}{sign}\"]\n n2 = sum(enriched_counter.loc[f\"{org}{sign}\"])\n n3 = reverse_counter[category]\n n4 = ALL_PATHS\n #print(n1, n2, n3, n4)\n ksi[category][f\"{org},\\u00A0{hg_tag},\\u00A0{signs[sign]}\"] = ease(n1, n2, n3, n4)\npd.DataFrame(ksi).to_csv(f\"../extracted/pvalues.csv\")\npd.DataFrame(ksi)", "_____no_output_____" ], [ "TAU = pd.DataFrame(ksi)", "_____no_output_____" ], [ "def get_highlighter_min(color, point):\n def highlight_min(s):\n '''\n highlight the minimums in a Series.\n '''\n is_max = s <= point\n return [f'background-color: {color}' if v else '' for v in is_max]\n return highlight_min", "_____no_output_____" ], [ "data_round = np.round(TAU, 3)\ncm = sns.light_palette(\"green\", as_cmap=True, reverse=True)\n\ns = data_round.style.apply(get_highlighter_min(\"green\", 0.05), subset=([i for i in TAU.index if \"+\" in i], TAU.columns))\ncm = sns.light_palette(\"red\", as_cmap=True, reverse=True)\n\ns.apply(get_highlighter_min(\"red\", 0.05), subset=([i for i in TAU.index if \"-\" in i], TAU.columns))\ns", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7a25f5e042ed5bf1d1efa6d0ca073cae8cfc7bf
4,688
ipynb
Jupyter Notebook
video inference/video_to_imgs.ipynb
EricChan-OC/mmpose
b7f36b29eb077b69791675cce39dfcc23c658647
[ "Apache-2.0" ]
null
null
null
video inference/video_to_imgs.ipynb
EricChan-OC/mmpose
b7f36b29eb077b69791675cce39dfcc23c658647
[ "Apache-2.0" ]
null
null
null
video inference/video_to_imgs.ipynb
EricChan-OC/mmpose
b7f36b29eb077b69791675cce39dfcc23c658647
[ "Apache-2.0" ]
null
null
null
31.675676
84
0.537116
[ [ [ "# AWS Rekognition to get bbox\nimport numpy as np\nimport boto3\nfrom PIL import Image, ImageDraw, ExifTags, ImageColor, ImageFont\nfrom matplotlib import pyplot as plt\nfrom utils.rekognition import determine_color, draw_animal_count\nimport cv2\nimport time\nimport math\nimport os\nfrom utils.config import *\nfrom utils.fix_annotation import *", "_____no_output_____" ], [ "def save_frames(src_file, output_path, output_video, fps=cv2.CAP_PROP_FPS):\n start = time.time()\n \n cap = cv2.VideoCapture(src_file)\n frameRate = cap.get(fps) #frame rate\n print('frameRate', frameRate)\n #function to write a video\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n \n imgSize = (int(width), int(height))\n \n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n videoWriter = cv2.VideoWriter(output_video, fourcc, frameRate, imgSize)\n \n while(cap.isOpened()):\n frameId = cap.get(1) #current frame number\n \n ret, frame = cap.read()\n \n if (ret != True):\n break\n else:\n #inference on the extracted frame\n hasFrame, imageBytes = cv2.imencode(\".jpg\", frame)\n\n if(hasFrame):\n # creating image object of array\n data = Image.fromarray(frame)\n # shrink the frame image size\n # saving a JPG file\n data.save(output_path+'{}.jpg'.format(int(frameId)))\n \n if frameId % 50 == 0:\n print(\"Finish Processing {} frames\".format(int(frameId)))\n lap = time.time()\n print('lap time: ', lap - start)\n videoWriter.write(frame)\n\n cap.release()\n videoWriter.release()\n cv2.destroyAllWindows()\n \n end = time.time()\n print('total time lapse', end - start)\n#'cattle_single_1', 'cattle_multi_1'\nvideo_name_list = ['tests']\nvideo_format = ['.mp4']\nfor v_idx, video_name in enumerate(video_name_list):\n src_video = 'video_data/input_video/'+video_name+video_format[v_idx]\n output_img_path = 'frame_img/'+video_name+'/'\n if not os.path.exists(output_img_path):\n os.makedirs(output_img_path)\n output_video = output_img_path+'ori_video.mp4'\n print(output_video)\n save_frames(src_video, output_img_path, output_video)\n print()", "frame_img/tests/ori_video.mp4\nframeRate 30.0\nFinish Processing 0 frames\nlap time: 0.4605433940887451\nFinish Processing 50 frames\nlap time: 8.51185417175293\nFinish Processing 100 frames\nlap time: 16.091991424560547\nFinish Processing 150 frames\nlap time: 23.63861584663391\nFinish Processing 200 frames\nlap time: 31.303373098373413\nFinish Processing 250 frames\nlap time: 38.85345196723938\nFinish Processing 300 frames\nlap time: 46.18950009346008\nFinish Processing 350 frames\nlap time: 53.64163899421692\ntotal time lapse 59.122246980667114\n\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
e7a26e9e2124b91b77d3f0236277a6dd8803c351
235,901
ipynb
Jupyter Notebook
python/doc/tutorials/src/basic_usage.ipynb
broxtronix/thunder
4dad77721e2c9e225f94a6a5366d51ec83ac4690
[ "Apache-2.0" ]
1
2017-02-02T19:14:42.000Z
2017-02-02T19:14:42.000Z
doc/source/tutorials/source/basic_usage.ipynb
pearsonlab/thunder
b15ba0a38642312d597a98643cf3514e2d46b69d
[ "Apache-2.0" ]
null
null
null
doc/source/tutorials/source/basic_usage.ipynb
pearsonlab/thunder
b15ba0a38642312d597a98643cf3514e2d46b69d
[ "Apache-2.0" ]
null
null
null
417.523894
189,090
0.933595
[ [ [ "# Basic usage", "_____no_output_____" ], [ "Thunder offers a variety of analyses and workflows for spatial and temporal data. When run on a cluster, most methods are efficiently and automatically parallelized, but Thunder can also be used on a single machine, especially for testing purposes. \n\nWe'll walk through a very simple example here as an introduction. The entry point for most workflows is the ``ThunderContext``. If you type ``thunder`` to start the interactive shell, this context is automatically provided as ``tsc``, which is an object that primarily provides functionality for loading and exporting data.\n\nWe'll start by loading and exploring some toy example data:", "_____no_output_____" ] ], [ [ "data = tsc.loadExample('fish-series')", "_____no_output_____" ] ], [ [ "``data`` is a ``Series`` object, which is a generic collection of one-dimensional array data sharing a common index. We can inspect it to see metadata:", "_____no_output_____" ] ], [ [ "data", "_____no_output_____" ] ], [ [ "A ``Series`` object is a collection of key-value records, each containing an identifier as a key and a one-dimensional array as a value. We can look at the first key and value by using ``first()``.", "_____no_output_____" ] ], [ [ "key, value = data.first()", "_____no_output_____" ] ], [ [ "We see that the ``first`` key in this example ``Series`` data is the tuple (0,0,0), corresponding to an x, y, z coordinate of an original movie.", "_____no_output_____" ] ], [ [ "key", "_____no_output_____" ] ], [ [ "The value in this case is a time series of 240 observations, represented as a 1d numpy array.\n", "_____no_output_____" ] ], [ [ "value.shape", "_____no_output_____" ] ], [ [ "We can extract a random subset of records and plot their time series, after converting to `TimeSeries` (which enables time-specific methods), and applying a simple baseline normalization. Here and elsewhere, we'll use the excellent ``seaborn`` package for styling figures, but this is entirely optional.", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context(\"notebook\")", "_____no_output_____" ], [ "examples = data.toTimeSeries().normalize().subset(50, thresh=0.05)\nsns.set_style('darkgrid')\nplt.plot(examples.T);", "_____no_output_____" ] ], [ [ "We can also compute a statistic for each record using the method:", "_____no_output_____" ] ], [ [ "means = data.seriesStdev()\nmeans.first()", "_____no_output_____" ] ], [ [ "``means`` is now itself a ``Series``, where the value of each record is the mean across time\n", "_____no_output_____" ], [ "For this ``Series``, since the keys correspond to spatial coordinates, we can ``pack`` the results back into a local array. ``pack`` is an operation that converts ``Series`` data, with spatial coordinates as keys, into an n-dimensional numpy array. In this case, the result is 3D, reflecting the original input data.", "_____no_output_____" ] ], [ [ "img = means.pack()\nimg.shape", "_____no_output_____" ] ], [ [ "``pack`` is an example of a local operation, meaning that all the data involved will be sent to the Spark driver node. 
For larger data sets, this can be very problematic - it's a good idea to downsample, subselect, or otherwise reduce the size of your data before attempting to ``pack`` large data sets!\n\nTo look at this array as an image, we'll use `matplotlib` via a helper function included with Thunder.", "_____no_output_____" ] ], [ [ "from thunder import Colorize\nimage = Colorize.image\nimage(img[:,:,0])", "_____no_output_____" ] ], [ [ "It's also easy to export the result to a ``numpy`` or ``MAT`` file.", "_____no_output_____" ], [ "```\ntsc.export(img, \"directory\", \"npy\")\ntsc.export(img, \"directory\", \"mat\")\n```", "_____no_output_____" ], [ "This will put a ``npy`` file or ``MAT`` file called ``meanval`` in the folder ``directory`` in your current directory. You can also export to a location of Amazon S3 or Google Storage if path is specified with an `s3n://`or `gs://` prefix.", "_____no_output_____" ], [ "Thunder includes several other toy data sets, to see the available ones:", "_____no_output_____" ] ], [ [ "tsc.loadExample()", "_____no_output_____" ] ], [ [ "Some of them are `Series`, some are `Images`, and some are associated `Params` (e.g. covariates). Let's load an `Images` dataset:", "_____no_output_____" ] ], [ [ "images = tsc.loadExample('mouse-images')", "_____no_output_____" ], [ "images", "_____no_output_____" ] ], [ [ "Now every record is an key-value pair where the key is an identifier, and the value is an image", "_____no_output_____" ] ], [ [ "key, value = images.first()", "_____no_output_____" ] ], [ [ "The key is an integer", "_____no_output_____" ] ], [ [ "key", "_____no_output_____" ] ], [ [ "And the value is a two-dimensional array", "_____no_output_____" ] ], [ [ "value.shape", "_____no_output_____" ] ], [ [ "Although `images` is not an array, some syntactic sugar supports easy indexing:", "_____no_output_____" ] ], [ [ "im = images[0]\nimage(im)", "_____no_output_____" ] ], [ [ "And we can now apply simple parallelized image processing routines", "_____no_output_____" ] ], [ [ "im = images.gaussianFilter(3).subsample(3)[0]\nimage(im)", "_____no_output_____" ] ], [ [ "For both `Images` and `Series` data, there are a variety of more complex analyses that can be run on these objects, including massively parallel regression, factorization, registration, feature extraction, and more! See the other tutorials for more information.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7a2714a8236a5c5c556a197e33c94002d7fce33
306,566
ipynb
Jupyter Notebook
Paper Figures/Introspection Code/Introspection Qiskit.ipynb
Lilgabz/Quantum-Algorithm-Implementations
2bb5df522d76e94b300275dfefff2869ff31bc2c
[ "MIT" ]
1
2022-03-20T17:20:09.000Z
2022-03-20T17:20:09.000Z
Paper Figures/Introspection Code/Introspection Qiskit.ipynb
Lilgabz/Quantum-Algorithm-Implementations
2bb5df522d76e94b300275dfefff2869ff31bc2c
[ "MIT" ]
null
null
null
Paper Figures/Introspection Code/Introspection Qiskit.ipynb
Lilgabz/Quantum-Algorithm-Implementations
2bb5df522d76e94b300275dfefff2869ff31bc2c
[ "MIT" ]
2
2021-12-30T22:23:20.000Z
2022-03-20T17:20:22.000Z
1,803.329412
139,416
0.962895
[ [ [ "## Print Cirq Circuit and Statevector", "_____no_output_____" ] ], [ [ "# importing Qiskit\nfrom qiskit import Aer, transpile, assemble\nfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister\nfrom qiskit.visualization import plot_histogram, plot_bloch_multivector\nfrom qiskit.visualization import plot_state_paulivec, plot_state_hinton, plot_state_city\nfrom qiskit.visualization import plot_state_qsphere\n\n# import basic plot tools\nfrom qiskit.visualization import plot_histogram, plot_bloch_multivector\n\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "#get backend simulator\nsim = Aer.get_backend('aer_simulator') \n\nqc = QuantumCircuit(3)\nqc.h(0)\nqc.cx(0,1)\nqc.h(2)\nqc.s(2)\n\n#print circuit\nprint(qc)\n\n#draw bloch spheres\nqc.save_statevector()\nstatevector = sim.run(qc).result().get_statevector()\n\nprint(\"\\n\")\nprint(statevector)\nprint(\"\\n\")\n\nplot_bloch_multivector(statevector)", " ┌───┐ \nq_0: ┤ H ├──■──\n └───┘┌─┴─┐\nq_1: ─────┤ X ├\n ┌───┐├───┤\nq_2: ┤ H ├┤ S ├\n └───┘└───┘\n\n\nStatevector([0.5+0.j , 0. +0.j , 0. +0.j , 0.5+0.j , 0. +0.5j, 0. +0.j ,\n 0. +0.j , 0. +0.5j],\n dims=(2, 2, 2))\n\n\n" ], [ "plot_state_city(statevector)", "_____no_output_____" ], [ "plot_state_qsphere(statevector)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7a27b5553a6807c4f903d8a4d23b5706c5e6250
229,913
ipynb
Jupyter Notebook
turning_rate/distance_from_center.ipynb
BevanLab/open_field_gait
6e941329ffb6f5477cc200f46f82ba03f45d6d06
[ "MIT" ]
null
null
null
turning_rate/distance_from_center.ipynb
BevanLab/open_field_gait
6e941329ffb6f5477cc200f46f82ba03f45d6d06
[ "MIT" ]
null
null
null
turning_rate/distance_from_center.ipynb
BevanLab/open_field_gait
6e941329ffb6f5477cc200f46f82ba03f45d6d06
[ "MIT" ]
null
null
null
342.13244
85,704
0.918056
[ [ [ "import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport cv2\n%matplotlib inline", "_____no_output_____" ], [ "vid = 'D:\\\\Open Field Behavior\\Test 520 - 9 mth - WT - NULL - 3047.mp4'", "_____no_output_____" ], [ "cam = cv2.VideoCapture(vid) \n \ntry: \n \n # creating a folder named data \n if not os.path.exists('data'): \n os.makedirs('data') \n \n# if not created then raise error \nexcept OSError: \n print ('Error: Creating directory of data') \n \n# frame \ncurrentframe = 0\n \nwhile(True): \n \n # reading from frame \n ret,frame = cam.read() \n \n if ret: \n # if video is still left continue creating images \n name = './data/frame' + str(currentframe) + '.jpg'\n print ('Creating...' + name) \n \n # writing the extracted images \n cv2.imwrite(name, frame) \n \n # increasing counter so that it will \n # show how many frames are created \n currentframe += 1\n break\n else: \n break\n \n# Release all space and windows once done \ncam.release() \ncv2.destroyAllWindows() ", "Creating..../data/frame0.jpg\n" ], [ "img = cv2.imread('data/frame0.jpg')\nplt.imshow(img)\nplt.plot(190, 390, 'ro')\nplt.plot(190, 90, 'ro')\nplt.plot(490, 90, 'ro')\nplt.plot(490, 390, 'ro')\nplt.plot((190+490)/2, (390+90)/2, 'bo')", "_____no_output_____" ], [ "center = ((190+490)/2, (390+90)/2)", "_____no_output_____" ], [ "os.listdir('D:\\\\Open field behavior\\plot-poses\\Test 520 - 9 mth - WT - NULL - 3047\\kalman_filter_smoothing_plot_center_back.npy')", "_____no_output_____" ], [ "positions = np.load('D:\\\\Open field behavior\\plot-poses\\Test 523 - 9 mth - HET - NULL - 3063\\kalman_filter_smoothing_plot_center_back.npy')", "_____no_output_____" ], [ "plt.scatter(positions[:, 0], positions[:, 2], .1)\nplt.plot(center[0], center[1], 'ro')\nplt.show()", "_____no_output_____" ], [ "os.listdir('D:\\\\Open field behavior\\plot-poses')", "_____no_output_____" ], [ "import os\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\n\n\n\n\n\ncenter = ((190+490)/2, (390+90)/2)\nstart = 0; end = 15\nplot_poses = 'D:\\Open field behavior\\plot-poses\\\\'\ndist_from_center = {'WT-NULL': {9: [], 18: []}, 'HET-NULL': {9: [], 18: []}, 'HET-ZFP': {9: [], 18: []}}\nfrom tqdm import tqdm\ndef distance_between_two_points(x1, y1, x2, y2):\n return np.sqrt((x2-x1)**2 + (y2-y1)**2) * (4/31)\n\n\nfor folder in tqdm(os.listdir(plot_poses)):\n mouse_folder = folder\n if 'Test' not in folder:\n continue\n folder = plot_poses + folder\n smoothed_center_back = np.load(folder + '/kalman_filter_smoothing_plot_center_back.npy')\n \n smoothed_cb_x = smoothed_center_back[:,0]\n smoothed_cb_y = smoothed_center_back[:,2]\n \n \n control = mouse_folder.split('mth -')[1][:11].replace(' ', '')\n if control[-1] == '-':\n control = control[:-1]\n month = int(mouse_folder.split('mth')[0][-3:])\n fps = len(smoothed_cb_x) // (15*60)\n \n distances = []\n for i in range(start*60*fps, end*60*fps):\n cb_position = smoothed_center_back[i, [0,2]] # (x,y) of center back\n distances.append(distance_between_two_points(cb_position[0], cb_position[1], center[0], center[1]))\n dist_from_center[control][month].append(np.average(distances))", "100%|████████████████████████████████████████████████████████████████████████████████| 102/102 [00:09<00:00, 10.93it/s]\n" ], [ "import pandas as pd\ndatasets = [dist_from_center]\nnames = [\"Average Distance From Center\"]\n\ntime = str(start) + '-' + str(end)\nfor idx in range(len(datasets)):\n dict_to_df = {'Genotype': [], 'Metric': []}\n dic = datasets[idx]\n print(names[idx])\n for 
key in dic: \n \n for key2 in dic[key]:\n geno = key + \": \" + str(key2) + \" Month\"\n for dat in dic[key][key2]:\n dict_to_df['Genotype'].append(geno)\n dict_to_df['Metric'].append(dat)\n df = pd.DataFrame.from_dict(dict_to_df)\n print(plot_poses + 'final_csv\\\\' + names[idx] + \"_\" + time + \".csv\")\n df.to_csv(plot_poses + 'final_csv\\\\' + names[idx] + \"_\" + time + \".csv\")", "Average Distance From Center\nD:\\Open field behavior\\plot-poses\\final_csv\\Average Distance From Center_0-15.csv\n" ], [ "center = ((190+490)/2, (390+90)/2)\nstart = 2; end = 5\nplot_poses = 'D:\\Open field behavior\\plot-poses\\\\'\ntime_spent_in_box = {'WT-NULL': {9: [], 18: []}, 'HET-NULL': {9: [], 18: []}, 'HET-ZFP': {9: [], 18: []}}\nfrom tqdm import tqdm\ndef distance_between_two_points(x1, y1, x2, y2):\n return np.sqrt((x2-x1)**2 + (y2-y1)**2) * (4/31)\ncount = 0\n\nfor folder in tqdm(os.listdir(plot_poses)):\n mouse_folder = folder\n if 'Test' not in folder:\n continue\n folder = plot_poses + folder\n smoothed_center_back = np.load(folder + '/kalman_filter_smoothing_plot_center_back.npy')\n \n smoothed_cb_x = smoothed_center_back[:,0]\n smoothed_cb_y = smoothed_center_back[:,2]\n \n \n control = mouse_folder.split('mth -')[1][:11].replace(' ', '')\n if control[-1] == '-':\n control = control[:-1]\n month = int(mouse_folder.split('mth')[0][-3:])\n fps = len(smoothed_cb_x) // (15*60)\n upper_right = (np.array(center) * (4/31) + 15)#* (4/31)\n lower_left = (np.array(center)* (4/31) - 15)#* (4/31)\n lower_right = ((lower_left[0] + 30) , lower_left[1])\n upper_left = ((upper_right[0] - 30), upper_right[1])\n \n plt.scatter(upper_left[0], upper_left[1], label = 'Border', color = 'red')\n plt.scatter(upper_right[0], upper_right[1], color = 'red')\n plt.scatter(lower_right[0], lower_right[1], color = 'red')\n plt.scatter(lower_left[0], lower_left[1], color = 'red')\n plt.plot([upper_left[0], lower_left[0]], [upper_left[1], lower_left[1]], color = 'red')\n plt.plot([upper_left[0], upper_right[0]], [upper_left[1], upper_right[1]], color = 'red')\n plt.plot([lower_left[0], lower_right[0]], [lower_left[1], lower_right[1]], color = 'red')\n plt.plot([upper_right[0], lower_right[0]], [upper_right[1], lower_right[1]], color = 'red')\n \n #plt.scatter(smoothed_cb_x * (4/31), smoothed_cb_y * (4/31), alpha = .01)\n \n time_in_box = 0\n x_bottom = lower_left[1]\n x_top = upper_left[1]\n y_left = lower_left[0]\n y_right = lower_right[0]\n #print('x_bottom: {}, y_right: {}, x_top: {}, y_left:{}'.format(x_bottom, y_left, x_top, y_right))\n label_counter = 2\n labels = []\n print(mouse_folder)\n for i in range(start*60*fps, end*60*fps):\n cb_position = smoothed_center_back[i, [0,2]] *(4/31)# (x,y) of center back\n\n \n in_box = False\n if cb_position[1] < x_top and cb_position[1] > x_bottom:\n if cb_position[0] > y_left and cb_position[0] < y_right:\n in_box = True\n \n #print(in_box)\n color = 'green' if in_box else 'blue'\n if in_box:\n time_in_box += 1\n #label = 'In Box' if in_box else 'Out of Box'\n #labels = list(labels)\n #labels.append(label)\n #label = set(labels)\n #if len(labels) == 2: \n label = ''\n plt.scatter(cb_position[0], cb_position[1] , alpha = .1, color = color)\n \n \n \n time_spent_in_box[control][month].append(time_in_box/fps)\n plt.xlabel('X (cm)')\n plt.ylabel('Y (cm)')\n plt.title('Trajectory Plot Example With 30 x 30 Box')\n plt.savefig('30x30_box.pdf')\n \n plt.show()\n break\n #dist_from_center[control][month].append(np.average(distances))", " 0%| | 0/102 [00:00<?, ?it/s]" ], [ "import 
pandas as pd\ndatasets = [time_spent_in_box]\nnames = [\"Time Spent In Box 30x30\"]\n\ntime = str(start) + '-' + str(end)\nfor idx in range(len(datasets)):\n dict_to_df = {'Genotype': [], 'Metric': []}\n dic = datasets[idx]\n print(names[idx])\n for key in dic: \n \n for key2 in dic[key]:\n geno = key + \": \" + str(key2) + \" Month\"\n for dat in dic[key][key2]:\n dict_to_df['Genotype'].append(geno)\n dict_to_df['Metric'].append(dat)\n df = pd.DataFrame.from_dict(dict_to_df)\n print(plot_poses + 'final_csv\\\\' + names[idx] + \"_\" + time + \".csv\")\n df.to_csv(plot_poses + 'final_csv\\\\' + names[idx] + \"_\" + time + \".csv\")", "Time Spent In Box 30x30\nD:\\Open field behavior\\plot-poses\\final_csv\\Time Spent In Box 30x30_2-5.csv\n" ], [ "time_spent_in_box", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a27c63990749a031f972e4f4b4b5a851e3ce9b
18,737
ipynb
Jupyter Notebook
boiler/unity parallelize.ipynb
ajmal017/longshot
0978fb107ab83034372e0e633483d381ac06f25f
[ "MIT" ]
2
2021-08-12T03:56:34.000Z
2021-08-14T18:18:28.000Z
boiler/unity parallelize.ipynb
ajmal017/longshot
0978fb107ab83034372e0e633483d381ac06f25f
[ "MIT" ]
null
null
null
boiler/unity parallelize.ipynb
ajmal017/longshot
0978fb107ab83034372e0e633483d381ac06f25f
[ "MIT" ]
null
null
null
35.35283
195
0.452207
[ [ [ "from database.strategy import Strategy\nfrom database.sec import SEC\nfrom database.market import Market\nfrom database.weather import Weather\nfrom transformer.date_transformer import DateTransformer\nfrom transformer.column_transformer import ColumnTransformer\nfrom transformer.model_transformer import ModelTransformer\nfrom transformer.product_transformer import ProductTransformer\nfrom transformer.predictor_transformer import PredictorTransformer\nfrom preprocessor.model_preprocessor import ModelPreprocessor\nfrom preprocessor.predictor_preprocessor import PredictorPreprocessor\nfrom modeler.modeler import Modeler as sp\nfrom utils.date_utils import DateUtils\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta, timezone\nfrom tqdm import tqdm\nimport math\nimport findspark\nfindspark.init()\nfrom pyspark import SparkContext\nfrom pyspark.sql import SparkSession", "_____no_output_____" ], [ "## Loading Constants\nstart = \"2008-01-01\"\nend = datetime(2021,1,7).strftime(\"%Y-%m-%d\")\n# Loading Databases\nstrat_db = Strategy(\"unity\")\nmarket = Market()\ndataset = \"pdr\"\n## Acquiring Datasets\nmarket.connect()\ntickers = market.retrieve_data(\"sp500\")\nclassification = market.retrieve_data(\"dataset_week_classification\")\nregression = market.retrieve_data(\"dataset_week_regression\")\nfor col in regression.columns:\n if -99999 == regression[col].min():\n regression.drop(col,axis=1,inplace=True)\nfor col in classification.columns:\n if -99999 == classification[col].min():\n classification.drop(col,axis=1,inplace=True)\nmarket.close()\nreload = True", "_____no_output_____" ], [ "gap = 5\nweek_gap = int(gap/5)\ntraining_years = 5", "_____no_output_____" ], [ "## setting up groups\ntickers[\"group\"] = [x % 6 for x in list(tickers.index)]", "_____no_output_____" ], [ "tickers_df = spark.createDataFrame(tickers[[\"Symbol\",\"group\"]])", "_____no_output_____" ], [ "test = spark.createDataFrame(tickers_parallel.value)\n", "_____no_output_____" ], [ "\n# try:\n\n# mr = ModelPreprocessor(ticker)\n# prot = ProductTransformer(ticker,start,end)\n# ticker_regression = all_regression.copy()\n# ticker_regression[\"y\"] = ticker_regression[ticker]\n# ticker_regression[\"y\"] = ticker_regression[\"y\"].shift(-week_gap)\n# ticker_regression = ticker_regression[:-week_gap]\n# ticker_classification = all_classification.copy()\n# ticker_classification[\"y\"] = ticker_classification[ticker]\n# ticker_classification[\"y\"] = ticker_classification[\"y\"].shift(-week_gap)\n# ticker_classification = ticker_classification[:-week_gap]\n# except Exception as e:\n# print(str(e))\n# continue\n# try:\n ## regression_model\n# first = ticker_regression[(ticker_regression[\"year\"] == year - training_years) & (ticker_regression[\"quarter\"] == quarter)].index.values.tolist()[0]\n# last = ticker_regression[(ticker_regression[\"year\"] == year) & (ticker_regression[\"quarter\"] == quarter)].index.values.tolist()[0]\n# rqpd = ticker_regression.iloc[first:last-1]\n# qpd = mr.day_trade_preprocess_regression(rqpd.copy(),ticker,True)\n# rpr = sp.regression(qpd,ranked=False,tf=False,deep=False)\n# ## classification_model\n# first = ticker_classification[(ticker_classification[\"year\"] == year - training_years) & (ticker_classification[\"quarter\"] == quarter)].index.values.tolist()[0]\n# last = ticker_classification[(ticker_classification[\"year\"] == year) & (ticker_classification[\"quarter\"] == quarter)].index.values.tolist()[0]\n# cqpd = 
ticker_classification.iloc[first:last-1]\n# qpd = mr.day_trade_preprocess_classify(cqpd.copy(),ticker)\n# cpr = sp.classification(qpd,tf=False,deep=False,multioutput=False)\n# price_results = pd.DataFrame([cpr,rpr])\n# product_qpds = []\n# for j in range(len(price_results)):\n# price_result = price_results.iloc[j]\n# if price_result[\"model_type\"] == \"regression\":\n# weekly_price_data = ticker_regression\n# weekly_price_data = weekly_price_data[(weekly_price_data[\"year\"] == year) & (weekly_price_data[\"quarter\"] == quarter)]\n# product_qpd = mr.day_trade_preprocess_regression(weekly_price_data.copy(),ticker,False)\n# else:\n# weekly_price_data = ticker_classification\n# weekly_price_data = weekly_price_data[(weekly_price_data[\"year\"] == year) & (weekly_price_data[\"quarter\"] == quarter)]\n# product_qpd = mr.day_trade_preprocess_classify(weekly_price_data.copy(),ticker)\n# product_qpds.append(product_qpd)\n# sim = prot.merge_weeklies_v2(product_qpds,price_results,year,quarter,\"price\")\n", "_____no_output_____" ], [ "quarterly_range = range(1,2)\nyearly_range = range(2017,2018)\ndef model(group):\n results = []\n all_regression = regression.copy()\n all_classification = classification.copy()\n group_tickers = tickers[tickers[\"group\"]== group]\n model_range = range(group_tickers.index.size)\n for i in tqdm(model_range):\n try:\n ticker = group_tickers.iloc[i][\"Symbol\"].replace(\".\",\"-\")\n mr = ModelPreprocessor(ticker)\n prot = ProductTransformer(ticker,start,end)\n ticker_regression = all_regression.copy()\n ticker_regression[\"y\"] = ticker_regression[ticker]\n ticker_regression[\"y\"] = ticker_regression[\"y\"].shift(-week_gap)\n ticker_regression = ticker_regression[:-week_gap]\n ticker_classification = all_classification.copy()\n ticker_classification[\"y\"] = ticker_classification[ticker]\n ticker_classification[\"y\"] = ticker_classification[\"y\"].shift(-week_gap)\n ticker_classification = ticker_classification[:-week_gap]\n except Exception as e:\n print(str(e))\n continue\n for year in yearly_range:\n for quarter in quarterly_range:\n try:\n ## regression_model\n first = ticker_regression[(ticker_regression[\"year\"] == year - training_years) & (ticker_regression[\"quarter\"] == quarter)].index.values.tolist()[0]\n last = ticker_regression[(ticker_regression[\"year\"] == year) & (ticker_regression[\"quarter\"] == quarter)].index.values.tolist()[0]\n rqpd = ticker_regression.iloc[first:last-1]\n qpd = mr.day_trade_preprocess_regression(rqpd.copy(),ticker,True)\n rpr = sp.regression(qpd,ranked=False,tf=False,deep=False)\n ## classification_model\n first = ticker_classification[(ticker_classification[\"year\"] == year - training_years) & (ticker_classification[\"quarter\"] == quarter)].index.values.tolist()[0]\n last = ticker_classification[(ticker_classification[\"year\"] == year) & (ticker_classification[\"quarter\"] == quarter)].index.values.tolist()[0]\n cqpd = ticker_classification.iloc[first:last-1]\n qpd = mr.day_trade_preprocess_classify(cqpd.copy(),ticker)\n cpr = sp.classification(qpd,tf=False,deep=False,multioutput=False)\n price_results = pd.DataFrame([cpr,rpr])\n product_qpds = []\n for j in range(len(price_results)):\n price_result = price_results.iloc[j]\n if price_result[\"model_type\"] == \"regression\":\n weekly_price_data = ticker_regression\n weekly_price_data = weekly_price_data[(weekly_price_data[\"year\"] == year) & (weekly_price_data[\"quarter\"] == quarter)]\n product_qpd = 
mr.day_trade_preprocess_regression(weekly_price_data.copy(),ticker,False)\n                        else:\n                            weekly_price_data = ticker_classification\n                            weekly_price_data = weekly_price_data[(weekly_price_data[\"year\"] == year) & (weekly_price_data[\"quarter\"] == quarter)]\n                            product_qpd = mr.day_trade_preprocess_classify(weekly_price_data.copy(),ticker)\n                        product_qpds.append(product_qpd)\n                    sim = prot.merge_weeklies_v2(product_qpds,price_results,year,quarter,\"price\")\n                    results.append(sim)\n                except Exception as e:\n                    print(ticker,str(e))\n                    continue\n    return pd.concat(results)", "_____no_output_____" ], [ "sim = group_parallel.map(lambda x: model(x)).collect()", "_____no_output_____" ], [ "pd.concat(sim)", "_____no_output_____" ], [ "product = pd.concat(sim)\n# product = product.merge(regression,on=[\"year\",\"quarter\",\"week\"],how=\"left\")", "_____no_output_____" ], [ "strat_db.connect()\nstrat_db.store_data(\"parallel_weekly_sim\",product)\nstrat_db.close()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a285ab2d4165bb36dd06edd9ee427c385323b4
90,550
ipynb
Jupyter Notebook
notebooks/Henrik_feat_engineering.ipynb
viggotw/SopraSteria_Team3
72d8eab72759cc052ce72709b07eb8be997d8541
[ "MIT" ]
null
null
null
notebooks/Henrik_feat_engineering.ipynb
viggotw/SopraSteria_Team3
72d8eab72759cc052ce72709b07eb8be997d8541
[ "MIT" ]
null
null
null
notebooks/Henrik_feat_engineering.ipynb
viggotw/SopraSteria_Team3
72d8eab72759cc052ce72709b07eb8be997d8541
[ "MIT" ]
null
null
null
238.918206
30,998
0.867421
[ [ [ "import pandas as pd\nimport numpy as np\nimport stumpy\nimport sklearn\nimport lightgbm\nimport xgboost\nimport skforecast\nimport statsmodels\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "prediction_input = pd.read_parquet(\"data/prediction_input.parquet\")\ninput_dataset_2 = pd.read_parquet(\"data/input_dataset-2.parquet\")", "_____no_output_____" ], [ "plt.plot(prediction_input)\nplt.show()\n", "_____no_output_____" ], [ "df = pd.DataFrame(input_dataset_2)\ndf_prediction = pd.DataFrame(prediction_input)", "_____no_output_____" ], [ "df_prediction.plot()", "_____no_output_____" ], [ "print(df.columns)\ndf.shape\ndf.index\ndf_prediction.head()", "Index(['Unit_4_Power', 'Unit_4_Reactive Power', 'Turbine_Guide Vane Opening',\n 'Turbine_Pressure Drafttube', 'Turbine_Pressure Spiral Casing',\n 'Turbine_Rotational Speed', 'mode', 'Bolt_1_Steel tmp',\n 'Bolt_1_Tensile', 'Bolt_2_Tensile', 'Bolt_3_Tensile', 'Bolt_4_Tensile',\n 'Bolt_5_Tensile', 'Bolt_6_Tensile', 'Bolt_1_Torsion', 'Bolt_2_Torsion',\n 'Bolt_3_Torsion', 'Bolt_4_Torsion', 'Bolt_5_Torsion', 'Bolt_6_Torsion',\n 'lower_bearing_vib_vrt', 'turbine_bearing_vib_vrt'],\n dtype='object')\n" ], [ "figure, ax1 = plt.subplots()\nax1.plot(df.iloc[:, 6])", "_____no_output_____" ], [ "def add_seconds_operational(dataframe):\n # Find index of \"start\" modes in the timeseries\n start_ts = dataframe[dataframe['mode'] == 'start'].index\n # Calculate secods until next \"start\" mode\n secs_since_last_start = (start_ts[1:] - start_ts[:-1]).seconds\n # Extract a list of \"timeslots\" that pairwise indicate a sequence of operation\n last_start_before_counting = start_ts[:-1][secs_since_last_start > 1]\n last_start_before_counting = list(last_start_before_counting)\n last_start_before_counting.append(dataframe.index[-1])\n # Create new feature\n dataframe['sec_since_last_start'] = 0\n for t1, t2 in zip(last_start_before_counting[:-1], last_start_before_counting[1:]):\n dataframe.loc[t1:t2, 'sec_since_last_start'] = range(len(dataframe[t1:t2]))\n # Force \"start\" mde to equal 0 seconds\n dataframe.loc[dataframe['mode'] == 'start', 'sec_since_last_start'] = 0\n # Look at the last timeslot\n dataframe.loc[t1:t2, ['mode', 'sec_since_last_start']]\n\n return dataframe\n\ndf = add_seconds_operational(df)", "_____no_output_____" ], [ "def add_hour_feature(dataframe):\n dataframe['hour'] = dataframe.index.hour\n return dataframe", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a287d4293bf4e8bbc1beb145101490d64a2bae
2,360
ipynb
Jupyter Notebook
debug/jupyter/stuck.ipynb
stas00/fastai-misc
e7e8c18ed798f91b2e026c667f795f45992608b8
[ "Apache-2.0" ]
1
2018-06-01T17:39:59.000Z
2018-06-01T17:39:59.000Z
debug/jupyter/stuck.ipynb
stas00/fastai-misc
e7e8c18ed798f91b2e026c667f795f45992608b8
[ "Apache-2.0" ]
null
null
null
debug/jupyter/stuck.ipynb
stas00/fastai-misc
e7e8c18ed798f91b2e026c667f795f45992608b8
[ "Apache-2.0" ]
null
null
null
17.352941
52
0.495763
[ [ [ "#", "_____no_output_____" ], [ "#", "_____no_output_____" ], [ "print(\"no problem in this cell\")", "no problem in this cell\n" ], [ "print(\"no problem in this cell either\")", "no problem in this cell either\n" ], [ "print(\"always gets stuck in this cell\")", "always gets stuck in this cell\n" ], [ "print(\"even though the execution continues\")", "even though the execution continues\n" ], [ "import time\ntime.sleep(5)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7a28ac051f96fe6358ee1b78a0502dccc0bbfa5
110,294
ipynb
Jupyter Notebook
run.ipynb
ghetthub/capsnet
d3a435a283117133652be3ed9ba326ca5363faad
[ "MIT" ]
null
null
null
run.ipynb
ghetthub/capsnet
d3a435a283117133652be3ed9ba326ca5363faad
[ "MIT" ]
null
null
null
run.ipynb
ghetthub/capsnet
d3a435a283117133652be3ed9ba326ca5363faad
[ "MIT" ]
null
null
null
517.812207
71,012
0.94502
[ [ [ "import numpy as np\nfrom keras import callbacks\n\nimport capsulenet", "Using TensorFlow backend.\n/home/watiz/.virtualenvs/siamese_reid/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ] ], [ [ "## Args", "_____no_output_____" ] ], [ [ "class args:\n save_dir = \"weights/\"\n debug = True\n \n # model\n routings = 1\n \n # hp\n batch_size = 32\n lr = 0.001\n lr_decay = 1.0\n lam_recon = 0.392\n \n # training\n epochs = 3\n shift_fraction = 0.1\n digit = 5", "_____no_output_____" ] ], [ [ "## Load data", "_____no_output_____" ] ], [ [ "(x_train, y_train), (x_test, y_test) = capsulenet.load_mnist()", "_____no_output_____" ] ], [ [ "## Define model", "_____no_output_____" ] ], [ [ "model, eval_model, manipulate_model = capsulenet.CapsNet(input_shape=x_train.shape[1:],\n n_class=len(np.unique(np.argmax(y_train, 1))),\n routings=args.routings)", "_____no_output_____" ] ], [ [ "## Training", "_____no_output_____" ] ], [ [ "capsulenet.train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args)", "INFO:tensorflow:Summary name conv1/kernel:0 is illegal; using conv1/kernel_0 instead.\nINFO:tensorflow:Summary name conv1/bias:0 is illegal; using conv1/bias_0 instead.\nINFO:tensorflow:Summary name primarycap_conv2d/kernel:0 is illegal; using primarycap_conv2d/kernel_0 instead.\nINFO:tensorflow:Summary name primarycap_conv2d/bias:0 is illegal; using primarycap_conv2d/bias_0 instead.\nINFO:tensorflow:Summary name digitcaps/W:0 is illegal; using digitcaps/W_0 instead.\nINFO:tensorflow:Summary name dense_1/kernel:0 is illegal; using dense_1/kernel_0 instead.\nINFO:tensorflow:Summary name dense_1/bias:0 is illegal; using dense_1/bias_0 instead.\nINFO:tensorflow:Summary name dense_2/kernel:0 is illegal; using dense_2/kernel_0 instead.\nINFO:tensorflow:Summary name dense_2/bias:0 is illegal; using dense_2/bias_0 instead.\nINFO:tensorflow:Summary name dense_3/kernel:0 is illegal; using dense_3/kernel_0 instead.\nINFO:tensorflow:Summary name dense_3/bias:0 is illegal; using dense_3/bias_0 instead.\nEpoch 1/3\n187/187 [==============================] - 128s - loss: 0.2853 - capsnet_loss: 0.2520 - decoder_loss: 0.0851 - capsnet_acc: 0.7122 - val_loss: 0.0985 - val_capsnet_loss: 0.0764 - val_decoder_loss: 0.0565 - val_capsnet_acc: 0.9540\nEpoch 2/3\n187/187 [==============================] - 151s - loss: 0.1101 - capsnet_loss: 0.0843 - decoder_loss: 0.0658 - capsnet_acc: 0.9402 - val_loss: 0.0740 - val_capsnet_loss: 0.0522 - val_decoder_loss: 0.0557 - val_capsnet_acc: 0.9740\nEpoch 3/3\n187/187 [==============================] - 153s - loss: 0.0883 - capsnet_loss: 0.0642 - decoder_loss: 0.0615 - capsnet_acc: 0.9545 - val_loss: 0.0647 - val_capsnet_loss: 0.0439 - val_decoder_loss: 0.0530 - val_capsnet_acc: 0.9770\nTrained model saved to 'weights//trained_model.h5'\n" ], [ "capsulenet.test(eval_model, data=(x_test, y_test), args=args)", "------------------------------Begin: test------------------------------\nTest acc: 0.9784\n\nReconstructed images are saved to weights//real_and_recon.png\n------------------------------End: test------------------------------\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e7a2a705fe4443f62ca57371106d606543498bbf
10,951
ipynb
Jupyter Notebook
experiement/persuasion/DataProcess/Split Data.ipynb
RoderickGu/Pretraining_GPT
0a3ecd38116dc271e273f57490b9b45b660bf401
[ "Apache-2.0" ]
4
2019-11-18T09:36:04.000Z
2019-12-11T18:30:16.000Z
experiement/persuasion/DataProcess/Split Data.ipynb
RoderickGu/Pretraining_GPT
0a3ecd38116dc271e273f57490b9b45b660bf401
[ "Apache-2.0" ]
null
null
null
experiement/persuasion/DataProcess/Split Data.ipynb
RoderickGu/Pretraining_GPT
0a3ecd38116dc271e273f57490b9b45b660bf401
[ "Apache-2.0" ]
null
null
null
32.98494
83
0.423888
[ [ [ "import torch\nimport torch.nn as nn\nimport pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "df = pd.read_csv(\"../data/FullData/full_dialog.csv\").iloc[:, 1:]", "_____no_output_____" ], [ "dialog_list = set(item for item in set(df[\"B2\"]) if item[:3] != \"BAD\")\nall_dialogs = []\n\nfor tag in dialog_list:\n all_dialogs.append(df[df[\"B2\"] == tag])", "_____no_output_____" ], [ "indices = np.arange(len(all_dialogs))\nnp.random.shuffle(indices)\ncut_index = int(len(all_dialogs)*0.9)\ntrain_indices = indices[:cut_index]\nval_indices = indices[cut_index:]", "_____no_output_____" ], [ "raw_train_dialogs = [all_dialogs[i] for i in train_indices]\nraw_val_dialogs = [all_dialogs[i] for i in val_indices]", "_____no_output_____" ], [ "torch.save(raw_train_dialogs, \"raw_train_dialogs.pkl\")\ntorch.save(raw_val_dialogs, \"raw_val_dialogs.pkl\")", "_____no_output_____" ], [ "raw_train_dialogs[0]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7a2aa468caeaec35ad1f6547359743f967e334d
848,506
ipynb
Jupyter Notebook
MachineLearning/5_KNNyArbolesDeDecision/KNN_Arboles.ipynb
guillelencina/cursos-python
83ece92c65f4145bb46b3078a218cc5dbd957853
[ "MIT" ]
171
2020-07-09T18:42:13.000Z
2022-03-31T22:01:23.000Z
MachineLearning/5_KNNyArbolesDeDecision/KNN_Arboles.ipynb
canveo/cursos-python
a5d509d212d5fe5f1ba45940cfcf93c1f3455f9f
[ "MIT" ]
133
2020-05-17T19:53:19.000Z
2022-03-23T23:39:23.000Z
MachineLearning/5_KNNyArbolesDeDecision/KNN_Arboles.ipynb
canveo/cursos-python
a5d509d212d5fe5f1ba45940cfcf93c1f3455f9f
[ "MIT" ]
86
2020-07-09T17:59:19.000Z
2022-03-29T23:32:19.000Z
520.236665
294,504
0.941563
[ [ [ "<a href=\"https://colab.research.google.com/github/institutohumai/cursos-python/blob/master/MachineLearning/5_KNNyArbolesDeDecision/KNN_Arboles.ipynb\"> <img src='https://colab.research.google.com/assets/colab-badge.svg' /> </a>\n<div align=\"center\"> Recordá abrir en una nueva pestaña </div>", "_____no_output_____" ], [ "# Modelos no paramétricos: K-Nearest Neighbours y Árboles de decisión\n\nDocumentación:\n- KNN para clasificación: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier\n- KNN para regresión: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor\n- Árboles para clasificación: https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html\n- Árboles para regresión: https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html", "_____no_output_____" ], [ "## Seteo de librerias", "_____no_output_____" ] ], [ [ "import os\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.datasets import make_classification, make_blobs, load_breast_cancer\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, KFold\n\n# modelos\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree\n\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\nDISPLAY_PRECISION = 4\npd.set_option(\"display.precision\", DISPLAY_PRECISION)", "_____no_output_____" ] ], [ [ "# 1. KNN\n\n", "_____no_output_____" ], [ "## 1.1 Introducción: Fronteras de decisión\n\nPara familirizarnos con este modelo y podervisualizar como quedan las fronteras de decisión empezaremos con un problema de clasificación binaria con dos features con un dataset de juguete que generaremos nosotros con la función [make_classification](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html).", "_____no_output_____" ] ], [ [ "# construyamos el dataset para un problema de clasificación binaria de dos dimensiones\nX, y = make_classification(n_samples=200, n_features=2, n_informative=2, n_redundant=0, n_classes=2,n_clusters_per_class=1,\n random_state=1, class_sep=1.1)\n# scatter plot, colores por etiquetas\ndf = pd.DataFrame(dict(x=X[:,0], y=X[:,1], label=y))\ncolors = {0:'red', 1:'blue'}\nfig, ax = plt.subplots()\ngrouped = df.groupby('label')\nfor key, group in grouped:\n group.plot(ax=ax, kind='scatter', x='x', y='y', label=key, color=colors[key])", "_____no_output_____" ], [ "# instanciemos y entrenemos el modelo\nmodel = KNeighborsClassifier(n_neighbors=10,weights='uniform')\nmodel.fit(X,y)", "_____no_output_____" ], [ "# visualicemos las predicciones\n\n# elegimos algunos colores de la lista de colores\ncmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\ncmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\n# tenemos que armar una grilla y setear un step\nh = .02 \nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\nZ = model.predict(np.c_[xx.ravel(), yy.ravel()])\n\n# usamos un pcolormesh\nZ = Z.reshape(xx.shape)\nplt.figure()\nplt.pcolormesh(xx, yy, Z, 
cmap=cmap_light)\n\n# ploteamos también los puntos de entrenamiento\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max())\nplt.title(\"2-Class classification (k = %i, weights = '%s')\"\n % (10, 'uniform'))\n\nplt.show()", "_____no_output_____" ], [ "model = KNeighborsClassifier(n_neighbors=200,weights='uniform')\nmodel.fit(X,y)", "_____no_output_____" ], [ "# visualicemos las predicciones\n\n# elegimos algunos colores de la lista de colores\ncmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\ncmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\n# tenemos que armar una grilla y setear un step\nh = .02 \nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\nZ = model.predict(np.c_[xx.ravel(), yy.ravel()])\n\n# usamos un pcolormesh\nZ = Z.reshape(xx.shape)\nplt.figure()\nplt.pcolormesh(xx, yy, Z, cmap=cmap_light)\n\n# ploteamos también los puntos de entrenamiento\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max())\nplt.title(\"2-Class classification (k = %i, weights = '%s')\"\n % (200, 'uniform'))\n\nplt.show()", "_____no_output_____" ], [ "model = KNeighborsClassifier(n_neighbors=200,weights='distance')\nmodel.fit(X,y)", "_____no_output_____" ], [ "# visualicemos las predicciones\n\n# elegimos algunos colores de la lista de colores\ncmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\ncmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\n# tenemos que armar una grilla y setear un step\nh = .02 \nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\nZ = model.predict(np.c_[xx.ravel(), yy.ravel()])\n\n# usamos un pcolormesh\nZ = Z.reshape(xx.shape)\nplt.figure()\nplt.pcolormesh(xx, yy, Z, cmap=cmap_light)\n\n# ploteamos también los puntos de entrenamiento\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max())\nplt.title(\"2-Class classification (k = %i, weights = '%s')\"\n % (200, 'distance'))\n\nplt.show()", "_____no_output_____" ] ], [ [ "## 1.2 Conjunto de datos de cáncer de mama\n\nEl conjunto de datos etiquetado proviene de la \"Base de datos (diagnóstico) de cáncer de mama de Wisconsin\" disponible gratuitamente en la biblioteca sklearn de python. Para obtener más detalles, consulte:\nhttps://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29\n\nNúmero de muestras: 569\n\nNúmero de funciones: 30 atributos numéricos y predictivos\n\nNúmero de clases: 2\n\nLas características se calculan a partir de una imagen digitalizada de una aspiración con aguja fina (FNA) de una masa mamaria. Describen las características de los núcleos celulares presentes en la imagen. Se calculan diez características de valor real para cada núcleo celular. La media, el error estándar y el \"peor\" o el más grande (la media de los tres valores más grandes) de estas características se calcularon para cada imagen, lo que dio como resultado 30 características. Por ejemplo, las medidas del radio son para el \"radio medio\", el \"error estándar del radio\" y el \"peor radio\". 
Todos los valores de las características se recodifican con cuatro dígitos significativos.\n\nLas dos clases objetivo corresponden a resultados negativos (benignos) y resultados positivos (malignos).\n\nEste conjunto de datos original se dividirá aleatoriamente en dos conjuntos para fines de entrenamiento y prueba.", "_____no_output_____" ] ], [ [ "data = load_breast_cancer()\n#print(data.DESCR)\n\nprint(\"Descripción:\")\nprint(data.keys()) # dict_keys(['target_names', 'target', 'feature_names', 'data', 'DESCR'])\nprint(\"---\")\n\n# Note that we need to reverse the original '0' and '1' mapping in order to end up with this mapping:\n# Benign = 0 (negative class)\n# Malignant = 1 (positive class)\n\ndata_clases = [data.target_names[1], data.target_names[0]]\ndata_target = [1 if x==0 else 0 for x in list(data.target)]\ndata_features = list(data.feature_names)\n\nprint(\"Clases Target:\")\nprint(\"Clases\", data_clases)\nprint(\"---\")\nprint(\"Distribución de clases n=%d:\" % len(data_target))\nprint(pd.Series(data_target).value_counts())\nprint(\"---\")", "Descripción:\ndict_keys(['data', 'target', 'target_names', 'DESCR', 'feature_names', 'filename'])\n---\nClases Target:\nClases ['benign', 'malignant']\n---\nDistribución de clases n=569:\n0 357\n1 212\ndtype: int64\n---\n" ], [ "pd.DataFrame(data.data[:,:], columns=data_features).info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 569 entries, 0 to 568\nData columns (total 30 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 mean radius 569 non-null float64\n 1 mean texture 569 non-null float64\n 2 mean perimeter 569 non-null float64\n 3 mean area 569 non-null float64\n 4 mean smoothness 569 non-null float64\n 5 mean compactness 569 non-null float64\n 6 mean concavity 569 non-null float64\n 7 mean concave points 569 non-null float64\n 8 mean symmetry 569 non-null float64\n 9 mean fractal dimension 569 non-null float64\n 10 radius error 569 non-null float64\n 11 texture error 569 non-null float64\n 12 perimeter error 569 non-null float64\n 13 area error 569 non-null float64\n 14 smoothness error 569 non-null float64\n 15 compactness error 569 non-null float64\n 16 concavity error 569 non-null float64\n 17 concave points error 569 non-null float64\n 18 symmetry error 569 non-null float64\n 19 fractal dimension error 569 non-null float64\n 20 worst radius 569 non-null float64\n 21 worst texture 569 non-null float64\n 22 worst perimeter 569 non-null float64\n 23 worst area 569 non-null float64\n 24 worst smoothness 569 non-null float64\n 25 worst compactness 569 non-null float64\n 26 worst concavity 569 non-null float64\n 27 worst concave points 569 non-null float64\n 28 worst symmetry 569 non-null float64\n 29 worst fractal dimension 569 non-null float64\ndtypes: float64(30)\nmemory usage: 133.5 KB\n" ], [ "# separamos un 25% para test/held-out\nX = pd.DataFrame(data.data[:,:], columns=data_features)\ny = pd.Series(data_target)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)", "_____no_output_____" ] ], [ [ "## 1.3 Overfitting: cantidad de vecinos y pesos", "_____no_output_____" ] ], [ [ "# veamos como le va a nuestro modelo variando la cantidad de vecinos y el tipo de peso\nvalores_k = list(range(1,50,4))\nresultados_train_u = []\nresultados_test_u = []\nresultados_train_w = []\nresultados_test_w = []\n\nfor k in valores_k:\n # instanciamos el modelo uniforme\n clf_u = KNeighborsClassifier(n_neighbors=k, weights='uniform')\n clf_u.fit(X_train, y_train)\n 
y_train_pred = clf_u.predict(X_train)\n y_pred = clf_u.predict(X_test)\n resultados_train_u.append(accuracy_score(y_train, y_train_pred))\n resultados_test_u.append(accuracy_score(y_test, y_pred))\n\n clf_w = KNeighborsClassifier(n_neighbors=k, weights='distance')\n clf_w.fit(X_train, y_train)\n y_train_pred = clf_w.predict(X_train)\n y_pred = clf_w.predict(X_test)\n resultados_train_w.append(accuracy_score(y_train, y_train_pred))\n resultados_test_w.append(accuracy_score(y_test, y_pred))", "_____no_output_____" ], [ "# veamos que paso en cada caso\nf, ax = plt.subplots(1,2,figsize=(14,5),sharey=True)\nax[0].plot(valores_k, resultados_train_u, valores_k, resultados_test_u);\nax[0].legend(['pesos uniformes - train', 'pesos uniformes - test']);\nax[0].set(xlabel='k',ylabel='accuracy');\n\nax[1].plot(valores_k, resultados_train_w, valores_k, resultados_test_w);\nax[1].legend(['pesos distancia - train', 'pesos distancia - test']);\nax[1].set(xlabel='k');", "_____no_output_____" ], [ "# ahora busquemos nuestro mejor modelo usando validacion cruzada y gridsearchcv pero incluyamos otra distancia!\nmodel = KNeighborsClassifier()\nn_neighbors = np.array([1,2,3,5,8,10,15,20,30,50])\nparam_grid = {'n_neighbors': n_neighbors, \n 'weights':['uniform', 'distance'], \n 'metric':['euclidean', 'chebyshev', 'manhattan']}\ngrid = GridSearchCV(estimator=model, param_grid=param_grid)\ngrid.fit(X_train, y_train)\nprint(grid.best_params_)\npd.DataFrame(grid.cv_results_).sample(3)", "{'metric': 'manhattan', 'n_neighbors': 10, 'weights': 'distance'}\n" ], [ "# evaluemos este clasificador usando el classification report\nprint(classification_report(y_test, grid.best_estimator_.predict(X_test), target_names=data_clases))", " precision recall f1-score support\n\n benign 0.96 0.98 0.97 90\n malignant 0.96 0.92 0.94 53\n\n accuracy 0.96 143\n macro avg 0.96 0.95 0.95 143\nweighted avg 0.96 0.96 0.96 143\n\n" ] ], [ [ "## 1.4 Efectos de escala\n\nDado que KNN esta basado en distancias si no usamos una distancia que involucra la varianza entre variables como la distancia de Mahalabois, nuestro modelo se verá 
afectado\n\n![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAjAAAAGkCAIAAACgjIjwAAAgAElEQVR4Ae29D2hU17r+vyGIFQltIUiQUoLQKfGmhiGlt46DTDyW3JJTrbSWHJFQJEiCJ0fC0Wr02syptj9p8TThYltBpI2tSPO7ejOSm+sdmgRN7XiOB0/tbRMTRAltCPmjWIk2JsP6MnnN68reeybzd++1Z56h2LXW7L3Wuz7vzn5mrfXutTWBDwiAAAiAAAgoQEBTwAaYAAIgAAIgAAICgoSLAARAAARAQAkCECQl3AAjQAAEQAAEIEi4BkAABEAABJQgAEFSwg0wAgRAAARAAIKEawAEQAAEQEAJAhAkJdwAI0AABEAABCBIuAZAAARAAASUIABBUsINMAIEQAAEQACChGsABEAABEBACQIQJCXcACNAAARAAAQgSLgGQAAEQAAElCAAQUreDVu2bNkkfaqrq0+dOhVndUOznzgPjvMwsufBgwd8vNyK8Vs+LOmEXH/SlWT0RLnXcjqJRuXOplhVEq3jFBDIBQIQpOS9nJ+frxk+mzZtWrDGo0ePLl68OBgMLnhkQgeQPffu3aOzdK3ovk2oZtODdfWbHmN7odxrOZ2oYbrOplJVok3jeBDIHQIQpOR9TXel9vb2qampu3fvfv7551Ty6aefxq50/fr1mqalXZAuXbp04cKF6elpal3XStrvobr6Y3fZrm/lXuv4JGSSrrOpVJVQuzgYBHKKAAQpeXfTza6jo4Or+OijjzRNc7vdVNLX17dx48b8/PylS5eWlpaeOHFCCOH3+wsKCjRNe/nll48cOSKEMD2M66REQ0NDZWXl0NCQEOLq1auVlZUbN26kr06fPl1ZWfn555+/8cYblZWVNGVnbIWs7e3t9fl8+fn5Ho/nb3/7G7dy7NixsrKy/Pz85557zu/3T01N0VcbN26srKxkkaPsgwcPjPVzVUKIN954Y+PGjVeuXKG2Xn755d7eXj4gWluVlZVvvPHG8ePHn376aZ/PNz09XVlZ+e///u9ff/31Cy+88OSTT27btu327dv19fVPPvnkihUrjh8/znVGYygLEvM5f/585fwPwzStx9hZrooMiNajBVGw/UiAAAgIgddPpHAVGAWpq6tL07S8vDwhxPT09PLlyzVN27hx4+bNmxctWqRp2pUrV6qqqhYvXqxpWn5+/o4dO6IdprPr3Xff1TSNxl6HDx+mmcK+vj4hREVFhaZply5dkm++ulaEEPRtQUHB5s2by8rKNE175plnqBWqfPHixa+99tqyZcs0TauoqKCvyFTWJ8reu3fPWL9scH5+fl5eHrVVWlqqaVphYeGCbWmatmjRory8vKVLl7799ttTU1OaphXMfrZs2UKGFRQUlJaWbty4kQhcv349BmruNU1jMh8ay+bPfqhHy5Yti1GPsbNclRAiBj0yIBoKmRjSIAACEKSUrgG6K8kjpPHxcbpRTk9P3759+9SpUzx9t2XLFk3TTp8+LYSQ539iHCYbd+XKFU3TaIGKFEjTtGPHjk1PT+fl5dHtXr5L6lrhW/OxY8eEEFNTU3QjnpycHB4ezpv9/PDDD0KI27dvr1ixQtO0c+fOCSGiCZKxftlasoRGMJOTk3l5eZqm3bt3L3ZbhK6lpUUI8eDBAxIkTdOuXr0qhDh69CgJG6njunXrNE07e/Ys2RwNtcxETpO1//mf/0kq2NXVFbse2WUMc8Ee8ZFGFDIupEEABIgApuySvxLoBicLEsmGpmnhcJju+6dPn962bdtLL71Ed1ujIMU4TGdZUVHRokWLpqamFi1a9Nprry1atKiqqurs2bOapu3YsYPvfRzUYHoP/fnnn6laMv727dunT5/WNG39+vXc3M6dOzVNq6mpSVGQbty4Ibc1Pj4euy1CNDIyQmeRINFwUwhB53LMSFVVFQt8DIayCMlpIURvb++SJUs0Tfvyyy+571NTU/G4jKuK3SN2ihEFt4gECIAAE4AgMYqEE3RXkgWJbk/PPvusEGJ8fLyoqIju9YcOHfL5fHwDlaUixmE6gxoaGjRN2717t6ZpLS0tFRUVBQUF27Zt4/gIvkvSiXIrfGdkuaKDWSR4EUUIsW/fPk3T3n777RQFKdG2eHBJ9pMgLV68mLLEtqqqirKyIMVgKDOR04ODg7SS995771GF8btMhklWRaMnH0mtMHZuFAkQAAEmAEFiFAkn6ObCgnT37l1am9m3b58QorW1VdO0zZs3U72bN2/WCdL58+djH6YzqKenR9O0p59+WtO0H374gQIonn766SeffJIGZPINl6fUqJUYd8bvv/9e07SlS5eOj49Tiy+//LKmaRSCQWOIwcFBul+TZpDSkOBx/bK1Okv4Lhy7Laqc64lfkGKgli3h9OjoKE1L0iiQW4xRj66zXFXsHsXAzo0iAQIgwAQgSIwi4QTdldxu9/r168vKyihsoaioiO7s9Nv5mWeeOXv2LIchtLa2CiFeffVVChz4j//4jxiHGQ2iH/UFBQVCiL/97W90B6ehjPHeJ7di/JaMJ1PpyOLi4rq6urVr12qa9vzzz9NSDYnTq6++2traWlZWtnTpUloN0vVCZyrfr6k8zraSFqQYDGVLOE3reZqmvfrqq+vnPt9//32MemLAjEEvNnYdNGRBAAQgSMlfA3SDo9toXl7e8uXL6+vreQkkHA5v2rSJvi0uLqapNhKPY8eO0Tp/ZWVljMOMltXU1GiaxtNWZEB7ezsdyTdcysqtxL4z3rt3b8eOHSSomqZVVlYODw9TJZcuXSosLKSV/0OHDlVWVrIg6eqXrdVZQlkSvxhtJS1IMRjKlnCaekHN8b8XLlyIUY+us1yVECJGj2Jjl4khDQIggCi7jF8DDx484KkwubGpqamff/6ZptooqMz0MPmUJNK6VmLXEA6Hh4eHOcKbDw6HwyMjI/woEpdTKIHcC/mr2OlobcU+K/a30VDHPsv4bbR6YsPMRI+MtqEEBLKbAEZI2e1f9A4EQAAEHEMAguQYV8FQEAABEMhuApYK0sTExN+lz927dwnu0NBQMBjs7+/PbtboHQiAAAiAQAwClgrS8ePHV65c6Z77XLx4UQgRCAQ8Hs+uXbvKy8ubm5tj2IqvQAAEQAAEspiApYLU0NDw1VdfyTRnZmbcbjc95jIxMVFaWnrz5k35AKRBAARAAARyhIClglRRUREKhSYmJh4+fEh8u7q6ysvLmXV9ff3Jkyc5iwQIgAAIgEDuELBOkGZmZoqLiysrK1evXl1cXLx//34hxJkzZ+rq6hh3Y2PjgQMHOMsJ19yHS5AAARBQh8DcHyj+n4UErLzMrBOkX375pb6+/pdffhFCjIyMrF279tSpU21tbbQxKPV5/+zH2H+Xy2UsRAkIgIAiBPAXqogj0m6GxZ61TpB0pA4ePPjnP/85EAjU1tbyV42NjU1NTZzlhMVQuF0kQAAE4iGAv9B4KDnxGIs9a50g3bp1q62tjV1y4MCBd955JxQKeb1eLqytrQ0EApzl
hMVQuF0kQAAE4iGAv9B4KDnxGIs9a50g9ff3r1y5kgLqRkZGPB7PxYsXw+Gw1+vt6ekRQgwMDKxatWpsbMzoNouhGA1QqsTvF5omfL7If93dSpkGY3KUAP5Cs9XxFnvWOkESQnz11Vdut7u6utrtdtPbDYQQoVDI4/FUV1eXlZV1dnaa+tViKKY2KFLo90d0iD8+n/D7OYcECNhDIJm/UPywssdXibWajGcTa2He0ZYK0ryWE8lYDCUR0yw9trt7nhpR234/NMlSL6AxI4GE/0LT8cOqr69v586dmzZtqq6uvnTpElnV19d35MgRo4VJlNy9e/e99957++23o/1QTqJOx52SsGdT6yEEKTV+1p4tz9Hx70tNgyBZ6wa0ZiCQ2G0rHT+szp07t2zZspaWlo6OjhMnTixbtoxeRR8MBtevX28wMJkCj8ezb9++9vb20tJS+T33ydTl2HMS82zK3YQgpYzQwgq0OXfpfl9Ckyx0ApoyIZDYbUv+YcWVJTLSn56eXrZsWTAY5LM7OztfeOEFIQQLUjgcDgaDZ8+eDYVCfNjo6Gh7e3tHRwe/ZsVYQgdfunSJKqRlhdLSUq4kpxKJeTZlNHN3uJQrymgFFkPJaF9SqZz+Zvn3pTxIwmJSKmBxbooEEvsL5R9WulajlesOE6Kjo2P58uWG4kgBCdKDBw/cbveWLVvq6uqef/75d999VwjR19e3YsWKnTt3bt26dcWKFZOTk8YSrrO1tXXr1q2UDYfDeXl5/PYyPiYXEol5NmUiEKSUEVpYAQkS/b7kQRJlIUgW+gFN6QkkdtsyHQzx7yx93Sb5L7/8srKy0uSLOUG6evXqoUOH6ICOjo6KigohxOeff75p0yYqPHv27PDwsLGE6/z0009ramo4u2jRIh5UcWEuJBLzbMpEIEgpI7S2AhoV8R+vrEOYuLPWFWjtMYHEblumgmQ6j/e4hXmps2fPut3ueUVzGZ6yC4VCu3fvrqqqWrFiBa0qjYyMFBcXFxQUbNmy5fz587RljK5krhpx7Nixbdu2cTYvL8/0pcl8QLYmEvNsyhQgSCkjtLwCeaaOY767uyMPJ3HWcqPQYE4TSPi2xQN8wib/sIoD5PDwcF5e3ujoKB87Ojr6zDPPTE1NkSB1dnYuX778yy+/vHLlSkdHx7p16/jIq1evfvTRR4WFhadOnaJCY4kQ4uuvv968eTMdMD4+vmTJEq4hpxIJezY1OhCk1PjZcTYJkq5l+n0Z9yS87mxkQSAlAsnctmiY7/Ml90tq27Ztr732Gr3kc3JycuPGjVu2bOE1pIaGhurqaurS7t27CwsLhRCHDh3auXMnFW7ZsuWDDz4wljCF0dHR/Pz8kZERIcSRI0e4Nj4gRxLJeDYFNBCkFODZcSqpEe3UwNs00O9Lnsezwy60mdMELL5tCSEePHiwffv2JUuWFBUVLV26dOvWrffu3WNBun79emFh4caNG30+n9/vX7JkSTgcHh8fLysr83q9a9eu9Xq947MfXYnsxdbW1meffbaioqK4uJiUSf42R9IWexaC5KTriuc5KEE/LnmmLpFJeCf1GraqT8Di2xYDCYfD9+7dixb/ZvrVgwcPdOEJxhK5/snJSc7mYMJiz0KQHHON6QZApEm8PJzgJLxjeg1DHUHA4tuWI5hkh5EWexaC5JjLhnZTlbdVJYnStOQm4R3TcRiqPgGLb1vqA8kaCy32LATJMVcOSRGby0MiBDIwEyTsImDxbcuubuZguxZ7FoLkjGvMNKqbV5Kc0QdYmb0ELL5tZS9I5XpmsWchSMpdAaYG0Xyd7jEjfkjW9BQUgoBlBCy+bVnWLzRksWchSM645GihyBjtjfk6Z/gv2620+LaV7TgV6p/FnoUgKeT7aKbQSIiGR3IgAw2bop2FchCwjEASty26qlN58XGm34dE9HpmP5aRVK2hJDybShcgSKnQs+JcUiAO76YmKYsHj6xwANqIg0City1+oo7q5gidOJp6dIgF70MSQly5cmXZsmXHjx+P37AsOzJRz6bYfQhSigAzfjqrjvw3LI+ZMm4BGgCBhQgkdNvSPVFHdet+csVu0IL3IQkhjh49WlRU9PLLL0OQYrsjjd9CkNIIMyNVyatE9JfMGzRkpD1UCgKJE0hIkPg3ltxOQoJkwfuQhBA9PT2Tk5M1NTUQJNlTGU1DkDKKNw2Vm/6hmv7GTENjqAIEkiKQkCDJv7Hk1qKVy8dQ2oL3IXGjECRGYUECgmQB5JSaMBUk09+YKTWDk0EgBQIJCZLpJZ3QbywL3ofEMCBIjMKCBATJAsipNiGvHgkhklgBTtUCnA8CMQmkLkgJ/cay4H1I3F0IEqOwIAFBsgByGpqQV490j8emoXZUAQKpEUhIkISIvEnS53vcZBK/sTL9PiQ2DoLEKCxIQJAsgIwmQCDLCSQqSEKIFH9jWfA+JPIZBMnKaxeCZCVttAUC2UkgCUFKC4hMvw8pLUY6uhKLPQtBcvTVAuNBQAkCFt+2lOhzbhhhsWchSLlxWaGXIJBJAhbftjLZFdQ9j4DFnoUgzaOPDAiAQBIELL5tJWEhTkmOgMWehSAl5yacBQIg8JiAxbetxw0jlWECFnsWgpRhf2am+tR3Ss6MXag1RwlYfNvKUcp2dNtiz0KQ7HByam2m/gxHau3jbBDQE0jitnW//+OJ9qJfe6t+7a263/+xvkbk1SCQhGdTMRyClAo9G8413WHFdC8WG4xDk7lKINHbFqnRRHsR//drb1WuwlO634l6NsXOQJBSBGj16bzDijxrl8SD7lbbjfaymkCity3WITmRBCHd2/Pu3r373nvvvf32252dnbrapqena2pqrl+/zuWXLl06duwYZzOUGB4e1tWckCV9fX0NDQ26Gijb19e3e/du3VemhbpjEsom6tmEKjceDEEyMlG6hHZENs7axb9TstLdg3HOJBD/bWt67DsWoTVjT2hC04QW6C2kwoTm7oxvz/N4PPv27Wtvby8tLf3yyy9lllNTU5qmvfTSS1zY2tpaVZXxYdnSpUu5RUokZMmPP/64Y8cOXQ2UDQaDy5cv131lWqg7JqFs/J5NqNpoB0OQopFRtJykSN4HjHZh0bTI/mD4gIAtBOK/bf3aW0Xaw2qkCW1P/1OJCpLx7XmXLl164YUXqPuhUKi0tFRGQTJQWlp6+PBhKtcJUm9v79mzZ2/dusVnhcPhYDB49uzZUCgkhLh9+/b333//ww8/BINBOsZ4yujoaHt7e0dHx9TUlBDi6tWrmqYFg8FwOMzVxrZEV+ft27cvXLjA53Z2dp47d256evrChQukPePj4+3t7b29vXQMFQ4PD589e7avr49PpNc7cQd1fdGZLZ8Vv2fls5JOQ5CSRmfPiTRT1909r3WfD1uAzwOCjMUE4r9t8fCIxkb8L5fHabnx7Xmtra1bt26l08PhcF5enlEG+vr68vPzaeJOFqRNmzb5fL6Ghobnnnvu9OnTQogHDx643e4tW7bU1dU9//zz7777bjAYfOGFF9xu99q1a8PhsPGUvr6+FStW7Ny5c+vWrStWrJi
cnGxpadE0raamZnp6mvtFgmRqibHOYDBYUFAghJienvZ4PK+99lpNTY3P5ysoKAgGg0uWLPF6vTU1NStWrDh06JAQggpffvnl+vr6oqIimpMMh8Pr169ft25dXV3dM888c+LECbkvP/74o85sNlUIEb9n5bOSTkOQkkZn24maZr5TMmbtbHNJzjcc/22Lwxn29D/FarRm7AkSpERDG+SdTz/99NOamhp2xaJFi2iYQiUkA0KIDz74gCbuWJA6Ojp4Ku/nn39++umnw+Hw1atX6RYvhOjo6KioqAgGg4sWLbp79y6VGE/5/PPPN23aRM2dPXuWVo80w59lNEtMzWBBam1tXb9+PVV+/PhxEiS259y5c263mwRp8eLF4+PjQogbN248+eST4XD49OnTHo+Hzr1x48bixYv/53/+h881NZsOzhVB+uc//zk6Osp9HhoaCgaD/f39XKJLxH+5607MyizP2tG7zGmmzjT6Liu7j04pSCD+v1AWpEBvIQkSq9FEe9H02HcJ9U4WpGPHjm3bto1Pz8vL+/TTT6tnP8eOHWMZEEKUlZUdPnyYBamhoeHZZ5/dNPfJy8sbGhoSQoRCod27d1dVVa1YsWL9+vXBYPBf/uVfqH7TU0ZGRoqLiwsKCrZs2XL+/Hk6MoYg6SwxrZMFadu2bUeOHKE6R0dHSZAKCwup5MKFCzSQCgaDFRUVVCiEWLp06Q8//LB9+/Z3332XC59++ukjR45wX0zN5oPj9yyfkkrChhHS4OBgSUkJz8MGAgGPx7Nr167y8vLm5mbTzlgMxdQGdQpNg7w5+k4dO2FJ7hBI9C+UJ+jkRBK4ZEH6+uuvN2/eTJWMj48vWbKkt7e3dfbT29srCxJNl+3bt4+CGvbt21ddXT0ufaampjo7O5cvX/7ll19euXKlo6Nj3bp1wWCQxyimp1DTV69e/eijjwoLC0+dOiWEiC1IsiWmdbIg7dy5c8+ePdTE9evXSZA4qEEWpHXr1jHJJUuWDA8P79y5Uw7Vy8/P/+STT7gvpmZzDYl6lk9MLmG1ID18+HDDhg0+n48EaWZmxu12Dw4OCiEmJiZKS0tv3rxp7InFUIwGqFZijLJDRINqPsopexL9C+XQBhakhOLrmK0sSKOjo/n5+SMjI0KII0eOVFdX82FCCFmQhBCHDx/Oy8sjQbpw4cKzzz57+/ZtIcSVK1cKCwunp6cbGhq4ht27dxcWFsqCZHrKoUOHdu7cSY1u2bLlgw8+IEGSF5BiWGJaJwtSKBRasWIFzcXV1dXFEKRFixZRaEZHR8dzzz0nhOjq6nr++efv3btH6cLCwv/93/9lQTI1m9El6lk+MbmE1YJ0+PDhlpaW7du3kyB1dXWVl5ez6fX19SdPnuQsJyyGwu2qnJDfbxaJnJ1dWMI4SWWXZbFtSfyF3u//mLZpmGgvSk6NhBCyIAkhWltbn3322YqKiuLiYlImZq4TJCHESy+9xGHfhw8fLiwsfO2115YvX37u3DkhxPXr1wsLCzdu3Ojz+fx+/5IlS+SbOEma7pTx8fGysjKv17t27Vqv10vi4fV6lyxZIge8xbDEaAYLkhCipaWloKCgsLBw8+bNy5YtkyO85RFS2ezn1VdfLSoqunr1KhF49913ly9fvn79+qKiokuXLsniamo2c0vCs3xuEglLBeny5cu06MeCdObMmbq6Ora7sbHxwIEDnOWEa+7DJUgwAYyWGAUSdhGw+LYVo5vhcHhycjLGAdG+Mj3x3r17cqie7lzTUx48eCAHU1CAnO7EGFnTOilCgWO7z507x+ObaFUZIUSrmWowmk3lFnvWOkG6e/duRUUFzcixILW1tcmPfe2f/RgRWwzFaICyJaaxDKaLTMp2AYZlAQH8hWbaiT/++OPy5cs//fTT1tbW5557jhaoMt1oNkfZ7d+/v6Ghgbb6ePPNN5ubm/v7+wOBQG1tLWNtbGxsamriLCdwuTMKXcJ0jg6CpKOEbKYJ4C8004SFEH19fUeOHHnvvfd4Is6CRi32rHUjpObm5u1zn9WrV2/atOnEiROhUMjr9TLW2traQCDAWU5YDIXbVTzB29nRApK8jGR4+EHxrsA8ZxPAX6iz/Rfdeos9a50gyV3mKbtwOOz1ent6eoQQAwMDq1atGhsbk4+ktMVQjAYoWCI/jUTKRCVcrqDNMClbCcwt8uL/WUjAyovWZkGip888Hk91dXVZWZlxj15iAUHSXRO0dNTd/Ti4TojIXnb0Hw2VEAiug4YsCICA4gTsEaREoUCQdMRo6Yj/pb1WSY14XzsIkg4asiAAAooTgCAp7iBz82iJiP+Vn0mi6bvZJ/LMz0UpCIAACKhJAIKkpl8WsIpn5zhBL6HgMZNpOPgCleJrEAABELCVAATJVvzJNs46xAkhIluA89oSpZOtHueBAAiAgA0EIEg2QE9Lk3JMnTHKDgtIaYGMSkAABKwkAEGyknaa25KXjuhVFPILKdLcGKoDARAAgQwTgCBlGHDmq+fHYzFNl3nYaAEEQCCDBCBIGYRrQdU0cccN+XyRR5HwAQEQAAEnEoAgOdFrj2w2DaWjMAcH9wqmgwAI5CoBCJKDPW86RwdBcrBHYToI5DYBCJKD/R9tB9Vo5Q7uKkwHARDIAQIQJAc7mSPr5KGS6TyegzsJ00EABHKGAATJqa7mZ4+oAxzOIIuTU/sGu0EABHKSAATJkW7nYZA8SMJDSI70JYwGARCYIwBBmiPhqP/TMIhivnWPxzqqHzAWBEAABB4TgCA9ZuGglKY92rZOZ7Om4TkkHRJkQQAEHEMAguQYV8mG0gJSd7dcFpEoCNI8IsiAAAg4igAEyVHumjOWBGku9+j/NI+HmG8dFmRBAAScQgCC5BRP6e3kTYNInDTt0fCI3h6rPxp5EAABEFCeAARJeRdFMZAEiXSIZ+o4EeUkFIMACICAugQgSOr6JrZlPDDiB4/oUSRsHRSbG74FARBQlgAESVnXxDKMhkekQD5f5F2xPDaCIMUCh+9AAAQUJgBBUtg5UUzjp2IpfsE4VEJcQxRyKAYBEFCaAARJafeYGsdzdDxOEiLy+BFnEddgyg2FIAACihOAICnuIBPzeABECsRHUDlvasflSIAACICAIwhAkBzhpnlG8ioRrR7ReIg3EIIgzYOFDAiAgHMIQJCc46s5S1mQaEhEUsTPIQkRCXDABwRAAAQcR8AZty6Xy+U4shk1mCbrWJnkURGHPGTUAFQOAiAAAmknAEFKO9K0VUjhczQvp9u2TohHm6vKAyNqmEMe0mYHKgIBEAABSwhAkCzBnHgjuoAFGgMZJcr0sMRbwxkgAAIgYD8BCJL9PjBaYDrtRkMlPpin6TicgZ+N5WOQAAEQAAEHEYAgqegs47Sb6aslaHikaZGdGoynqNgx2AQCIAAC0QlAkKKzse8bY5gc6Q2V88QdSRGbSbIEfWIgSIAACDiLAARJRX9x+Bwbx6+I5UUjjvb2+x8dRULFWZ7T40qQAAEQAAGVCUCQVPSOUZBYbHhbINIbDnYgfdKdqMuq2FXYBAIgAAJzBCBIcyQU+z
+PhMgumo6TF4poaq67+/EudhQaLk/3QZAU8yrMAQEQiEUAghSLjr3f6cLnaJDEJslBd/Q0Ej+cxMdg1wYZBdIgAAKKE4AgKe6geebpwhlouUierJOHUKb6NK86ZEAABEBAJQIQJJW8EdMWViNWHZrWoywvMsl18JFyIdIgAAIgoCYBqwWpv78/GAzevHlTxjE0NBQMBvv7++VCOY297HhJiUWIhke8tsTR3swNUXaMAgkQAAFHELBUkP7617++8sore/fu/d3vfvfZZ58RoEAg4PF4du3aVV5e3tzcbEotxwWJJ+VIdWjFiF5bzmkaDOmWnUxhohAEQAAE1CRgnSANDAyUlJTcuXNHCDE6OohRHXUAACAASURBVFpcXDwxMTEzM+N2uwcHB4UQExMTpaWlusETUctxQeJhEF9DrEZyFDi2DmI+SIAACDiRgHWCFA6HSXiEEHfu3HG5XCMjI11dXeXl5Qyuvr7+5MmTnOWEa+7DJTmVoIGR3GXaSUiO8Ka3mEOTZEpIgwAIOIuAdYJEXGZmZk6fPr1hw4aWlhYhxJkzZ+rq6hhZY2PjgQMHOMsJjJB4/wViQiMkHh5RoWlcAzNEAgRAAAQUJ2C1II2Ojn7xxRc1NTVvvfXWnTt32traduzYwYz2z344y4kcFyTjuIeWjnQjJHrqyFjIGJEAARAAAZUJWC1IzKK6urq5uTkQCNTW1nJhY2NjU1MTZzmR44LEAd8MhNeQOLiuuzvyyj7j5B6fggQIgAAIKE7AOkG6ceOGvD70zjvv7N27NxQKeb1eZlRbWxsIBDjLiZwVJJIikhkO+KasTntYoozvlmWMSIAACICAygSsE6SBgYGVK1feuHFDCDE2NubxeL755ptwOOz1ent6eoQQAwMDq1atGhsbM/LKTUHiZ4+MAQssPyRLFIankygjRpSAAAiAgMoErBMkIcSpU6dKS0u3bdtWWlrKzyGFQiGPx1NdXV1WVtbZ2WkKKwcFiZ4okmlQCa8ecSy4LE662Af5dKRBAARAQHEClgpS0ixyUJBMd/3h3btNB0OkTCRUmLhL+mLDiSAAAnYRgCDZRX6BdqMFy1G5cVsgXeCD8YAF2sPXIAACIGA3AQiS3R6I0j4PhuTveR5PFwgu7xjEx5vWwN8iAQIgAAKqEYAgqeaRR/aYygkHL/AWdnS0PFnH/TGtgb9FAgRAAARUIwBBUs0jj+2Ro+yEEHLwAq0hsSzRgMm47BRt3u9xG0iBAAiAgDIEIEjKuMLMEHkujuVHJ040EtJN4uHtfGY4UQYCIKA0AQiS0u4h41iW5M3r5OdkowXdIdbOAd6FiSAAAnMEIEhzJBT+P83FaVpkcyD+0MCIguvoyVlZrhBlx6CQAAEQcAoBCJIDPEVLQcZJOZIoXijigZTxSAd0EiaCAAjkPAEIkgMuAR4M8UYMvMed6WSdA7oEE0EABEDAQACCZECiXgELEsmPcfWIhUo922ERCIAACMRLAIIULyl7jyMRkgdGFHRHUkSKZa+FaB0EQAAEUiQAQUoRoHWn0xIRB3/LC0UQJOvcgJZAAAQyRgCClDG0mamYQxh01Ucr1x2GLAiAAAgoSwCCpKxrzA3TDYZ4T1VdULj5ySgFARAAAYUJQJAUdo6ZabIg8d5C9KASnj0yA4YyEAABxxCAIDnGVWwo6RAtKdE2QhxlJ8sVH48ECIAACDiCAATJEW7SG9ndLXh/VVYj2q9BzupPQx4EQAAEFCYAQVLYOfNN4+Ui3klo/vePcohuMMWCQhAAAfUJQJDU91HEQl4uInPpVRQ0QUdDJVIpnsdzRq9gJQiAAAhIBCBIEgxVk6YyQxKl21AVsXaq+hB2gQAILEwAgrQwI3uP0M3UsTFcziX0PlmsITEQJEAABJxFAIKktL9oGMTLQnJgN83a0eBJnsGDICntURgHAiAQnQAEKTobu7/hmTo5mJvT0fb5ZvWy23y0DwIgAAKJEYAgJcbLyqMpTkEXzM2CJI+W2CrWMC5BAgRAAAScQgCCpK6nImOduRA6v6+b4xfmyiKhd7oPa5iuHFkQAAEQUJ8ABElVH/n9fs3/WHJ8vm6fn8IWaMNv3vab32tuOmZStXuwCwRAAAT0BCBIeiJK5Gen3nh27pFJs3nSJB4t0Ryd/G4kJeyHESAAAiCQOAEIUuLMLDhjbupt3vOwfr9Pi0zcsRqxIXrp4i+QAAEQAAHnEIAgKekrKVSOxkCPAru1yKwdz9Gx6RAkRoEECICAcwlAkJT0nanCdHf7NT9HNJAs8eOx2KNBSUfCKBAAgQQIQJASgGXdoWaCRIF2/I28mMSjqMdBENbZipZAAARAID0EIEjp4Zj+WuYtHwkKsZOfSaI3UJAC8Twey1X67UGNIAACIJBhAhCkDANOpXoe+GiaT+vmpSPeVpXivOVobwhSKrxxLgiAgL0EIEj28o+3dSnKIXJKtBf0idlHaeOtFMeBAAiAgEoEIEgqeSO6LcahD4+T5JNoTCWXIA0CIAACTiEAQXKGp0wFyRhZx4tJzugVrAQBEAABiQAESYKhdnJ+lEPk8VjdQ7LyYpLaXYF1IAACIGBCwGpBGhwcDAaD//jHP2RbhoaGgsFgf3+/XCinXS6XnM3ZtBTl8GhnVWNJzsJBx0EABJxOwFJBOnjwYHl5+a5duzZs2PCHP/zht99+E0IEAgGPx7Nr167y8vLm5mZToBAkUywoBAEQAIFsImCdIP30008lJSV37twhfL///e/b2tpmZmbcbvfg4KAQYmJiorS09ObNm0a+ECQjE5SAAAiAQJYRsE6QhoeHv/32W8ZXX1/f0tLS1dVVXl4uF548eZKznIAgMYpoifv9H0+0F/3aW/Vrb9X9/o+jHYZyEAABEFCWgHWCJCO4detWSUnJTz/9dObMmbq6Ov6qsbHxwIEDnOWEa+7DJUjIBO73f7yn6mNNE2tKvltT8l3g/YgsyQcgDQIgAALqE7BBkEZGRnw+3yeffCKEaGtr27FjB2PaP/vhLCcwQmIUpok9VR+vKfluor2I/ltT8t2eKgySTFGhEARAQF0CVgvStWvXVq9efeLECUISCARqa2sZT2NjY1NTE2c5AUFiFLrE9FhkPERqtGbsCS3yznMt0Fu4pyoyZsLcnQ4XsiAAAioTsFSQvv322xdffPH8+fNMJBQKeb1eztbW1gYCAc5yAoLEKHSJX3sjahTRpDk10oS2p/8pCJIOFLIgAALqE7BOkIaGhtxud1dX18O5z8zMTDgc9nq9PT09QoiBgYFVq1aNjY0ZqUGQjEyoZKK9SNNE5N/ZsRH/y+XRTkQ5CIAACKhGwDpBOnz48FxowqP//+UvfxFChEIhj8dTXV1dVlbW2dlpCgiCZIqFIutoMLSn/ylWozVjT9A8HkIbTLmhEARAQE0C1glSKv2HIBnpcWTdo+C6///fSJDWjD0x0V5E83jTY98ZT0QJCIAACKhJAIKkpl8WtkqOrKOY7z1VkUeRSI0QZbcwQRwBAiCgGAEIkmIOicMcY2RdJIrhs9fXlHwXibHTBOLr4qCIQ0AABJQjAEFSz
iULGhQ7sm6ivQjR3gsyxAEgAAIKEoAgKeiUBUziCDqOYqAEly9wPr4GARAAASUJQJCUdEtMoyicIRJch8i6mKDwJQiAgLMIQJBs9lcSm6KyIPHDsIiss9mLaB4EQCAdBCBI6aCYbB33+z++X/WU0LTpkiemS5749f3CaE8O+Xw+Tffx+TVfN/avS5Y9zgMBEFCOAATJTpfcr3pquiTy2BD9N13yRESfDB+/3z8rRv7ZILpuTevWtIg+rSn5N9reG5F1BmYoAAEQcB4BCJI9Ppse++7X9wtJjXjmLdBbeL/qqch/0guNJDXqlsZI3ZoWUak9VU/92luFyDp7vIhWQQAE0koAgpRWnHFX9mtvFc3RsRrRpqg6QeruJhHyzY6KJD2KJP2kSd3d3XE3iwNBAARAQF0CaRCkmZmZTPcv+7YOmmgvEppm3BR1zdgTQtPWjD3hF/5u0T23dPRojm6+Ij0SJJ/Pl2n+qB8EQAAELCCQmCDV1ta6XC632y3vglpcXJxpQ7NPkCic4X7VU7rQ7f/+LLKq9N/9r/uF3+fnQAYxX4o496jc7/dn2gWoHwRAAAQyTSABQTp37lxNTc39+/eHhoZWrlz5xRdfkHEQpCScxIIU6C2UN0WleTzeFHVuhPRoMMRCNJt4NI+HEVIS/HEKCICAggQSEKTXX399aGiI+7By5cpvvvlGCAFBYiaJJhaMsptbQzIVpEfzeFhDShQ7jgcBEFCTQAKC1NTUxKMiIcS9e/dcLte1a9cgSEm79tfeKoq1my6JLB3p4uvmXnf01Ox4yD8/ruFRlN3+P/5b0q3jRBAAARBQikACgvTw4UOXy1VZWckduHbtmsvlgiAxkCQS9/s/jsiSIXRbfmZ2LqSB5ui6aUvvWX3qDrwfOTeJdnEKCIAACKhGIAFBEkKEw+H+/n65D5OTk3/605/kkkyksy+oYUFK8mzenv6naEA0O3Pn92mRYPD9s0/Urin5Dq8+WhAmDgABEHAEgcQEyYIIb1NqOSVIps/Mrhl7otundWs+zddNjy51+zS/Xwv0FtIrzOVnaU0ZohAEQAAEFCcQVZB8Pt+1a9dk67u6uiyYnZNb5HROCZLpM7O+7kgsnk+LzNHxWyf8/siyEwSJrxMkQAAEHE0gqiA1Nja6XK7m5mbq3p/+9CeXy3Xo0CFbeptTgmT6zGy3T/v1/UJNE/KztH5/ZJCE1yDZck2iURAAgbQTiCpIQoiLFy+6XK61a9euWrWqrKzs1q1baW8+zgpzSpD4ESX5mVmhRQTJr/l1r0ESmhZ4v2pNyXcIbYjzWsJhIAACyhKIJUhCiOPHj7tmPx0dHTb2ITcFSd7mjt5Ssb/k/9tT9TE/S+vrjry3Yk3Jd4H3q/hZWhvdhKZBAARAIBUCUQVpYmJi7dq1Lpers7Pz1KlTLperuro6lZZSOTenBIlABf8YiWIg7VkzFnlb0nTJExTCsKbkO35dhU/rRpRdKpcWzgUBEFCHQFRBWrt27YYNG3777Tey9e7dux6PB0ENFnjufv+jMdCv7xdGIut8kSAG2gWcXldBc3SRmDtN7Kn6GPF1FjgFTYAACFhAIKogBQIBY/MffvihsdCCkqwfIc17kbn0Dllfd2Tnb3rdkXFbB7wGyYJrD02AAAhYRiCqIFlmQTwNZbcgyZsyTJdEZur8/sioiPcT4oCFaNs6xMMQx4AACICA4gQgSPY7iDdloDk6kiIO76b3mmNezn4/wQIQAIEME4AgZRhwzOp1mzLQ2IgUKPjHJ+gB2OAfTfZdjVkrvgQBEAABRxKAINnpNt2mDEKLhDD892dP3a96yu9/tIfqnv6njBuB22k02gYBEACBzBCAIGWGa3y16jZloKUjTUQ2X6AXmWtCo4eN6H3nXKvfLzRN+HyR/7q7uRgJEAABEHAwAQiSnc7TbcpAghT84xP0kiR6CKnbF4n5jrxJdu41E35/RIf44/MJvMGcaSABAiDgXAIQJDt9x4LEmy8E//joGdhf3y+kHb4fqdH7hbQXQ3f3PDUi6/1+aJKdfkTbIAACaSEAQUoLxpQq4Sg73n9BjvmmGAduwHSODoLEfJAAARBwLgEIkv2+Mz7xGuO95loUj0Urt797sAAEQAAE4iMQ5fYW38mWHZXdD8YKIXRPvOqyMmfjYIgCHCjGAQEOMiukQQAEnEUAguQsf0XWiuQQBgpw4Hk8BDg4zJ0wFwRAQCIAQZJgOCTJUXYU4KATIZ1iOaRPMBMEQAAEBATJkRcBSRE9OisPmITQD6Ec2T0YDQIgkJME7BGkCxcuyLSHhoaCwWB/f79cKKezfg1J7mz86WiBDNHK468ZR4IACICA9QRsEKSjR496vV7uaiAQ8Hg8u3btKi8vb25u5nI5kXOCFN9ODKazc6YPKskwkQYBEAABNQlYKkh37tzZu3ev2+1mQZqZmXG73YODg0KIiYmJ0tLSmzdvGknlliDxGhGB0K0RSXRMBYkDHKQDkQQBEAABBxCwVJCampo+/PDDzs5OFqSurq7y8nLmVF9ff/LkSc5ywjX34ZKsTZgOcEyVZxZB3OKVtcDQMRAAgawhYKkghcNhIURPTw8L0pkzZ+rq6phmY2PjgQMHOMuJHBohmQ5woguSEJHNVWmXVU2bFxHO9JAAARAAAUcQsFSQiIgsSG1tbTt27GBS+2c/nOVEDglStICEaOXMCAkQAAEQcDgBmwUpEAjU1tYyw8bGxqamJs5yIocEyXQwZDqPx3SQAAEQAIGsIGCzIIVCIZ6+E0LU1tYGAgEj2FwUJDnQznQez4gJJSAAAiDgZAI2C1I4HPZ6vT09PUKIgYGBVatWjY2NGXnmkCDRo620KEQgKK17/NXICCUgAAIg4HACNguSECIUCnk8nurq6rKyss7OTlOeuSVIHKXg80XeC0tSZDqVZwoLhSAAAiDgTAI2CFISoHJLkEwn6CBISVw3OAUEQMBRBCBI6rmLtqijmTr5fRIItFPPV7AIBEAgjQQgSGmEmY6qKJaBV4x4mwYE2qWDLuoAARBQmQAESSXvkOroZucoazqPN2u7HI4nD6hU6hhsAQEQAIGFCUCQFmZk3RGsOvKOQLox03xr5AOFiGzZwIOr+QciBwIgAAKqE4AgqeQheZWIY+1oScnMTNNpPN34yuw8lIEACICAigQgSCp5xVRMTGVn1moeUMl9MK1DPgBpEAABEFCTAARJJb+Yiomp7MxaLQ+o5G5EK5ePQRoEQAAEVCMAQVLMI4ksCpnqV/QBlWI9hTkgAAIgMJ8ABGk+D3tzFL9Ai0b0b8wQBVNBij6gsrdvaB0EQAAEFiAAQVoAkHVfxx4bRQnujn2SdcajJRAAARBImQAEKWWEaanAdKKNR0AxZYdOlfe9S4tFqAQEQAAELCYAQbIYeJTmTCfaSJBia1WU+lAMAiAAAo4jAEFSw2XRAuM0LfKwq3EDBh48qWE+rAABEACB1AlAkFJnmI4aTAWGxkbRtSrKulI67EEdIAACIGA5AQiS5chNGzQVJFoXMh0kdXf7Nb/P97gubBr0mAVSIAACziQAQVLGb8bI
BRIc0qr5gtPtm6dG1AdTUVOmezAEBEAABBYgAEFaAJClX8sBc/Lwh7SKBcfn82ndWFey1DVoDARAIPMEIEiZZ5xEC8ZABl5Pmn2pefR1pSQawykgAAIgoAQBCJISbtAbsZDg8GBJPtE0Plw+AGkQAAEQUJkABElJ7ywkOKbfG4dVSvYNRoEACICAOQEIkjkXm0spvs7nm/cQ0nzBMcZAxNz3zuYOoXkQAAEQWJAABGlBRJYfwCEMFNdA8XXzo+zIJjkGAmpkuZ/QIAiAQJoJQJDSDDTV6uSFIFlw5KC7VNvA+SAAAiCgIgEIkmJemT8v98g40yUjxQyHOSAAAiCQIgEIUooA0326Lr6OdwfSNJMd7dLdOOoDARAAARsJQJBshG/WtDwY4rgFnrvDSpEZM5SBAAhkBwEIkmJ+lOPreN2I5/FkuVLMcJgDAiAAAikSgCClCDCtp8vxdRTzzSXUDgQprbxRGQiAgFIEIEjKuEMXX0ebfNO/QggsJinjKBgCAiCQIQIQpAyBTbxanpejU2kwxP/S9B0WkxLnijNAAAScQgCCpIynjPF1FMLAgyQhHm/cgLk7ZfwGQ0AABNJFAIKULpIp18ODIVIgWkOiGAd61YS8WQMEKWXeqAAEQEA1AhAkNTxiukQkvzF29q0T82zVjajmfYcMCIAACDiPAARJAZ9RKF13t+CxEU3W0SDJKEVCRB6S5aBwBXoAE0AABEAgdQIQpNQZplYDSwsFNXDYAukQjZx0z8PycEoXB5GaITgbBEAABOwlAEGyl78UpyBPwcmPx5Iy8XhIJ1HywpLdXUH7IAACIJAKAQhSKvTScS7rEMcp0Ayepj2awSNB0o2c5Jb5RLkQaRAAARBwGgElBGloaCgYDPb390ej53K5on3l+HKWE0qQ8JAmcZw3DYOizdRxDY5ngQ6AAAjkNAH7BSkQCHg8nl27dpWXlzc3N5t6IycEibdjIOFhZeJymscjQLqZOh5mmeJDIQiAAAg4gYDNgjQzM+N2uwcHB4UQExMTpaWlN2/eNHLLZkEiveElIt1MHbGgADwaCXGAAw+MSLqM1FACAiAAAo4iYLMgdXV1lZeXM7H6+vqTJ09ylhNZLkgcxs0PHnHPKSHHfxsFCbF2OlzIggAIOJOAzYJ05syZuro6RtfY2HjgwAHOcsI19+GSrE3oguion3KgHS8vCfEo6oElKmuhoGMgAAI5QcBmQWpra9uxYweT3j/74Swnsn+EJAcs8OOxvGMQjZwoqxtOQY34KkECBEDA4QRsFqRAIFBbW8sMGxsbm5qaOMuJLBckHvRQgifuKMGFjIMSmKnTAUEWBEDA4QRsFqRQKOT1eplhbW1tIBDgLCeyWZA4JIETHOZAAQ78r6xAuig7JoUECIAACDiWgM2CFA6HvV5vT0+PEGJgYGDVqlVjY2NGmNksSCwznGBB4g2/6YEkDm2gJSUjJpSAAAiAgJMJ2CxIQohQKOTxeKqrq8vKyjo7O01hZrMg8SNEnKBVIh4YaVpkK1VSKV3ktyksFIIACICAMwnYL0jxcMtmQeLHiTghj4d4AYmlSD4sHnY4BgRAAAQcQgCCZLejWGA4QY/B8tsoKOiOpuloFCWPpew2H+2DAAiAQLoIQJDSRTKFeoxRdhSzoAu3I2WSYx9SaBOnggAIgIBqBCBIaniEZMZUgdhAXlXiB5L4KyRAAARAwPkEIEjq+ZDm7uSHkORVJd71Tj3DYREIgAAIpEIAgpQKvcycS4LE4yE5QSqVmWZRKwiAAAjYSwCCZC//KK3z8Ii+p9UjDgePchKKQQAEQMDRBCBI6rlPt55EasTzeJiyU89jsAgEQCAtBCBIacGYWiXyzqo6NSL54WAHfkg2tQZxNgiAAAgoSACCZLdTOOabDJG1h1ePuBB7e9vtLrQPAiCQOQIQpMyxjaNm04eKaM86Opsej2VlQsB3HFBxCAiAgEMJQJBsdZy8oSoZwm8rpyyNn2hOjxeTbDUZjYMACIBAhghAkDIENr5qdZsAsfDoNmWgKTtaT+IdhuJrAUeBAAiAgFMIQJBs9ZSsLjQY4reVswjxLnbynt+2Wo3GQQAEQCATBCBImaAad50sSLq1Ihoh8XuP5FUlIYRuXBV3azgQBEAABFQmAEGy2zs0MCLJkeO/OZCBEhzOYBoHYXcn0D4IgAAIpE4AgpQ6w5RrYB2iIRHP3cmaRIpFasTilHLLqAAEQAAE1CEAQVLAFyQ2/NCr7tlYmqCjyT2aylPAZJgAAiAAAmknAEFKO9LEK+SwBTqVYsHliDt5SYmXnRJvB2eAAAiAgMoEIEgKeIc0hmbqKGaB1YhXmKhECAFBUsBjMAEEQCATBCBImaCaYJ0sSPKiEaX5yVmWK0TZJUgXh4MACDiFAARJDU/JgQzyE0i8tze91JzeQMGFatgOK0AABEAgLQQgSGnBmHIl/BySTo1oeYnUiKIbeMyUcpuoAARAAASUIgBBUsAdvGLE4Qx+v6BYO5q44yyPkxSwGiaAAAiAQHoJQJDSyzPx2uTJOiEEjZD4NRO8ksTKlHgLOAMEQAAEHEEAgmSrm3jbBRokkQ5RmkZLFPCNgZGtXkLjIAAC1hCAIFnDOUorvCCkEySao6NREe9oF6UOFIMACIBAdhCAINnqR3mbVB4YUWC3bu7OVjPROAiAAAhYQACCZAHk6E3onnLVbRrEK0nRK8A3IAACIJA1BCBItrpSFiQeIWGOzlafoHEQAAG7CECQ7CI/1y5F2fFGDBS/gCiGOTz4PwiAQO4QgCAp4GvTsZE8eFLARpgAAiAAApkmAEHKNOE46udYO/lYCJJMA2kQAIEcIABBUsDJcqydbE60cvkYpEEABEAgWwhAkBTwJG/WwK+FpU1UeZsG0yGUAobDBBAAARBIIwEIUhphJlsV6RCfzXEN8q7eCHNgPkiAAAhkKQEIkt2OpWePOMqOzNFJFBViVcluX6F9EACBjBKAIGUUbxyV83Sc7qlYefqOqoEgxYETh4AACDiXAATJbt/pIhd4PYnKdTN1uoPtth3tgwAIgEAaCdgjSBcuXND1YWhoKBgM9vf368op63K5TMuzoVAe98ibf/MCEh/A32ZDt9EHEAABENATsEGQjh496vV6ZUMCgYDH49m1a1d5eXlzc7P8FaVzRZB4+k4eGLEg8bdGQCgBARAAAecTsFSQ7ty5s3fvXrfbLQvSzMyM2+0eHBwUQkxMTJSWlt68eVMHNpsFSQjBEQ3yNB0X8ubf2GtVd1kgCwIgkF0ELBWkpqamDz/8sLOzUxakrq6u8vJyplpfX3/y5EnOUsI199GVZ09Wjmhg4TEtzJ4+oycgAAIgMI+ApYIUDoeFED09PbIgnTlzpq6ujo1qbGw8cOAAZymR5SMk7i3PznEJvdS8u1suQBoEQAAEspKApYJEBHWC1NbWtmPHDoa7f/bDWUrkiiDJ03fUc3kxSQcFWRAAARDILgKZFaSDBw+6Zz/ykEgnSIFAoLa2lqk2NjY2NTVxlhI5JEi0aRA9hIQXI+muA2RBAASymkBmBenGjRvfzn4uX77
MGHWCFAqFZLmqra0NBAJ8MCVyS5B0nUcWBEAABHKDQGYFyZShTpDC4bDX6+3p6RFCDAwMrFq1amxsTHdiLgoSvyQJ0d66qwFZEACBLCVgvyAJIUKhkMfjqa6uLisr6+zsNKLOOUGSY74proFD74x0UAICIAACWUHABkFKgltuCZLpjgymAXhJoMQpIAACIKAqAQiSep4xnaODIKnnKFgEAiCQXgIQpPTyTEdtxh1UaT1J04SpVqWjTdQBAiAAArYTgCDZ7gKDAbrBEK0n8TwenkwyAEMBCIBAdhCAIKnnR1mQZB3i/RrkA9QzHxaBAAiAQHIEIEjJccvwWRxlR3N0ulERBCnD+FE9CICALQQgSLZgj6NRGhtpmjDdr8G4zhRHlTgEBEAABFQmAEFS2Tuzb6YwPoHE83hq2w7rQAAEQCAhAhCkhHBZfrDp7Bxi7Sz3AxoEARCwgAAEyQLIqTXB60lUjW49KbW6cTYIgAAIqEMAgqSOL6JbQnN0Pp/5elL08/ANCIAACDiIAATJQc6CqSAAAiCQzQQgSNnsXfQNBEAABBxEAILkIGfBVBAAARDIZgIQpGz2LvoGAiAAAg4iAEFykLNgKgiAAAhkMwEIUjZ7F30DARAAAQcRO3oJ7gAACVpJREFUgCA5yFkwFQRAAASymQAEKZu9i76BAAiAgIMIQJAc5CyYCgIgAALZTACClM3eRd9AAARAwEEEIEgOchZMBQEQAIFsJgBBymbvom8gAAIg4CACECQHOQumggAIgEA2E4AgZbN30TcQAAEQcBABCJKDnAVTQQAEQCCbCUCQstm76BsIgAAIOIgABMlBzoKpIAACIJDNBCBI2exd9A0EQAAEHEQAguQgZ8FUEAABEMhmAhCkbPYu+gYCIAACDiIAQXKQs2AqCIAACGQzAQhSNnsXfQMBEAABBxGAIDnIWTAVBEAABLKZAAQpm72LvoEACICAgwhAkBzkLJgKAiAAAtlMAIKUzd5F30AABEDAQQQgSA5yFkwFARAAgWwmYLUgDQ4OBoPBf/zjHzqoQ0NDwWCwv79fV05Zl8tlWo5CEAABEACBrCFgqSAdPHiwvLx8165dGzZs+MMf/vDbb78Rx0Ag4PF4du3aVV5e3tzcbIRroyChaaM7Ml0C5pkmrKvfRuA6S5DNcQLWCdJPP/1UUlJy584dIv773/++ra1NCDEzM+N2uwcHB4UQExMTpaWlN2/e1HnFxj8YNK3zhQVZMLcAstyEjcBlM5AGAesEaXh4+Ntvv2Xi9fX1LS0tQoiurq7y8nK5/OTJk5ylhAsfEACBTBLQ/cUhCwK2ELBOkOTu3bp1q6Sk5KeffhJCnDlzpq6ujr9tbGw8cOAAZ5EAARAAARDIEQI2CNLIyIjP5/vkk08IcVtb244dOxj3/tkPZ5EAARAAARDIEQKZFaSDBw+6Zz9er5eAXrt2bfXq1SdOnGC+gUCgtraWs42NjU1NTZxFAgRAAARAIEcIZFaQbty48e3s5/Lly0KIb7/99sUXXzx//rwMNxQKsVwJIWprawOBgHwA0iAAAiAAArlAILOCJBMcGhpyu91dXV0P5z4zMzNCiHA47PV6e3p6hBADAwOrVq0aGxuTT0QaBEAABEAgFwhYJ0iHDx/WRQn95S9/IcShUMjj8VRXV5eVlXV2duYCd/QRBEAABEBAR8A6QdI1jCwIgAAIgAAIyAQgSDINpEEABEAABGwjAEGyDT0aBgEQAAEQkAkoLUimO7HG3oZV7lvq6QsXLsiVWNm0lW3JfbSlyzY6ur+/PxgM6narshj+P//5z9HRUfaCBa1PTEz8XfrcvXuXWregae4mEiBgJKCuIJnuxLrgNqzGHiZdcvToUTke3cqmrWxL5mNLl2109F//+tdXXnll7969v/vd7z777DNCYTH8wcHBkpKSYDBoZevHjx9fuXIlPSPodrsvXrwohLC44/KFhzQIEAFFBcl0J9Z4tmFNi1/v3Lmzd+9et9vNgmRZ03HuNpuWbsqV2NVlGx09MDDAu/2Ojo4WFxdPTExY6WghxMOHDzds2ODz+UiQLGu9oaHhq6++ki8Ay5qWG0UaBHQEFBUk051Y49mGVde95LJNTU0ffvhhZ2cnC5JlTce522xy/Ypxll1dttHR4XCY9pgXQty5c8flco2MjFjpaCHE4cOHW1patm/fToJkWesVFRWhUGhiYuLhw4d0VVjWdIyLEF+BgKKCJDuGd2K1bBvWcDgshOjp6WFBsqxpu3abtbfL5G7rHU3j0dOnT2/YsIH2nrfS0ZcvX960aZMQggXJmtZnZmaKi4srKytXr15dXFy8f/9+u646+c8caRAQQqguSPJOrBZvwyoLkpVNW9mW7m/Ari4LIexy9Ojo6BdffFFTU/PWW2/duXPHMvh3796tqKigYAoWJGta/+WXX+rr63/55RfCvnbt2lOnTlnTtO56QxYEdAQUEqQFd2LN3DasxqZ1I6TMNa3zB60t27XbrCxIVnZZt+WulU0z/+rq6ubmZsua3r9/f0NDQ8/s580332xubu7v77esde61EOLgwYN//vOfbWlaNgNpEFBrhLTgTqyZ24ZV1zRdGfLdOXNNG69CK9vStW5Ll41b7lpD4MaNG/KrIN955529e/da07QQorm5efvcZ/Xq1Zs2bTpx4oQ1rd+6dYte1kzeP3DgwDvvvGNN07rrDVkQ0BFQaIQkW2a6E6vF27DKd2crm7ayLZm5blBojRk2OnpgYGDlypU3btwQQoyNjXk8nm+++caaXuuw85SdNa339/evXLmSAjpGRkY8Hs/FixetaVrXcWRBQEdAUUGKthOrlduwyoIkhLCyaSvbki8I67tsr6NPnTpVWlq6bdu20tJSfg7JevgsSJZdZl999ZXb7a6urna73fxyMus7Ll97SIOAWlN28ftjcnKSosLiPyVdR1rZtJVtxeBjoxkWNB0Oh8fGxuhNKDIEC5qWm9OlLWg9HA6btmJaqDMPWRDIEAFFR0gZ6i2qBQEQAAEQUJYABElZ18AwEAABEMgtAhCk3PI3egsCIAACyhKAICnrGhgGAiAAArlFAIKUW/5Gb0EABEBAWQIQJGVdA8NAAARAILcIQJByy9/oLQiAAAgoSwCCpKxrYBgIgAAI5BYBCFJu+Ru9BQEQAAFlCUCQlHUNDAMBEACB3CIAQcotf6O3IAACIKAsAQiSsq6BYSAAAiCQWwQgSLnl77T39uLFi9u3b6eXOAghPvnkE3oldtobQoUgAAJZTwCClPUuzngHt27d+uabbwohLly44HK5/v73v2e8STQAAiCQjQQgSNnoVWv7NDw8/OKLL3700UerV69uaWmxtnG0BgIgkD0EIEjZ40sbe3Lu3DmXy/XWW2/ZaAOaBgEQcDoBCJLTPaiE/adOnXK5XK+88srk5KQSBsEIEAABBxKAIDnQaYqZfPPmzVWrVv3Xf/3Xq6++iogGxZwDc0DASQQgSE7yloK2hsPh119/va6uTghx7do1l8v1zTffyHZenv3IJUiDAAiAgCkBCJIpFhTGS6C5uflf//Vfx8
bG6AQKbeDs//3f/61evbqtrS3e6nAcCIBADhOAIOWw8zPc9a+++qq8vPytt96CIGWYNKoHgSwhAEHKEkcq2I3Lly/fv39///79ECQFvQOTQEBBAhAkBZ2SVSZBkLLKnegMCGSSAAQpk3RRtxAQJFwFIAACcRKAIMUJCoclSQCClCQ4nAYCuUcAgpR7Pre2xxAka3mjNRBwMAEIkoOd5wjTIUiOcBOMBAEVCECQVPACbAABEAABEBAQJFwEIAACIAACShCAICnhBhgBAiAAAiDw/wAWixF0G6Ig7wAAAABJRU5ErkJggg==)\n\n![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAjAAAAGkCAIAAACgjIjwAAAgAElEQVR4Ae2dD2gVV77HB4KoiLQFkSClBKG3xJcaLil9NQnhpvjIlmxrpc8lKxJEpChuVkK1Vn3WbLVdsYiVh+0TRPpsFWn21ZdINq8vrAkaNd11sWtfm7QiFakhGKOoJDYml/O4OeZ4MvObycydf2dmvhdpz5w5f36/z2/u+eacOXdGY/iAAAiAAAiAgAIENAVsgAkgAAIgAAIgwCBIuAhAAARAAASUIABBUiIMMAIEQAAEQACChGsABEAABEBACQIQJCXCACNAAARAAAQgSLgGQAAEQAAElCAAQVIiDDACBEAABEAAgoRrAARAAARAQAkCECQlwgAjQAAEQAAEIEi4BkAABEAABJQgAEFSIgwwAgRAAARAAIKEawAEQAAEQEAJAhCkgMKwcuXK5dKnvr7++PHjNvu+PvGxWdhmMW7PgwcPeHm5C90pmw2GWEw2WE7nYVKkOeThL6qAgFIEIEgBhWPu3Lma4bN8+fJpuz948ODMmTM7OjqmLemoALfn/v37jDFdF/IpR22GVVg2WE47tSfqHJz6i/IgoBoBCFJAEeEDZUtLy+jo6N27dz/99FOe88knn1hbsHTpUk3TPBek8+fPnzlzZmxsjDGm68LNmG7ti09nZYNlv5x2p+PgpimnXaM8CIAAY3jad1BXAR8029raRIcffvihpmnpdJox1tvbu2zZsrlz586ZM6e0tPTIkSO8WFNT07x58zRNe+mll/bt22dRUjTb2NhYW1t7/fp1xtilS5dqa2uXLVvGz544caK2tvbTTz9ljL3xxhu1tbUPHjwwdsFN7e7uzmQyc+fOLS8v/+tf/yraF4k33nhj2bJlFy9e5MVeeuml7u5ucZYxdujQobKysrlz5z777LNNTU2jo6P8bG1t7RtvvHH48OGnnnoqk8mMjY3V1tb+27/92xdffPH8888/8cQTa9asuX37dkNDwxNPPLFw4cLDhw/zimaUZEESfjHGvvrqq9qpH46CbMfIQW6KG2Dm0bQoZCxIgwAImBHADMmMjMf5RkE6ffq0pmkFBQVjY2MLFizQNG3ZsmUrVqyYMWOGpmkXL15kjNXV1c2cOVPTtLlz527YsMGipDD33Xff1TSNT7z27NnDlwl7e3sZYzU1NZqmnT9/njEmBnFdF+LUvHnzVqxYUVZWpmna008/LdoXiblz5xYUFPBipaWlmqYVFhaKs9yMmTNnvvrqq/Pnz9c0raamhp/VNG3GjBkFBQVz5sxZvXr16OiopmnzJj4rV67khefNm1daWrps2TJu/w8//GDhu/BFGM+XIvk0dO7Eh2OcP3++WTtmHHhTjDELj6xRCCZIgAAIWBOAIFnz8eysUZBu3brFR9ubN28eP35crN2tXLlS07QTJ07wvuV1pNu3b1uU5OUvXryoaRq/O8UVSNO0Q4cOjY2NFRQUCM2QB3G5CzGmHzp0iDE2OjrKh/Lh4WEdC94Cn74MDw8XFBRomsaH7/7+/oKJz7fffssYu3379sKFCzVNO3XqVG5WPvE5cOAAY+zBgwdckDRNu3TpEr+hxbWNz6hefvllTdNOnjxp4bvsi5wWBv/Xf/0XV8HTp09btENysOORBQphAxIgAALTEoAgTYvImwJ8zJKX7LhyaJqWzWZHR0dPnDixZs2aF198kY/XpCBxhTArKQwtKiqaMWPG6OjojBkzXn311RkzZtTV1Z08eVLTtA0bNvBi8sBNDsQ///yzXPL27duifTn/6tWr8uGtW7cYYydOnNA0benSpaLKxo0bNU1bu3atEKSBgQF+lgtSQUEBP+R1xXaPuro6Ic9mlGRf5DRvsLu7e/bs2Zqmff7556JHkiHJgQuStUe8UxIF7xH/BQEQsEMAgmSHkgdl+JglCxIf45555plbt24VFRXxEXz37t2ZTEYMwbodB9YlhZWNjY2apm3evFnTtAMHDtTU1MybN2/NmjXy5gh54LYYiMWEiSuN6ELkixUt3qAsSOLeFWNs27ZtmqatXr1aCBLfT8ElVtO0mTNn8sY5lrq6On4oBMnCd9kXOc0Yu3LlCr8J99577/EGLdqx4MCtMvNI16mMQiaGNAiAgDUBCJI1H8/O8kFKCNLdu3f57Zlt27YdPXpU07QVK1bwzlasWGEUpK+++ooxZl1S2NrV1aVp2lNPPaVp2rfffst3Tzz11FNPPPFENpvlxeQxlA/EvAtrpRFdWBf7xz/+oWnanDlzhIy99NJLmqbxzRp8Ciia4jOkaQXJwnfZFzl98+ZNvlTIZ2a8R4t2LDhYeyR3KsgI34WnSIAACFgTgCBZ8/HsLB+z0un00qVLy8rK+M6FoqKiW7du8b++n3766ZMnT4ptCEePHuV9v/LKK3xHwL//+79bl5Rt5dOCefPmMcb++te/cg3gExReTB5D5S7EeEpOfeQu5BZELTEK8zaLi4vXr19fVVWladpzzz3HbwvlJ0gWvsuWyGl+N07TtFdeeWXp5Gfv3r18m4aRtjUHC4/kTo0oZGhIgwAIWBCAIFnA8fIUH7P4WFxQULBgwYKGhgZ+HyWbzS5fvpyfKi4u5kttQjwOHTrE9wvU1tZal5TNXbt2raZpYuGL997S0iLKyGOo3IUYT10K0v379zds2MB1V9O02tra/v5+3nt+gmThu+yLnK6treV9yf/t6uoyo23NwcIjuVMBUGizYI4ECICANQEIkjWf4M4+ePDAbAgbHR39+eefxWqbRcm8zdV1kXc7uorZbLa/v1/8Akl3No9Dr3w3a2daDp57lAcEVAGBuBKAIMU1svALBEAABCJGAIIUsYDBXBAAARCIKwG1BOnKlSsdHR1///vf44obfoEACIAACJgRUEiQdu3aVV1dvWnTptdee+23v/3tL7/8YmY08kEABEAABOJHQBVB+v7770tKSu7cucMR//rXv25ubo4fbngEAiAAAiBgRkAVQerv7z937pywsqGhgT/rTOQgAQIgAAIgEG8CqgiSTPnatWslJSXff/+9nMnTqcmP8RRyQAAEQiEw+aXE/2NIIOArSjlBGhgYyGQyH3/8MQkilUqR+cgEARAIiwC+lWGR97vf4COrliBdvnx5yZIl4vV0RtzBAzLagBwQAAGZAL6VMo04pYOPrEKCdO7cuRdeeEE84pOMa/CASDOQGTkCTU1M01gmk
/vX2Rk585U2GN9KpcPjwrjgI6uKIF2/fj2dTp8+ffrh5Gd8fNxIMnhARhuQEzkCTU05HRKfTIY1NYkjJNwSwLfSLUFV6wcfWVUEac+ePbobgn/4wx+MYQoekNEG5ESLQGfnFDXixjc1QZM8C2M+30rMWD3D72ND+UTWnTmqCJJNL4IHZNMwFFOWALlGN60gYcC0H1DH30ovZqy9vb0bN25cvnx5fX39+fPnubW9vb379u2zb7lFybt377733nurV69ub2+3KBbvU44j6xoHBMk1QjSgNgHN5Bo3y2csN3nCEp/9qDobtryYsZ46dWr+/PkHDhxoa2s7cuTI/Pnz+fvpOzo6li5dat9yi5Ll5eXbtm1raWkpLS3ljVsUjuspZ5H1goLJl9WLpv1oI3hAfniBNoMkQE6GyFGRW0WeIhvh0oW9Es6+lfnNWKUrZmxsbP78+R0dHSKvvb39+eefZ4wJQcpmsx0dHSdPnuzp6RHFbt682dLS0tbWJt6HYszhhc+fP88bZIz19PSUlpaKRhKVcBZZL9BAkLygiDYUJkBqCTkqcifIU2QjmEhxYs6GLbOZqVm+4dJqa2tbsGCBITuXwQXpwYMH6XR65cqV69evf+655959913GWG9v78KFCzdu3Lhq1aqFCxcODw8bc0SbR48eXbVqFT/MZrMFBQXibWSiTBISziLrBREIkhcU0YbaBBwph9nAqMt3NJFSG49b65wNW6S2kzRN7Pr8889ra2vJk1yQLl26tHv3bl6gra2tpqaGMfbpp58uX76cZ548ebK/v9+YI9r85JNP1q5dKw5nzJghJlUiMwkJZ5H1gggEyQuKaEN5AnzEy2Ryv0ay3vNtc8C0P5FSno1bA50NWyRfkqaJXSdPnkyn0+RJsWTX09OzefPmurq6hQsX8rtKAwMDxcXF8+bNW7lyJf+xozFHtHno0KE1a9aIw4KCgrGxMXGYnISzyHrBBYLkBUW0ESMCNgdM3YRJADDLFwXil3A8bDmasRp49ff3FxQU3Lx5U5y5efPm008/PTo6ygWpvb19wYIFn3/++cWLF9va2l5++WVR8tKlSx9++GFhYeHx48d5pjGHMfbFF1+sWLGCF7h169bs2bNFC4lKOI6sazoQJNcI0UDsCNgZMEndcrLyFB9q+Qxb9mesFKc1a9a8+uqrd+/eZYwNDw8vW7Zs5cqV4h5SY2NjfX09r7d58+bCwkLG2O7duzdu3MgzV65c+cEHHxhzRFc3b96cO3fuwMAAY2zfvn2iNVEgIYl8IusODQTJHT/UDpZAYD8PmnbAJAXJycpTsOD87C34YevBgwdvvvnm7Nmzi4qK5syZs2rVqvv37wtB+uGHHwoLC5ctW5bJZJqammbPnp3NZm/dulVWVlZZWVlVVVVZWXlr4qPLkSEdPXr0mWeeqampKS4u5sokn01IOvjIQpAScmnFwU07E5cg/VTNniB9l/sKftjivWez2fv375vtfyNPPXjwQLc9wZgjXMtms8PDw+IwgYngIwtBSuBlFg2XdZMhcjWMnKYE6d60E6kgjQmrr+CHrbA8TVq/wUcWgpS0aywa/honH5pGPKU7dEGKBk2frQx+2PLZITT/iEDwkYUg4eJTjgA5GTLbrp3AXW2qBSz4YUs1AnG1J/jIQpDiei1F2C9yawA5GSKlK8KeR9P04IetaHKKntXBRxaCFL2rJPYWk5MefktJ5zspXboyOPSbQPDDlt8eoX1OIPjIQpBw7flIQLcxwWZPZpMh/hhT0QjesydQhJsIftgK19/k9B58ZCFIybm6gvbUuDHB+pk9wj5SkPhkCLvaBCV1EnkMW/n9pSK77Pf7kHhfXRMfud9EpfOIrEs+ECSXAFGdJkDe3SGVhqyft5iRrSHTVwJOhy33wQ3gfUiMsYsXL86fP//w4cO+0lO5caeRde8LBMk9Q7RAECDv7tgXJMZym7wzmdw/s/11RK/ICoOAo2HL5V8qjLEA3ofEGDt48GBRUdFLL70EQQrymoIgBUk7QX2RGxMYy6kLPjEj4EiQ3P+lEsD7kBhjXV1dw8PDa9euhSAFeblGbHhwdOkHyRF96QiQkyHyr2NdRRxGjoCjb6XZXyRm+UYaAbwPSXQKQRIogklAkILhnLheSEEi/zpOHJrYOexIkMgLw9FfKgG8D0mECIIkUASTgCAFwzmJvbi/d51EahH02b0gOfpLJYD3IYkgQJAEimASEKRgOCe0F2xMSELgHQkSY7k39mYyj8Hk8Xsyv9+HJIyDIAkUwSQgSMFwRi8gEFsCTgXJ/RbKAN6HxKMFQQr4qoUgBQwc3YFA3AjkIUieIPD7fUieGBnpRoKPLAQp0hcMjAeB8AkEP2yF73MyLAg+shCkZFxZ8BIEfCMQ/LDlmytoeAqB4CMLQZoSAByAAAg4JRD8sOXUQpTPj0DwkYUg5Rcp1IoDAfeP+IwDBdc+BD9suTYZDdgiEHxkIUi2AoNC8SPgfvNx/Jjk51Hww1Z+dqKWUwLBRxaC5DRGKB8HAuSjAciHCMTBW599yGPYGunbP9RSdK+77l533Ujffp8NRPN5Esgjsnn2NFktBEE6c+bMZO9T/j80NPQ36XP37t0ppycOggdktAE5MSBAPhoAgpRfZJ1+K7kaDbUUiX/3uuvy6xq1fCXgNLLujQlakA4ePFhZWUnaffjw4UWLFqUnP2fPnjUWCx6Q0QbkxICA2aM8RT5uL9mPstNvpdAhOWG/O1FS9/a8u3fvvvfee6tXr25vbxdleGJsbGzt2rU//PCDyD9//vyhQ4fEoU+J/v5+XcuOLOnt7W1sbNS1wA97e3s3b96sO0Vm6so4OnQaWUeNk4WDE6Q7d+6888476XTaTJAaGxuPHTtGWikygwckukYiTgTIyZBYx8PtJUextv+tHBu8IESoYnCWlnsbidbaXcgzHa3dGd+eV15evm3btpaWltLS0s8//1x2YXR0VNO0F198UWQePXq0rs73admcOXNEjzzhyJLvvvtuw4YNuhb4YUdHx4IFC3SnyExdGUeH9iPrqFmLwsEJ0s6dO/fu3dve3m4mSDU1NT09PUNDQw8fPjSzOHhAZpYgP9IESEHi63hClmQHyfJygSSn7X8r73XXce0RaqQxbUvfk04Fyfj2vPPnzz///PM8Cj09PaWlpXJEuAyUlpbu2bOH5+sEqbu7++TJk9euXRO1stlsR0fHyZMne3p6GGO3b9/+xz/+8e2333Z0dPAyxio3b95saWlpa2sbHR1ljF26dEnTtI6Ojmw2K5q1tkTX5u3bt+UbHO3t7adOnRobGztz5gzXnlu3brW0tHR3d/P2eWZ/f//Jkyd7e3tFp/z1TsJBnS86s+Va9iMr13KTDk6QeFS6urpIQRofHy8uLq6trV2yZElxcfH27dtJr1KTH/IsMkHAPgGzaRBuL9lnyEvaH7bE9IjPjcR/Rb7Nro1vzzt69OiqVat49Ww2W1BQYJSB3t7euXPn8oU7WZCWL1+eyWQaGxufffbZEydOMMYePHiQTqdXrly5fv3655577t133+3o6Hj++efT
6XRVVVU2mzVW6e3tXbhw4caNG1etWrVw4cLh4eEDBw5omrZ27dqxsTHhFxck0hJjmx0dHfPmzeMvyS0vL3/11VfXrl2byWTmzZvX0dExe/bsysrKtWvXLly4cPfu3YwxnvnSSy81NDQUFRXxNclsNrt06dKXX355/fr1Tz/99JEjR2RfvvvuO53ZwlTGmP3IyrXcpIMTJG6lmSDduHGjoaHhxo0bjLGBgYGqqqrjx48bHQsekNEG5MSGAPkwcnEbSeemWb6uWAIP7X8rxXaGLX1PCjWqGJzFBcnp1gb5yaeffPLJ2rVrBfwZM2bwaQrP4TLAGPvggw/4wp0QpLa2NrGU9/PPPz/11FPZbPbSpUt8iGeMtbW11dTUdHR0zJgxg++0Iqt8+umny5cv592dPHmS3z3SDBeNmSVkm0KQjh49unTpUt744cOHuSAJe06dOpVOp7kgzZw589atW4yxq1evPvHEE9ls9sSJE+Xl5bzu1atXZ86c+T//8z+iLmk2L5xoQRIIeGLXrl1vvfWWLjMUQEYbkBNvAuTqHLmOF28O9r3LQ5Bauwu5IAk1GmopGhu8YL9TxpgsSIcOHVqzZo2oXlBQ8Mknn9RPfA4dOiRkgDFWVla2Z88eIUiNjY3PPPPM8slPQUHB9evXGWM9PT2bN2+uq6tbuHDh0qVLOzo6/umf/om3T1YZGBgoLi6eN2/eypUrv/rqK17SQpB0lpBtCkFas2bNvn37eJs3b97kglRYWMhzzpw5wydSHR0dNTU1PJMxNmfOnG+//fbNN9989913ReZTTz21b98+4QtptihsP7KiisuEKjOka9euNTc3C2d27Njx9ttvi0ORCB6Q6BqJhBAgBYlcx0sIkGnddPqtFAt0cmLaXowFZEH64osvVqxYwcvcunVr9uzZ3d3dRyc+3d3dsiDx5bJt27bxTQ3btm2rr6+/JX1GR0fb29sXLFjw+eefX7x4sa2t7eWXX+7o6BBzFLIK7/rSpUsffvhhYWEhX+CxFiTZErJNIUgbN27csmUL7+KHH37ggiQ2NciC9PLLLwtQs2fP7u/v37hxo7xVb+7cuR9//LHwhTRbtOA0sqJi3omQBembb77hc9u+vr5FixZduXKFL9mVl5dj23feQUVFlwTMbi+5bNZ+9WhtOnc6bImtDUKQHO2vExhlQbp58+bcuXMHBgYYY/v27auvrxfFGGOyIDHG9uzZU1BQwAXpzJkzzzzzzO3btxljFy9eLCwsHBsba2xsFC1s3ry5sLBQFiSyyu7duzdu3Mg7Xbly5QcffMAY0zRNvoFkYQnZphCknp6ehQsX8rW49evXWwjSjBkz+NaMtra2Z599ljF2+vTp55577v79+zxdWFj4v//7v0KQSLMFOqeRFRXzToQsSKtXrxYTo2PHjqXT6fr6+nQ6feTIEdKl4AGRZiAz9gTI20vBeB26HDp1M49v5Ujffv6YhqGWovzUSLdkxxg7evToM888U1NTU1xczJVJOKITJMbYiy++KLZ979mzp7Cw8NVXX12wYMGpU6cYYz/88ENhYeGyZcsymUxTU9Ps2bPlQZxLmq7KrVu3ysrKKisrq6qqKisruXhUVlbOnj1b3vBmYYnRDCFIjLEDBw7MmzevsLBwxYoV8+fPl3d4yzOksonPK6+8UlRUdOnSJU7g3XffXbBgwdKlS4uKis6fPy+LK2m24JZHZEXd/BJBC1J+VopawQMSXSMBAgEQIG9WkauIARhjswt1vpXZbHZ4eNim2XIxsuL9+/flrXpyecYYWeXBgwfyZgq+QU5X0eKQbJPvUBB7u0+dOiXmN2ZNGSGYtcxbMJrN84OPLATJLKbIB4EQCJA3qyBIIURCpS6/++67BQsWfPLJJ0ePHn322WfJHch+2AtBmoZq8ICmMQink0QggFs7hn3Cj/jK+QGY4Siq+FY6wpVf4d7e3n379r333ntiIS6/dhzVCj6ymCE5ChAKJ5eAJ7d2ptUScjIkr+N5Yoa3UQx+2PLWfrRmRiD4yEKQzGKBfBB4TECWBJFLioc4a0zY0RKyTbGO54kZRsNc5kw+PgX/jyEBl9eG0+oQJKfEUD6JBIQkyM6T4iEXkNP2tcRCt9ybIZuENAioRgCCpFpEYI+KBORbOLJ9ZvlyGZ52pCVcvTKZ3NOwm5oeN2bWnVn+45pIgUAUCECQohAl2Bg2AXIyRE56zCw10wyzfLId92aQzSITBBQhAEFSJBAwQ2kCpBKQkx4zN8gWHEkaY7nZkjxh4n05MsPMPOSDgAoEIEgqRAE2RICAxa0dO9Z7pSUuzbBjKsqAQFgEIEhhkUe/0SNgdmvHpideaYlLM2xai2IgEDwBCFLwzNFjcglAS5Ibe3hugwAEyQYkFAEB7whM+9tY77pCSyAQMQIQpIgFDOZGmoBXq3aRhgDjQcCMAATJjAzyQcBjAuSeOnKzg8cdozkQiAgBCFJEAgUzo0+A3J8NQYp+YOGBZwQgSJ6hRENJJmDnzpDZb2DN8pPME74nkwAEKZlxh9deErB5Z4icDJHreF4ah7ZAIDoEIEjRiRUsVZIAqShce3TTJlKQyHU8JR2FUSDgOwEIku+I0UG8CZCKwudMmcxj1zOZRw/+MWY+LoQUCCSbAAQp2fGH964JkHeAOjtzD+rWffgMCb+N1WHBIQgIAoYvjTijZCL4NxgqiQFGKUTAbCFOnglxc8mSCnkCU0AgbAIQpLAjgP4jToCUGU1jnZ2EY8ZpE1EIWSCQVAIQpKRGHn57R8C4y47fMdL1QG5/0JXBIQgkmQAEKcnRh++eEdDdGSKnTeT2B/6WI01jmUzuHzmv8sxKNAQCahOAIKkdH1gXWQLGaZPx3XpcjeS7TeTUKrIMYDgIOCMAQXLGC6UTQkD3E6L8vNZNm4yNkIt45OzKWBc5IBA/AhCk+MUUHrklYHNy47YbRq/RQZDcg0ULESUAQYpo4GC2XwQCmLWI6Rc24/kVRbQbTQIQpGjGDVb7RoDcWeDhrEWefnFl0t1bIhXRN3fRMAgoRACCpFAwYIoKBMx+KmSW78hmndhwndOpHamIjnpBYRCIKAEIUkQDB7P9IqCTB96NTkjy7tsoNrpJEnbZ5c0WFWNAAIIUgyDCBS8JkIJkFJL8uiSnWfzBd5lM7vF3uuW7/HpBLRCIKAEVBenMmTNmNPEsOzMyyPeQgHybh03shfNKJ0i182r65SEBNAUCoRBQTpAOHjxYWVlpxgKCZEZGzhebuLz6u15uPKx0wE5xkfB81kIKUpzCFNblgX7jQUAhQbpz584777yTTqchSG6uLf/+undjlcu6cXIqTr64DCuqg4COgEKCtHPnzr1797a3t1sLUmrio3MDh5wAufhD/lUeIWLxc8qn6VeEYgpTQYAkoJAgZbNZxlhXV5e1IJFuIJMTIBd/oi5IUXcq4MVGfBdAILoEFBIkDhGC5OZiIjdxMUa8vdRNLwHXdeSUaqO/+gt0qhEL+OpCd0oRgCApFQ63xpCTIXLJy21PAda375Rqoz9JnnQnQJxTulKN2BTjcJA8AhCkWMWcHOz
IJa8IuW3TKQVHf5I86U4o4VCQWCgc0Kk6BCBI6sTCG0ti+TevHacUHP0dLTZ6E34nrShIzIn5KBtDAhCkGAaV/+Xr+W9owiU1rVMKjv7kZIicl4TCVkFioXBAp+oQUE6QrNHgh7HWfJJ8VsHRnzSJnJeEEjjSPHX0MhQm6DRcAhCkcPknrnf/9nSRw2voo7+dxcawLgI1iYVFA/2qQACCpEIUkmKD36Oz3+3nF6dpFxvza9aTWmoS88Q1NBJFAhCkKEYtkjbzcVk3QyL/SHfjnt+jv85+nanWZ3WFFTn0m5gibsKMSBCAIEUiTHEwMpNh/J9wJnLbLqznE9ZnhddIgAAImBGAIJmRQb7HBDQtJ0i6D59S6DLVPOQzCZ1tYoZnfVZXC4cgAAIkAQgSiQWZ3hMg34Wqm1V436t3LZL7I4QgWZ/1zgq0BAJxJgBBinN0lfKNfB0qX7VTyk4zY6x/tWN91qxN5IMACMgEIEgyDaR9JCBu+Is+jHeVxCkFE2IyJGzjHvGlSHL+R67jiepIgAAI6AhAkHRAknsoBINcfXLPhQ/ouj1dPvXl3lpjCzpB4ouNwn5SkMRZY2vIAQEQMBKAIBmZJDFHdy+HHF7dcwmmF/d2ki3Igi3UqKnpcdlIe/fYDaRAIDwCEKTw2CvTM5ZKShkAACAASURBVLmypJsQeGWsbobkVbN+tyPEhtuvablXTMlqxFjuUCxCGs/6bSHaB4EYEIAgxSCIbl0gV5Z8EiS3toZR3yjYXG90gqTsixDluV1nZxgE0ScI2CMAQbLHKdalsEPMOrxGweZDvE6QjLpl3WwwZ8Xcjnfn02JsML6gl9gTgCDFPsTTO0hOhtQcXqd3xocSRsHmxHT5Rt3ywRZnTZJBJMPtrF2UBgF/CECQ/OEaqVbJESr44XWkb/9QS9G97rp73XUjffvNEJLFfF2VIvnwHoWRas48yCCS7ghHkACBEAlAkEKEr1DXoS/sjPTt31K3X9NYRcmFipILre/nZMkIiCzmt/HkCM4ViO9iUHYLg24OJ3ia5YsCSIBAKAQgSKFgV7FTvrwT1gNPt9Ttryi5MNRSxP9VlFzYUkdMkozFKkoukI/I093gcUncb81zaZ5ZdVJKyXU8sxaQDwJBEoAgBUkbfREExgZz8yGuRhWDsyY2VGut3YVb6nJzJrF2Z1aMS5coxjsgB2KibydZ4Qq2E0sflyU5kOt4j+sgBQLhEYAghccePU8QuNedU6OcJk2qkca0LX1P6gTJrJimMVm3BFSsSnEUEZ3biTgikSgCEKREhVtFZ4daijSN5f6b+7Hp438inxstDuUyOemauPM01FIk+4ZVKSONsBZjZUuQBgFrAhAkaz446zsBvk8hNx/qe1KITcXgLL6OJ7Y2mBXjd5VEMW6uo1UpX3fo+Y4PHYBAjAhAkGIUzGi6IpSmtbuQC1LF4KyhliK+jjc2eIG7ZVFMt6/B0Q5srGhF86qB1fEkAEGKZ1wj5xWf6OQmRt2FXI0sdtlpTNNtxstvxwG5skduBIgcTxgMAlEkAEGKYtRiaPO97rrW9+u0TGdFyQWzfQqMsXvddVv+4/Vpi9kERK7sQZBs0kMxEPCcAATJc6RoME8CHYN/rBicda+7bqilSLeNW24xwzJ/7nt92mJyFbO02U48s3yzdpAPAiDgCQEIkicY0YgHBDpZZ4ZlrBuyU8a6BfksORki1/HkWtOmsUtiWkQoAAIkAQgSiQWZIRCwIzYZlulknr1BgRQkch3PPg7skrDPCiVBQEcAgqQDgsPQCDTlXnEnvYGVMkRjHl+x3uoHObsiZY9yDnkgkHQCHn+9/caZSqX87gLth0UgwzKt3YUWT/u2M4XKw/j8duiRHZGzKwgSyQqZIGAkAEEyMkFOCARG+vZX/O6P1k/79na9zg8nzXZDmOX7YQPaBIHoEoAgRTd2sbI89wSgTKfF0759mh55C1GeDMlbG4zPI/e2X7QGAvEgEKggXb9+vaOjo6+vj2Q3NDT0N+lz9+5dYzEs2RmZBJNDvhbPk67FY7w1ponnqxqf9q3+9IixiZtgE3fB5FtT/J1J3r4OwxPyaAQEVCMQnCC1traWl5dv2rSpurr6o48+MoI4fPjwokWL0pOfs2fPGstAkIxMAsjhj+2xXk/L2wz+GO/cz12lJ6san/bt+XaGvA22rsilSEyJxHOM5MmTdQs4CwKJJRCQII2Pj6fT6StXrjDGhoaGSktLf/rpJx30xsbGY8eO6TJ1hxAkHZBgDo2vxSOf65OfMfwx3uJBdkKWxOO9GWORWK8T7vMpke7p2hAkwQcJEDAjEJAgnT59urq6WhjR0NDw2WefiUOeqKmp6enpGRoaevjwoe6UOIQgCRTBJMR6Wu75cpPvKzKup7kx5tH0q6lJSBFfu5Of9h2J9ToBwWwLg1m+qIgECCScQECC9OWXX65fv16w3rp1644dO8QhY2x8fLy4uLi2tnbJkiXFxcXbt2+Xz4p0avIjcpDwlYDZa/F0b89zY4MQJPH6Cd3TvqM1PZLvJMlYyJ8oyQWQBgEQCEiQmpubN2zYIHBvn/iIQ8bYjRs3Ghoabty4wRgbGBioqqo6fvy4XICnMUMyMrGbI+/66rT7sAOxbiZPX/jDtvlb9ez2Pl25it/9Mfe+ie5CoUZiVTBa0yMzQSJ/ojQdFZwHgWQRCEiQWltb161bJ9Bu3bp1586d4tCY2LVr11tvvWXMhyAZmdjKkXd9McbErfbpKou3EInpi3E9bbo2bJ3XmGb2tO8AtjPkJdZWfuXL26pNnAOB2BMISJB6enoqKysFzXXr1rW2topDxti1a9eam5tFzo4dO95++21xKBIQJIHCQYJcLbJ3k10IkrzpYEvfk7q35zkwxqSoxrTcb2MHZ/2573X5ad8BrNf5JB4ePgDChBmyQSBuBAISpGw2W1lZ2dXVxRj78ccfFy9ePDg4yBj75ptv+vv7GWN9fX2LFi3i2/AGBgbKy8ux7duza41cLbInSNwG3S673OuIfvdHz8yTNtEZJ0N+r9e5EGsPAaApEACBHIGABIkx1tPTU15eXl9fX1ZW1t7ezvGvXr1aTIyOHTuWTqfr6+vT6fSRI0fI+GCGRGKZJtNsd5dZvqE5/va8ipIL4u15GtM8fOo2nwbpJkMjffv5LaV73XUWr0cyGOssw7VYO+sOpUEABCwIBCdIFkbYPwVBss/qcUlyMkRODR7X0adG+vbzx57y9bQm1jTtu4v0TZgf8+d8y4L0aOudxrgKtr5fd6+7zryB/M+YibJZvv2ePL8vZb9rlASBiBKAIEU0cE7MJgWJnBo4atXG2yJstsfX5eTVOb5IyLfz5X4CVXJBbLqz2abNYiQbh2JNdOXTfSmiJ2SBQIwIQJBiFEwLV/wZIDWmTfsGIwujxClZkMRPceVtFN7+FFf069MWbVLPSOWTLUEaBEAAgpSYa4APk7oH2rjzvonlHq/gro1cbS5IvKkAfoqrM9hzsSYnnxAkHXYcgoCRgAejibFR/3JwD8k/tvm1bOc1r9O2zLdI8JtSdh5tN22DTg
t4K9Zm95/M8p1ai/IgEFcCEKS4RjY4vzIs42bhTrfFjm9nyD24YfLReT79FNc/QORkiFzH888GtAwCUSQAQYpi1NSyuZN1ulm444IkdjQ82l/X1CSeDSEeJtT6ft3Y4AW1nKesIQWJXMejaiMPBJJLAIKU3Nh76LmbXeA6QeIvm9CamnIbvgdn8XfI+rfLzkMIclOe35eSG0caBOJKAIIU18gG7VfeN5N4RXmOlWGZP/e9XvGnX8k/xfXvt7E+kfL2vpRPRqJZEFCKAARJqXBE25j8doHzW1DiZ7billLF4Cz5p7jRRgPrQQAEbBCAINmAhCL2COS3C1wnSHzCJG4p2esZpUAABOJAAIIUhyiq40MeC3ca02T50emTOq7BEhAAAb8JQJD8Jpy49p0u3MmCZNzgkDh8+TqMR+flSw71FCIAQVIoGPEwxdEucK5AYkcDpkf5XQPY1JcfN9RSjQAESbWIxMEe+wt3XL34jgZMj/KLPfmTW/K3UPm1j1ogEBgBCFJgqJPVkU1N4vsg+IMeeBUxW0oWLxfekj+5hSC5IIqqoRGAIIWGPvYd23mJX4ZlRLEMy/Alu9iT8dZBs0fkmeV72ztaAwEPCUCQPISJpqYQsPP4BiFIuptJUxrCgSUBcjJEruNZNoOTIBA+AQhS+DGIsQXTLtxpTONrdFGZHim4mY0UJHIdL8ZXGlyLBwEIUjziqK4X1rvA+Z5vxphQJnU9YblX5GYyjw3MZHI5KnyUNUwFOLAhQgQgSBEKViRNtXh8A99i18SaREJlD8lFMHJ2EooXeHReKNjRqbcEIEje8kRrBAGzhTuuQ1G5e0QugqkjSAR3ZIFA1AhAkKIWsWjaS26fE3u+rZf1FPHYbNOaWb4iZsMMEIgQAQhShIIVYVPFZEj2gW9k4Bvt5Hw10+RkiFzHU9N+WAUC6hOAIKkfo5hYaNwFzgUpEtMjNrGjwbiFgVzHi0nA4AYIBE4gdoKk4LbcwIOqbIe6m0lCkJQ1WGcYNrPpgOAQBLwlEC9BwoDh7dXhQ2taRtN9Mk3STmofevS2SWxm85YnWgMBmUCMBIlczicX/mUASAdIoKmpaUKNmnK/htU6J/7lBKrJuBYWoFXoCgRAQBECMRIkcjkfgqTIhZa7ByPUqFOaJHVqWi4fmqRMoGAICIRGwANBGh8fD8z8VCpl2pfZ9luzfNOGcMJ7Ap2dXIQyE7MiSY9yySauSZ2dnd53jBZBAASiQ8CZIK1bty6VSqXT6fb2duFjcXGxSPudsBIkcjJEruP5bSXaNxDIZPi9o07NeBNpUpAy8mN5DC0gAwRAIPYEHAjSqVOn1q5dOzIycv369UWLFv3nf/4np6O0IJHreLGPqmIOTi7WaRO3jnTTI37I+P+wcKdY6GAOCARKwIEgvf7669evXxfWLVq06C9/+QtjTBVB4j8Vkf/KVufhl4JaUhOTM6RHq3NTRWlyHS+j8Y3gnayzk2H5LqnXCvxOMAEHgrRz504xK2KM3b9/P5VKXb582UNBun79ekdHR19fn1lErJbseB1syzVjF2r+5D0kUpAereNlOjP8Qavi90kZloE4hRo3dA4CgRJwIEgPHz5MpVK1tbXCwMuXL6dSKa8EqbW1tby8fNOmTdXV1R999JHoRU5ML0hy6cSmlfx18OTCXdPUfQ2Pd9nxR9txEeKh489d5c8WqhictX3wV3/ue32kb7+dwI707R9qKbrXXXevu85mFTvNogwIgIB/BBwIEmMsm83qpi/Dw8O///3v3ds3Pj6eTqevXLnCGBsaGiotLf3pp5+MzUKQjEz0OQr/OnhSk/gaXefELSX9nm8uP7IsjfTtH6l7MvfDpUzuX6Zz+pW9kb79W+r2axqrKLlQUXKh9f2cLOlB4RgEQEAxAs4Eyb8d3qdPn66urhZwGhoaPvvsM3EoEqnJj8hBYgoBclchuf9wSrXgDiY16fFdJOPmOvFCCi5LI3VPjpXMGmop4v/GSmZ1/G6WmDyRt5221O2vKLkgqlSUXNhSZ2teFRwI9AQCIGAgYCpImUzm8uXLcvnTp097tTonN8vTX3755fr160X+1q1bd+zYIQ5FAjMkgYJOkLsKVRIkxhjXEtp+KfeR5HRqXI0qBmfxt8q2dheO1D2Z+zexdsdvMsm3nbb8x+tcjeQqW+pycyas3UmAkQQB5QiYCtLWrVtTqZS4l/P73/8+lUrt3r3bJw+am5s3bNggGt8+8RGHIgFBEijohNmvgM3y6Vb8zbUpSIyxe911YyWztv/pkRRxQdrSN6FGk4Kks/XPfa9rmc6KP/2KFxZVIEg6UDgEAQUJmAoSY+zs2bOpVKqqqmrx4sVlZWXXrl3zz4HW1tZ169aJ9rdu3bpz505xKBIQJIGCTpCTIXIdj64fRK58f8i6v6GWIqZpQy1FsrpoLJfD843Vc4U1Rlbh+cYqyAEBEFCEgJUgMcYOHz7M79q0tbX5anFPT09lZaXoYt26da2treJQJCBIAgWdIAWJXMej6weRa1+Q+HaGkbont/Q9KWvSn/8jd1eJ3KfAtzPk5kNSlYrBWa3v11WUXCCrBOEz+gABELBBwFSQhoaGqqqqUqlUe3v78ePHU6lUfX29jQbzLJLNZisrK7u6uhhjP/744+LFiwcHB41tQZCMTPQ5Cu+y46ZqzPSq0/kiBKm1u5ALUk5augs7M1pOkwYv6MozxoQgyVWGWor4XjuyirER5IAACIRCwHRoqKqqeu2113755Rdu1t27d8vLy/3b1MAY6+npKS8vr6+vLysrk5+VJ3OBIMk0TNMK/zrY/g0k4Z1xl91I3ZP8R0tNrEkUkxPYZSfTQBoEokLAVJDIFbO9e/eG6xgEKVz+7nvXvTTWToO5H7e+XzhWMmusZBbTNLG/LvesKNZEvgH9XncdX6OrKLmgaQz76+xwRhkQCJ2AqSCFbhlpgI+CFNbTDcLql+Trf6b9G0iyLSN9+/kzF4Zaioxbt5tYk7FZ6ypy40iDAAgoQgCCNBGIsO67kP3GWqLs30By9A159KOliWffOaqIwiAAAuoQgCCx3KOl5WeE8+CQ29W8jRvZb2biJ56io3g9sDyPG0iChJ0EZMkOJZQBAWUJQJBYTo2M7yoNQJCM/XZ2Mk1jTVNv1AdgSVCXp9+CxP2wliU8dDWoaKMfEHBMAILEchpAfszyycJ5ZBrb5xKly4+RIBnv9OSBzWYVUpbw0FWb9FAMBEIhYDIWh2KLjU592dRAjvjkepoNCx0UMfarafT6oU6iHPShVlGfbiBZOKmTJWwHt2CFUyAQOgEIEsstkelWyZjJOp634TL2y7cz6NYPA5BGb/0yaS2Y9Tqyc951xZ9+hYeuknyQCQKKEIAgTQSC3O0WQIiM/Rq3VxhvNQVgmA9dhChI/DmtuXcj4aGrPkQWTYKAVwQgSJMk+UQkkyG2FUwW8eX/un6NEmWcvflih++NBnkDyegMHrpqZIIcEFCNAARJtYhMbkMPXhp9JhGuIIln3IX70NVY/8bM5wsIzSeAAAQpAUFWw8XgdzTIfgtBCvGhq/Gd/cqkk
QaB/AlAkPJnh5r2CYR7A0nYGeIuO3JvinFfizAVCRBIIAEIUgKDHoLLeTxT1Q8rQ3zoKrk3BYLkR5TRZnQJQJCiG7soWR7uDSSZVFgPXTX7LZlZvmwz0iCQEAIQpCgEOvq3wsO9gaRCjMnJELmOp4K1sAEEQiEAQQoFu5NOo38rXJEbSE6ge1+WFCRyHc/7vtEiCESEAARJ7UCRf0KTY5vCfkCQeHCi/6eFwhcZTIsFAQiS2mEk/4SOmiCpcwMp9GDrfgYduj0wAASUIgBBUiocBmPMbnmb5RsaUCEDN5BUiAJsAAH1CUCQ1I4RORki1/FU9QPrdapGBnaBgHIEIEjKhWSKQaQgket4U6opdABBUigYMAUE1CYAQVI7Pmzi7RjyI8DJl5orvC8cN5CUv8JgIAioQgCCpEokrOywvhWu9uYtCJJVZHEOBEBAIgBBkmBEMUneTyIX+kLyDjsaQgKPbkEgegQgSNGL2RSLyftJyggSbiBNCRYOQAAELAlAkCzx+HHS2/s9Zvu/zfL98Mi8TUWeqWpuIM6AAAgoRACCFGwwPL/fQ06GyHW8YB3lveEGUhjU0ScIRJUABCnAyJE6QSqKfaPI6uQ6nv02vSuJG0gkS28nyWQXyASBKBKAIAUYNVInSEVxZJTnsy5HvZsXxg0kko2q4SKNRSYIBEoAghQgbrP7Omb59k2z3hduvx1PS0KQjDj9mCQbe0EOCESUAAQpwMCRkyFyiArQKP+6wg0kI1ufJsnGjpADAlEkEKggXb9+vaOjo6+vjyQ1NDT0N+lz9+5dY7FUKmXMjEwOKUjkEBUZl6wMxQ0kIx2zybBZvrEF5IBAjAkEJ0itra3l5eWbNm2qrq7+6KOPjEwPHz68aNGi9OTn7NmzxjLRFiSbzwEyuh3BHKzXkUEj/yaJ7ySZZIBMEDAlEJAgjY+Pp9PpK1euMMaGhoZKS0t/+uknnVGNjY3Hjh3TZeoOlROkPPZLKXm/R8fZzeFI3/6hlqI/971eMThrpG+/m6biV5cUpPhOkuMXQHjkL4GABOn06dPV1dXClYaGhs8++0wc8kRNTU1PT8/Q0NDDhw91p8RhavIjcsJMYL+Ugf5I3/6RuieZpnVmtLGSWffeL7zXXWcolegMXDWJDj+ctyQQkCB9+eWX69evF5Zs3bp1x44d4pAxNj4+XlxcXFtbu2TJkuLi4u3bt8tnRVqhGRK5zkL+ASysT0BipO7JsZJZQy1FFYOzWrsLx0pm5fQJn6kE4j5JnuotjkDANoGABKm5uXnDhg3Cqu0TH3HIGLtx40ZDQ8ONGzcYYwMDA1VVVcePH5cL8LRCgkSusyRYkMYGL9x7P6dAXI00pmlMa+0uHKl7MvcPa3fGqxk5IAACUwn4KEi7du3iGxQqKytbW1vXrVsnut66devOnTvFoTGxa9eut956y5ivkCCZ7Ysyyzc6E6+ce911fI2uYnAWVyONaVv6JtQodoKUx63DeEUb3oCALwR8FKSrV6+em/h8/fXXPT09lZWVwoN169a1traKQ8bYtWvXmpubRc6OHTvefvttcSgSCgkSORki1/GE9bFODLUUMU0baikSasQTIj823uMmUGxCCUdUI+CjIMmuZrPZysrKrq4uxtiPP/64ePHiwcFBXuCbb77p7+/v6+tbtGgR34Y3MDBQXl6u+rZvUpDIdTwZRGDpwP+G59sZRuqe3NL3pNCkisHcvobczCkuWxvIPznIayGwUKMjEIgNgYAEiTHW09NTXl5eX19fVlbW3t4uCK5evZrPjY4dO5ZOp+vr69Pp9JEjR0QBOaHQDEnlHxWF8Te8EKTW7kIuSBWDuftJfB1vbPCCHMfopsk/OSBI0Q0oLFeKQHCC5InbagkSY0zB/VKh/g0vdtkNtRRxNYrZLjuzW4Rm+Z5c9mgEBBJCAIIUu0CH+jf8ve46vkY3VjKLaVr89teRkyHyb4DYXVhwCAR8JwBB8h1x0B2Y/a1ulu+1fSN9+3Oy1F031FIUv93epCCRfwN4zRXtgUD8CUCQYhdjcsjE3/DexTmMO3TeWY+WQEBhAhAkhYOjM83mxjlSkPA3vA6mu0MFbx26cwi1QUAJAhAkJcIwvRGO/ix3VHj6vlECBEAABIIgAEEKgrLbPsgFN3ImJHrC3/ACBRIgAAIRIQBBikKgyAU3a0Gy45bNNUA7TaEMCIAACLgmAEFyjTCABsw2yJnl2zEJy3p2KKEMCIBAgAQgSAHCzrsrcjJEruPZ7IKsS/Zis0EUAwEQAAHXBCBIrhHabMDN+hgpFeQ6nk1jyLpkLzYbRDEQAAEQcE0AguQaoZ0G3K+PuW9BttNsrc8sX66LNAiAAAj4QwCC5A9XuVWv1sc83DhHToZIO2VH3KTdTBDd9Iu6IAAC0SEAQfI/Vgquj5GCRNrpCR5vp3eemIRGQAAE1CMAQfI/JmbrYGb5/luU6yEwkSAnXqQiBuM4egEBEFCVAATJz8jwdSpNY8bJBzlM+2kL0baHa4BE65NZRt+5HDY1TZbA/0EABEAgRyDxguTfvQ0xBeGzgUwmNykRH3KYFmfjlDCbCJrlx8l3+AICIOCEQLIFSWgGR6bTDCcc9WV1EyDekVin8rAjfcfqHQuvZdN0fORTSIMACCSVQIIFiRwTydEzj4vDOAHi3WkTb/eWp0p5NB6tKiRSIx9fnfJvHuyr2WgcBBJGIMGCRI6J5OiZxzVhth5llm+nC3JUJTPttBZkGf9mona8CLd3OxaiDAiAwASBBAuSmTaY5Tu6YkhhI+dkNpslR1Uy02aDARfjvmcyTNOm3Evz2wySORkdvy1B+yAAAtMRSLAgkaMSOX5NB5E4TzZOzsmIyoYs0iqdGvFKZL+G9kLOCHJWRzKPBKWQg4TuQSAEAhCkqdDJ8WtqEbtHOsFws5GBtIpsUP2h1kMsdiJhNt81y7fTJsqAAAj4QyDBghTAj0O9WqciR0++P8J4WZCFjcVCyTGb6vm3y4NUaNKMUICgUxAAAYlAsgWJMeaVZkhMvU+So6puqsF7VXyoJad6pHdeQSQbJ83wqke0AwIgkC+BpApSkLcx8o3N43pmo6pxYqH4UGs2ezPLf4zARUqn3ORSp4vmURUEQMArAokUpMBGKA9lj7SZzPTq0vCjHVJZA5jVRWIe7AdwtAkCkSKQPEEihz9yoHQZSM/VghxVyUyXlvtXneSs+KzOPxpoGQRAYCqB5AkSOfyRA+VUUs6OApM9R2bZnLHZLOaoa1HYc50WLSMBAiAQcQLJEySz2xVm+fkFOBjZc2SbTSWwWcxR17rC0ZrV6YzHIQiAgG8EkidI5GSInNC4gW4mb2b59vvKb/pCOmhEYbOYfWtREgRAAARsE4AgTaAiJzS2IRIFjWO92GJOlLadlff0hXTQaKTNYrbtRUEQAAEQsE9ARUE6c+aMmQOpVMrslIP8vId1+30Yx3rGiNf02W/QTM/IjozNyjMzeY4l5+dej2VyPZjlGztCDgiAAAjkS8BkAMq3Off1Dh48WFlZadaON4IkBndfn/Xpuey5mb4I3eJWCU3SPepU
FJMDQK7jyQWQBgEQAAEvCCgkSHfu3HnnnXfS6XQQguQFu+nb4EO5V7JnNk0xy5ft40rD7RFKyRVO/qEoKUikEMqNIw0CIAACXhBQSJB27ty5d+/e9vb2+AiSFxF63AapFvanL3xWJKsRf9CDyOc9iQL8UJarx6YgBQIgAALeE1BIkLLZLGOsq6vLWpBSEx/vSajfok6QuJBomoNbU/x5rMYZm27hztuJnfpgYSEIgIAaBBQSJA5kWkFSg1tIVojpC0+I6YtIWNvFNUxXhsuPnXU/XUUcggAIgICnBMIUpF27dqUnPvKUCII0TXyFfvDpDp8hZTK5eZLxWau6tnSrc/wsr5jJ6MriEARAAAQCJhCmIF29evXcxOfrr78WbnspSHz85YN1Z6foIvIJvstAzJa4P3YEyewVUNi2EPlrAg6AQBwIhClIJD/PBCm/8Zq0KdxMo6xq2qPXOOkM090K0p0Vh6JBXt6mkonqSIAACICAPwRiKkjk3jPdpgB/gHrcKimrXFF0c77OztzPWqddteP25bdtQSgZZlQehxnNgQAI5AgoJ0jWYbH7w1hyxIycIJnJKvnycu6yf3sTSGm0jhbOggAIgIATAjEVJLNx2SzfCTJ9Wf/mDRaySsoDKWB6c/M6JluOnMDn5ToqgQAIBEYgpoJEjpXkqOqSNCkMvE33QmUmn+LeD9+vIVbqSAFz6SCvTrZMQvakOzQCAiCQSAJJEiRyVLWI+rSKQiocH6YthMqiR90pcsQXnXrSha5Hs0MLaTSrgnwQAAEQcEggpoJktr/ZPh07wz2pcLyi8Wc9pLpY20NWkTvl4iQ/eWFaEbXu0ewsaYmQRrNayAcBEAABJwTiK0huHulNDrXGQdli3qDbAscF0uYWODl+dnRRlHdUWNSyk+Cap/tFlyyNdhpBGRAAARCwJBBrQbL0Q8bRFgAADTJJREFU3OokOdQaBcmYw1XQQqisejU5Z5wGkQVtiihZ1zqT65xQO/Fkhzz01bojnAUBEEg2AQgSFX+bikIKktlTfEjBoDqfJs9sUc6miE7TuuG0bLYsjcY1SUNVZIAACICAIwIQJAoXqTTy0CwqiXkDzxGzB/GIObF2RwqGaMdmguyO17UpojY7EsVIs0k+ogoSIAACIJAXAQgShY0ccOWhWZ6mcJEQOwv4oVAOIVHuF7jMFJG3TNpMVqE8Ns3zSedM+8MJEACB5BKAIJnEXigKPy8/8M3ilCwAPC22A5j04yBbVkRRTeiQSIhTjDl4VZJcS06TzcpuyoWRBgEQAAEXBCBI5vBkRRHzG3IsFqO2tWaYd2XrzLSTFQultNUBVUi4Jp8k3ZQLIA0CIAACzgkkQJDk5TVxR8c5qUc1yLFYjNrTakbe/ZptHNcJJCmibjp1/4sul72jOgiAQGIIxF2QPJ80WEuOUCb5AtJphnzKUZpsnBRIR83aKeyHztnpF2VAAASSRCDWgkQqATms2w85WV10RJ71UDM811f7jqMkCIAACPhMINaCRCoBqRn2KZPV5Y781gxMVuwHCyVBAAQiRSDWgmS9vJZ3nKaVHGhG3mxREQRAIMEEYi1I5GxGLK+5iTokxw091AUBEAABikDyBEleXqOI6PO83aSnbx3HIAACIAACjwjEWpDcb1mednUOFxIIgAAIgIBHBOIuSAG8hMKjSKAZEAABEEg4gQQIUt4RJhf3yPtSeXeBiiAAAiAAApMEIEiTJIz/92mTnrEj5IAACIAACDCWeEGy2LNAToY82aTn65Vn4ZGv/aJxEAABEHBHINmCZL1ngRQkch3PXQy8rG3tkZc9oS0QAAEQ8JhAggWJnOvoRCha47sdjzy+ftAcCIAACHhGIMGCRM51dILkZpOezRh5uMJm0yObhqEYCIAACARLIMGCpMKeBW9nYCp4FOzli95AAATiRCDBgmScDIn5UDAR9nyFLXSPguGGXkAABGJKAII0NbDkqtfUIp4dkX2RomKzS7Iu2YvNBlEMBEAABAIkkGBBcv9gIZdx8mOFzds1QJcOojoIgAAIOCGQbEESa3SZDNM01tTkBJ3rsuSEhlzHc9QVbyEUjxzZicIgAAIgMJVA4gVpKo5Aj0hBwgpboDFAZyAAAgoRCEGQzpw5QwIYGhr6m/S5e/eusVgqlTJmRjgHK2wRDh5MBwEQ8JhA0IJ08ODByspK0onDhw8vWrQoPfk5e/assVjcBCncNUMjX+SAAAiAQHgEghOkO3fuvPPOO+l02kyQGhsbjx07Zo0ihoJk7TDOggAIgEBiCAQnSDt37ty7d297e7uZINXU1PT09AwNDT18+NCMf2ryY1YgQfkePuIhQdTgKgiAgLoEghOkbDbLGOvq6iIFaXx8vLi4uLa2dsmSJcXFxdu3byeZYYb0CAtuPpHXBzJBAASiTCA4QeKUzATpxo0bDQ0NN27cYIwNDAxUVVUdP37cCBaClGNCbg0n9+wZCSIHBEAABFQl4KMg7dq1i29QkKdEZoKk47Nr16633npLl8kYgyDlmJBbwyFIxssFOSAAApEi4KMgXb169dzE5+uvvxZMzATp2rVrzc3NotiOHTvefvttcSgSEKQcCj8e8SAQIwECIAACIRHwUZBIj4yC9M033/T39/f19S1atOjKlSt8ya68vDwp275JTNaZ5GSIXMezbifvs9hPkTc6VAQBEDAnEL4grV69ms+Njh07lk6n6+vr0+n0kSNHSJsxQ8phIQWJXMcjIbrMxH4KlwBRHQRAwIRA0IJkYobdbAjSI1JhqQI5DyMF0m5IUQ4EQAAEHhGAIEX2UgjlIarkPAyCFNmLCIaDgFIEIEhKhUN5Y7CfQvkQwUAQiC4BCFJ0YxeG5eRkiFzHC8M69AkCIBBpAhCkSIcvcONJQSLX8QI3DR2CAAhEnQAEKeoRDNz+sPZTBO4oOgQBEAiYAAQpYOCx6C6U/RSxIAcnQAAELAhAkCzg4BQIgAAIgEBwBCBIwbFGTyAAAiAAAhYEIEgWcHAKBEAABEAgOAIQpOBYoycQAAEQAAELAhAkCzg4BQIgAAIgEBwBCFJwrNETCIAACICABQEIkgUcnAIBEAABEAiOAAQpONboCQRAAARAwIIABMkCDk6BAAiAAAgERwCCFBxr9AQCIAACIGBBAIJkAQenQAAEQAAEgiMAQQqONXoCARAAARCwIABBsoCDUyAAAiAAAsERgCAFxxo9gQAIgAAIWBCAIFnAwSkQAAEQAIHgCECQgmONnkAABEAABCwIQJAs4OAUCIAACIBAcAQgSMGxRk8gAAIgAAIWBCBIFnBwCgRAAARAIDgCEKTgWKMnEAABEAABCwIQJAs4OAUCIAACIBAcAQhScKzREwiAAAiAgAUBCJIFHJwCARAAARAIjgAEKTjW6AkEQAAEQMCCAATJAg5OgQAIgAAIBEdALUG6cuVKR0fH3//+dzMAqVTK7BTyQQAEQAAEIk1AIUHatWtXdXX1pk2bXnvttd/+9re//PKLkWxcBQl+GWOtcg7ipXJ0YFt0CagiSN9//31JScmdO3c4yl//+tfNzc1GrBgIjEx
UzkG8VI6O0ba4xsvoKXLUJKCKIPX39587d04wamhoOHDggDgUiRQ+IAACfhIQ3zUkQCB4AqoIkuz5tWvXSkpKvv/+ezkTaRAAARAAgXgTUE6QBgYGMpnMxx9/HG/u8A4EQAAEQEBHIExB2rVrV3riU1lZyc26fPnykiVLjhw5orMShyAAAiAAArEnEKYgXb169dzE5+uvv2aMnTt37oUXXvjqq69iDx0OggAIgAAIGAmEKUiyNdevX0+n06dPn344+RkfH5cLIA0CIAACIBBvAqoI0p49e3Rbh/7whz/EGz28AwEQAAEQkAmoIkiyTUiDAAiAAAgkkAAEKYFBh8sgAAIgoCIBCJKKUYFNIAACIJBAAhETpGmfvhrpEJ45cybS9uuMv379ekdHR19fny4/HocxCxZjLN5frnhcdbH3IkqCZOfpq9EN2MGDB8XvsaLrhbC8tbW1vLx806ZN1dXVH330kciPRyJmwWKMxfvLFY+rLgleREaQbD59NYoxu3PnzjvvvJNOp2MjSOPj4+l0+sqVK4yxoaGh0tLSn376KYqhMdocv2AxxmL85TJGEDkqE4iMINl8+qrKrM1s27lz5969e9vb22MjSKdPn66urhb+NjQ0fPbZZ+Iw0on4BYsxFuMvV6QvtgQaHxlBkmMTs6evZrNZxlhXV1dsBOnLL79cv369CNnWrVt37NghDiOdiF+wdOGI2ZdL5x0OFScQPUGK69NX4yRIzc3NGzZsEJf+9omPOIxBIk7BksMR1y+X7CPSKhNQWpDi+vRVo18xmyG1trauW7dOXPdbt27duXOnOIxBIpaChEcbx+DKjLoLSgtSXJ++qvOLX0NxGuN6enrk5cd169a1trZG/asi2x+nYHG/8GhjOb5Ih0VAaUGSocT+6atxGuOy2WxlZWVXVxdj7Mcff1y8ePHg4KAczain4xQsxljsv1xRv96SY39kBCn2T1+N2RjX09NTXl5eX19fVlbW3t4es29UzIIV+y9XzC6/GLsTGUGKcQxi7Nrw8DDflhZjH+EaCICAVwQgSF6RRDsgAAIgAAKuCECQXOFDZRAAARAAAa8IQJC8Iol2QAAEQAAEXBGAILnCh8ogAAIgAAJeEYAgeUUS7YAACIAACLgiAEFyhQ+VQQAEQAAEvCIAQfKKJNoBARAAARBwRQCC5AofKoMACIAACHhFAILkFUm0AwIgAAIg4IoABMkVPlQGARAAARDwigAEySuSaAcEQAAEQMAVAQiSK3yoDAIgAAIg4BUBCJJXJJPeztmzZ998882rV69yEB9//PH27duTDgX+gwAIOCEAQXJCC2UtCaxatepf//VfGWNnzpxJpVJ/+9vfLIvjJAiAAAhMIQBBmoIDB24I9Pf3v/DCCx9++OGSJUsOHDjgpinUBQEQSCABCFICg+6jy6dOnUqlUr/5zW987ANNgwAIxJQABCmmgQ3JrePHj6dSqX/5l38ZHh4OyQR0CwIgEFUCEKSoRk5Bu3/66afFixf/93//9yuvvIIdDQoGCCaBgOIEIEiKBygy5mWz2ddff339+vWMscuXL6dSqb/85S+y9V9PfOQcpEEABEBAJgBBkmkgnT+Bjz766J//+Z8HBwd5E3xrgzj8v//7vyVLljQ3N+ffAWqCAAjEnQAEKe4RVsC/Y8eOVVdX/+Y3v4EgKRANmAAC6hKAIKkbm9hY9vXXX4+MjGzfvh2CFJuYwhEQ8IMABMkPqmiTIABBIqAgCwRAQCIAQZJgIOknAQiSn3TRNgjEgQAEKQ5RjIQPEKRIhAlGgkCIBCBIIcJPVtcQpGTFG96CgHMCECTnzFAjLwIQpLywoRIIJIgABClBwYarIAACIKAyAQiSytGBbSAAAiCQIAIQpAQFG66CAAiAgMoE/h8FRaACFadcEAAAAABJRU5ErkJggg==)\n\nhttps://stats.stackexchange.com/questions/287425/why-do-you-need-to-scale-data-in-knn", "_____no_output_____" ] ], [ [ "XX,yy = make_classification(n_samples=400,n_features=2,n_classes=2,\n n_redundant=0,n_informative=2,\n n_clusters_per_class=2,random_state=48)\n\nXX[:,0] = XX[:,0]*30 + 150\n\nprint('Media x: {}'.format(np.mean(XX[:,0])))\nprint('SD x: {}'.format(np.std(XX[:,0])))\n\nprint('Media y: {}'.format(np.mean(XX[:,1])))\nprint('SD y: {}'.format(np.std(XX[:,1])))", "Media x: 150.4912351511699\nSD x: 39.90933364722069\nMedia y: 0.011031649918614495\nSD y: 1.2066153568418274\n" ], [ "kf = KFold(n_splits=5)\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(XX, yy)\nprint(cross_val_score(knn, XX, yy, cv=kf).mean())", "0.885\n" ], [ "from sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nXX_scaled = scaler.fit_transform(XX)\nprint('Media x: {}'.format(np.mean(XX_scaled[:,0])))\nprint('SD x: {}'.format(np.std(XX_scaled[:,0])))\n\nprint('Media y: {}'.format(np.mean(XX_scaled[:,1])))\nprint('SD y: {}'.format(np.std(XX_scaled[:,1])))", "Media x: 1.4210854715202004e-16\nSD x: 1.0000000000000004\nMedia y: -1.1102230246251566e-17\nSD y: 0.9999999999999997\n" ], [ "knn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(XX, yy)\nprint(cross_val_score(knn, XX_scaled, yy, cv=kf).mean())", "0.9675\n" ] ], [ [ "***\n# 2. 
Decision trees\n\nWe will keep working with the breast cancer dataset to get familiar with decision trees", "_____no_output_____" ], [ "## 2.1 My first little tree", "_____no_output_____" ] ], [ [ "# instantiate the model and train it on the training set\narbol = DecisionTreeClassifier(criterion='gini', max_depth=2, min_samples_leaf=1, min_samples_split=2, ccp_alpha=0)\narbol.fit(X_train,y_train)\naccuracy_score(y_train, arbol.predict(X_train))", "_____no_output_____" ], [ "# let's see how well this model did\nprint(classification_report(y_true=y_test,y_pred=arbol.predict(X_test)))", " precision recall f1-score support\n\n 0 0.93 0.94 0.94 90\n 1 0.90 0.89 0.90 53\n\n accuracy 0.92 143\n macro avg 0.92 0.92 0.92 143\nweighted avg 0.92 0.92 0.92 143\n\n" ], [ "# visualize this tree's errors in a confusion matrix\ncf_matrix = confusion_matrix(y_test, arbol.predict(X_test))\nsns.heatmap(cf_matrix, annot=True);", "_____no_output_____" ] ], [ [ "## 2.2 Feature importance\n\nTrees let us define a way to measure the importance of the features (*feature importances*), based on the information gain obtained each time a feature was used to make a split. For this, once the tree has been trained, the attribute we will use is: \n\n```\n# arbol.feature_importances_\n```\n\n", "_____no_output_____" ] ], [ [ "# computing the 5 highest feature importances\nimportances = pd.Series(arbol.feature_importances_).sort_values(ascending=False)[:5]\nimportances", "_____no_output_____" ], [ "f5_names = list(pd.Series(data.feature_names)[importances.index.to_list()])\nfig, ax = plt.subplots()\nimportances.plot.barh(ax=ax)\nax.set_yticklabels(f5_names)\nax.invert_yaxis()", "_____no_output_____" ] ], [ [ "## 2.3 Class imbalance\nSince this dataset has a class imbalance, you can account for it in the model through the class_weight parameter, which lets us handle the imbalance directly.\n", "_____no_output_____" ] ], [ [ "arbol = DecisionTreeClassifier(criterion='gini', max_depth=2, min_samples_leaf=1,\n min_samples_split=2, ccp_alpha=0, class_weight=\"balanced\")\narbol.fit(X_train, y_train)\naccuracy_score(y_train, arbol.predict(X_train))", "_____no_output_____" ], [ "print(classification_report(y_true=y_test,y_pred=arbol.predict(X_test)))", " precision recall f1-score support\n\n 0 1.00 0.90 0.95 90\n 1 0.85 1.00 0.92 53\n\n accuracy 0.94 143\n macro avg 0.93 0.95 0.93 143\nweighted avg 0.95 0.94 0.94 143\n\n" ], [ "cf_matrix = confusion_matrix(y_test, arbol.predict(X_test))\nsns.heatmap(cf_matrix, annot=True);", "_____no_output_____" ] ], [ [ "## 2.4 Visualization\n\nTo visualize the tree, sklearn provides the tree.plot_tree function:", "_____no_output_____" ] ], [ [ "plot_tree(arbol);", "_____no_output_____" ] ], [ [ "We can get a more polished rendering with the help of the *graphviz* + *dot* libraries. 
Ref: https://towardsdatascience.com/visualizing-decision-trees-with-python-scikit-learn-graphviz-matplotlib-1c50b4aa68dc", "_____no_output_____" ] ], [ [ "# libraries\nfrom sklearn.externals.six import StringIO \nfrom IPython.display import Image \nfrom sklearn.tree import export_graphviz\nimport pydotplus\nimport matplotlib.pyplot as plt\n\ndot_data = StringIO()\nexport_graphviz(arbol, out_file=dot_data, \n filled=True, rounded=True,\n special_characters=True)\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue()) \nImage(graph.create_png())", "/usr/local/lib/python3.7/dist-packages/sklearn/externals/six.py:31: FutureWarning: The module is deprecated in version 0.21 and will be removed in version 0.23 since we've dropped support for Python 2.7. Please rely on the official version of six (https://pypi.org/project/six/).\n \"(https://pypi.org/project/six/).\", FutureWarning)\n" ] ], [ [ "## 2.5 Overfitting: tree depth and post-pruning\n\nSince trees are models that tend to overfit, we have to turn to different techniques to mitigate this problem. Let's first look at the effect of tree depth on the bias-variance trade-off.", "_____no_output_____" ] ], [ [ "profundidad = list(range(1,20))\nresultados_train = []\nresultados_test = []\n\nfor depth in profundidad:\n # instantiate the model for this depth\n arbol = DecisionTreeClassifier(criterion='gini', max_depth=depth, min_samples_leaf=1, min_samples_split=2, ccp_alpha=0, class_weight=\"balanced\")\n arbol.fit(X_train, y_train)\n y_train_pred = arbol.predict(X_train)\n y_pred = arbol.predict(X_test)\n resultados_train.append(accuracy_score(y_train, y_train_pred))\n resultados_test.append(accuracy_score(y_test, y_pred))", "_____no_output_____" ], [ "# let's see what happened in each case\nf, ax = plt.subplots(1,1,figsize=(14,5),sharey=True)\nax.plot(profundidad, resultados_train, profundidad, resultados_test);\nax.legend(['accuracy train', 'accuracy test']);\nax.set(xlabel='profundidad',ylabel='accuracy');", "_____no_output_____" ], [ "# let's see what happens with a tree with no depth limit\nnp.random.seed(2021)\narbol = DecisionTreeClassifier(criterion='gini', ccp_alpha=0)\narbol.fit(X_train, y_train)\n#print(classification_report(y_true=y_test,y_pred=arbol.predict(X_test)))\nprint('Accuracy en entrenamiento: %f' % accuracy_score(y_train,arbol.predict(X_train)))\nprint('Accuracy en test: %f' % accuracy_score(y_test,arbol.predict(X_test)))", "Accuracy en entrenamiento: 1.000000\nAccuracy en test: 0.909091\n" ], [ "# plot this tree\ndot_data = StringIO()\nexport_graphviz(arbol, out_file=dot_data, \n filled=True, rounded=True,\n special_characters=True)\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue()) \nImage(graph.create_png())", "_____no_output_____" ] ], [ [ "One technique that lets us mitigate overfitting is what is known as post-pruning. The goal of this technique is to *prune* the trained tree, penalizing more complex trees in some way. The pruning algorithm implemented in Scikit-Learn is [Minimal Cost-Complexity Pruning](https://scikit-learn.org/stable/modules/tree.html#minimal-cost-complexity-pruning). 
The hyperparameter that controls this penalty is ccp_alpha$\\geq 0$: when this hyperparameter is 0 we do no pruning at all, and as we increase it we penalize the number of terminal nodes of the tree more and more strongly.", "_____no_output_____" ] ], [ [ "arbol = DecisionTreeClassifier(criterion='gini', ccp_alpha=0.01)\narbol.fit(X_train, y_train)\n#print(classification_report(y_true=y_test,y_pred=arbol.predict(X_test)))\nprint('Accuracy en entrenamiento: %f' % accuracy_score(y_train,arbol.predict(X_train)))\nprint('Accuracy en test: %f' % accuracy_score(y_test,arbol.predict(X_test)))", "Accuracy en entrenamiento: 0.971831\nAccuracy en test: 0.916084\n" ], [ "dot_data = StringIO()\nexport_graphviz(arbol, out_file=dot_data, \n filled=True, rounded=True,\n special_characters=True)\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue()) \nImage(graph.create_png())", "_____no_output_____" ], [ "# let's see how this affects performance and tree depth\nccp_alpha_vals = np.arange(0,1,0.05)\nresultados_train = []\nresultados_test = []\nprofundidad = []\n\nfor ccp in ccp_alpha_vals:\n # instantiate the model for this ccp_alpha\n arbol = DecisionTreeClassifier(criterion='gini', ccp_alpha=ccp)\n arbol.fit(X_train, y_train)\n # store the depth of the tree\n profundidad.append(arbol.tree_.max_depth)\n y_train_pred = arbol.predict(X_train)\n y_pred = arbol.predict(X_test)\n resultados_train.append(accuracy_score(y_train, y_train_pred))\n resultados_test.append(accuracy_score(y_test, y_pred))", "_____no_output_____" ], [ "f,ax = plt.subplots(2,1,figsize=(12,8),sharex=True)\nax[0].plot(ccp_alpha_vals, resultados_train, ccp_alpha_vals, resultados_test);\nax[0].legend(['accuracy train', 'accuracy test']);\nax[0].set(xlabel='ccp_alpha',ylabel='Accuracy');\nax[1].plot(ccp_alpha_vals, profundidad)\nax[1].set(xlabel='ccp_alpha',ylabel='Profundidad');", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7a2af7d595b1e1c59e664efac1eccd635e8548e
1,950
ipynb
Jupyter Notebook
vulnerability/vulnerability.ipynb
musakacmaz/deep-learning-for-se
d0f622dfe9cfca45c0eba7254e1bcc171d1e50a9
[ "MIT" ]
null
null
null
vulnerability/vulnerability.ipynb
musakacmaz/deep-learning-for-se
d0f622dfe9cfca45c0eba7254e1bcc171d1e50a9
[ "MIT" ]
null
null
null
vulnerability/vulnerability.ipynb
musakacmaz/deep-learning-for-se
d0f622dfe9cfca45c0eba7254e1bcc171d1e50a9
[ "MIT" ]
null
null
null
20.967742
71
0.558462
[ [ [ "# import necessary packages\n\nimport tensorflow as tf\nimport pandas as pd", "_____no_output_____" ], [ "# read vulnerability data\n\ndrupal = pd.read_csv('datasets/vulnerability/drupal.csv')\nmoodle = pd.read_csv('datasets/vulnerability/moodle.csv')\nphpmyadmin = pd.read_csv('datasets/vulnerability/phpmyadmin.csv')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
e7a2c9a2fa601124b547df08fd2627c40a2a5304
260,072
ipynb
Jupyter Notebook
examples/DSB2018/U-Net_Finetune.ipynb
psteinb/VoidSeg
6e091656f6d777452b0e36644e9eb4a6818792d2
[ "BSD-3-Clause" ]
15
2019-12-10T07:06:04.000Z
2022-02-14T11:58:06.000Z
examples/DSB2018/U-Net_Finetune.ipynb
psteinb/VoidSeg
6e091656f6d777452b0e36644e9eb4a6818792d2
[ "BSD-3-Clause" ]
1
2020-01-14T09:53:59.000Z
2020-01-14T09:53:59.000Z
examples/DSB2018/U-Net_Finetune.ipynb
psteinb/VoidSeg
6e091656f6d777452b0e36644e9eb4a6818792d2
[ "BSD-3-Clause" ]
2
2020-01-31T16:06:58.000Z
2021-07-14T13:07:12.000Z
326.313676
115,660
0.926709
[ [ [ "### This notebook trains a N2V network in the first step and then finetunes it for segmentation.", "_____no_output_____" ] ], [ [ "# We import all our dependencies.\nimport warnings\nwarnings.filterwarnings('ignore')\nimport sys\nsys.path.append('../../')\nfrom voidseg.models import Seg, SegConfig\nfrom n2v.models import N2VConfig, N2V\nimport numpy as np\nfrom csbdeep.utils import plot_history\nfrom voidseg.utils.misc_utils import combine_train_test_data, shuffle_train_data, augment_data\nfrom voidseg.utils.seg_utils import *\nfrom n2v.utils.n2v_utils import manipulate_val_data\nfrom voidseg.utils.compute_precision_threshold import compute_threshold, precision\nfrom keras.optimizers import Adam\nfrom matplotlib import pyplot as plt\nfrom scipy import ndimage\nimport tensorflow as tf\nimport keras.backend as K\nimport urllib\nimport os\nimport zipfile\nfrom tqdm import tqdm, tqdm_notebook", "Using TensorFlow backend.\n" ] ], [ [ "### Download DSB2018 data.\n\nFrom the Kaggle 2018 Data Science Bowl challenge, we take the same subset of data as has been used [here](https://github.com/mpicbg-csbd/stardist), showing a diverse collection of cell nuclei imaged by various fluorescence microscopes. We extracted 4870 image patches of size 128×128 from the training set and added Gaussian noise with mean 0 and sigma = 10 (n10), 20 (n20) and 40 (n40). This notebook shows results for n40 images.", "_____no_output_____" ] ], [ [ "# create a folder for our data\nif not os.path.isdir('./data'):\n os.mkdir('data')\n\n# check if data has been downloaded already\nzipPath=\"data/DSB.zip\"\nif not os.path.exists(zipPath):\n #download and unzip data\n data = urllib.request.urlretrieve('https://owncloud.mpi-cbg.de/index.php/s/LIN4L4R9b2gebDX/download', zipPath)\n with zipfile.ZipFile(zipPath, 'r') as zip_ref:\n zip_ref.extractall(\"data\")", "_____no_output_____" ] ], [ [ "The downloaded data is in `npz` format and the cell below extracts the training, validation and test data as numpy arrays", "_____no_output_____" ] ], [ [ "trainval_data = np.load('data/DSB/train_data/dsb2018_TrainVal40.npz')\ntest_data = np.load('data/DSB/test_data/dsb2018_Test40.npz', allow_pickle=True)\ntrain_images = trainval_data['X_train']\nval_images = trainval_data['X_val']\ntest_images = test_data['X_test']\n\ntrain_masks = trainval_data['Y_train']\nval_masks = trainval_data['Y_val']\ntest_masks = test_data['Y_test']", "_____no_output_____" ], [ "print(\"Shape of train_images: \", train_images.shape, \", Shape of train_masks: \", train_masks.shape)\nprint(\"Shape of val_images: \", val_images.shape, \", Shape of val_masks: \", val_masks.shape)\nprint(\"Shape of test_images: \", test_images.shape, \", Shape of test_masks: \", test_masks.shape)", "Shape of train_images: (3800, 128, 128) , Shape of train_masks: (3800, 128, 128)\nShape of val_images: (670, 128, 128) , Shape of val_masks: (670, 128, 128)\nShape of test_images: (50,) , Shape of test_masks: (50,)\n" ] ], [ [ "### Data preparation for training a N2V network\n\nSince, we can use all the noisy data for training N2V network, we combine the noisy train_images and test_images and use them as input to the N2V network.", "_____no_output_____" ] ], [ [ "X, Y = combine_train_test_data(X_train=train_images,Y_train=train_masks,X_test=test_images,Y_test=test_masks)\nprint(\"Combined Dataset Shape\", X.shape)", "Combined Dataset Shape (4300, 128, 128)\n" ], [ "X_val = val_images\nY_val = val_masks", "_____no_output_____" ] ], [ [ "Next, we shuffle the training pairs and augment the 
training and validation data.", "_____no_output_____" ] ], [ [ "random_seed = 1 # Seed to shuffle training data (annotated GT and raw image pairs)\n\nX, Y = shuffle_train_data(X, Y, random_seed = random_seed)\nprint(\"Training Data \\n..................\")\nX, Y = augment_data(X, Y)\nprint(\"\\n\")\nprint(\"Validation Data \\n..................\")\nX_val, Y_val = augment_data(X_val, Y_val)", "Training Data \n..................\nRaw image size after augmentation (34400, 128, 128)\nMask size after augmentation (34400, 128, 128)\n\n\nValidation Data \n..................\nRaw image size after augmentation (5360, 128, 128)\nMask size after augmentation (5360, 128, 128)\n" ], [ "# Adding channel dimension\nX = X[..., np.newaxis]\nprint(X.shape)\nX_val = X_val[..., np.newaxis]\nprint(X_val.shape)", "(34400, 128, 128, 1)\n(5360, 128, 128, 1)\n" ] ], [ [ "Let's look at one of our training and validation patches.", "_____no_output_____" ] ], [ [ "sl=0\nplt.figure(figsize=(14,7))\nplt.subplot(1,2,1)\nplt.imshow(X[sl,...,0], cmap='gray')\nplt.title('Training Patch');\nplt.subplot(1,2,2)\nplt.imshow(X_val[sl,...,0], cmap='gray')\nplt.title('Validation Patch');", "_____no_output_____" ] ], [ [ "### Configure N2V Network", "_____no_output_____", "The data preparation for training a denoising N2V network is now done. Next, we configure the N2V network by specifying `N2VConfig` parameters.", "_____no_output_____" ] ], [ [ "config = N2VConfig(X, unet_kern_size=3, n_channel_out=1,train_steps_per_epoch=400, train_epochs=200, \n                   train_loss='mse', batch_norm=True, \n                   train_batch_size=128, n2v_perc_pix=0.784, n2v_patch_shape=(64, 64),\n                   unet_n_first = 32,\n                   unet_residual = False,\n                   n2v_manipulator='uniform_withCP', n2v_neighborhood_radius=5, unet_n_depth=4)\n\n# Let's look at the parameters stored in the config-object.\nvars(config)", "_____no_output_____" ], [ "# a name used to identify the model\nmodel_name = 'n40_denoising'\n# the base directory in which our model will live\nbasedir = 'models'\n# We are now creating our network model.\nmodel = N2V(config, model_name, basedir=basedir)\nmodel.prepare_for_training(metrics=())", "_____no_output_____" ] ], [ [ "Now, we begin training the denoising N2V model. If a trained model is already available, it is loaded; otherwise a new model is trained.", "_____no_output_____" ] ], [ [ "# We are ready to start training now.\nquery_weightpath = os.getcwd()+\"/models/\"+model_name\nweights_present = False\nfor file in os.listdir(query_weightpath):\n    if(file == \"weights_best.h5\"):\n        print(\"Found weights of a trained N2V network, loading it for prediction!\")\n        weights_present = True \n        break \nif(weights_present):\n    model.load_weights(\"weights_best.h5\")\nelse:\n    print(\"Did not find weights of a trained N2V network, training one from scratch!\")\n    history = model.train(X, X_val)", "Found weights of a trained N2V network, loading it for prediction!\n" ] ], [ [ "### Data preparation for segmentation step\n\nNext, we normalize all raw data with the mean and std (standard deviation) of the raw `train_images`. Then, we shuffle the raw training images and the corresponding Ground Truth (GT). Lastly, we fractionate the training pairs of raw images and corresponding GT to realize the case where not enough annotated training data is available. For this fractionation, please specify the `fraction` parameter below. 
It should be between 0 (exclusive) and 100 (inclusive).", "_____no_output_____" ] ], [ [ "fraction = 2 # Fraction of annotated GT and raw image pairs to use during training. \nrandom_seed = 1 # Seed to shuffle training data (annotated GT and raw image pairs).\n\n\nassert 0 <fraction<= 100, \"Fraction should be between 0 and 100\"\nmean, std = np.mean(train_images), np.std(train_images)\n\nX_normalized = normalize(train_images, mean, std)\nX_val_normalized = normalize(val_images, mean, std)\nX_test_normalized = normalize(test_images, mean, std)\n\nX_shuffled, Y_shuffled = shuffle_train_data(X_normalized, train_masks, random_seed = random_seed)\nX_frac, Y_frac = fractionate_train_data(X_shuffled, Y_shuffled, fraction = fraction)\nprint(\"Training Data \\n..................\")\nX, Y_train_masks = augment_data(X_frac, Y_frac)\nprint(\"\\n\")\nprint(\"Validation Data \\n..................\")\nX_val, Y_val_masks = augment_data(X_val_normalized, val_masks)", "Training Data \n..................\nRaw image size after augmentation (608, 128, 128)\nMask size after augmentation (608, 128, 128)\n\n\nValidation Data \n..................\nRaw image size after augmentation (5360, 128, 128)\nMask size after augmentation (5360, 128, 128)\n" ] ], [ [ "Next, we do a one-hot encoding of training and validation labels for training a 3-class U-Net. One-hot encoding will extract three channels from each labelled image, where the channels correspond to background, foreground and border.", "_____no_output_____" ] ], [ [ "X = X[...,np.newaxis]\nY = convert_to_oneHot(Y_train_masks)\nX_val = X_val[...,np.newaxis]\nY_val = convert_to_oneHot(Y_val_masks)\nprint(X.shape, Y.shape)\nprint(X_val.shape, Y_val.shape)", "(608, 128, 128, 1) (608, 128, 128, 3)\n(5360, 128, 128, 1) (5360, 128, 128, 3)\n" ] ], [ [ "Let's look at one of our validation patches.", "_____no_output_____" ] ], [ [ "sl=0\nplt.figure(figsize=(20,5))\nplt.subplot(1,4,1)\nplt.imshow(X_val[sl,...,0])\nplt.title('Raw validation image')\nplt.subplot(1,4,2)\nplt.imshow(Y_val[sl,...,0])\nplt.title('1-hot encoded background')\nplt.subplot(1,4,3)\nplt.imshow(Y_val[sl,...,1])\nplt.title('1-hot encoded foreground')\nplt.subplot(1,4,4)\nplt.imshow(Y_val[sl,...,2])\nplt.title('1-hot encoded border')", "_____no_output_____" ] ], [ [ "### Configure Segmentation Network\n\nThe data preparation for segmentation is now done. Next, we configure a segmentation network by specifying `SegConfig` parameters. For example, one can increase `train_epochs` to get even better results at the expense of a longer computation. 
(This holds usually true for a large `fraction`.)", "_____no_output_____" ] ], [ [ "relative_weights = [1.0,1.0,5.0] # Relative weight of background, foreground and border class for training\n\nconfig = SegConfig(X, unet_kern_size=3, relative_weights = relative_weights,\n train_steps_per_epoch=400, train_epochs=3, batch_norm=True, \n train_batch_size=128, unet_n_first = 32, unet_n_depth=4)\n\n# Let's look at the parameters stored in the config-object.\n# a name used to identify the model\nmodel_name = 'seg_finetune'\n# the base directory in which our model will live\nbasedir = 'models'\n# We are now creating our network model.\nseg_model = Seg(config, model_name, basedir=basedir)\nvars(config)", "_____no_output_____" ] ], [ [ "##### For finetuning, we initialize segmentation network with the best weights of the denoising N2V network trained above.", "_____no_output_____" ] ], [ [ "ft_layers = seg_model.keras_model.layers\nn2v_layers = model.keras_model.layers\n\nfor i in range(0, len(n2v_layers)-2):\n ft_layers[i].set_weights(n2v_layers[i].get_weights())\n\nfor l in seg_model.keras_model.layers:\n l.trainable=True", "_____no_output_____" ] ], [ [ "Now, we begin training the model for segmentation.", "_____no_output_____" ] ], [ [ "seg_model.train(X, Y, (X_val, Y_val))", "Epoch 1/3\n400/400 [==============================] - 152s 380ms/step - loss: 0.3040 - seg_crossentropy: 0.3040 - val_loss: 0.2867 - val_seg_crossentropy: 0.2867\nEpoch 2/3\n400/400 [==============================] - 143s 358ms/step - loss: 0.1202 - seg_crossentropy: 0.1202 - val_loss: 0.3895 - val_seg_crossentropy: 0.3895\nEpoch 3/3\n400/400 [==============================] - 142s 354ms/step - loss: 0.0760 - seg_crossentropy: 0.0760 - val_loss: 0.4506 - val_seg_crossentropy: 0.4506\n\nLoading network weights from 'weights_best.h5'.\n" ] ], [ [ "### Computing the best threshold on validation images (to maximize Average Precision score). The threshold so obtained will be used to get hard masks from probability images to be predicted on test images.", "_____no_output_____" ] ], [ [ "threshold=seg_model.optimize_thresholds(X_val_normalized.astype(np.float32), val_masks)", "Computing best threshold: \n" ] ], [ [ "### Prediction on test images to get segmentation result", "_____no_output_____" ] ], [ [ "predicted_images, precision_result=seg_model.predict_label_masks(X_test_normalized, test_masks, threshold)\nprint(\"Average precision over all test images at IOU = 0.5: \", precision_result)", "Average precision over all test images at IOU = 0.5: 0.6144172916479654\n" ], [ "plt.figure(figsize=(10,10))\nplt.subplot(1,2,1)\nplt.imshow(predicted_images[22])\nplt.title('Prediction')\nplt.subplot(1,2,2)\nplt.imshow(test_masks[22])\nplt.title('Ground Truth')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
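The notebook above imports `convert_to_oneHot` from `voidseg.utils.seg_utils` without showing its body. As a rough illustration of the three-channel background/foreground/border encoding it describes, here is a minimal sketch; it is not the library's actual implementation, and the helper name, border definition and one-pixel border width are assumptions made only for this example.

```python
import numpy as np
from scipy import ndimage

def one_hot_bg_fg_border(label_img, border_width=1):
    """Illustrative 3-class encoding of an instance-label mask: returns an
    (H, W, 3) float array with background, foreground and border channels."""
    fg = label_img > 0
    # A foreground pixel counts as 'border' if a differently-labelled pixel
    # (another object or background) lies within `border_width` of it.
    footprint = (2 * border_width + 1,) * label_img.ndim
    eroded = ndimage.grey_erosion(label_img, size=footprint)
    dilated = ndimage.grey_dilation(label_img, size=footprint)
    border = fg & ((eroded != label_img) | (dilated != label_img))
    return np.stack([~fg, fg & ~border, border], axis=-1).astype(np.float32)

# Toy example: a 4x4 mask containing one 2x2 object labelled 1.
toy = np.zeros((4, 4), dtype=int)
toy[1:3, 1:3] = 1
print(one_hot_bg_fg_border(toy).shape)  # (4, 4, 3)
```

In the notebook itself, these three channels are consumed by the weighted three-class loss configured through `relative_weights`.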
e7a2d90e57da680ed9e3b19694bebacee0563d49
334,989
ipynb
Jupyter Notebook
2.3.1_3_Maneras_de_Programar_a_una_Red_Neuronal.ipynb
txusser/Master_IA_Sanidad
789523ecc626e8fc9dc2a79dd3e2fa10cbde577d
[ "MIT" ]
1
2022-01-26T21:37:06.000Z
2022-01-26T21:37:06.000Z
2.3.1_3_Maneras_de_Programar_a_una_Red_Neuronal.ipynb
txusser/Master_IA_Sanidad
789523ecc626e8fc9dc2a79dd3e2fa10cbde577d
[ "MIT" ]
null
null
null
2.3.1_3_Maneras_de_Programar_a_una_Red_Neuronal.ipynb
txusser/Master_IA_Sanidad
789523ecc626e8fc9dc2a79dd3e2fa10cbde577d
[ "MIT" ]
null
null
null
113.941837
70,488
0.786963
[ [ [ "<a href=\"https://colab.research.google.com/github/txusser/Master_IA_Sanidad/blob/main/2.3.1_3_Maneras_de_Programar_a_una_Red_Neuronal.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# 3 Maneras de Programar a una Red Neuronal - DOTCSV\n\n## Código inicial", "_____no_output_____" ] ], [ [ "import numpy as np\nimport scipy as sc\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_circles\n\n# Creamos nuestros datos artificiales, donde buscaremos clasificar \n# dos anillos concéntricos de datos. \nX, Y = make_circles(n_samples=500, factor=0.5, noise=0.05)\n\n# Resolución del mapa de predicción.\nres = 100 \n\n# Coordendadas del mapa de predicción.\n_x0 = np.linspace(-1.5, 1.5, res)\n_x1 = np.linspace(-1.5, 1.5, res)\n\n# Input con cada combo de coordenadas del mapa de predicción.\n_pX = np.array(np.meshgrid(_x0, _x1)).T.reshape(-1, 2)\n\n# Objeto vacio a 0.5 del mapa de predicción.\n_pY = np.zeros((res, res)) + 0.5\n\n# Visualización del mapa de predicción.\nplt.figure(figsize=(8, 8))\nplt.pcolormesh(_x0, _x1, _pY, cmap=\"coolwarm\", vmin=0, vmax=1)\n\n# Visualización de la nube de datos.\nplt.scatter(X[Y == 0,0], X[Y == 0,1], c=\"skyblue\")\nplt.scatter(X[Y == 1,0], X[Y == 1,1], c=\"salmon\")\n\nplt.tick_params(labelbottom=False, labelleft=False)", "_____no_output_____" ] ], [ [ "## Tensorflow", "_____no_output_____" ] ], [ [ "import tensorflow as tf\n\nfrom matplotlib import animation\nfrom IPython.core.display import display, HTML\n\n# Definimos los puntos de entrada de la red, para la matriz X e Y.\niX = tf.placeholder('float', shape=[None, X.shape[1]])\niY = tf.placeholder('float', shape=[None])\n\nlr = 0.01 # learning rate\nnn = [2, 16, 8, 1] # número de neuronas por capa.\n\n# Capa 1\nW1 = tf.Variable(tf.random_normal([nn[0], nn[1]]), name='Weights_1')\nb1 = tf.Variable(tf.random_normal([nn[1]]), name='bias_1')\n\nl1 = tf.nn.relu(tf.add(tf.matmul(iX, W1), b1))\n\n# Capa 2\nW2 = tf.Variable(tf.random_normal([nn[1], nn[2]]), name='Weights_2')\nb2 = tf.Variable(tf.random_normal([nn[2]]), name='bias_2')\n\nl2 = tf.nn.relu(tf.add(tf.matmul(l1, W2), b2))\n\n# Capa 3\nW3 = tf.Variable(tf.random_normal([nn[2], nn[3]]), name='Weights_3')\nb3 = tf.Variable(tf.random_normal([nn[3]]), name='bias_3')\n\n# Vector de predicciones de Y.\npY = tf.nn.sigmoid(tf.add(tf.matmul(l2, W3), b3))[:, 0]\n\n\n# Evaluación de las predicciones.\nloss = tf.losses.mean_squared_error(pY, iY)\n\n# Definimos al optimizador de la red, para que minimice el error.\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.05).minimize(loss)\n\nn_steps = 1000 # Número de ciclos de entrenamiento.\n\niPY = [] # Aquí guardaremos la evolución de las predicción, para la animación.\n\nwith tf.Session() as sess:\n \n # Inicializamos todos los parámetros de la red, las matrices W y b.\n sess.run(tf.global_variables_initializer())\n \n # Iteramos n pases de entrenamiento.\n for step in range(n_steps):\n \n # Evaluamos al optimizador, a la función de coste y al tensor de salida pY. 
\n # La evaluación del optimizer producirá el entrenamiento de la red.\n _, _loss, _pY = sess.run([optimizer, loss, pY], feed_dict={ iX : X, iY : Y })\n \n # Cada 25 iteraciones, imprimimos métricas.\n if step % 25 == 0: \n \n # Cálculo del accuracy.\n acc = np.mean(np.round(_pY) == Y)\n \n # Impresión de métricas.\n print('Step', step, '/', n_steps, '- Loss = ', _loss, '- Acc =', acc)\n \n # Obtenemos predicciones para cada punto de nuestro mapa de predicción _pX.\n _pY = sess.run(pY, feed_dict={ iX : _pX }).reshape((res, res))\n\n # Y lo guardamos para visualizar la animación.\n iPY.append(_pY)\n \n \n# ----- CÓDIGO ANIMACIÓN ----- #\n\nims = []\n\nfig = plt.figure(figsize=(10, 10))\n\nprint(\"--- Generando animación ---\")\n\nfor fr in range(len(iPY)):\n \n im = plt.pcolormesh(_x0, _x1, iPY[fr], cmap=\"coolwarm\", animated=True)\n\n # Visualización de la nube de datos.\n plt.scatter(X[Y == 0,0], X[Y == 0,1], c=\"skyblue\")\n plt.scatter(X[Y == 1,0], X[Y == 1,1], c=\"salmon\")\n\n # plt.title(\"Resultado Clasificación\")\n plt.tick_params(labelbottom=False, labelleft=False)\n\n ims.append([im])\n\nani = animation.ArtistAnimation(fig, ims, interval=50, blit=True, repeat_delay=1000)\n\nHTML(ani.to_html5_video())", "Step 0 / 1000 - Loss = 0.29063216 - Acc = 0.562\nStep 25 / 1000 - Loss = 0.18204297 - Acc = 0.632\nStep 50 / 1000 - Loss = 0.1471082 - Acc = 0.79\nStep 75 / 1000 - Loss = 0.13354021 - Acc = 0.854\nStep 100 / 1000 - Loss = 0.122594796 - Acc = 0.902\nStep 125 / 1000 - Loss = 0.111153014 - Acc = 0.942\nStep 150 / 1000 - Loss = 0.09965967 - Acc = 0.956\nStep 175 / 1000 - Loss = 0.08899061 - Acc = 0.968\nStep 200 / 1000 - Loss = 0.07819476 - Acc = 0.98\nStep 225 / 1000 - Loss = 0.06843367 - Acc = 0.984\nStep 250 / 1000 - Loss = 0.059809823 - Acc = 0.992\nStep 275 / 1000 - Loss = 0.051961992 - Acc = 0.994\nStep 300 / 1000 - Loss = 0.04506078 - Acc = 0.996\nStep 325 / 1000 - Loss = 0.03921565 - Acc = 0.998\nStep 350 / 1000 - Loss = 0.034442045 - Acc = 1.0\nStep 375 / 1000 - Loss = 0.030614918 - Acc = 1.0\nStep 400 / 1000 - Loss = 0.027491104 - Acc = 1.0\nStep 425 / 1000 - Loss = 0.024817096 - Acc = 1.0\nStep 450 / 1000 - Loss = 0.022489877 - Acc = 1.0\nStep 475 / 1000 - Loss = 0.020462362 - Acc = 1.0\nStep 500 / 1000 - Loss = 0.018674525 - Acc = 1.0\nStep 525 / 1000 - Loss = 0.01712375 - Acc = 1.0\nStep 550 / 1000 - Loss = 0.015800532 - Acc = 1.0\nStep 575 / 1000 - Loss = 0.014612312 - Acc = 1.0\nStep 600 / 1000 - Loss = 0.013566933 - Acc = 1.0\nStep 625 / 1000 - Loss = 0.012653999 - Acc = 1.0\nStep 650 / 1000 - Loss = 0.011832824 - Acc = 1.0\nStep 675 / 1000 - Loss = 0.011105223 - Acc = 1.0\nStep 700 / 1000 - Loss = 0.010456048 - Acc = 1.0\nStep 725 / 1000 - Loss = 0.009875296 - Acc = 1.0\nStep 750 / 1000 - Loss = 0.009351645 - Acc = 1.0\nStep 775 / 1000 - Loss = 0.008877279 - Acc = 1.0\nStep 800 / 1000 - Loss = 0.0084480485 - Acc = 1.0\nStep 825 / 1000 - Loss = 0.008039233 - Acc = 1.0\nStep 850 / 1000 - Loss = 0.0076591335 - Acc = 1.0\nStep 875 / 1000 - Loss = 0.0073076617 - Acc = 1.0\nStep 900 / 1000 - Loss = 0.0069831987 - Acc = 1.0\nStep 925 / 1000 - Loss = 0.0066823033 - Acc = 1.0\nStep 950 / 1000 - Loss = 0.0064092586 - Acc = 1.0\nStep 975 / 1000 - Loss = 0.0061591878 - Acc = 1.0\n--- Generando animación ---\n" ] ], [ [ "## Keras", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport tensorflow.keras as kr\n\nfrom IPython.core.display import display, HTML\n\n\nlr = 0.01 # learning rate\nnn = [2, 16, 8, 1] # número de neuronas por capa.\n\n\n# Creamos el objeto que 
contendrá a nuestra red neuronal, como\n# secuencia de capas.\nmodel = kr.Sequential()\n\n# Añadimos la capa 1\nl1 = model.add(kr.layers.Dense(nn[1], activation='relu'))\n\n# Añadimos la capa 2\nl2 = model.add(kr.layers.Dense(nn[2], activation='relu'))\n\n# Añadimos la capa 3\nl3 = model.add(kr.layers.Dense(nn[3], activation='sigmoid'))\n\n# Compilamos el modelo, definiendo la función de coste y el optimizador.\nmodel.compile(loss='mse', optimizer=kr.optimizers.SGD(lr=0.05), metrics=['acc'])\n\n# Y entrenamos al modelo. Los callbacks \nmodel.fit(X, Y, epochs=100)", "Epoch 1/100\n500/500 [==============================] - 0s 111us/sample - loss: 0.2468 - acc: 0.5040\nEpoch 2/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.2457 - acc: 0.5100\nEpoch 3/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.2446 - acc: 0.5040\nEpoch 4/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.2434 - acc: 0.5160\nEpoch 5/100\n500/500 [==============================] - 0s 36us/sample - loss: 0.2422 - acc: 0.5200\nEpoch 6/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.2412 - acc: 0.5400\nEpoch 7/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.2400 - acc: 0.5460\nEpoch 8/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.2388 - acc: 0.5780\nEpoch 9/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.2376 - acc: 0.5840\nEpoch 10/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.2363 - acc: 0.5960\nEpoch 11/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.2350 - acc: 0.6280\nEpoch 12/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.2336 - acc: 0.6220\nEpoch 13/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.2320 - acc: 0.6280\nEpoch 14/100\n500/500 [==============================] - 0s 36us/sample - loss: 0.2305 - acc: 0.6580\nEpoch 15/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.2289 - acc: 0.6640\nEpoch 16/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.2272 - acc: 0.7040\nEpoch 17/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.2255 - acc: 0.7140\nEpoch 18/100\n500/500 [==============================] - 0s 47us/sample - loss: 0.2238 - acc: 0.7280\nEpoch 19/100\n500/500 [==============================] - 0s 42us/sample - loss: 0.2221 - acc: 0.7440\nEpoch 20/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.2201 - acc: 0.7620\nEpoch 21/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.2181 - acc: 0.7740\nEpoch 22/100\n500/500 [==============================] - 0s 50us/sample - loss: 0.2161 - acc: 0.7900\nEpoch 23/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.2140 - acc: 0.8040\nEpoch 24/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.2119 - acc: 0.8020\nEpoch 25/100\n500/500 [==============================] - 0s 48us/sample - loss: 0.2095 - acc: 0.8440\nEpoch 26/100\n500/500 [==============================] - 0s 44us/sample - loss: 0.2072 - acc: 0.8280\nEpoch 27/100\n500/500 [==============================] - 0s 43us/sample - loss: 0.2048 - acc: 0.8620\nEpoch 28/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.2023 - acc: 0.8720\nEpoch 29/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.1997 - acc: 0.8700\nEpoch 30/100\n500/500 
[==============================] - 0s 37us/sample - loss: 0.1970 - acc: 0.8940\nEpoch 31/100\n500/500 [==============================] - 0s 43us/sample - loss: 0.1941 - acc: 0.9100\nEpoch 32/100\n500/500 [==============================] - 0s 46us/sample - loss: 0.1910 - acc: 0.9220\nEpoch 33/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.1877 - acc: 0.9260\nEpoch 34/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.1843 - acc: 0.9380\nEpoch 35/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.1811 - acc: 0.9360\nEpoch 36/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.1779 - acc: 0.9480\nEpoch 37/100\n500/500 [==============================] - 0s 44us/sample - loss: 0.1748 - acc: 0.9520\nEpoch 38/100\n500/500 [==============================] - 0s 45us/sample - loss: 0.1713 - acc: 0.9580\nEpoch 39/100\n500/500 [==============================] - 0s 47us/sample - loss: 0.1680 - acc: 0.9620\nEpoch 40/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.1646 - acc: 0.9660\nEpoch 41/100\n500/500 [==============================] - 0s 44us/sample - loss: 0.1610 - acc: 0.9660\nEpoch 42/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.1576 - acc: 0.9720\nEpoch 43/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.1538 - acc: 0.9760\nEpoch 44/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.1503 - acc: 0.9740\nEpoch 45/100\n500/500 [==============================] - 0s 42us/sample - loss: 0.1463 - acc: 0.9780\nEpoch 46/100\n500/500 [==============================] - 0s 44us/sample - loss: 0.1426 - acc: 0.9800\nEpoch 47/100\n500/500 [==============================] - 0s 44us/sample - loss: 0.1386 - acc: 0.9840\nEpoch 48/100\n500/500 [==============================] - 0s 46us/sample - loss: 0.1346 - acc: 0.9860\nEpoch 49/100\n500/500 [==============================] - 0s 48us/sample - loss: 0.1305 - acc: 0.9920\nEpoch 50/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.1263 - acc: 0.9960\nEpoch 51/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.1221 - acc: 0.9980\nEpoch 52/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.1180 - acc: 1.0000\nEpoch 53/100\n500/500 [==============================] - 0s 36us/sample - loss: 0.1137 - acc: 0.9980\nEpoch 54/100\n500/500 [==============================] - 0s 42us/sample - loss: 0.1094 - acc: 1.0000\nEpoch 55/100\n500/500 [==============================] - 0s 43us/sample - loss: 0.1051 - acc: 1.0000\nEpoch 56/100\n500/500 [==============================] - 0s 44us/sample - loss: 0.1011 - acc: 1.0000\nEpoch 57/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.0969 - acc: 1.0000\nEpoch 58/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0930 - acc: 1.0000\nEpoch 59/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.0891 - acc: 1.0000\nEpoch 60/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0855 - acc: 1.0000\nEpoch 61/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.0819 - acc: 1.0000\nEpoch 62/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.0785 - acc: 1.0000\nEpoch 63/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0752 - acc: 1.0000\nEpoch 64/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.0719 - acc: 1.0000\nEpoch 
65/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0689 - acc: 1.0000\nEpoch 66/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.0660 - acc: 1.0000\nEpoch 67/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0632 - acc: 1.0000\nEpoch 68/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.0606 - acc: 1.0000\nEpoch 69/100\n500/500 [==============================] - 0s 44us/sample - loss: 0.0581 - acc: 1.0000\nEpoch 70/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.0556 - acc: 1.0000\nEpoch 71/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.0533 - acc: 1.0000\nEpoch 72/100\n500/500 [==============================] - 0s 47us/sample - loss: 0.0511 - acc: 1.0000\nEpoch 73/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.0491 - acc: 1.0000\nEpoch 74/100\n500/500 [==============================] - 0s 43us/sample - loss: 0.0471 - acc: 1.0000\nEpoch 75/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.0452 - acc: 1.0000\nEpoch 76/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.0434 - acc: 1.0000\nEpoch 77/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.0418 - acc: 1.0000\nEpoch 78/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.0401 - acc: 1.0000\nEpoch 79/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.0386 - acc: 1.0000\nEpoch 80/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.0371 - acc: 1.0000\nEpoch 81/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.0358 - acc: 1.0000\nEpoch 82/100\n500/500 [==============================] - 0s 51us/sample - loss: 0.0344 - acc: 1.0000\nEpoch 83/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.0332 - acc: 1.0000\nEpoch 84/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.0320 - acc: 1.0000\nEpoch 85/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.0309 - acc: 1.0000\nEpoch 86/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.0298 - acc: 1.0000\nEpoch 87/100\n500/500 [==============================] - 0s 41us/sample - loss: 0.0288 - acc: 1.0000\nEpoch 88/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0278 - acc: 1.0000\nEpoch 89/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0269 - acc: 1.0000\nEpoch 90/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0260 - acc: 1.0000\nEpoch 91/100\n500/500 [==============================] - 0s 42us/sample - loss: 0.0251 - acc: 1.0000\nEpoch 92/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.0243 - acc: 1.0000\nEpoch 93/100\n500/500 [==============================] - 0s 42us/sample - loss: 0.0235 - acc: 1.0000\nEpoch 94/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0228 - acc: 1.0000\nEpoch 95/100\n500/500 [==============================] - 0s 43us/sample - loss: 0.0221 - acc: 1.0000\nEpoch 96/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0215 - acc: 1.0000\nEpoch 97/100\n500/500 [==============================] - 0s 39us/sample - loss: 0.0208 - acc: 1.0000\nEpoch 98/100\n500/500 [==============================] - 0s 38us/sample - loss: 0.0202 - acc: 1.0000\nEpoch 99/100\n500/500 [==============================] - 0s 40us/sample - loss: 0.0196 - 
acc: 1.0000\nEpoch 100/100\n500/500 [==============================] - 0s 37us/sample - loss: 0.0190 - acc: 1.0000\n" ] ], [ [ "## Sklearn", "_____no_output_____" ] ], [ [ "import sklearn as sk\nimport sklearn.neural_network\n\nfrom IPython.core.display import display, HTML\n\n\nlr = 0.01 # learning rate\nnn = [2, 16, 8, 1] # número de neuronas por capa.\n\n# Creamos el objeto del modelo de red neuronal multicapa.\nclf = sk.neural_network.MLPRegressor(solver='sgd', \n learning_rate_init=lr, \n hidden_layer_sizes=tuple(nn[1:]),\n verbose=True,\n n_iter_no_change=1000,\n batch_size = 64)\n\n\n# Y lo entrenamos con nuestro datos.\nclf.fit(X, Y)", "Iteration 1, loss = 0.66391606\nIteration 2, loss = 0.29448667\nIteration 3, loss = 0.13429471\nIteration 4, loss = 0.13165037\nIteration 5, loss = 0.13430276\nIteration 6, loss = 0.12556423\nIteration 7, loss = 0.12292571\nIteration 8, loss = 0.12204933\nIteration 9, loss = 0.12175702\nIteration 10, loss = 0.12129750\nIteration 11, loss = 0.12073281\nIteration 12, loss = 0.12028767\nIteration 13, loss = 0.11983928\nIteration 14, loss = 0.11939207\nIteration 15, loss = 0.11909108\nIteration 16, loss = 0.11836549\nIteration 17, loss = 0.11771654\nIteration 18, loss = 0.11703195\nIteration 19, loss = 0.11636100\nIteration 20, loss = 0.11559426\nIteration 21, loss = 0.11475135\nIteration 22, loss = 0.11391514\nIteration 23, loss = 0.11296898\nIteration 24, loss = 0.11183055\nIteration 25, loss = 0.11070522\nIteration 26, loss = 0.10945900\nIteration 27, loss = 0.10807801\nIteration 28, loss = 0.10653328\nIteration 29, loss = 0.10483565\nIteration 30, loss = 0.10299502\nIteration 31, loss = 0.10109678\nIteration 32, loss = 0.09868998\nIteration 33, loss = 0.09625805\nIteration 34, loss = 0.09356631\nIteration 35, loss = 0.09051455\nIteration 36, loss = 0.08720220\nIteration 37, loss = 0.08356481\nIteration 38, loss = 0.07956683\nIteration 39, loss = 0.07531904\nIteration 40, loss = 0.07057397\nIteration 41, loss = 0.06558903\nIteration 42, loss = 0.06042358\nIteration 43, loss = 0.05469307\nIteration 44, loss = 0.04914731\nIteration 45, loss = 0.04335916\nIteration 46, loss = 0.03773750\nIteration 47, loss = 0.03259090\nIteration 48, loss = 0.02785086\nIteration 49, loss = 0.02361280\nIteration 50, loss = 0.02024953\nIteration 51, loss = 0.01725262\nIteration 52, loss = 0.01477666\nIteration 53, loss = 0.01294427\nIteration 54, loss = 0.01150503\nIteration 55, loss = 0.01036948\nIteration 56, loss = 0.00950101\nIteration 57, loss = 0.00882902\nIteration 58, loss = 0.00840145\nIteration 59, loss = 0.00797252\nIteration 60, loss = 0.00769409\nIteration 61, loss = 0.00743825\nIteration 62, loss = 0.00731249\nIteration 63, loss = 0.00711639\nIteration 64, loss = 0.00700836\nIteration 65, loss = 0.00685081\nIteration 66, loss = 0.00670581\nIteration 67, loss = 0.00659854\nIteration 68, loss = 0.00655539\nIteration 69, loss = 0.00642937\nIteration 70, loss = 0.00637203\nIteration 71, loss = 0.00619710\nIteration 72, loss = 0.00614971\nIteration 73, loss = 0.00593245\nIteration 74, loss = 0.00579465\nIteration 75, loss = 0.00565489\nIteration 76, loss = 0.00553982\nIteration 77, loss = 0.00541618\nIteration 78, loss = 0.00532437\nIteration 79, loss = 0.00525496\nIteration 80, loss = 0.00514261\nIteration 81, loss = 0.00511693\nIteration 82, loss = 0.00499175\nIteration 83, loss = 0.00497192\nIteration 84, loss = 0.00491734\nIteration 85, loss = 0.00470830\nIteration 86, loss = 0.00461381\nIteration 87, loss = 0.00455140\nIteration 88, loss = 
0.00446001\nIteration 89, loss = 0.00440248\nIteration 90, loss = 0.00430629\nIteration 91, loss = 0.00427582\nIteration 92, loss = 0.00420453\nIteration 93, loss = 0.00413087\nIteration 94, loss = 0.00406708\nIteration 95, loss = 0.00399991\nIteration 96, loss = 0.00394088\nIteration 97, loss = 0.00390739\nIteration 98, loss = 0.00384822\nIteration 99, loss = 0.00379567\nIteration 100, loss = 0.00372736\nIteration 101, loss = 0.00364839\nIteration 102, loss = 0.00359586\nIteration 103, loss = 0.00356903\nIteration 104, loss = 0.00350804\nIteration 105, loss = 0.00346888\nIteration 106, loss = 0.00341325\nIteration 107, loss = 0.00338402\nIteration 108, loss = 0.00334556\nIteration 109, loss = 0.00331617\nIteration 110, loss = 0.00327267\nIteration 111, loss = 0.00322546\nIteration 112, loss = 0.00316221\nIteration 113, loss = 0.00311790\nIteration 114, loss = 0.00308636\nIteration 115, loss = 0.00305983\nIteration 116, loss = 0.00307628\nIteration 117, loss = 0.00302102\nIteration 118, loss = 0.00299013\nIteration 119, loss = 0.00294987\nIteration 120, loss = 0.00295874\nIteration 121, loss = 0.00292606\nIteration 122, loss = 0.00289585\nIteration 123, loss = 0.00288184\nIteration 124, loss = 0.00286175\nIteration 125, loss = 0.00284965\nIteration 126, loss = 0.00286328\nIteration 127, loss = 0.00283168\nIteration 128, loss = 0.00285682\nIteration 129, loss = 0.00279665\nIteration 130, loss = 0.00278923\nIteration 131, loss = 0.00278239\nIteration 132, loss = 0.00276704\nIteration 133, loss = 0.00275697\nIteration 134, loss = 0.00275890\nIteration 135, loss = 0.00275535\nIteration 136, loss = 0.00282983\nIteration 137, loss = 0.00275359\nIteration 138, loss = 0.00272988\nIteration 139, loss = 0.00269894\nIteration 140, loss = 0.00272954\nIteration 141, loss = 0.00268760\nIteration 142, loss = 0.00267833\nIteration 143, loss = 0.00267846\nIteration 144, loss = 0.00269751\nIteration 145, loss = 0.00266955\nIteration 146, loss = 0.00265685\nIteration 147, loss = 0.00268063\nIteration 148, loss = 0.00265680\nIteration 149, loss = 0.00263361\nIteration 150, loss = 0.00262043\nIteration 151, loss = 0.00262108\nIteration 152, loss = 0.00262173\nIteration 153, loss = 0.00263316\nIteration 154, loss = 0.00259775\nIteration 155, loss = 0.00258960\nIteration 156, loss = 0.00263879\nIteration 157, loss = 0.00259500\nIteration 158, loss = 0.00257932\nIteration 159, loss = 0.00259434\nIteration 160, loss = 0.00256704\nIteration 161, loss = 0.00258173\nIteration 162, loss = 0.00253499\nIteration 163, loss = 0.00253539\nIteration 164, loss = 0.00253766\nIteration 165, loss = 0.00255039\nIteration 166, loss = 0.00253523\nIteration 167, loss = 0.00253166\nIteration 168, loss = 0.00252858\nIteration 169, loss = 0.00253196\nIteration 170, loss = 0.00251232\nIteration 171, loss = 0.00252011\nIteration 172, loss = 0.00251934\nIteration 173, loss = 0.00249041\nIteration 174, loss = 0.00249983\nIteration 175, loss = 0.00249816\nIteration 176, loss = 0.00249634\nIteration 177, loss = 0.00249739\nIteration 178, loss = 0.00249030\nIteration 179, loss = 0.00246445\nIteration 180, loss = 0.00250390\nIteration 181, loss = 0.00247568\nIteration 182, loss = 0.00247083\nIteration 183, loss = 0.00247611\nIteration 184, loss = 0.00246227\nIteration 185, loss = 0.00245628\nIteration 186, loss = 0.00245701\nIteration 187, loss = 0.00246615\nIteration 188, loss = 0.00244919\nIteration 189, loss = 0.00245754\nIteration 190, loss = 0.00245784\nIteration 191, loss = 0.00243623\nIteration 192, loss = 0.00245733\nIteration 193, 
loss = 0.00245661\nIteration 194, loss = 0.00242044\nIteration 195, loss = 0.00241922\nIteration 196, loss = 0.00242431\nIteration 197, loss = 0.00242330\nIteration 198, loss = 0.00245886\nIteration 199, loss = 0.00242789\nIteration 200, loss = 0.00240292\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
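The TensorFlow section of the notebook above redraws the prediction map after every training step, while the Keras and scikit-learn sections only train their models. Below is a minimal sketch of how the same decision map could be drawn for the trained Keras `model`; it is not part of the original notebook and simply reuses the variables it defines (`_pX`, `_x0`, `_x1`, `res`, `X`, `Y`).

```python
import matplotlib.pyplot as plt

# Probability of class 1 for every point of the prediction mesh.
_pY = model.predict(_pX).reshape((res, res))

plt.figure(figsize=(8, 8))
plt.pcolormesh(_x0, _x1, _pY, cmap="coolwarm", vmin=0, vmax=1)

# Overlay the two rings of the dataset.
plt.scatter(X[Y == 0, 0], X[Y == 0, 1], c="skyblue")
plt.scatter(X[Y == 1, 0], X[Y == 1, 1], c="salmon")
plt.tick_params(labelbottom=False, labelleft=False)
plt.show()
```

For the scikit-learn model the same idea applies with `clf.predict(_pX)` in place of `model.predict(_pX)`.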
e7a2fcc28f326ae5734fe788faac597cad8ed2f8
37,814
ipynb
Jupyter Notebook
demo/monitor_counter/testing/test_plotting.ipynb
wi11dey/pylabnet
a6e3362f727c45aaa60e61496e858ae92e85574d
[ "MIT" ]
10
2020-01-07T23:28:49.000Z
2022-02-02T19:09:17.000Z
demo/monitor_counter/testing/test_plotting.ipynb
wi11dey/pylabnet
a6e3362f727c45aaa60e61496e858ae92e85574d
[ "MIT" ]
249
2019-12-28T19:38:49.000Z
2022-03-28T16:45:32.000Z
demo/monitor_counter/testing/test_plotting.ipynb
wi11dey/pylabnet
a6e3362f727c45aaa60e61496e858ae92e85574d
[ "MIT" ]
5
2020-11-17T19:45:10.000Z
2022-01-04T18:07:04.000Z
30.868571
8,192
0.361797
[ [ [ "import plotly.graph_objs as go\nimport numpy as np", "_____no_output_____" ], [ "fig = go.FigureWidget(data=[])", "_____no_output_____" ], [ "fig.add_scatter(\n x=[],\n y=[],\n mode='lines'\n)", "_____no_output_____" ], [ "fig.add_scatter(\n x=[],\n y=[],\n mode='lines'\n)", "_____no_output_____" ], [ "with fig.batch_update():\n fig.data[0].x = np.arange(100)\n fig.data[0].y = np.arange(100)*2", "_____no_output_____" ], [ "with fig.batch_update():\n fig.data[1].x = np.arange(100)\n fig.data[1].y = np.arange(100)*3", "_____no_output_____" ], [ "fig.data", "_____no_output_____" ], [ "foo=np.empty([0,0])", "_____no_output_____" ], [ "len(fig.data)", "_____no_output_____" ], [ "x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nx_rev = x[::-1]\n\ny1_upper = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\ny1_lower = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\ny1_lower = y1_lower[::-1]\ny1_upper + y1_lower", "_____no_output_____" ], [ "x+x_rev", "_____no_output_____" ], [ "np.hstack((np.array(y1_upper),np.array(y1_lower)))", "_____no_output_____" ], [ "fig=go.FigureWidget(data=[])\nfig.add_scatter(\n x=[],\n y=[],\n mode='lines',\n showlegend=False,\n fill='tozerox',\n line=dict(color='rgba(255,255,255,0)'),\n fillcolor='rgba(255,0,0,0.2)'\n)\nfig.add_scatter(\n x=[],\n y=[],\n mode='lines',\n)\nwith fig.batch_update():\n fig.data[0].x = x+x_rev\n fig.data[0].y = y1_upper + y1_lower\n y_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nwith fig.batch_update():\n fig.data[1].x = x\n fig.data[1].y = y_1\n fig.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
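The scratch cells above experiment with building a shaded band by concatenating `x` with its reverse and the upper bound with the reversed lower bound. A compact, self-contained sketch of that pattern is shown below; the data values and colours are purely illustrative.

```python
import numpy as np
import plotly.graph_objs as go

x = np.arange(10)
y = x.astype(float)
y_upper = y + 1
y_lower = y - 1

# Band: trace the upper bound left-to-right, then the lower bound right-to-left,
# and fill the closed polygon.
band = go.Scatter(
    x=np.concatenate([x, x[::-1]]),
    y=np.concatenate([y_upper, y_lower[::-1]]),
    fill="toself",
    fillcolor="rgba(255,0,0,0.2)",
    line=dict(color="rgba(255,255,255,0)"),
    showlegend=False,
)
line = go.Scatter(x=x, y=y, mode="lines", name="measurement")

fig = go.Figure(data=[band, line])
# fig.show()
```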
e7a322952f1c6fdbf06536fa0d711c30e3800974
738,561
ipynb
Jupyter Notebook
.ipynb_checkpoints/1-linear_regression_K-fold_area-checkpoint.ipynb
UCL-BENV0091-Antarctic/antarctic
70d9f65f9b935dd0234cd855ef6627c775703c80
[ "MIT" ]
null
null
null
.ipynb_checkpoints/1-linear_regression_K-fold_area-checkpoint.ipynb
UCL-BENV0091-Antarctic/antarctic
70d9f65f9b935dd0234cd855ef6627c775703c80
[ "MIT" ]
null
null
null
.ipynb_checkpoints/1-linear_regression_K-fold_area-checkpoint.ipynb
UCL-BENV0091-Antarctic/antarctic
70d9f65f9b935dd0234cd855ef6627c775703c80
[ "MIT" ]
null
null
null
3,711.361809
733,335
0.949783
[ [ [ "# Arctic Project in Linear Regression: K-fold + Y:Area", "_____no_output_____" ], [ "## Import libraries", "_____no_output_____" ] ], [ [ "library(MASS)\nlibrary(tidyverse)", "── \u001b[1mAttaching packages\u001b[22m ──────────────────────────────────────────────────── tidyverse 1.3.0 ──\n\n\u001b[32m✔\u001b[39m \u001b[34mggplot2\u001b[39m 3.3.2 \u001b[32m✔\u001b[39m \u001b[34mpurrr \u001b[39m 0.3.4\n\u001b[32m✔\u001b[39m \u001b[34mtibble \u001b[39m 3.0.4 \u001b[32m✔\u001b[39m \u001b[34mdplyr \u001b[39m 1.0.2\n\u001b[32m✔\u001b[39m \u001b[34mtidyr \u001b[39m 1.1.2 \u001b[32m✔\u001b[39m \u001b[34mstringr\u001b[39m 1.4.0\n\u001b[32m✔\u001b[39m \u001b[34mreadr \u001b[39m 1.4.0 \u001b[32m✔\u001b[39m \u001b[34mforcats\u001b[39m 0.5.0\n\n── \u001b[1mConflicts\u001b[22m ─────────────────────────────────────────────────────── tidyverse_conflicts() ──\n\u001b[31m✖\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mfilter()\u001b[39m masks \u001b[34mstats\u001b[39m::filter()\n\u001b[31m✖\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mlag()\u001b[39m masks \u001b[34mstats\u001b[39m::lag()\n\u001b[31m✖\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mselect()\u001b[39m masks \u001b[34mMASS\u001b[39m::select()\n\n" ] ], [ [ "## Load data", "_____no_output_____" ] ], [ [ "arctic <- read.csv(\"arctic_data.csv\",stringsAsFactors = F) ", "_____no_output_____" ] ], [ [ "## Data segmentation", "_____no_output_____" ] ], [ [ "folds <- cut(seq(1,nrow(arctic)), breaks = 10, labels = FALSE) ", "_____no_output_____" ] ], [ [ "## Prediction", "_____no_output_____" ] ], [ [ "prediction <- as.data.frame(\n sapply(1:10, FUN = function(i) # loop 1:K\n{\n testID <- which(folds == i, arr.ind = TRUE)\n test <- arctic[testID, ]\n train <- arctic[-testID, ] # set K-fold\n \n # print(test) # if needed\n \n # linear regression\n model <- lm(area~rainfall+daylight+population+CO2+ozone+ocean_temp+land_temp,data=train)\n \n # print(summary(model)) # if needed\n \n # prediction output\n predict(model,test)\n}))", "_____no_output_____" ] ], [ [ "## Table gathering and merging", "_____no_output_____" ] ], [ [ "pred_gather <- gather(data=prediction, key=\"fold\",value=\"prediction\",1:10)\nresult <- as.data.frame(cbind(arctic[,c(1,6)],pred_gather))", "_____no_output_____" ] ], [ [ "## Calculate value of R^2", "_____no_output_____" ] ], [ [ "result[\"R^2\"] <- ((result$area-result$prediction)^2)\nR_square <- sum(result$`R^2`)/490", "_____no_output_____" ] ], [ [ "## Plot line chart (Prediction vs True) with title, legend, and specific size of figure", "_____no_output_____" ] ], [ [ "{plot(result$observation,result$area,type ='l',ylim = c(0,1.5),lwd = '2',xlab = \"Date\", ylab = \"Value\",xaxt='n')\n lines(result$observation,result$prediction,lty=1,col='red',lwd = '2')\n axis(1,at=c(1,61,121,181,241,301,361,421,481),\n labels=c(\"Jan 1980\",\"Jan 1985\",\"Jan 1990\",\"Jan 1995\",\"Jan 2000\",\"Jan 2005\",\"Jan 2010\",\"Jan 2015\",\"Jan 2020\"))\n title(main = list(\"Linear Regression\", cex = 1.5, col = \"red\", font = 3))\n legend(\"topright\", inset=.05, c(\"Prediction\",\"True\"), bty = 'n', lty=c(1, 1), col=c(\"red\", \"black\"),lwd =c(2, 2))\n options(repr.plot.width=20, repr.plot.height=10)\n}", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
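The R notebook above builds the ten folds by hand and gathers the out-of-fold predictions before scoring them. For comparison, here is a minimal Python sketch of the same procedure with scikit-learn; the file name and column names are taken from the notebook, everything else is an assumption, and this is an illustration rather than a translation of the original analysis.

```python
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.metrics import mean_squared_error, r2_score

arctic = pd.read_csv("arctic_data.csv")
features = ["rainfall", "daylight", "population", "CO2", "ozone", "ocean_temp", "land_temp"]
X, y = arctic[features], arctic["area"]

# One out-of-fold prediction per row, mirroring the manual fold loop above
# (KFold without shuffling keeps the sequential fold assignment).
pred = cross_val_predict(LinearRegression(), X, y, cv=KFold(n_splits=10))

print("mean squared error:", mean_squared_error(y, pred))
print("R^2 (coefficient of determination):", r2_score(y, pred))
```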
e7a3261641db18d38db0f5895cfceaebd87715aa
477,696
ipynb
Jupyter Notebook
notebooks/ICUglycemia/Notebooks/2_0_ara_pairing_II.ipynb
aldo-arevalo/mimic-code
37c929216ba0119a2d8115475de8a43925b89be0
[ "MIT" ]
1
2021-04-17T10:17:02.000Z
2021-04-17T10:17:02.000Z
notebooks/ICUglycemia/Notebooks/2_0_ara_pairing_II.ipynb
aldo-arevalo/mimic-code
37c929216ba0119a2d8115475de8a43925b89be0
[ "MIT" ]
null
null
null
notebooks/ICUglycemia/Notebooks/2_0_ara_pairing_II.ipynb
aldo-arevalo/mimic-code
37c929216ba0119a2d8115475de8a43925b89be0
[ "MIT" ]
null
null
null
137.387403
138,546
0.750383
[ [ [ "<a href=\"https://colab.research.google.com/github/aldo-arevalo/mimic-code/blob/master/notebooks/ICUglycemia/Notebooks/2_0_ara_pairing_II.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Data extraction and Pairing of Insulin Inputs to Glucose Measurements in the ICU\n## Interactive notebook: Part II\n\nAuthors: [Aldo Robles Arévalo](mailto:[email protected]); Jason Maley; Lawrence Baker; Susana M. da Silva Vieira; João M. da Costa Sousa; Stan Finkelstein; Jesse D. Raffa; Roselyn Cristelle; Leo Celi; Francis DeMichele\n\n## Overview\n\nThis notebook contains the pairing of pre-processed glucose readings and insulin inputs from the Medical Information Mart for Intensive Care (MIMIC). The curation is detailed in *1.0-ara-data-curation-I.ipynb*.", "_____no_output_____", "## General instructions\nTo perform the queries, do not forget to specify your project ID that grants you access to the MIMIC database hosted in *BigQuery*. Substitute the `projectid` variable with the name of that project. In case you want to save the dataframes to your *BigQuery* project, uncomment and substitute `your_dataset` with the name of your *BigQuery* dataset and execute.\n\nYou can also save the created dataframes and figures in your Google Drive account. After mounting your drive, substitute the `base_dir` variable with the path of the folder where you want to save them. In this notebook that folder was named `Insulin Therapy ICU` and `MyDrive` is the parent folder. Figures are saved in the path *Insulin Therapy ICU/DataExtraction/MIMIC_III/Figures/*; you should change it according to your needs or create the folders with the exact names in your Google Drive.", "_____no_output_____", "## Pairing rules\nWith the insulin inputs and glucose readings merged in the *1.0-ara-data-curation-I.ipynb* notebook, we now continue with the **pairing** of an insulin event with a preceding glucose reading.\n\nThe goal is to link each insulin dose with the nearest glucose measurement. For this complex task, which is done in BigQuery, the following rules or assumptions are proposed:\n\n1. **Rule 1**: A glucose reading should precede a regular insulin administration by up to 90 minutes. The basis for this time window is derived from the diabetic ketoacidosis guidelines, which recommend measuring glucose values every 60 minutes while receiving an insulin infusion. An additional 30 minutes was added to this interval, 90 minutes in total, to account for the time it may take for providers to register the event. \n2. **Rule 2**: When a regular insulin event is not preceded, but instead followed, by a blood glucose measurement, this glucose reading is paired with the regular insulin administration if they are recorded within 90 minutes of each other.\n3. **Rule 3**: Sometimes a regular insulin infusion/bolus appears between two blood glucose measurements. In this case, the higher glucose value is paired with the regular insulin entry as long as they are entered within 90 minutes of each other.\n4. **Rule 4**: When a regular insulin bolus occurs very close to a regular insulin infusion rate, it is assumed that the patient was given a bolus and then commenced on an infusion. 
Both regular insulin entries are paired with the preceding blood glucose measurement, or the posterior glucose reading in case its value is higher than the preceding blood glucose and is entered within 90 minutes of the insulin dose.\n5. No glucose values below 90 mg/dL is paired with a subsequent regular insulin bolus or infusion. No clinician will treat this low of a blood glucose value with a regular insulin bolus or infusion.", "_____no_output_____" ], [ "# Code\n\n## Import dependencies and libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.colors as colors\nfrom scipy import stats\nfrom datetime import datetime\nimport time\nimport warnings\n\n# Below imports are used to print out pretty pandas dataframes\nfrom IPython.display import display, HTML\n\n# Imports for accessing data using Google BigQuery.\nfrom google.cloud import bigquery\nfrom google.colab import files, auth\nauth.authenticate_user()\nprint('Authenticated')\n%load_ext google.colab.data_table\n\n# Function to submit query to BigQuery\ndef q(query,projectid):\n client = bigquery.Client(location=\"US\",project=projectid)\n # Location must match that of the dataset(s) referenced in the query.\n query_job = client.query(query,\n location=\"US\",)\n return query_job.to_dataframe()\n\n#Rounding (for heatmap categories)\ndef myround(x, base):\n return int(base * round(float(x)/base))\n\ndef convert_to_datetime(df,time_cols):\n for t_col in time_cols:\n df[t_col] = pd.to_datetime(df[t_col])\n \n return(df)", "Authenticated\n" ], [ "from google.colab import drive\ndrive.mount('/content/gdrive')\n# Select your own folder\nbase_dir = \"/content/gdrive/My Drive/Insulin Therapy ICU\"", "_____no_output_____" ] ], [ [ "## Adjusted datasets\n\n* **Note 1**: Substitute `your_dataset` with the name of your dataset ID (Line 850) where you hosted/stored the tables created in the `1.0-ara-pairing-I.ipynb` notebook. \n* **Note 2**: The table `glucose_insulin_ICU` was created in `1.0-ara-pairing-I.ipynb` notebook. 
It is equivalent to `glucose_insulin_ICU.csv`.", "_____no_output_____" ] ], [ [ "# Import dataset adjusted or aligned\nprojectid = \"YOUR_PROJECT_ID\" # <-- Add your project ID\n\nquery =\"\"\"\nWITH pg AS(\n SELECT p1.*\n\n -- Column GLC_AL that would gather paired glucose values according to the proposed rules\n ,(CASE\n -- 1ST CLAUSE\n -- When previous and following rows are glucose readings, select the glucose value that \n -- has the shortest time distance to insulin bolus/infusion.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Posterior glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n AND ( -- Preceding glucose\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.GLC,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 2ND CLAUSE\n -- In case the posterior glucose reading is higher than the preceding\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose measurements\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n -- Preceding glucose\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose\n AND ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose values is higher than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,1) OVER(w) \n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.GLC,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 3RD CLAUSE\n -- When previous timestamp is an insulin bolus/infusion event\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above and regular insulin\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LAG(p1.INSULINTYPE,2) OVER(w)) IN('Short')\n -- One row above there is another insulin event\n AND (LAG(p1.EVENT,1) OVER(w)) 
IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a shortime or equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Preceding glucose 2 rows above is equal or greater than 90 min\n AND (LAG(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the preceding glucose value 2 rows above\n THEN (LAG(p1.GLC,2) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 4TH CLAUSE\n -- When previous timestamp is for Insulin bolus/infusion but posterior glucose\n -- is higher than the preceding glucose 2 rows above.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row above there is another regular insulin\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,1) OVER(w), p1.timer, MINUTE)) <= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is higher than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) < LEAD(p1.GLC,1) OVER(w)\n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.GLC,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 5TH CLAUSE\n -- When posterior timestamp is for Insulin bolus/infusion but preceding is glucose\n -- and there is a glucose 2 rows below.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a shorter OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, 
MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose reading is greater or equal to 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose value (2 rows below) is lower than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,2) OVER(w)\n -- Return the PRECEDING glucose (1 row above) measurement that gathers the previous conditions\n THEN (LAG(p1.GLC,1) OVER(w)) \n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 6TH CLAUSE\n -- When posterior glucose reading (2 rows below) is higher than preceding glucose.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another insulin event\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose (2 rows below) occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,2) OVER(w), p1.timer, MINUTE)) <= 90\n -- Preceding glucose 1 row above occures up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose value (2 rows below) is higher than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,2) OVER(w)\n -- Return the POSTERIOR glucose (2 rows below) measurement that gathers the previous conditions\n THEN (LEAD(p1.GLC,2) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 7TH CLAUSE\n -- When it is the last insulin dose and record in an ICU stay\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin, should be equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.GLC,1) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 8TH CLAUSE\n -- When there is no preceding glucose reading within 90 min, but there is a posterior \n -- glucose within 90 min\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 1 row below\n AND 
(LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin is greater than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > 90)\n -- Time-gap between posterior glucose and insulin is equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Return the POSTERIOR glucose (1 rows below) measurement that gathers the previous conditions\n THEN (LEAD(p1.GLC,1) OVER(w))\n\n \n -- Otherwise, return null value and finish CASE clause\n ELSE null END\n ) AS GLC_AL\n\n -- ---------------------------------------------------------------------------------------------\n -- Column GLCTIMER_AL that would gather the timestamp of the paired glucose reading\n , (CASE \n -- 1ST CLAUSE\n -- When previous and following rows are glucose readings,vselect the glucose value that \n -- has the shortest time distance to insulin bolus/infusion.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Posterior glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n AND ( -- Preceding glucose\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.TIMER,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 2ND CLAUSE\n -- In case the posterior glucose reading is higher than the preceding\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose measurements\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n -- Preceding glucose\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose\n AND ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose values is higher than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,1) OVER(w) \n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.TIMER,1) OVER(w))\n \n -- 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 3RD CLAUSE\n -- When previous timestamp is an insulin bolus/infusion event\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above and regular insulin\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LAG(p1.INSULINTYPE,2) OVER(w)) IN('Short')\n -- One row above there is another insulin event\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a shortime or equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Preceding glucose 2 rows above is equal or greater than 90 min\n AND (LAG(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the preceding glucose value 2 rows above\n THEN (LAG(p1.TIMER,2) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 4TH CLAUSE\n -- When previous timestamp is for Insulin bolus/infusion but posterior glucose\n -- is higher than the preceding glucose 2 rows above.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row above there is another regular insulin\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,1) OVER(w), p1.timer, MINUTE)) <= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is higher than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) < LEAD(p1.GLC,1) OVER(w)\n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.TIMER,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 5TH CLAUSE\n -- When posterior timestamp is for Insulin bolus/infusion but preceding is glucose\n -- and there is a glucose 2 rows below.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n 
-- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a shorter OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose reading is greater or equal to 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose value (2 rows below) is lower than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,2) OVER(w)\n -- Return the PRECEDING glucose (1 row above) measurement that gathers the previous conditions\n THEN (LAG(p1.TIMER,1) OVER(w)) \n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 6TH CLAUSE\n -- When posterior glucose reading (2 rows below) is higher than preceding glucose.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose (2 rows below) occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,2) OVER(w), p1.timer, MINUTE)) <= 90\n -- Preceding glucose 1 row above occures up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose value (2 rows below) is higher than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,2) OVER(w)\n -- Return the POSTERIOR glucose (2 rows below) measurement that gathers the previous conditions\n THEN (LEAD(p1.TIMER,2) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 7TH CLAUSE\n -- When it is the last insulin dose and record in an ICU stay\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin, should be equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Preceding glucose should be equal or greater than 
90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.TIMER,1) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 8TH CLAUSE\n -- When there is no preceding glucose reading within 90 min, but there is a posterior \n -- glucose within 90 min\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin is greater than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > 90)\n -- Time-gap between posterior glucose and insulin is equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Return the timestamp of the POSTERIOR glucose (1 rows below) measurement that gathers the \n -- previous conditions\n THEN (LEAD(p1.TIMER,1) OVER(w))\n\n -- Otherwise, return null value and finish CASE clause\n ELSE null END\n ) AS GLCTIMER_AL\n\n -- -----------------------------------------------------------------------------------------------\n -- Column GLCSOURCE_AL that would indicate whether is fingerstick or lab analyzer sample of \n -- the paired glucose reading\n , (CASE\n -- 1ST CLAUSE\n -- When previous and following rows are glucose readings,vselect the glucose value that \n -- has the shortest time distance to insulin bolus/infusion.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Posterior glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n AND ( -- Preceding glucose\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.GLCSOURCE,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 2ND CLAUSE\n -- In case the posterior glucose reading is higher than the preceding\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose measurements\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer OR equal time-gap to insulin than the posterior\n 
ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n -- Preceding glucose\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose\n AND ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose values is higher than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,1) OVER(w) \n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.GLCSOURCE,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 3RD CLAUSE\n -- When previous timestamp is an insulin bolus/infusion event\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above and regular insulin\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LAG(p1.INSULINTYPE,2) OVER(w)) IN('Short')\n -- One row above there is another insulin event\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a shortime or equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Preceding glucose 2 rows above is equal or greater than 90 min\n AND (LAG(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the preceding glucose value 2 rows above\n THEN (LAG(p1.GLCSOURCE,2) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 4TH CLAUSE\n -- When previous timestamp is for Insulin bolus/infusion but posterior glucose\n -- is higher than the preceding glucose 2 rows above.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row above there is another regular insulin\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,1) OVER(w), p1.timer, MINUTE)) <= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND 
ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is higher than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) < LEAD(p1.GLC,1) OVER(w)\n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.GLCSOURCE,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 5TH CLAUSE\n -- When posterior timestamp is for Insulin bolus/infusion but preceding is glucose\n -- and there is a glucose 2 rows below.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a shorter OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose reading is greater or equal to 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose value (2 rows below) is lower than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,2) OVER(w)\n -- Return the PRECEDING glucose (1 row above) measurement that gathers the previous conditions\n THEN (LAG(p1.GLCSOURCE,1) OVER(w)) \n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 6TH CLAUSE\n -- When posterior glucose reading (2 rows below) is higher than preceding glucose.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose (2 rows below) occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,2) OVER(w), p1.timer, MINUTE)) <= 90\n -- Preceding glucose 1 row above occures up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose value (2 rows below) is higher than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) 
OVER(w) < LEAD(p1.GLC,2) OVER(w)\n -- Return the POSTERIOR glucose (2 rows below) measurement that gathers the previous conditions\n THEN (LEAD(p1.GLCSOURCE,2) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 7TH CLAUSE\n -- When it is the last insulin dose and record in an ICU stay\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin, should be equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.GLCSOURCE,1) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 8TH CLAUSE\n -- When there is no preceding glucose reading within 90 min, but there is a posterior \n -- glucose within 90 min\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin is greater than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > 90)\n -- Time-gap between posterior glucose and insulin is equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Return the whether is figerstick or lab analyzer the POSTERIOR glucose (1 rows below) measurement \n -- that gathers the previous conditions\n THEN (LEAD(p1.GLCSOURCE,1) OVER(w))\n\n -- Otherwise, return null value and finish CASE clause\n ELSE null END\n ) AS GLCSOURCE_AL\n\n -- ---------------------------------------------------------------------------------------------\n -- Column RULE that indicateS which pairing rule is applied for the i^th case\n , (CASE\n -- 1ST CLAUSE\n -- When previous and following rows are glucose readings,vselect the glucose value that \n -- has the shortest time distance to insulin bolus/infusion.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Posterior glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n AND ( -- Preceding glucose\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding 
glucose\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN 1\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 2ND CLAUSE\n -- In case the posterior glucose reading is higher than the preceding\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding and posterior glucose measurements\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n -- Preceding glucose\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose\n AND ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose values is higher than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,1) OVER(w) \n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN 3\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 3RD CLAUSE\n -- When previous timestamp is an insulin bolus/infusion event\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above and regular insulin\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LAG(p1.INSULINTYPE,2) OVER(w)) IN('Short')\n -- One row above there is another insulin event\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a shortime or equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Preceding glucose 2 rows above is equal or greater than 90 min\n AND (LAG(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the preceding glucose value 2 rows above\n THEN 4\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 4TH CLAUSE\n -- When previous timestamp is for Insulin bolus/infusion but posterior glucose\n -- is higher than the preceding glucose 2 rows above.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above\n AND (LAG(p1.GLCSOURCE,2) 
OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row above there is another regular insulin\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,1) OVER(w), p1.timer, MINUTE)) <= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is higher than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) < LEAD(p1.GLC,1) OVER(w)\n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN 4\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 5TH CLAUSE\n -- When posterior timestamp is for Insulin bolus/infusion but preceding is glucose\n -- and there is a glucose 2 rows below.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a shorter OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose reading is greater or equal to 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose value (2 rows below) is lower than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,2) OVER(w)\n -- Return the PRECEDING glucose (1 row above) measurement that gathers the previous conditions\n THEN 4\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 6TH CLAUSE\n -- When posterior glucose reading (2 rows below) is higher than preceding glucose.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding 
glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose (2 rows below) occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,2) OVER(w), p1.timer, MINUTE)) <= 90\n -- Preceding glucose 1 row above occures up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90\n -- Posterior glucose value (2 rows below) is higher than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,2) OVER(w)\n -- Return the POSTERIOR glucose (2 rows below) measurement that gathers the previous conditions\n THEN 4\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 7TH CLAUSE\n -- When it is the last insulin dose and record in an ICU stay\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin, should be equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN 1\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 8TH CLAUSE\n -- When there is no preceding glucose reading within 90 min, but there is a posterior \n -- glucose within 90 min\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin is greater than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > 90)\n -- Time-gap between posterior glucose and insulin is equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 90)\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Return the Rule number applied\n THEN 2\n \n -- Otherwise, return null value and finish CASE clause\n ELSE null END\n ) AS RULE\n\n FROM `your_dataset.glucose_insulin_ICU` AS p1\n WINDOW w AS(PARTITION BY CAST(p1.HADM_ID AS INT64) ORDER BY p1.TIMER)\n)\n\n-- Create a colum that identifies the glucose readings were paired and are duplicated in pg\nSELECT pg.*\n, (CASE\n WHEN pg.GLCSOURCE_AL IS null \n AND (LEAD(pg.GLCTIMER_AL,1) OVER(x) = pg.GLCTIMER)\n THEN 1 \n WHEN pg.GLCSOURCE_AL IS null \n AND (LAG(pg.GLCTIMER_AL,1) OVER(x) = pg.GLCTIMER)\n AND LAG(endtime,1) OVER(x) IS NOT null \n THEN 1\n ELSE null END) AS Repeated\nFROM pg\nWINDOW x AS(PARTITION BY ICUSTAY_ID ORDER BY pg.timer)\n\"\"\"\n\nICUinputs_adjusted = q(query,projectid)\n\ndel query\n\n# Convert dtypes\nICUinputs_adjusted[[\"Repeated\",\"INFXSTOP\",\"RULE\"]] = ICUinputs_adjusted[\n 
[\"Repeated\",\"INFXSTOP\",\"RULE\"]].apply(pd.to_numeric, errors='coerce')\n\n# Remove values that are repeated due to the SQL query\nICUinputs_adjusted = ICUinputs_adjusted[ICUinputs_adjusted['Repeated']!=1]\n\n# Get statistics\ndisplay(HTML('<h5>Contains the following information</h5>'))\nprint(\"Entries: {}\".format(ICUinputs_adjusted.shape[0]))\nprint(\"Patients: {}\".format(ICUinputs_adjusted['SUBJECT_ID'].nunique()))\nprint(\"Hospital admissions: {}\".format(ICUinputs_adjusted['HADM_ID'].nunique()))\nprint('ICU stays: {}'.format(ICUinputs_adjusted['ICUSTAY_ID'].nunique()))\n\n# Rules\ndisplay(HTML('<h5>Frequency of the rules</h5>'))\nprint(ICUinputs_adjusted['RULE'].value_counts())", "_____no_output_____" ] ], [ [ "### Boluses of short-acting insulin", "_____no_output_____" ] ], [ [ "# Filtering for only short insulin boluses and all sources of glucose\nshort_BOL_adjusted = ICUinputs_adjusted[\n (ICUinputs_adjusted['INSULINTYPE']==\"Short\") & \n (ICUinputs_adjusted['EVENT'].str.contains('BOLUS'))].copy()\n\n# Get statistics\ndisplay(HTML('<h5>Contains the following information</h5>'))\nprint(\"Entries: {}\".format(short_BOL_adjusted.shape[0]))\nprint(\"Patients: {}\".format(short_BOL_adjusted['SUBJECT_ID'].nunique()))\nprint(\"Hospital admissions: {}\".format(short_BOL_adjusted['HADM_ID'].nunique()))\nprint('ICU stays: {}'.format(short_BOL_adjusted['ICUSTAY_ID'].nunique()))\n\ndisplay(short_BOL_adjusted[['INPUT','GLC_AL']].describe())\n\n# Save as CSV file, uncomment and modify as needed.\n# short_BOL_adjusted.to_csv(base_dir+\"/DataExtraction/BolusesCUR.csv\", index=False, \n# encoding='utf8', header = True)", "_____no_output_____" ], [ "# Aligned and not aligned entries\ndisplay(HTML('<h2>Boluses entries of short-acting insulin<h2>'))\nprint(\"Entries that were aligned: {}\".format(\n short_BOL_adjusted.shape[0]-short_BOL_adjusted.loc[np.isnan(\n short_BOL_adjusted.RULE),'RULE'].shape[0]))\nprint(\"Entries that weren't aligned: {}\".format(\n short_BOL_adjusted.loc[np.isnan(short_BOL_adjusted.RULE),'RULE'].shape[0]))\nprint(\"Non-paired percentage: {:0.2f}%\".format(\n short_BOL_adjusted.loc[np.isnan(\n short_BOL_adjusted.RULE),'RULE'].shape[0]/short_BOL_adjusted.shape[0]*100))", "_____no_output_____" ], [ "warnings.simplefilter('ignore')\n\n# From Part1 Notebook\nP99_bol_s = 18.0\n\n# Heatmap\nshort_BOL_heat = short_BOL_adjusted.dropna(subset=['GLC_AL']).copy()\nshort_BOL_heat['A'] = ((short_BOL_heat['GLCTIMER_AL'] - \n short_BOL_heat['STARTTIME'])/pd.Timedelta('1 minute'))*60\nshort_BOL_heat=short_BOL_heat.set_index('A')\n\n#Define the cell size on the heat map\nglc_base=25\nins_base=2\n\n#Define heatmap limits\nxlow=0\nxhigh=P99_bol_s\nylow=90\nyhigh=400\nxhigh-=ins_base\n\n#create categories for constructing the heatmap\nshort_BOL_heat['glc_cat']=(short_BOL_heat['GLC_AL'].apply(\n lambda x: myround(x, glc_base))/glc_base)\nshort_BOL_heat['ins_cat']=(short_BOL_heat['INPUT'].apply(\n lambda x: myround(x, ins_base))/ins_base)\n\n#create dataframe for the heatmap using pivot_table\nheat_df=pd.pivot_table(short_BOL_heat, values='ICUSTAY_ID', index=['glc_cat']\n, columns=['ins_cat'], aggfunc='count')\n#trim the heatmap dataframe based on the lmits specificed\nheat_df=heat_df.loc[ylow/glc_base:yhigh/glc_base:,xlow/ins_base:xhigh/ins_base:]\n\n#create labels for the x and y ticks\nheat_xtick=np.arange(xlow, xhigh+ins_base*2, ins_base)\nheat_ytick=np.arange(ylow, yhigh+glc_base*1, glc_base)\n\n#plot heatmap\nsns.set(style=\"ticks\", font_scale=1.2)\nfig, ax = plt.subplots(1, 1, 
figsize = (12, 12))\nax=sns.heatmap(heat_df, robust=True, annot=True, cmap=\"BuPu\", fmt=\"2.0f\"\n , xticklabels=heat_xtick, yticklabels=heat_ytick\n , norm=colors.PowerNorm(gamma=1./2.))\n\n#titles\nplt.title(f\"Glucose readings prior to a bolus of short-acting insulin\\n(n={int(heat_df.sum().values.sum())})\", \n fontsize=25)\nplt.ylabel(\"Blood glucose (mg/dL)\", fontsize=20)\nplt.xlabel(\"Insulin dose (U)\", fontsize=20)\n\n#invert axis and offset labels\nax.invert_yaxis()\nax.set_yticks(np.arange(0, ((yhigh-ylow)/glc_base)+1))\nax.set_xticks(np.arange(0, ((xhigh-xlow)/ins_base)+2))\n\n# Save figure, uncomment if needed.\nfig.savefig(base_dir+'/DataExtraction/ShortBolusHeatMap.png', bbox_inches='tight',\n dpi=fig.dpi)", "_____no_output_____" ] ], [ [ "### Infusions of short-acting insulin", "_____no_output_____" ] ], [ [ "warnings.simplefilter('default')\n\n# Filtering for only short insulin infusions and all sources of glucose\nshort_INF_adjusted = ICUinputs_adjusted[\n (ICUinputs_adjusted['INSULINTYPE']==\"Short\") & \n (ICUinputs_adjusted['EVENT'].str.contains('INFUSION'))].copy()\n\n# Get statistics\ndisplay(HTML('<h5>Counts</h5>'))\nprint(\"Entries: {}\".format(short_INF_adjusted.shape[0]))\nprint(\"Patients: {}\".format(short_INF_adjusted['SUBJECT_ID'].nunique()))\nprint(\"Hospital admissions: {}\".format(short_INF_adjusted['HADM_ID'].nunique()))\nprint('ICU stays: {}'.format(short_INF_adjusted['ICUSTAY_ID'].nunique()))\n\ndisplay(short_INF_adjusted[['INPUT_HRS','GLC_AL']].describe())", "_____no_output_____" ], [ "warnings.simplefilter('ignore')\n\n# Heatmap\nshort_INF_heat = short_INF_adjusted.dropna(subset=['GLC_AL']).copy()\nshort_INF_heat['A'] = ((short_INF_heat['GLCTIMER_AL'] - \n short_INF_heat['STARTTIME'])/pd.Timedelta('1 minute'))*60\nshort_INF_heat=short_INF_heat.set_index('A')\n\n#Define the cell size on the heat map\nglc_base=25\nins_base=2\n\n#Define heatmap limits\nxlow=0\nxhigh=P99_bol_s\nylow=90\nyhigh=400\nxhigh-=ins_base\n\n#create categories for constructing the heatmap\nshort_INF_heat['glc_cat']=(short_INF_heat['GLC_AL'].apply(\n lambda x: myround(x, glc_base))/glc_base)\nshort_INF_heat['ins_cat']=(short_INF_heat['INPUT'].apply(\n lambda x: myround(x, ins_base))/ins_base)\n\n#create dataframe for the heatmap using pivot_table\nheat_df_i=pd.pivot_table(short_INF_heat, values='ICUSTAY_ID', index=['glc_cat']\n, columns=['ins_cat'], aggfunc='count')\n#trim the heatmap dataframe based on the lmits specificed\nheat_df_i=heat_df_i.loc[ylow/glc_base:yhigh/glc_base:,xlow/ins_base:xhigh/ins_base:]\n\n#create labels for the x and y ticks\nheat_xtick=np.arange(xlow, xhigh+ins_base*2, ins_base)\nheat_ytick=np.arange(ylow, yhigh+glc_base*1, glc_base)\n\n#plot heatmap\nsns.set(style=\"ticks\", font_scale=1.2)\nfig, ax = plt.subplots(1, 1, figsize = (12, 12))\nax=sns.heatmap(heat_df_i, robust=True, annot=True, cmap=\"BuPu\", fmt=\"2.0f\"\n , xticklabels=heat_xtick, yticklabels=heat_ytick\n , norm=colors.PowerNorm(gamma=1./2.))\n\n#titles\nplt.title(f\"Glucose readings prior to infusions of short-acting insulin\\n(n={int(heat_df_i.sum().values.sum())})\", \n fontsize=25)\nplt.ylabel(\"Blood glucose (mg/dL)\", fontsize=20)\nplt.xlabel(\"Insulin dose (U/hr)\", fontsize=20)\n\n#invert axis and offset labels\nax.invert_yaxis()\nax.set_yticks(np.arange(0, ((yhigh-ylow)/glc_base)+1))\nax.set_xticks(np.arange(0, ((xhigh-xlow)/ins_base)+2))\n\n# Save figure, uncomment if needed.\nfig.savefig(base_dir+'/DataExtraction/ShortInfxnHeatMap.png', \n bbox_inches='tight',dpi=fig.dpi)", 
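Both heatmap cells above bin the paired glucose values and insulin doses with a `myround` helper that is defined earlier in the notebook and not shown in this section. A minimal sketch, assuming it simply rounds a value to the nearest multiple of `base` (so readings fall into 25 mg/dL glucose bins and 2 U dose bins):

# Hypothetical sketch of the assumed binning helper, not the notebook's original definition
def myround(x, base=5):
    # Snap x to the closest multiple of base, e.g. myround(143, 25) -> 150
    return base * round(float(x) / base)

Dividing the rounded value by `glc_base` or `ins_base`, as the cells above do, then yields an integer bin index for the pivot table.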
"_____no_output_____" ] ], [ [ "### Boluses of intermediate-acting insulin", "_____no_output_____" ] ], [ [ "warnings.simplefilter('default')\n\n# Filtering for only short insulin infusions and all sources of glucose\ninter_BOL_adjusted = ICUinputs_adjusted[\n (ICUinputs_adjusted['INSULINTYPE']==\"Intermediate\") & \n (ICUinputs_adjusted['EVENT'].str.contains('BOLUS'))].copy()\n\n# Get statistics\ndisplay(HTML('<h5>Contains the following information</h5>'))\nprint(\"Entries: {}\".format(inter_BOL_adjusted.shape[0]))\nprint(\"Patients: {}\".format(inter_BOL_adjusted['SUBJECT_ID'].nunique()))\nprint(\"Hospital admissions: {}\".format(inter_BOL_adjusted['HADM_ID'].nunique()))\nprint('ICU stays: {}'.format(inter_BOL_adjusted['ICUSTAY_ID'].nunique()))\n\ndisplay(inter_BOL_adjusted[['INPUT','GLC_AL']].describe())", "_____no_output_____" ], [ "# Aligned and not aligned entries\ndisplay(HTML('<h2>Boluses entries of intermediate-acting insulin<h2>'))\nprint(\"Entries that were aligned: {}\".format(\n inter_BOL_adjusted.shape[0]-inter_BOL_adjusted.loc[np.isnan(\n inter_BOL_adjusted.RULE),'RULE'].shape[0]))\nprint(\"Entries that weren't aligned: {}\".format(\n inter_BOL_adjusted.loc[np.isnan(inter_BOL_adjusted.RULE),'RULE'].shape[0]))\nprint(\"Non-paired percentage: {:0.2f}%\".format(\n inter_BOL_adjusted.loc[np.isnan(\n inter_BOL_adjusted.RULE),'RULE'].shape[0]/inter_BOL_adjusted.shape[0]*100))", "_____no_output_____" ] ], [ [ "### Boluses of long-acting insulin", "_____no_output_____" ] ], [ [ "warnings.simplefilter('default')\n\n# Filtering for only short insulin infusions and all sources of glucose\nlong_BOL_adjusted = ICUinputs_adjusted[\n (ICUinputs_adjusted['INSULINTYPE']==\"Long\") & \n (ICUinputs_adjusted['EVENT'].str.contains('BOLUS'))].copy()\n\n# Get statistics\ndisplay(HTML('<h5>Contains the following information</h5>'))\nprint(\"Entries: {}\".format(long_BOL_adjusted.shape[0]))\nprint(\"Patients: {}\".format(long_BOL_adjusted['SUBJECT_ID'].nunique()))\nprint(\"Hospital admissions: {}\".format(long_BOL_adjusted['HADM_ID'].nunique()))\nprint('ICU stays: {}'.format(long_BOL_adjusted['ICUSTAY_ID'].nunique()))\n\ndisplay(long_BOL_adjusted[['INPUT','GLC_AL']].describe())", "_____no_output_____" ], [ "# Aligned and not aligned entries\ndisplay(HTML('<h2>Boluses entries of long-acting insulin<h2>'))\nprint(\"Entries that were aligned: {}\".format(\n long_BOL_adjusted.shape[0]-long_BOL_adjusted.loc[np.isnan(\n long_BOL_adjusted.RULE),'RULE'].shape[0]))\nprint(\"Entries that weren't aligned: {}\".format(\n long_BOL_adjusted.loc[np.isnan(long_BOL_adjusted.RULE),'RULE'].shape[0]))\nprint(\"Non-paired percentage: {:0.2f}%\".format(\n long_BOL_adjusted.loc[np.isnan(\n long_BOL_adjusted.RULE),'RULE'].shape[0]/long_BOL_adjusted.shape[0]*100))", "_____no_output_____" ] ], [ [ "## Non-adjusted datasets\nTo complement this analysis, and to show the difference between implementing and not implementing the proposed rules, three cohorts were created: a) no pairing rules applied, b) paired a glucose reading recorded within 60 minutes of the insulin event instead of 90 minutes, and c) pairing a glucose reading.\n", "_____no_output_____" ], [ "### Scenario C\nGlucose readings CURATED and insulin inputs CURATED but NO RULES\n\n* **Note 1**: Add the name of your dataset hosted in BigQuery (Line 45). \n* **Note 2**: The table `glucose_insulin_ICU` was created in `1.0-ara-pairing-I.ipynb` notebook. 
It is equivalent to `glucose_insulin_ICU.csv`.", "_____no_output_____" ] ], [ [ "# GLUCOSE READINGS CURATED AND INSULIN INPUTS CURATED but no RULES\n\nquery = \"\"\"\nSELECT pg.*\n , (CASE\n WHEN pg.GLCSOURCE_AL IS null \n AND (LEAD(pg.GLCTIMER_AL,1) OVER(PARTITION BY pg.ICUSTAY_ID ORDER BY pg.TIMER) = pg.GLCTIMER)\n THEN 1 \n WHEN pg.GLCSOURCE_AL IS null \n AND (LAG(pg.GLCTIMER_AL,1) OVER(PARTITION BY pg.ICUSTAY_ID ORDER BY pg.timer) = pg.GLCTIMER)\n AND LAG(endtime,1) OVER(PARTITION BY ICUSTAY_ID ORDER BY timer) IS NOT null \n THEN 1\n ELSE null END) AS Repeated\n FROM(SELECT p1.* \n , (CASE\n -- Select the previous glucose value regardless the time distance\n WHEN p1.EVENT IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK') \n THEN (LAG(p1.GLC,1) OVER(w))\n ELSE null END\n ) AS GLC_AL\n\n , (CASE\n -- Select the previous glucose value regardless the time distance\n WHEN p1.EVENT IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK') \n THEN (LAG(p1.TIMER,1) OVER(w))\n ELSE null END\n ) AS GLCTIMER_AL\n\n , (CASE\n -- Select the previous glucose value regardless the time distance\n WHEN p1.EVENT IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK') \n THEN (LAG(p1.GLCSOURCE,1) OVER(w))\n ELSE null END\n ) AS GLCSOURCE_AL\n , (CASE\n -- Select the previous glucose value regardless the time distance\n WHEN p1.EVENT IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK') \n THEN 1\n ELSE null END\n ) AS RULE\n FROM `your_dataset.glucose_insulin_ICU` AS p1\n WINDOW w AS(PARTITION BY CAST(p1.HADM_ID AS INT64) ORDER BY p1.TIMER)\n ) AS pg\n\"\"\"\n\nglc_curALins_cur = q(query,projectid)\n\nqwe = glc_curALins_cur[(glc_curALins_cur['INSULINTYPE']==\"Short\") & \n (glc_curALins_cur['EVENT'].str.contains('BOLUS'))].copy()\n\ndisplay(HTML('<h4>Statistics for both glucose readings and insulin inputs CURATED</h4>'))\nprint(\"Total entries: {}\".format(glc_curALins_cur.shape[0]))\ndisplay(qwe[['INPUT','GLC_AL']].describe())\n\ndisplay(HTML('<h5>Contains the following information (only for short-acting)</h5>'))\nprint(\"Boluses of short-acting insulin: {}\".format(qwe.shape[0]))\nprint(\"Patients: {} out of {}\".format(qwe['SUBJECT_ID'].nunique(),\n glc_curALins_cur['SUBJECT_ID'].nunique()))\nprint(\"Hospital admissions: {}\".format(qwe['HADM_ID'].nunique()))\nprint('ICU stays: {}'.format(qwe['ICUSTAY_ID'].nunique()))\n\n# Rules\ndisplay(HTML('<h5>Frequency of the rules</h5>'))\nprint(qwe['RULE'].value_counts())\n\n# Save as CSV file, uncomment and modify as needed.\n# qwe.to_csv(base_dir+\"/DataExtraction/BolusesCUR_nr.csv\", index=False,\n# encoding='utf8', header = True)\n\ndel query,qwe", "/usr/lib/python3.6/json/decoder.py:355: ResourceWarning: unclosed <ssl.SSLSocket fd=63, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('172.28.0.2', 52706), raddr=('74.125.142.95', 443)>\n obj, end = self.scan_once(s, idx)\n/usr/lib/python3.6/json/decoder.py:355: ResourceWarning: unclosed <ssl.SSLSocket fd=64, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('172.28.0.2', 52660), raddr=('74.125.20.95', 443)>\n obj, end = self.scan_once(s, idx)\n" ] ], [ [ "### Scenario B\nGlucose reading CURATED and inulin inputs CURATED paired with rules (60 min)\n\n* **Note 1**: Substitute `your_dataset` with the name of your dataset ID (Line 849) where you hosted/stored the tables created in 
the `1.0-ara-pairing-I.ipynb` notebook. \n* **Note 2**: The table `glucose_insulin_ICU` was created in `1.0-ara-pairing-I.ipynb` notebook. It is equivalent to `glucose_insulin_ICU.csv`.", "_____no_output_____" ] ], [ [ "# Import dataset adjusted or aligned with 60 min\n\nquery =\"\"\"\nWITH pg AS(\n SELECT p1.*\n\n -- Column GLC_AL that would gather paired glucose values according to the proposed rules\n ,(CASE\n -- 1ST CLAUSE\n -- When previous and following rows are glucose readings, select the glucose value that \n -- has the shortest time distance to insulin bolus/infusion.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Posterior glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n AND ( -- Preceding glucose\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.GLC,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 2ND CLAUSE\n -- In case the posterior glucose reading is higher than the preceding\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose measurements\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n -- Preceding glucose\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose\n AND ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose values is higher than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,1) OVER(w) \n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.GLC,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 3RD CLAUSE\n -- When previous timestamp is an insulin bolus/infusion event\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above and regular insulin\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LAG(p1.INSULINTYPE,2) OVER(w)) IN('Short')\n -- One row above 
there is another insulin event\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a shortime or equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Preceding glucose 2 rows above is equal or greater than 90 min\n AND (LAG(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the preceding glucose value 2 rows above\n THEN (LAG(p1.GLC,2) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 4TH CLAUSE\n -- When previous timestamp is for Insulin bolus/infusion but posterior glucose\n -- is higher than the preceding glucose 2 rows above.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row above there is another regular insulin\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,1) OVER(w), p1.timer, MINUTE)) <= 60\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is higher than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) < LEAD(p1.GLC,1) OVER(w)\n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.GLC,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 5TH CLAUSE\n -- When posterior timestamp is for Insulin bolus/infusion but preceding is glucose\n -- and there is a glucose 2 rows below.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a shorter OR equal time-gap to insulin than the 
posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose reading is greater or equal to 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose value (2 rows below) is lower than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,2) OVER(w)\n -- Return the PRECEDING glucose (1 row above) measurement that gathers the previous conditions\n THEN (LAG(p1.GLC,1) OVER(w)) \n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 6TH CLAUSE\n -- When posterior glucose reading (2 rows below) is higher than preceding glucose.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another insulin event\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose (2 rows below) occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,2) OVER(w), p1.timer, MINUTE)) <= 60\n -- Preceding glucose 1 row above occures up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose value (2 rows below) is higher than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,2) OVER(w)\n -- Return the POSTERIOR glucose (2 rows below) measurement that gathers the previous conditions\n THEN (LEAD(p1.GLC,2) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 7TH CLAUSE\n -- When it is the last insulin dose and record in an ICU stay\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin, should be equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.GLC,1) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 8TH CLAUSE\n -- When there is no preceding glucose reading within 90 min, but there is a posterior \n -- glucose within 90 min\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 
'FINGERSTICK')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin is greater than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > 90)\n -- Time-gap between posterior glucose and insulin is equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Return the POSTERIOR glucose (1 rows below) measurement that gathers the previous conditions\n THEN (LEAD(p1.GLC,1) OVER(w))\n\n \n -- Otherwise, return null value and finish CASE clause\n ELSE null END\n ) AS GLC_AL\n\n -- ---------------------------------------------------------------------------------------------\n -- Column GLCTIMER_AL that would gather the timestamp of the paired glucose reading\n , (CASE \n -- 1ST CLAUSE\n -- When previous and following rows are glucose readings,vselect the glucose value that \n -- has the shortest time distance to insulin bolus/infusion.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Posterior glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n AND ( -- Preceding glucose\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.TIMER,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 2ND CLAUSE\n -- In case the posterior glucose reading is higher than the preceding\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose measurements\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n -- Preceding glucose\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose\n AND ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose values is higher than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,1) OVER(w) \n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.TIMER,1) OVER(w))\n \n -- 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 3RD CLAUSE\n -- When previous timestamp is an insulin bolus/infusion event\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above and regular insulin\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LAG(p1.INSULINTYPE,2) OVER(w)) IN('Short')\n -- One row above there is another insulin event\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a shortime or equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Preceding glucose 2 rows above is equal or greater than 90 min\n AND (LAG(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the preceding glucose value 2 rows above\n THEN (LAG(p1.TIMER,2) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 4TH CLAUSE\n -- When previous timestamp is for Insulin bolus/infusion but posterior glucose\n -- is higher than the preceding glucose 2 rows above.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row above there is another regular insulin\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,1) OVER(w), p1.timer, MINUTE)) <= 60\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is higher than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) < LEAD(p1.GLC,1) OVER(w)\n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.TIMER,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 5TH CLAUSE\n -- When posterior timestamp is for Insulin bolus/infusion but preceding is glucose\n -- and there is a glucose 2 rows below.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n 
-- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a shorter OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose reading is greater or equal to 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose value (2 rows below) is lower than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,2) OVER(w)\n -- Return the PRECEDING glucose (1 row above) measurement that gathers the previous conditions\n THEN (LAG(p1.TIMER,1) OVER(w)) \n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 6TH CLAUSE\n -- When posterior glucose reading (2 rows below) is higher than preceding glucose.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose (2 rows below) occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,2) OVER(w), p1.timer, MINUTE)) <= 60\n -- Preceding glucose 1 row above occures up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose value (2 rows below) is higher than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,2) OVER(w)\n -- Return the POSTERIOR glucose (2 rows below) measurement that gathers the previous conditions\n THEN (LEAD(p1.TIMER,2) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 7TH CLAUSE\n -- When it is the last insulin dose and record in an ICU stay\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin, should be equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Preceding glucose should be equal or greater than 
90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.TIMER,1) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 8TH CLAUSE\n -- When there is no preceding glucose reading within 90 min, but there is a posterior \n -- glucose within 90 min\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin is greater than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > 90)\n -- Time-gap between posterior glucose and insulin is equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Return the timestamp of the POSTERIOR glucose (1 rows below) measurement that gathers the \n -- previous conditions\n THEN (LEAD(p1.TIMER,1) OVER(w))\n\n -- Otherwise, return null value and finish CASE clause\n ELSE null END\n ) AS GLCTIMER_AL\n\n -- -----------------------------------------------------------------------------------------------\n -- Column GLCSOURCE_AL that would indicate whether is fingerstick or lab analyzer sample of \n -- the paired glucose reading\n , (CASE\n -- 1ST CLAUSE\n -- When previous and following rows are glucose readings,vselect the glucose value that \n -- has the shortest time distance to insulin bolus/infusion.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Posterior glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n AND ( -- Preceding glucose\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.GLCSOURCE,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 2ND CLAUSE\n -- In case the posterior glucose reading is higher than the preceding\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose measurements\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer OR equal time-gap to insulin than the posterior\n 
ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n -- Preceding glucose\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose\n AND ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose values is higher than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,1) OVER(w) \n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.GLCSOURCE,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 3RD CLAUSE\n -- When previous timestamp is an insulin bolus/infusion event\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above and regular insulin\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LAG(p1.INSULINTYPE,2) OVER(w)) IN('Short')\n -- One row above there is another insulin event\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a shortime or equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Preceding glucose 2 rows above is equal or greater than 90 min\n AND (LAG(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the preceding glucose value 2 rows above\n THEN (LAG(p1.GLCSOURCE,2) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 4TH CLAUSE\n -- When previous timestamp is for Insulin bolus/infusion but posterior glucose\n -- is higher than the preceding glucose 2 rows above.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row above there is another regular insulin\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,1) OVER(w), p1.timer, MINUTE)) <= 60\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND 
ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is higher than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) < LEAD(p1.GLC,1) OVER(w)\n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN (LEAD(p1.GLCSOURCE,1) OVER(w))\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 5TH CLAUSE\n -- When posterior timestamp is for Insulin bolus/infusion but preceding is glucose\n -- and there is a glucose 2 rows below.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a shorter OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose reading is greater or equal to 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose value (2 rows below) is lower than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,2) OVER(w)\n -- Return the PRECEDING glucose (1 row above) measurement that gathers the previous conditions\n THEN (LAG(p1.GLCSOURCE,1) OVER(w)) \n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 6TH CLAUSE\n -- When posterior glucose reading (2 rows below) is higher than preceding glucose.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose (2 rows below) occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,2) OVER(w), p1.timer, MINUTE)) <= 60\n -- Preceding glucose 1 row above occures up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose value (2 rows below) is higher than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) 
OVER(w) < LEAD(p1.GLC,2) OVER(w)\n -- Return the POSTERIOR glucose (2 rows below) measurement that gathers the previous conditions\n THEN (LEAD(p1.GLCSOURCE,2) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 7TH CLAUSE\n -- When it is the last insulin dose and record in an ICU stay\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin, should be equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN (LAG(p1.GLCSOURCE,1) OVER(w))\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 8TH CLAUSE\n -- When there is no preceding glucose reading within 90 min, but there is a posterior \n -- glucose within 90 min\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin is greater than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > 90)\n -- Time-gap between posterior glucose and insulin is equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Return the whether is figerstick or lab analyzer the POSTERIOR glucose (1 rows below) measurement \n -- that gathers the previous conditions\n THEN (LEAD(p1.GLCSOURCE,1) OVER(w))\n\n -- Otherwise, return null value and finish CASE clause\n ELSE null END\n ) AS GLCSOURCE_AL\n\n -- ---------------------------------------------------------------------------------------------\n -- Column RULE that indicateS which pairing rule is applied for the i^th case\n , (CASE\n -- 1ST CLAUSE\n -- When previous and following rows are glucose readings,vselect the glucose value that \n -- has the shortest time distance to insulin bolus/infusion.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding and posterior glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Posterior glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n AND ( -- Preceding glucose\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding 
glucose\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN 1\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 2ND CLAUSE\n -- In case the posterior glucose reading is higher than the preceding\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding and posterior glucose measurements\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Time-gap between glucose and insulin, should be equal or less than 90 minutes\n -- Preceding glucose\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose\n AND ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose values is higher than the preceding glucose\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,1) OVER(w) \n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN 3\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 3RD CLAUSE\n -- When previous timestamp is an insulin bolus/infusion event\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above and regular insulin\n AND (LAG(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND (LAG(p1.INSULINTYPE,2) OVER(w)) IN('Short')\n -- One row above there is another insulin event\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a shortime or equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Preceding glucose 2 rows above is equal or greater than 90 min\n AND (LAG(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose value is lower than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) >= LEAD(p1.GLC,1) OVER(w)\n -- Return the preceding glucose value 2 rows above\n THEN 4\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 4TH CLAUSE\n -- When previous timestamp is for Insulin bolus/infusion but posterior glucose\n -- is higher than the preceding glucose 2 rows above.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 2 rows above\n AND (LAG(p1.GLCSOURCE,2) 
OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row above there is another regular insulin\n AND (LAG(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LAG(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n AND ( -- Preceding glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,1) OVER(w), p1.timer, MINUTE)) <= 60\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Posterior glucose value is higher than the preceding glucose 2 rows above\n AND LAG(p1.GLC,2) OVER(w) < LEAD(p1.GLC,1) OVER(w)\n -- Return the POSTERIOR glucose measurement that gathers the previous conditions\n THEN 4\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 5TH CLAUSE\n -- When posterior timestamp is for Insulin bolus/infusion but preceding is glucose\n -- and there is a glucose 2 rows below.\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding glucose has a shorter OR equal time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Preceding glucose reading is greater or equal to 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Preceding glucose 2 rows above occured up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose value (2 rows below) is lower than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) >= LEAD(p1.GLC,2) OVER(w)\n -- Return the PRECEDING glucose (1 row above) measurement that gathers the previous conditions\n THEN 4\n \n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 6TH CLAUSE\n -- When posterior glucose reading (2 rows below) is higher than preceding glucose.\n \n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 2 rows below\n AND (LEAD(p1.GLCSOURCE,2) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- One row BELOW there is another regular insulin\n AND (LEAD(p1.EVENT,1) OVER(w)) IN('BOLUS_INYECTION','BOLUS_PUSH','INFUSION')\n AND (LEAD(p1.INSULINTYPE,1) OVER(w)) IN('Short')\n AND ( -- Preceding 
glucose has a longer time-gap to insulin than the posterior\n ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > \n ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,2) OVER(w)), p1.TIMER, MINUTE))\n )\n -- Posterior glucose reading is greater or equal to 90 mg/dL\n AND (LEAD(p1.GLC,2) OVER(w)) >= 90\n -- Posterior glucose (2 rows below) occurs within 90 minutes\n AND ABS(TIMESTAMP_DIFF(LEAD(p1.timer,2) OVER(w), p1.timer, MINUTE)) <= 60\n -- Preceding glucose 1 row above occures up to 90 minutes before\n AND ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60\n -- Posterior glucose value (2 rows below) is higher than the preceding glucose 1 row above\n AND LAG(p1.GLC,1) OVER(w) < LEAD(p1.GLC,2) OVER(w)\n -- Return the POSTERIOR glucose (2 rows below) measurement that gathers the previous conditions\n THEN 4\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 7TH CLAUSE\n -- When it is the last insulin dose and record in an ICU stay\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Identify preceding glucose reading\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin, should be equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Preceding glucose should be equal or greater than 90 mg/dL\n AND (LAG(p1.GLC,1) OVER(w)) >= 90\n -- Return the PRECEDING glucose measurement that gathers the previous conditions\n THEN 1\n\n -- %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n -- 8TH CLAUSE\n -- When there is no preceding glucose reading within 90 min, but there is a posterior \n -- glucose within 90 min\n\n -- Identify an insulin event either bolus or infusion\n WHEN p1.EVENT IN('BOLUS_INYECTION', 'BOLUS_PUSH', 'INFUSION')\n -- Regular insulin or short-acting\n AND p1.INSULINTYPE IN('Short')\n -- Identify preceding glucose reading 1 row above\n AND (LAG(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Identify posterior glucose reading 1 row below\n AND (LEAD(p1.GLCSOURCE,1) OVER(w)) IN('BLOOD', 'FINGERSTICK')\n -- Time-gap between preceding glucose and insulin is greater than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LAG(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) > 90)\n -- Time-gap between posterior glucose and insulin is equal or less than 90 minutes\n AND (ABS(TIMESTAMP_DIFF((LEAD(p1.TIMER,1) OVER(w)), p1.TIMER, MINUTE)) <= 60)\n -- Posterior glucose should be equal or greater than 90 mg/dL\n AND (LEAD(p1.GLC,1) OVER(w)) >= 90\n -- Return the Rule number applied\n THEN 2\n \n -- Otherwise, return null value and finish CASE clause\n ELSE null END\n ) AS RULE\n\n FROM `your_dataset.glucose_insulin_ICU` AS p1\n WINDOW w AS(PARTITION BY CAST(p1.HADM_ID AS INT64) ORDER BY p1.TIMER)\n)\n\n-- Create a colum that identifies the glucose readings were paired and are duplicated in pg\nSELECT pg.*\n, (CASE\n WHEN pg.GLCSOURCE_AL IS null \n AND (LEAD(pg.GLCTIMER_AL,1) OVER(x) = pg.GLCTIMER)\n THEN 1 \n WHEN pg.GLCSOURCE_AL IS null \n AND (LAG(pg.GLCTIMER_AL,1) OVER(x) = pg.GLCTIMER)\n AND LAG(endtime,1) OVER(x) IS NOT null \n THEN 1\n ELSE null END) AS Repeated\nFROM pg\nWINDOW x AS(PARTITION BY ICUSTAY_ID ORDER BY pg.timer)\n\"\"\"\n\nICU60min_adjusted = q(query,projectid)\n\ndel query\n\n# Convert dtypes\nICU60min_adjusted[[\"Repeated\",\"INFXSTOP\",\"RULE\"]] = ICU60min_adjusted[\n 
[\"Repeated\",\"INFXSTOP\",\"RULE\"]].apply(pd.to_numeric, errors='coerce')\n\n# Remove values that are repeated due to the SQL query\nICU60min_adjusted = ICU60min_adjusted[ICU60min_adjusted['Repeated']!=1]\n\n# Get statistics\ndisplay(HTML('<h5>Contains the following information</h5>'))\nprint(\"Entries: {}\".format(ICU60min_adjusted.shape[0]))\nprint(\"Patients: {}\".format(ICU60min_adjusted['SUBJECT_ID'].nunique()))\nprint(\"Hospital admissions: {}\".format(ICU60min_adjusted['HADM_ID'].nunique()))\nprint('ICU stays: {}'.format(ICU60min_adjusted['ICUSTAY_ID'].nunique()))\n\n# Rules\ndisplay(HTML('<h5>Frequency of the rules</h5>'))\nprint(ICU60min_adjusted['RULE'].value_counts())", "/usr/lib/python3.6/json/decoder.py:355: ResourceWarning: unclosed <ssl.SSLSocket fd=80, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('172.28.0.2', 52770), raddr=('74.125.20.95', 443)>\n obj, end = self.scan_once(s, idx)\n/usr/lib/python3.6/json/decoder.py:355: ResourceWarning: unclosed <ssl.SSLSocket fd=79, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('172.28.0.2', 53808), raddr=('74.125.195.95', 443)>\n obj, end = self.scan_once(s, idx)\n" ] ], [ [ "#### Boluses of short-acting insulin", "_____no_output_____" ] ], [ [ "# Filtering for only short insulin boluses and all sources of glucose\nshort_BOL_60 = ICU60min_adjusted[(ICU60min_adjusted['INSULINTYPE']==\"Short\") & \n (ICU60min_adjusted['EVENT'].str.contains('BOLUS'))].copy()\n\n# Get statistics\ndisplay(HTML('<h5>Contains the following information</h5>'))\nprint(\"Entries: {}\".format(short_BOL_60.shape[0]))\nprint(\"Patients: {}\".format(short_BOL_60['SUBJECT_ID'].nunique()))\nprint(\"Hospital admissions: {}\".format(short_BOL_60['HADM_ID'].nunique()))\nprint('ICU stays: {}'.format(short_BOL_60['ICUSTAY_ID'].nunique()))\n\ndisplay(short_BOL_60[['INPUT','GLC_AL']].describe())\n\n# Save as CSV file, uncomment and modify as needed.\n# short_BOL_60.to_csv(base_dir+\"/DataExtraction/BolusesCUR_60.csv\", index=False,\n# encoding='utf8', header = True)", "_____no_output_____" ] ], [ [ "# End", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7a32a7cf550cfc34a1689d87f573f341fd15ef7
8,277
ipynb
Jupyter Notebook
loop.ipynb
dineshyadav2020/P_W_Files
dbaa2084fdc2a00b8ca81e6c463ef56a8aa16371
[ "MIT" ]
null
null
null
loop.ipynb
dineshyadav2020/P_W_Files
dbaa2084fdc2a00b8ca81e6c463ef56a8aa16371
[ "MIT" ]
null
null
null
loop.ipynb
dineshyadav2020/P_W_Files
dbaa2084fdc2a00b8ca81e6c463ef56a8aa16371
[ "MIT" ]
null
null
null
22.801653
322
0.42757
[ [ [ "# Loops and Conditions", "_____no_output_____" ], [ "loops provides the methods of iteration while condition allows or blocks the code execution when specified condition\nis meet.", "_____no_output_____" ], [ "For Loop and while Loop", "_____no_output_____" ] ], [ [ "L = ['apple', 'banana','kite','cellphone']\nfor item in L:\n print(item)", "apple\nbanana\nkite\ncellphone\n" ], [ "range(5), range(5,100), sum(range(100))", "_____no_output_____" ], [ "L=[]\nfor k in range(10):\n L.append(10*k)\nL", "_____no_output_____" ], [ "D = {}\nfor i in range(5):\n for j in range(5):\n if i == j :\n D.update({(i,j) : 10*i+j})\n elif i!=j :\n D.update({(i,j): 100*i+j})\nprint(D)", "{(0, 0): 0, (0, 1): 1, (0, 2): 2, (0, 3): 3, (0, 4): 4, (1, 0): 100, (1, 1): 11, (1, 2): 102, (1, 3): 103, (1, 4): 104, (2, 0): 200, (2, 1): 201, (2, 2): 22, (2, 3): 203, (2, 4): 204, (3, 0): 300, (3, 1): 301, (3, 2): 302, (3, 3): 33, (3, 4): 304, (4, 0): 400, (4, 1): 401, (4, 2): 402, (4, 3): 403, (4, 4): 44}\n" ], [ "for i, item in enumerate(['apple', 'banana','kite','cellphone']):\n print(\"The\",i,\"th element is:\", item)", "The 0 th element is: apple\nThe 1 th element is: banana\nThe 2 th element is: kite\nThe 3 th element is: cellphone\n" ], [ "A=[10*k**2+5*k+1 for k in range(10)]\nprint(A)", "[1, 16, 51, 106, 181, 276, 391, 526, 681, 856]\n" ], [ "AA=[[10*x**2+5*y+1 for x in range(3)] for y in range(3)]\nprint(AA)", "[[1, 11, 41], [6, 16, 46], [11, 21, 51]]\n" ], [ "for i in range(3):\n for j in range(3):\n print(\"The\", \"(\",i,\",\",j,\")\",\"th element is: \", AA[i][j])", "The ( 0 , 0 ) th element is: 1\nThe ( 0 , 1 ) th element is: 11\nThe ( 0 , 2 ) th element is: 41\nThe ( 1 , 0 ) th element is: 6\nThe ( 1 , 1 ) th element is: 16\nThe ( 1 , 2 ) th element is: 46\nThe ( 2 , 0 ) th element is: 11\nThe ( 2 , 1 ) th element is: 21\nThe ( 2 , 2 ) th element is: 51\n" ], [ "i=0\nwhile i<5:\n print( i, \"th turn\")\n i = i+1", "0 th turn\n1 th turn\n2 th turn\n3 th turn\n4 th turn\n" ], [ "for i in range(10):\n print(i)\n if i == 3:\n break", "0\n1\n2\n3\n" ], [ "import random as random\nfor i in range(10):\n r = random.uniform(1,10)\n if r<2 and r>0:\n print(\"It is smaller than 2 and greater than 1\",\"|\",r)\n elif r<4 and r>2:\n print(\"It is smaller than 4 and greater than 2\",\"|\",r)\n elif r<6 and r>4:\n print(\"It is smaller tha 6 and greater than 4\",\"|\",r)\n elif r<8 and r>6:\n print(\"It is smaller than 8 and greate than 6\",\"|\",r)\n elif r<10 and r>8:\n print(\"It is smaller than 10 and greater than 8\",\"|\",r)\n ", "It is smaller than 8 and greate than 6 | 7.776816896801124\nIt is smaller than 10 and greater than 8 | 8.900894365576073\nIt is smaller than 10 and greater than 8 | 8.303922292796068\nIt is smaller than 2 and greater than 1 | 1.9805026109354515\nIt is smaller than 4 and greater than 2 | 2.642984031672224\nIt is smaller than 4 and greater than 2 | 2.3577860630219387\nIt is smaller than 4 and greater than 2 | 2.707988363790994\nIt is smaller tha 6 and greater than 4 | 4.950782515186212\nIt is smaller tha 6 and greater than 4 | 4.474577783554499\nIt is smaller than 8 and greate than 6 | 6.260661038808822\n" ], [ "s = 0\nfor i in range(1000+1):\n s = s+i\ns", "_____no_output_____" ], [ "s = 0\nLE = []\nfor i in range(1001):\n if i%2 ==0:\n LE.append(i)\n s= s+i\ns, sum(LE)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a3344b2ec296b33dd1563ba8c4c85be48422b7
36,349
ipynb
Jupyter Notebook
regress_WNBA.ipynb
firebrettbrown/bbgm
31d41cef2175be452793866e1119150518936120
[ "MIT" ]
11
2019-06-25T17:20:48.000Z
2020-07-04T03:09:17.000Z
regress_WNBA.ipynb
firebrettbrown/bbgm
31d41cef2175be452793866e1119150518936120
[ "MIT" ]
null
null
null
regress_WNBA.ipynb
firebrettbrown/bbgm
31d41cef2175be452793866e1119150518936120
[ "MIT" ]
10
2019-06-28T06:26:28.000Z
2022-01-17T18:12:36.000Z
37.473196
234
0.482159
[ [ [ "import json\nimport os\nimport sys\nimport fnmatch\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n", "_____no_output_____" ], [ "df = pd.read_csv('big_stat.csv')#pd.read_csv('BBGM_League_7_all_seasons_Average_Stats.csv')\ndf = df[(df.G*df.MP > 100)]\ndf.shape", "_____no_output_____" ], [ "y = df.iloc[:,-15:]\nX = df.iloc[:,11:-17]\ny = y[(X['AST%'] >0) & (X['AST%'] < 100)]\nX = X[(X['AST%'] >0) & (X['AST%'] < 100)]\nX['MP'] = df.MP\nX['Hgt'] = df['Hgt']\ndf.columns\n", "_____no_output_____" ], [ "stat_list = ['FG','FGA','3P',\"3PA\",'FT','FTA',\\\n 'ORB','DRB','TRB','AST','TOV','STL',\"Blk\",\\\n 'PF','PTS']\nfor name in stat_list:\n den = np.maximum(1,df.MP)\n #if name in ['OWS','DWS']:\n # den = den*df['G']\n #X[name] = df[name]\n X[name + 'p36'] = 36* df[name]/den\n X[name + 'p100'] = X[name + 'p36']*4/3\n\nX['3PtP'] = (2/(1+np.exp(-X['3PAp100']))-1)*X['3P%']/100\nX['Creation'] = X['ASTp100']*0.1843+(X['PTSp100']+X['TOVp100'])*0.0969-2.3021*X['3PtP']+0.0582*(X['ASTp100']*(X['PTSp100']+X['TOVp100'])*X['3PtP'] )-1.1942\nX['Load'] = (X['ASTp100']-(0.38*X['Creation'])*0.75)+X['FGAp100']+X['FTAp100']*0.44+X['Creation']+X['TOVp100']\nX['cTOV'] = X['TOVp100']/X['Load']\nX['DPM'] = X['Blkp100']*0.802+X['DRBp100']*0.42-4.7-0.07551*X['PFp100']+1.597019*X['STLp100']-0.26385*X['TOVp100']\nX['OPM'] = -8.57647+0.6111*X['PTSp100']-0.33918*(0.44*X['FTAp100']+X['FGAp100'])+0.440814*X['FTAp100']+0.379745*X['3PAp100']+0.634044*X['ASTp100']+0.77827*X['ORBp100']-1.08855*X['TOVp100']+0.26262*X['STLp100']\nX['BPM'] = X['OPM'] + X['DPM']\nX['Age'] = df['Age']\n\n#X['PassP'] = ((X['ASTp100']-(0.38*X['Creation']))*0.752+ X['Creation'] + X['TOVp100']) ** 0.67\n#'OPM','DPM','cTOV','Load'#stat_list[:-2]+\nX = X[[_ for _ in X.columns if '%A' in _ or _[-1]=='r' or 'FGP' in _ or 'p36' in _ or _ in (['OPM','BPM','DPM','Creation','cTOV','Load','Age','MP'])]]\n\n\nreplacement_filter = (df.Salary > 0.5) & (df.Salary < 1.0)\nreplacement_player_mean_bs = X[replacement_filter].mean()\nreplacement_player_std_bs = X[replacement_filter].std()\nreplacement_player_cov_bs = X[replacement_filter].cov()\n\nreplacement_player_mean_r = y[replacement_filter].mean()\nreplacement_player_std_r = y[replacement_filter].std()\nreplacement_player_cov_r = y[replacement_filter].cov()\n\n\nreplacement_player_mean_r\n\nX.columns\n", "_____no_output_____" ], [ "np.round(replacement_player_std_r).astype(np.int)\n#_ = plt.hist(X['OWSp36'],150)", "_____no_output_____" ], [ "from sklearn import neural_network\nfrom sklearn import linear_model\nfrom sklearn import preprocessing\nfrom sklearn import feature_selection\nfrom sklearn import multioutput\nfrom sklearn import ensemble\nfrom sklearn import svm", "_____no_output_____" ], [ "fexp = preprocessing.PolynomialFeatures(degree=2,interaction_only=True)\nscalerX = preprocessing.RobustScaler()\nscalery = preprocessing.StandardScaler()\nprescale_X = scalerX.fit_transform(X)\nprescale_y = scalery.fit_transform(y)\nprescale_X = fexp.fit_transform(prescale_X)", "_____no_output_____" ], [ "trials = 1\nts = []\nfor i in range(trials):\n #clf = neural_network.MLPRegressor((36,5,24,36),'tanh',solver='adam',max_iter=1000)\n #clf = neural_network.MLPRegressor((),'identity',solver='lbfgs',alpha=5e2,tol=1e-9)\n #clf = multioutput.MultiOutputRegressor(linear_model.SGDRegressor(penalty='l2',alpha=5e2,eta0=1e-6,tol=1e-12,max_iter=50,verbose=True))\n clf = multioutput.MultiOutputRegressor(linear_model.ElasticNet(alpha=5e-3))\n #clf = 
ensemble.ExtraTreesRegressor(8,criterion='mae',max_depth=3,verbose=1)\n #clf = multioutput.MultiOutputRegressor(svm.SVR())\n clf.fit(prescale_X,prescale_y)\n yt = scalery.inverse_transform(clf.predict(prescale_X))\n err = np.linalg.norm(yt-y)\n ts.append((err,clf))", "_____no_output_____" ], [ "#np.array([est.alpha_ for est in clf.estimators_]).mean()\n#0.007250734067011631-7e-3", "_____no_output_____" ], [ "ts = sorted(ts)[::1] # why not the biggest error\nprint(ts[0][0])\nclf = ts[0][1]", "_____no_output_____" ], [ "col_names = X.columns\ncol_names = fexp.get_feature_names(X.columns)\n\nfor i,c in enumerate(y.columns):\n coeffs = clf.estimators_[i].coef_ \n v = np.argsort(abs(coeffs))[::-1]\n print(c)\n coeffs2 = [(coeffs[i2],col_names[i2]) for i2 in v[:10]]\n #for v,n in sorted(coeffs2,reverse=True):\n # print('{:.2f} * {} + '.format(v,n),end='')\n print('| Variable | Coeff |')\n print('|----------|-------|')\n for v,n in sorted(coeffs2,reverse=True):\n print('|{:25s}|{:.2f}|'.format(n,v))\n #for v,n in sorted(coeffs2,reverse=True):\n # print('\\t{:25s}\\t{:.2f}'.format(n,v))\n", "_____no_output_____" ], [ "GEN_YEAR = 2019\ntyear = [GEN_YEAR]\n#if tyear[0] < 1980:\n# print(\"MY PARSING OF THE TABLES IS WRONG WITHOUT the 2PA/3PA TRACKS\")\n# raise\n\nCURRENT_YEAR = 2019\nall_tables = {}\nfor ty in tyear:\n all_tables[ty] = np.load('wnba_{}.pkl'.format(ty))\nteams = all_tables[tyear[0]].keys()\n", "_____no_output_____" ], [ "df.MP.max()", "_____no_output_____" ], [ "player_stats = {k:{} for k in tyear}\ntable_columns = {}\nrosters = {}\nfor ty in tyear:\n tables = all_tables[ty]\n\n for team in tables:\n team_tables = tables[team]\n for table_name in team_tables:\n if table_name in ['draft-rights','team_and_opponent','conf','name','logo']:\n continue\n table = team_tables[table_name].fillna(0)\n #print(table_name)\n #print(table.index)\n for row in table.itertuples():\n name = row[0]\n name = name.replace('\\xa0\\xa0',' ').replace('.','')\n if name == 'Team Totals':\n continue\n nsplit = name.split(' ')\n if nsplit[-1] in ['Jr.','Sr.','I','II','III',\"IV\",'(TW)']:\n name = ' '.join(nsplit[:-1])\n rosters[name] = team\n player_table = player_stats[ty].get(name,{})\n player_row = player_table.get(table_name,[])\n player_row = player_row + [row]\n\n player_table[table_name] = player_row\n player_stats[ty][name] = player_table\n #if name == 'Dennis Smith Jr.' 
or name == 'Luka Doncic':\n # print(player_stats[ty][name],team)\n table_columns[table_name] = table.columns", "_____no_output_____" ], [ "table_mask = {}\nfor table in table_columns:\n table_mask[table] = [_.strip() !='' for _ in table_columns[table] ]\n table_columns[table] = [_ for _ in table_columns[table] if _.strip() != '']\n#for player in player_stats:\n# for table_in in player_stats[player]:\n# if 'on_off' in table_in or 'salaries' in table_in:\n# continue\n# if len(player_stats[player][table_in]) > 1:\n# pass\n #print(player,table_in,'MP' in player_stats[player][table_in][0]._fields)\n #print(player_stats[player][table_in][0])", "_____no_output_____" ], [ "# add playoff data to normal data\nif False:\n for ty in tyear:\n for player in player_stats[ty]:\n for table_in in player_stats[ty][player]:\n tableN = table_in.split('_')\n tableS = '_'.join(tableN[1:])\n if 'playoffs'==tableN[0] and not table_in in ['playoffs_pbp']:\n #print(table_in)\n if tableS in player_stats[ty][player]:\n player_stats[ty][player][tableS] += player_stats[ty][player][table_in]", "_____no_output_____" ], [ "for ty in tyear:\n for player in player_stats[ty]:\n for tt in player_stats[ty][player]:\n if tt in ['team_stats','team_stats_conf']:\n continue\n new_rows = []\n for tablet in player_stats[ty][player][tt]:\n vector = [_ if _ != '' else '0.0' for _ in tablet[1:]]\n vector = [(float(_.replace('%',''))/100 if type(_) == str and'%' in _ else _) for _ in vector]\n if 'on_off' in tt:\n vector = vector[1:]\n if 'contracts' in tt:\n vector = vector[1:-2]\n if tt in ['salaries2','contracts']:\n vector = [_.replace(',','').replace('$','') for _ in vector]\n try:\n v2 = np.array(vector).astype(np.float)\n except:\n v2 = vector\n new_rows.append(vector)\n a = np.array(new_rows)\n\n if 'MP' in table_columns[tt] and not tt in ['pbp','on_off','on_off_p']:\n try:\n a = a.astype(np.float)\n except:\n a = list(a)\n a[0] = np.array([float(_) for _ in a[0]])\n a[1] = np.array([float(_) for _ in a[1]])\n a = np.array(a)\n try:\n mins = a[:,table_columns[tt].index('MP')].reshape((-1,1))\n new_rows = ((a.T @ mins)/mins.sum()).T\n a = new_rows\n except:\n print(tt,a.shape,player,a,mins)\n pass\n\n player_stats[ty][player][tt] = a\n", "_____no_output_____" ], [ "#player_stats[2019]['Brandon Clarke']\n#player_stats[2019]\nfrom unidecode import unidecode\nwnba_ros = pd.read_csv('wnba_rosters.csv')\nwnba_ros.columns\nunidecode\n\n\nwnba_roster = {}\nfor row in wnba_ros.iterrows():\n name = unidecode(row[1]['name'])\n wnba_roster[name] = dict(row[1])\n hgt = wnba_roster[name]['hgt']\n hgt2 = hgt.split('-')\n if hgt == \"Jun-00\":\n wnba_roster[name]['hgt'] = '6-0'\n else:\n hgt2[1] = str({'May':5,'Jun':6}[hgt2[1]])\n wnba_roster[name]['hgt'] = hgt2[1] + '-' + hgt2[0]\n if wnba_roster[name]['exp'] == 'R':\n wnba_roster[name]['exp'] =0 \n wnba_roster[name]['exp'] = int(wnba_roster[name]['exp'])\n wnba_roster[name]['pos'] = wnba_roster[name]['pos'].replace('-','')", "_____no_output_____" ], [ "yr = wnba_roster['Kelsey Plum']['dob'].split('-')[-1]\nif yr[0] > '5':\n yr = '19' + yr\nelse:\n yr = '20' + yr\nyr", "_____no_output_____" ], [ "player_vectors = []\nplayer_names = []\nplayer_years = []\nplayer_scales = []\nplayer_heights = []\nplayer_confs = []\nplayer_composites = []\ncompNames = ['CUsage','CPassing','CTurnovers','CRim','CLowPost','CMidRange','C3Point','CFreeThrow','CRebound','CSteal','CBlock','CFouling','CDrawFoul']\n\nfor ty in tyear:\n for name in player_stats[ty]:\n #name = 'Jimmy Butler'\n #name = 'Ben Simmons'\n stats = 
player_stats[ty][name]\n if not 'wnba_totals' in stats:\n if ty == GEN_YEAR and name in player_stats[tyear[-1]] and 'wnba_totals' in player_stats[tyear[-1]][name]:\n stats = player_stats[tyear[-1]][name]\n else:\n continue\n if not name in wnba_roster:\n print(name)\n continue\n \n ht = [int(_) for _ in wnba_roster[name]['hgt'].split('-')]\n hgt = ht[0]*12 + ht[1]\n yr = wnba_roster['Kelsey Plum']['dob'].split('-')[-1]\n if yr[0] > '5':\n yr = '19' + yr\n else:\n yr = '20' + yr\n \n if ty >= 1980: # post 3pt era\n if 'wnba_advanced' in stats:\n d = {\n 'AtRimFGP':np.maximum(stats['wnba_advanced'][0][4],stats['wnba_per_game'][0][8])*100, #ts\n 'LowPostFGP':np.minimum(stats['wnba_advanced'][0][4],stats['wnba_per_game'][0][8])*100, #12\n 'MidRangeFGP':stats['wnba_per_game'][0][8]*100*.8}\n else:\n continue\n d['Age']= CURRENT_YEAR - int(yr)\n\n #print(name)\n MP = np.maximum(1,np.nan_to_num(stats['wnba_totals'][0][2]))\n\n try:\n #for stat in X.columns:\n d.update({'Hgt': hgt,\n 'FG%':stats['wnba_totals'][0][table_columns['wnba_per_game'].index('FG%')]*100,\n 'FG': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('FG')],\n 'FGA': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('FGA')],\n '3P': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('3P')],\n '3PA': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('3PA')],\n 'FT': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('FT')],\n 'FTA': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('FTA')],\n 'ORB': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('ORB')],\n 'DRB': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('DRB')],\n 'TRB': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('TRB')],\n 'AST': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('AST')],\n 'STL': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('STL')],\n 'Blk': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('BLK')],\n 'TOV': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('TOV')],\n 'PF': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('PF')],\n 'PTS': stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('PTS')],\n 'OWS':stats['wnba_advanced'][0][table_columns['wnba_advanced'].index('OWS')],\n 'DWS':stats['wnba_advanced'][0][table_columns['wnba_advanced'].index('DWS')],\n '3P%':stats['wnba_totals'][0][table_columns['wnba_totals'].index('3P%')]*100,\n 'MP':stats['wnba_per_game'][0][table_columns['wnba_per_game'].index('MP')]*48/40,\n 'FT%':stats['wnba_totals'][0][table_columns['wnba_totals'].index('FT%')]*100,\n 'TS%':stats['wnba_advanced'][0][table_columns['wnba_advanced'].index('TS%')]*100,\n '3PAr':stats['wnba_advanced'][0][table_columns['wnba_advanced'].index('3PAr')]*100,\n 'FTr':stats['wnba_advanced'][0][table_columns['wnba_advanced'].index('FTr')]*100,\n 'USG%':stats['wnba_advanced'][0][table_columns['wnba_advanced'].index('USG%')],\n 'FGp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('FG')],\n 'FGAp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('FGA')],\n '3Pp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('3P')],\n '3PAp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('3PA')],\n 'FTp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('FT')],\n 'FTAp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('FTA')],\n 
'ORBp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('ORB')],\n 'DRBp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('DRB')],\n 'TRBp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('TRB')],\n 'ASTp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('AST')],\n 'TOVp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('TOV')],\n 'STLp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('STL')],\n 'Blkp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('BLK')],\n 'PFp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('PF')],\n 'PTSp36':stats['wnba_per_minute'][0][table_columns['wnba_per_minute'].index('PTS')],\n 'PER':stats['wnba_advanced'][0][table_columns['wnba_advanced'].index('PER')],})\n for k in list(d.keys()):\n if 'p36' in k:\n d[k[:-3]+'p100'] = d[k]*4/3\n\n d['3PtP'] = (2/(1+np.exp(-d['3PAp100']))-1)*d['3P%']/100\n d['Creation'] = d['ASTp100']*0.1843+(d['PTSp100']+d['TOVp100'])*0.0969-2.3021*d['3PtP']+0.0582*(d['ASTp100']*(d['PTSp100']+d['TOVp100'])*d['3PtP'] )-1.1942\n d['Load'] = (d['ASTp100']-(0.38*d['Creation'])*0.75)+d['FGAp100']+d['FTAp100']*0.44+d['Creation']+d['TOVp100']\n d['cTOV'] = d['TOVp100']/d['Load']\n d['DPM'] = d['Blkp100']*0.802+d['DRBp100']*0.42-4.7-0.07551*d['PFp100']+1.597019*d['STLp100']-0.26385*d['TOVp100']\n d['OPM'] = -8.57647+0.6111*d['PTSp100']-0.33918*(0.44*d['FTAp100']+d['FGAp100'])+0.440814*d['FTAp100']+0.379745*d['3PAp100']+0.634044*d['ASTp100']+0.77827*d['ORBp100']-1.08855*d['TOVp100']+0.26262*d['STLp100']\n d['BPM'] = d['OPM']+d['DPM']\n\n\n #if np.isnan(mp_scale):\n # print(name,ty,MP)\n player_scales.append(MP)\n #d['PassP'] = ((d['ASTp100']-(0.38*d['Creation']))*0.752+ d['Creation'] + d['TOVp100']) ** 0.67\n #if name == 'Marvin Bagley':\n # print(MP)\n # pprint.pprint({k:v for k,v in d.items() if not (('36' in k) or ('100' in k))} )\n player_vectors.append([d[stat] for stat in X.columns])\n player_heights.append(d['Hgt'])\n player_names.append(name)\n player_years.append(ty)\n #player_confs.append(rosters_conf[name])\n except KeyError:\n raise\n pass # player\n\nX.columns", "_____no_output_____" ], [ "#len(player_stats[1964]['Jerry West']['per_game'][0]),len(player_stats[1966]['Jerry West']['per_game'][0]),name\n#d,player_names[-1],X.columns,len(player_stats[1952]['Andy Phillip']['per_game'][0])\n#len(player_stats[1975]['Bob McAdoo']['per_game'][0])#[0][3] \n#stat_list\nn =['G', 'GS', 'MP', 'FG', 'FGA', 'FG%', '2P', '2PA', '2P%', '3P', '3PA', '3P%', 'FT', 'FTA', 'FT%', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']\n#player_stats[2019]['James Harden']['pbp'][0][12]#[14]\n#for i in range(len(player_stats[2018]['Marvin Bagley']['per_min'][0])):\n# print(n[i], i,player_stats[2018]['Marvin Bagley']['per_min'][0][i])\n", "_____no_output_____" ], [ "first_n = len([yr for yr in player_years if yr == tyear[0]])\ngen_FA = len(teams)*1\nfirst_n,len(teams),gen_FA\n", "_____no_output_____" ], [ "Xn = np.nan_to_num(np.array(player_vectors))\n", "_____no_output_____" ], [ "#Xn = np.nan_to_num(np.array(player_vectors))\n# tuned this to get roughly 8-12 players at 70 or above. 
Which seemed like normal for a league\nscalerX2 = preprocessing.RobustScaler(quantile_range=(30.0, 55.0))\nscalerX2.fit(Xn[:first_n])\n#scalerX2 = scalerX\nXn_s =scalerX2.transform(np.nan_to_num(Xn))\nXn_fs = fexp.transform(np.nan_to_num(Xn_s))\npredict = clf.predict(Xn_fs)\nratings = np.nan_to_num(scalery.inverse_transform(predict))\nHGT_PRED = 0.25\n\nratings[:,0] = HGT_PRED*np.array(player_heights) + (1-HGT_PRED)*np.maximum(ratings[:,0],np.array(player_heights))\n\n# if we want to scale players down based on minutes played to replacement level\nif True:\n c = np.array(player_scales).reshape((-1,1))\n c = np.tanh(c/50) # basically 1 by 100 MP\n\n ratings[:Xn.shape[0]] = ratings[:Xn.shape[0]]*c + (1-c)*np.repeat(np.array(replacement_player_mean_r).reshape((-1,1)),Xn.shape[0],1).T", "_____no_output_____" ], [ "#for n,v in zip(X.columns,Xn_s[player_names.index('Draymond Green')]):\n# print(n,v)", "_____no_output_____" ], [ "\n#ratings[:,0]\n#table_columns['per_minute']#,table_columns['advanced'].index('0-3')\n#for i,t in enumerate(player_stats[name]['advanced'][0]):\n# print(i,t)", "_____no_output_____" ], [ "#player_vectors[player_names.index('Joel Embiid')][list(X.columns).index('OWSp36')],X.columns[list(X.columns).index('OWSp36')]\n#player_stats[2019]['Joel Embiid']['advanced'][0][12],player_stats[2019]['Joel Embiid']['per_game'][0][3]", "_____no_output_____" ], [ "X.mean(0)-Xn.mean(0)", "_____no_output_____" ], [ "#for n,v in zip(X.columns,Xn.max(0)):\n# print(n,v)\nall_tables[tyear[0]]['LVA']['wnba_totals']", "_____no_output_____" ], [ "tables.keys()", "_____no_output_____" ], [ "trades = {\n 'Coates, Alaina':'MIN',\n 'Talbot, Stephanie':'MIN',\n 'Lavender, Jantel':\"CHI\",\n 'McGee-Stafford, Imani': 'DAL',\n 'Cambage, Liz': \"LVA\",\n 'Harrison, Isabelle': 'DAL',\n 'Jefferson, Moriah': 'DAL',\n}\ntrades2 = {}\nfor name in trades:\n norig = name\n nsplit = [_.rstrip().strip() for _ in name.split(',')]\n name = nsplit[1] +' '+ nsplit[0]\n trades2[name] = trades[norig]\n\nrosters = {}\n\nfor team in all_tables[tyear[0]]:\n for name in all_tables[tyear[0]][team]['wnba_totals'].index:\n name = name.replace('\\xa0\\xa0',' ').replace('.','')\n nsplit = name.split(' ')\n if nsplit[-1] in ['Jr.','Sr.','I','II','III',\"IV\",'(TW)','Jr','Sr']:\n name = ' '.join(nsplit[:-1])\n if name in trades2:\n if trades2[name] != team:\n continue\n rosters[name] = team\n #print(team,)\n#print(rosters)", "_____no_output_____" ], [ "base = json.load(open('wnba_teams.json'))\n\n\nCONFS = []\n\nCONFS.append({'did':0, 'cid':0, 'name':'Eastern'})\n\nCONFS.append({'did':1, 'cid':1, 'name':'Western'})\n\nbase['gameAttributes'] = [{'key':'numGamesPlayoffSeries', 'value':[1,1,5,5]}]\nbase['gameAttributes'].append({'key':'divs', 'value': CONFS})\nbase['gameAttributes'].append({'key':'numGames', 'value': 34})\n\n", "_____no_output_____" ], [ "base", "_____no_output_____" ], [ "tids = {}\nfor t in base['teams']:\n tids[t['abbrev']] = t['tid']\n\nbase['startingSeason'] = tyear[0]\n\ny_keys = [_.lower() for _ in y.columns]\n\ny_map = { 'hgt': 'hgt',\n 'stre': 'str',\n 'spd': 'spd',\n 'jmp': 'jmp',\n 'endu': 'end',\n 'ins': 'ins',\n 'dnk': 'dnk',\n 'ft': 'ft.1',\n 'fg': '2pt',\n 'tp': '3pt',\n 'diq': 'diq',\n 'oiq': 'oiq',\n 'drb': 'drb',\n 'pss': 'pss',\n 'reb': 'reb' }", "_____no_output_____" ], [ "if False:\n ri = 0\n _ = plt.hist(yt[:,ri],100,normed=True,alpha=0.5,label='predicted')\n _ = plt.hist(y.iloc[:,ri],100,normed=True,alpha=0.5,label='labels')\n _ = 
plt.hist(ratings[:,ri],100,normed=True,alpha=0.5,label='dataset')\n plt.xlim(-100,100)\n plt.legend()\nelif False:\n plt.figure(figsize=(7*3,6*3))\n\n for ri in range(len(X.columns)):\n #ri = 13\n #plt.figure()\n plt.subplot(6,7,ri+1)\n\n _ = plt.hist(Xn[:,ri],100,density=True,alpha=0.5,label='NBA')\n _ = plt.hist(X.iloc[:,ri],100,density=True,alpha=0.5,label='BBGM')\n plt.legend()\n plt.title(X.columns[ri])# + ' ' + str(ri))\n plt.tight_layout()\n #plt.xlim(-100,100)\nfrom collections import OrderedDict\n\ndef write_roman(num):\n\n roman = OrderedDict()\n roman[1000] = \"M\"\n roman[900] = \"CM\"\n roman[500] = \"D\"\n roman[400] = \"CD\"\n roman[100] = \"C\"\n roman[90] = \"XC\"\n roman[50] = \"L\"\n roman[40] = \"XL\"\n roman[10] = \"X\"\n roman[9] = \"IX\"\n roman[5] = \"V\"\n roman[4] = \"IV\"\n roman[1] = \"I\"\n\n def roman_num(num):\n for r in roman.keys():\n x, y = divmod(num, r)\n yield roman[r] * x\n num -= (r * x)\n if num <= 0:\n break\n\n return \"\".join([a for a in roman_num(num)])", "_____no_output_____" ], [ "if gen_FA > 0:\n player_names_f = player_names + [\"Free Agent{}\".format(write_roman(i)) for i in range(gen_FA)]\n player_years_f = player_years + [GEN_YEAR for i in range(gen_FA)]\n # scale them down, we don't want a bunch of amazing replacements\n MEAN_S = 0.95\n STD_S = 0.25\n rp_ratings = np.random.multivariate_normal(MEAN_S*replacement_player_mean_r,STD_S*replacement_player_cov_r,size=(gen_FA))\n ratings_f = np.vstack([ratings,rp_ratings])\nelse:\n player_names_f = player_names\n player_years_f = player_years\n ratings_f = ratings", "_____no_output_____" ], [ "import pprint\nimport copy\nplayers = []\npp = pprint.PrettyPrinter()\nfor i,name in enumerate(player_names_f):\n py = player_years_f[i]\n name = player_names_f[i]\n sname = name.split(' ')\n new_player = {}\n new_player['firstName'] = sname[0]\n new_player['lastName'] = ' '.join(sname[1:])\n year_gap = 0\n if name in player_stats[py]:\n tid = tids[rosters[name]]\n \n try:\n new_player['tid'] = tid\n ht = [int(_) for _ in wnba_roster[name]['hgt'].split('-')]\n hgt = ht[0]*12 + ht[1]\n yr = wnba_roster[name]['dob'].split('-')[-1]\n if yr[0] > '5':\n yr = '19' + yr\n else:\n yr = '20' + yr\n yr = int(yr)\n print\n years_exp = int(wnba_roster[name]['exp'])\n \n try:\n weight = int(wnba_roster[name]['weight'])\n except:\n weight = 180\n \n new_player['pos'] = wnba_roster[name]['pos']\n new_player['born'] = {'year':yr,'loc':''}\n new_player['weight'] = weight\n new_player['hgt'] = hgt\n new_player['draft'] = {'year':GEN_YEAR-years_exp ,\"round\": 0, \"pick\": 0, \"tid\": -1, \"originalTid\": -1,}\n except:\n print(name)\n raise\n continue\n elif i > Xn.shape[0]:\n #print(name)\n new_player['tid'] = int(-1)\n new_player['weight'] = int(np.random.normal(220,20))\n new_player['hgt'] = int(np.random.normal(6*12+6,3))\n new_player['born'] = {'year':int(GEN_YEAR-np.random.normal(32,2)),'loc':''}\n new_player['pos'] = \"GF\"\n #print(new_player)\n else:\n #print(name)\n continue\n #if name == 'Ben Simmons':\n # print(new_player)\n #print(year_gap,py,name)\n try:\n scale_rookie = 1.0\n sub_rookie = 0\n r_vec = {k: scale_rookie*ratings_f[i,y_keys.index(km)]+sub_rookie for k,km in y_map.items()}\n\n r_vec = {k: int(np.clip(v,0,100)) for k,v in r_vec.items()}\n\n new_player['ratings'] = [r_vec]\n #new_player['ratings']\n players.append(new_player)\n except:\n print(name)\n raise\n #if name in ['Luka Doncic']: #\"Trevon Duval\",'LeBron James'\n #print(py,exp_years)\n # pp.pprint(new_player)\n 
#pp.pprint(player_vectors[player_names.index(name)])", "_____no_output_____" ], [ "base['players'] = players\nwith open('wnba_roster_{}.json'.format(tyear[0]),'wt') as fp:\n json.dump(base,fp, sort_keys=True)", "_____no_output_____" ], [ "len(ratings),len(ratings_f),len(ratings)+gen_FA", "_____no_output_____" ], [ "#[(_['region'],_['tid']) for _ in base['teams']]", "_____no_output_____" ] ] ]
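A minimal, self-contained sketch of the minutes-played shrinkage used a few cells above, where `c = np.tanh(c/50)` blends each player's predicted ratings toward a replacement-level mean when minutes are low. The ratings, replacement mean, and minute totals below are made-up numbers, not the notebook's `ratings`, `replacement_player_mean_r`, or `player_scales` arrays; the point is only to show how quickly the tanh weight saturates toward the player's own prediction as minutes accumulate.

```python
import numpy as np

# Hypothetical inputs: 3 players x 2 rating columns, plus an assumed replacement-level mean.
pred_ratings = np.array([[60.0, 55.0],
                         [45.0, 70.0],
                         [50.0, 50.0]])
replacement_mean = np.array([40.0, 40.0])
minutes_played = np.array([10.0, 100.0, 600.0]).reshape(-1, 1)

# Same weighting as the notebook: tanh(MP / 50) is ~0.2 at 10 minutes and ~1.0 past ~150 minutes,
# so low-minute players are pulled most of the way back to the replacement-level mean.
c = np.tanh(minutes_played / 50.0)
blended = pred_ratings * c + (1.0 - c) * replacement_mean

for mp, row in zip(minutes_played.ravel(), blended):
    print(f"MP={mp:5.0f} -> blended ratings {np.round(row, 1)}")
```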
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a341d628d911acd26d460d3a95c49b7edbb3fe
114,170
ipynb
Jupyter Notebook
DataCleaning/Test.ipynb
hsuyeemon/DemandPrediction
c1011eabf7e86e8b2de75a0d551f54605f29f661
[ "Apache-2.0" ]
null
null
null
DataCleaning/Test.ipynb
hsuyeemon/DemandPrediction
c1011eabf7e86e8b2de75a0d551f54605f29f661
[ "Apache-2.0" ]
null
null
null
DataCleaning/Test.ipynb
hsuyeemon/DemandPrediction
c1011eabf7e86e8b2de75a0d551f54605f29f661
[ "Apache-2.0" ]
null
null
null
40.328506
22,824
0.476693
[ [ [ "import pandas as pd\nimport numpy as np\n", "_____no_output_____" ], [ "#import original file\n#df = pd.read_csv(\"/home/hsuyeemon/work/Data/yearly_data/2016.csv\")", "_____no_output_____" ], [ "#df.head()", "_____no_output_____" ], [ "#df.shape", "_____no_output_____" ], [ "#dates = df[\"date\"]", "_____no_output_____" ], [ "#dates.shape", "_____no_output_____" ], [ "#df.dtypes", "_____no_output_____" ], [ "#convert object type to datetime type\n#df['date'] = pd.to_datetime(df['date'])", "_____no_output_____" ], [ "#remove time stamp\n#df['date'] = df['date'].dt.date", "_____no_output_____" ], [ "#df.to_csv(\"normalized_2016.csv\")", "_____no_output_____" ], [ "import pandas_profiling ", "_____no_output_____" ], [ "#update profilng report\ndf1 = pd.read_csv(\"normalized_2016.csv\") \ndf1\n", "_____no_output_____" ], [ "df1.drop(['Unnamed: 0','id','customerType','customerId'], axis=1, inplace=True)\n#profile = df1.profile_report(title='Pandas Profiling Report') \n#profile.to_file(output_file=\"profile2016.html\") \ndf1", "_____no_output_____" ], [ "df1.dropna(inplace=True)\ndf1", "_____no_output_____" ], [ "duplicateRowsDF2 =df1[df1.duplicated(subset=['date','saturation','storeId','productId','color'],keep=False)]", "_____no_output_____" ], [ "duplicateRowsDF2", "_____no_output_____" ], [ "#duplicateRowsDF2['date'] = pd.to_datetime(duplicateRowsDF2['date'])", "_____no_output_____" ], [ "#duplicateRowsDF2 = duplicateRowsDF2.sort_values(by='date')", "_____no_output_____" ], [ "#duplicateRowsDF2['date'] = duplicateRowsDF2['date'].dt.date", "_____no_output_____" ], [ "duplicateRowsDF2.to_csv(\"test2.csv\")", "_____no_output_____" ], [ "duplicateRowsDF2.head(10)", "_____no_output_____" ], [ "#update profilng report\ndf1 = pd.read_csv(\"normalized_2016.csv\") \ndf1\n", "_____no_output_____" ], [ "df1 = pd.read_csv(\"2016_prepared.csv\") ", "/home/hsuyeemon/anaconda3/envs/my_env/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3062: DtypeWarning: Columns (3) have mixed types.Specify dtype option on import or set low_memory=False.\n has_raised = await self.run_ast_nodes(code_ast.body, cell_name,\n" ], [ "duplicateRowsDF2 =df1[df1.duplicated(['date','saturation','productId','color'],keep=False)]", "_____no_output_____" ], [ "duplicateRowsDF2.head(10)", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "#for only one store storeId = 3930 and product Id = 051142505\nnew_csv = df1.loc[(df1['storeId'] == 3930.0) & (df1['productId'] == '051142505')]", "_____no_output_____" ], [ "new_csv.to_csv(\"2016_3930_051142505.csv\")", "_____no_output_____" ], [ "from matplotlib import pyplot\n# load dataset\ndataset = pd.read_csv('2016_3930_051142505.csv', header=0, index_col=0)\ndataset.drop(['storeId','productId'], axis=1, inplace=True)\nvalues = dataset.values", "_____no_output_____" ], [ "# specify columns to plot\ngroups = [0, 1, 2, 3]\ni = 1\n# plot each column\npyplot.figure()\npyplot.plot(values[:, group])\npyplot.title(dataset.columns[3], y=0.5, loc='right')\npyplot.show()", "_____no_output_____" ], [ "dateD =dataset[dataset.duplicated(['date'],keep=False)]", "_____no_output_____" ], [ "dateD.sort_values(by='date')", "_____no_output_____" ], [ "dateProduct =dataset[dataset.duplicated(['date','productId'],keep=False)]", "_____no_output_____" ], [ "dateProduct = dateProduct.sort_values(by=['date','productId'])", "_____no_output_____" ], [ "dateProduct.head(20)", "_____no_output_____" ], [ "dateProductColor 
=dataset[dataset.duplicated(['date','productId','color'],keep=False)]\ndateProductColor = dateProduct.sort_values(by=['date','productId','color'])", "_____no_output_____" ], [ "dateProductColor.head(10)", "_____no_output_____" ], [ "from matplotlib import pyplot\n# load dataset\ndataset = pd.read_csv('2016_3930_051142505.csv', header=0, index_col=0)\ndataset.drop(['storeId','productId','color','saturation'], axis=1, inplace=True)\nvalues = dataset.values\n\n# plot each column\npyplot.figure()\npyplot.plot(values[:, 1])\npyplot.title(dataset.columns[1], y=0.5, loc='right')\npyplot.show()", "_____no_output_____" ], [ "dataset", "_____no_output_____" ], [ "dup =dataset[dataset.duplicated(['date'],keep=False)]\ndup = dup.sort_values(by=['date'])", "_____no_output_____" ], [ "dup.head(10)", "_____no_output_____" ], [ "df = dataset.groupby(['date']).sum()", "_____no_output_____" ], [ "idx = pd.date_range('2016-01-01', '2016-12-31')\ndf = df.sort_values(by='date')\ns=df.iloc[:]\ntype(s)\n", "_____no_output_____" ], [ "\nprint(s)\n", " quantity\ndate \n2016-01-01 1\n2016-01-02 2\n2016-01-03 6\n2016-01-09 2\n2016-01-10 3\n2016-01-11 3\n2016-01-19 6\n2016-01-23 1\n2016-02-06 2\n2016-02-10 6\n2016-02-11 1\n2016-02-14 1\n2016-02-18 1\n2016-02-22 2\n2016-03-04 2\n2016-03-15 6\n2016-03-16 2\n2016-03-28 2\n2016-03-29 8\n2016-04-01 1\n2016-04-10 3\n2016-04-14 3\n2016-04-15 1\n2016-04-16 7\n2016-04-19 1\n2016-04-20 6\n2016-04-21 3\n2016-04-22 4\n2016-04-25 4\n2016-05-03 2\n2016-05-04 2\n2016-05-09 1\n2016-05-15 3\n2016-05-17 2\n2016-05-19 3\n2016-05-22 2\n2016-06-05 2\n2016-06-12 1\n2016-06-13 2\n2016-06-18 4\n2016-06-24 1\n2016-07-25 3\n2016-07-30 2\n2016-08-02 1\n" ], [ "s.index = pd.DatetimeIndex(s.index)\ns = s.reindex(idx, fill_value=0)", "_____no_output_____" ], [ "s", "_____no_output_____" ], [ "s.to_csv(\"demand.csv\")", "_____no_output_____" ], [ "# plot each column\npyplot.figure()\npyplot.plot(s[:, 1])\npyplot.title(s.columns[1], y=0.5, loc='right')\npyplot.show()", "_____no_output_____" ] ] ]
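The closing cells above build a daily demand series for one store/product by summing `quantity` per `date` and reindexing over every day of 2016 with `fill_value=0`. Below is a small sketch of the same pattern on synthetic rows (the real `dataset` loaded from the CSV is not assumed); it plots the result by name, since positional slices such as `s[:, 1]` are not valid on a pandas DataFrame.

```python
import pandas as pd
from matplotlib import pyplot

# Synthetic stand-in for the sparse per-day sales records used above.
sales = pd.DataFrame({
    "date": ["2016-01-01", "2016-01-01", "2016-01-03", "2016-02-10"],
    "quantity": [1, 2, 6, 4],
})

# Aggregate duplicate dates, then reindex over the full year so missing days become 0 demand.
daily = sales.groupby("date")["quantity"].sum()
daily.index = pd.DatetimeIndex(daily.index)
daily = daily.reindex(pd.date_range("2016-01-01", "2016-12-31"), fill_value=0)

# Plot the zero-filled daily demand series.
pyplot.figure()
pyplot.plot(daily.index, daily.values)
pyplot.title("quantity", loc="right")
pyplot.show()
```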
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a360db692b59a5a3c270eed5f86960d342c083
796,334
ipynb
Jupyter Notebook
community/en/transformer_chatbot.ipynb
xuekun90/examples
683776ab06c79b79578a1ef3d4aa0951f865b573
[ "Apache-2.0" ]
4
2021-06-25T17:30:49.000Z
2021-11-08T08:56:57.000Z
community/en/transformer_chatbot.ipynb
xuekun90/examples
683776ab06c79b79578a1ef3d4aa0951f865b573
[ "Apache-2.0" ]
16
2020-01-28T22:15:33.000Z
2022-02-10T00:22:22.000Z
community/en/transformer_chatbot.ipynb
xuekun90/examples
683776ab06c79b79578a1ef3d4aa0951f865b573
[ "Apache-2.0" ]
1
2020-02-11T11:41:55.000Z
2020-02-11T11:41:55.000Z
411.754912
210,490
0.912882
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Transformer Chatbot", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/transformer_chatbot.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/transformer_chatbot.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "This tutorial trains a <a href=\"https://arxiv.org/abs/1706.03762\" class=\"external\">Transformer model</a> to be a chatbot. This is an advanced example that assumes knowledge of [text generation](https://tensorflow.org/alpha/tutorials/text/text_generation), [attention](https://www.tensorflow.org/alpha/tutorials/text/nmt_with_attention) and [transformer](https://www.tensorflow.org/alpha/tutorials/text/transformer).\n\nThe core idea behind the Transformer model is *self-attention*—the ability to attend to different positions of the input sequence to compute a representation of that sequence. 
Transformer creates stacks of self-attention layers and is explained below in the sections *Scaled dot product attention* and *Multi-head attention*.\n\nNote: The model architecture is identical to the example in [Transformer model for language understanding](https://www.tensorflow.org/alpha/tutorials/text/transformer), and we demonstrate how to implement the same model in the Functional approach instead of Subclassing.", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\ntry:\n # The %tensorflow_version magic only works in colab.\n %tensorflow_version 2.x\nexcept Exception:\n pass\nimport tensorflow as tf\ntf.random.set_seed(1234)\n\n!pip install tfds-nightly\nimport tensorflow_datasets as tfds\n\nimport os\nimport re\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n", "Collecting tf-nightly-gpu-2.0-preview==2.0.0.dev20190520\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/c9/c1/fcaf4f6873777da2cd3a7a8ac3c9648cef7c7413f13b8135521eb9b9804a/tf_nightly_gpu_2.0_preview-2.0.0.dev20190520-cp36-cp36m-manylinux1_x86_64.whl (349.0MB)\n\u001b[K |████████████████████████████████| 349.0MB 31kB/s \n\u001b[?25hRequirement already satisfied: tfds-nightly in /usr/local/lib/python3.6/dist-packages (1.0.2.dev201905140105)\nRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (0.33.4)\nRequirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (0.2.2)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (1.1.0)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (1.12.0)\nCollecting wrapt>=1.11.1 (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520)\n Downloading https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz\nRequirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (1.16.3)\nRequirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (3.7.1)\nRequirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (0.7.1)\nRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (1.15.0)\nCollecting tb-nightly<1.15.0a0,>=1.14.0a0 (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/6f/99/4220b50dc87814988e969cc859c07d070423bea820bc24d16c2023057eb6/tb_nightly-1.14.0a20190520-py3-none-any.whl (3.1MB)\n\u001b[K |████████████████████████████████| 3.1MB 33.7MB/s \n\u001b[?25hCollecting google-pasta>=0.1.6 (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f9/68/a14620bfb042691f532dcde8576ff82ee82e4c003cdc0a3dbee5f289cee6/google_pasta-0.1.6-py3-none-any.whl (51kB)\n\u001b[K |████████████████████████████████| 61kB 27.4MB/s \n\u001b[?25hRequirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from 
tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (1.0.7)\nRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (1.0.9)\nRequirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (0.7.1)\nCollecting tensorflow-estimator-2.0-preview (from tf-nightly-gpu-2.0-preview==2.0.0.dev20190520)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/71/e7/779651eca277d48486ae03d007162d37c93449bc29358fbe748e13639734/tensorflow_estimator_2.0_preview-1.14.0.dev2019052000-py2.py3-none-any.whl (427kB)\n\u001b[K |████████████████████████████████| 430kB 51.7MB/s \n\u001b[?25hRequirement already satisfied: promise in /usr/local/lib/python3.6/dist-packages (from tfds-nightly) (2.2.1)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from tfds-nightly) (0.16.0)\nRequirement already satisfied: dill in /usr/local/lib/python3.6/dist-packages (from tfds-nightly) (0.2.9)\nRequirement already satisfied: psutil in /usr/local/lib/python3.6/dist-packages (from tfds-nightly) (5.4.8)\nRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from tfds-nightly) (2.21.0)\nRequirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.6/dist-packages (from tfds-nightly) (0.13.0)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from tfds-nightly) (4.28.1)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (41.0.1)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<1.15.0a0,>=1.14.0a0->tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (0.15.3)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<1.15.0a0,>=1.14.0a0->tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (3.1)\nRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.6->tf-nightly-gpu-2.0-preview==2.0.0.dev20190520) (2.8.0)\nRequirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->tfds-nightly) (1.24.3)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->tfds-nightly) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->tfds-nightly) (2019.3.9)\nRequirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->tfds-nightly) (2.8)\nRequirement already satisfied: googleapis-common-protos in /usr/local/lib/python3.6/dist-packages (from tensorflow-metadata->tfds-nightly) (1.5.10)\nBuilding wheels for collected packages: wrapt\n Building wheel for wrapt (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Stored in directory: /root/.cache/pip/wheels/89/67/41/63cbf0f6ac0a6156588b9587be4db5565f8c6d8ccef98202fc\nSuccessfully built wrapt\n\u001b[31mERROR: thinc 6.12.1 has requirement wrapt<1.11.0,>=1.10.0, but you'll have wrapt 1.11.1 which is incompatible.\u001b[0m\nInstalling collected packages: wrapt, tb-nightly, google-pasta, tensorflow-estimator-2.0-preview, tf-nightly-gpu-2.0-preview\n Found existing installation: wrapt 1.10.11\n Uninstalling wrapt-1.10.11:\n Successfully uninstalled wrapt-1.10.11\nSuccessfully installed google-pasta-0.1.6 tb-nightly-1.14.0a20190520 tensorflow-estimator-2.0-preview-1.14.0.dev2019052000 tf-nightly-gpu-2.0-preview-2.0.0.dev20190520 wrapt-1.11.1\n" ] ], [ [ "##Prepare Dataset", "_____no_output_____" ], [ "We will use the conversations in movies and TV shows provided by [Cornell Movie-Dialogs Corpus](https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html), which contains more than 220 thousands conversational exchanges between more than 10k pairs of movie characters, as our dataset.\n\n`movie_conversations.txt` contains list of the conversation IDs and `movie_lines.text` contains the text of assoicated with each conversation ID. For further information regarding the dataset, please check the README file in the zip file.\n", "_____no_output_____" ] ], [ [ "path_to_zip = tf.keras.utils.get_file(\n 'cornell_movie_dialogs.zip',\n origin=\n 'http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip',\n extract=True)\n\npath_to_dataset = os.path.join(\n os.path.dirname(path_to_zip), \"cornell movie-dialogs corpus\")\n\npath_to_movie_lines = os.path.join(path_to_dataset, 'movie_lines.txt')\npath_to_movie_conversations = os.path.join(path_to_dataset,\n 'movie_conversations.txt')", "Downloading data from http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip\n9920512/9916637 [==============================] - 1s 0us/step\n" ] ], [ [ "### Load and preprocess data\n\nTo keep this example simple and fast, we are limiting the maximum number of training samples to`MAX_SAMPLES=25000` and the maximum length of the sentence to be `MAX_LENGTH=40`.\n\nWe preprocess our dataset in the following order:\n* Extract `MAX_SAMPLES` conversation pairs into list of `questions` and `answers.\n* Preprocess each sentence by removing special characters in each sentence.\n* Build tokenizer (map text to ID and ID to text) using [TensorFlow Datasets SubwordTextEncoder](https://www.tensorflow.org/datasets/api_docs/python/tfds/features/text/SubwordTextEncoder).\n* Tokenize each sentence and add `START_TOKEN` and `END_TOKEN` to indicate the start and end of each sentence.\n* Filter out sentence that has more than `MAX_LENGTH` tokens.\n* Pad tokenized sentences to `MAX_LENGTH`\n\n", "_____no_output_____" ] ], [ [ "# Maximum number of samples to preprocess\nMAX_SAMPLES = 50000\n\ndef preprocess_sentence(sentence):\n sentence = sentence.lower().strip()\n # creating a space between a word and the punctuation following it\n # eg: \"he is a boy.\" => \"he is a boy .\"\n sentence = re.sub(r\"([?.!,])\", r\" \\1 \", sentence)\n sentence = re.sub(r'[\" \"]+', \" \", sentence)\n # replacing everything with space except (a-z, A-Z, \".\", \"?\", \"!\", \",\")\n sentence = re.sub(r\"[^a-zA-Z?.!,]+\", \" \", sentence)\n sentence = sentence.strip()\n # adding a start and an end token to the sentence\n return sentence\n\n\ndef load_conversations():\n # dictionary of line id to text\n id2line = {}\n with open(path_to_movie_lines, 
errors='ignore') as file:\n lines = file.readlines()\n for line in lines:\n parts = line.replace('\\n', '').split(' +++$+++ ')\n id2line[parts[0]] = parts[4]\n\n inputs, outputs = [], []\n with open(path_to_movie_conversations, 'r') as file:\n lines = file.readlines()\n for line in lines:\n parts = line.replace('\\n', '').split(' +++$+++ ')\n # get conversation in a list of line ID\n conversation = [line[1:-1] for line in parts[3][1:-1].split(', ')]\n for i in range(len(conversation) - 1):\n inputs.append(preprocess_sentence(id2line[conversation[i]]))\n outputs.append(preprocess_sentence(id2line[conversation[i + 1]]))\n if len(inputs) >= MAX_SAMPLES:\n return inputs, outputs\n return inputs, outputs\n\n\nquestions, answers = load_conversations()", "_____no_output_____" ], [ "print('Sample question: {}'.format(questions[20]))\nprint('Sample answer: {}'.format(answers[20]))", "Sample question: i really , really , really wanna go , but i can t . not unless my sister goes .\nSample answer: i m workin on it . but she doesn t seem to be goin for him .\n" ], [ "# Build tokenizer using tfds for both questions and answers\ntokenizer = tfds.features.text.SubwordTextEncoder.build_from_corpus(\n questions + answers, target_vocab_size=2**13)\n\n# Define start and end token to indicate the start and end of a sentence\nSTART_TOKEN, END_TOKEN = [tokenizer.vocab_size], [tokenizer.vocab_size + 1]\n\n# Vocabulary size plus start and end token\nVOCAB_SIZE = tokenizer.vocab_size + 2", "_____no_output_____" ], [ "print('Tokenized sample question: {}'.format(tokenizer.encode(questions[20])))", "Tokenized sample question: [4, 281, 3, 281, 3, 143, 395, 176, 3, 42, 4, 38, 8191, 2, 37, 873, 27, 2031, 3096, 1]\n" ], [ "# Maximum sentence length\nMAX_LENGTH = 40\n\n\n# Tokenize, filter and pad sentences\ndef tokenize_and_filter(inputs, outputs):\n tokenized_inputs, tokenized_outputs = [], []\n \n for (sentence1, sentence2) in zip(inputs, outputs):\n # tokenize sentence\n sentence1 = START_TOKEN + tokenizer.encode(sentence1) + END_TOKEN\n sentence2 = START_TOKEN + tokenizer.encode(sentence2) + END_TOKEN\n # check tokenized sentence max length\n if len(sentence1) <= MAX_LENGTH and len(sentence2) <= MAX_LENGTH:\n tokenized_inputs.append(sentence1)\n tokenized_outputs.append(sentence2)\n \n # pad tokenized sentences\n tokenized_inputs = tf.keras.preprocessing.sequence.pad_sequences(\n tokenized_inputs, maxlen=MAX_LENGTH, padding='post')\n tokenized_outputs = tf.keras.preprocessing.sequence.pad_sequences(\n tokenized_outputs, maxlen=MAX_LENGTH, padding='post')\n \n return tokenized_inputs, tokenized_outputs\n\n\nquestions, answers = tokenize_and_filter(questions, answers)", "_____no_output_____" ], [ "print('Vocab size: {}'.format(VOCAB_SIZE))\nprint('Number of samples: {}'.format(len(questions)))", "Vocab size: 8333\nNumber of samples: 44095\n" ] ], [ [ "### Create `tf.data.Dataset`\n\nWe are going to use the [tf.data.Dataset API](https://www.tensorflow.org/api_docs/python/tf/data) to contruct our input pipline in order to utilize features like caching and prefetching to speed up the training process.\n\nThe transformer is an auto-regressive model: it makes predictions one part at a time, and uses its output so far to decide what to do next.\n\nDuring training this example uses teacher-forcing. 
Teacher forcing is passing the true output to the next time step regardless of what the model predicts at the current time step.\n\nAs the transformer predicts each word, self-attention allows it to look at the previous words in the input sequence to better predict the next word.\n\nTo prevent the model from peeking at the expected output, the model uses a look-ahead mask.\n\nThe target is divided into `decoder_inputs`, which is padded as an input to the decoder, and `cropped_targets`, which is used for calculating our loss and accuracy.", "_____no_output_____" ] ], [ [ "BATCH_SIZE = 64\nBUFFER_SIZE = 20000\n\n# decoder inputs use the previous target as input\n# remove START_TOKEN from targets\ndataset = tf.data.Dataset.from_tensor_slices((\n {\n 'inputs': questions,\n 'dec_inputs': answers[:, :-1]\n },\n {\n 'outputs': answers[:, 1:]\n },\n))\n\ndataset = dataset.cache()\ndataset = dataset.shuffle(BUFFER_SIZE)\ndataset = dataset.batch(BATCH_SIZE)\ndataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)", "WARNING: Logging before flag parsing goes to stderr.\nW0521 04:24:35.050999 140098170382208 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/util/random_seed.py:58: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\n" ], [ "print(dataset)", "<PrefetchDataset shapes: ({inputs: (None, 40), dec_inputs: (None, 39)}, {outputs: (None, 39)}), types: ({inputs: tf.int32, dec_inputs: tf.int32}, {outputs: tf.int32})>\n" ] ], [ [ "## Attention\n\n", "_____no_output_____" ], [ "### Scaled dot product Attention\n\nThe scaled dot-product attention function used by the transformer takes three inputs: Q (query), K (key), V (value). The equation used to calculate the attention weights is:\n\n$$\\Large{Attention(Q, K, V) = softmax_k(\\frac{QK^T}{\\sqrt{d_k}}) V} $$\n\nAs the softmax normalization is done on the `key`, its values decide the amount of importance given to each `key` for a given `query`.\n\nThe output represents the multiplication of the attention weights and the `value` vector. This ensures that the words we want to focus on are kept as is and the irrelevant words are flushed out.\n\nThe dot-product attention is scaled by a factor of square root of the depth. This is done because for large values of depth, the dot product grows large in magnitude, pushing the softmax function into regions where it has small gradients, resulting in a very hard softmax.\n\nFor example, consider that `query` and `key` have a mean of 0 and variance of 1. Their matrix multiplication will have a mean of 0 and variance of `dk`. Hence, the *square root of `dk`* is used for scaling (and not any other number) because the matmul of `query` and `key` should have a mean of 0 and variance of 1, so that we get a gentler softmax.\n\nThe mask is multiplied with *-1e9 (close to negative infinity).* This is done because the mask is summed with the scaled matrix multiplication of `query` and `key` and is applied immediately before a softmax. The goal is to zero out these cells, and large negative inputs to softmax are near zero in the output.", "_____no_output_____" ] ], [ [ "def scaled_dot_product_attention(query, key, value, mask):\n \"\"\"Calculate the attention weights. 
\"\"\"\n matmul_qk = tf.matmul(query, key, transpose_b=True)\n\n # scale matmul_qk\n depth = tf.cast(tf.shape(key)[-1], tf.float32)\n logits = matmul_qk / tf.math.sqrt(depth)\n\n # add the mask to zero out padding tokens\n if mask is not None:\n logits += (mask * -1e9)\n\n # softmax is normalized on the last axis (seq_len_k)\n attention_weights = tf.nn.softmax(logits, axis=-1)\n\n output = tf.matmul(attention_weights, value)\n\n return output", "_____no_output_____" ] ], [ [ "### Multi-head attention\n\n<img src=\"https://www.tensorflow.org/images/tutorials/transformer/multi_head_attention.png\" width=\"500\" alt=\"multi-head attention\">\n\n\nMulti-head attention consists of four parts:\n* Linear layers and split into heads.\n* Scaled dot-product attention.\n* Concatenation of heads.\n* Final linear layer.\n\nEach multi-head attention block gets three inputs; Q (query), K (key), V (value). These are put through linear (Dense) layers and split up into multiple heads. \n\nThe `scaled_dot_product_attention` defined above is applied to each head (broadcasted for efficiency). An appropriate mask must be used in the attention step. The attention output for each head is then concatenated (using `tf.transpose`, and `tf.reshape`) and put through a final `Dense` layer.\n\nInstead of one single attention head, `query`, `key`, and `value` are split into multiple heads because it allows the model to jointly attend to information at different positions from different representational spaces. After the split each head has a reduced dimensionality, so the total computation cost is the same as a single head attention with full dimensionality.", "_____no_output_____" ] ], [ [ "class MultiHeadAttention(tf.keras.layers.Layer):\n\n def __init__(self, d_model, num_heads, name=\"multi_head_attention\"):\n super(MultiHeadAttention, self).__init__(name=name)\n self.num_heads = num_heads\n self.d_model = d_model\n\n assert d_model % self.num_heads == 0\n\n self.depth = d_model // self.num_heads\n\n self.query_dense = tf.keras.layers.Dense(units=d_model)\n self.key_dense = tf.keras.layers.Dense(units=d_model)\n self.value_dense = tf.keras.layers.Dense(units=d_model)\n\n self.dense = tf.keras.layers.Dense(units=d_model)\n\n def split_heads(self, inputs, batch_size):\n inputs = tf.reshape(\n inputs, shape=(batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(inputs, perm=[0, 2, 1, 3])\n\n def call(self, inputs):\n query, key, value, mask = inputs['query'], inputs['key'], inputs[\n 'value'], inputs['mask']\n batch_size = tf.shape(query)[0]\n\n # linear layers\n query = self.query_dense(query)\n key = self.key_dense(key)\n value = self.value_dense(value)\n\n # split heads\n query = self.split_heads(query, batch_size)\n key = self.split_heads(key, batch_size)\n value = self.split_heads(value, batch_size)\n\n # scaled dot-product attention\n scaled_attention = scaled_dot_product_attention(query, key, value, mask)\n\n scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])\n\n # concatenation of heads\n concat_attention = tf.reshape(scaled_attention,\n (batch_size, -1, self.d_model))\n\n # final linear layer\n outputs = self.dense(concat_attention)\n\n return outputs", "_____no_output_____" ] ], [ [ "## Transformer", "_____no_output_____" ], [ "### Masking\n\n", "_____no_output_____" ], [ "`create_padding_mask` and `create_look_ahead` are helper functions to creating masks to mask out padded tokens, we are going to use these helper functions as `tf.keras.layers.Lambda` layers.\n\nMask all the 
pad tokens (value `0`) in the batch to ensure the model does not treat padding as input.", "_____no_output_____" ] ], [ [ "def create_padding_mask(x):\n mask = tf.cast(tf.math.equal(x, 0), tf.float32)\n # (batch_size, 1, 1, sequence length)\n return mask[:, tf.newaxis, tf.newaxis, :]", "_____no_output_____" ], [ "print(create_padding_mask(tf.constant([[1, 2, 0, 3, 0], [0, 0, 0, 4, 5]])))", "tf.Tensor(\n[[[[0. 0. 1. 0. 1.]]]\n\n\n [[[1. 1. 1. 0. 0.]]]], shape=(2, 1, 1, 5), dtype=float32)\n" ] ], [ [ "Look-ahead mask to mask the future tokens in a sequence.\nWe also mask out pad tokens.\n\ni.e. To predict the third word, only the first and second word will be used", "_____no_output_____" ] ], [ [ "def create_look_ahead_mask(x):\n seq_len = tf.shape(x)[1]\n look_ahead_mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)\n padding_mask = create_padding_mask(x)\n return tf.maximum(look_ahead_mask, padding_mask)", "_____no_output_____" ], [ "print(create_look_ahead_mask(tf.constant([[1, 2, 0, 4, 5]])))", "tf.Tensor(\n[[[[0. 1. 1. 1. 1.]\n [0. 0. 1. 1. 1.]\n [0. 0. 1. 1. 1.]\n [0. 0. 1. 0. 1.]\n [0. 0. 1. 0. 0.]]]], shape=(1, 1, 5, 5), dtype=float32)\n" ] ], [ [ "### Positional encoding\n\nSince this model doesn't contain any recurrence or convolution, positional encoding is added to give the model some information about the relative position of the words in the sentence. \n\nThe positional encoding vector is added to the embedding vector. Embeddings represent a token in a d-dimensional space where tokens with similar meaning will be closer to each other. But the embeddings do not encode the relative position of words in a sentence. So after adding the positional encoding, words will be closer to each other based on the *similarity of their meaning and their position in the sentence*, in the d-dimensional space.\n\nSee the notebook on [positional encoding](https://github.com/tensorflow/examples/blob/master/community/en/position_encoding.ipynb) to learn more about it. 
The formula for calculating the positional encoding is as follows:\n\n$$\\Large{PE_{(pos, 2i)} = sin(pos / 10000^{2i / d_{model}})} $$\n$$\\Large{PE_{(pos, 2i+1)} = cos(pos / 10000^{2i / d_{model}})} $$", "_____no_output_____" ] ], [ [ "class PositionalEncoding(tf.keras.layers.Layer):\n\n def __init__(self, position, d_model):\n super(PositionalEncoding, self).__init__()\n self.pos_encoding = self.positional_encoding(position, d_model)\n\n def get_angles(self, position, i, d_model):\n angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))\n return position * angles\n\n def positional_encoding(self, position, d_model):\n angle_rads = self.get_angles(\n position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],\n i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],\n d_model=d_model)\n # apply sin to even index in the array\n sines = tf.math.sin(angle_rads[:, 0::2])\n # apply cos to odd index in the array\n cosines = tf.math.cos(angle_rads[:, 1::2])\n\n pos_encoding = tf.concat([sines, cosines], axis=-1)\n pos_encoding = pos_encoding[tf.newaxis, ...]\n return tf.cast(pos_encoding, tf.float32)\n\n def call(self, inputs):\n return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]", "_____no_output_____" ], [ "sample_pos_encoding = PositionalEncoding(50, 512)\n\nplt.pcolormesh(sample_pos_encoding.pos_encoding.numpy()[0], cmap='RdBu')\nplt.xlabel('Depth')\nplt.xlim((0, 512))\nplt.ylabel('Position')\nplt.colorbar()\nplt.show()", "_____no_output_____" ] ], [ [ "### Encoder Layer\n\nEach encoder layer consists of sublayers:\n\n1. Multi-head attention (with padding mask) \n2. 2 dense layers followed by dropout\n\nEach of these sublayers has a residual connection around it followed by a layer normalization. Residual connections help in avoiding the vanishing gradient problem in deep networks.\n\nThe output of each sublayer is `LayerNorm(x + Sublayer(x))`. The normalization is done on the `d_model` (last) axis.", "_____no_output_____" ] ], [ [ "def encoder_layer(units, d_model, num_heads, dropout, name=\"encoder_layer\"):\n inputs = tf.keras.Input(shape=(None, d_model), name=\"inputs\")\n padding_mask = tf.keras.Input(shape=(1, 1, None), name=\"padding_mask\")\n\n attention = MultiHeadAttention(\n d_model, num_heads, name=\"attention\")({\n 'query': inputs,\n 'key': inputs,\n 'value': inputs,\n 'mask': padding_mask\n })\n attention = tf.keras.layers.Dropout(rate=dropout)(attention)\n attention = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(inputs + attention)\n\n outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention)\n outputs = tf.keras.layers.Dense(units=d_model)(outputs)\n outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)\n outputs = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(attention + outputs)\n\n return tf.keras.Model(\n inputs=[inputs, padding_mask], outputs=outputs, name=name)", "_____no_output_____" ], [ "sample_encoder_layer = encoder_layer(\n units=512,\n d_model=128,\n num_heads=4,\n dropout=0.3,\n name=\"sample_encoder_layer\")\n\ntf.keras.utils.plot_model(\n sample_encoder_layer, to_file='encoder_layer.png', show_shapes=True)", "_____no_output_____" ] ], [ [ "### Encoder\n\nThe Encoder consists of:\n1. Input Embedding\n2. Positional Encoding\n3. `num_layers` encoder layers\n\nThe input is put through an embedding which is summed with the positional encoding. The output of this summation is the input to the encoder layers. 
The output of the encoder is the input to the decoder.", "_____no_output_____" ] ], [ [ "def encoder(vocab_size,\n num_layers,\n units,\n d_model,\n num_heads,\n dropout,\n name=\"encoder\"):\n inputs = tf.keras.Input(shape=(None,), name=\"inputs\")\n padding_mask = tf.keras.Input(shape=(1, 1, None), name=\"padding_mask\")\n\n embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)\n embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))\n embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)\n\n outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)\n\n for i in range(num_layers):\n outputs = encoder_layer(\n units=units,\n d_model=d_model,\n num_heads=num_heads,\n dropout=dropout,\n name=\"encoder_layer_{}\".format(i),\n )([outputs, padding_mask])\n\n return tf.keras.Model(\n inputs=[inputs, padding_mask], outputs=outputs, name=name)", "_____no_output_____" ], [ "sample_encoder = encoder(\n vocab_size=8192,\n num_layers=2,\n units=512,\n d_model=128,\n num_heads=4,\n dropout=0.3,\n name=\"sample_encoder\")\n\ntf.keras.utils.plot_model(\n sample_encoder, to_file='encoder.png', show_shapes=True)", "_____no_output_____" ] ], [ [ "### Decoder Layer\n\nEach decoder layer consists of sublayers:\n\n1. Masked multi-head attention (with look ahead mask and padding mask)\n2. Multi-head attention (with padding mask). `value` and `key` receive the *encoder output* as inputs. `query` receives the *output from the masked multi-head attention sublayer.*\n3. 2 dense layers followed by dropout\n\nEach of these sublayers has a residual connection around it followed by a layer normalization. The output of each sublayer is `LayerNorm(x + Sublayer(x))`. The normalization is done on the `d_model` (last) axis.\n\nAs `query` receives the output from decoder's first attention block, and `key` receives the encoder output, the attention weights represent the importance given to the decoder's input based on the encoder's output. In other words, the decoder predicts the next word by looking at the encoder output and self-attending to its own output. 
See the demonstration above in the scaled dot product attention section.", "_____no_output_____" ] ], [ [ "def decoder_layer(units, d_model, num_heads, dropout, name=\"decoder_layer\"):\n inputs = tf.keras.Input(shape=(None, d_model), name=\"inputs\")\n enc_outputs = tf.keras.Input(shape=(None, d_model), name=\"encoder_outputs\")\n look_ahead_mask = tf.keras.Input(\n shape=(1, None, None), name=\"look_ahead_mask\")\n padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')\n\n attention1 = MultiHeadAttention(\n d_model, num_heads, name=\"attention_1\")(inputs={\n 'query': inputs,\n 'key': inputs,\n 'value': inputs,\n 'mask': look_ahead_mask\n })\n attention1 = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(attention1 + inputs)\n\n attention2 = MultiHeadAttention(\n d_model, num_heads, name=\"attention_2\")(inputs={\n 'query': attention1,\n 'key': enc_outputs,\n 'value': enc_outputs,\n 'mask': padding_mask\n })\n attention2 = tf.keras.layers.Dropout(rate=dropout)(attention2)\n attention2 = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(attention2 + attention1)\n\n outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention2)\n outputs = tf.keras.layers.Dense(units=d_model)(outputs)\n outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)\n outputs = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(outputs + attention2)\n\n return tf.keras.Model(\n inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],\n outputs=outputs,\n name=name)", "_____no_output_____" ], [ "sample_decoder_layer = decoder_layer(\n units=512,\n d_model=128,\n num_heads=4,\n dropout=0.3,\n name=\"sample_decoder_layer\")\n\ntf.keras.utils.plot_model(\n sample_decoder_layer, to_file='decoder_layer.png', show_shapes=True)", "_____no_output_____" ] ], [ [ "### Decoder\n\nThe Decoder consists of:\n1. Output Embedding\n2. Positional Encoding\n3. N decoder layers\n\nThe target is put through an embedding which is summed with the positional encoding. The output of this summation is the input to the decoder layers. 
The output of the decoder is the input to the final linear layer.", "_____no_output_____" ] ], [ [ "def decoder(vocab_size,\n num_layers,\n units,\n d_model,\n num_heads,\n dropout,\n name='decoder'):\n inputs = tf.keras.Input(shape=(None,), name='inputs')\n enc_outputs = tf.keras.Input(shape=(None, d_model), name='encoder_outputs')\n look_ahead_mask = tf.keras.Input(\n shape=(1, None, None), name='look_ahead_mask')\n padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')\n \n embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)\n embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))\n embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)\n\n outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)\n\n for i in range(num_layers):\n outputs = decoder_layer(\n units=units,\n d_model=d_model,\n num_heads=num_heads,\n dropout=dropout,\n name='decoder_layer_{}'.format(i),\n )(inputs=[outputs, enc_outputs, look_ahead_mask, padding_mask])\n\n return tf.keras.Model(\n inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],\n outputs=outputs,\n name=name)", "_____no_output_____" ], [ "sample_decoder = decoder(\n vocab_size=8192,\n num_layers=2,\n units=512,\n d_model=128,\n num_heads=4,\n dropout=0.3,\n name=\"sample_decoder\")\n\ntf.keras.utils.plot_model(\n sample_decoder, to_file='decoder.png', show_shapes=True)", "_____no_output_____" ] ], [ [ "### Transformer\n\nTransformer consists of the encoder, decoder and a final linear layer. The output of the decoder is the input to the linear layer and its output is returned.", "_____no_output_____" ] ], [ [ "def transformer(vocab_size,\n num_layers,\n units,\n d_model,\n num_heads,\n dropout,\n name=\"transformer\"):\n inputs = tf.keras.Input(shape=(None,), name=\"inputs\")\n dec_inputs = tf.keras.Input(shape=(None,), name=\"dec_inputs\")\n\n enc_padding_mask = tf.keras.layers.Lambda(\n create_padding_mask, output_shape=(1, 1, None),\n name='enc_padding_mask')(inputs)\n # mask the future tokens for decoder inputs at the 1st attention block\n look_ahead_mask = tf.keras.layers.Lambda(\n create_look_ahead_mask,\n output_shape=(1, None, None),\n name='look_ahead_mask')(dec_inputs)\n # mask the encoder outputs for the 2nd attention block\n dec_padding_mask = tf.keras.layers.Lambda(\n create_padding_mask, output_shape=(1, 1, None),\n name='dec_padding_mask')(inputs)\n\n enc_outputs = encoder(\n vocab_size=vocab_size,\n num_layers=num_layers,\n units=units,\n d_model=d_model,\n num_heads=num_heads,\n dropout=dropout,\n )(inputs=[inputs, enc_padding_mask])\n\n dec_outputs = decoder(\n vocab_size=vocab_size,\n num_layers=num_layers,\n units=units,\n d_model=d_model,\n num_heads=num_heads,\n dropout=dropout,\n )(inputs=[dec_inputs, enc_outputs, look_ahead_mask, dec_padding_mask])\n\n outputs = tf.keras.layers.Dense(units=vocab_size, name=\"outputs\")(dec_outputs)\n\n return tf.keras.Model(inputs=[inputs, dec_inputs], outputs=outputs, name=name)", "_____no_output_____" ], [ "sample_transformer = transformer(\n vocab_size=8192,\n num_layers=4,\n units=512,\n d_model=128,\n num_heads=4,\n dropout=0.3,\n name=\"sample_transformer\")\n\ntf.keras.utils.plot_model(\n sample_transformer, to_file='transformer.png', show_shapes=True)", "_____no_output_____" ] ], [ [ "## Train model", "_____no_output_____" ], [ "### Initialize model\n\nTo keep this example small and relatively fast, the values for *num_layers, d_model, and units* have been reduced. 
See the [paper](https://arxiv.org/abs/1706.03762) for all the other versions of the transformer.", "_____no_output_____" ] ], [ [ "tf.keras.backend.clear_session()\n\n# Hyper-parameters\nNUM_LAYERS = 2\nD_MODEL = 256\nNUM_HEADS = 8\nUNITS = 512\nDROPOUT = 0.1\n\nmodel = transformer(\n vocab_size=VOCAB_SIZE,\n num_layers=NUM_LAYERS,\n units=UNITS,\n d_model=D_MODEL,\n num_heads=NUM_HEADS,\n dropout=DROPOUT)", "_____no_output_____" ] ], [ [ "### Loss function\n\nSince the target sequences are padded, it is important to apply a padding mask when calculating the loss.", "_____no_output_____" ] ], [ [ "def loss_function(y_true, y_pred):\n y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))\n \n loss = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='none')(y_true, y_pred)\n\n mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)\n loss = tf.multiply(loss, mask)\n\n return tf.reduce_mean(loss)", "_____no_output_____" ] ], [ [ "### Custom learning rate\n\nUse the Adam optimizer with a custom learning rate scheduler according to the formula in the [paper](https://arxiv.org/abs/1706.03762).\n\n$$\\Large{lrate = d_{model}^{-0.5} * min(step{\\_}num^{-0.5}, step{\\_}num * warmup{\\_}steps^{-1.5})}$$", "_____no_output_____" ] ], [ [ "class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n\n def __init__(self, d_model, warmup_steps=4000):\n super(CustomSchedule, self).__init__()\n\n self.d_model = d_model\n self.d_model = tf.cast(self.d_model, tf.float32)\n\n self.warmup_steps = warmup_steps\n\n def __call__(self, step):\n arg1 = tf.math.rsqrt(step)\n arg2 = step * (self.warmup_steps**-1.5)\n\n return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)", "_____no_output_____" ], [ "sample_learning_rate = CustomSchedule(d_model=128)\n\nplt.plot(sample_learning_rate(tf.range(200000, dtype=tf.float32)))\nplt.ylabel(\"Learning Rate\")\nplt.xlabel(\"Train Step\")", "_____no_output_____" ] ], [ [ "### Compile Model\n", "_____no_output_____" ] ], [ [ "learning_rate = CustomSchedule(D_MODEL)\n\noptimizer = tf.keras.optimizers.Adam(\n learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)\n\ndef accuracy(y_true, y_pred):\n # ensure labels have shape (batch_size, MAX_LENGTH - 1)\n y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))\n accuracy = tf.metrics.SparseCategoricalAccuracy()(y_true, y_pred)\n return accuracy\n\nmodel.compile(optimizer=optimizer, loss=loss_function, metrics=[accuracy])", "_____no_output_____" ] ], [ [ "### Fit model\n\nTrain our transformer by simply calling `model.fit()`", "_____no_output_____" ] ], [ [ "EPOCHS = 20\n\nmodel.fit(dataset, epochs=EPOCHS)", "Epoch 1/20\n689/689 [==============================] - 97s 141ms/step - loss: 2.1146 - accuracy: 0.0249\nEpoch 2/20\n689/689 [==============================] - 81s 118ms/step - loss: 1.5008 - accuracy: 0.0530\nEpoch 3/20\n689/689 [==============================] - 82s 119ms/step - loss: 1.3940 - accuracy: 0.0653\nEpoch 4/20\n689/689 [==============================] - 82s 118ms/step - loss: 1.3313 - accuracy: 0.0719\nEpoch 5/20\n689/689 [==============================] - 82s 119ms/step - loss: 1.2744 - accuracy: 0.0765\nEpoch 6/20\n689/689 [==============================] - 82s 119ms/step - loss: 1.2223 - accuracy: 0.0801\nEpoch 7/20\n689/689 [==============================] - 82s 118ms/step - loss: 1.1670 - accuracy: 0.0832\nEpoch 8/20\n689/689 [==============================] - 82s 119ms/step - loss: 1.1050 - accuracy: 0.0861\nEpoch 9/20\n689/689 
[==============================] - 82s 119ms/step - loss: 1.0503 - accuracy: 0.0889\nEpoch 10/20\n689/689 [==============================] - 82s 120ms/step - loss: 1.0002 - accuracy: 0.0917\nEpoch 11/20\n689/689 [==============================] - 82s 118ms/step - loss: 0.9540 - accuracy: 0.0945\nEpoch 12/20\n689/689 [==============================] - 82s 119ms/step - loss: 0.9122 - accuracy: 0.0973\nEpoch 13/20\n689/689 [==============================] - 82s 118ms/step - loss: 0.8744 - accuracy: 0.1001\nEpoch 14/20\n689/689 [==============================] - 82s 119ms/step - loss: 0.8396 - accuracy: 0.1029\nEpoch 15/20\n689/689 [==============================] - 82s 119ms/step - loss: 0.8082 - accuracy: 0.1056\nEpoch 16/20\n689/689 [==============================] - 82s 119ms/step - loss: 0.7799 - accuracy: 0.1082\nEpoch 17/20\n689/689 [==============================] - 82s 118ms/step - loss: 0.7540 - accuracy: 0.1108\nEpoch 18/20\n689/689 [==============================] - 82s 119ms/step - loss: 0.7300 - accuracy: 0.1134\nEpoch 19/20\n689/689 [==============================] - 82s 119ms/step - loss: 0.7076 - accuracy: 0.1158\nEpoch 20/20\n689/689 [==============================] - 82s 118ms/step - loss: 0.6881 - accuracy: 0.1183\n" ] ], [ [ "## Evaluate and predict\n\nThe following steps are used for evaluation:\n\n* Apply the same preprocessing method we used to create our dataset for the input sentence.\n* Tokenize the input sentence and add `START_TOKEN` and `END_TOKEN`. \n* Calculate the padding masks and the look ahead masks.\n* The decoder then outputs the predictions by looking at the encoder output and its own output.\n* Select the last word and calculate the argmax of that.\n* Concatentate the predicted word to the decoder input as pass it to the decoder.\n* In this approach, the decoder predicts the next word based on the previous words it predicted.\n\nNote: The model used here has less capacity and trained on a subset of the full dataset, hence its performance can be further improved.", "_____no_output_____" ] ], [ [ "def evaluate(sentence):\n sentence = preprocess_sentence(sentence)\n\n sentence = tf.expand_dims(\n START_TOKEN + tokenizer.encode(sentence) + END_TOKEN, axis=0)\n\n output = tf.expand_dims(START_TOKEN, 0)\n\n for i in range(MAX_LENGTH):\n predictions = model(inputs=[sentence, output], training=False)\n\n # select the last word from the seq_len dimension\n predictions = predictions[:, -1:, :]\n predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)\n\n # return the result if the predicted_id is equal to the end token\n if tf.equal(predicted_id, END_TOKEN[0]):\n break\n\n # concatenated the predicted_id to the output which is given to the decoder\n # as its input.\n output = tf.concat([output, predicted_id], axis=-1)\n\n return tf.squeeze(output, axis=0)\n\n\ndef predict(sentence):\n prediction = evaluate(sentence)\n\n predicted_sentence = tokenizer.decode(\n [i for i in prediction if i < tokenizer.vocab_size])\n\n print('Input: {}'.format(sentence))\n print('Output: {}'.format(predicted_sentence))\n\n return predicted_sentence", "_____no_output_____" ] ], [ [ "Let's test our model!", "_____no_output_____" ] ], [ [ "output = predict('Where have you been?')", "Input: Where have you been?\nOutput: i m not gonna bring you here .\n" ], [ "output = predict(\"It's a trap\")", "Input: It's a trap\nOutput: no , it s not .\n" ], [ "# feed the model with its previous output\nsentence = 'I am not crazy, my mother had me tested.'\nfor _ in range(5):\n sentence = 
predict(sentence)\n print('')", "Input: I am not crazy, my mother had me tested.\nOutput: you re a good man , roy . that s a good man , roy , you re a little girl , that s a good man . you re a little girl .\n\nInput: you re a good man , roy . that s a good man , roy , you re a little girl , that s a good man . you re a little girl .\nOutput: i m glad you re not a drug addict .\n\nInput: i m glad you re not a drug addict .\nOutput: the inheritance .\n\nInput: the inheritance .\nOutput: no , i don t know what to do . i just like to tell you .\n\nInput: no , i don t know what to do . i just like to tell you .\nOutput: i m not sure . i m not gonna mention my name .\n\n" ] ], [ [ "## Summary\n\nHere we are, we have implemented a Transformer in TensorFlow 2.0 in around 500 lines of code.\n\nIn this tutorial, we focus on the two different approaches to implement complex models with Functional API and Model subclassing, and how to incorporate them.\n\nTry using a different dataset or hyper-parameters to train the Transformer! Thanks for reading.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
e7a371163877fd6302f892e3f068a4374a51123b
33,779
ipynb
Jupyter Notebook
docs/tutorials/gradients.ipynb
HectorIGH/quantum
3fa7cdb2d974672540ee5cc10c7fda3256765505
[ "Apache-2.0" ]
3
2021-03-28T07:39:07.000Z
2021-03-28T14:08:26.000Z
docs/tutorials/gradients.ipynb
prashanth-up/quantum
6deb5ad96b575b905cf33773d7a71379b8defe18
[ "Apache-2.0" ]
3
2021-08-25T15:15:49.000Z
2022-02-10T05:10:42.000Z
docs/tutorials/gradients.ipynb
prashanth-up/quantum
6deb5ad96b575b905cf33773d7a71379b8defe18
[ "Apache-2.0" ]
1
2021-11-02T18:52:06.000Z
2021-11-02T18:52:06.000Z
39.005774
615
0.523284
[ [ [ "##### Copyright 2020 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Calculate gradients", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/quantum/tutorials/gradients\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/gradients.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/quantum/blob/master/docs/tutorials/gradients.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/gradients.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "This tutorial explores gradient calculation algorithms for the expectation values of quantum circuits.\n\nCalculating the gradient of the expectation value of a certain observable in a quantum circuit is an involved process. Expectation values of observables do not have the luxury of having analytic gradient formulas that are always easy to write down—unlike traditional machine learning transformations such as matrix multiplication or vector addition that have analytic gradient formulas which are easy to write down. As a result, there are different quantum gradient calculation methods that come in handy for different scenarios. This tutorial compares and contrasts two different differentiation schemes.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "!pip install tensorflow==2.3.1", "_____no_output_____" ] ], [ [ "Install TensorFlow Quantum:", "_____no_output_____" ] ], [ [ "!pip install tensorflow-quantum", "_____no_output_____" ] ], [ [ "Now import TensorFlow and the module dependencies:", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport tensorflow_quantum as tfq\n\nimport cirq\nimport sympy\nimport numpy as np\n\n# visualization tools\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom cirq.contrib.svg import SVGCircuit", "_____no_output_____" ] ], [ [ "## 1. Preliminary\n\nLet's make the notion of gradient calculation for quantum circuits a little more concrete. 
Suppose you have a parameterized circuit like this one:", "_____no_output_____" ] ], [ [ "qubit = cirq.GridQubit(0, 0)\nmy_circuit = cirq.Circuit(cirq.Y(qubit)**sympy.Symbol('alpha'))\nSVGCircuit(my_circuit)", "_____no_output_____" ] ], [ [ "Along with an observable:", "_____no_output_____" ] ], [ [ "pauli_x = cirq.X(qubit)\npauli_x", "_____no_output_____" ] ], [ [ "Looking at this operator you know that $⟨Y(\\alpha)| X | Y(\\alpha)⟩ = \\sin(\\pi \\alpha)$", "_____no_output_____" ] ], [ [ "def my_expectation(op, alpha):\n \"\"\"Compute ⟨Y(alpha)| `op` | Y(alpha)⟩\"\"\"\n params = {'alpha': alpha}\n sim = cirq.Simulator()\n final_state_vector = sim.simulate(my_circuit, params).final_state_vector\n return op.expectation_from_state_vector(final_state_vector, {qubit: 0}).real\n\n\nmy_alpha = 0.3\nprint(\"Expectation=\", my_expectation(pauli_x, my_alpha))\nprint(\"Sin Formula=\", np.sin(np.pi * my_alpha))", "_____no_output_____" ] ], [ [ " and if you define $f_{1}(\\alpha) = ⟨Y(\\alpha)| X | Y(\\alpha)⟩$ then $f_{1}^{'}(\\alpha) = \\pi \\cos(\\pi \\alpha)$. Let's check this:", "_____no_output_____" ] ], [ [ "def my_grad(obs, alpha, eps=0.01):\n grad = 0\n f_x = my_expectation(obs, alpha)\n f_x_prime = my_expectation(obs, alpha + eps)\n return ((f_x_prime - f_x) / eps).real\n\n\nprint('Finite difference:', my_grad(pauli_x, my_alpha))\nprint('Cosine formula: ', np.pi * np.cos(np.pi * my_alpha))", "_____no_output_____" ] ], [ [ "## 2. The need for a differentiator\n\nWith larger circuits, you won't always be so lucky to have a formula that precisely calculates the gradients of a given quantum circuit. In the event that a simple formula isn't enough to calculate the gradient, the `tfq.differentiators.Differentiator` class allows you to define algorithms for computing the gradients of your circuits. For instance you can recreate the above example in TensorFlow Quantum (TFQ) with:", "_____no_output_____" ] ], [ [ "expectation_calculation = tfq.layers.Expectation(\n differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))\n\nexpectation_calculation(my_circuit,\n operators=pauli_x,\n symbol_names=['alpha'],\n symbol_values=[[my_alpha]])", "_____no_output_____" ] ], [ [ "However, if you switch to estimating expectation based on sampling (what would happen on a true device) the values can change a little bit. 
This means you now have an imperfect estimate:", "_____no_output_____" ] ], [ [ "sampled_expectation_calculation = tfq.layers.SampledExpectation(\n differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))\n\nsampled_expectation_calculation(my_circuit,\n operators=pauli_x,\n repetitions=500,\n symbol_names=['alpha'],\n symbol_values=[[my_alpha]])", "_____no_output_____" ] ], [ [ "This can quickly compound into a serious accuracy problem when it comes to gradients:", "_____no_output_____" ] ], [ [ "# Make input_points = [batch_size, 1] array.\ninput_points = np.linspace(0, 5, 200)[:, np.newaxis].astype(np.float32)\nexact_outputs = expectation_calculation(my_circuit,\n operators=pauli_x,\n symbol_names=['alpha'],\n symbol_values=input_points)\nimperfect_outputs = sampled_expectation_calculation(my_circuit,\n operators=pauli_x,\n repetitions=500,\n symbol_names=['alpha'],\n symbol_values=input_points)\nplt.title('Forward Pass Values')\nplt.xlabel('$x$')\nplt.ylabel('$f(x)$')\nplt.plot(input_points, exact_outputs, label='Analytic')\nplt.plot(input_points, imperfect_outputs, label='Sampled')\nplt.legend()", "_____no_output_____" ], [ "# Gradients are a much different story.\nvalues_tensor = tf.convert_to_tensor(input_points)\n\nwith tf.GradientTape() as g:\n g.watch(values_tensor)\n exact_outputs = expectation_calculation(my_circuit,\n operators=pauli_x,\n symbol_names=['alpha'],\n symbol_values=values_tensor)\nanalytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)\n\nwith tf.GradientTape() as g:\n g.watch(values_tensor)\n imperfect_outputs = sampled_expectation_calculation(\n my_circuit,\n operators=pauli_x,\n repetitions=500,\n symbol_names=['alpha'],\n symbol_values=values_tensor)\nsampled_finite_diff_gradients = g.gradient(imperfect_outputs, values_tensor)\n\nplt.title('Gradient Values')\nplt.xlabel('$x$')\nplt.ylabel('$f^{\\'}(x)$')\nplt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')\nplt.plot(input_points, sampled_finite_diff_gradients, label='Sampled')\nplt.legend()", "_____no_output_____" ] ], [ [ "Here you can see that although the finite difference formula is fast to compute the gradients themselves in the analytical case, when it came to the sampling based methods it was far too noisy. More careful techniques must be used to ensure a good gradient can be calculated. Next you will look at a much slower technique that wouldn't be as well suited for analytical expectation gradient calculations, but does perform much better in the real-world sample based case:", "_____no_output_____" ] ], [ [ "# A smarter differentiation scheme.\ngradient_safe_sampled_expectation = tfq.layers.SampledExpectation(\n differentiator=tfq.differentiators.ParameterShift())\n\nwith tf.GradientTape() as g:\n g.watch(values_tensor)\n imperfect_outputs = gradient_safe_sampled_expectation(\n my_circuit,\n operators=pauli_x,\n repetitions=500,\n symbol_names=['alpha'],\n symbol_values=values_tensor)\n\nsampled_param_shift_gradients = g.gradient(imperfect_outputs, values_tensor)\n\nplt.title('Gradient Values')\nplt.xlabel('$x$')\nplt.ylabel('$f^{\\'}(x)$')\nplt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')\nplt.plot(input_points, sampled_param_shift_gradients, label='Sampled')\nplt.legend()", "_____no_output_____" ] ], [ [ "From the above you can see that certain differentiators are best used for particular research scenarios. 
In general, the slower sample-based methods that are robust to device noise, etc., are great differentiators when testing or implementing algorithms in a more \"real world\" setting. Faster methods like finite difference are great for analytical calculations and you want higher throughput, but aren't yet concerned with the device viability of your algorithm.", "_____no_output_____" ], [ "## 3. Multiple observables\n\nLet's introduce a second observable and see how TensorFlow Quantum supports multiple observables for a single circuit.", "_____no_output_____" ] ], [ [ "pauli_z = cirq.Z(qubit)\npauli_z", "_____no_output_____" ] ], [ [ "If this observable is used with the same circuit as before, then you have $f_{2}(\\alpha) = ⟨Y(\\alpha)| Z | Y(\\alpha)⟩ = \\cos(\\pi \\alpha)$ and $f_{2}^{'}(\\alpha) = -\\pi \\sin(\\pi \\alpha)$. Perform a quick check:", "_____no_output_____" ] ], [ [ "test_value = 0.\n\nprint('Finite difference:', my_grad(pauli_z, test_value))\nprint('Sin formula: ', -np.pi * np.sin(np.pi * test_value))", "_____no_output_____" ] ], [ [ "It's a match (close enough).\n\nNow if you define $g(\\alpha) = f_{1}(\\alpha) + f_{2}(\\alpha)$ then $g'(\\alpha) = f_{1}^{'}(\\alpha) + f^{'}_{2}(\\alpha)$. Defining more than one observable in TensorFlow Quantum to use along with a circuit is equivalent to adding on more terms to $g$.\n\nThis means that the gradient of a particular symbol in a circuit is equal to the sum of the gradients with regards to each observable for that symbol applied to that circuit. This is compatible with TensorFlow gradient taking and backpropagation (where you give the sum of the gradients over all observables as the gradient for a particular symbol).", "_____no_output_____" ] ], [ [ "sum_of_outputs = tfq.layers.Expectation(\n differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))\n\nsum_of_outputs(my_circuit,\n operators=[pauli_x, pauli_z],\n symbol_names=['alpha'],\n symbol_values=[[test_value]])", "_____no_output_____" ] ], [ [ "Here you see the first entry is the expectation w.r.t Pauli X, and the second is the expectation w.r.t Pauli Z. Now when you take the gradient:", "_____no_output_____" ] ], [ [ "test_value_tensor = tf.convert_to_tensor([[test_value]])\n\nwith tf.GradientTape() as g:\n g.watch(test_value_tensor)\n outputs = sum_of_outputs(my_circuit,\n operators=[pauli_x, pauli_z],\n symbol_names=['alpha'],\n symbol_values=test_value_tensor)\n\nsum_of_gradients = g.gradient(outputs, test_value_tensor)\n\nprint(my_grad(pauli_x, test_value) + my_grad(pauli_z, test_value))\nprint(sum_of_gradients.numpy())", "_____no_output_____" ] ], [ [ "Here you have verified that the sum of the gradients for each observable is indeed the gradient of $\\alpha$. This behavior is supported by all TensorFlow Quantum differentiators and plays a crucial role in the compatibility with the rest of TensorFlow.", "_____no_output_____" ], [ "## 4. Advanced usage\nHere you will learn how to define your own custom differentiation routines for quantum circuits.\nAll differentiators that exist inside of TensorFlow Quantum subclass `tfq.differentiators.Differentiator`. 
A differentiator must implement `differentiate_analytic` and `differentiate_sampled`.\n\nThe following uses TensorFlow Quantum constructs to implement the closed form solution from the first part of this tutorial.", "_____no_output_____" ] ], [ [ "class MyDifferentiator(tfq.differentiators.Differentiator):\n \"\"\"A Toy differentiator for <Y^alpha | X |Y^alpha>.\"\"\"\n\n def __init__(self):\n pass\n\n @tf.function\n def get_gradient_circuits(self, programs, symbol_names, symbol_values):\n \"\"\"Return circuits to compute gradients for given forward pass circuits.\n \n When implementing a gradient, it is often useful to describe the\n intermediate computations in terms of transformed versions of the input\n circuits. The details are beyond the scope of this tutorial, but interested\n users should check out the differentiator implementations in the TFQ library\n for examples.\n \"\"\"\n raise NotImplementedError(\n \"Gradient circuits are not implemented in this tutorial.\")\n\n @tf.function\n def _compute_gradient(self, symbol_values):\n \"\"\"Compute the gradient based on symbol_values.\"\"\"\n\n # f(x) = sin(pi * x)\n # f'(x) = pi * cos(pi * x)\n return tf.cast(tf.cos(symbol_values * np.pi) * np.pi, tf.float32)\n\n @tf.function\n def differentiate_analytic(self, programs, symbol_names, symbol_values,\n pauli_sums, forward_pass_vals, grad):\n \"\"\"Specify how to differentiate a circuit with analytical expectation.\n\n This is called at graph runtime by TensorFlow. `differentiate_analytic`\n should calculate the gradient of a batch of circuits and return it\n formatted as indicated below. See\n `tfq.differentiators.ForwardDifference` for an example.\n\n Args:\n programs: `tf.Tensor` of strings with shape [batch_size] containing\n the string representations of the circuits to be executed.\n symbol_names: `tf.Tensor` of strings with shape [n_params], which\n is used to specify the order in which the values in\n `symbol_values` should be placed inside of the circuits in\n `programs`.\n symbol_values: `tf.Tensor` of real numbers with shape\n [batch_size, n_params] specifying parameter values to resolve\n into the circuits specified by programs, following the ordering\n dictated by `symbol_names`.\n pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]\n containing the string representation of the operators that will\n be used on all of the circuits in the expectation calculations.\n forward_pass_vals: `tf.Tensor` of real numbers with shape\n [batch_size, n_ops] containing the output of the forward pass\n through the op you are differentiating.\n grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]\n representing the gradient backpropagated to the output of the\n op you are differentiating through.\n\n Returns:\n A `tf.Tensor` with the same shape as `symbol_values` representing\n the gradient backpropagated to the `symbol_values` input of the op\n you are differentiating through.\n \"\"\"\n\n # Computing gradients just based off of symbol_values.\n return self._compute_gradient(symbol_values) * grad\n\n @tf.function\n def differentiate_sampled(self, programs, symbol_names, symbol_values,\n pauli_sums, num_samples, forward_pass_vals, grad):\n \"\"\"Specify how to differentiate a circuit with sampled expectation.\n\n This is called at graph runtime by TensorFlow. `differentiate_sampled`\n should calculate the gradient of a batch of circuits and return it\n formatted as indicated below. 
See\n `tfq.differentiators.ForwardDifference` for an example.\n\n Args:\n programs: `tf.Tensor` of strings with shape [batch_size] containing\n the string representations of the circuits to be executed.\n symbol_names: `tf.Tensor` of strings with shape [n_params], which\n is used to specify the order in which the values in\n `symbol_values` should be placed inside of the circuits in\n `programs`.\n symbol_values: `tf.Tensor` of real numbers with shape\n [batch_size, n_params] specifying parameter values to resolve\n into the circuits specified by programs, following the ordering\n dictated by `symbol_names`.\n pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]\n containing the string representation of the operators that will\n be used on all of the circuits in the expectation calculations.\n num_samples: `tf.Tensor` of positive integers representing the\n number of samples per term in each term of pauli_sums used\n during the forward pass.\n forward_pass_vals: `tf.Tensor` of real numbers with shape\n [batch_size, n_ops] containing the output of the forward pass\n through the op you are differentiating.\n grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]\n representing the gradient backpropagated to the output of the\n op you are differentiating through.\n\n Returns:\n A `tf.Tensor` with the same shape as `symbol_values` representing\n the gradient backpropagated to the `symbol_values` input of the op\n you are differentiating through.\n \"\"\"\n return self._compute_gradient(symbol_values) * grad", "_____no_output_____" ] ], [ [ "This new differentiator can now be used with existing `tfq.layer` objects:", "_____no_output_____" ] ], [ [ "custom_dif = MyDifferentiator()\ncustom_grad_expectation = tfq.layers.Expectation(differentiator=custom_dif)\n\n# Now let's get the gradients with finite diff.\nwith tf.GradientTape() as g:\n g.watch(values_tensor)\n exact_outputs = expectation_calculation(my_circuit,\n operators=[pauli_x],\n symbol_names=['alpha'],\n symbol_values=values_tensor)\n\nanalytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)\n\n# Now let's get the gradients with custom diff.\nwith tf.GradientTape() as g:\n g.watch(values_tensor)\n my_outputs = custom_grad_expectation(my_circuit,\n operators=[pauli_x],\n symbol_names=['alpha'],\n symbol_values=values_tensor)\n\nmy_gradients = g.gradient(my_outputs, values_tensor)\n\nplt.subplot(1, 2, 1)\nplt.title('Exact Gradient')\nplt.plot(input_points, analytic_finite_diff_gradients.numpy())\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.subplot(1, 2, 2)\nplt.title('My Gradient')\nplt.plot(input_points, my_gradients.numpy())\nplt.xlabel('x')", "_____no_output_____" ] ], [ [ "This new differentiator can now be used to generate differentiable ops.\n\nKey Point: A differentiator that has been previously attached to an op must be refreshed before attaching to a new op, because a differentiator may only be attached to one op at a time.", "_____no_output_____" ] ], [ [ "# Create a noisy sample based expectation op.\nexpectation_sampled = tfq.get_sampled_expectation_op(\n cirq.DensityMatrixSimulator(noise=cirq.depolarize(0.01)))\n\n# Make it differentiable with your differentiator:\n# Remember to refresh the differentiator before attaching the new op\ncustom_dif.refresh()\ndifferentiable_op = custom_dif.generate_differentiable_op(\n sampled_op=expectation_sampled)\n\n# Prep op inputs.\ncircuit_tensor = tfq.convert_to_tensor([my_circuit])\nop_tensor = tfq.convert_to_tensor([[pauli_x]])\nsingle_value = 
tf.convert_to_tensor([[my_alpha]])\nnum_samples_tensor = tf.convert_to_tensor([[1000]])\n\nwith tf.GradientTape() as g:\n    g.watch(single_value)\n    forward_output = differentiable_op(circuit_tensor, ['alpha'], single_value,\n                                       op_tensor, num_samples_tensor)\n\nmy_gradients = g.gradient(forward_output, single_value)\n\nprint('---TFQ---')\nprint('Forward: ', forward_output.numpy())\nprint('Gradient:', my_gradients.numpy())\nprint('---Original---')\nprint('Forward: ', my_expectation(pauli_x, my_alpha))\nprint('Gradient:', my_grad(pauli_x, my_alpha))", "_____no_output_____" ] ], [ [ "Success: Now you can use all the differentiators that TensorFlow Quantum has to offer—and define your own.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7a378f7faccefb4d0dbb17d0675d0548fa1f062
102,784
ipynb
Jupyter Notebook
Tensorflow 2/Simple Classification/Classification with TF2.ipynb
alperiox/DL-DS-ML
33d1de52582b29aeb02cfc1b6457b08bb770b9ad
[ "MIT" ]
2
2020-11-14T15:52:21.000Z
2020-12-11T14:23:28.000Z
Tensorflow 2/Simple Classification/Classification with TF2.ipynb
Raqhea/DL-DS-ML
33d1de52582b29aeb02cfc1b6457b08bb770b9ad
[ "MIT" ]
null
null
null
Tensorflow 2/Simple Classification/Classification with TF2.ipynb
Raqhea/DL-DS-ML
33d1de52582b29aeb02cfc1b6457b08bb770b9ad
[ "MIT" ]
null
null
null
102,784
102,784
0.840033
[ [ [ "%tensorflow_version 2.x\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.preprocessing import MinMaxScaler \nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "# Download the dataset, source: https://www.openml.org/d/31\n!wget https://www.openml.org/data/get_csv/1586225/php0iVrYT.csv", "--2020-02-27 21:34:53-- https://www.openml.org/data/get_csv/1586225/php0iVrYT.csv\nResolving www.openml.org (www.openml.org)... 131.155.11.11\nConnecting to www.openml.org (www.openml.org)|131.155.11.11|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: unspecified [text/plain]\nSaving to: ‘php0iVrYT.csv.1’\n\nphp0iVrYT.csv.1 [ <=> ] 10.27K --.-KB/s in 0s \n\n2020-02-27 21:34:54 (98.6 MB/s) - ‘php0iVrYT.csv.1’ saved [10512]\n\n" ], [ "# Import the data\ndataset = pd.read_csv(\"php0iVrYT.csv\")", "_____no_output_____" ], [ "\"\"\"\n* V1: Recency - months since last donation\n* V2: Frequency - total number of donation\n* V3: Monetary - total blood donated in c.c.\n* V4: Time - months since first donation), and a binary variable representing whether he/she donated blood in March 2007 \n (1 stand for donating blood; 0 stands for not donating blood).\n\nThe target attribute is a binary variable representing whether he/she donated blood in March 2007 (2 stands for donating blood; 1 stands for not donating blood).\n\"\"\"\ncols = ['Recency', 'Frequency', 'Monetary', 'Time','Class']\ndataset.columns = cols", "_____no_output_____" ], [ "# Substract 1 from Class column to convert the classification to binary classification\ndataset.Class = dataset.Class - 1", "_____no_output_____" ], [ "dataset.describe()", "_____no_output_____" ], [ "X = dataset.iloc[:, :-1].values # features\nY = dataset.iloc[:, -1].values # target variable", "_____no_output_____" ], [ "# Split the data\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = .25)", "_____no_output_____" ], [ "# Scale features\nscaler = MinMaxScaler(feature_range = (0, 10))\nX_train, X_test = scaler.fit_transform(X_train), scaler.fit_transform(X_test)", "_____no_output_____" ], [ "X_train", "_____no_output_____" ], [ "# Train the model\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape = (X_train.shape[1],)),\n tf.keras.layers.Dense(256, activation = 'relu'),\n tf.keras.layers.Dropout(.4),\n tf.keras.layers.Dense(1, activation = 'sigmoid')\n])\n\n# compile\nopt = tf.keras.optimizers.Adamax(learning_rate = 0.01)\nmodel.compile(optimizer = opt,\n loss = 'binary_crossentropy',\n metrics = ['accuracy'])\n\n\n# fit the model\nr = model.fit(X_train, y_train, validation_data = (X_test, y_test), epochs = 250, verbose = 2)\n\n# plot the loss\nplt.plot(r.history['loss'], label = 'loss')\nplt.plot(r.history['val_loss'], label = 'val_loss')\nplt.legend()\nplt.show()\nplt.plot(r.history['accuracy'], label = 'accuracy')\nplt.plot(r.history['val_accuracy'], label = 'val_accuracy')\nplt.legend()\nplt.show()", "Train on 561 samples, validate on 187 samples\nEpoch 1/250\n561/561 - 0s - loss: 0.5884 - accuracy: 0.7291 - val_loss: 0.4005 - val_accuracy: 0.8342\nEpoch 2/250\n561/561 - 0s - loss: 0.5389 - accuracy: 0.7576 - val_loss: 0.3762 - val_accuracy: 0.8342\nEpoch 3/250\n561/561 - 0s - loss: 0.5134 - accuracy: 0.7754 - val_loss: 0.3761 - val_accuracy: 0.8289\nEpoch 4/250\n561/561 - 0s - loss: 0.5062 - accuracy: 0.7701 - val_loss: 0.3758 - val_accuracy: 0.8128\nEpoch 5/250\n561/561 - 0s - loss: 0.5207 - accuracy: 
0.7469 - val_loss: 0.3759 - val_accuracy: 0.8342\nEpoch 6/250\n561/561 - 0s - loss: 0.4950 - accuracy: 0.7807 - val_loss: 0.3764 - val_accuracy: 0.8182\nEpoch 7/250\n561/561 - 0s - loss: 0.4908 - accuracy: 0.7594 - val_loss: 0.3771 - val_accuracy: 0.8342\nEpoch 8/250\n561/561 - 0s - loss: 0.5013 - accuracy: 0.7772 - val_loss: 0.3761 - val_accuracy: 0.8289\nEpoch 9/250\n561/561 - 0s - loss: 0.4950 - accuracy: 0.7665 - val_loss: 0.3758 - val_accuracy: 0.8342\nEpoch 10/250\n561/561 - 0s - loss: 0.4982 - accuracy: 0.7665 - val_loss: 0.3788 - val_accuracy: 0.8342\nEpoch 11/250\n561/561 - 0s - loss: 0.5100 - accuracy: 0.7772 - val_loss: 0.3750 - val_accuracy: 0.8396\nEpoch 12/250\n561/561 - 0s - loss: 0.4971 - accuracy: 0.7683 - val_loss: 0.3751 - val_accuracy: 0.8396\nEpoch 13/250\n561/561 - 0s - loss: 0.4896 - accuracy: 0.7701 - val_loss: 0.3737 - val_accuracy: 0.8289\nEpoch 14/250\n561/561 - 0s - loss: 0.4919 - accuracy: 0.7772 - val_loss: 0.3780 - val_accuracy: 0.8396\nEpoch 15/250\n561/561 - 0s - loss: 0.4917 - accuracy: 0.7701 - val_loss: 0.3744 - val_accuracy: 0.8128\nEpoch 16/250\n561/561 - 0s - loss: 0.4971 - accuracy: 0.7665 - val_loss: 0.3752 - val_accuracy: 0.8396\nEpoch 17/250\n561/561 - 0s - loss: 0.4894 - accuracy: 0.7701 - val_loss: 0.3759 - val_accuracy: 0.8449\nEpoch 18/250\n561/561 - 0s - loss: 0.4950 - accuracy: 0.7629 - val_loss: 0.3743 - val_accuracy: 0.8289\nEpoch 19/250\n561/561 - 0s - loss: 0.4918 - accuracy: 0.7825 - val_loss: 0.3780 - val_accuracy: 0.8396\nEpoch 20/250\n561/561 - 0s - loss: 0.4933 - accuracy: 0.7736 - val_loss: 0.3761 - val_accuracy: 0.8342\nEpoch 21/250\n561/561 - 0s - loss: 0.4948 - accuracy: 0.7825 - val_loss: 0.3757 - val_accuracy: 0.8449\nEpoch 22/250\n561/561 - 0s - loss: 0.4956 - accuracy: 0.7754 - val_loss: 0.3765 - val_accuracy: 0.8396\nEpoch 23/250\n561/561 - 0s - loss: 0.4852 - accuracy: 0.7861 - val_loss: 0.3763 - val_accuracy: 0.8396\nEpoch 24/250\n561/561 - 0s - loss: 0.5013 - accuracy: 0.7754 - val_loss: 0.3781 - val_accuracy: 0.8289\nEpoch 25/250\n561/561 - 0s - loss: 0.4866 - accuracy: 0.7825 - val_loss: 0.3754 - val_accuracy: 0.8342\nEpoch 26/250\n561/561 - 0s - loss: 0.4906 - accuracy: 0.7825 - val_loss: 0.3764 - val_accuracy: 0.8289\nEpoch 27/250\n561/561 - 0s - loss: 0.4872 - accuracy: 0.7772 - val_loss: 0.3772 - val_accuracy: 0.8289\nEpoch 28/250\n561/561 - 0s - loss: 0.4849 - accuracy: 0.7772 - val_loss: 0.3776 - val_accuracy: 0.8182\nEpoch 29/250\n561/561 - 0s - loss: 0.4816 - accuracy: 0.7861 - val_loss: 0.3773 - val_accuracy: 0.8289\nEpoch 30/250\n561/561 - 0s - loss: 0.4917 - accuracy: 0.7754 - val_loss: 0.3774 - val_accuracy: 0.8289\nEpoch 31/250\n561/561 - 0s - loss: 0.4932 - accuracy: 0.7718 - val_loss: 0.3786 - val_accuracy: 0.8235\nEpoch 32/250\n561/561 - 0s - loss: 0.4891 - accuracy: 0.7754 - val_loss: 0.3769 - val_accuracy: 0.8235\nEpoch 33/250\n561/561 - 0s - loss: 0.4810 - accuracy: 0.7825 - val_loss: 0.3768 - val_accuracy: 0.8182\nEpoch 34/250\n561/561 - 0s - loss: 0.4865 - accuracy: 0.7754 - val_loss: 0.3795 - val_accuracy: 0.8182\nEpoch 35/250\n561/561 - 0s - loss: 0.4907 - accuracy: 0.7897 - val_loss: 0.3783 - val_accuracy: 0.8128\nEpoch 36/250\n561/561 - 0s - loss: 0.4881 - accuracy: 0.7790 - val_loss: 0.3804 - val_accuracy: 0.8182\nEpoch 37/250\n561/561 - 0s - loss: 0.4845 - accuracy: 0.7914 - val_loss: 0.3776 - val_accuracy: 0.8075\nEpoch 38/250\n561/561 - 0s - loss: 0.4764 - accuracy: 0.7897 - val_loss: 0.3781 - val_accuracy: 0.8182\nEpoch 39/250\n561/561 - 0s - loss: 0.4784 - accuracy: 0.7914 - val_loss: 
0.3805 - val_accuracy: 0.8289\nEpoch 40/250\n561/561 - 0s - loss: 0.4798 - accuracy: 0.7861 - val_loss: 0.3793 - val_accuracy: 0.8075\nEpoch 41/250\n561/561 - 0s - loss: 0.4851 - accuracy: 0.7772 - val_loss: 0.3809 - val_accuracy: 0.8235\nEpoch 42/250\n561/561 - 0s - loss: 0.4815 - accuracy: 0.7932 - val_loss: 0.3787 - val_accuracy: 0.8075\nEpoch 43/250\n561/561 - 0s - loss: 0.4888 - accuracy: 0.7932 - val_loss: 0.3828 - val_accuracy: 0.8235\nEpoch 44/250\n561/561 - 0s - loss: 0.4829 - accuracy: 0.7932 - val_loss: 0.3781 - val_accuracy: 0.8075\nEpoch 45/250\n561/561 - 0s - loss: 0.4877 - accuracy: 0.7914 - val_loss: 0.3803 - val_accuracy: 0.8128\nEpoch 46/250\n561/561 - 0s - loss: 0.4766 - accuracy: 0.7897 - val_loss: 0.3798 - val_accuracy: 0.8075\nEpoch 47/250\n561/561 - 0s - loss: 0.4878 - accuracy: 0.7843 - val_loss: 0.3817 - val_accuracy: 0.8075\nEpoch 48/250\n561/561 - 0s - loss: 0.4882 - accuracy: 0.7754 - val_loss: 0.3814 - val_accuracy: 0.8235\nEpoch 49/250\n561/561 - 0s - loss: 0.4775 - accuracy: 0.7879 - val_loss: 0.3798 - val_accuracy: 0.8075\nEpoch 50/250\n561/561 - 0s - loss: 0.4797 - accuracy: 0.7950 - val_loss: 0.3821 - val_accuracy: 0.8235\nEpoch 51/250\n561/561 - 0s - loss: 0.4781 - accuracy: 0.7897 - val_loss: 0.3805 - val_accuracy: 0.8075\nEpoch 52/250\n561/561 - 0s - loss: 0.4773 - accuracy: 0.7897 - val_loss: 0.3829 - val_accuracy: 0.8182\nEpoch 53/250\n561/561 - 0s - loss: 0.4770 - accuracy: 0.7807 - val_loss: 0.3810 - val_accuracy: 0.8182\nEpoch 54/250\n561/561 - 0s - loss: 0.4828 - accuracy: 0.7825 - val_loss: 0.3820 - val_accuracy: 0.8075\nEpoch 55/250\n561/561 - 0s - loss: 0.4821 - accuracy: 0.7897 - val_loss: 0.3816 - val_accuracy: 0.8021\nEpoch 56/250\n561/561 - 0s - loss: 0.4829 - accuracy: 0.7879 - val_loss: 0.3829 - val_accuracy: 0.8075\nEpoch 57/250\n561/561 - 0s - loss: 0.4786 - accuracy: 0.7897 - val_loss: 0.3818 - val_accuracy: 0.8075\nEpoch 58/250\n561/561 - 0s - loss: 0.4767 - accuracy: 0.7861 - val_loss: 0.3837 - val_accuracy: 0.8235\nEpoch 59/250\n561/561 - 0s - loss: 0.4833 - accuracy: 0.7914 - val_loss: 0.3825 - val_accuracy: 0.8075\nEpoch 60/250\n561/561 - 0s - loss: 0.4778 - accuracy: 0.7897 - val_loss: 0.3847 - val_accuracy: 0.8235\nEpoch 61/250\n561/561 - 0s - loss: 0.4753 - accuracy: 0.7861 - val_loss: 0.3843 - val_accuracy: 0.8235\nEpoch 62/250\n561/561 - 0s - loss: 0.4811 - accuracy: 0.7932 - val_loss: 0.3835 - val_accuracy: 0.8021\nEpoch 63/250\n561/561 - 0s - loss: 0.4761 - accuracy: 0.7825 - val_loss: 0.3830 - val_accuracy: 0.8021\nEpoch 64/250\n561/561 - 0s - loss: 0.4835 - accuracy: 0.7861 - val_loss: 0.3840 - val_accuracy: 0.8021\nEpoch 65/250\n561/561 - 0s - loss: 0.4716 - accuracy: 0.7914 - val_loss: 0.3841 - val_accuracy: 0.8235\nEpoch 66/250\n561/561 - 0s - loss: 0.4787 - accuracy: 0.7861 - val_loss: 0.3831 - val_accuracy: 0.8182\nEpoch 67/250\n561/561 - 0s - loss: 0.4782 - accuracy: 0.7843 - val_loss: 0.3844 - val_accuracy: 0.8021\nEpoch 68/250\n561/561 - 0s - loss: 0.4766 - accuracy: 0.7968 - val_loss: 0.3867 - val_accuracy: 0.8289\nEpoch 69/250\n561/561 - 0s - loss: 0.4763 - accuracy: 0.7968 - val_loss: 0.3838 - val_accuracy: 0.8075\nEpoch 70/250\n561/561 - 0s - loss: 0.4748 - accuracy: 0.7825 - val_loss: 0.3848 - val_accuracy: 0.8182\nEpoch 71/250\n561/561 - 0s - loss: 0.4676 - accuracy: 0.7950 - val_loss: 0.3892 - val_accuracy: 0.8289\nEpoch 72/250\n561/561 - 0s - loss: 0.4802 - accuracy: 0.7932 - val_loss: 0.3843 - val_accuracy: 0.8075\nEpoch 73/250\n561/561 - 0s - loss: 0.4752 - accuracy: 0.7843 - val_loss: 0.3889 - 
val_accuracy: 0.7968\nEpoch 74/250\n561/561 - 0s - loss: 0.4802 - accuracy: 0.7754 - val_loss: 0.3862 - val_accuracy: 0.8235\nEpoch 75/250\n561/561 - 0s - loss: 0.4721 - accuracy: 0.8004 - val_loss: 0.3846 - val_accuracy: 0.8182\nEpoch 76/250\n561/561 - 0s - loss: 0.4762 - accuracy: 0.7914 - val_loss: 0.3835 - val_accuracy: 0.8075\nEpoch 77/250\n561/561 - 0s - loss: 0.4781 - accuracy: 0.7807 - val_loss: 0.3862 - val_accuracy: 0.8021\nEpoch 78/250\n561/561 - 0s - loss: 0.4727 - accuracy: 0.7932 - val_loss: 0.3858 - val_accuracy: 0.8128\nEpoch 79/250\n561/561 - 0s - loss: 0.4774 - accuracy: 0.7807 - val_loss: 0.3854 - val_accuracy: 0.8128\nEpoch 80/250\n561/561 - 0s - loss: 0.4747 - accuracy: 0.7932 - val_loss: 0.3860 - val_accuracy: 0.8021\nEpoch 81/250\n561/561 - 0s - loss: 0.4766 - accuracy: 0.7790 - val_loss: 0.3907 - val_accuracy: 0.8235\nEpoch 82/250\n561/561 - 0s - loss: 0.4761 - accuracy: 0.7950 - val_loss: 0.3857 - val_accuracy: 0.8128\nEpoch 83/250\n561/561 - 0s - loss: 0.4783 - accuracy: 0.7897 - val_loss: 0.3876 - val_accuracy: 0.8182\nEpoch 84/250\n561/561 - 0s - loss: 0.4735 - accuracy: 0.7897 - val_loss: 0.3866 - val_accuracy: 0.8021\nEpoch 85/250\n561/561 - 0s - loss: 0.4685 - accuracy: 0.7897 - val_loss: 0.3879 - val_accuracy: 0.8182\nEpoch 86/250\n561/561 - 0s - loss: 0.4855 - accuracy: 0.7986 - val_loss: 0.3855 - val_accuracy: 0.8128\nEpoch 87/250\n561/561 - 0s - loss: 0.4798 - accuracy: 0.7968 - val_loss: 0.3864 - val_accuracy: 0.8235\nEpoch 88/250\n561/561 - 0s - loss: 0.4832 - accuracy: 0.7986 - val_loss: 0.3857 - val_accuracy: 0.8128\nEpoch 89/250\n561/561 - 0s - loss: 0.4723 - accuracy: 0.7968 - val_loss: 0.3845 - val_accuracy: 0.8128\nEpoch 90/250\n561/561 - 0s - loss: 0.4830 - accuracy: 0.7807 - val_loss: 0.3844 - val_accuracy: 0.8021\nEpoch 91/250\n561/561 - 0s - loss: 0.4725 - accuracy: 0.7897 - val_loss: 0.3864 - val_accuracy: 0.8021\nEpoch 92/250\n561/561 - 0s - loss: 0.4843 - accuracy: 0.7950 - val_loss: 0.3874 - val_accuracy: 0.8128\nEpoch 93/250\n561/561 - 0s - loss: 0.4801 - accuracy: 0.7843 - val_loss: 0.3902 - val_accuracy: 0.8128\nEpoch 94/250\n561/561 - 0s - loss: 0.4856 - accuracy: 0.8004 - val_loss: 0.3862 - val_accuracy: 0.8021\nEpoch 95/250\n561/561 - 0s - loss: 0.4850 - accuracy: 0.7914 - val_loss: 0.3873 - val_accuracy: 0.8128\nEpoch 96/250\n561/561 - 0s - loss: 0.4720 - accuracy: 0.7914 - val_loss: 0.3864 - val_accuracy: 0.8021\nEpoch 97/250\n561/561 - 0s - loss: 0.4786 - accuracy: 0.7950 - val_loss: 0.3867 - val_accuracy: 0.7968\nEpoch 98/250\n561/561 - 0s - loss: 0.4828 - accuracy: 0.7790 - val_loss: 0.3889 - val_accuracy: 0.8235\nEpoch 99/250\n561/561 - 0s - loss: 0.4728 - accuracy: 0.7950 - val_loss: 0.3878 - val_accuracy: 0.8128\nEpoch 100/250\n561/561 - 0s - loss: 0.4820 - accuracy: 0.7932 - val_loss: 0.3882 - val_accuracy: 0.8182\nEpoch 101/250\n561/561 - 0s - loss: 0.4750 - accuracy: 0.7790 - val_loss: 0.3881 - val_accuracy: 0.7968\nEpoch 102/250\n561/561 - 0s - loss: 0.4733 - accuracy: 0.7879 - val_loss: 0.3879 - val_accuracy: 0.8021\nEpoch 103/250\n561/561 - 0s - loss: 0.4664 - accuracy: 0.7932 - val_loss: 0.3871 - val_accuracy: 0.8021\nEpoch 104/250\n561/561 - 0s - loss: 0.4700 - accuracy: 0.7932 - val_loss: 0.3880 - val_accuracy: 0.8128\nEpoch 105/250\n561/561 - 0s - loss: 0.4704 - accuracy: 0.7932 - val_loss: 0.3903 - val_accuracy: 0.8128\nEpoch 106/250\n561/561 - 0s - loss: 0.4705 - accuracy: 0.7879 - val_loss: 0.3878 - val_accuracy: 0.8021\nEpoch 107/250\n561/561 - 0s - loss: 0.4759 - accuracy: 0.7861 - val_loss: 0.3878 - 
val_accuracy: 0.8021\nEpoch 108/250\n561/561 - 0s - loss: 0.4806 - accuracy: 0.7897 - val_loss: 0.3880 - val_accuracy: 0.8021\nEpoch 109/250\n561/561 - 0s - loss: 0.4721 - accuracy: 0.7950 - val_loss: 0.3884 - val_accuracy: 0.8128\nEpoch 110/250\n561/561 - 0s - loss: 0.4733 - accuracy: 0.7932 - val_loss: 0.3892 - val_accuracy: 0.8128\nEpoch 111/250\n561/561 - 0s - loss: 0.4732 - accuracy: 0.7932 - val_loss: 0.3894 - val_accuracy: 0.8128\nEpoch 112/250\n561/561 - 0s - loss: 0.4716 - accuracy: 0.7968 - val_loss: 0.3899 - val_accuracy: 0.8128\nEpoch 113/250\n561/561 - 0s - loss: 0.4751 - accuracy: 0.7932 - val_loss: 0.3894 - val_accuracy: 0.8021\nEpoch 114/250\n561/561 - 0s - loss: 0.4752 - accuracy: 0.7897 - val_loss: 0.3900 - val_accuracy: 0.8128\nEpoch 115/250\n561/561 - 0s - loss: 0.4762 - accuracy: 0.7879 - val_loss: 0.3908 - val_accuracy: 0.8128\nEpoch 116/250\n561/561 - 0s - loss: 0.4846 - accuracy: 0.7807 - val_loss: 0.3913 - val_accuracy: 0.8128\nEpoch 117/250\n561/561 - 0s - loss: 0.4638 - accuracy: 0.7950 - val_loss: 0.3906 - val_accuracy: 0.8128\nEpoch 118/250\n561/561 - 0s - loss: 0.4612 - accuracy: 0.7968 - val_loss: 0.3894 - val_accuracy: 0.8128\nEpoch 119/250\n561/561 - 0s - loss: 0.4631 - accuracy: 0.7950 - val_loss: 0.3898 - val_accuracy: 0.8128\nEpoch 120/250\n561/561 - 0s - loss: 0.4657 - accuracy: 0.7932 - val_loss: 0.3890 - val_accuracy: 0.8128\nEpoch 121/250\n561/561 - 0s - loss: 0.4696 - accuracy: 0.8004 - val_loss: 0.3906 - val_accuracy: 0.8128\nEpoch 122/250\n561/561 - 0s - loss: 0.4686 - accuracy: 0.7932 - val_loss: 0.3902 - val_accuracy: 0.8128\nEpoch 123/250\n561/561 - 0s - loss: 0.4687 - accuracy: 0.7879 - val_loss: 0.3900 - val_accuracy: 0.8128\nEpoch 124/250\n561/561 - 0s - loss: 0.4727 - accuracy: 0.7843 - val_loss: 0.3912 - val_accuracy: 0.8128\nEpoch 125/250\n561/561 - 0s - loss: 0.4727 - accuracy: 0.7968 - val_loss: 0.3915 - val_accuracy: 0.8128\nEpoch 126/250\n561/561 - 0s - loss: 0.4723 - accuracy: 0.7932 - val_loss: 0.3908 - val_accuracy: 0.8128\nEpoch 127/250\n561/561 - 0s - loss: 0.4712 - accuracy: 0.7950 - val_loss: 0.3906 - val_accuracy: 0.8021\nEpoch 128/250\n561/561 - 0s - loss: 0.4762 - accuracy: 0.7914 - val_loss: 0.3900 - val_accuracy: 0.8021\nEpoch 129/250\n561/561 - 0s - loss: 0.4750 - accuracy: 0.7879 - val_loss: 0.3902 - val_accuracy: 0.8128\nEpoch 130/250\n561/561 - 0s - loss: 0.4733 - accuracy: 0.7861 - val_loss: 0.3907 - val_accuracy: 0.8128\nEpoch 131/250\n561/561 - 0s - loss: 0.4722 - accuracy: 0.7843 - val_loss: 0.3894 - val_accuracy: 0.7968\nEpoch 132/250\n561/561 - 0s - loss: 0.4802 - accuracy: 0.7897 - val_loss: 0.3915 - val_accuracy: 0.8128\nEpoch 133/250\n561/561 - 0s - loss: 0.4757 - accuracy: 0.7897 - val_loss: 0.3891 - val_accuracy: 0.7968\nEpoch 134/250\n561/561 - 0s - loss: 0.4768 - accuracy: 0.7879 - val_loss: 0.3898 - val_accuracy: 0.7968\nEpoch 135/250\n561/561 - 0s - loss: 0.4728 - accuracy: 0.7897 - val_loss: 0.3908 - val_accuracy: 0.8075\nEpoch 136/250\n561/561 - 0s - loss: 0.4700 - accuracy: 0.7914 - val_loss: 0.3893 - val_accuracy: 0.8021\nEpoch 137/250\n561/561 - 0s - loss: 0.4800 - accuracy: 0.7986 - val_loss: 0.3930 - val_accuracy: 0.8128\nEpoch 138/250\n561/561 - 0s - loss: 0.4621 - accuracy: 0.7968 - val_loss: 0.3919 - val_accuracy: 0.8128\nEpoch 139/250\n561/561 - 0s - loss: 0.4678 - accuracy: 0.7897 - val_loss: 0.3937 - val_accuracy: 0.8128\nEpoch 140/250\n561/561 - 0s - loss: 0.4740 - accuracy: 0.7790 - val_loss: 0.3933 - val_accuracy: 0.8075\nEpoch 141/250\n561/561 - 0s - loss: 0.4685 - accuracy: 0.8004 - 
val_loss: 0.3909 - val_accuracy: 0.8075\nEpoch 142/250\n561/561 - 0s - loss: 0.4716 - accuracy: 0.7986 - val_loss: 0.3913 - val_accuracy: 0.8128\nEpoch 143/250\n561/561 - 0s - loss: 0.4675 - accuracy: 0.7914 - val_loss: 0.3920 - val_accuracy: 0.8128\nEpoch 144/250\n561/561 - 0s - loss: 0.4726 - accuracy: 0.7932 - val_loss: 0.3921 - val_accuracy: 0.8128\nEpoch 145/250\n561/561 - 0s - loss: 0.4622 - accuracy: 0.8075 - val_loss: 0.3909 - val_accuracy: 0.8128\nEpoch 146/250\n561/561 - 0s - loss: 0.4777 - accuracy: 0.7968 - val_loss: 0.3896 - val_accuracy: 0.8128\nEpoch 147/250\n561/561 - 0s - loss: 0.4769 - accuracy: 0.7968 - val_loss: 0.3901 - val_accuracy: 0.8128\nEpoch 148/250\n561/561 - 0s - loss: 0.4705 - accuracy: 0.7914 - val_loss: 0.3910 - val_accuracy: 0.8128\nEpoch 149/250\n561/561 - 0s - loss: 0.4741 - accuracy: 0.7986 - val_loss: 0.3909 - val_accuracy: 0.8128\nEpoch 150/250\n561/561 - 0s - loss: 0.4722 - accuracy: 0.7772 - val_loss: 0.3925 - val_accuracy: 0.8128\nEpoch 151/250\n561/561 - 0s - loss: 0.4746 - accuracy: 0.7879 - val_loss: 0.3881 - val_accuracy: 0.8021\nEpoch 152/250\n561/561 - 0s - loss: 0.4816 - accuracy: 0.7932 - val_loss: 0.3900 - val_accuracy: 0.8021\nEpoch 153/250\n561/561 - 0s - loss: 0.4725 - accuracy: 0.7897 - val_loss: 0.3898 - val_accuracy: 0.8021\nEpoch 154/250\n561/561 - 0s - loss: 0.4747 - accuracy: 0.7986 - val_loss: 0.3910 - val_accuracy: 0.8128\nEpoch 155/250\n561/561 - 0s - loss: 0.4621 - accuracy: 0.7932 - val_loss: 0.3926 - val_accuracy: 0.8128\nEpoch 156/250\n561/561 - 0s - loss: 0.4800 - accuracy: 0.7879 - val_loss: 0.3903 - val_accuracy: 0.8128\nEpoch 157/250\n561/561 - 0s - loss: 0.4757 - accuracy: 0.8004 - val_loss: 0.3932 - val_accuracy: 0.8128\nEpoch 158/250\n561/561 - 0s - loss: 0.4741 - accuracy: 0.8057 - val_loss: 0.3924 - val_accuracy: 0.8128\nEpoch 159/250\n561/561 - 0s - loss: 0.4704 - accuracy: 0.7897 - val_loss: 0.3915 - val_accuracy: 0.8128\nEpoch 160/250\n561/561 - 0s - loss: 0.4776 - accuracy: 0.7950 - val_loss: 0.3894 - val_accuracy: 0.8021\nEpoch 161/250\n561/561 - 0s - loss: 0.4624 - accuracy: 0.7914 - val_loss: 0.3905 - val_accuracy: 0.8021\nEpoch 162/250\n561/561 - 0s - loss: 0.4673 - accuracy: 0.7861 - val_loss: 0.3907 - val_accuracy: 0.8128\nEpoch 163/250\n561/561 - 0s - loss: 0.4749 - accuracy: 0.7897 - val_loss: 0.3925 - val_accuracy: 0.8128\nEpoch 164/250\n561/561 - 0s - loss: 0.4707 - accuracy: 0.7932 - val_loss: 0.3941 - val_accuracy: 0.8128\nEpoch 165/250\n561/561 - 0s - loss: 0.4683 - accuracy: 0.7897 - val_loss: 0.3961 - val_accuracy: 0.8235\nEpoch 166/250\n561/561 - 0s - loss: 0.4738 - accuracy: 0.7950 - val_loss: 0.3927 - val_accuracy: 0.8128\nEpoch 167/250\n561/561 - 0s - loss: 0.4727 - accuracy: 0.7914 - val_loss: 0.3916 - val_accuracy: 0.8128\nEpoch 168/250\n561/561 - 0s - loss: 0.4637 - accuracy: 0.7861 - val_loss: 0.3940 - val_accuracy: 0.8128\nEpoch 169/250\n561/561 - 0s - loss: 0.4677 - accuracy: 0.7914 - val_loss: 0.3945 - val_accuracy: 0.8128\nEpoch 170/250\n561/561 - 0s - loss: 0.4705 - accuracy: 0.7968 - val_loss: 0.3921 - val_accuracy: 0.7968\nEpoch 171/250\n561/561 - 0s - loss: 0.4725 - accuracy: 0.7843 - val_loss: 0.3901 - val_accuracy: 0.8128\nEpoch 172/250\n561/561 - 0s - loss: 0.4701 - accuracy: 0.7950 - val_loss: 0.3927 - val_accuracy: 0.8128\nEpoch 173/250\n561/561 - 0s - loss: 0.4795 - accuracy: 0.7932 - val_loss: 0.3921 - val_accuracy: 0.8128\nEpoch 174/250\n561/561 - 0s - loss: 0.4742 - accuracy: 0.7879 - val_loss: 0.3896 - val_accuracy: 0.8128\nEpoch 175/250\n561/561 - 0s - loss: 0.4730 - 
accuracy: 0.7932 - val_loss: 0.3899 - val_accuracy: 0.8128\nEpoch 176/250\n561/561 - 0s - loss: 0.4713 - accuracy: 0.7914 - val_loss: 0.3930 - val_accuracy: 0.8128\nEpoch 177/250\n561/561 - 0s - loss: 0.4702 - accuracy: 0.7879 - val_loss: 0.3912 - val_accuracy: 0.8021\nEpoch 178/250\n561/561 - 0s - loss: 0.4687 - accuracy: 0.7772 - val_loss: 0.3905 - val_accuracy: 0.8021\nEpoch 179/250\n561/561 - 0s - loss: 0.4717 - accuracy: 0.7950 - val_loss: 0.3908 - val_accuracy: 0.8021\nEpoch 180/250\n561/561 - 0s - loss: 0.4730 - accuracy: 0.7879 - val_loss: 0.3922 - val_accuracy: 0.8128\nEpoch 181/250\n561/561 - 0s - loss: 0.4636 - accuracy: 0.7897 - val_loss: 0.3916 - val_accuracy: 0.7968\nEpoch 182/250\n561/561 - 0s - loss: 0.4747 - accuracy: 0.7772 - val_loss: 0.3928 - val_accuracy: 0.8128\nEpoch 183/250\n561/561 - 0s - loss: 0.4664 - accuracy: 0.7861 - val_loss: 0.3914 - val_accuracy: 0.7968\nEpoch 184/250\n561/561 - 0s - loss: 0.4720 - accuracy: 0.7950 - val_loss: 0.3916 - val_accuracy: 0.8128\nEpoch 185/250\n561/561 - 0s - loss: 0.4813 - accuracy: 0.7879 - val_loss: 0.3932 - val_accuracy: 0.8021\nEpoch 186/250\n561/561 - 0s - loss: 0.4750 - accuracy: 0.7914 - val_loss: 0.3921 - val_accuracy: 0.8128\nEpoch 187/250\n561/561 - 0s - loss: 0.4702 - accuracy: 0.7754 - val_loss: 0.3887 - val_accuracy: 0.8021\nEpoch 188/250\n561/561 - 0s - loss: 0.4668 - accuracy: 0.7932 - val_loss: 0.3902 - val_accuracy: 0.8128\nEpoch 189/250\n561/561 - 0s - loss: 0.4689 - accuracy: 0.7932 - val_loss: 0.3911 - val_accuracy: 0.8021\nEpoch 190/250\n561/561 - 0s - loss: 0.4660 - accuracy: 0.8021 - val_loss: 0.3915 - val_accuracy: 0.8128\nEpoch 191/250\n561/561 - 0s - loss: 0.4711 - accuracy: 0.7950 - val_loss: 0.3932 - val_accuracy: 0.8128\nEpoch 192/250\n561/561 - 0s - loss: 0.4766 - accuracy: 0.7861 - val_loss: 0.3935 - val_accuracy: 0.8021\nEpoch 193/250\n561/561 - 0s - loss: 0.4756 - accuracy: 0.7914 - val_loss: 0.3942 - val_accuracy: 0.8128\nEpoch 194/250\n561/561 - 0s - loss: 0.4742 - accuracy: 0.8004 - val_loss: 0.3922 - val_accuracy: 0.8021\nEpoch 195/250\n561/561 - 0s - loss: 0.4779 - accuracy: 0.7932 - val_loss: 0.3914 - val_accuracy: 0.7968\nEpoch 196/250\n561/561 - 0s - loss: 0.4668 - accuracy: 0.7932 - val_loss: 0.3937 - val_accuracy: 0.8128\nEpoch 197/250\n561/561 - 0s - loss: 0.4668 - accuracy: 0.7914 - val_loss: 0.3962 - val_accuracy: 0.8128\nEpoch 198/250\n561/561 - 0s - loss: 0.4660 - accuracy: 0.8004 - val_loss: 0.3928 - val_accuracy: 0.8021\nEpoch 199/250\n561/561 - 0s - loss: 0.4763 - accuracy: 0.7914 - val_loss: 0.3911 - val_accuracy: 0.8128\nEpoch 200/250\n561/561 - 0s - loss: 0.4692 - accuracy: 0.7968 - val_loss: 0.3931 - val_accuracy: 0.8128\nEpoch 201/250\n561/561 - 0s - loss: 0.4696 - accuracy: 0.7968 - val_loss: 0.3919 - val_accuracy: 0.8128\nEpoch 202/250\n561/561 - 0s - loss: 0.4745 - accuracy: 0.7807 - val_loss: 0.3920 - val_accuracy: 0.8128\nEpoch 203/250\n561/561 - 0s - loss: 0.4762 - accuracy: 0.7914 - val_loss: 0.3934 - val_accuracy: 0.7968\nEpoch 204/250\n561/561 - 0s - loss: 0.4684 - accuracy: 0.7897 - val_loss: 0.3945 - val_accuracy: 0.8128\nEpoch 205/250\n561/561 - 0s - loss: 0.4689 - accuracy: 0.7897 - val_loss: 0.3942 - val_accuracy: 0.8128\nEpoch 206/250\n561/561 - 0s - loss: 0.4704 - accuracy: 0.7968 - val_loss: 0.3927 - val_accuracy: 0.8128\nEpoch 207/250\n561/561 - 0s - loss: 0.4696 - accuracy: 0.7861 - val_loss: 0.3946 - val_accuracy: 0.8128\nEpoch 208/250\n561/561 - 0s - loss: 0.4653 - accuracy: 0.7986 - val_loss: 0.3957 - val_accuracy: 0.8128\nEpoch 209/250\n561/561 - 0s - 
loss: 0.4765 - accuracy: 0.8004 - val_loss: 0.3954 - val_accuracy: 0.8128\nEpoch 210/250\n561/561 - 0s - loss: 0.4707 - accuracy: 0.7861 - val_loss: 0.3931 - val_accuracy: 0.8021\nEpoch 211/250\n561/561 - 0s - loss: 0.4711 - accuracy: 0.7897 - val_loss: 0.3932 - val_accuracy: 0.8128\nEpoch 212/250\n561/561 - 0s - loss: 0.4613 - accuracy: 0.7986 - val_loss: 0.3935 - val_accuracy: 0.8128\nEpoch 213/250\n561/561 - 0s - loss: 0.4677 - accuracy: 0.7914 - val_loss: 0.3936 - val_accuracy: 0.8128\nEpoch 214/250\n561/561 - 0s - loss: 0.4788 - accuracy: 0.7986 - val_loss: 0.3917 - val_accuracy: 0.8128\nEpoch 215/250\n561/561 - 0s - loss: 0.4812 - accuracy: 0.7914 - val_loss: 0.3920 - val_accuracy: 0.8128\nEpoch 216/250\n561/561 - 0s - loss: 0.4766 - accuracy: 0.7879 - val_loss: 0.3944 - val_accuracy: 0.7968\nEpoch 217/250\n561/561 - 0s - loss: 0.4724 - accuracy: 0.7861 - val_loss: 0.3942 - val_accuracy: 0.8128\nEpoch 218/250\n561/561 - 0s - loss: 0.4626 - accuracy: 0.7932 - val_loss: 0.3944 - val_accuracy: 0.8128\nEpoch 219/250\n561/561 - 0s - loss: 0.4722 - accuracy: 0.7932 - val_loss: 0.3939 - val_accuracy: 0.8128\nEpoch 220/250\n561/561 - 0s - loss: 0.4775 - accuracy: 0.7843 - val_loss: 0.3917 - val_accuracy: 0.7968\nEpoch 221/250\n561/561 - 0s - loss: 0.4768 - accuracy: 0.7790 - val_loss: 0.3940 - val_accuracy: 0.8128\nEpoch 222/250\n561/561 - 0s - loss: 0.4723 - accuracy: 0.7914 - val_loss: 0.3917 - val_accuracy: 0.8021\nEpoch 223/250\n561/561 - 0s - loss: 0.4752 - accuracy: 0.7914 - val_loss: 0.3927 - val_accuracy: 0.8128\nEpoch 224/250\n561/561 - 0s - loss: 0.4709 - accuracy: 0.7932 - val_loss: 0.3941 - val_accuracy: 0.8128\nEpoch 225/250\n561/561 - 0s - loss: 0.4696 - accuracy: 0.7879 - val_loss: 0.3945 - val_accuracy: 0.8021\nEpoch 226/250\n561/561 - 0s - loss: 0.4738 - accuracy: 0.7772 - val_loss: 0.3908 - val_accuracy: 0.8128\nEpoch 227/250\n561/561 - 0s - loss: 0.4762 - accuracy: 0.7914 - val_loss: 0.3928 - val_accuracy: 0.8128\nEpoch 228/250\n561/561 - 0s - loss: 0.4693 - accuracy: 0.7968 - val_loss: 0.3954 - val_accuracy: 0.8128\nEpoch 229/250\n561/561 - 0s - loss: 0.4643 - accuracy: 0.7879 - val_loss: 0.3921 - val_accuracy: 0.8021\nEpoch 230/250\n561/561 - 0s - loss: 0.4679 - accuracy: 0.7879 - val_loss: 0.3933 - val_accuracy: 0.8128\nEpoch 231/250\n561/561 - 0s - loss: 0.4702 - accuracy: 0.7968 - val_loss: 0.3922 - val_accuracy: 0.8128\nEpoch 232/250\n561/561 - 0s - loss: 0.4795 - accuracy: 0.7790 - val_loss: 0.3927 - val_accuracy: 0.8128\nEpoch 233/250\n561/561 - 0s - loss: 0.4630 - accuracy: 0.7968 - val_loss: 0.3911 - val_accuracy: 0.8128\nEpoch 234/250\n561/561 - 0s - loss: 0.4741 - accuracy: 0.7914 - val_loss: 0.3909 - val_accuracy: 0.8128\nEpoch 235/250\n561/561 - 0s - loss: 0.4708 - accuracy: 0.7897 - val_loss: 0.3951 - val_accuracy: 0.8128\nEpoch 236/250\n561/561 - 0s - loss: 0.4698 - accuracy: 0.7843 - val_loss: 0.3917 - val_accuracy: 0.8021\nEpoch 237/250\n561/561 - 0s - loss: 0.4639 - accuracy: 0.8004 - val_loss: 0.3915 - val_accuracy: 0.8128\nEpoch 238/250\n561/561 - 0s - loss: 0.4624 - accuracy: 0.7897 - val_loss: 0.3926 - val_accuracy: 0.8128\nEpoch 239/250\n561/561 - 0s - loss: 0.4736 - accuracy: 0.7968 - val_loss: 0.3930 - val_accuracy: 0.8128\nEpoch 240/250\n561/561 - 0s - loss: 0.4684 - accuracy: 0.7950 - val_loss: 0.3929 - val_accuracy: 0.8128\nEpoch 241/250\n561/561 - 0s - loss: 0.4701 - accuracy: 0.7807 - val_loss: 0.3938 - val_accuracy: 0.8128\nEpoch 242/250\n561/561 - 0s - loss: 0.4694 - accuracy: 0.7950 - val_loss: 0.3945 - val_accuracy: 0.8128\nEpoch 
243/250\n561/561 - 0s - loss: 0.4629 - accuracy: 0.7968 - val_loss: 0.3938 - val_accuracy: 0.8128\nEpoch 244/250\n561/561 - 0s - loss: 0.4716 - accuracy: 0.7861 - val_loss: 0.3961 - val_accuracy: 0.8128\nEpoch 245/250\n561/561 - 0s - loss: 0.4740 - accuracy: 0.7932 - val_loss: 0.3940 - val_accuracy: 0.8128\nEpoch 246/250\n561/561 - 0s - loss: 0.4558 - accuracy: 0.8004 - val_loss: 0.3961 - val_accuracy: 0.8128\nEpoch 247/250\n561/561 - 0s - loss: 0.4652 - accuracy: 0.7861 - val_loss: 0.3941 - val_accuracy: 0.8128\nEpoch 248/250\n561/561 - 0s - loss: 0.4741 - accuracy: 0.7807 - val_loss: 0.3915 - val_accuracy: 0.8021\nEpoch 249/250\n561/561 - 0s - loss: 0.4799 - accuracy: 0.7861 - val_loss: 0.3945 - val_accuracy: 0.8128\nEpoch 250/250\n561/561 - 0s - loss: 0.4723 - accuracy: 0.7968 - val_loss: 0.3948 - val_accuracy: 0.8021\n" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a3799dc342b8ecbef617483a3afa7ceb0c9172
4,377
ipynb
Jupyter Notebook
content/lessons/07/Now-You-Code/NYC2-Email-Address.ipynb
IST256-classroom/fall2018-learn-python-mafudge
173a3c00baaf501a5006ff2a8058bdf97b23f37e
[ "MIT" ]
null
null
null
content/lessons/07/Now-You-Code/NYC2-Email-Address.ipynb
IST256-classroom/fall2018-learn-python-mafudge
173a3c00baaf501a5006ff2a8058bdf97b23f37e
[ "MIT" ]
null
null
null
content/lessons/07/Now-You-Code/NYC2-Email-Address.ipynb
IST256-classroom/fall2018-learn-python-mafudge
173a3c00baaf501a5006ff2a8058bdf97b23f37e
[ "MIT" ]
5
2018-09-17T03:54:06.000Z
2019-10-17T02:47:20.000Z
27.702532
234
0.572538
[ [ [ "# Now You Code 2: Is That An Email Address?\n\nLet's use Python's built-in string functions to write our own function to detect if a string is an email address. \n\nThe function `isEmail(text)` should return `True` when `text` is an email address, `False` otherwise. \n\nFor simplicity's sake we will define an email address to be any string with just ONE `@` symbol in it, where the `@` is not at the beginning or end of the string. So `a@b` is considered an email (even though it really isn't).\n\nThe program should detect emails until you enter quit. \n\nSample run:\n```\nEmail address detector. Type quit to exit. \nEmail: [email protected]\[email protected] ==> email\nEmail: mafudge@\nmafudge@ ==> NOT EMAIL\nEmail: mafudge\nmafudge ==> NOT EMAIL\nEmail: @syr.edu\[email protected] ==> NOT EMAIL\nEmail: @\n@ ==> NOT EMAIL\nEmail: mafudge@@syr.edu\nmafudge@@syr.edu ==> NOT EMAIL\nEmail: mafudge@syr@edu\nmafudge@syr@edu ==> NOT EMAIL\n```\n\nOnce again we will use the problem simplification technique to writing this program.\n\nFirst we will write the `isEmail(text)` function, then we will write the main program.\n", "_____no_output_____" ], [ "## Step 1: Problem Analysis for isEmail function only\n\nInputs (function arguments):\n\nOutputs (what is returns): \n\nAlgorithm (Steps in Function):\n\n", "_____no_output_____" ] ], [ [ "## Step 2: Todo write the function definition for isEmail functiuon\n", "_____no_output_____" ], [ "## Step 3: Write some tests, to ensure the function works, for example\n## Make sure to test all cases!\nprint(\"WHEN [email protected] We EXPECT isEmail(text) to return True\", \"ACTUAL\", isEmail(\"[email protected]\") )\nprint(\"WHEN text=mike@ We EXPECT isEmail(text) to return False\", \"ACTUAL\", isEmail(\"mike@\") )\n\n", "_____no_output_____" ] ], [ [ "## Step 4: Problem Analysis for full Program\n\nInputs:\n\nOutputs:\n\nAlgorithm (Steps in Program):\n\n", "_____no_output_____" ] ], [ [ "## Step 5: todo write code for full problem, using the isEmail function to help you solve the problem\n", "_____no_output_____" ] ], [ [ "## Step 6: Questions\n\n1. How many test cases should you have in step 3 to ensure you've tested all the cases?\n2. What should kind of logic should we add to make our `isEmail` function even better, so that is detects emails more accurately?\n", "_____no_output_____" ], [ "## Reminder of Evaluation Criteria\n\n1. What the problem attempted (analysis, code, and answered questions) ?\n2. What the problem analysis thought out? (does the program match the plan?)\n3. Does the code execute without syntax error?\n4. Does the code solve the intended problem?\n5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7a38c8d1aeaf458ec10eafc5d1ad978b4656eb6
43,365
ipynb
Jupyter Notebook
SVM Model.ipynb
AyanavaMukhopadhyay/JustHacktoberFest
b79fa2ef54e1865146e6771a609bc2ee0088168a
[ "Apache-2.0" ]
null
null
null
SVM Model.ipynb
AyanavaMukhopadhyay/JustHacktoberFest
b79fa2ef54e1865146e6771a609bc2ee0088168a
[ "Apache-2.0" ]
null
null
null
SVM Model.ipynb
AyanavaMukhopadhyay/JustHacktoberFest
b79fa2ef54e1865146e6771a609bc2ee0088168a
[ "Apache-2.0" ]
8
2021-10-01T17:16:22.000Z
2021-10-31T13:14:00.000Z
43.714718
284
0.441992
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ], [ "from sklearn.datasets import load_breast_cancer", "_____no_output_____" ], [ "cancer = load_breast_cancer()", "_____no_output_____" ], [ "cancer.keys()", "_____no_output_____" ], [ "print(cancer['DESCR'])", ".. _breast_cancer_dataset:\n\nBreast cancer wisconsin (diagnostic) dataset\n--------------------------------------------\n\n**Data Set Characteristics:**\n\n :Number of Instances: 569\n\n :Number of Attributes: 30 numeric, predictive attributes and the class\n\n :Attribute Information:\n - radius (mean of distances from center to points on the perimeter)\n - texture (standard deviation of gray-scale values)\n - perimeter\n - area\n - smoothness (local variation in radius lengths)\n - compactness (perimeter^2 / area - 1.0)\n - concavity (severity of concave portions of the contour)\n - concave points (number of concave portions of the contour)\n - symmetry \n - fractal dimension (\"coastline approximation\" - 1)\n\n The mean, standard error, and \"worst\" or largest (mean of the three\n largest values) of these features were computed for each image,\n resulting in 30 features. For instance, field 3 is Mean Radius, field\n 13 is Radius SE, field 23 is Worst Radius.\n\n - class:\n - WDBC-Malignant\n - WDBC-Benign\n\n :Summary Statistics:\n\n ===================================== ====== ======\n Min Max\n ===================================== ====== ======\n radius (mean): 6.981 28.11\n texture (mean): 9.71 39.28\n perimeter (mean): 43.79 188.5\n area (mean): 143.5 2501.0\n smoothness (mean): 0.053 0.163\n compactness (mean): 0.019 0.345\n concavity (mean): 0.0 0.427\n concave points (mean): 0.0 0.201\n symmetry (mean): 0.106 0.304\n fractal dimension (mean): 0.05 0.097\n radius (standard error): 0.112 2.873\n texture (standard error): 0.36 4.885\n perimeter (standard error): 0.757 21.98\n area (standard error): 6.802 542.2\n smoothness (standard error): 0.002 0.031\n compactness (standard error): 0.002 0.135\n concavity (standard error): 0.0 0.396\n concave points (standard error): 0.0 0.053\n symmetry (standard error): 0.008 0.079\n fractal dimension (standard error): 0.001 0.03\n radius (worst): 7.93 36.04\n texture (worst): 12.02 49.54\n perimeter (worst): 50.41 251.2\n area (worst): 185.2 4254.0\n smoothness (worst): 0.071 0.223\n compactness (worst): 0.027 1.058\n concavity (worst): 0.0 1.252\n concave points (worst): 0.0 0.291\n symmetry (worst): 0.156 0.664\n fractal dimension (worst): 0.055 0.208\n ===================================== ====== ======\n\n :Missing Attribute Values: None\n\n :Class Distribution: 212 - Malignant, 357 - Benign\n\n :Creator: Dr. William H. Wolberg, W. Nick Street, Olvi L. Mangasarian\n\n :Donor: Nick Street\n\n :Date: November, 1995\n\nThis is a copy of UCI ML Breast Cancer Wisconsin (Diagnostic) datasets.\nhttps://goo.gl/U2Uwz2\n\nFeatures are computed from a digitized image of a fine needle\naspirate (FNA) of a breast mass. They describe\ncharacteristics of the cell nuclei present in the image.\n\nSeparating plane described above was obtained using\nMultisurface Method-Tree (MSM-T) [K. P. Bennett, \"Decision Tree\nConstruction Via Linear Programming.\" Proceedings of the 4th\nMidwest Artificial Intelligence and Cognitive Science Society,\npp. 97-101, 1992], a classification method which uses linear\nprogramming to construct a decision tree. 
Relevant features\nwere selected using an exhaustive search in the space of 1-4\nfeatures and 1-3 separating planes.\n\nThe actual linear program used to obtain the separating plane\nin the 3-dimensional space is that described in:\n[K. P. Bennett and O. L. Mangasarian: \"Robust Linear\nProgramming Discrimination of Two Linearly Inseparable Sets\",\nOptimization Methods and Software 1, 1992, 23-34].\n\nThis database is also available through the UW CS ftp server:\n\nftp ftp.cs.wisc.edu\ncd math-prog/cpo-dataset/machine-learn/WDBC/\n\n.. topic:: References\n\n - W.N. Street, W.H. Wolberg and O.L. Mangasarian. Nuclear feature extraction \n for breast tumor diagnosis. IS&T/SPIE 1993 International Symposium on \n Electronic Imaging: Science and Technology, volume 1905, pages 861-870,\n San Jose, CA, 1993.\n - O.L. Mangasarian, W.N. Street and W.H. Wolberg. Breast cancer diagnosis and \n prognosis via linear programming. Operations Research, 43(4), pages 570-577, \n July-August 1995.\n - W.H. Wolberg, W.N. Street, and O.L. Mangasarian. Machine learning techniques\n to diagnose breast cancer from fine-needle aspirates. Cancer Letters 77 (1994) \n 163-171.\n" ], [ "cancer['feature_names']", "_____no_output_____" ], [ "df_feat = pd.DataFrame(cancer['data'],columns=cancer['feature_names'])\ndf_feat.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 569 entries, 0 to 568\nData columns (total 30 columns):\nmean radius 569 non-null float64\nmean texture 569 non-null float64\nmean perimeter 569 non-null float64\nmean area 569 non-null float64\nmean smoothness 569 non-null float64\nmean compactness 569 non-null float64\nmean concavity 569 non-null float64\nmean concave points 569 non-null float64\nmean symmetry 569 non-null float64\nmean fractal dimension 569 non-null float64\nradius error 569 non-null float64\ntexture error 569 non-null float64\nperimeter error 569 non-null float64\narea error 569 non-null float64\nsmoothness error 569 non-null float64\ncompactness error 569 non-null float64\nconcavity error 569 non-null float64\nconcave points error 569 non-null float64\nsymmetry error 569 non-null float64\nfractal dimension error 569 non-null float64\nworst radius 569 non-null float64\nworst texture 569 non-null float64\nworst perimeter 569 non-null float64\nworst area 569 non-null float64\nworst smoothness 569 non-null float64\nworst compactness 569 non-null float64\nworst concavity 569 non-null float64\nworst concave points 569 non-null float64\nworst symmetry 569 non-null float64\nworst fractal dimension 569 non-null float64\ndtypes: float64(30)\nmemory usage: 133.4 KB\n" ], [ "cancer['target']", "_____no_output_____" ], [ "df_target = pd.DataFrame(cancer['target'],columns=['Cancer'])", "_____no_output_____" ], [ "df_feat.head()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(df_feat, np.ravel(df_target), test_size=0.30, random_state=101)", "_____no_output_____" ], [ "from sklearn.svm import SVC\nmodel = SVC()\nmodel.fit(X_train,y_train)", "C:\\Users\\KIIT\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. 
Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n" ], [ "predictions = model.predict(X_test)", "_____no_output_____" ], [ "from sklearn.metrics import classification_report,confusion_matrix", "_____no_output_____" ], [ "print(confusion_matrix(y_test,predictions))", "[[ 0 66]\n [ 0 105]]\n" ], [ "print(classification_report(y_test,predictions))", " precision recall f1-score support\n\n 0 0.00 0.00 0.00 66\n 1 0.61 1.00 0.76 105\n\n micro avg 0.61 0.61 0.61 171\n macro avg 0.31 0.50 0.38 171\nweighted avg 0.38 0.61 0.47 171\n\n" ], [ "param_grid = {'C': [0.1,1, 10, 100, 1000], 'gamma': [1,0.1,0.01,0.001,0.0001], 'kernel': ['rbf']}", "_____no_output_____" ], [ "from sklearn.model_selection import GridSearchCV\ngrid = GridSearchCV(SVC(),param_grid,refit=True,verbose=3)\ngrid.fit(X_train,y_train)", "C:\\Users\\KIIT\\Anaconda3\\lib\\site-packages\\sklearn\\model_selection\\_split.py:2053: FutureWarning: You should specify a value for 'cv' instead of relying on the default value. The default value will change from 3 to 5 in version 0.22.\n warnings.warn(CV_WARNING, FutureWarning)\n[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s remaining: 0.0s\n" ], [ "grid.best_params_", "_____no_output_____" ], [ "grid.best_estimator_", "_____no_output_____" ], [ "grid_predictions = grid.predict(X_test)\nprint(confusion_matrix(y_test,grid_predictions))\nprint(classification_report(y_test,grid_predictions))", "[[ 60 6]\n [ 3 102]]\n precision recall f1-score support\n\n 0 0.95 0.91 0.93 66\n 1 0.94 0.97 0.96 105\n\n micro avg 0.95 0.95 0.95 171\n macro avg 0.95 0.94 0.94 171\nweighted avg 0.95 0.95 0.95 171\n\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a3954fe5da66c485da2352546a4cce511505bd
6,358
ipynb
Jupyter Notebook
notebooks/defense.ipynb
Fraunhofer-SIT/ModExTransformer
709e321ba68d7b4f1e55e50230adec5389574701
[ "Apache-2.0" ]
1
2022-03-21T13:16:42.000Z
2022-03-21T13:16:42.000Z
notebooks/defense.ipynb
Fraunhofer-SIT/ModExTransformer
709e321ba68d7b4f1e55e50230adec5389574701
[ "Apache-2.0" ]
null
null
null
notebooks/defense.ipynb
Fraunhofer-SIT/ModExTransformer
709e321ba68d7b4f1e55e50230adec5389574701
[ "Apache-2.0" ]
null
null
null
32.943005
129
0.551746
[ [ [ "# Apache 2.0 License\n# Copyright (c) 2022, Fraunhofer e.V.\n# All rights reserved.\n\nimport sys\nsys.path.append('../')\nimport argparse\nfrom tqdm import tqdm\nfrom scipy.stats import entropy\nimport torch\nimport torch.nn.functional as F\n\nfrom timm.models import create_model\nfrom defenses.victim import MAD, ReverseSigmoid, RandomNoise\n\nfrom datasets import build_transform, get_dataset\n\nimport models\nimport utils\nfrom utils import get_free_gpu\n\nnum_gpus = 1\ngpu_chosen = get_free_gpu(num_gpus)\ndevice = torch.device('cuda' if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ], [ "args = {'input_size': 224}\nargs = argparse.Namespace(**args)", "_____no_output_____" ], [ "def get_accuracy(output, target, topk=(1,)):\n \"\"\" Computes the precision@k for the specified values of k \"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n # one-hot case\n if target.ndimension() > 1:\n target = target.max(1)[1]\n\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = dict()\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res[\"acc{}\".format(k)] = correct_k.mul_(1.0 / batch_size).item()\n return res\n\n\ndef predict(model, model_defended, data_loader, device='cuda'):\n model = model.to(device)\n model.eval()\n preds_orig = []\n preds_def = []\n labels = []\n with torch.no_grad():\n for x, y in tqdm(data_loader):\n x, y = x.to(device, non_blocking=True), y.to(device, non_blocking=True)\n preds_orig.append(F.softmax(model(x), 1).to('cpu'))\n preds_def.append(model_defended(x).to('cpu'))\n labels.append(y.to('cpu'))\n return torch.cat(preds_orig), torch.cat(preds_def), torch.cat(labels)\n\n\ndef evaluate(model, model_defended, datasets, batch_size=100, workers=4):\n if not isinstance(datasets, tuple):\n datasets = (datasets, )\n res = {}\n for i, dataset in enumerate(datasets):\n d_type = \"\" if len(datasets) == 1 else [\"train\", \"test\"][i]\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=workers)\n \n print(f'Evaluate on {dataset.__class__.__name__} {d_type} data:')\n preds_orig, preds_def, labels = predict(model, model_defended, data_loader)\n num_classes = preds_def.shape[1]\n \n print(f'Results on {dataset.__class__.__name__} {d_type} data:')\n print('Accuracy original:', get_accuracy(preds_orig, labels)['acc1'])\n print('Accuracy defended:', get_accuracy(preds_def, labels)['acc1'])\n print('Fidelity:', get_accuracy(preds_orig, preds_def)['acc1'])\n print('Mean relative entropy original:', np.mean(entropy(preds_orig, axis=1, base=2) / np.log2(num_classes)))\n print('Mean relative entropy defended:', np.mean(entropy(preds_def, axis=1, base=2) / np.log2(num_classes)))\n print('Mean max/min original:', torch.mean(preds_orig.max(1)[0] / preds_orig.min(1)[0]).item())\n print('Mean max/min defended:', torch.mean(preds_def.max(1)[0] / preds_def.min(1)[0]).item())\n print('Mean L1 distance:', torch.mean(torch.linalg.vector_norm(preds_orig - preds_def, 1, 1)).item())\n print()\n res[d_type] = (preds_orig, preds_def, labels)\n return res", "_____no_output_____" ], [ "model = create_model(\n 'resnet34',\n num_classes=10\n)\nmodel.load_state_dict(torch.load(f'checkpoints/checkpoint.pth')['model'])\n\nmodel_adv = create_model(\n 'deit_base_patch16_224',\n pretrained=False,\n num_classes=10\n)\n\ndatasets = get_dataset('cifar10', train_transform=build_transform(False, args), val_transform=build_transform(False, args))", 
"_____no_output_____" ], [ "epsilon = 50.1\nbeta = 2.0\ngamma = 0.5\noutput_path = './log'\ndist_z = 'l1'\noracle = 'argmax'\n\nmodel_defended = RandomNoise(model=model, out_path=output_path, dist_z=dist_z, epsilon_z=epsilon)\n#model_defended = ReverseSigmoid(model=model, out_path=output_path, beta=beta, gamma=gamma)\n#model_defended = MAD(model=model, out_path=output_path, epsilon=epsilon, model_adv_proxy=model_adv, oracle=oracle)", "_____no_output_____" ], [ "res = evaluate(model, model_defended, datasets[1], 512)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
e7a3aa4e47c35970cb510f622d69dbfbd7243534
109,560
ipynb
Jupyter Notebook
code/Plot_Anisotropies.ipynb
cajohare/HaloSpin
5193df53bb41083073407ed0196d030871735c99
[ "MIT" ]
null
null
null
code/Plot_Anisotropies.ipynb
cajohare/HaloSpin
5193df53bb41083073407ed0196d030871735c99
[ "MIT" ]
null
null
null
code/Plot_Anisotropies.ipynb
cajohare/HaloSpin
5193df53bb41083073407ed0196d030871735c99
[ "MIT" ]
1
2019-06-26T14:01:10.000Z
2019-06-26T14:01:10.000Z
310.368272
98,312
0.921303
[ [ [ "from numpy import *\nfrom numpy.random import *\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom PlotFuncs import *\nfrom LabFuncs import *\nfrom Params import *\nfrom HaloFuncs import *\nfrom scipy.stats import norm\nimport pandas\n\nimport cmocean\n\n# Load shards\ndf = pandas.read_csv('../data/FitShards_red.csv')\nnames = df.group_id\nnshards = size(names)\nvelocities = zeros(shape=(nshards,3))\ndispersions = zeros(shape=(nshards,3))\nvelocities[0:(nshards),0] = df.vx # stream velocities\nvelocities[0:(nshards),1] = df.vy\nvelocities[0:(nshards),2] = df.vz\ndispersions[0:(nshards),0] = df.sigx # dispersion tensors\ndispersions[0:(nshards),1] = df.sigy\ndispersions[0:(nshards),2] = df.sigz\npops = df.population\nPsun = df.Psun\nweights = ShardsWeights(names,pops,Psun)\n\n# Halo params\nHaloModel = SHMpp\nv0 = HaloModel.RotationSpeed\nv_esc = HaloModel.EscapeSpeed\nsig_beta = HaloModel.SausageDispersionTensor\nsig_iso = array([1.0,1.0,1.0])*v0/sqrt(2.0)\nday = 67.0 ", "_____no_output_____" ], [ "from scipy.constants import c\nc", "_____no_output_____" ], [ "# Angular discretisation\n\n# n = 50\n# nn = int(n*n/2)\n# ph = linspace(0, 2*pi, n)\n# costh = linspace(-1.0, 1.0, int(n/2))\n# PH, cosTH = meshgrid(ph,costh)\n# X = sin(PH)*cosTH\n# Y = sin(PH)*sqrt(1-cosTH**2.0)\n# Z = cosTH\n# ii = 0\n# q = zeros(shape=(nn,3))\n# for i in range(0,int(n/2)):\n# for j in range(0,n):\n# q[ii,:] = array([X[i,j],Y[i,j],Z[i,j]])\n# ii = ii+1\n\nimport healpy as hp\n\nnside = 64\nnn = 12*nside**2\ndpix = 4*pi/(npix*1.0)\nq = zeros(shape=(nn,3))\nfor i in range(0,nn):\n q[i,:] = hp.pix2vec(nside, i)\n \nforward = q[:,1]<0\nbackward = ~forward\nup = q[:,2]>0\ndown = ~up\n", "_____no_output_____" ], [ "from WIMPFuncs import *\n\nvvlab = sqrt(sum(LabVelocitySimple(day)**2.0))\nvlab = array([0.0,vvlab,0.0])\nvshift = LabVelocitySimple(day)-vlab\nxlab = vlab/vvlab\n\n\n\n\n# constants\nsigma_p = 1.0e-46\nA = F19.MassNumber\neta = 0.2\n\n\n# Radon transforms\nnv = 500\nv_min = linspace(0.01,850.0,nv)\nfhat_smooth = zeros(shape=(nn,nv))\nfhat_shards = zeros(shape=(nn,nv))\nfor iv in range(0,nv):\n fhat_smooth[:,iv] = (1-eta)*fhat_Isotropic(v_min[iv],q,day,v_shift=vshift)\\\n +eta*fhat_Triaxial(v_min[iv],q,day,sig_beta,v_shift=vshift)\n# fhat = zeros(shape=(nn))\n# for i in range(0,nshards):\n# v_s = velocities[i,:]\n# sig_s = dispersions[i,:]\n# fhat += weights[i]*fhat_Triaxial(v_min[iv],q,day,sig_s,v_shift=v_s+vshift)\n# fhat_shards[:,iv] = fhat\n \n #fhat_shards[:,iv] = fhat_Triaxial(v_min[iv],q,day,dispersions[2,:],v_shift=velocities[2,:]+vshift)\n fhat_shards[:,iv] = fhat_Triaxial(v_min[iv],q,day,dispersions[0,:],v_shift=velocities[0,:]+vshift)\n \n\n\n", "_____no_output_____" ], [ "# RATEs\n\ndef CAnisotropies(m_chi,v_min,q,fhat,E_r):\n R = zeros(shape=shape(fhat)[0])\n ne = size(E_r)\n for i in range(0,ne):\n v_min_i = MinimumWIMPSpeed(E_r[i],F19.MassNumber,m_chi)\n R += diffRecoilRate_SI(E_r[i],fhat[:,argmin(abs(v_min-v_min_i))],F19.MassNumber,sigma_p,m_chi)\n R_fw = sum(R[forward])\n R_bw = sum(R[backward])\n R_up = sum(R[up])\n R_dn = sum(R[down])\n return R_fw,R_bw,R_up,R_dn\n\n \n \n# E range\nne = 100\nE_r = zeros(shape=ne)\nE_r = linspace(2.0,50.0,ne)\n\n# xi range\nn_xi = 10\nxi_vals = linspace(0.0,0.2,n_xi)\n\n# m range\nnm = 50\nm_vals = logspace(log10(1.0),log10(1000.0),nm)\n\n\nFW = zeros(shape=(nm,n_xi))\nBW = zeros(shape=(nm,n_xi))\nUP = zeros(shape=(nm,n_xi))\nDN = zeros(shape=(nm,n_xi))\n\nfor i in range(0,nm):\n R0_fw,R0_bw,R0_up,R0_dn = 
CAnisotropies(m_vals[i],v_min,q,fhat_smooth,E_r)\n R1_fw,R1_bw,R1_up,R1_dn = CAnisotropies(m_vals[i],v_min,q,fhat_shards,E_r)\n for j in range(0,n_xi):\n xi = xi_vals[j]\n FW[i,j] = R0_fw*(1-xi)+R1_fw*xi\n BW[i,j] = R0_bw*(1-xi)+R1_bw*xi\n UP[i,j] = R0_up*(1-xi)+R1_up*xi\n DN[i,j] = R0_dn*(1-xi)+R1_dn*xi", "_____no_output_____" ], [ " \n\nfig,ax1 = MySquarePlot('$m_\\chi$ [GeV]','Anisotropy',lfs=40,tfs=30)\ncol = cmocean.cm.curl(linspace(0,1,n_xi))\n\nfor i in flipud(range(0,n_xi)):\n plt.fill_between(m_vals,FW[:,i]/BW[:,i],y2=FW[:,0]/BW[:,0],lw=3,color=col[i,:])\n plt.fill_between(m_vals,DN[:,i]/UP[:,i],y2=DN[:,0]/UP[:,0],lw=3,color=col[i,:])\n #plt.plot(m_vals,FW[:,i]/BW[:,i],'-',lw=5,color='k')\n #plt.plot(m_vals,DN[:,i]/UP[:,i],'-',lw=5,color='k')\n plt.plot(m_vals,FW[:,i]/BW[:,i],'-',lw=3,color=col[i,:])\n plt.plot(m_vals,DN[:,i]/UP[:,i],'-',lw=3,color=col[i,:])\n \n \n\nplt.gcf().text(0.53,0.7,r'$\\mathcal{A}_{\\rm fw}$',fontsize=45,color=col[0,:])\nplt.gcf().text(0.4,0.14,r'$\\mathcal{A}_{\\rm ud}$',fontsize=45,color=col[0,:])\n\nymin = 1.0\nymax = 10.0\nxmin = 5.0\nxmax = 1000.0\n\n#ivals = array(11,)\n\nNvals = array([14,15,16,17,18,19,20,23,30,45,100,250])\nfor i in Nvals:\n A = (sqrt(i*1)/3.0 + 1)/(sqrt(i*1)/3.0 - 1)\n #plt.plot([xmin,xmax],[A,A],'--',lw=2,color='gray')\n plt.plot([xmax*0.95,xmax],[A,A],'-',lw=3,color='k')\n plt.text(xmax+20,A,str(i),fontsize=30,va='center')\n\nplt.ylim([ymin,ymax])\nplt.xscale('log')\nplt.xlim([xmin,xmax])\nplt.ylim([ymin,ymax])\n\n\n#ax2 = ax1.twinx()\n#ax2.set_yscale('log')\n#ax2.set_ylim([Niso(ymin),Niso(ymax)])\n#ax2.set_yticks(arange(Niso(ymin),Niso(ymax),1))\n\n#ax1.xaxis.grid() # vertical lines\n#ax1.xaxis.grid(which='minor') # vertical lines\n\nax1.tick_params(which='major',right=False)\nax1.tick_params(which='minor',right=False)\n\n\nplt.gcf().text(0.93,0.45,r'$N_{\\rm iso}$',fontsize=45,rotation=-90)\n\n\n# Custom colorbar\ncbar_max = r'10 \\% Shards'\ncbar_min = r'0\\% Shards'\nimport matplotlib as mpl\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\ncbaxes = inset_axes(ax1, width=\"15%\", height=\"30%\", bbox_to_anchor=[0, -330, 200, 760]) \nnorm = mpl.colors.Normalize(vmin=0,vmax=1)\nsm = plt.cm.ScalarMappable(cmap=cmocean.cm.curl, norm=norm)\nsm.set_array([])\nplt.colorbar(sm,cax=cbaxes,ticks=(0,1),boundaries=linspace(0,1,n_xi),orientation='vertical')\nf = plt.gcf().get_children()\ncbar = f[2]\ncbar.set_yticklabels([cbar_min,cbar_max]) # vertically oriented colorbar\ncbar.tick_params(labelsize=30) \ncbar.tick_params(which='major',direction='out',width=2,length=10,right=True,top=False,pad=7)\ncbar.tick_params(which='minor',direction='out',width=2,length=7,right=True,top=False)\nplt.gcf().text(0.23,0.33,r'$\\xi_{\\rm S1}$',fontsize=35,color='k')\n\n\nplt.show()\npltname = 'Anisotropy_S1'\nfig.savefig('../plots/'+pltname+'.pdf',bbox_inches='tight')\nfig.savefig('../plots/plots_png/'+pltname+'.png',bbox_inches='tight')", "/Users/ciaranohare/anaconda/lib/python3.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: divide by zero encountered in true_divide\n import sys\n\n/Users/ciaranohare/anaconda/lib/python3.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: invalid value encountered in true_divide\n import sys\n\n/Users/ciaranohare/anaconda/lib/python3.7/site-packages/ipykernel_launcher.py:8: RuntimeWarning: invalid value encountered in true_divide\n \n\n/Users/ciaranohare/anaconda/lib/python3.7/site-packages/ipykernel_launcher.py:11: RuntimeWarning: divide by zero encountered in true_divide\n # This is added 
back by InteractiveShellApp.init_path()\n\n/Users/ciaranohare/anaconda/lib/python3.7/site-packages/ipykernel_launcher.py:11: RuntimeWarning: invalid value encountered in true_divide\n # This is added back by InteractiveShellApp.init_path()\n\n/Users/ciaranohare/anaconda/lib/python3.7/site-packages/ipykernel_launcher.py:12: RuntimeWarning: invalid value encountered in true_divide\n if sys.path[0] == '':\n\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
e7a3b5fa2559ee7d6a887c06166eea4cc47bb7fb
25,797
ipynb
Jupyter Notebook
wandb/run-20210520_094138-3le768az/tmp/code/_session_history.ipynb
Programmer-RD-AI/Heart-Disease-UCI
b077f8496fba3fe1a9a073c80d0a5df73c720f29
[ "Apache-2.0" ]
null
null
null
wandb/run-20210520_094138-3le768az/tmp/code/_session_history.ipynb
Programmer-RD-AI/Heart-Disease-UCI
b077f8496fba3fe1a9a073c80d0a5df73c720f29
[ "Apache-2.0" ]
null
null
null
wandb/run-20210520_094138-3le768az/tmp/code/_session_history.ipynb
Programmer-RD-AI/Heart-Disease-UCI
b077f8496fba3fe1a9a073c80d0a5df73c720f29
[ "Apache-2.0" ]
null
null
null
31.383212
271
0.53576
[ [ [ "lossess = [nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,torch.nn.MarginRankingLoss,torch.nn.TripletMarginLossnn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "data = pd.read_csv('./data.csv')", "_____no_output_____" ], [ "X,y = data.drop('target',axis=1),data['target']", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25)", "_____no_output_____" ], [ "import torch\nimport torch.nn as nn", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "X_train = torch.from_numpy(np.array(X_train).astype(np.float32))\ny_train = torch.from_numpy(np.array(y_train).astype(np.float32))\nX_test = torch.from_numpy(np.array(X_test).astype(np.float32))\ny_test = torch.from_numpy(np.array(y_test).astype(np.float32))", "_____no_output_____" ], [ "X_train.shape", "torch.Size([227, 13])" ], [ "X_test.shape", "torch.Size([76, 13])" ], [ "y_train.shape", "torch.Size([227])" ], [ "y_test.shape", "torch.Size([76])" ], [ "import torch.nn.functional as F", "_____no_output_____" ], [ "class Test_Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(13,64)\n self.fc2 = nn.Linear(64,128)\n self.fc3 = nn.Linear(128,256)\n self.fc4 = nn.Linear(256,512)\n self.fc5 = nn.Linear(512,1024)\n self.fc6 = nn.Linear(1024,512)\n self.fc7 = nn.Linear(512,1)\n \n def forward(self,X):\n preds = self.fc1(X)\n preds = F.relu(preds)\n preds = self.fc2(preds)\n preds = F.relu(preds)\n preds = self.fc3(preds)\n preds = F.relu(preds)\n preds = self.fc4(preds)\n preds = F.relu(preds)\n preds = self.fc5(preds)\n preds = F.relu(preds)\n preds = self.fc6(preds)\n preds = F.relu(preds)\n preds = self.fc7(preds)\n return F.sigmoid(preds)", "_____no_output_____" ], [ "device = torch.device('cuda')", "_____no_output_____" ], [ "X_train = X_train.to(device)\ny_train = y_train.to(device)\nX_test = X_test.to(device)\ny_test = y_test.to(device)", "_____no_output_____" ], [ "PROJECT_NAME = 'Heart-Disease-UCI'", "_____no_output_____" ], [ "def get_loss(criterion,X,y,model):\n model.eval()\n with torch.no_grad():\n preds = model(X.float().to(device))\n preds = preds.view(len(preds),).to(device)\n y = y.view(len(y),).to(device)\n loss = criterion(preds,y)\n model.train()\n return loss.item()\ndef get_accuracy(preds,y):\n correct = 0\n total = 0\n for real,pred in zip(y_train,preds):\n if real == pred:\n correct += 1\n total += 1\n return round(correct/total,3)", "_____no_output_____" ], [ "import wandb", "_____no_output_____" ], [ "from tqdm import tqdm", "_____no_output_____" ], [ "EPOCHS = 212\n# EPOCHS = 100", "_____no_output_____" ], [ "# model = Test_Model().to(device)\n# optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n# criterion = nn.L1Loss()\n# 
wandb.init(project=PROJECT_NAME,name='baseline')\n# for _ in tqdm(range(EPOCHS)):\n# preds = model(X_train.float().to(device))\n# preds = preds.view(len(preds),)\n# preds.to(device)\n# loss = criterion(preds,y_train)\n# optimizer.zero_grad()\n# loss.backward()\n# optimizer.step()\n# wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(X_train,y_train,model),'val_accuracy':get_accuracy(X_test,y_test,model)})\n# wandb.finish()", "_____no_output_____" ], [ "# preds[:10]", "_____no_output_____" ], [ "# preds = torch.round(preds)", "_____no_output_____" ], [ "# correct = 0\n# total = 0\n# for real,pred in zip(y_train,preds):\n# if real == pred:\n# correct += 1\n# # total += 1", "_____no_output_____" ], [ "# round(correct/total,3)", "_____no_output_____" ], [ "## Testing Modelling", "_____no_output_____" ], [ "import torch\nimport torch.nn as nn", "_____no_output_____" ], [ "class Test_Model(nn.Module):\n def __init__(self,num_of_layers=1,activation=F.relu,input_shape=13,fc1_output=32,fc2_output=64,fc3_output=128,fc4_output=256,output_shape=1):\n super().__init__()\n self.num_of_layers = num_of_layers\n self.activation = activation\n self.fc1 = nn.Linear(input_shape,fc1_output)\n self.fc2 = nn.Linear(fc1_output,fc2_output)\n self.fc3 = nn.Linear(fc2_output,fc3_output)\n self.fc4 = nn.Linear(fc3_output,fc4_output)\n self.fc5 = nn.Linear(fc4_output,fc3_output)\n self.fc6 = nn.Linear(fc3_output,fc3_output)\n self.fc7 = nn.Linear(fc3_output,output_shape)\n \n def forward(self,X,activation=False):\n preds = self.fc1(X)\n if activation:\n preds = self.activation(preds)\n preds = self.fc2(preds)\n if activation:\n preds = self.activation(preds)\n preds = self.fc3(preds)\n if activation:\n preds = self.activation(preds)\n preds = self.fc4(preds)\n if activation:\n preds = self.activation(preds)\n preds = self.fc5(preds)\n if activation:\n preds = self.activation(preds)\n for _ in range(self.num_of_layers):\n preds = self.fc6(preds)\n if activation:\n preds = self.activation(preds)\n preds = self.fc7(preds)\n preds = F.sigmoid(preds)\n return preds", "_____no_output_____" ], [ "device = torch.device('cuda')", "_____no_output_____" ], [ "# preds = torch.round(preds)", "_____no_output_____" ], [ "# num_of_layers = 1\n# input_shape\n# fc1_output\n# fc2_output\n# fc3_output\n# fc4_output\n# output_shape\n# optimizer = torch.optim.SGD\n# criterion = \n# lr\n# activtion = nn.Tanh()", "_____no_output_____" ], [ "lossess = [nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,torch.nn.MarginRankingLoss,torch.nn.TripletMarginLossnn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "lossess = [nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,torch.nn.MarginRankingLoss,torch.nn.TripletMarginLossnn,torch.nn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = 
torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "lossess = [nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,torch.nn.MarginRankingLoss,torch.nn.TripletMarginLoss,torch.nn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "# nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,\nlossess = [torch.nn.TripletMarginLoss,torch.nn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "# nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,\nlossess = [torch.nn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "lrs = [0.1,1.0,0.25,0.125,0.5,0.75,0.01,0.001,0.0001]\nfor lr in lrs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=lr)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'lr-{lr}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "fc1_outputs = 
[16,32,64,128,256]\nfor fc1_output in fc1_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()fc1_outputs=fc1_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc1_output-{fc1_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "fc1_outputs = [16,32,64,128,256]\nfor fc1_output in fc1_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_outputs=fc1_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc1_output-{fc1_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "fc1_outputs = [16,32,64,128,256]\nfor fc1_output in fc1_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_output=fc1_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc1_output-{fc1_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "fc2_outputs = [16,32,64,128,256,512]\nfor fc2_output in fc2_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_output=256,fc2_output=fc2_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc2_output-{fc2_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "fc3_outputs = [16,32,64,128,256,512,1024]\nfor fc3_output in fc3_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_output=256,fc2_output=64,fc3_output=fc3_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc3_output-{fc3_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n 
wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ], [ "# num_of_layers = 1\n# fc1_output = 256\n# fc2_output = 64\n# fc3_output = 32\n# fc4_output = \n# optimizer = torch.optim.SGD\n# criterion = nn.MSELoss\n# lr = 0.125\n# activtion = nn.Tanh()", "_____no_output_____" ], [ "fc4_outputs = [16,32,64,128,256,512,1024,2048]\nfor fc4_output in fc4_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_output=256,fc2_output=64,fc3_output=32,fc4_output=fc4_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc4_output-{fc4_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a3cb206dce685b4f72f1ea8e0548284f6d1dbd
527,553
ipynb
Jupyter Notebook
examples/tree-cover-keras/tree-cover-keras.ipynb
Gnilliw/eo-learn
dc3f52b3ad96741081afc39e14903f45714d0868
[ "MIT" ]
null
null
null
examples/tree-cover-keras/tree-cover-keras.ipynb
Gnilliw/eo-learn
dc3f52b3ad96741081afc39e14903f45714d0868
[ "MIT" ]
null
null
null
examples/tree-cover-keras/tree-cover-keras.ipynb
Gnilliw/eo-learn
dc3f52b3ad96741081afc39e14903f45714d0868
[ "MIT" ]
null
null
null
704.343124
365,644
0.944064
[ [ [ "# Example notebook for training a U-net deep learning network to predict tree cover", "_____no_output_____" ], [ "This notebook presents a toy example for training a deep learning architecture for semantic segmentation of satellite images using `eo-learn` and `keras`. The example showcases tree cover prediction over an area in Framce. The ground-truth data is retrieved from the [EU tree cover density (2015)](https://land.copernicus.eu/pan-european/high-resolution-layers/forests/view) through [Geopedia](http://www.geopedia.world/#T235_L2081_x449046.043261205_y6052157.300792162_s15_b17).\n\nThe workflow is as foolows:\n * input the area-of-interest (AOI)\n * split the AOI into small manageable eopatches\n * for each eopatch:\n * download RGB bands form Sentinel-2 L2A products using Sentinel-Hub for the 2017 year \n * retrieve corresponding ground-truth from Geopedia using a WMS request\n * compute the median values for the RGB bands over the time-interval\n * save to disk\n * select a 256x256 patch with corresponding ground-truth to be used for training/validating the model\n * train and validate a U-net\n \nThis example can easily be expanded to:\n * larger AOIs;\n * include more/different bands/indices, such as NDVI\n * include Sentinel-1 images (after harmonisation with Sentinel-2)\n \nThe notebook requires `Keras` with `tensorflow` back-end.", "_____no_output_____" ] ], [ [ "import os\nimport datetime \nfrom os import path as op\nimport itertools\n\nfrom eolearn.io import *\nfrom eolearn.core import EOTask, EOPatch, LinearWorkflow, FeatureType, SaveToDisk, OverwritePermission\nfrom sentinelhub import BBox, CRS, BBoxSplitter, MimeType, ServiceType\n\nfrom tqdm import tqdm_notebook as tqdm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport geopandas\n\nfrom sklearn.metrics import confusion_matrix\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import backend as K\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nfrom keras.utils.np_utils import to_categorical\nK.clear_session()", "Using TensorFlow backend.\n" ] ], [ [ "## 1. 
Set up workflow", "_____no_output_____" ] ], [ [ "# global image request parameters\ntime_interval = ('2017-01-01', '2017-12-31')\nimg_width = 256\nimg_height = 256\nmaxcc = 0.2", "_____no_output_____" ], [ "# get the AOI and split into bboxes\ncrs = CRS.UTM_31N\naoi = geopandas.read_file('../../example_data/eastern_france.geojson')\naoi = aoi.to_crs(crs=crs.pyproj_crs())\naoi_shape = aoi.geometry.values[-1]\n\nbbox_splitter = BBoxSplitter([aoi_shape], crs, (19, 10))", "_____no_output_____" ], [ "# set raster_value conversions for our Geopedia task\n# see more about how to do this here:\n\nraster_value = {\n '0%': (0, [0, 0, 0, 0]),\n '10%': (1, [163, 235, 153, 255]),\n '30%': (2, [119, 195, 118, 255]),\n '50%': (3, [85, 160, 89, 255]),\n '70%': (4, [58, 130, 64, 255]),\n '90%': (5, [36, 103, 44, 255])\n}", "_____no_output_____" ], [ "import matplotlib as mpl\n\ntree_cmap = mpl.colors.ListedColormap(['#F0F0F0', \n '#A2EB9B', \n '#77C277', \n '#539F5B', \n '#388141', \n '#226528'])\ntree_cmap.set_over('white')\ntree_cmap.set_under('white')\n\nbounds = np.arange(-0.5, 6, 1).tolist()\ntree_norm = mpl.colors.BoundaryNorm(bounds, tree_cmap.N)", "_____no_output_____" ], [ "# create a task for calculating a median pixel value\nclass MedianPixel(EOTask):\n \"\"\"\n The task returns a pixelwise median value from a time-series and stores the results in a \n timeless data array.\n \"\"\"\n def __init__(self, feature, feature_out):\n self.feature_type, self.feature_name = next(self._parse_features(feature)())\n self.feature_type_out, self.feature_name_out = next(self._parse_features(feature_out)())\n\n def execute(self, eopatch):\n eopatch.add_feature(self.feature_type_out, self.feature_name_out, \n np.median(eopatch[self.feature_type][self.feature_name], axis=0))\n return eopatch", "_____no_output_____" ], [ "# initialize tasks\n# task to get S2 L2A images\n\ninput_task = SentinelHubInputTask(data_source=DataSource.SENTINEL2_L2A, \n bands_feature=(FeatureType.DATA, 'BANDS'),\n resolution=10, \n maxcc=0.2, \n bands=['B04', 'B03', 'B02'], \n time_difference=datetime.timedelta(hours=2),\n additional_data=[(FeatureType.MASK, 'dataMask', 'IS_DATA')]\n )\ngeopedia_data = AddGeopediaFeature((FeatureType.MASK_TIMELESS, 'TREE_COVER'), \n layer='ttl2275', theme='QP', raster_value=raster_value)\n# task to compute median values\nget_median_pixel = MedianPixel((FeatureType.DATA, 'BANDS'), \n feature_out=(FeatureType.DATA_TIMELESS, 'MEDIAN_PIXEL'))\n# task to save to disk\nsave = SaveTask(op.join('data', 'eopatch'), \n overwrite_permission=OverwritePermission.OVERWRITE_PATCH, \n compress_level=2)", "_____no_output_____" ], [ "# initialize workflow\nworkflow = LinearWorkflow(input_task, geopedia_data, get_median_pixel, save)", "_____no_output_____" ], [ "# use a function to run this workflow on a single bbox\ndef execute_workflow(index):\n bbox = bbox_splitter.bbox_list[index]\n info = bbox_splitter.info_list[index]\n \n patch_name = 'eopatch_{0}_row-{1}_col-{2}'.format(index, \n info['index_x'], \n info['index_y'])\n \n results = workflow.execute({input_task:{'bbox':bbox, 'time_interval':time_interval},\n save:{'eopatch_folder':patch_name}\n })\n return list(results.values())[-1]\n del results ", "_____no_output_____" ] ], [ [ "Test workflow on an example patch and display", "_____no_output_____" ] ], [ [ "idx = 168\nexample_patch = execute_workflow(idx) ", "_____no_output_____" ], [ "example_patch", "_____no_output_____" ], [ "mp = 
example_patch.data_timeless['MEDIAN_PIXEL']\nplt.figure(figsize=(15,15))\nplt.imshow(2.5*mp)\ntc = example_patch.mask_timeless['TREE_COVER']\nplt.imshow(tc[...,0], vmin=0, vmax=5, alpha=.5, cmap=tree_cmap)\nplt.colorbar()", "Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n" ] ], [ [ "## 2. Run workflow on all patches", "_____no_output_____" ] ], [ [ "# run over multiple bboxes\nsubset_idx = len(bbox_splitter.bbox_list)\nx_train_raw = np.empty((subset_idx, img_height, img_width, 3))\ny_train_raw = np.empty((subset_idx, img_height, img_width, 1))\npbar = tqdm(total=subset_idx)\nfor idx in range(0, subset_idx):\n patch = execute_workflow(idx)\n x_train_raw[idx] = patch.data_timeless['MEDIAN_PIXEL'][20:276,0:256,:]\n y_train_raw[idx] = patch.mask_timeless['TREE_COVER'][20:276,0:256,:]\n pbar.update(1)", "_____no_output_____" ] ], [ [ "## 3. Create training and validation data arrays", "_____no_output_____" ] ], [ [ "# data normalization and augmentation\nimg_mean = np.mean(x_train_raw, axis=(0, 1, 2))\nimg_std = np.std(x_train_raw, axis=(0, 1, 2))\nx_train_mean = x_train_raw - img_mean\nx_train = x_train_mean - img_std\n\ntrain_gen = ImageDataGenerator(\n horizontal_flip=True,\n vertical_flip=True,\n rotation_range=180)\n\ny_train = to_categorical(y_train_raw, len(raster_value))", "_____no_output_____" ] ], [ [ "## 4. Set up U-net model using Keras (tensorflow back-end)", "_____no_output_____" ] ], [ [ "# Model setup\n#from https://www.kaggle.com/lyakaap/weighing-boundary-pixels-loss-script-by-keras2\n# weight: weighted tensor(same shape with mask image)\ndef weighted_bce_loss(y_true, y_pred, weight):\n # avoiding overflow\n epsilon = 1e-7\n y_pred = K.clip(y_pred, epsilon, 1. - epsilon)\n logit_y_pred = K.log(y_pred / (1. - y_pred))\n\n # https://www.tensorflow.org/api_docs/python/tf/nn/weighted_cross_entropy_with_logits\n loss = (1. - y_true) * logit_y_pred + (1. + (weight - 1.) * y_true) * \\\n (K.log(1. + K.exp(-K.abs(logit_y_pred))) + K.maximum(-logit_y_pred, 0.))\n return K.sum(loss) / K.sum(weight)\n\ndef weighted_dice_loss(y_true, y_pred, weight):\n smooth = 1.\n w, m1, m2 = weight * weight, y_true, y_pred\n intersection = (m1 * m2)\n score = (2. * K.sum(w * intersection) + smooth) / (K.sum(w * m1) + K.sum(w * m2) + smooth)\n loss = 1. 
- K.sum(score)\n return loss\n\ndef weighted_bce_dice_loss(y_true, y_pred):\n y_true = K.cast(y_true, 'float32')\n y_pred = K.cast(y_pred, 'float32')\n # if we want to get same size of output, kernel size must be odd number\n averaged_mask = K.pool2d(\n y_true, pool_size=(11, 11), strides=(1, 1), padding='same', pool_mode='avg')\n border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')\n weight = K.ones_like(averaged_mask)\n w0 = K.sum(weight)\n weight += border * 2\n w1 = K.sum(weight)\n weight *= (w0 / w1)\n loss = weighted_bce_loss(y_true, y_pred, weight) + \\\n weighted_dice_loss(y_true, y_pred, weight)\n return loss\n\ndef unet(input_size):\n inputs = Input(input_size)\n conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(inputs)\n conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(pool1)\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(pool2)\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(pool3)\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n\n conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(pool4)\n conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n\n up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\n merge6 = concatenate([drop4,up6])\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(merge6)\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(conv6)\n\n up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))\n merge7 = concatenate([conv3,up7])\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(merge7)\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(conv7)\n\n up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\n merge8 = concatenate([conv2,up8])\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(merge8)\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(conv8)\n\n up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))\n merge9 = concatenate([conv1,up9])\n conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', \n kernel_initializer = 'he_normal')(merge9)\n conv9 = Conv2D(64, 3, activation = 
'relu', padding = 'same', \n kernel_initializer = 'he_normal')(conv9)\n conv10 = Conv2D(len(raster_value), 1, activation = 'softmax')(conv9)\n\n model = Model(inputs = inputs, outputs = conv10)\n\n model.compile(optimizer = Adam(lr = 1e-4), \n loss = weighted_bce_dice_loss, \n metrics = ['accuracy'])\n\n return model\n\nmodel = unet(input_size=(256, 256, 3))", "_____no_output_____" ] ], [ [ "## 5. Train the model", "_____no_output_____" ] ], [ [ "# Fit the model\nbatch_size = 16\nmodel.fit_generator(\n train_gen.flow(x_train, y_train, batch_size=batch_size),\n steps_per_epoch=len(x_train),\n epochs=20,\n verbose=1)\nmodel.save(op.join('model.h5'))", "Epoch 1/20\n190/190 [==============================] - 419s 2s/step - loss: 0.9654 - acc: 0.6208\nEpoch 2/20\n190/190 [==============================] - 394s 2s/step - loss: 0.9242 - acc: 0.6460\nEpoch 3/20\n190/190 [==============================] - 394s 2s/step - loss: 0.9126 - acc: 0.6502\nEpoch 4/20\n190/190 [==============================] - 393s 2s/step - loss: 0.9059 - acc: 0.6540\nEpoch 5/20\n190/190 [==============================] - 393s 2s/step - loss: 0.9051 - acc: 0.6575\nEpoch 6/20\n190/190 [==============================] - 393s 2s/step - loss: 0.8940 - acc: 0.6620\nEpoch 7/20\n190/190 [==============================] - 394s 2s/step - loss: 0.8896 - acc: 0.6654\nEpoch 8/20\n190/190 [==============================] - 393s 2s/step - loss: 0.8870 - acc: 0.6668\nEpoch 9/20\n190/190 [==============================] - 393s 2s/step - loss: 0.8844 - acc: 0.6671\nEpoch 10/20\n190/190 [==============================] - 393s 2s/step - loss: 0.8781 - acc: 0.6708\nEpoch 11/20\n190/190 [==============================] - 393s 2s/step - loss: 0.8712 - acc: 0.6766\nEpoch 12/20\n190/190 [==============================] - 393s 2s/step - loss: 0.8671 - acc: 0.6801\nEpoch 13/20\n190/190 [==============================] - 394s 2s/step - loss: 0.8606 - acc: 0.6827\nEpoch 14/20\n190/190 [==============================] - 393s 2s/step - loss: 0.8489 - acc: 0.6919\nEpoch 15/20\n190/190 [==============================] - 394s 2s/step - loss: 0.8393 - acc: 0.6982\nEpoch 16/20\n190/190 [==============================] - 394s 2s/step - loss: 0.8279 - acc: 0.7063\nEpoch 17/20\n190/190 [==============================] - 394s 2s/step - loss: 0.8160 - acc: 0.7129\nEpoch 18/20\n190/190 [==============================] - 394s 2s/step - loss: 0.8020 - acc: 0.7233\nEpoch 19/20\n190/190 [==============================] - 394s 2s/step - loss: 0.7849 - acc: 0.7342\nEpoch 20/20\n190/190 [==============================] - 394s 2s/step - loss: 0.7781 - acc: 0.7397\n" ] ], [ [ "## 6. 
Validate model and show some results", "_____no_output_____" ] ], [ [ "# plot one example (image, label, prediction)\nidx = 4\np = np.argmax(model.predict(np.array([x_train[idx]])), axis=3)\nfig = plt.figure(figsize=(12,4))\nax1 = fig.add_subplot(1,3,1)\nax1.imshow(x_train_raw[idx])\nax2 = fig.add_subplot(1,3,2)\nax2.imshow(y_train_raw[idx][:,:,0])\nax3 = fig.add_subplot(1,3,3)\nax3.imshow(p[0])", "_____no_output_____" ], [ "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "_____no_output_____" ], [ "# show image confusion matrix\npredictions = np.argmax(model.predict(x_train), axis=3)\ncnf_matrix = confusion_matrix(y_train_raw.reshape(len(y_train_raw) * 256 * 256, 1), \n predictions.reshape(len(predictions) * 256 * 256, 1))\nplot_confusion_matrix(cnf_matrix, raster_value.keys(), normalize=True)", "Normalized confusion matrix\n[[0.93412552 0. 0. 0. 0.01624412 0.04963036]\n [0.75458006 0. 0. 0. 0.08682321 0.15859672]\n [0.73890185 0. 0. 0. 0.1051384 0.15595975]\n [0.6504189 0. 0. 0. 0.1332155 0.2163656 ]\n [0.36706531 0. 0. 0. 0.18843914 0.44449555]\n [0.1816605 0. 0. 0. 0.05291638 0.76542312]]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7a3d49e90cfdba0a52dd06fd7f5c4c8f008aba6
624,693
ipynb
Jupyter Notebook
Measure_and _Plot.ipynb
sourabh-bhide/Analyze_Vesicles
d0c8fe170202b45468a7ea3f74b358754d2b287d
[ "MIT" ]
null
null
null
Measure_and _Plot.ipynb
sourabh-bhide/Analyze_Vesicles
d0c8fe170202b45468a7ea3f74b358754d2b287d
[ "MIT" ]
null
null
null
Measure_and _Plot.ipynb
sourabh-bhide/Analyze_Vesicles
d0c8fe170202b45468a7ea3f74b358754d2b287d
[ "MIT" ]
null
null
null
1,090.21466
97,204
0.955564
[ [ [ "import scipy\nfrom scipy import ndimage\nimport scipy.misc\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nfrom scipy.signal import find_peaks\nfrom skimage.morphology import skeletonize\nfrom skimage import util \nfrom skimage import measure\nfrom skimage import filters\nfrom skimage import data\nfrom skimage import exposure \nfrom skimage import io\nimport pandas as pd\nimport os\nimport os.path\nfrom datetime import datetime\nimport seaborn as sns", "/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ], [ "def autophagosome_size(img):\n im= np.array(img)!=2\n mask, number_of_objects = ndimage.label(im)\n all_labels = measure.label(mask)\n props = measure.regionprops(all_labels) \n area=[] \n for i in range (0,number_of_objects): \n if props[i].area > 2 and props[i].area < 1500:\n area=np.append(area,props[i].area)\n area=np.reshape(area, (-1, 1))\n ### size thresholding can be inroduced here\n if number_of_objects is not 0: return number_of_objects,area.mean(),area\n if number_of_objects is 0: return number_of_objects,number_of_objects,area", "_____no_output_____" ] ], [ [ "## Measure autophagosome properties", "_____no_output_____" ], [ "area=[]\nfor i in range (0,number_of_objects):\n area=np.append(area,props[i].area)", "_____no_output_____" ] ], [ [ "experiments = os.listdir(os. getcwd())\nfor item in experiments:\n if 'raph' not in item : experiments.remove(item)\nexperiments.remove('.ipynb_checkpoints')\n#experiments.remove('Statistical_Analysis.ipynb')\nexperiments.remove('Measure_and _Plot.ipynb')\nexperiments.remove('train')\nexperiments.remove('Results')\nexperiments", "_____no_output_____" ], [ "images_dir = 'train/label/'\noutput_csv_dir = 'Results/'\nos.makedirs(output_csv_dir, exist_ok=True)\nresults_all = pd.DataFrame(columns=['experiment','condition','image_name','number_of_objects','mean_area'])\n\nfor experiment in experiments:\n conditions = os.listdir(experiment)\n if '.DS_Store' in conditions: conditions.remove('.DS_Store')\n if 'Screenshot_1.png' in conditions: conditions.remove('Screenshot_1.png')\n pooled_cell_sizes_expt=pd.DataFrame()\n mean_cell_sizes_expt=pd.DataFrame()###\n mean_cell_numbers_expt=pd.DataFrame()\n for condition in conditions:\n data_path = str(experiment+'/'+condition)\n images = os.listdir(data_path)\n if '.DS_Store' in images: images.remove('.DS_Store')\n if '0_tif_RGB' in images: images.remove('0_tif_RGB')\n if '200331_Figure_Atg8a_Chloroquine.jpg' in images: images.remove('200331_Figure_Atg8a_Chloroquine.jpg')\n pooled_cell_sizes =[]\n mean_cell_sizes=[]###\n mean_cell_numbers=[]\n for image_name in images:\n file_name = str(experiment)+'_'+str(condition)+'_'+str(image_name)#+'_Simple_Segmentation'\n if file_name.endswith('.tif'):file_name = file_name[:-4].__add__('_Simple Segmentation.tif')\n elif file_name.endswith('.tiff'):file_name = file_name[:-5].__add__('_Simple Segmentation.tif')\n image = io.imread(os.path.join(images_dir, file_name), plugin='pil')\n \n number_of_objects,mean_area,area_all_objects = autophagosome_size(image)\n \n mean_cell_sizes=np.append(mean_cell_sizes,mean_area)###\n mean_cell_numbers=np.append(mean_cell_numbers,number_of_objects)\n pooled_cell_sizes=np.append(pooled_cell_sizes,area_all_objects)\n results_all = results_all.append({'experiment': str(experiment), 'condition':str(condition), 'image_name': 
str(image_name), 'number_of_objects':number_of_objects,'mean_area': mean_area},ignore_index=True)\n\n pooled_cell_sizes_data = np.reshape(pooled_cell_sizes, (-1, 1))\n pooled_cell_sizes_df = pd.DataFrame(data=pooled_cell_sizes_data, index=None, columns=[str(condition)])\n pooled_cell_sizes_expt = pd.concat([pooled_cell_sizes_df,pooled_cell_sizes_expt], axis=1,join='outer')\n \n mean_cell_numbers_data = np.reshape(mean_cell_numbers, (-1, 1))\n mean_cell_numbers_df = pd.DataFrame(data=mean_cell_numbers_data, index=None, columns=[str(condition)])\n mean_cell_numbers_expt = pd.concat([mean_cell_numbers_df,mean_cell_numbers_expt], axis=1,join='outer')#### \n \n mean_cell_sizes_data = np.reshape(mean_cell_sizes, (-1, 1))\n mean_cell_sizes_df = pd.DataFrame(data=mean_cell_sizes_data, index=None, columns=[str(condition)])\n mean_cell_sizes_expt = pd.concat([mean_cell_sizes_df,mean_cell_sizes_expt], axis=1,join='outer')#### \n \n pooled_cell_sizes_expt.to_csv(output_csv_dir+experiment+'_pooled_cell_sizes.csv', sep=';', decimal=',')\n mean_cell_sizes_expt.to_csv(output_csv_dir+experiment+'_mean_cell_sizes.csv', sep=';', decimal=',')####\n mean_cell_numbers_expt.to_csv(output_csv_dir+experiment+'_mean_cell_numbers.csv', sep=';', decimal=',')\n print(experiment+' DONE')\nresults_all.to_csv(output_csv_dir+'results_all_'+str(datetime.now())+'.csv', sep=';', decimal=',')\nresults_all.to_csv(output_csv_dir+'results_all.csv', sep=';', decimal=',')\nprint('ALL DONE')", "Graph10_BoiPy__60xWater DONE\nGraph11_ER__60xWater DONE\nGraph12_Golgi__60xWater DONE\nGraph14_Atg8a_Epistase_time_Of_Woud_Healing__40x DONE\nGraph15_Atg8a_Insulin_Foxo_time_Of_Woud_Healing__40x_and_60x DONE\nGraph16_Atg8a_Foxo_TM_time_Of_Woud_Healing__40xOil DONE\nGraph17_Atg8a_time_Of_Woud_Healing__40xOil DONE\nGraph1_Geraf2__Atg8a__40xOil_rest_of_data DONE\n" ], [ "results_all=pd.read_csv(output_csv_dir+'results_all_2020-08-30-2.csv', sep=';', decimal=',')\nresults_all.head()", "_____no_output_____" ] ], [ [ "## PLOT NUMBER OF OBJECTS", "_____no_output_____" ] ], [ [ "\nimport seaborn as sns\nfor experiment in experiments:\n experiment_data = results_all[results_all['experiment']==experiment] \n fig,ax = plt.subplots()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax = sns.boxplot(x=\"condition\", y=\"number_of_objects\", data=experiment_data)\n ax = sns.swarmplot(x=\"condition\", y=\"number_of_objects\", data=experiment_data, color=\".25\")\n plt.xticks(rotation=90)\n plt.title(experiment)\n plt.savefig(output_csv_dir+experiment+'_number_of_objects.png',bbox_inches='tight')\n plt.show()", "_____no_output_____" ] ], [ [ "## PLOT SIZE OF OBJECTS/ MEAN_AREA", "_____no_output_____" ] ], [ [ "\n\nfor experiment in experiments:\n experiment_data = results_all[results_all['experiment']==experiment] \n fig,ax = plt.subplots()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax = sns.boxplot(x=\"condition\", y=\"mean_area\", data=experiment_data)\n ax = sns.swarmplot(x=\"condition\", y=\"mean_area\", data=experiment_data, color=\".25\")\n plt.xticks(rotation=90)\n plt.ylabel('mean_area ($\\mu$m$^{2}$)')\n plt.title(experiment)\n plt.savefig(output_csv_dir+experiment+'_mean_area.png',bbox_inches='tight')\n plt.show()", "_____no_output_____" ] ], [ [ "## PLOT POOLED SIZE OF OBJECTS/ MEAN_AREA", "_____no_output_____" ] ], [ [ "output_csv_dir = 'Results/'\nfor experiment in experiments:\n df=pd.read_csv(output_csv_dir+experiment+'_pooled_cell_sizes.csv', sep=';', decimal=',')\n df = 
df.drop(columns=['Unnamed: 0'])\n df = df.sort_index(axis=1)\n \n \n if '60x' in str(experiment):df=df*0.1111\n if '40x' in str(experiment):df=df*0.1626\n \n fig,ax = plt.subplots()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax = sns.stripplot(data=df, jitter=0.3,size=2)\n plt.xticks(rotation=90)\n plt.ylabel('spot size ($\\mu$m$^{2}$)')\n plt.ylim(-10,250)\n plt.title(experiment)\n plt.axhline(y=15,color='k')\n plt.savefig(output_csv_dir+experiment+'_pooled_cell_sizes.png',bbox_inches='tight')\n plt.show()", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "def get_concat_h_multi_resize(im_list, resample=Image.BICUBIC):\n min_height = min(im.height for im in im_list)\n im_list_resize = [im.resize((int(im.width * min_height / im.height), min_height),resample=resample)\n for im in im_list]\n total_width = sum(im.width for im in im_list_resize)\n dst = Image.new('RGB', (total_width, min_height))\n pos_x = 0\n for im in im_list_resize:\n dst.paste(im, (pos_x, 0))\n pos_x += im.width\n return dst", "_____no_output_____" ], [ "## CONCATANATE GRAPHS\n\nplot_dir = 'Results/'\nfor experiment in experiments:\n print(experiment)\n im1 = Image.open(os.path.join(plot_dir, experiment+'_number_of_objects.png'))\n im2 = Image.open(os.path.join(plot_dir, experiment+'_mean_area.png'))\n im3 = Image.open(os.path.join(plot_dir, experiment+'_pooled_cell_sizes.png'))\n get_concat_h_multi_resize([im1, im2, im3]).save('Results/'+experiment+'_concat.jpg')", "Graph10_BoiPy__60xWater\nGraph11_ER__60xWater\nGraph12_Golgi__60xWater\nGraph14_Atg8a_Epistase_time_Of_Woud_Healing__40x\nGraph15_Atg8a_Insulin_Foxo_time_Of_Woud_Healing__40x_and_60x\nGraph16_Atg8a_Foxo_TM_time_Of_Woud_Healing__40xOil\nGraph17_Atg8a_time_Of_Woud_Healing__40xOil\nGraph1_Geraf2__Atg8a__40xOil_rest_of_data\nGraph3_Atg8a_Epistase__40xOil\nGraph4_Graph5_Atg8a_Chloroquine_you.have.all.images\nGraph6_Graph7_LAMP1.GFP__60xWater\nGraph8_Graph9_GFP.LAMP1__60xWater\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
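The autophagosome notebook above counts objects by labelling a binary mask with `scipy.ndimage.label` and reading `area` from `skimage.measure.regionprops`. A minimal sketch of that same label-and-measure pattern on a tiny synthetic mask (the array, object sizes, and comments are made up for illustration only, not taken from the repository's data):

```python
import numpy as np
from scipy import ndimage
from skimage import measure

# Tiny synthetic binary mask with two well-separated objects
# (illustrative stand-in for a segmented autophagosome image).
mask = np.zeros((8, 8), dtype=int)
mask[1:3, 1:3] = 1   # 4-pixel object
mask[5:8, 5:8] = 1   # 9-pixel object

labels, n_objects = ndimage.label(mask)      # connected-component labelling
areas = np.array([p.area for p in measure.regionprops(labels)])

print(n_objects)     # 2
print(areas)         # [4 9]
print(areas.mean())  # 6.5
```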
e7a3fb027f2f23449884772a16c68998a17e70fc
46,199
ipynb
Jupyter Notebook
notebooks(colab)/Neural_network_models/Supervised_learning_models/CNN_tf_RU.ipynb
jswanglp/MyML
ea1510cc5c2dec9eb37e371a73234b3a228beca7
[ "MIT" ]
7
2019-05-04T13:57:52.000Z
2021-12-31T03:39:58.000Z
notebooks(colab)/Neural_network_models/Supervised_learning_models/CNN_tf_RU.ipynb
jswanglp/MyML
ea1510cc5c2dec9eb37e371a73234b3a228beca7
[ "MIT" ]
19
2020-09-26T01:16:10.000Z
2022-02-10T02:11:15.000Z
notebooks(colab)/Neural_network_models/Supervised_learning_models/CNN_tf_RU.ipynb
jswanglp/MyML
ea1510cc5c2dec9eb37e371a73234b3a228beca7
[ "MIT" ]
2
2019-06-02T05:10:35.000Z
2020-09-19T07:24:43.000Z
46,199
46,199
0.841187
[ [ [ "# Инициализация", "_____no_output_____" ] ], [ [ "#@markdown - **Монтирование GoogleDrive** \nfrom google.colab import drive\ndrive.mount('GoogleDrive')", "_____no_output_____" ], [ "# #@markdown - **Размонтирование**\n# !fusermount -u GoogleDrive", "_____no_output_____" ] ], [ [ "# Область кодов", "_____no_output_____" ] ], [ [ "#@title Сверточные нейронные сети { display-mode: \"both\" }\n# В программе используется API в TensorFlow для реализации двухслойных сверточных нейронных сетей\n# coding: utf-8\nimport tensorflow.examples.tutorials.mnist.input_data as input_data\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport os", "_____no_output_____" ], [ "#@markdown - **Определение функций инициализации**\ndef glorot_init(shape, name):\n initial = tf.truncated_normal(shape=shape, stddev=1. / tf.sqrt(shape[0] / 2.))\n return tf.Variable(initial, name=name)\n\ndef bias_init(shape, name):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name=name)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "_____no_output_____" ], [ "#@markdown - **Настройка гиперпараметров**\nmnist = input_data.read_data_sets('sample_data/MNIST_data', one_hot=True)\nnum_epochs = 12000 #@param {type: \"integer\"}\nbatch_size = 196 #@param {type: \"integer\"}\nlearning_rate = 8e-4 #@param {type: \"number\"}\n\ndir_path = 'GoogleDrive/My Drive/Colab Notebooks'\nevent_path = os.path.join(dir_path, 'Tensorboard')\ncheckpoint_path = os.path.join(dir_path, 'Checkpoints')\n", "WARNING:tensorflow:From <ipython-input-5-1291ae95062c>:1: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease write your own downloading logic.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting sample_data/MNIST_data/train-images-idx3-ubyte.gz\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting sample_data/MNIST_data/train-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.one_hot on tensors.\nExtracting sample_data/MNIST_data/t10k-images-idx3-ubyte.gz\nExtracting sample_data/MNIST_data/t10k-labels-idx1-ubyte.gz\nWARNING:tensorflow:From 
/usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\n" ], [ "#@markdown - **Создание graph**\ngraph = tf.Graph()\nwith graph.as_default():\n\n with tf.name_scope('Input'):\n x = tf.placeholder(tf.float32, shape=[None, 784], name='input_images')\n y_ = tf.placeholder(tf.float32, shape=[None, 10], name='labels')\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n keep_prob = tf.placeholder(tf.float32)\n\n #@markdown - **Настройка сверточных слоев**\n # --------------conv1-----------------------------------\n with tf.name_scope('Conv1'):\n with tf.name_scope('weights_conv1'):\n W_conv1 = glorot_init([3, 3, 1, 64], 'w_conv1')\n with tf.name_scope('bias_covn1'):\n b_conv1 = bias_init([64], 'b_conv1') \n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n with tf.name_scope('features_conv1'):\n h_pool1 = max_pool_2x2(h_conv1)\n\n # --------------conv2-----------------------------------\n with tf.name_scope('Conv2'):\n W_conv2 = glorot_init([3, 3, 64, 128], 'w_conv2')\n b_conv2 = bias_init([128], 'b_conv2')\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n \n #@markdown - **Настройка полносвязных слоев**\n # --------------fc--------------------------------------\n h_pool2_flat = tf.layers.flatten(h_pool2)\n num_f = h_pool2_flat.get_shape().as_list()[-1]\n with tf.name_scope('FC1'):\n W_fc1 = glorot_init([num_f, 128], 'w_fc1')\n b_fc1 = bias_init([128], 'b_fc1')\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n with tf.name_scope('Dropout'): \n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n \n with tf.name_scope('FC2'):\n W_fc2 = glorot_init([128, 10], 'w_fc2')\n b_fc2 = bias_init([10], 'b_fc2')\n y_fc2 = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n with tf.name_scope('Loss'):\n y_out = tf.nn.softmax(y_fc2)\n# cross_entropy = -tf.reduce_mean(y_*tf.log(y_out + 1e-10))\n # # or like\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_,\n logits=y_fc2))\n with tf.name_scope('Train'):\n train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)\n # # or like\n # optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)\n # grad_list = optimizer.compute_gradients(cross_entropy)\n # train_step = optimizer.apply_gradients(grad_list)\n\n with tf.name_scope('Accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_out, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\nWARNING:tensorflow:From <ipython-input-6-95a34e4babde>:30: flatten (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.flatten instead.\nWARNING:tensorflow:From <ipython-input-6-95a34e4babde>:38: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\n" ], [ "#@markdown - **Обучение сетей и сохранение моделей**\nwith tf.Session(graph=graph) as sess:\n\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(max_to_keep=3) # сохранить 3 модели\n max_acc = 101. # модели с более высокой точностью будут сохранены\n\n for epoch in range(num_epochs):\n batch = mnist.train.next_batch(batch_size)\n _, acc, loss = sess.run([train_step, accuracy, cross_entropy], feed_dict={x: batch[0], \n y_: batch[1], \n keep_prob: 0.5})\n step = epoch + 1\n if step % 1000 == 0:\n acc *= 100\n print_list = [step, loss, acc]\n print(\"Epoch: {0[0]}, cross_entropy: {0[1]:.4f}, accuracy on training data: {0[2]:.2f}%,\".format(print_list))\n test_acc, test_loss = sess.run([accuracy, cross_entropy], feed_dict={x: mnist.test.images, \n y_: mnist.test.labels, \n keep_prob: 1.0})\n test_acc *= 100\n print_list = [test_loss, test_acc]\n print(' '*12, 'cross_entropy: {0[0]:.4f}, accuracy on testing data: {0[1]:.2f}%.'.format(print_list))\n print('\\n')\n \n if (acc > max_acc) & (step > 3999):\n max_acc = acc\n saver.save(sess, os.path.join(checkpoint_path, 'f_map.ckpt'), global_step=step)\n\n test_image, test_label = mnist.test.images[100, :].reshape((1, -1)), mnist.test.labels[100, :].reshape((1, -1))\n features1, features2 = sess.run([h_pool1, h_pool2], feed_dict={x: test_image, y_: test_label, keep_prob: 1.0})", "Epoch: 1000, cross_entropy: 0.6997, accuracy on training data: 72.45%,\n cross_entropy: 0.1718, accuracy on testing data: 96.03%.\n\n\nEpoch: 2000, cross_entropy: 0.4519, accuracy on training data: 82.14%,\n cross_entropy: 0.0626, accuracy on testing data: 98.28%.\n\n\nEpoch: 3000, cross_entropy: 0.2301, accuracy on training data: 92.35%,\n cross_entropy: 0.0548, accuracy on testing data: 98.38%.\n\n\nEpoch: 4000, cross_entropy: 0.1407, accuracy on training data: 95.41%,\n cross_entropy: 0.0475, accuracy on testing data: 98.66%.\n\n\nEpoch: 5000, cross_entropy: 0.1878, accuracy on training data: 92.86%,\n cross_entropy: 0.0448, accuracy on testing data: 98.80%.\n\n\nEpoch: 6000, cross_entropy: 0.1025, accuracy on training data: 95.92%,\n cross_entropy: 0.0386, accuracy on testing data: 98.97%.\n\n\nEpoch: 7000, cross_entropy: 0.0510, accuracy on training data: 98.47%,\n cross_entropy: 0.0383, accuracy on testing data: 98.98%.\n\n\nEpoch: 8000, cross_entropy: 0.0281, accuracy on training data: 98.98%,\n cross_entropy: 0.0417, accuracy on testing data: 99.00%.\n\n\nEpoch: 9000, cross_entropy: 0.0042, accuracy on training data: 100.00%,\n cross_entropy: 0.0426, accuracy on testing data: 99.03%.\n\n\nEpoch: 10000, cross_entropy: 0.0260, accuracy on training data: 98.47%,\n cross_entropy: 0.0407, accuracy on testing data: 98.97%.\n\n\nEpoch: 11000, cross_entropy: 0.0075, accuracy on training data: 100.00%,\n cross_entropy: 0.0419, accuracy on testing data: 99.08%.\n\n\nEpoch: 12000, cross_entropy: 0.0324, accuracy on training data: 98.98%,\n cross_entropy: 0.0394, accuracy on testing data: 99.14%.\n\n\n" ], [ "#@markdown - **Восстановление сохраненной модели**\n# with tf.Session() as sess:\n # model_path = 'GoogleDrive/My Drive/Colab Notebooks/Tensorboard/f_map.ckpt-241'\n # saver.restore(sess, model_path)\n # acc, loss = sess.run([accuracy, cross_entropy], feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\n # print('Accuracy is %.2f.' 
%(acc))\n# sess.close()", "_____no_output_____" ], [ "#@markdown - **Представление feature map первого сверточного слоя**\nfeatures_map = features1.reshape((14, 14, 64))\nnum_map = range(features_map.shape[-1])\nfig, AX = plt.subplots(nrows=4, ncols=8)\nfig.set_size_inches(w=14, h=7)\nfig.subplots_adjust(wspace=.2, hspace=.2)\ntry:\n for index, ax in enumerate(AX.flatten()):\n ax.imshow(features_map[:, :, index], 'gray')\n ax.set_xticks([]), ax.set_yticks([])\nexcept IndexError:\n pass\nplt.show()", "_____no_output_____" ], [ "#@markdown - **Представление feature map второго сверточного слоя**\nfeatures_map = features2.reshape((7, 7, 128))\nnum_map = range(features_map.shape[-1])\nfig, AX = plt.subplots(nrows=4, ncols=8)\nfig.set_size_inches(w=14, h=7)\nfig.subplots_adjust(wspace=.2, hspace=.2)\ntry:\n for index, ax in enumerate(AX.flatten()):\n ax.imshow(features_map[:, :, index], 'gray')\n ax.set_xticks([]), ax.set_yticks([])\nexcept IndexError:\n pass\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
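The network defined in the record above uses TF1 placeholders and the deprecated `tutorials.mnist` loader. Below is a rough `tf.keras` equivalent with the same layer widths, assuming TensorFlow 2.x; it is a sketch for comparison, not the author's code, and the hyperparameters simply mirror the values set in the record.

```python
import tensorflow as tf

# Sketch of an equivalent model in tf.keras (assumes TF 2.x); layer widths follow
# the graph above: conv(64) -> pool -> conv(128) -> pool -> fc(128) -> dropout -> fc(10).
model = tf.keras.Sequential([
    tf.keras.layers.Reshape((28, 28, 1), input_shape=(784,)),
    tf.keras.layers.Conv2D(64, 3, padding="same", activation="relu"),
    tf.keras.layers.MaxPool2D(2),
    tf.keras.layers.Conv2D(128, 3, padding="same", activation="relu"),
    tf.keras.layers.MaxPool2D(2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer=tf.keras.optimizers.Adam(8e-4),
              loss="categorical_crossentropy",
              metrics=["accuracy"])
```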
e7a403e6bf5f19f318cf2ae3ade3db55bb7e8a3a
14,852
ipynb
Jupyter Notebook
ch07/Predicting flight delays with sklearn.ipynb
kdiogenes/Agile_Data_Code_2
47897407b6846aad95d3013e543b3fd834cfb535
[ "MIT" ]
null
null
null
ch07/Predicting flight delays with sklearn.ipynb
kdiogenes/Agile_Data_Code_2
47897407b6846aad95d3013e543b3fd834cfb535
[ "MIT" ]
null
null
null
ch07/Predicting flight delays with sklearn.ipynb
kdiogenes/Agile_Data_Code_2
47897407b6846aad95d3013e543b3fd834cfb535
[ "MIT" ]
null
null
null
36.22439
1,504
0.605508
[ [ [ "# Predicting Flight Delays with sklearn\n\nIn this notebook, we will be using features we've prepared in PySpark to predict flight delays via regression and classification.", "_____no_output_____" ] ], [ [ "import sys, os, re\nsys.path.append(\"lib\")\nimport utils\n\nimport numpy as np\nimport sklearn\nimport iso8601\nimport datetime\nprint(\"Imports loaded...\")", "Imports loaded...\n" ] ], [ [ "## Load and Inspect our JSON Training Data", "_____no_output_____" ] ], [ [ "# Load and check the size of our training data. May take a minute.\nprint(\"Original JSON file size: {:,} Bytes\".format(os.path.getsize(\"../data/simple_flight_delay_features.jsonl\")))\n\ntraining_data = utils.read_json_lines_file('../data/simple_flight_delay_features.jsonl')\n\nprint(\"Training items: {:,}\".format(len(training_data))) # 5,714,008\nprint(\"Data loaded...\")", "Original JSON file size: 4,096 Bytes\n" ], [ "# Inspect a record before we alter them\nprint(\"Size of training data in RAM: {:,} Bytes\".format(sys.getsizeof(training_data))) # 50MB\nprint(training_data[0])", "Size of training data in RAM: 406,496 Bytes\n{'ArrDelay': -14.0, 'CRSArrTime': '2015-01-01T10:25:00.000Z', 'CRSDepTime': '2015-01-01T08:55:00.000Z', 'Carrier': 'AA', 'DayOfMonth': 1, 'DayOfWeek': 4, 'DayOfYear': 1, 'DepDelay': -4.0, 'Dest': 'DFW', 'Distance': 731.0, 'FlightDate': '2015-01-01T00:00:00.000Z', 'FlightNum': '1455', 'Origin': 'ATL'}\n" ] ], [ [ "## Sample our Data", "_____no_output_____" ] ], [ [ "# We need to sample our data to fit into RAM\ntraining_data = np.random.choice(training_data, 1000000) # 'Sample down to 1MM examples'\nprint(\"Sampled items: {:,} Bytes\".format(len(training_data)))\nprint(\"Data sampled...\")", "Sampled items: 1,000,000 Bytes\nData sampled...\n" ] ], [ [ "## Vectorize the Results (y)", "_____no_output_____" ] ], [ [ "# Separate our results from the rest of the data, vectorize and size up\nresults = [record['ArrDelay'] for record in training_data]\nresults_vector = np.array(results)\nprint(\"Results vectorized size: {:,}\".format(sys.getsizeof(results_vector))) # 45,712,160 bytes\nprint(\"Results vectorized...\")", "Results vectorized size: 8,000,096\nResults vectorized...\n" ] ], [ [ "## Prepare Training Data", "_____no_output_____" ] ], [ [ "# Remove the two delay fields and the flight date from our training data\nfor item in training_data:\n item.pop('ArrDelay', None)\n item.pop('FlightDate', None)\nprint(\"ArrDelay and FlightDate removed from training data...\")", "ArrDelay and FlightDate removed from training data...\n" ], [ "# Must convert datetime strings to unix times\nfor item in training_data:\n if isinstance(item['CRSArrTime'], str):\n dt = iso8601.parse_date(item['CRSArrTime'])\n unix_time = int(dt.timestamp())\n item['CRSArrTime'] = unix_time\n if isinstance(item['CRSDepTime'], str):\n dt = iso8601.parse_date(item['CRSDepTime'])\n unix_time = int(dt.timestamp())\n item['CRSDepTime'] = unix_time\nprint(\"CRSArr/DepTime converted to unix time...\")", "CRSArr/DepTime converted to unix time...\n" ] ], [ [ "## Vectorize Training Data with `DictVectorizer`", "_____no_output_____" ] ], [ [ "# Use DictVectorizer to convert feature dicts to vectors\nfrom sklearn.feature_extraction import DictVectorizer\n\nprint(\"Sampled dimensions: [{:,}]\".format(len(training_data)))\nvectorizer = DictVectorizer()\ntraining_vectors = vectorizer.fit_transform(training_data)\nprint(\"Size of DictVectorized vectors: {:,} Bytes\".format(training_vectors.data.nbytes))\nprint(\"Training data 
vectorized...\")", "_____no_output_____" ] ], [ [ "## Prepare Experiment by Splitting Data into Train/Test", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(\n training_vectors,\n results_vector,\n test_size=0.1,\n random_state=43\n)\nprint(X_train.shape, X_test.shape)\nprint(y_train.shape, y_test.shape)\nprint(\"Test train split performed...\")", "_____no_output_____" ] ], [ [ "## Train our Model(s) on our Training Data", "_____no_output_____" ] ], [ [ "# Train a regressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import median_absolute_error, r2_score\nprint(\"Regressor library and metrics imported...\")", "_____no_output_____" ], [ "regressor = LinearRegression()\nprint(\"Regressor instantiated...\")", "_____no_output_____" ], [ "from sklearn.ensemble import GradientBoostingRegressor\n\nregressor = GradientBoostingRegressor\nprint(\"Swapped gradient boosting trees for linear regression!\")\n\n# Lets go back for now...\nregressor = LinearRegression()\nprint(\"Swapped back to linear regression!\")", "_____no_output_____" ], [ "regressor.fit(X_train, y_train)\nprint(\"Regressor fitted...\")", "_____no_output_____" ] ], [ [ "## Predict Using the Test Data", "_____no_output_____" ] ], [ [ "predicted = regressor.predict(X_test)\nprint(\"Predictions made for X_test...\")", "_____no_output_____" ] ], [ [ "## Evaluate and Visualize Model Accuracy", "_____no_output_____" ] ], [ [ "from sklearn.metrics import median_absolute_error, r2_score\n\n# Median absolute error is the median of all absolute differences between the target and the prediction.\n# Less is better, more indicates a high error between target and prediction.\nmedae = median_absolute_error(y_test, predicted)\nprint(\"Median absolute error: {:.3g}\".format(medae))\n\n# R2 score is the coefficient of determination. Ranges from 1-0, 1.0 is best, 0.0 is worst.\n# Measures how well future samples are likely to be predicted.\nr2 = r2_score(y_test, predicted)\nprint(\"r2 score: {:.3g}\".format(r2))", "_____no_output_____" ], [ "# Plot outputs\nimport matplotlib.pyplot as plt\n\n# Cleans up the appearance\nplt.rcdefaults()\n\nplt.scatter(\n y_test,\n predicted,\n color='blue',\n linewidth=1\n)\nplt.grid(True)\n\nplt.xticks()\nplt.yticks()\n\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
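The flight-delay notebook above evaluates its regressor with `median_absolute_error` and `r2_score`. A tiny worked example on made-up numbers shows what each metric reports (note that R² is not bounded below by 0: a model worse than predicting the mean scores negative):

```python
from sklearn.metrics import median_absolute_error, r2_score

# Made-up targets and predictions, purely illustrative.
y_true = [3.0, -0.5, 2.0, 7.0]
y_pred = [2.5,  0.0, 2.0, 8.0]

# Absolute errors are [0.5, 0.5, 0.0, 1.0], so the median is 0.5.
print(median_absolute_error(y_true, y_pred))  # 0.5

# R2 = 1 - SS_res / SS_tot; 1.0 is a perfect fit.
print(r2_score(y_true, y_pred))               # ~0.949
```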
e7a4068902daca1c64b0d3dd00cae3f819a8898d
2,248
ipynb
Jupyter Notebook
Batch-7_Day-6_Assignments.ipynb
Deepika0309/LetsUpgrage-Python-Essentials-
55e6c545169a7587474fa9ec284a229eeef70900
[ "Apache-2.0" ]
null
null
null
Batch-7_Day-6_Assignments.ipynb
Deepika0309/LetsUpgrage-Python-Essentials-
55e6c545169a7587474fa9ec284a229eeef70900
[ "Apache-2.0" ]
null
null
null
Batch-7_Day-6_Assignments.ipynb
Deepika0309/LetsUpgrage-Python-Essentials-
55e6c545169a7587474fa9ec284a229eeef70900
[ "Apache-2.0" ]
null
null
null
20.623853
77
0.482651
[ [ [ "# Assignment-1", "_____no_output_____" ] ], [ [ "class BankAccount(object):\n def __init__(self, initial_balance=0):\n self.balance = initial_balance\n def deposit(self, amount):\n self.balance += amount\n def withdraw(self, amount):\n self.balance -= amount\n def overdrawn(self):\n return self.balance < 0\nmy_account = BankAccount(15)\nmy_account.withdraw(5)\nprint (my_account.balance)\n", "10\n" ] ], [ [ "# Assignment-2", "_____no_output_____" ] ], [ [ "import math \npi = math.pi \n \n\ndef volume(r, h): \n return (1 / 3) * pi * r * r * h \n \n\ndef surfacearea(r, s): \n return pi * r * s + pi * r * r \n \n\nradius = float(5) \nheight = float(12) \nslant_height = float(13) \nprint( \"Volume Of Cone : \", volume(radius, height) ) \nprint( \"Surface Area Of Cone : \", surfacearea(radius, slant_height) ) ", "Volume Of Cone : 314.15926535897927\nSurface Area Of Cone : 282.7433388230814\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
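The cone values printed above can be checked by hand. With r = 5, h = 12 and slant height s = 13 (a 5-12-13 right triangle), the formulas used in the assignment give:

$$
V = \tfrac{1}{3}\pi r^2 h = \tfrac{1}{3}\pi \cdot 5^2 \cdot 12 = 100\pi \approx 314.159,
\qquad
S = \pi r s + \pi r^2 = 65\pi + 25\pi = 90\pi \approx 282.743,
$$

which matches the printed output.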
e7a408caa35b3de00a36695f2c5f30b1b2656de3
611,275
ipynb
Jupyter Notebook
Praktikum 12 - Sharpness.ipynb
fadhilyori/pengolahan-citra
f8fed3d97b0ad05f7f0334fe676cc0b8915d7327
[ "MIT" ]
7
2020-01-03T13:41:50.000Z
2020-04-13T07:55:03.000Z
Praktikum 12 - Sharpness.ipynb
fadhilyori/pengolahan-citra
f8fed3d97b0ad05f7f0334fe676cc0b8915d7327
[ "MIT" ]
null
null
null
Praktikum 12 - Sharpness.ipynb
fadhilyori/pengolahan-citra
f8fed3d97b0ad05f7f0334fe676cc0b8915d7327
[ "MIT" ]
1
2020-04-08T00:36:16.000Z
2020-04-08T00:36:16.000Z
1,761.599424
146,932
0.959993
[ [ [ "# Praktikum 12 | Pengolahan Citra", "_____no_output_____" ], [ "## Sharpness\nSharpness adalah proses untuk mendapatkan gambar yang lebih tajam. Proses sharpness ini memanfaatkan BSF (Band-Stop Filter) yang merupakan gabungan antara LPF (Low Pass Filter) dan HPF (High Pass Filter).", "_____no_output_____" ], [ "Fadhil Yori Hibatullah | 2103161037 | 2 D3 Teknik Informatika B\n\n---------------------", "_____no_output_____" ], [ "### Import Dependency", "_____no_output_____" ] ], [ [ "import imageio\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "### Load Image", "_____no_output_____" ] ], [ [ "imgNormal = imageio.imread(\"gambar4.jpg\")", "_____no_output_____" ] ], [ [ "### Show Image", "_____no_output_____" ] ], [ [ "plt.imshow(imgNormal)\nplt.title(\"Load Image\")\nplt.show()", "_____no_output_____" ] ], [ [ "---------------------", "_____no_output_____" ], [ "## To Grayscale", "_____no_output_____" ] ], [ [ "imgGrayscale = np.zeros((imgNormal.shape[0], imgNormal.shape[1], 3), dtype=np.uint8)\n\nfor y in range(0, imgNormal.shape[0]):\n for x in range(0, imgNormal.shape[1]):\n r = imgNormal[y][x][0]\n g = imgNormal[y][x][1]\n b = imgNormal[y][x][2]\n gr = ( int(r) + int(g) + int(b) ) / 3\n imgGrayscale[y][x] = (gr, gr, gr)\n \nplt.imshow(imgGrayscale)\nplt.title(\"Grayscale\")\nplt.show()", "_____no_output_____" ] ], [ [ "---------------------", "_____no_output_____" ], [ "## Sharpness Gray", "_____no_output_____" ] ], [ [ "imgSharpnessGray = np.zeros((imgNormal.shape[0], imgNormal.shape[1], 3), dtype=np.uint8)\n\nfor y in range(1, imgNormal.shape[0] - 1):\n for x in range(1, imgNormal.shape[1] - 1):\n x1 = int(imgGrayscale[y - 1][x - 1][0])\n x2 = int(imgGrayscale[y][x - 1][0])\n x3 = int(imgGrayscale[y + 1][x - 1][0])\n x4 = int(imgGrayscale[y - 1][x][0])\n x5 = int(imgGrayscale[y][x][0])\n x6 = int(imgGrayscale[y + 1][x][0])\n x7 = int(imgGrayscale[y - 1][x + 1][0])\n x8 = int(imgGrayscale[y][x + 1][0])\n x9 = int(imgGrayscale[y + 1][x + 1][0])\n xt1 = int((x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9) / 9)\n xt2 = int(-x1 - (2 * x2) - x3 + x7 + (2 * x8) + x9)\n xt3 = int(-x1 - (2 * x4) - x7 + x3 + (2 * x6) + x9)\n xb = int(xt1 + xt2 + xt3)\n if xb < 0:\n xb = -xb\n if xb > 255:\n xb = 255\n imgSharpnessGray[y][x] = (xb, xb, xb)\n \nplt.imshow(imgSharpnessGray)\nplt.title(\"Sharpness Gray\")\nplt.show()", "_____no_output_____" ] ], [ [ "------------------", "_____no_output_____" ], [ "## Sharpness Gray (2:1 LPF:HPF)", "_____no_output_____" ] ], [ [ "imgSharpnessGrayL = np.zeros((imgNormal.shape[0], imgNormal.shape[1], 3), dtype=np.uint8)\n\nfor y in range(1, imgNormal.shape[0] - 1):\n for x in range(1, imgNormal.shape[1] - 1):\n x1 = int(imgGrayscale[y - 1][x - 1][0])\n x2 = int(imgGrayscale[y][x - 1][0])\n x3 = int(imgGrayscale[y + 1][x - 1][0])\n x4 = int(imgGrayscale[y - 1][x][0])\n x5 = int(imgGrayscale[y][x][0])\n x6 = int(imgGrayscale[y + 1][x][0])\n x7 = int(imgGrayscale[y - 1][x + 1][0])\n x8 = int(imgGrayscale[y][x + 1][0])\n x9 = int(imgGrayscale[y + 1][x + 1][0])\n xt1 = int((x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9) / 9)\n xt2 = int(-x1 - (2 * x2) - x3 + x7 + (2 * x8) + x9)\n xt3 = int(-x1 - (2 * x4) - x7 + x3 + (2 * x6) + x9)\n xb = int((2 * xt1) + xt2 + xt3)\n if xb < 0:\n xb = -xb\n if xb > 255:\n xb = 255\n imgSharpnessGrayL[y][x] = (xb, xb, xb)\n \nplt.imshow(imgSharpnessGrayL)\nplt.title(\"Sharpness Gray 2:1\")\nplt.show()", "_____no_output_____" ] ], [ [ "------------------", "_____no_output_____" ], [ "## Sharpness Gray 
(1:2 LPF:HPF)", "_____no_output_____" ] ], [ [ "imgSharpnessGrayH = np.zeros((imgNormal.shape[0], imgNormal.shape[1], 3), dtype=np.uint8)\n\nfor y in range(1, imgNormal.shape[0] - 1):\n for x in range(1, imgNormal.shape[1] - 1):\n x1 = int(imgGrayscale[y - 1][x - 1][0])\n x2 = int(imgGrayscale[y][x - 1][0])\n x3 = int(imgGrayscale[y + 1][x - 1][0])\n x4 = int(imgGrayscale[y - 1][x][0])\n x5 = int(imgGrayscale[y][x][0])\n x6 = int(imgGrayscale[y + 1][x][0])\n x7 = int(imgGrayscale[y - 1][x + 1][0])\n x8 = int(imgGrayscale[y][x + 1][0])\n x9 = int(imgGrayscale[y + 1][x + 1][0])\n xt1 = int((x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9) / 9)\n xt2 = int(-x1 - (2 * x2) - x3 + x7 + (2 * x8) + x9)\n xt3 = int(-x1 - (2 * x4) - x7 + x3 + (2 * x6) + x9)\n xb = int(xt1 + (2 * xt2) + (2 * xt3))\n if xb < 0:\n xb = -xb\n if xb > 255:\n xb = 255\n imgSharpnessGrayH[y][x] = (xb, xb, xb)\n \nplt.imshow(imgSharpnessGrayH)\nplt.title(\"Sharpness Gray 1:2\")\nplt.show()", "_____no_output_____" ] ], [ [ "---------------------", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
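The sharpening loops above combine a 3×3 mean filter (the low-pass part) with horizontal and vertical Sobel-style gradients (the high-pass part), weighted 1:1, 2:1 or 1:2. A vectorized sketch of the same idea using `scipy.ndimage.correlate` is given below; it uses the same kernel weights but is an illustration, not a drop-in replacement for the notebook's per-pixel code.

```python
import numpy as np
from scipy import ndimage

def sharpen(gray, w_lpf=1.0, w_hpf=1.0):
    """Band-stop style sharpening: weighted sum of a mean filter and Sobel gradients."""
    g = gray.astype(float)
    lpf = ndimage.correlate(g, np.full((3, 3), 1 / 9))            # xt1 in the loops above
    ky = np.array([[-1., -2., -1.], [0., 0., 0.], [1., 2., 1.]])  # vertical gradient (xt3)
    kx = ky.T                                                     # horizontal gradient (xt2)
    hpf = ndimage.correlate(g, kx) + ndimage.correlate(g, ky)
    out = np.abs(w_lpf * lpf + w_hpf * hpf)                       # same abs + clip as the loops
    return np.clip(out, 0, 255).astype(np.uint8)
```

Calling `sharpen(gray, 2.0, 1.0)` mirrors the 2:1 LPF:HPF weighting and `sharpen(gray, 1.0, 2.0)` the 1:2 variant shown above.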
e7a4196e3e521f310190b0178b7a85201f8fd0ff
9,734
ipynb
Jupyter Notebook
day4/Newton-Method.ipynb
devonwt/usrp-sciprog
d7d01f54201e331a383fd3fe46075ec72f84ddbd
[ "MIT" ]
null
null
null
day4/Newton-Method.ipynb
devonwt/usrp-sciprog
d7d01f54201e331a383fd3fe46075ec72f84ddbd
[ "MIT" ]
null
null
null
day4/Newton-Method.ipynb
devonwt/usrp-sciprog
d7d01f54201e331a383fd3fe46075ec72f84ddbd
[ "MIT" ]
null
null
null
41.956897
1,043
0.593589
[ [ [ "# Newton's Method for finding a root\n\n\n[Newton's method](https://en.wikipedia.org/wiki/Newton's_method) uses a clever insight to iteratively home in on the root of a function $f$. The central idea is to approximate $f$ by its tangent at some initial position $x_0$:\n\n$$\ny = f'(x_0) (x-x_0) + f(x_0)\n$$\n\nThe $x$-intercept of this line is then closer to the root than the starting position $x_0$. That is, we need to solve the linear relation\n\n$$\nf'(x_0)(x_1-x_0) + f(x_0) = 0\n$$\n\nfor the updated position $x_1 = x_0 - f(x_0)/f'(x_0)$. Repeating this sequence\n\n$$\nx_{n+1} = x_n - \\frac{f(x_n)}{f'(x_n)}\n$$\n\nwill yield a fixed point, which is the root of $f$ *if one exists in the vicinity of $x_0$*.", "_____no_output_____" ] ], [ [ "def newtons_method(f, df, x0, tol=1E-6):\n x_n = x0 \n while abs(f(x_n)) > tol:\n x_n = x_n - f(x_n)/df(x_n)\n return x_n", "_____no_output_____" ] ], [ [ "## Minimizing a function\n\nAs the maximum and minimum of a function are defined by $f'(x) = 0$, we can use Newton's method to find extremal points by applying it to the first derivative. Let's try this with a simple function with a known minimum:", "_____no_output_____" ] ], [ [ "# define a test function\ndef f(x):\n return (x-3)**2 - 9\n\ndef df(x):\n return 2*(x-3)\n\ndef df2(x):\n return 2.", "_____no_output_____" ], [ "root = newtons_method(f, df, x0=0.1)\nprint (\"root {0}, f(root) = {1}\".format(root, f(root)))", "root -4.092847520435343e-14, f(root) = 2.4513724383723456e-13\n" ], [ "minimum = newtons_method(df, df2, x0=0.1)\nprint (\"minimum {0}, f'(minimum) = {1}\".format(minimum, df(minimum)))", "minimum 3.0, f'(minimum) = 0.0\n" ] ], [ [ "There is an important qualifier in the statement about fixed points: **a root needs to exist in the vicinity of $x_0$!** Let's see what happens if that's not the case:", "_____no_output_____" ] ], [ [ "def f(x):\n return (x-3)**2 + 1\nnewtons_method(f, df, x0=0.1)", "_____no_output_____" ] ], [ [ "With a little more defensive programming we can make sure that the function will terminate after a given number of iterations:", "_____no_output_____" ] ], [ [ "def newtons_method2(f, df, x0, tol=1E-6, maxiter=100000):\n x_n = x0 \n for _ in range(maxiter):\n x_n = x_n - f(x_n)/df(x_n)\n \n if abs(f(x_n)) < tol:\n return x_n\n \n raise RuntimeError(\"Failed to find a minimum within {} iterations \".format(maxiter))", "_____no_output_____" ], [ "newtons_method2(f, df, x0=0.1)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
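As a quick illustration of the update rule above, the first few iterates for the notebook's test function f(x) = (x-3)² - 9, started at x0 = 0.1, converge to the root at x = 0; once the iterate is close, the error roughly squares at each step.

```python
f = lambda x: (x - 3) ** 2 - 9
df = lambda x: 2 * (x - 3)

x = 0.1
for i in range(5):
    x = x - f(x) / df(x)          # Newton update x_{n+1} = x_n - f(x_n)/f'(x_n)
    print(i + 1, x, abs(f(x)))    # |f(x)| shrinks rapidly towards 0
```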
e7a421daac0dfcecd0c62418ec0ab6963d2b4aa1
20,139
ipynb
Jupyter Notebook
WEEK 6 (Project)/baseline.ipynb
prachuryanath/SA-CAIITG
99f891a53c1196fd3e050ea51d16eb9757cf866c
[ "Apache-2.0" ]
null
null
null
WEEK 6 (Project)/baseline.ipynb
prachuryanath/SA-CAIITG
99f891a53c1196fd3e050ea51d16eb9757cf866c
[ "Apache-2.0" ]
null
null
null
WEEK 6 (Project)/baseline.ipynb
prachuryanath/SA-CAIITG
99f891a53c1196fd3e050ea51d16eb9757cf866c
[ "Apache-2.0" ]
null
null
null
43.403017
1,842
0.442127
[ [ [ "# Random Forest with hyperparameter tuning", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "train_data = pd.read_csv('Train_Data.csv')\ntest_data = pd.read_csv('Test_Data.csv')", "_____no_output_____" ], [ "train_data.head()", "_____no_output_____" ], [ "train_data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4571 entries, 0 to 4570\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 date 4571 non-null object \n 1 campaign 4571 non-null object \n 2 adgroup 4571 non-null object \n 3 ad 4571 non-null object \n 4 impressions 4571 non-null int64 \n 5 clicks 4571 non-null int64 \n 6 cost 4571 non-null float64\n 7 conversions 4571 non-null int64 \n 8 revenue 4571 non-null float64\ndtypes: float64(2), int64(3), object(4)\nmemory usage: 321.5+ KB\n" ], [ "train_data.drop('date', axis=1, inplace=True)\ntrain_data.drop('campaign', axis=1, inplace=True)\ntest_data.drop('date', axis=1, inplace=True)\ntest_data.drop('campaign', axis=1, inplace=True)\ntrain_data.drop('ad', axis=1, inplace=True)\ntest_data.drop('ad', axis=1, inplace=True)", "_____no_output_____" ], [ "test_data.head()", "_____no_output_____" ], [ "train_data = pd.get_dummies(train_data)\ntest_data = pd.get_dummies(test_data)\ntrain_data.head()", "_____no_output_____" ], [ "test_data.columns", "_____no_output_____" ], [ "X_train = train_data.drop(['revenue'], axis='columns')\ny_train = train_data['revenue']\nX_test = test_data", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\n\nX_train_scaled = scaler.fit_transform(X_train)\nX_train_scaled\n\nX_test_scaled = scaler.transform(X_test)\nX_test_scaled", "_____no_output_____" ], [ "from sklearn.ensemble import RandomForestRegressor\nrf = RandomForestRegressor()", "_____no_output_____" ], [ "print('Parameters currently in use:\\n')\nprint(rf.get_params())", "Parameters currently in use:\n\n{'bootstrap': True, 'ccp_alpha': 0.0, 'criterion': 'mse', 'max_depth': None, 'max_features': 'auto', 'max_leaf_nodes': None, 'max_samples': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 100, 'n_jobs': None, 'oob_score': False, 'random_state': None, 'verbose': 0, 'warm_start': False}\n" ], [ "from sklearn.model_selection import RandomizedSearchCV\n# Number of trees in random forest\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# Number of features to consider at every split\nmax_features = ['auto', 'sqrt']\n# Maximum number of levels in tree\nmax_depth = [int(x) for x in np.linspace(10, 110, num = 11)]\nmax_depth.append(None)\n# Minimum number of samples required to split a node\nmin_samples_split = [2, 5, 10]\n# Minimum number of samples required at each leaf node\nmin_samples_leaf = [1, 2, 4]\n# Method of selecting samples for training each tree\nbootstrap = [True, False]\n# Create the random grid\nrandom_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\nprint(random_grid)", "{'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000], 'max_features': ['auto', 'sqrt'], 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, None], 'min_samples_split': [2, 5, 10], 'min_samples_leaf': [1, 2, 4], 'bootstrap': 
[True, False]}\n" ], [ "rf = RandomForestRegressor()\n# Random search of parameters, using 3 fold cross validation, \n# search across 100 different combinations, and use all available cores\nrf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1)\n# Fit the random search model\nrf_random.fit(X_train_scaled, y_train)", "Fitting 3 folds for each of 100 candidates, totalling 300 fits\n" ], [ "rf_random.best_params_", "_____no_output_____" ], [ "rf_tuned = RandomForestRegressor(n_estimators= 200,\n min_samples_split= 5,\n min_samples_leaf= 4,\n max_features= 'auto',\n max_depth= 10,\n bootstrap= True)\nrf_tuned.fit(X_train_scaled, y_train)", "_____no_output_____" ], [ "preds = rf_tuned.predict(X_test_scaled)", "_____no_output_____" ], [ "preds = preds.astype('int64')\r\npreds", "_____no_output_____" ], [ "prediction = pd.DataFrame(preds, columns=['revenue']).to_csv('prediction3.csv', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
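A common follow-up to the random search above is a narrow grid search around the best parameters it reported. The sketch below shows that pattern; synthetic data from `make_regression` stands in for the `Train_Data.csv` features, which are not reproduced here, and the grid values are illustrative.

```python
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV

# Stand-in data; replace with the scaled training matrix from the notebook.
X, y = make_regression(n_samples=500, n_features=10, noise=10.0, random_state=42)

# Narrow grid centred on the values reported by RandomizedSearchCV above.
param_grid = {
    "n_estimators": [100, 200, 300],
    "max_depth": [5, 10, 15],
    "min_samples_leaf": [2, 4, 6],
}
grid = GridSearchCV(RandomForestRegressor(random_state=42), param_grid, cv=3, n_jobs=-1)
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)
```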
e7a42b7ce8888d14055e4a6e0ec0565e1e91a5c3
83,743
ipynb
Jupyter Notebook
Notebook_Archive/FeatureConsistency Score.ipynb
molu1019/CycleGAN-Tensorflow-2
69e51007718b76595313b24ed1fb7c3ee5ea346c
[ "MIT" ]
null
null
null
Notebook_Archive/FeatureConsistency Score.ipynb
molu1019/CycleGAN-Tensorflow-2
69e51007718b76595313b24ed1fb7c3ee5ea346c
[ "MIT" ]
null
null
null
Notebook_Archive/FeatureConsistency Score.ipynb
molu1019/CycleGAN-Tensorflow-2
69e51007718b76595313b24ed1fb7c3ee5ea346c
[ "MIT" ]
null
null
null
76.199272
8,268
0.763097
[ [ [ "## Notebook for calculating Mask Consistency Score for GAN-transformed images", "_____no_output_____" ] ], [ [ "from PIL import Image\nimport cv2\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nimport glob, os\nimport numpy as np\nimport matplotlib.image as mpimg\n#from keras.preprocessing.image import img_to_array, array_to_img", "_____no_output_____" ] ], [ [ "## 1. Resize GAN-transformed Dataset to 1024*1024", "_____no_output_____" ], [ "#### 1.1 Specify Args: Directory, folder name and the new image size", "_____no_output_____" ] ], [ [ "folder = 'A2B_FID'\nimage_size = 1024\ndir = '/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Powertrain14_Blattfeder/Results/training4_batch4_400trainA_250trainB/samples_testing'", "_____no_output_____" ] ], [ [ "#### 1.2 Create new Folder \"/A2B_FID_1024\" in Directory", "_____no_output_____" ] ], [ [ "old_folder = (os.path.join(dir, folder))\nnew_folder = (os.path.join(dir, folder+'_'+str(image_size)))\n\nif not os.path.exists(new_folder):\n try:\n os.mkdir(new_folder)\n except FileExistsError:\n print('Folder already exists')\n pass", "_____no_output_____" ], [ "print(os.path.join(old_folder))\nprint(os.path.join(dir, folder+'_'+str(image_size)))", "/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Powertrain14_Blattfeder/Results/training4_batch4_400trainA_250trainB/samples_testing/A2B_FID\n/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Powertrain14_Blattfeder/Results/training4_batch4_400trainA_250trainB/samples_testing/A2B_FID_1024\n" ] ], [ [ "#### 1.3 Function for upsampling images of 256-256 or 512-512 to images with size 1024-1024", "_____no_output_____" ] ], [ [ "new_size = image_size\nwidth = new_size\nheight = new_size\ndim = (width, height)\n#images = glob.glob(os.path.join(new_folder, '*.jpg')) + glob.glob(os.path.join(new_folder, '*.png'))\n\ndef resize_upsampling(old_folder, new_folder):\n for image in os.listdir(old_folder):\n img = cv2.imread(os.path.join(old_folder, image))\n # INTER_CUBIC or INTER_LANCZOS4\n img_resized = cv2.resize(img, dim, interpolation = cv2.INTER_CUBIC)\n print('Shape: '+str(img.shape)+' is now resized to: '+str(img_resized.shape))\n cv2.imwrite(os.path.join(new_folder , image),img_resized)\n \ndef resize_downsampling(old_folder, new_folder):\n for image in os.listdir(old_folder):\n img = cv2.imread(os.path.join(old_folder, image))\n img_resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n print('Shape: '+str(img.shape)+' is now resized to: '+str(img_resized.shape))\n cv2.imwrite(os.path.join(new_folder , image),img_resized)", "_____no_output_____" ] ], [ [ "#### 1.4 Run the aforementoined function", "_____no_output_____" ] ], [ [ "resize_upsampling(old_folder, new_folder)", "Shape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 
3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 
256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now 
resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\nShape: (256, 256, 3) is now resized to: (1024, 1024, 3)\n" ] ], [ [ "## 2. Use the annotation Tool Labelme to create polygons in JSON format", "_____no_output_____" ], [ "We than use the JSON files with polygon data to create semantic segmentation mask - no instance segmentation needed, because we do not need to differenciate between distinct features. We use the bash and python skript in this directory to do the mask translation.", "_____no_output_____" ] ], [ [ "!ls\n!pwd", " augmentation.py\t\t interpolation.py __pycache__\n data.py\t\t\t labelme2coco.py pylib\n datasets\t\t\t labelme2voc.py README.md\n download_dataset.sh\t\t labels.txt\t resize_images_pascalvoc\n'FeatureConsistency Score.ipynb' LICENSE\t test.py\n FeatureScore\t\t\t mask-score.ipynb tf2gan\n fid.py\t\t\t\t module.py\t tf2lib\n imlib\t\t\t\t output\t train.py\n/home/molu1019/workspace/CycleGAN-Tensorflow-2\n" ] ], [ [ "Insert the folder path as **input_dir** where the GAN transformed images with corresponding JSON label are located.", "_____no_output_____" ] ], [ [ "input_dir = '/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1'\noutput_dir = input_dir+'_mask'\nprint(output_dir)", "/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1_mask\n" ], [ "!python3 labelme2voc.py $input_dir $output_dir --labels labels.txt", "Creating dataset: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1_mask\nclass_names: ('_background_', 'Blattfeder', 'Blattfeder_nio', 'Wandlerhalter', 'Getriebeflansch_Mutter', 'Getriebeflansch_Abdeckung', 'Entluefter')\nSaved class_names: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1_mask/class_names.txt\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274321.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274414.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273810.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274350.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274227.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274288.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273684.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273905.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273715.json\nGenerating dataset from: 
/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274513.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274544.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274002.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273747.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273971.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273462.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273582.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273366.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274064.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_274032.json\nGenerating dataset from: /mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1/rgb_273430.json\n" ], [ "seg_dir = output_dir+'/SegmentationObjectPNG'\nprint(seg_dir)", "/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Evaluation/BatchSize/Blattfeder/Batch1_mask/SegmentationObjectPNG\n" ], [ "GAN_mask_images = os.listdir(seg_dir)\nprint(mask_images)", "['rgb_274321.png', 'rgb_274414.png', 'rgb_273810.png', 'rgb_274350.png', 'rgb_274227.png', 'rgb_274288.png', 'rgb_273684.png', 'rgb_273905.png', 'rgb_273715.png', 'rgb_274513.png', 'rgb_274544.png', 'rgb_274002.png', 'rgb_273747.png', 'rgb_273971.png', 'rgb_273462.png', 'rgb_273582.png', 'rgb_273366.png', 'rgb_274064.png', 'rgb_274032.png', 'rgb_273430.png']\n" ] ], [ [ "## 3. Mask Parameters syntetic Images", "_____no_output_____" ] ], [ [ "mask_Blattfeder = [149, 255, 0]\nmask_Entluefter = []\nmask_Wandlerhalter = []\nmask_Getreibeflansch = []\nmask_Abdeckung = []", "_____no_output_____" ] ], [ [ "#### Resize syn. 
Masks from 1920-1080 to 1024-1024 ", "_____no_output_____" ] ], [ [ "def resize(image, size):\n dim = (size, size)\n img = cv2.imread(path)\n img = img\n img_resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n # tp show as array use display()\n #display(img_resized)\n plt.imshow(img_resized)\n return img_resized", "_____no_output_____" ] ], [ [ "##### Check Mask and Color", "_____no_output_____" ] ], [ [ "#img = Image.open(path)\n#rgb_im = img.convert('RGB')\nr, g, b = rgb_im.getpixel((1020, 500))\nwidth, height = img.size\nprint(r, g, b)\nprint(rgb_im.getextrema())\nprint(rgb_im)\nprint(width, height)", "_____no_output_____" ], [ "def readfile(path):\n #img = Image.open(path)\n #with only one color channel:\n img = (Image.open(path).convert('L'))\n img = np.array(img)\n plt.imshow(img)\n print(img.size)\n print(img.shape)\n with Image.open(path) as im:\n print(im.getbbox())\n \n return img", "_____no_output_____" ] ], [ [ "### Read Dataset Folder of Image Masks", "_____no_output_____" ] ], [ [ "def read_imgs(path, size=(1920, 1080), resize=None):\n \"\"\"Read images as ndarray.\n Args:\n path: A string, path of images.\n size: A tuple of 2 integers,\n (heights, widths).\n resize: A float or None,\n specifying how the image value should be resized.\n If None, no scaled.\n preprocessing: A function of data preprocessing,\n (e.g. noralization, shape manipulation, etc.)\n \"\"\"\n img_list = [f for f in os.listdir(path) if not f.startswith(\".\")]\n data = np.empty((len(img_list), *size, 3))\n size = size[1], size[0]\n\n for img_i, _path in enumerate(img_list):\n img = Image.open(path + os.sep + _path)\n img = resize_upsampling(img)\n img = img.convert(\"RGB\") \n img = np.array(img)\n data[img_i] = img\n\n if rescale:\n data = data*rescale\n\n \n return data", "_____no_output_____" ] ], [ [ "### Syntetical Image Data", "_____no_output_____" ] ], [ [ "path = r'/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Powertrain14_Blattfeder/Instance_280443.png'\nimg_or = readfile(path)\nimg_or_res = resize(img_or, 1024)\nimg_or_res = img_or_res[:,:,1]\nimg_or_res_bin = binarize (img_or_res)", "2073600\n(1080, 1920)\n(0, 0, 1920, 1080)\n<class 'numpy.ndarray'>\n255\n[[False False False ... False False False]\n [False False False ... False False False]\n [False False False ... False False False]\n ...\n [False False False ... False False False]\n [False False False ... False False False]\n [False False False ... False False False]]\nTrue\n(1024, 1024)\n[[0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n(497, 200, 641, 860)\n" ] ], [ [ "### GAN Image Data", "_____no_output_____" ] ], [ [ "path_result = '/mnt/robolab/data/Bilddaten/GAN_train_data_sydavis-ai/Powertrain14_Blattfeder/Test_maskScore_results/rgb_280443.png'\nimg_gan = readfile(path_result)\nimg_gan_bin = binarize(img_gan)", "1048576\n(1024, 1024)\n(510, 236, 643, 858)\n<class 'numpy.ndarray'>\n38\n[[False False False ... False False False]\n [False False False ... False False False]\n [False False False ... False False False]\n ...\n [False False False ... False False False]\n [False False False ... False False False]\n [False False False ... False False False]]\nTrue\n(1024, 1024)\n[[0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 
0 0 0]]\n(510, 236, 643, 858)\n" ], [ "def loadpolygon():\n return", "_____no_output_____" ] ], [ [ "Since True is regarded as 1 and False is regarded as 0, when multiplied by 255 which is the Max value of uint8, True becomes 255 (white) and False becomes 0 (black)", "_____no_output_____" ] ], [ [ "def binarize(image):\n #im_gray = np.array(Image.open(path).convert('L'))\n print(type(image))\n print(image[600,600])\n \n thresh = 28\n im_bool = image > thresh\n print(im_bool)\n print(im_bool[600,600])\n print(im_bool.shape)\n \n maxval = 255\n im_bin = (image > thresh) * maxval\n print(im_bin)\n \n im_save = Image.fromarray(np.uint8(im_bin))\n im_save_bool = Image.fromarray((im_bool))\n \n plt.imshow(im_save_bool)\n f, axarr = plt.subplots(1,2)\n axarr[0].imshow(im_save)\n axarr[1].imshow(im_save_bool)\n \n with im_save_bool as im:\n print(im.getbbox())\n return im_bin\n\nbinarize(img_or_res)", "<class 'numpy.ndarray'>\n255\n[[False False False ... False False False]\n [False False False ... False False False]\n [False False False ... False False False]\n ...\n [False False False ... False False False]\n [False False False ... False False False]\n [False False False ... False False False]]\nTrue\n(1024, 1024)\n[[0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n(497, 200, 641, 860)\n" ], [ "def convexhull():\n return", "_____no_output_____" ], [ "def calculatescore(ground_truth, prediction_gan):\n \"\"\"\n Compute feature consitency score of two segmentation masks.\n \n IoU(A,B) = |A & B| / (| A U B|)\n Dice(A,B) = 2*|A & B| / (|A| + |B|)\n\n Args:\n y_true: true masks, one-hot encoded.\n y_pred: predicted masks, either softmax outputs, or one-hot encoded.\n metric_name: metric to be computed, either 'iou' or 'dice'.\n metric_type: one of 'standard' (default), 'soft', 'naive'.\n In the standard version, y_pred is one-hot encoded and the mean\n is taken only over classes that are present (in y_true or y_pred).\n The 'soft' version of the metrics are computed without one-hot\n encoding y_pred.\n The 'naive' version return mean metrics where absent classes contribute\n to the class mean as 1.0 (instead of being dropped from the mean).\n drop_last = True: boolean flag to drop last class (usually reserved\n for background class in semantic segmentation)\n mean_per_class = False: return mean along batch axis for each class.\n verbose = False: print intermediate results such as intersection, union\n (as number of pixels).\n Returns:\n IoU of ground truth and GAN transformed syntetic Image, as a float.\n\n Inputs are B*W*H*N tensors, with\n B = batch size,\n W = width,\n H = height,\n N = number of classes\n \"\"\"\n \n # check image shape to be the same\n assert ground_truth.shape == prediction_gan.shape, 'Input masks should be same shape, instead are {}, {}'.format(ground_truth.shape, prediction_gan.shape)\n print('Ground truth shape: '+str(ground_truth.shape))\n print('Predicted GAN image shape: '+str(prediction_gan.shape))\n \n intersection = np.logical_and(ground_truth, prediction_gan)\n union = np.logical_or(ground_truth, prediction_gan)\n mask_sum = np.sum(np.abs(union)) + np.sum(np.abs(intersection))\n iou_score = np.sum(intersection) / np.sum(union)\n dice_score = 2*np.sum(intersection) / np.sum(mask_sum) \n print('IoU is: '+str(iou_score))\n print('Dice/F1 Score is: '+str(dice_score))\n return iou_score, dice_score\n\n\ncalculatescore(img_or_res_bin, img_gan_bin)", "Ground truth shape: (1024, 1024)\nPredicted GAN 
image shape: (1024, 1024)\nIoU is: 0.7979989122059556\nDice/F1 Score is: 0.8876522747468127\n" ] ], [ [ "### Image mask transformation \nTranslate the image masks to white RGB (255, 255, 255), fill the convex hull, and compare the masks to calculate the 'Feature Consistency Score'. ", "_____no_output_____" ] ], [ [ "for file in glob.glob(\"*.png\"):\n    calculatescore()", "_____no_output_____" ] ], [ [ "## Print Confusion Matrix", "_____no_output_____" ] ] ]
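The notebook record above closes with a "Print Confusion Matrix" heading but no corresponding cell. Below is a minimal sketch of what such a cell could contain; it is not part of the original notebook, the helper name `mask_confusion_matrix` is invented here, and the commented usage assumes the binarized masks `img_or_res_bin` (ground truth) and `img_gan_bin` (GAN result) defined earlier in that notebook.

```python
import numpy as np

def mask_confusion_matrix(ground_truth, prediction):
    """Pixel-wise confusion matrix for two binary masks (nonzero = object)."""
    gt = ground_truth > 0
    pred = prediction > 0
    tp = np.sum(gt & pred)    # object pixels present in both masks
    fn = np.sum(gt & ~pred)   # object pixels missed by the GAN mask
    fp = np.sum(~gt & pred)   # pixels wrongly marked as object in the GAN mask
    tn = np.sum(~gt & ~pred)  # background pixels in both masks
    return np.array([[tp, fn],
                     [fp, tn]])

# Hypothetical usage with the masks computed above:
# print(mask_confusion_matrix(img_or_res_bin, img_gan_bin))
```

From such a matrix the IoU and Dice values reported above can be recovered as tp / (tp + fp + fn) and 2*tp / (2*tp + fp + fn), which gives a quick consistency check on `calculatescore`.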
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7a45552086ffd0b9dc8ada685851e533b9f33b7
272,662
ipynb
Jupyter Notebook
cracking-the-data-science-interview-master/cracking-the-data-science-interview-master/EBooks/Bayesian-Methods-for-Hackers/.ipynb_checkpoints/C1-Introduction-checkpoint.ipynb
anushka-DS/DS-Interview-Prep
c331769f8f2d11e167d782b3b2e48ca4709d9b54
[ "CC-BY-4.0" ]
1
2022-01-01T07:18:42.000Z
2022-01-01T07:18:42.000Z
cracking-the-data-science-interview-master/cracking-the-data-science-interview-master/EBooks/Bayesian-Methods-for-Hackers/C1-Introduction.ipynb
anushka-DS/DS-Interview-Prep
c331769f8f2d11e167d782b3b2e48ca4709d9b54
[ "CC-BY-4.0" ]
null
null
null
cracking-the-data-science-interview-master/cracking-the-data-science-interview-master/EBooks/Bayesian-Methods-for-Hackers/C1-Introduction.ipynb
anushka-DS/DS-Interview-Prep
c331769f8f2d11e167d782b3b2e48ca4709d9b54
[ "CC-BY-4.0" ]
null
null
null
388.961484
88,544
0.919651
[ [ [ "## 1.2 Bayesian Framework\n\nWe are interested in beliefs, which can be interpreted as probabilities by thinking Bayesian. We have a prior belief in event $A$, beliefs formed by previous information, e.g., our prior belief about bugs being in our code before performing tests.\n\nSecondly, we observe our evidence. To continue our buggy-code example: if our code passes $X$ tests, we want to update our belief to incorporate this. We call this new belief the posterior probability. Updating our belief is done via the following equation, known as Bayes' Theorem, after its discoverer Thomas Bayes:\n\n$$\\begin{align}\n P( A | X ) = \\frac{ P(X | A) P(A) } {P(X) } \\\\\\\\[5pt]\n \\propto P(X | A) P(A)\\;\\; (\\propto \\text{is proportional to })\n\\end{align}$$\n\nThe above formula is not unique to Bayesian inference: it is a mathematical fact with uses outside Bayesian inference. Bayesian inference merely uses it to connect prior probabilities $P(A)$ with an updated posterior probabilities $P(A | X )$.\n\n### 1.2.1 Example: Mandatory coin-flip example\nEvery statistics text must contain a coin-flipping example, I'll use it here to get it out of the way. Suppose, naively, that you are unsure about the probability of heads in a coin flip (spoiler alert: it's 50%). You believe there is some true underlying ratio, call it $p$, but have no prior opinion on what $p$ might be.\n\nWe begin to flip a coin, and record the observations: either $H$ or $T$. This is our observed data. An interesting question to ask is how our inference changes as we observe more and more data? More specifically, what do our posterior probabilities look like when we have little data, versus when we have lots of data.\n\nBelow we plot a sequence of updating posterior probabilities as we observe increasing amounts of data (coin flips).", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom IPython.core.pylabtools import figsize\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfigsize(11, 9)\n\nimport scipy.stats as stats\n\ndist = stats.beta\nn_trials = [0, 1, 2, 3, 4, 5, 8, 15, 50, 500]\ndata = stats.bernoulli.rvs(0.5, size=n_trials[-1])\nx = np.linspace(0, 1, 100)\n\n# For the already prepared, I'm using Binomial's conj. prior.\nfor k, N in enumerate(n_trials):\n sx = plt.subplot(len(n_trials) / 2, 2, k + 1)\n plt.xlabel(\"$p$, probability of heads\") \\\n if k in [0, len(n_trials) - 1] else None\n plt.setp(sx.get_yticklabels(), visible=False)\n heads = data[:N].sum()\n y = dist.pdf(x, 1 + heads, 1 + N - heads)\n plt.plot(x, y, label=\"observe %d tosses,\\n %d heads\" % (N, heads))\n plt.fill_between(x, 0, y, color=\"#348ABD\", alpha=0.4)\n plt.vlines(0.5, 0, 4, color=\"k\", linestyles=\"--\", lw=1)\n\n leg = plt.legend()\n leg.get_frame().set_alpha(0.4)\n plt.autoscale(tight=True)\n\n\nplt.suptitle(\"Bayesian updating of posterior probabilities\",\n y=1.02,\n fontsize=14)\n\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "The posterior probabilities are represented by the curves, and our uncertainty is proportional to the width of the curve. As the plot above shows, as we start to observe data our posterior probabilities start to shift and move around. Eventually, as we observe more and more data (coin-flips), our probabilities will tighten closer and closer around the true value of $p=0.5$ (marked by a dashed line).\n\nNotice that the plots are not always peaked at 0.5. There is no reason it should be: recall we assumed we did not have a prior opinion of what $p$ is. 
In fact, if we observe quite extreme data, say 8 flips and only 1 observed heads, our distribution would look very biased away from lumping around 0.5 (with no prior opinion, how confident would you feel betting on a fair coin after observing 8 tails and 1 head?). As more data accumulates, we would see more and more probability being assigned at $p=0.5$, though never all of it.\n\nThe next example is a simple demonstration of the mathematics of Bayesian inference.\n\n### 1.2.2 Example: Bug, or just sweet, unintended feature?\n\nLet $A$ denote the event that our code has no bugs in it. Let $X$ denote the event that the code passes all debugging tests. For now, we will leave the prior probability of no bugs as a variable, i.e. $P(A) = p$.\n\nWe are interested in $P(A|X)$, i.e. the probability of no bugs, given our debugging tests $X$. To use the formula above, we need to compute some quantities.\n\nWhat is $P(X | A)$, i.e., the probability that the code passes $X$ tests given there are no bugs? Well, it is equal to 1, for a code with no bugs will pass all tests.\n\n$P(X)$ is a little bit trickier: The event $X$ can be divided into two possibilities, event $X$ occurring even though our code indeed has bugs (denoted $\\sim A\\;$, spoken not $A$), or event $X$ without bugs ($A$). $P(X)$ can be represented as:\n\n$$\\begin{align}\nP(X ) = P(X \\text{ and } A) + P(X \\text{ and } \\sim A) \\\\\\\\[5pt]\n = P(X|A)P(A) + P(X | \\sim A)P(\\sim A)\\\\\\\\[5pt]\n = P(X|A)p + P(X | \\sim A)(1-p)\n\\end{align}$$\n\nWe have already computed $P(X|A)$ above. On the other hand, $P(X | \\sim A)$ is subjective: our code can pass tests but still have a bug in it, though the probability there is a bug present is reduced. Note this is dependent on the number of tests performed, the degree of complication in the tests, etc. Let's be conservative and assign $P(X|\\sim A) = 0.5$. Then\n\n$$\\begin{align}\nP(A | X) = \\frac{1\\cdot p}{ 1\\cdot p +0.5 (1-p) } \\\\\\\\\n = \\frac{ 2 p}{1+p}\n\\end{align}$$\n\nThis is the posterior probability. What does it look like as a function of our prior, $p \\in [0,1]$?", "_____no_output_____" ] ], [ [ "figsize(12.5, 4)\np = np.linspace(0, 1, 50)\nplt.plot(p, 2*p/(1+p), color=\"#348ABD\", lw=3)\n#plt.fill_between(p, 2*p/(1+p), alpha=.5, facecolor=[\"#A60628\"])\nplt.scatter(0.2, 2*(0.2)/1.2, s=140, c=\"#348ABD\")\nplt.xlim(0, 1)\nplt.ylim(0, 1)\nplt.xlabel(\"Prior, $P(A) = p$\")\nplt.ylabel(\"Posterior, $P(A|X)$, with $P(A) = p$\")\nplt.title(\"Are there bugs in my code?\");", "_____no_output_____" ] ], [ [ "We can see the biggest gains if we observe the $X$ tests passed when the prior probability, $p$, is low. Let's settle on a specific value for the prior. I'm a strong programmer (I think), so I'm going to give myself a realistic prior of 0.20, that is, there is a 20% chance that I write code bug-free. To be more realistic, this prior should be a function of how complicated and large the code is, but let's pin it at 0.20. Then my updated belief that my code is bug-free is 0.33.\n\nRecall that the prior is a probability: $p$ is the prior probability that there are no bugs, so $1-p$ is the prior probability that there are bugs.\n\nSimilarly, our posterior is also a probability, with $P(A | X)$ the probability there is no bug given we saw all tests pass, hence $1-P(A|X)$ is the probability there is a bug given all tests passed. What does our posterior probability look like? 
Below is a chart of both the prior and the posterior probabilities.", "_____no_output_____" ] ], [ [ "figsize(12.5, 4)\ncolours = [\"#348ABD\", \"#A60628\"]\n\nprior = [0.20, 0.80]\nposterior = [1. / 3, 2. / 3]\nplt.bar([0, .7], prior, alpha=0.70, width=0.25,\n color=colours[0], label=\"prior distribution\",\n lw=\"3\", edgecolor=colours[0])\n\nplt.bar([0 + 0.25, .7 + 0.25], posterior, alpha=0.7,\n width=0.25, color=colours[1],\n label=\"posterior distribution\",\n lw=\"3\", edgecolor=colours[1])\n\nplt.ylim(0,1)\nplt.xticks([0.20, .95], [\"Bugs Absent\", \"Bugs Present\"])\nplt.title(\"Prior and Posterior probability of bugs present\")\nplt.ylabel(\"Probability\")\nplt.legend(loc=\"upper left\");", "_____no_output_____" ] ], [ [ "## 1.3 Probability Distributions\nLet's quickly recall what a probability distribution is: Let $Z$ be some random variable. Then associated with $Z$ is a probability distribution function that assigns probabilities to the different outcomes $Z$ can take. Graphically, a probability distribution is a curve where the probability of an outcome is proportional to the height of the curve. You can see examples in the first figure of this chapter.\n\nWe can divide random variables into three classifications:\n* $Z$ is discrete: Discrete random variables may only assume values on a specified list. Things like populations, movie ratings, and number of votes are all discrete random variables. Discrete random variables become more clear when we contrast them with...\n\n* $Z$ is continuous: Continuous random variable can take on arbitrarily exact values. For example, temperature, speed, time, color are all modeled as continuous variables because you can progressively make the values more and more precise.\n\n* $Z$ is mixed: Mixed random variables assign probabilities to both discrete and continuous random variables, i.e. it is a combination of the above two categories.\n\n### 1.3.1 Discrete Case\nIf $Z$ is discrete, then its distribution is called a probability mass function, which measures the probability $Z$ takes on the value $k$, denoted $P(Z=k)$. Note that the probability mass function completely describes the random variable $Z$, that is, if we know the mass function, we know how $Z$ should behave. There are popular probability mass functions that consistently appear: we will introduce them as needed, but let's introduce the first very useful probability mass function. We say $Z$ is Poisson-distributed if:\n\n$$P(Z = k) =\\frac{ \\lambda^k e^{-\\lambda} }{k!}, \\; \\; k=0,1,2, \\dots $$\n\n$\\lambda$ is called a parameter of the distribution, and it controls the distribution's shape. For the Poisson distribution, $\\lambda$ can be any positive number. By increasing $\\lambda$, we add more probability to larger values, and conversely by decreasing $\\lambda$ we add more probability to smaller values. One can describe $\\lambda$ as the intensity of the Poisson distribution.\n\nUnlike $\\lambda$, which can be any positive number, the value $k$ in the above formula must be a non-negative integer, i.e., $k$ must take on values 0,1,2, and so on. 
This is very important, because if you wanted to model a population you could not make sense of populations with 4.25 or 5.612 members.\n\nIf a random variable $Z$ has a Poisson mass distribution, we denote this by writing\n\n$$Z \\sim \\text{Poi}(\\lambda) $$\n\nOne useful property of the Poisson distribution is that its expected value is equal to its parameter, i.e.:\n\n$$E\\large[ \\;Z\\; | \\; \\lambda \\;\\large] = \\lambda $$\n\nWe will use this property often, so it's useful to remember. Below, we plot the probability mass distribution for different $\\lambda$ values. The first thing to notice is that by increasing $\\lambda$, we add more probability of larger values occurring. Second, notice that although the graph ends at 15, the distributions do not. They assign positive probability to every non-negative integer.", "_____no_output_____" ] ], [ [ "figsize(12.5, 4)\n\nimport scipy.stats as stats\na = np.arange(16)\npoi = stats.poisson\nlambda_ = [1.5, 4.25]\ncolours = [\"#348ABD\", \"#A60628\"]\n\nplt.bar(a, poi.pmf(a, lambda_[0]), color=colours[0],\n label=\"$\\lambda = %.1f$\" % lambda_[0], alpha=0.60,\n edgecolor=colours[0], lw=\"3\")\n\nplt.bar(a, poi.pmf(a, lambda_[1]), color=colours[1],\n label=\"$\\lambda = %.1f$\" % lambda_[1], alpha=0.60,\n edgecolor=colours[1], lw=\"3\")\n\nplt.xticks(a + 0.4, a)\nplt.legend()\nplt.ylabel(\"probability of $k$\")\nplt.xlabel(\"$k$\")\nplt.title(\"Probability mass function of a Poisson random variable; differing \\\n$\\lambda$ values\")", "_____no_output_____" ] ], [ [ "### 1.3.2 Continuous Case\nInstead of a probability mass function, a continuous random variable has a probability density function. This might seem like unnecessary nomenclature, but the density function and the mass function are very different creatures. An example of continuous random variable is a random variable with exponential density. The density function for an exponential random variable looks like this:\n\n$$f_Z(z | \\lambda) = \\lambda e^{-\\lambda z }, \\;\\; z\\ge 0$$\nLike a Poisson random variable, an exponential random variable can take on only non-negative values. But unlike a Poisson variable, the exponential can take on any non-negative values, including non-integral values such as 4.25 or 5.612401. This property makes it a poor choice for count data, which must be an integer, but a great choice for time data, temperature data (measured in Kelvins, of course), or any other precise and positive variable. The graph below shows two probability density functions with different $\\lambda$ values.\n\nWhen a random variable $Z$ has an exponential distribution with parameter $\\lambda$, we say $Z$ is exponential and write\n\n$$Z \\sim \\text{Exp}(\\lambda)$$\nGiven a specific $\\lambda$, the expected value of an exponential random variable is equal to the inverse of $\\lambda$, that is:\n\n$$E[\\; Z \\;|\\; \\lambda \\;] = \\frac{1}{\\lambda}$$", "_____no_output_____" ] ], [ [ "a = np.linspace(0, 4, 100)\nexpo = stats.expon\nlambda_ = [0.5, 1]\n\nfor l, c in zip(lambda_, colours):\n plt.plot(a, expo.pdf(a, scale=1. / l), lw=3,\n color=c, label=\"$\\lambda = %.1f$\" % l)\n plt.fill_between(a, expo.pdf(a, scale=1. / l), color=c, alpha=.33)\n\nplt.legend()\nplt.ylabel(\"PDF at $z$\")\nplt.xlabel(\"$z$\")\nplt.ylim(0, 1.2)\nplt.title(\"Probability density function of an Exponential random variable;\\\n differing $\\lambda$\");", "_____no_output_____" ] ], [ [ "### But what is $\\lambda \\;$?\nThis question is what motivates statistics. 
In the real world, $\\lambda$ is hidden from us. We see only $Z$, and must go backwards to try and determine $\\lambda$. The problem is difficult because there is no one-to-one mapping from $Z$ to $\\lambda$. Many different methods have been created to solve the problem of estimating $\\lambda$, but since $\\lambda$ is never actually observed, no one can say for certain which method is best!\n\nBayesian inference is concerned with beliefs about what $\\lambda$ might be. Rather than try to guess $\\lambda$ exactly, we can only talk about what $\\lambda$ is likely to be by assigning a probability distribution to $\\lambda$.\n\nThis might seem odd at first. After all, $\\lambda$ is fixed; it is not (necessarily) random! How can we assign probabilities to values of a non-random variable? Ah, we have fallen for our old, frequentist way of thinking. Recall that under Bayesian philosophy, we can assign probabilities if we interpret them as beliefs. And it is entirely acceptable to have beliefs about the parameter $\\lambda$.\n\n### 1.3.3 Example: Inferring behaviour from text-message data\nLet's try to model a more interesting example, one that concerns the rate at which a user sends and receives text messages:\n\nYou are given a series of daily text-message counts from a user of your system. The data, plotted over time, appears in the chart below. You are curious to know if the user's text-messaging habits have changed over time, either gradually or suddenly. How can you model this? (This is in fact my own text-message data. Judge my popularity as you wish.)", "_____no_output_____" ] ], [ [ "figsize(12.5, 3.5)\ncount_data = np.loadtxt(\"data/txtdata.csv\")\nn_count_data = len(count_data)\nplt.bar(np.arange(n_count_data), count_data, color=\"#348ABD\")\nplt.xlabel(\"Time (days)\")\nplt.ylabel(\"count of text-msgs received\")\nplt.title(\"Did the user's texting habits change over time?\")\nplt.xlim(0, n_count_data);", "_____no_output_____" ] ], [ [ "Before we start modeling, see what you can figure out just by looking at the chart above. Would you say there was a change in behaviour during this time period?\n\nHow can we start to model this? Well, as we have conveniently already seen, a Poisson random variable is a very appropriate model for this type of count data. Denoting day $i$'s text-message count by $C_i$,\n\n$$ C_i \\sim \\text{Poisson}(\\lambda) $$\nWe are not sure what the value of the $\\lambda$ parameter really is, however. Looking at the chart above, it appears that the rate might become higher late in the observation period, which is equivalent to saying that $\\lambda$ increases at some point during the observations. (Recall that a higher value of $\\lambda$ assigns more probability to larger outcomes. That is, there is a higher probability of many text messages having been sent on a given day.)\n\nHow can we represent this observation mathematically? Let's assume that on some day during the observation period (call it $\\tau$), the parameter $\\lambda$ suddenly jumps to a higher value. So we really have two $\\lambda$ parameters: one for the period before $\\tau$, and one for the rest of the observation period. 
In the literature, a sudden transition like this would be called a switchpoint:\n\n$$\n\\lambda = \n\\begin{cases}\n\\lambda_1 &amp; \\text{if } t \\lt \\tau \\cr\n\\lambda_2 &amp; \\text{if } t \\ge \\tau\n\\end{cases}\n$$\nIf, in reality, no sudden change occurred and indeed $\\lambda_1 = \\lambda_2$, then the $\\lambda$s posterior distributions should look about equal.\n\nWe are interested in inferring the unknown $\\lambda$s. To use Bayesian inference, we need to assign prior probabilities to the different possible values of $\\lambda$. What would be good prior probability distributions for $\\lambda_1$ and $\\lambda_2$? Recall that $\\lambda$ can be any positive number. As we saw earlier, the exponential distribution provides a continuous density function for positive numbers, so it might be a good choice for modeling $\\lambda_i$. But recall that the exponential distribution takes a parameter of its own, so we'll need to include that parameter in our model. Let's call that parameter $\\alpha$.\n\n$$\\begin{align}\n\\lambda_1 \\sim \\text{Exp}( \\alpha ) \\\\\\\n\\lambda_2 \\sim \\text{Exp}( \\alpha )\n\\end{align}$$\n$\\alpha$ is called a hyper-parameter or parent variable. In literal terms, it is a parameter that influences other parameters. Our initial guess at $\\alpha$ does not influence the model too strongly, so we have some flexibility in our choice. A good rule of thumb is to set the exponential parameter equal to the inverse of the average of the count data. Since we're modeling $\\lambda$ using an exponential distribution, we can use the expected value identity shown earlier to get:\n\n$$\\frac{1}{N}\\sum_{i=0}^N \\;C_i \\approx E[\\; \\lambda \\; |\\; \\alpha ] = \\frac{1}{\\alpha}$$\nAn alternative, and something I encourage the reader to try, would be to have two priors: one for each $\\lambda_i$. Creating two exponential distributions with different $\\alpha$ values reflects our prior belief that the rate changed at some point during the observations.\n\nWhat about $\\tau$? Because of the noisiness of the data, it's difficult to pick out a priori when $\\tau$ might have occurred. Instead, we can assign a uniform prior belief to every possible day. This is equivalent to saying\n\n$$\\begin{align}\n\\tau \\sim \\text{DiscreteUniform(1,70) }\\\\\\\\\n\\Rightarrow P( \\tau = k ) = \\frac{1}{70}\n\\end{align}$$\nSo after all this, what does our overall prior distribution for the unknown variables look like? Frankly, it doesn't matter. What we should understand is that it's an ugly, complicated mess involving symbols only a mathematician could love. And things will only get uglier the more complicated our models become. Regardless, all we really care about is the posterior distribution.\n\nWe next turn to PyMC3, a Python library for performing Bayesian analysis that is undaunted by the mathematical monster we have created.\n\n### 1.4 Introducing our first hammer: PyMC3\nPyMC3 is a Python library for programming Bayesian analysis [3]. It is a fast, well-maintained library. The only unfortunate part is that its documentation is lacking in certain areas, especially those that bridge the gap between beginner and hacker. One of this book's main goals is to solve that problem, and also to demonstrate why PyMC3 is so cool.\n\nWe will model the problem above using PyMC3. This type of programming is called probabilistic programming, an unfortunate misnomer that invokes ideas of randomly-generated code and has likely confused and frightened users away from this field. 
The code is not random; it is probabilistic in the sense that we create probability models using programming variables as the model's components. Model components are first-class primitives within the PyMC3 framework.\n\nBecause of the confusion engendered by the term probabilistic programming, I'll refrain from using it. Instead, I'll simply say programming, since that's what it really is.\n\nPyMC3 code is easy to read. The only novel thing should be the syntax. Simply remember that we are representing the model's components ($\\tau, \\lambda_1, \\lambda_2$ ) as variables.", "_____no_output_____" ] ], [ [ "import pymc3 as pm\nimport theano.tensor as tt\n\nwith pm.Model() as model:\n alpha = 1.0/count_data.mean() # Recall count_data is the\n # variable that holds our txt counts\n lambda_1 = pm.Exponential(\"lambda_1\", alpha)\n lambda_2 = pm.Exponential(\"lambda_2\", alpha)\n \n tau = pm.DiscreteUniform(\"tau\", lower=0, upper=n_count_data - 1)", "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ] ], [ [ "In the code above, we create the PyMC3 variables corresponding to $\\lambda_1$ and $\\lambda_2$. We assign them to PyMC3's stochastic variables, so-called because they are treated by the back end as random number generators.", "_____no_output_____" ] ], [ [ "with model:\n idx = np.arange(n_count_data) # Index\n lambda_ = pm.math.switch(tau > idx, lambda_1, lambda_2)", "_____no_output_____" ] ], [ [ "This code creates a new function lambda_, but really we can think of it as a random variable: the random variable $\\lambda$ from above. The switch() function assigns lambda_1 or lambda_2 as the value of lambda_, depending on what side of tau we are on. The values of lambda_ up until tau are lambda_1 and the values afterwards are lambda_2.\n\nNote that because lambda_1, lambda_2 and tau are random, lambda_ will be random. We are not fixing any variables yet.", "_____no_output_____" ] ], [ [ "with model:\n observation = pm.Poisson(\"obs\", lambda_, observed=count_data)", "_____no_output_____" ] ], [ [ "The variable observation combines our data, count_data, with our proposed data-generation scheme, given by the variable lambda_, through the observed keyword.\n\nThe code below will be explained in Chapter 3, but I show it here so you can see where our results come from. One can think of it as a learning step. The machinery being employed is called Markov Chain Monte Carlo (MCMC), which I also delay explaining until Chapter 3. This technique returns thousands of random variables from the posterior distributions of $\\lambda_1, \\lambda_2$ and $\\tau$. We can plot a histogram of the random variables to see what the posterior distributions look like. 
Below, we collect the samples (called traces in the MCMC literature) into histograms.", "_____no_output_____" ] ], [ [ "with model:\n step = pm.Metropolis()\n trace = pm.sample(10000, tune=5000,step=step)", "Multiprocess sampling (2 chains in 2 jobs)\nCompoundStep\n>Metropolis: [tau]\n>Metropolis: [lambda_2]\n>Metropolis: [lambda_1]\nSampling 2 chains: 100%|██████████| 30000/30000 [00:17<00:00, 786.57draws/s] \nThe number of effective samples is smaller than 25% for some parameters.\n" ], [ "lambda_1_samples = trace['lambda_1']\nlambda_2_samples = trace['lambda_2']\ntau_samples = trace['tau']", "_____no_output_____" ], [ "figsize(12.5, 10)\n#histogram of the samples:\n\nax = plt.subplot(311)\nax.set_autoscaley_on(False)\n\nplt.hist(lambda_1_samples, histtype='stepfilled', bins=30, alpha=0.85,\n label=\"posterior of $\\lambda_1$\", color=\"#A60628\", normed=True)\nplt.legend(loc=\"upper left\")\nplt.title(r\"\"\"Posterior distributions of the variables\n $\\lambda_1,\\;\\lambda_2,\\;\\tau$\"\"\")\nplt.xlim([15, 30])\nplt.xlabel(\"$\\lambda_1$ value\")\n\nax = plt.subplot(312)\nax.set_autoscaley_on(False)\nplt.hist(lambda_2_samples, histtype='stepfilled', bins=30, alpha=0.85,\n label=\"posterior of $\\lambda_2$\", color=\"#7A68A6\", normed=True)\nplt.legend(loc=\"upper left\")\nplt.xlim([15, 30])\nplt.xlabel(\"$\\lambda_2$ value\")\n\nplt.subplot(313)\nw = 1.0 / tau_samples.shape[0] * np.ones_like(tau_samples)\nplt.hist(tau_samples, bins=n_count_data, alpha=1,\n label=r\"posterior of $\\tau$\",\n color=\"#467821\", weights=w, rwidth=2.)\nplt.xticks(np.arange(n_count_data))\n\nplt.legend(loc=\"upper left\")\nplt.ylim([0, .75])\nplt.xlim([35, len(count_data)-20])\nplt.xlabel(r\"$\\tau$ (in days)\")\nplt.ylabel(\"probability\");", "_____no_output_____" ] ], [ [ "### Interpretation\nRecall that Bayesian methodology returns a distribution. Hence we now have distributions to describe the unknown $\\lambda$s and $\\tau$. What have we gained? Immediately, we can see the uncertainty in our estimates: the wider the distribution, the less certain our posterior belief should be. We can also see what the plausible values for the parameters are: $\\lambda_1$ is around 18 and $\\lambda_2$ is around 23. The posterior distributions of the two $\\lambda$s are clearly distinct, indicating that it is indeed likely that there was a change in the user's text-message behaviour.\n\nWhat other observations can you make? If you look at the original data again, do these results seem reasonable?\n\nNotice also that the posterior distributions for the $\\lambda$s do not look like exponential distributions, even though our priors for these variables were exponential. In fact, the posterior distributions are not really of any form that we recognize from the original model. But that's OK! This is one of the benefits of taking a computational point of view. If we had instead done this analysis using mathematical approaches, we would have been stuck with an analytically intractable (and messy) distribution. Our use of a computational approach makes us indifferent to mathematical tractability.\n\nOur analysis also returned a distribution for $\\tau$. Its posterior distribution looks a little different from the other two because it is a discrete random variable, so it doesn't assign probabilities to intervals. We can see that near day 45, there was a 50% chance that the user's behaviour changed. 
Had no change occurred, or had the change been gradual over time, the posterior distribution of $\\tau$ would have been more spread out, reflecting that many days were plausible candidates for $\\tau$. By contrast, in the actual results we see that only three or four days make any sense as potential transition points.\n\n### Why would I want samples from the posterior, anyways?\nWe will deal with this question for the remainder of the book, and it is an understatement to say that it will lead us to some amazing results. For now, let's end this chapter with one more example.\n\nWe'll use the posterior samples to answer the following question: what is the expected number of texts at day $t, \\; 0 \\le t \\le 70$ ? Recall that the expected value of a Poisson variable is equal to its parameter $\\lambda$. Therefore, the question is equivalent to what is the expected value of $\\lambda$ at time $t$?\n\nIn the code below, let $i$ index samples from the posterior distributions. Given a day $t$, we average over all possible $\\lambda_i$ for that day $t$, using $\\lambda_i = \\lambda_{1,i}$ if $t \\lt \\tau_i$ (that is, if the behaviour change has not yet occurred), else we use $\\lambda_i = \\lambda_{2,i}$.", "_____no_output_____" ] ], [ [ "figsize(12.5, 5)\n# tau_samples, lambda_1_samples, lambda_2_samples contain\n# N samples from the corresponding posterior distribution\nN = tau_samples.shape[0]\nexpected_texts_per_day = np.zeros(n_count_data)\nfor day in range(0, n_count_data):\n # ix is a bool index of all tau samples corresponding to\n # the switchpoint occurring prior to value of 'day'\n ix = day < tau_samples\n # Each posterior sample corresponds to a value for tau.\n # for each day, that value of tau indicates whether we're \"before\"\n # (in the lambda1 \"regime\") or\n # \"after\" (in the lambda2 \"regime\") the switchpoint.\n # by taking the posterior sample of lambda1/2 accordingly, we can average\n # over all samples to get an expected value for lambda on that day.\n # As explained, the \"message count\" random variable is Poisson distributed,\n # and therefore lambda (the poisson parameter) is the expected value of\n # \"message count\".\n expected_texts_per_day[day] = (lambda_1_samples[ix].sum()\n + lambda_2_samples[~ix].sum()) / N\n\n\nplt.plot(range(n_count_data), expected_texts_per_day, lw=4, color=\"#E24A33\",\n label=\"expected number of text-messages received\")\nplt.xlim(0, n_count_data)\nplt.xlabel(\"Day\")\nplt.ylabel(\"Expected # text-messages\")\nplt.title(\"Expected number of text-messages received\")\nplt.ylim(0, 60)\nplt.bar(np.arange(len(count_data)), count_data, color=\"#348ABD\", alpha=0.65,\n label=\"observed texts per day\")\n\nplt.legend(loc=\"upper left\");", "_____no_output_____" ] ], [ [ "Our analysis shows strong support for believing the user's behavior did change ($\\lambda_1$ would have been close in value to $\\lambda_2$ had this not been true), and that the change was sudden rather than gradual (as demonstrated by $\\tau$'s strongly peaked posterior distribution). We can speculate what might have caused this: a cheaper text-message rate, a recent weather-to-text subscription, or perhaps a new relationship.", "_____no_output_____" ] ] ]
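The chapter's closing claim of "strong support" for a behaviour change can be made quantitative directly from the posterior samples already drawn. The short sketch below is an addition, not part of the original text; it only assumes the `lambda_1_samples` and `lambda_2_samples` arrays extracted from the trace above.

```python
# Fraction of posterior samples in which the texting rate increased,
# and the average relative size of that increase.
p_increase = np.mean(lambda_1_samples < lambda_2_samples)
relative_increase = np.mean(lambda_2_samples / lambda_1_samples - 1.0)

print("P(lambda_2 > lambda_1) = %.3f" % p_increase)
print("Mean relative increase in rate: %.1f%%" % (100 * relative_increase))
```

A value of `p_increase` very close to 1 corresponds to the strong posterior evidence for a change described in the interpretation above.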
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7a455b73bc9fe95b224f1eb269a814381ce3172
15,562
ipynb
Jupyter Notebook
testing/examples/tight_layout_guide.ipynb
pchaos/quanttesting
98331670547e8a45ba93b49f3e9c660495645114
[ "MIT" ]
5
2020-04-08T14:14:05.000Z
2021-06-29T03:42:01.000Z
testing/examples/tight_layout_guide.ipynb
pchaos/quanttesting
98331670547e8a45ba93b49f3e9c660495645114
[ "MIT" ]
null
null
null
testing/examples/tight_layout_guide.ipynb
pchaos/quanttesting
98331670547e8a45ba93b49f3e9c660495645114
[ "MIT" ]
7
2020-04-15T15:07:39.000Z
2022-03-23T05:44:02.000Z
45.502924
929
0.597095
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Tight Layout guide\n\n\nHow to use tight-layout to fit plots within your figure cleanly.\n\n*tight_layout* automatically adjusts subplot params so that the\nsubplot(s) fits in to the figure area. This is an experimental\nfeature and may not work for some cases. It only checks the extents\nof ticklabels, axis labels, and titles.\n\nAn alternative to *tight_layout* is :doc:`constrained_layout\n</tutorials/intermediate/constrainedlayout_guide>`.\n\n\nSimple Example\n==============\n\nIn matplotlib, the location of axes (including subplots) are specified in\nnormalized figure coordinates. It can happen that your axis labels or\ntitles (or sometimes even ticklabels) go outside the figure area, and are thus\nclipped.\n", "_____no_output_____" ] ], [ [ "# sphinx_gallery_thumbnail_number = 7\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.rcParams['savefig.facecolor'] = \"0.8\"\n\n\ndef example_plot(ax, fontsize=12):\n ax.plot([1, 2])\n\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Title', fontsize=fontsize)\n\nplt.close('all')\nfig, ax = plt.subplots()\nexample_plot(ax, fontsize=24)", "_____no_output_____" ] ], [ [ "To prevent this, the location of axes needs to be adjusted. For\nsubplots, this can be done by adjusting the subplot params\n(`howto-subplots-adjust`). Matplotlib v1.1 introduces a new\ncommand :func:`~matplotlib.pyplot.tight_layout` that does this\nautomatically for you.\n\n", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nexample_plot(ax, fontsize=24)\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "Note that :func:`matplotlib.pyplot.tight_layout` will only adjust the\nsubplot params when it is called. In order to perform this adjustment each\ntime the figure is redrawn, you can call ``fig.set_tight_layout(True)``, or,\nequivalently, set the ``figure.autolayout`` rcParam to ``True``.\n\nWhen you have multiple subplots, often you see labels of different\naxes overlapping each other.\n\n", "_____no_output_____" ] ], [ [ "plt.close('all')\n\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\nexample_plot(ax4)", "_____no_output_____" ] ], [ [ ":func:`~matplotlib.pyplot.tight_layout` will also adjust spacing between\nsubplots to minimize the overlaps.\n\n", "_____no_output_____" ] ], [ [ "fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\nexample_plot(ax4)\nplt.tight_layout()", "_____no_output_____" ] ], [ [ ":func:`~matplotlib.pyplot.tight_layout` can take keyword arguments of\n*pad*, *w_pad* and *h_pad*. These control the extra padding around the\nfigure border and between subplots. The pads are specified in fraction\nof fontsize.\n\n", "_____no_output_____" ] ], [ [ "fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\nexample_plot(ax4)\nplt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)", "_____no_output_____" ] ], [ [ ":func:`~matplotlib.pyplot.tight_layout` will work even if the sizes of\nsubplots are different as far as their grid specification is\ncompatible. 
In the example below, *ax1* and *ax2* are subplots of a 2x2\ngrid, while *ax3* is of a 1x2 grid.\n\n", "_____no_output_____" ] ], [ [ "plt.close('all')\nfig = plt.figure()\n\nax1 = plt.subplot(221)\nax2 = plt.subplot(223)\nax3 = plt.subplot(122)\n\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\n\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "It works with subplots created with\n:func:`~matplotlib.pyplot.subplot2grid`. In general, subplots created\nfrom the gridspec (:doc:`/tutorials/intermediate/gridspec`) will work.\n\n", "_____no_output_____" ] ], [ [ "plt.close('all')\nfig = plt.figure()\n\nax1 = plt.subplot2grid((3, 3), (0, 0))\nax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)\nax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)\nax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)\n\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\nexample_plot(ax4)\n\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "Although not thoroughly tested, it seems to work for subplots with\naspect != \"auto\" (e.g., axes with images).\n\n", "_____no_output_____" ] ], [ [ "arr = np.arange(100).reshape((10, 10))\n\nplt.close('all')\nfig = plt.figure(figsize=(5, 4))\n\nax = plt.subplot(111)\nim = ax.imshow(arr, interpolation=\"none\")\n\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "Caveats\n=======\n\n * :func:`~matplotlib.pyplot.tight_layout` only considers ticklabels, axis\n labels, and titles. Thus, other artists may be clipped and also may\n overlap.\n\n * It assumes that the extra space needed for ticklabels, axis labels,\n and titles is independent of original location of axes. This is\n often true, but there are rare cases where it is not.\n\n * pad=0 clips some of the texts by a few pixels. This may be a bug or\n a limitation of the current algorithm and it is not clear why it\n happens. Meanwhile, use of pad at least larger than 0.3 is\n recommended.\n\nUse with GridSpec\n=================\n\nGridSpec has its own :func:`~matplotlib.gridspec.GridSpec.tight_layout` method\n(the pyplot api :func:`~matplotlib.pyplot.tight_layout` also works).\n\n", "_____no_output_____" ] ], [ [ "import matplotlib.gridspec as gridspec\n\nplt.close('all')\nfig = plt.figure()\n\ngs1 = gridspec.GridSpec(2, 1)\nax1 = fig.add_subplot(gs1[0])\nax2 = fig.add_subplot(gs1[1])\n\nexample_plot(ax1)\nexample_plot(ax2)\n\ngs1.tight_layout(fig)", "_____no_output_____" ] ], [ [ "You may provide an optional *rect* parameter, which specifies the bounding box\nthat the subplots will be fit inside. 
The coordinates must be in normalized\nfigure coordinates and the default is (0, 0, 1, 1).\n\n", "_____no_output_____" ] ], [ [ "fig = plt.figure()\n\ngs1 = gridspec.GridSpec(2, 1)\nax1 = fig.add_subplot(gs1[0])\nax2 = fig.add_subplot(gs1[1])\n\nexample_plot(ax1)\nexample_plot(ax2)\n\ngs1.tight_layout(fig, rect=[0, 0, 0.5, 1])", "_____no_output_____" ] ], [ [ "For example, this can be used for a figure with multiple gridspecs.\n\n", "_____no_output_____" ] ], [ [ "fig = plt.figure()\n\ngs1 = gridspec.GridSpec(2, 1)\nax1 = fig.add_subplot(gs1[0])\nax2 = fig.add_subplot(gs1[1])\n\nexample_plot(ax1)\nexample_plot(ax2)\n\ngs1.tight_layout(fig, rect=[0, 0, 0.5, 1])\n\ngs2 = gridspec.GridSpec(3, 1)\n\nfor ss in gs2:\n ax = fig.add_subplot(ss)\n example_plot(ax)\n ax.set_title(\"\")\n ax.set_xlabel(\"\")\n\nax.set_xlabel(\"x-label\", fontsize=12)\n\ngs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.5)\n\n# We may try to match the top and bottom of two grids ::\ntop = min(gs1.top, gs2.top)\nbottom = max(gs1.bottom, gs2.bottom)\n\ngs1.update(top=top, bottom=bottom)\ngs2.update(top=top, bottom=bottom)\nplt.show()", "_____no_output_____" ] ], [ [ "While this should be mostly good enough, adjusting top and bottom\nmay require adjustment of hspace also. To update hspace & vspace, we\ncall :func:`~matplotlib.gridspec.GridSpec.tight_layout` again with updated\nrect argument. Note that the rect argument specifies the area including the\nticklabels, etc. Thus, we will increase the bottom (which is 0 for the normal\ncase) by the difference between the *bottom* from above and the bottom of each\ngridspec. Same thing for the top.\n\n", "_____no_output_____" ] ], [ [ "fig = plt.gcf()\n\ngs1 = gridspec.GridSpec(2, 1)\nax1 = fig.add_subplot(gs1[0])\nax2 = fig.add_subplot(gs1[1])\n\nexample_plot(ax1)\nexample_plot(ax2)\n\ngs1.tight_layout(fig, rect=[0, 0, 0.5, 1])\n\ngs2 = gridspec.GridSpec(3, 1)\n\nfor ss in gs2:\n ax = fig.add_subplot(ss)\n example_plot(ax)\n ax.set_title(\"\")\n ax.set_xlabel(\"\")\n\nax.set_xlabel(\"x-label\", fontsize=12)\n\ngs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.5)\n\ntop = min(gs1.top, gs2.top)\nbottom = max(gs1.bottom, gs2.bottom)\n\ngs1.update(top=top, bottom=bottom)\ngs2.update(top=top, bottom=bottom)\n\ntop = min(gs1.top, gs2.top)\nbottom = max(gs1.bottom, gs2.bottom)\n\ngs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),\n 0.5, 1 - (gs1.top-top)])\ngs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),\n None, 1 - (gs2.top-top)],\n h_pad=0.5)", "_____no_output_____" ] ], [ [ "Legends and Annotations\n=======================\n\nPre Matplotlib 2.2, legends and annotations were excluded from the bounding\nbox calculations that decide the layout. Subsequently these artists were\nadded to the calculation, but sometimes it is undesirable to include them.\nFor instance in this case it might be good to have the axes shring a bit\nto make room for the legend:\n\n", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(4, 3))\nlines = ax.plot(range(10), label='A simple plot')\nax.legend(bbox_to_anchor=(0.7, 0.5), loc='center left',)\nfig.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "However, sometimes this is not desired (quite often when using\n``fig.savefig('outname.png', bbox_inches='tight')``). 
In order to\nremove the legend from the bounding box calculation, we simply set its\nbounding ``leg.set_in_layout(False)`` and the legend will be ignored.\n\n", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(4, 3))\nlines = ax.plot(range(10), label='B simple plot')\nleg = ax.legend(bbox_to_anchor=(0.7, 0.5), loc='center left',)\nleg.set_in_layout(False)\nfig.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "Use with AxesGrid1\n==================\n\nWhile limited, the axes_grid1 toolkit is also supported.\n\n", "_____no_output_____" ] ], [ [ "from mpl_toolkits.axes_grid1 import Grid\n\nplt.close('all')\nfig = plt.figure()\ngrid = Grid(fig, rect=111, nrows_ncols=(2, 2),\n axes_pad=0.25, label_mode='L',\n )\n\nfor ax in grid:\n example_plot(ax)\nax.title.set_visible(False)\n\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "Colorbar\n========\n\nIf you create a colorbar with the :func:`~matplotlib.pyplot.colorbar`\ncommand, the created colorbar is an instance of Axes, *not* Subplot, so\ntight_layout does not work. With Matplotlib v1.1, you may create a\ncolorbar as a subplot using the gridspec.\n\n", "_____no_output_____" ] ], [ [ "plt.close('all')\narr = np.arange(100).reshape((10, 10))\nfig = plt.figure(figsize=(4, 4))\nim = plt.imshow(arr, interpolation=\"none\")\n\nplt.colorbar(im, use_gridspec=True)\n\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "Another option is to use AxesGrid1 toolkit to\nexplicitly create an axes for colorbar.\n\n", "_____no_output_____" ] ], [ [ "from mpl_toolkits.axes_grid1 import make_axes_locatable\n\nplt.close('all')\narr = np.arange(100).reshape((10, 10))\nfig = plt.figure(figsize=(4, 4))\nim = plt.imshow(arr, interpolation=\"none\")\n\ndivider = make_axes_locatable(plt.gca())\ncax = divider.append_axes(\"right\", \"5%\", pad=\"3%\")\nplt.colorbar(im, cax=cax)\n\nplt.tight_layout()", "_____no_output_____" ] ] ]
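As noted at the top of this guide, constrained_layout is an alternative to tight_layout, and it handles colorbars created with ``fig.colorbar`` without the gridspec or axes_grid1 workarounds shown above. The cell below is an added sketch, not part of the original guide, and assumes a Matplotlib version recent enough to support the ``constrained_layout`` keyword (2.2+).

```python
import numpy as np
import matplotlib.pyplot as plt

arr = np.arange(100).reshape((10, 10))

# constrained_layout reserves space for the colorbar automatically
fig, ax = plt.subplots(figsize=(4, 4), constrained_layout=True)
im = ax.imshow(arr, interpolation="none")
fig.colorbar(im, ax=ax)
plt.show()
```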
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7a45658984e52f4bae951741b65be98921cd9a4
30,867
ipynb
Jupyter Notebook
uci-pharmsci/assignments/energy_minimization/energy_minimization_assignment.ipynb
matthagy/drug-computing
da98dc4526269ef2557e607467a8d4c58cc24a49
[ "CC-BY-4.0", "MIT" ]
null
null
null
uci-pharmsci/assignments/energy_minimization/energy_minimization_assignment.ipynb
matthagy/drug-computing
da98dc4526269ef2557e607467a8d4c58cc24a49
[ "CC-BY-4.0", "MIT" ]
null
null
null
uci-pharmsci/assignments/energy_minimization/energy_minimization_assignment.ipynb
matthagy/drug-computing
da98dc4526269ef2557e607467a8d4c58cc24a49
[ "CC-BY-4.0", "MIT" ]
null
null
null
52.945111
692
0.633136
[ [ [ "# Energy Minimization Assignment, PharmSci 175/275\n##### By David Mobley (UCI), Jan. 2018\n\n##### Adapted with permission from an assignment by M. Scott Shell (UCSB)\n\n## Overview\nIn this assignment, you will begin with a provided template and several functions, as well as a Fortran library, and add additional code to perform a conjugate-gradient minimization. That is, you will write a conjugate-gradient minimizer. You will then apply this minimize to generate Lennard-Jones clusters with varying numbers of particles, and look at how the energy varies as a function of cluster size. \n\nThe Jupyter notebook for this assignment is laid out with action items YOU need to take labeled as Step 1-7. These are interspersed with background information on the problem, some examples, and a sandbox section to tinker with some of the functions. So read on, and watch for the sections which require your action.\n\n## What are Lennard-Jones clusters?\n\nClusters are small, stable packings of (often spherical) particles. These particles could be colloidal particles, nanoparticles, etc. There has been considerable work spent studying these clusters over the years, from atomic sizes up to colloidal particles in the nanometer to micrometers scale. Cluster analysis is important to understanding a range of phenomena, including structures of solids, aggregation and precipitation of particles, the structure of nanomaterials, self-assembly behavior of synthetic and biomolecular systems, and diffusion in dense liquids.\n\nA cluster can be characterized by the number and type of particles and the energetic interactions between them. Here, we will examine Lennard-Jones (LJ) clusters, which are clusters of simple attractive spherical particles with interactions modeled by the Lennard-Jones interaction. \n\nFor LJ clusters, there are cluster sizes of unusual stability. These are called magic number clusters and correspond to cluster sizes where the packing of atoms is particularly efficient, leading to very favorable energies and hence exceptional stability. The most stable such clusters are built from an icosahedral arrangement of particles, and the first few such **magic numbers** for cluster sizes of icosahedral geometries are 13, 19, 38, 55, and 75. \n\nThese clusters are still interesting from a basic physical chemistry point of view, but our interest here is mainly in (a) energy minimization algorithms, and (b) learning how to do non-trivial numerics in Python.\n\n## Here, we will energy minimize Lennard-Jones clusters of different sizes\n\nHere, we will examine different numbers of particles and attempt to find the minimum energy cluster for each number of particles. Our energy model will use the LJ potential in its dimensionless form (meaning that we have changed units so that all of the constants are hidden in the units). We denote this by putting a star on the potential:\n\n\\begin{equation}\nU^* = \\sum \\limits_{i<j} 4(r_{ij}^{-12} - r_{ij}^{-6})\n\\end{equation}\n\nWe will start with a random initial configuration of particles, and try to use an energy minimization algorithm to find the most stable configuration of particles. But when there are more than just a few particles, there will be more than one local minimum, so there is no guarantee the energy minimizer will find the global minimum. 
In such cases, we will need to minimize from random initial configurations many times in order to ensure we locate the global minimum.\nThere is also the possibility of forming multiple separate clusters separated by a significant difference. This is not unlikely, since the LJ interaction is only very weakly attractive at large distances. So, to ensure we form single clusters, we will use a weak biasing potential to pull all of the particles towards the origin, biasing the system towards forming a single cluster. Otherwise, the LJ potential will tend to be too weak to pull together very distant particles in these tests. We will use a harmonic biasing potential, so that the total potential energy (“force field”) is:\n\n\\begin{equation}\nU^* = \\sum\\limits_i \\alpha |\\mathbf{r}_i|^2 + \\sum \\limits_{i<j} 4(r_{ij}^{-12} - r_{ij}^{-6})\n\\end{equation}\n\nHere we will use $\\alpha = 0.0001 N^{-2/3}$ where $N$ is the number of particles; this will be a very small number. This particular form is selected so that the energy due to this term for a cluster of $N$ particles is, on average, constant regardless of $N$.\n\n## Additional details\nFor this assignment, your job is to perform a conjugate-gradient minimization of Lennard-Jones particles which are initially randomly distributed in space. I am providing several items for you:\n* A Fortran library (emlib) which you can use within Python to calculate energies and forces\n* A Python library (pos_to_pdb.py) which you can use to output structures to your disk to visualize motion of your particles (for example with PyMol) if you so desire\n* A template for your assignment (below) in iPython notebook format; this also will be posted on the class website in plain Python format in case my experiment with iPython notebooks fails here. \n * This template contains some code which will get you started, including code for a line search minimization. \n * It also contains places for you to write the functions (outlined below) you need to write to complete the assignment.\n \n## A quick (but important) note on Notebook usage:\niPython notebooks such as this one often contain a variety of cells containing code. These are normally intended to be run, which you can when you have an individual cell selected, by hitting the button at the top with a 'play' symbol, or by typing shift-enter. If you do NOT do so on each of the cells defining variables/functions which will be used here, then you will encounter an error about undefined variables when you run the later cells. \n \n## Your step 1 for the assignment: Start by doing some file bookkeeping:\n * Find `emlib.f90` and optional utility `pos_to_pdb.py` in this directory.\n * In the command prompt navigate to that folder and type 'f2py -c -m emlib emlib.f90' which should compile the fortran library for use within python (For more on F2Py, refer to the [f2py documentation](https://docs.scipy.org/doc/numpy-dev/f2py/)). In OS X, this may require you to install the (free) XCode developer tools (available from the Mac App store) and the command-line developer tools first (the latter via `xcode-select --install`). In Linux it should just work. Windows would be a hurdle.\n * In your command prompt, start theis Jupyter notebook (in OSX this would be something like 'Jupyter notebook 272EnergyMinimization'), which should open it in your web browser; you're running it already unless you are looking at the HTML version of this file.\n \nTemplate Python code for the assignment is provided below. 
I suggest making a new notebook which is a copy of this one (perhaps containing your name in the filename) and working from there. \n \n \n ## Next, we prep Python for the work:\n \n First we import the numpy numerical library we're going to need, as well as the compiled Fortran library emlib", "_____no_output_____" ] ], [ [ "import numpy as np\nimport emlib\nfrom pos_to_pdb import * \n#This would allow you to export coordinates if you want, later", "_____no_output_____" ] ], [ [ "## Important technical note: Unit masses, etc.\n\nNote that all of the following code will assume unit atomic masses, such that forces and accelerations are equal -- that is, instead of $F=ma$ we write $F=a$ assuming that $m=1$. We also drop most constants. This is a relatively common trick in physics when you are interested only in seeing how the basic equations work, and not in quantitative comparison with experimental numbers. It can be called using \"dimensionless units\". ", "_____no_output_____" ], [ "## Then we define the LineSearch function:", "_____no_output_____" ], [ "Here is the `LineSearch` function which is provided for you. Read the prototype (definition) and documentation to understand what it needs and what it will do (note that you do NOT need to read all the code):", "_____no_output_____" ] ], [ [ "def LineSearch(Pos, Dir, dx, EFracTol, Accel = 1.5, MaxInc = 10.,\n MaxIter = 10000):\n \"\"\"Performs a line search along direction Dir.\n Input:\n Pos: starting positions, (N,3) array\n Dir: (N,3) array of gradient direction\n dx: initial step amount, a float\n EFracTol: fractional energy tolerance\n Accel: acceleration factor\n MaxInc: the maximum increase in energy for bracketing\n MaxIter: maximum number of iteration steps\nOutput:\n PEnergy: value of potential energy at minimum along Dir\n PosMin: minimum energy (N,3) position array along Dir\n\"\"\"\n #start the iteration counter\n Iter = 0\n\n #find the normalized direction\n NormDir = Dir / np.sqrt(np.sum(Dir * Dir))\n\n #take the first two steps and compute energies\n Dists = [0., dx]\n PEs = [emlib.calcenergy(Pos + NormDir * x) for x in Dists]\n\n #if the second point is not downhill in energy, back\n #off and take a shorter step until we find one\n while PEs[1] > PEs[0]:\n Iter += 1\n dx = dx * 0.5\n Dists[1] = dx\n PEs[1] = emlib.calcenergy(Pos + NormDir * dx)\n\n #find a third point\n Dists.append( 2. * dx )\n PEs.append( emlib.calcenergy(Pos + NormDir * 2. 
* dx) )\n\n #keep stepping forward until the third point is higher\n #in energy; then we have bracketed a minimum\n while PEs[2] < PEs[1]:\n Iter += 1\n\n #find a fourth point and evaluate energy\n Dists.append( Dists[-1] + dx )\n PEs.append( emlib.calcenergy(Pos + NormDir * Dists[-1]) )\n\n #check if we increased too much in energy; if so, back off\n if (PEs[3] - PEs[0]) > MaxInc * (PEs[0] - PEs[2]):\n PEs = PEs[:3]\n Dists = Dists[:3]\n dx = dx * 0.5\n else:\n #shift all of the points over\n PEs = PEs[-3:]\n Dists = Dists[-3:]\n dx = dx * Accel \n \n #we've bracketed a minimum; now we want to find it to high\n #accuracy\n OldPE3 = 1.e300\n while True:\n Iter += 1\n if Iter > MaxIter:\n print(\"Warning: maximum number of iterations reached in line search.\")\n break\n\n #store distances for ease of code-reading\n d0, d1, d2 = Dists\n PE0, PE1, PE2 = PEs\n\n #use a parobolic approximation to estimate the location\n #of the minimum\n d10 = d0 - d1\n d12 = d2 - d1\n Num = d12*d12*(PE0-PE1) - d10*d10*(PE2-PE1)\n Dem = d12*(PE0-PE1) - d10*(PE2-PE1)\n if Dem == 0:\n #parabolic extrapolation won't work; set new dist = 0\n d3 = 0\n else:\n #location of parabolic minimum\n d3 = d1 + 0.5 * Num / Dem\n\n #compute the new potential energy\n PE3 = emlib.calcenergy(Pos + NormDir * d3)\n\n #sometimes the parabolic approximation can fail;\n #check if d3 is out of range < d0 or > d2 or the new energy is higher\n if d3 < d0 or d3 > d2 or PE3 > PE0 or PE3 > PE1 or PE3 > PE2:\n #instead, just compute the new distance by bisecting two\n #of the existing points along the line search\n if abs(d2 - d1) > abs(d0 - d1):\n d3 = 0.5 * (d2 + d1)\n else:\n d3 = 0.5 * (d0 + d1)\n PE3 = emlib.calcenergy(Pos + NormDir * d3)\n\n #decide which three points to keep; we want to keep\n #the three that are closest to the minimum\n if d3 < d1:\n if PE3 < PE1:\n #get rid of point 2\n Dists, PEs = [d0, d3, d1], [PE0, PE3, PE1]\n else:\n #get rid of point 0\n Dists, PEs = [d3, d1, d2], [PE3, PE1, PE2]\n else:\n if PE3 < PE1:\n #get rid of point 0\n Dists, PEs = [d1, d3, d2], [PE1, PE3, PE2]\n else:\n #get rid of point 2\n Dists, PEs = [d0, d1, d3], [PE0, PE1, PE3]\n\n #check how much we've changed\n if abs(OldPE3 - PE3) < EFracTol * abs(PE3):\n #the fractional change is less than the tolerance,\n #so we are done and can exit the loop\n break\n OldPE3 = PE3\n\n #return the position array at the minimum (point 1)\n PosMin = Pos + NormDir * Dists[1]\n PEMin = PEs[1]\n\n return PEMin, PosMin", "_____no_output_____" ] ], [ [ "## Step 2: Write a function to assign random initial positions to your atoms\n\nWe need a function that can randomly place N atoms in a box with sides of length L. Write a function based on a tool from the numpy 'random' module to do this. Some hints are in order:\n\n* NumPy contains a ‘random’ module which is good for obtaining random numbers and/or arrays. For example, if you have imported numpy as np, then np.random.random(shape) returns a random array with the specified shape (i.e. ‘np.random.random(3)’ would be a 3x1 array) with elements randomly selected between 0 to 1. Try this out: ", "_____no_output_____" ] ], [ [ "a = np.random.random(3)\nprint(\"a=\\n\",a)\nb = np.random.random((2,3))\nprint(\"b=\\n\",b)", "a=\n [ 0.27969529 0.37836589 0.96785443]\nb=\n [[ 0.37068791 0.64081204 0.21422213]\n [ 0.471194 0.28575791 0.54468387]]\n" ] ], [ [ "* Note that in your function, you want the numbers to run from 0 to L. 
You might try out what happens if you multiply 'a' and 'b' in the code above by some number.\n\nNow, write your function. I've written the doc string and some comments for you, but you have to fill in its inner workings:", "_____no_output_____" ] ], [ [ "def InitPositions(N, L):\n \"\"\"Returns an array of initial positions of each atom,\nplaced randomly within a box of dimensions L.\nInput:\n N: number of atoms\n L: box width\nOutput:\n Pos: (N,3) array of positions\n\"\"\"\n #### WRITE YOUR CODE HERE ####\n ## In my code, I can accomplish this function in 1 line \n ## using a numpy function.\n ## Yours can be longer if you want. It's more important that it be right than that it be short.\n \n return Pos", "_____no_output_____" ] ], [ [ "## Step 3: Write the Conjugate Gradient function described below\n\nFill in code for the ConjugateGradient function below based on the discussion in class and below, supplemented by your reading of Leach's book (and other online sources if needed). Some additional guidance and hints are warranted first.\n\n### Hints for ConjugateGradient:\n* As discussed/demonstrated above, a LineSearch function is already provided for you here\n* You are going to want to keep doing line searches until the energy stops changing. Write a loop to do this, and store your evaluated energies as you go.\n* A fortran library, `emlib`, is provided for you to calculate energies and forces. You should be able to ask for 'help(emlib)' for info on usage. You can also look directly at the Fortran code if you would like, though this may be less helpful.\n* You can get the potential energy and forces using the provided library using functions from emlib. For example, if `Pos` is an array of positions: \n\n `PEnergy, Forces = emlib.calcenergyforces(Pos)`\n \n `Forces = emlib.calcforces( Pos )`\n* Conjugate gradient does not specify an initial direction. Your initial search should be in the direction of the force. \n* After the initial line search, subsequent line search directions $i$ should be found using this expression for $v_i$, the direction matrix:\n\n \\begin{equation}\n \\mathbf{v}_i^N = \\mathbf{f}_i^N + \\gamma_i \\mathbf{v}_{i-1}^N\n \\end{equation}\n \n where\n \n \\begin{equation}\n \\gamma_i = \\frac{ (\\mathbf{f}_i^N-\\mathbf{f}_{i-1}^N) \\mathbf{f}_i^N}{\\mathbf{f}_{i-1}^N \\mathbf{f}_{i-1}^N}\n \\end{equation}\n \n Note that here, $\\mathbf{f}_i^N$ denotes the force on the particles at step $i$ (and hence it has 3N dimensions - $x$, $y$, and $z$ for each particle) and $\\mathbf{f}_{i-1}^N$ is the force at the last ($i-1$) step. Note that the forces are a collection of vectors, one vector for the force on each particle. $\\gamma_i$ should be just a number (scalar). Hint: Note that if you have a force array consisting of a set of vectors, the product you want inside the equation for $\\gamma_i$ should be an element-by-element multiplication, not a dot or inner product. **Be sure to see the helpful tips about how to calculate this which were given in the energy minimization lecture**!\n \n* You want to end up at the point, in your code, where you can obtain the new direction by calculating something like `Dir = newForces + gamma * Dir`\n* Continue successive line searches in your CG minimization until the fractional change in energy on subsequent searches is less than the tolerance. 
That is, you'll set it up to use an `EFracTolCG` variable and continue until this criteria is met (where $U_i$ is the potential energy at the present step): \n\\begin{equation}\n\\left|U_i-U_{i-1}\\right| < EFracTolCG \\times \\left| U_i\\right|\n\\end{equation}\n\n* To debug your code, you will probably want to initially use `print` statements in the minimization routine to monitor the energy as a function of step to make sure it's doing the right thing!\n\n\n\n### Now actually write ConjugateGradient:", "_____no_output_____" ] ], [ [ "def ConjugateGradient(Pos, dx, EFracTolLS, EFracTolCG):\n \"\"\"Performs a conjugate gradient search.\nInput:\n Pos: starting positions, (N,3) array\n dx: initial step amount\n EFracTolLS: fractional energy tolerance for line search\n EFracTolCG: fractional energy tolerance for conjugate gradient\nOutput:\n PEnergy: value of potential energy at minimum\n Pos: minimum energy (N,3) position array\n\"\"\"\n #### WRITE YOUR CODE HERE ####\n ## In my code, I can accomplish this function in 10 lines ###\n \n #A return statement you may/will use to finish things off \n return PEnergy, Pos", "_____no_output_____" ] ], [ [ "## Step 4: Energy minimize a variety of clusters, storing energies\n\nWrite code to use the functions you wrote above, plus the emlib module, to energy minimize clusters of various sizes. Loop over clusters from size N=2 to (and including) N=25. For each particle number, do the following:\n* Perform K (to be specified below in the section on graphing) minimizations, each starting from a different random configuration of particles \n* Store the K energies to a list \n* Display the minimum, average, and standard deviation of the minimized energies for the trials. Note standard deviations can be calculated with the numpy.std function (`np.std()`)\n* After doing this, you'll be tasked with making some plots. \n\nUse the following settings:\n* `dx = 0.001`\n* `EFracTolCG = 1.0e-10`\n* `EFracTolLS = 1.0e-8`\n* And place the particles with L chosen such that the average number density of particles ($N/V$, where $V=L^3$) is $0.001$. That is, for every $N$, solve for $L$ such that $N/L^3 = 0.001$. \n\nThese are relatively typical settings for this kind of a system. \n\n**I suggest you do this first for just one N and K to make sure it works**. Then set up a loop over N and perhaps (if you like) a loop over K as well. Reserve the large K values until you are absolutely certain it’s working. Note that if the computational time becomes prohibitive (i.e. if it runs more than overnight, or your computer is having difficulties handling the lode) we can migrate your simulations to GreenPlanet. \n\nYou can easily add visualization of a trajectory by adding, within your ConjugateGradient function’s central loop, a call to the PosToPDB function of the pos_to_pdb module. Assuming you’ve done ‘from pos_to_pdb import *’ you’d add something like: \n\n`PosToPDB( Pos, L, ‘mytrajectory.pdb’)`\n\nwithin the loop inside your ConjugateGradient minimizer. This will write out each step of the minimization as a separate frame in a pdb file, which you can download with scp and view in PyMol to see exactly what’s going on. \nNote that visualization (really, the file I/O and coordinate conversions) will slow things considerably, so I suggest you only do this in one specific case to check out what’s going on, or to troubleshoot if things don't appear to be working. 
It should also be possible to add interactive visualization via `nglview` here, though I've not done that for you.\n\n* Hint: **You MAY want to use Python's pickle module to save out your data at the end of your calculations, since the next step involves plotting your data and you may want to easily be able to read it back in**. At the very least - whether you save it to disk or not - you'll want to store it (specifically, the minimum and average energies at each N) to variables for later reuse. If you had the variable `energies` containing all of the energies obtained at K = 10000 you might dump this using:", "_____no_output_____" ] ], [ [ "import pickle\nfile = open('energies.pickle', \"w\")\npickle.dump( energies, file) \nfile.close()\n#To load again, use:\n#file = open(\"energies.pickle\", \"r\") \n#energies = pickle.load(file)\n#file.close()", "_____no_output_____" ] ], [ [ "### Write your code here:\n", "_____no_output_____" ] ], [ [ "#Your energy minimization code here\n#This will be the longest code you write in this assignment", "_____no_output_____" ] ], [ [ "## Step 5: Graph your findings", "_____no_output_____" ], [ "Plot the minimum and average energies as a function of N for each of K=100, 1000, and 10000. The last case may be fairly time consuming (i.e. several hours) and should be done without output of pdb files for visualization (since this can slow it down).\n\nUse matplotlib/PyLab to make these plots.\n\n**Hint: If your minimizations are proceeding extremely slowly, it may mean you have an error in calculation of gamma**, such that even K=100 or K=10 could take a very long time. Ensure you have implemented the equation for gamma correctly. Even with a correct gamma value, this will take considerable time for the larger N values.", "_____no_output_____" ] ], [ [ "#Your code for this here", "_____no_output_____" ] ], [ [ "## Step 6: Compare with what's expected\n\nCompare your results (your minimum energy at each N value) with the known global minimum energies, via a plot and by commenting on the results. These are from ( Leary, J. Global Optimization 11:35 (1997)). Add this curve to your graph. Why might your results be higher?\n\n<img src=\"Leary_table.png\" width=500>\n", "_____no_output_____" ] ], [ [ "#Write code here to add these to your graph", "_____no_output_____" ] ], [ [ "## Step 7: Fit a curve to the data \n\n(**For undergraduates, this section is optional.**)\n\nBased on macroscopic arguments, the energy of a cluster could scale with both surface area (via a surface tension) and volume (via an energy density for bulk) of the cluster. So we could model the minimum energy as depending on the cluster size in this way:\n\\begin{equation}\nU_{min} \\propto a + b N^{2/3} +cN\n\\end{equation}\n\nFit this equation to your data in the K=10000 case. You can do this using a least-squares fit, for example using fitting functions within SciPy (`optimize.leastsq`, for example, or similar functions in `scipy.stats`. A fairly dated tutorial is [here](http://www.tau.ac.il/~kineret/amit/scipy_tutorial) (sec 5.4), or see [stack overflow](https://stackoverflow.com/questions/19791581/how-to-use-leastsq-function-from-scipy-optimize-in-python-to-fit-both-a-straight).\n\n**Once you perform the fit, plot the difference between the true minimum energy and the expected energy from this equation as a function of N. 
Can you identify the magic numbers from this curve?**", "_____no_output_____" ] ], [ [ "#Your code here", "_____no_output_____" ] ], [ [ "## Step 8: Submit your results\n\nSubmit (via Canvas) your graphs, with axes and curves labeled. Also attach a copy of your Python code (this notebook or, if you prefer, a separate script). Specifically, you should be including:\n* Your Python code\n* A graph (or graphs) of minimum and average energies as a function of N for K=100, 1000, and 10000. (Note you can use savefig to save graphs). These can be included in this notebook, or saved separately.\n* A graph comparing your results for the K=10000 case with the expected results from Leary, as a function of N, overlaid with a fitted curve using the equation above (Step 7); undergrads can omit the fit if desired as this is optional. Provide the parameters determined in your fit.\n* Give brief discussion of why your results might be higher than those of Leary. Comment on the magic numbers (step 7). \n\n### Please give clear answers to questions given above, ideally in a box below or a separate report\n\nSpecifically, explain whether you can identify the magic numbers, how you can tell what they are, etc. If you are not certain whether your results are right, explain any reservations you have and why, discuss where you think you might have gone wrong, etc. Basically, be sure to answer all my questions AND give a very brief discussion of your results. The main ways to lose points on this assignment are to:\n- not finish\n- submit late\n- not discuss your results\nI tend to be generous with partial credit for people whose work is mostly correct/mostly complete.", "_____no_output_____" ] ] ]
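For readers who cannot build the Fortran extension, the dimensionless LJ-plus-bias energy defined in the overview of this notebook (and evaluated by the provided `emlib.calcenergy`) can be reproduced in plain NumPy. The sketch below is only a stand-in for testing: the function name and the pairwise loop are illustrative assumptions, not the emlib implementation.

```python
import numpy as np

def calc_energy_py(Pos):
    """Plain-NumPy sketch of the dimensionless potential
    U* = sum_i alpha*|r_i|^2 + sum_{i<j} 4*(r_ij^-12 - r_ij^-6).
    A stand-in for emlib.calcenergy, not the provided Fortran code."""
    N = len(Pos)
    alpha = 0.0001 * N**(-2.0 / 3.0)        # biasing strength from the assignment
    energy = alpha * np.sum(Pos * Pos)      # harmonic pull toward the origin
    for i in range(N - 1):
        rij = Pos[i + 1:] - Pos[i]          # displacements to all particles j > i
        r2 = np.sum(rij * rij, axis=1)
        inv_r6 = 1.0 / r2**3                # r^-6 for each pair
        energy += np.sum(4.0 * (inv_r6 * inv_r6 - inv_r6))
    return energy

# Example: energy of 13 particles placed randomly in a box of side L = 5
# print(calc_energy_py(np.random.random((13, 3)) * 5.0))
```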
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7a471b9e066b7569816bc153779ea373be5da18
89,769
ipynb
Jupyter Notebook
docs/decomposition.ipynb
MaximeBouton/ADMScenarios
eac7eef25a758b3ea20ce58c0e56bd1e5c32c3d4
[ "MIT" ]
24
2019-04-28T08:55:54.000Z
2021-07-22T11:24:55.000Z
docs/decomposition.ipynb
MaximeBouton/ADMScenarios
eac7eef25a758b3ea20ce58c0e56bd1e5c32c3d4
[ "MIT" ]
8
2019-03-09T04:59:41.000Z
2021-07-08T11:43:48.000Z
docs/decomposition.ipynb
MaximeBouton/ADMScenarios
eac7eef25a758b3ea20ce58c0e56bd1e5c32c3d4
[ "MIT" ]
11
2019-07-03T09:51:27.000Z
2021-11-22T07:49:13.000Z
125.551049
28,746
0.855006
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7a486517a3804a24d0cade90b861b4f74b12e6a
74,900
ipynb
Jupyter Notebook
sal_eval.ipynb
hirwaam/PanoSaliency
b0749694264bb9203c46768036d1a4f1319305b6
[ "MIT" ]
17
2019-07-24T06:54:26.000Z
2022-02-08T17:57:42.000Z
sal_eval.ipynb
hirwaam/PanoSaliency
b0749694264bb9203c46768036d1a4f1319305b6
[ "MIT" ]
5
2020-01-07T15:47:59.000Z
2021-12-22T16:45:54.000Z
sal_eval.ipynb
hirwaam/PanoSaliency
b0749694264bb9203c46768036d1a4f1319305b6
[ "MIT" ]
4
2020-06-19T09:40:01.000Z
2021-12-17T08:22:45.000Z
107.30659
10,138
0.838331
[ [ [ " #now, get 3k samples, one thousand for each dataset. Then, \n #how to select sample", "_____no_output_____" ], [ "ls ./data", "\u001b[0m\u001b[34;42m360dataset_rev\u001b[0m/ saliency_ds2_topic6_part1\r\n360dataset_v2.zip saliency_ds2_topic6_part2\r\n\u001b[01;34mbk\u001b[0m/ saliency_ds2_topic6_part3\r\n\u001b[01;34mhead-orientation\u001b[0m/ saliency_ds2_topic7\r\nREADME.md saliency_ds2_topic8\r\nsaleval_bk saliency_ds3_topiccoaster_\r\nsaliency_ds1_topicdiving saliency_ds3_topiccoaster2_\r\nsaliency_ds1_topicparis saliency_ds3_topicdiving\r\nsaliency_ds1_topicroller saliency_ds3_topicdrive\r\nsaliency_ds1_topictimelapse saliency_ds3_topicgame\r\nsaliency_ds1_topicvenise saliency_ds3_topiclandscape\r\nsaliency_ds2_topic0 saliency_ds3_topicpacman\r\nsaliency_ds2_topic1 saliency_ds3_topicpanel\r\nsaliency_ds2_topic2 saliency_ds3_topicride\r\nsaliency_ds2_topic3 saliency_ds3_topicsport\r\nsaliency_ds2_topic4 saliency_evaldat\r\nsaliency_ds2_topic5_part1 viz_filtered_fixation\r\nsaliency_ds2_topic5_part2\r\n" ], [ "import pickle\nimport numpy as np\nimport head_orientation_lib\nimport saldat_head_orientation\nimport saldat_eval\nimport saldat_saliency\nfrom sklearn import metrics\n\nreload(head_orientation_lib)\nreload(saldat_eval)\n\nfrom matplotlib import pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "dirpath1 = u'./data/head-orientation/dataset1'\ndirpath2 = u'./data/head-orientation/dataset2/Experiment_1'\ndirpath3 = u'./data/head-orientation/dataset3/sensory/orientation'\next1 = '.txt'\next2 = '.csv'\next3 = '.csv'\nheadoren = saldat_head_orientation.HeadOrientation(dirpath1, dirpath2, dirpath3, ext1, ext2, ext3)\n\nvar = 20\nsalsal = saldat_saliency.Fixation(var)\n\nsaleval = saldat_eval.SalEvaluation(headoren, salsal, verbose=True)", "Read/Initialize samples for evaluation\nInitialize baselines (center & equator)\n" ], [ "#dataset = 3\n#dirpath, filename_list, f_parse, f_extract_direction = headoren.load_filename_list(dataset, topic)\n#series_ds = headoren.load_series_ds(filename_list, f_parse)\n#vector_ds = headoren.headpos_to_headvec(series_ds, f_extract_direction)\n#vector_ds = headoren.cutoff_vel_acc(vector_ds)", "_____no_output_____" ], [ "dataset = 3\nds, t, vlist, salmap = saleval.dat[dataset][0]\nfixlist = [[t, vlist[idx], -1, -1] for idx,_ in enumerate(vlist)]\nfmap = headoren.create_fixation_map(fixlist, dataset)\nplt.imshow(fmap)\nplt.figure()\nplt.imshow(salmap)\nprint t, ds", "27.640000000000022 panel\n" ], [ "\nheat_mapi = salsal.create_saliency([fixlist[0]], dataset)\nplt.imshow(heat_mapi)\nheat_mapi = salsal.create_saliency([[0.0, [-1.0, 0, 0], 0, 0]], 1)\nplt.imshow(heat_mapi)\nprint heat_mapi.max(), heat_mapi.mean(), heat_mapi.min(), np.median(heat_mapi)\n", "0.08920620580763855 0.00027063952802410966 0.0 1.0133570250784742e-89\n" ], [ "#create individual heat map for saliency. 
Important\nimport timeit\nbtime = timeit.default_timer()\n\nheat_map_list = []\nfor i in np.arange(len(fixlist)):\n heat_mapi = salsal.create_saliency([fixlist[i]], dataset)\n heat_map_list.append(heat_mapi)\nprint timeit.default_timer() - btime\n\n#10 seconds for 1 frames, total number of evaluation: ", "8.7027258873\n" ], [ "def get_false_geoxy(_topic, _false_gxy_dict, topk=5):\n temp = []\n result = []\n for t in _false_gxy_dict.keys():\n if t != _topic:\n temp = temp + _false_gxy_dict[t]\n idx_list = range(len(temp))\n np.random.shuffle(idx_list)\n idx_list = idx_list[:topk]\n for idx in idx_list:\n result += temp[idx]\n return result\n\ndef sAUC_baseline(_heat_map_list, _true_fixation_list, _true_geoxy_list, _false_geoxy_dict, _vec_map, _gaussian_dict, _width, _height):\n split_n = 3\n split_pos = 0 \n \n false_geoxy_list = get_false_geoxy('2', _false_geoxy_dict, topk=5)\n \n\n idx_list_train, idx_list_val, fixation_train_list, fixation_val_list =\\\n create_split(split_n, split_pos, _true_fixation_list)\n salient_map = sum([_heat_map_list[i] for i in idx_list_val])\n true_geoxy_list_train = [_true_geoxy_list[idx] for idx in idx_list_train]\n \n #human model\n y_pred = [salient_map[x, y] for (x, y) in true_geoxy_list_train]\n y_pred += [salient_map[x, y] for (x, y) in false_geoxy_list]\n #horizontal model\n y_pred_horizon = [1.0 - np.abs(get_fixation.geoy_to_phi(x, _height))/90.0 for (x, y) in true_geoxy_list_train]\n y_pred_horizon += [1.0 - np.abs(get_fixation.geoy_to_phi(x, _height))/90.0 for (x, y) in false_geoxy_list]\n #center model\n y_pred_center = [salient_center_map[x, y] for (x, y) in true_geoxy_list_train]\n y_pred_center += [salient_center_map[x, y] for (x, y) in false_geoxy_list]\n #true fixation\n y_true = [1 for (y, x) in true_geoxy_list_train]\n y_true += [0 for (y, x) in false_geoxy_list]\n\n return metrics.roc_auc_score(y_true, y_pred),\\\n metrics.roc_auc_score(y_true, y_pred_horizon),\\\n metrics.roc_auc_score(y_true, y_pred_center)", "_____no_output_____" ], [ "import timeit\n\nbtime = timeit.default_timer()\n\ndataset = 3\nidx = np.random.randint(len(saleval.dat[dataset]))\ntopic, t, vpos_list, salmap = saleval.dat[dataset][idx]\nvneg_list = saleval.get_negative_fixations(dataset, topic)\nprint topic, t\n\n#now I have positive, negative fixation, need to create saliency list\nfixpos_list = [(t, v, _, _) for v in vpos_list]\nfixneg_list = [(t, v, _, _) for v in vneg_list]\nfposmap = headoren.create_fixation_map(fixpos_list, dataset)\nfnegmap = headoren.create_fixation_map(fixneg_list, dataset)\n\nnp.random.shuffle(fixpos_list)\nnpos= len(fixpos_list)/3\nfixposeval_list = fixpos_list[:npos]\nfixpostrain_list = fixpos_list[npos:]\nval_salmap = salsal.create_saliency(fixposeval_list, dataset)\ntrain_fmap = headoren.create_fixation_map(fixpostrain_list, dataset)\ngeoxy_train = zip(*np.where(train_fmap==1))\ngeoxy_neg = zip(*np.where(fnegmap==1))\n\n#val_salmap must be evaluated against geoxy_train + geoxy_neg\n\ny_pred = [val_salmap[hi, wi] for (hi, wi) in geoxy_train]\ny_pred += [val_salmap[hi, wi] for (hi, wi) in geoxy_neg]\n\ny_true = [1 for item in geoxy_train]\ny_true += [0 for item in geoxy_neg]\n\nplt.imshow(fposmap)\nplt.figure()\nplt.imshow(fnegmap)\nplt.figure()\n\nplt.imshow(val_salmap)\nplt.figure()\nplt.imshow(train_fmap)\n\nprint len(fixpos_list)\nprint metrics.roc_auc_score(y_true, y_pred)\n\nprint 'total time taken: ', timeit.default_timer() - btime", "pacman 10.600000000000009\n12\n0.9857142857142857\ntotal time taken: 1.38693213463\n" ], [ 
"baseline_circle = None\nbaseline_equator = [1.0 - np.abs(head_orientation_lib.geoy_to_phi(hi, head_orientation_lib.H))/90.0 for (hi, wi) in geoxy_train]", "_____no_output_____" ], [ "head_orientation_lib.H/2, head_orientation_lib.W/2", "_____no_output_____" ], [ "dataset = 3\nresult = []\nfor idx, (topic, t, vpos_list, salmap) in enumerate(saleval.dat[dataset][:200]):\n if len(vpos_list) < 15:\n print 'Warning, not enough data at : {}, {}, {}, SKIPPED'.format(topic, t, len(vpos_list))\n continue\n sauc = saleval.sauc(dataset, topic, vpos_list)\n print idx, sauc, np.mean(result), topic, t, len(vpos_list)\n result.append(sauc)\nresult = np.array(result)\nprint result.mean()", "0 0.9600694444444444 nan panel 27.640000000000022 23\n1 0.7582417582417582 0.9600694444444444 ride 50.50000000000004 19\n2 0.7972972972972973 0.8591556013431013 diving 8.260000000000007 21\n3 0.731186224489796 0.8385361666611666 coaster2_ 50.08000000000004 47\n4 0.5118159203980099 0.811698681118324 landscape 59.86000000000005 17\n5 0.7578125 0.7517221289742612 game 5.740000000000004 36\n6 1.0 0.7527371908118843 sport 28.240000000000023 20\nWarning, not enough data at : panel, 31.06, 14, SKIPPED\n8 1.0 0.7880604492673294 drive 40.42000000000004 27\n9 0.8974603174603175 0.8145528931089132 coaster_ 44.08000000000004 37\n10 0.6534598214285714 0.8237648291479581 panel 19.480000000000018 20\n11 0.6870527000650618 0.8067343283760195 coaster2_ 52.96000000000004 43\n12 0.8295454545454546 0.7958541803477506 coaster_ 16.900000000000013 16\n13 0.8090909090909091 0.7986617865308926 diving 59.68000000000005 38\n14 0.9030054644808744 0.7994640267278169 panel 49.780000000000044 17\nWarning, not enough data at : game, 2.86, 12, SKIPPED\n16 0.6277472527472527 0.8068598437101782 coaster2_ 38.74000000000003 42\nWarning, not enough data at : pacman, 20.5, 12, SKIPPED\nWarning, not enough data at : drive, 21.22, 14, SKIPPED\n19 0.7698863636363636 0.7949190043126497 coaster2_ 49.12000000000004 23\n20 0.6319942611190816 0.7933544642703819 ride 7.360000000000006 25\n21 0.557983682983683 0.783862687614423 panel 41.080000000000034 32\n22 0.7661913250148544 0.7713138540238264 game 49.72000000000004 25\nWarning, not enough data at : landscape, 22.24, 13, SKIPPED\n24 0.9181096681096681 0.7710442472338804 game 52.00000000000004 20\n25 0.9371794871794872 0.7783975182776699 coaster_ 58.66000000000005 17\n26 0.6058122205663189 0.7859585644158517 diving 28.900000000000023 32\n27 0.8538461538461539 0.7777700942408728 coaster_ 36.70000000000003 27\n28 0.6051587301587301 0.7810777490063198 coaster2_ 28.000000000000025 42\nWarning, not enough data at : game, 3.64, 10, SKIPPED\n30 0.8246268656716418 0.7737477898876705 game 25.18000000000002 21\n31 0.5873015873015873 0.7757829529190293 diving 51.280000000000044 27\n32 0.8623481781376519 0.7685336696260509 coaster2_ 6.820000000000006 28\nWarning, not enough data at : pacman, 12.04, 6, SKIPPED\n34 0.7413793103448275 0.7720082810524064 sport 16.660000000000014 29\nWarning, not enough data at : landscape, 1.06, 14, SKIPPED\n36 0.8743228602383533 0.7709143892414215 sport 6.820000000000006 19\n37 0.7803571428571427 0.7744801985861434 diving 25.420000000000023 29\n38 0.653061224489796 0.7746760967285099 panel 39.580000000000034 21\n39 0.88 0.7707530363337127 game 23.20000000000002 29\n40 0.7376373626373627 0.7741670039482841 sport 32.260000000000026 21\n41 0.787012987012987 0.7730600451206804 pacman 47.920000000000044 21\nWarning, not enough data at : coaster_, 1.36, 5, SKIPPED\n43 0.7699214365881033 
0.7734704257645717 ride 48.82000000000004 32\nWarning, not enough data at : panel, 10.24, 6, SKIPPED\n45 0.8968599033816425 0.7733690260738154 sport 39.040000000000035 34\n46 0.7622641509433963 0.7767993282212551 coaster_ 36.040000000000035 30\n47 0.9192546583850931 0.7764064855921238 coaster2_ 53.920000000000044 31\n48 0.8777777777777778 0.780165648034044 coaster_ 57.28000000000005 26\n49 0.8315972222222222 0.7826685231556781 pacman 50.200000000000045 17\n50 0.7749999999999999 0.7838917406323418 pacman 31.900000000000027 22\n51 0.974537037037037 0.7836748689096017 coaster_ 58.00000000000005 26\n52 0.7549751243781094 0.7882192062459692 drive 9.220000000000008 17\n53 0.6866666666666666 0.7874460880629957 coaster_ 22.000000000000018 37\nWarning, not enough data at : panel, 14.68, 14, SKIPPED\nWarning, not enough data at : panel, 13.48, 11, SKIPPED\n56 0.48087686567164184 0.7851556466676246 landscape 19.360000000000017 24\n57 0.6351351351351351 0.7783938959788249 drive 19.360000000000017 21\n58 0.9766414141414141 0.7752795750909186 landscape 46.90000000000004 16\n59 0.6212121212121213 0.7795638695388015 landscape 18.700000000000017 16\nWarning, not enough data at : sport, 55.48, 13, SKIPPED\n61 0.9611823361823362 0.7762648747819959 diving 33.10000000000003 26\n62 0.7439814814814815 0.7800387005248599 coaster2_ 35.32000000000003 30\n63 0.886328125 0.7793175561439923 pacman 59.620000000000054 24\n64 0.7960164835164835 0.7814158025921494 ride 40.24000000000004 19\nWarning, not enough data at : pacman, 59.08, 8, SKIPPED\n66 0.728813559322034 0.7816965849176173 coaster2_ 39.70000000000003 30\n67 0.6816964285714286 0.7806987919818515 coaster2_ 11.68000000000001 24\nWarning, not enough data at : ride, 17.44, 8, SKIPPED\n69 0.6641386782231854 0.7788654148816585 pacman 24.52000000000002 19\n70 0.8354700854700855 0.7767794742151408 pacman 44.26000000000004 18\n71 0.9795 0.7778275208446935 drive 26.560000000000024 29\n72 0.8208333333333333 0.7813656345140848 landscape 29.320000000000025 21\nWarning, not enough data at : drive, 8.44, 12, SKIPPED\n74 0.5322463768115941 0.7820461120799339 panel 52.840000000000046 29\n75 0.9929378531073446 0.7778122182618264 drive 27.520000000000024 26\n76 0.9812446717817562 0.7813976455092517 panel 29.380000000000024 25\n77 0.8930288461538461 0.7846738262678175 sport 19.960000000000015 48\n78 0.8539553752535498 0.7864214878788824 pacman 33.88000000000003 25\nWarning, not enough data at : panel, 22.72, 12, SKIPPED\nWarning, not enough data at : sport, 51.04, 5, SKIPPED\nWarning, not enough data at : landscape, 53.44, 13, SKIPPED\nWarning, not enough data at : landscape, 57.1, 13, SKIPPED\n83 0.8214285714285714 0.787493454345147 coaster2_ 51.76000000000005 35\n84 0.6922619047619047 0.7880236905495754 landscape 27.760000000000023 22\n85 0.8041666666666667 0.7865504323066882 sport 56.32000000000005 17\nWarning, not enough data at : panel, 10.3, 9, SKIPPED\n87 0.6858710562414267 0.7868173449485061 pacman 34.36000000000003 27\nWarning, not enough data at : diving, 55.18, 10, SKIPPED\n89 0.9324324324324325 0.7853106839230273 coaster2_ 29.440000000000026 27\n90 0.7456467661691542 0.7874742390481656 ride 18.520000000000017 18\n91 0.9099999999999999 0.7868680437890495 sport 39.52000000000003 37\n92 0.8294573643410852 0.7886270717349202 diving 39.70000000000003 17\n93 0.6759485924112606 0.789202146278669 panel 43.360000000000035 28\n94 0.9422700587084148 0.7876291802527328 drive 23.14000000000002 21\n95 0.8625 0.7897475484507558 panel 38.62000000000003 30\n96 0.9768518518518519 
0.7907306896879077 landscape 45.34000000000004 27\n97 0.9448924731182795 0.793212305183427 coaster_ 29.200000000000024 17\n98 0.718694885361552 0.7952080968667804 game 17.740000000000016 21\nWarning, not enough data at : landscape, 11.38, 12, SKIPPED\n100 0.7584325396825398 0.7942144187952839 sport 41.98000000000004 23\n101 0.7716823406478578 0.7937556767553768 landscape 33.64000000000003 16\n102 0.5557909604519775 0.7934762674375602 sport 22.000000000000018 36\n103 0.9598765432098766 0.7905052011002404 coaster_ 16.720000000000013 24\nWarning, not enough data at : pacman, 40.66, 13, SKIPPED\n105 0.6976495726495726 0.7925962053238161 pacman 36.76000000000003 17\n106 0.8660818713450293 0.791438319559496 game 42.220000000000034 44\n107 0.9963570127504553 0.7923376394605265 drive 22.42000000000002 26\n108 0.8340080971659919 0.7947664415235018 panel 4.4200000000000035 28\n109 0.7850438047559449 0.7952281080604723 ride 5.740000000000004 25\n110 0.6960978835978836 0.7951096859290243 diving 12.28000000000001 35\n111 0.8164665523156089 0.7939716192355629 ride 11.440000000000008 16\n112 0.6891774891774891 0.794227243475109 landscape 15.580000000000013 22\n113 0.8590686274509804 0.7930469091571583 sport 21.40000000000002 45\n114 0.892109500805153 0.7937804838048674 sport 14.500000000000012 34\n115 0.6769957983193278 0.7948610224532222 coaster2_ 12.820000000000011 25\nWarning, not enough data at : pacman, 30.7, 3, SKIPPED\nWarning, not enough data at : sport, 51.88, 3, SKIPPED\n118 0.6747619047619047 0.7935798787126364 diving 13.24000000000001 22\n119 1.0 0.7923022660895103 sport 28.480000000000025 22\n120 0.6760416666666668 0.7945118164502601 ride 48.94000000000004 26\n121 0.7546884452510587 0.7932647622420117 ride 36.76000000000003 28\n122 0.8667840375586855 0.7928629256066894 sport 47.32000000000004 17\n123 0.59145880574452 0.7936249989257821 ride 25.36000000000002 31\n124 0.8063427800269906 0.7915620785871978 sport 26.740000000000023 38\n" ], [ "saleval.dat", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a4ba4aa21544772bb95ddf4f35d83b92d67dc0
982,992
ipynb
Jupyter Notebook
Udacity/MachineLearning/customer_segments_project/customer_segments_nabin_proj.ipynb
nacharya/notebooks
584b814542ab06ebb1f4c58063e551e939dd47dc
[ "MIT" ]
null
null
null
Udacity/MachineLearning/customer_segments_project/customer_segments_nabin_proj.ipynb
nacharya/notebooks
584b814542ab06ebb1f4c58063e551e939dd47dc
[ "MIT" ]
null
null
null
Udacity/MachineLearning/customer_segments_project/customer_segments_nabin_proj.ipynb
nacharya/notebooks
584b814542ab06ebb1f4c58063e551e939dd47dc
[ "MIT" ]
null
null
null
377.63811
271,688
0.911378
[ [ [ "# Machine Learning Engineer Nanodegree\n## Unsupervised Learning\n## Project: Creating Customer Segments", "_____no_output_____" ], [ "Welcome to the third project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `'TODO'` statement. Please be sure to read the instructions carefully!\n\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. \n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.", "_____no_output_____" ], [ "## Getting Started\n\nIn this project, you will analyze a dataset containing data on various customers' annual spending amounts (reported in *monetary units*) of diverse product categories for internal structure. One goal of this project is to best describe the variation in the different types of customers that a wholesale distributor interacts with. Doing so would equip the distributor with insight into how to best structure their delivery service to meet the needs of each customer.\n\nThe dataset for this project can be found on the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers). For the purposes of this project, the features `'Channel'` and `'Region'` will be excluded in the analysis — with focus instead on the six product categories recorded for customers.\n\nRun the code block below to load the wholesale customers dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.", "_____no_output_____" ] ], [ [ "# Import libraries necessary for this project\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display # Allows the use of display() for DataFrames\n\n# Import supplementary visualizations code visuals.py\nimport visuals as vs\n\n# Pretty display for notebooks\n%matplotlib inline\n\n# Load the wholesale customers dataset\ntry:\n data = pd.read_csv(\"customers.csv\")\n data.drop(['Region', 'Channel'], axis = 1, inplace = True)\n print \"Wholesale customers dataset has {} samples with {} features each.\".format(*data.shape)\nexcept:\n print \"Dataset could not be loaded. Is the dataset missing?\"", "Wholesale customers dataset has 440 samples with 6 features each.\n" ] ], [ [ "## Data Exploration\nIn this section, you will begin exploring the data through visualizations and code to understand how each feature is related to the others. 
You will observe a statistical description of the dataset, consider the relevance of each feature, and select a few sample data points from the dataset which you will track through the course of this project.\n\nRun the code block below to observe a statistical description of the dataset. Note that the dataset is composed of six important product categories: **'Fresh'**, **'Milk'**, **'Grocery'**, **'Frozen'**, **'Detergents_Paper'**, and **'Delicatessen'**. Consider what each category represents in terms of products you could purchase.", "_____no_output_____" ] ], [ [ "# Display a description of the dataset\ndisplay(data.describe())", "_____no_output_____" ] ], [ [ "### Implementation: Selecting Samples\nTo get a better understanding of the customers and how their data will transform through the analysis, it would be best to select a few sample data points and explore them in more detail. In the code block below, add **three** indices of your choice to the `indices` list which will represent the customers to track. It is suggested to try different sets of samples until you obtain customers that vary significantly from one another.", "_____no_output_____" ] ], [ [ "# Select three indices of your choice you wish to sample from the dataset\nindices = [25, 186, 220]\n\n# Create a DataFrame of the chosen samples\nsamples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)\nprint \"Chosen samples of wholesale customers dataset:\"\ndisplay(samples)", "Chosen samples of wholesale customers dataset:\n" ] ], [ [ "### Question 1\nConsider the total purchase cost of each product category and the statistical description of the dataset above for your sample customers. \n*What kind of establishment (customer) could each of the three samples you've chosen represent?* \n**Hint:** Examples of establishments include places like markets, cafes, and retailers, among many others. Avoid using names for establishments, such as saying *\"McDonalds\"* when describing a sample customer as a restaurant.", "_____no_output_____" ] ], [ [ "samples_value_mean = data.describe().loc['mean']\nsamples_mean = samples.append(samples_value_mean)\nfoo = samples_mean.plot(kind='bar', figsize=(12,6))", "_____no_output_____" ] ], [ [ "1st one: A grocery store, as Fresh is above the mean, Grocery is around the mean, and detergent purchase costs are above the mean \n\n2nd one: Possibly a restaurant or cafe, because Frozen and Fresh are the highest, followed by a decent amount of Deli as well. Only the Frozen item is higher than the mean\n\n3rd one: A retailer or a store that sells fresh food. The highest category is Fresh, higher than the mean, with everything else less than the mean. \n", "_____no_output_____" ], [ "### Implementation: Feature Relevance\nOne interesting thought to consider is if one (or more) of the six product categories is actually relevant for understanding customer purchasing. That is to say, is it possible to determine whether customers purchasing some amount of one category of products will necessarily purchase some proportional amount of another category of products? 
We can make this determination quite easily by training a supervised regression learner on a subset of the data with one feature removed, and then score how well that model can predict the removed feature.\n\nIn the code block below, you will need to implement the following:\n - Assign `new_data` a copy of the data by removing a feature of your choice using the `DataFrame.drop` function.\n - Use `sklearn.cross_validation.train_test_split` to split the dataset into training and testing sets.\n - Use the removed feature as your target label. Set a `test_size` of `0.25` and set a `random_state`.\n - Import a decision tree regressor, set a `random_state`, and fit the learner to the training data.\n - Report the prediction score of the testing set using the regressor's `score` function.", "_____no_output_____" ] ], [ [ "# Make a copy of the DataFrame, using the 'drop' function to drop the given feature\n#feature_name = 'Grocery'\n\ndef do_feat(feature_name, data):\n new_data = data.drop(feature_name, 1)\n from sklearn.cross_validation import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(new_data, data[feature_name], test_size=0.25, random_state=42)\n # Create a decision tree regressor and fit it to the training set\n from sklearn.tree import DecisionTreeRegressor\n regressor = DecisionTreeRegressor(random_state=42)\n regressor.fit(X_train, y_train)\n feat_imp = zip(new_data, regressor.feature_importances_)\n # Report the score of the prediction using the testing set\n score = regressor.score(X_test, y_test)\n print (\"prediction score for \" + feature_name + \" is: \" + str(score))\n\nfeature_list = [ \"Grocery\", \"Detergents_Paper\", \"Delicatessen\", \"Frozen\", \"Milk\", \"Fresh\" ]\nfor feature_n in feature_list:\n do_feat(feature_n, data)", "prediction score for Grocery is: 0.681884008544\nprediction score for Detergents_Paper is: 0.271666980627\nprediction score for Delicatessen is: -2.2547115372\nprediction score for Frozen is: -0.210135890125\nprediction score for Milk is: 0.156275395017\nprediction score for Fresh is: -0.385749710204\n" ] ], [ [ "### Question 2\n*Which feature did you attempt to predict? What was the reported prediction score? Is this feature is necessary for identifying customers' spending habits?* \n**Hint:** The coefficient of determination, `R^2`, is scored between 0 and 1, with 1 being a perfect fit. A negative `R^2` implies the model fails to fit the data.", "_____no_output_____" ], [ "\nAttempted to predict \"Grocery\". Prediction score using DecisionTreeRegressor was 0.68188. Intuitively, The feature is too generalized to identify customer habits. If you look at the R^2 score , it seems to be the easiest to predict from the balance of the data. Based on this it is the least necessary to identify spending habits. ", "_____no_output_____" ], [ "### Visualize Feature Distributions\nTo get a better understanding of the dataset, we can construct a scatter matrix of each of the six product features present in the data. If you found that the feature you attempted to predict above is relevant for identifying a specific customer, then the scatter matrix below may not show any correlation between that feature and the others. Conversely, if you believe that feature is not relevant for identifying a specific customer, the scatter matrix might show a correlation between that feature and another feature in the data. 
Run the code block below to produce a scatter matrix.", "_____no_output_____" ] ], [ [ "# Produce a scatter matrix for each pair of features in the data\npd.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');", "_____no_output_____" ], [ "import seaborn as sns\nsns.heatmap(data.corr(), annot=True)", "_____no_output_____" ] ], [ [ "### Question 3\n*Are there any pairs of features which exhibit some degree of correlation? Does this confirm or deny your suspicions about the relevance of the feature you attempted to predict? How is the data for those features distributed?* \n**Hint:** Is the data normally distributed? Where do most of the data points lie? ", "_____no_output_____" ], [ "There is some degree of correlation between Grocery and Detergents_Paper. Similarly, there is some degree of correlation between Milk and Detergents_Paper. Also , note a small correlation between Grocery and Milk. \n\nThe data is not normally distributed. Most of data points in each feature lie within 10%-20% range.", "_____no_output_____" ], [ "## Data Preprocessing\nIn this section, you will preprocess the data to create a better representation of customers by performing a scaling on the data and detecting (and optionally removing) outliers. Preprocessing data is often times a critical step in assuring that results you obtain from your analysis are significant and meaningful.", "_____no_output_____" ], [ "### Implementation: Feature Scaling\nIf data is not normally distributed, especially if the mean and median vary significantly (indicating a large skew), it is most [often appropriate](http://econbrowser.com/archives/2014/02/use-of-logarithms-in-economics) to apply a non-linear scaling — particularly for financial data. One way to achieve this scaling is by using a [Box-Cox test](http://scipy.github.io/devdocs/generated/scipy.stats.boxcox.html), which calculates the best power transformation of the data that reduces skewness. A simpler approach which can work in most cases would be applying the natural logarithm.\n\nIn the code block below, you will need to implement the following:\n - Assign a copy of the data to `log_data` after applying logarithmic scaling. Use the `np.log` function for this.\n - Assign a copy of the sample data to `log_samples` after applying logarithmic scaling. Again, use `np.log`.", "_____no_output_____" ] ], [ [ "# Scale the data using the natural logarithm\nlog_data = np.log(data)\n\n# TODO: Scale the sample data using the natural logarithm\nlog_samples = np.log(samples)\n\n# Produce a scatter matrix for each pair of newly-transformed features\npd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');", "_____no_output_____" ] ], [ [ "### Observation\nAfter applying a natural logarithm scaling to the data, the distribution of each feature should appear much more normal. 
For any pairs of features you may have identified earlier as being correlated, observe here whether that correlation is still present (and whether it is now stronger or weaker than before).\n\nRun the code below to see how the sample data has changed after having the natural logarithm applied to it.", "_____no_output_____" ] ], [ [ "# Display the log-transformed sample data\ndisplay(log_samples)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nfig, axes = plt.subplots(2, 3)\naxes = axes.flatten()\nfig.set_size_inches(18, 6)\nfig.suptitle('Distribution of Features')\n\nfor i, col in enumerate(data.columns):\n feature = data[col]\n sns.distplot(feature, label=col, ax=axes[i]).set(xlim=(-1000, 20000),)\n axes[i].axvline(feature.mean(),linewidth=1)\n axes[i].axvline(feature.median(),linewidth=1, color='r')", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nfig, axes = plt.subplots(2, 3)\naxes = axes.flatten()\nfig.set_size_inches(18, 6)\nfig.suptitle('Distribution of Features for Log Data')\n\nfor i, col in enumerate(log_data.columns):\n feature = log_data[col]\n sns.distplot(feature, label=col, ax=axes[i])\n axes[i].axvline(feature.mean(),linewidth=1)\n axes[i].axvline(feature.median(),linewidth=1, color='r')", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport seaborn as sns\n# set plot style & color scheme\nsns.set_style('ticks')\nwith sns.color_palette(\"Reds_r\"):\n # plot densities of log data\n plt.figure(figsize=(8,4))\n for col in data.columns:\n sns.kdeplot(log_data[col], shade=True)\n plt.legend(loc='best')", "_____no_output_____" ] ], [ [ "### Implementation: Outlier Detection\nDetecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many \"rules of thumb\" for what constitutes an outlier in a dataset. Here, we will use [Tukey's Method for identfying outliers](http://datapigtechnologies.com/blog/index.php/highlighting-outliers-in-your-data-with-the-tukey-method/): An *outlier step* is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal.\n\nIn the code block below, you will need to implement the following:\n - Assign the value of the 25th percentile for the given feature to `Q1`. Use `np.percentile` for this.\n - Assign the value of the 75th percentile for the given feature to `Q3`. Again, use `np.percentile`.\n - Assign the calculation of an outlier step for the given feature to `step`.\n - Optionally remove data points from the dataset by adding indices to the `outliers` list.\n\n**NOTE:** If you choose to remove any outliers, ensure that the sample data does not contain any of these points! 
\nOnce you have performed this implementation, the dataset will be stored in the variable `good_data`.", "_____no_output_____" ] ], [ [ "outliers_list = np.array([], dtype='int64')\n#cnt = Counter()\n\n# For each feature find the data points with extreme high or low values\nfor feature in log_data.keys():\n \n # Calculate Q1 (25th percentile of the data) for the given feature\n Q1 = np.percentile(log_data[feature],25)\n \n # Calculate Q3 (75th percentile of the data) for the given feature\n Q3 = np.percentile(log_data[feature], 75)\n \n # Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)\n step = (Q3 - Q1)*1.5\n \n # Display the outliers\n print \"Data points considered outliers for the feature '{}':\".format(feature)\n found_outliers = log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]\n outliers_list = np.append(outliers_list, found_outliers.index.values.astype('int64'))\n display(found_outliers)\n \n\n# OPTIONAL: Select the indices for data points you wish to remove\noutliers_list, counts = np.unique(outliers_list, return_counts=True)\noutliers = outliers_list[counts>1]\n\nprint outliers\n\n# Remove the outliers, if any were specified\ngood_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)", "Data points considered outliers for the feature 'Fresh':\n" ] ], [ [ "### Question 4\n*Are there any data points considered outliers for more than one feature based on the definition above? Should these data points be removed from the dataset? If any data points were added to the `outliers` list to be removed, explain why.* ", "_____no_output_____" ], [ "Data Points considered for more than one feature (Observation in output above )\n65, 66, 75, 128, 154\n\nDecided to drop these outliers in the good_data because:\n- they appeared as outlier in more than one feature in the Tukey Method of outlier detection.\n- data point looks little out of range e.g. 65 on Detergents_Paper\n- It appears the outliers cause significant association between features \n\n\n", "_____no_output_____" ], [ "## Feature Transformation\nIn this section you will use principal component analysis (PCA) to draw conclusions about the underlying structure of the wholesale customer data. Since using PCA on a dataset calculates the dimensions which best maximize variance, we will find which compound combinations of features best describe customers.", "_____no_output_____" ], [ "### Implementation: PCA\n\nNow that the data has been scaled to a more normal distribution and has had any necessary outliers removed, we can now apply PCA to the `good_data` to discover which dimensions about the data best maximize the variance of features involved. In addition to finding these dimensions, PCA will also report the *explained variance ratio* of each dimension — how much variance within the data is explained by that dimension alone. 
Note that a component (dimension) from PCA can be considered a new \"feature\" of the space, however it is a composition of the original features present in the data.\n\nIn the code block below, you will need to implement the following:\n - Import `sklearn.decomposition.PCA` and assign the results of fitting PCA in six dimensions with `good_data` to `pca`.\n - Apply a PCA transformation of `log_samples` using `pca.transform`, and assign the results to `pca_samples`.", "_____no_output_____" ] ], [ [ "# Apply PCA by fitting the good data with the same number of dimensions as features\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=6).fit(good_data)\n\n# Transform log_samples using the PCA fit above\npca_samples = pca.transform(log_samples)\n\n# Generate PCA results plot\npca_results = vs.pca_results(good_data, pca)\n\nprint np.cumsum(pca.explained_variance_ratio_)", "[ 0.44302505 0.70681723 0.82988103 0.93109011 0.97959207 1. ]\n" ], [ "# create an x-axis variable for each pca component\nx = np.arange(1,7)\n\n# plot the cumulative variance\nplt.plot(x, np.cumsum(pca.explained_variance_ratio_), '-o', color='black')\n\n# plot the components' variance\nplt.bar(x, pca.explained_variance_ratio_, align='center', alpha=0.5)\n\n# plot styling\nplt.ylim(0, 1.05)\nplt.annotate('Cumulative\\nexplained\\nvariance',\n xy=(3.7, .88), arrowprops=dict(arrowstyle='->'), xytext=(4.5, .6))\nfor i,j in zip(x, np.cumsum(pca.explained_variance_ratio_)):\n plt.annotate(str(j.round(4)),xy=(i+.2,j-.02))\nplt.xticks(range(1,7))\nplt.xlabel('PCA components')\nplt.ylabel('Explained Variance')\nplt.show()", "_____no_output_____" ] ], [ [ "### Question 5\n*How much variance in the data is explained* ***in total*** *by the first and second principal component? What about the first four principal components? Using the visualization provided above, discuss what the first four dimensions best represent in terms of customer spending.* \n**Hint:** A positive increase in a specific dimension corresponds with an *increase* of the *positive-weighted* features and a *decrease* of the *negative-weighted* features. The rate of increase or decrease is based on the individual feature weights.", "_____no_output_____" ], [ "The first principal component explains 44.3% of the variance and the second principal component explains 26.38%. The first and second principal components combined explain 70.68% of the variance. The first four principal components explain 93.11% of the variance. \n\nIn terms of customer spending:\n\n- Dimension 1: Highest for \"Detergents_Paper\", then heavy on \"Milk\" and \"Grocery\", \n the three most correlated features. This (PCA1) means that an increase \n is associated with an increase in \"Milk\", \"Grocery\" and \"Detergents_Paper\" \n- Dimension 2: Very heavy on \"Fresh\", \"Frozen\" and \"Deli\".\n This (PCA2) shows larger increases in \"Fresh\", \"Frozen\" and \"Deli\". \n \n- Dimension 3: Low on \"Fresh\" and \"Detergents_Paper\". High on \"Deli\".\n An increase in this (PCA3) increases Deli but decreases Fresh. \n It also shows some increase in \"Frozen\" and a decrease in \"Detergents_Paper\"\n \n- Dimension 4: Low on Deli and Fresh, but very high on Frozen. \n An increase in this (PCA4) is associated with a large increase in \"Frozen\"\n and a large decrease in \"Delicatessen\" customer spending.\n\n\nAs described above, PCA largely describes the customer spending as it changes. 
\n", "_____no_output_____" ], [ "### Observation\nRun the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it in six dimensions. Observe the numerical value for the first four dimensions of the sample points. Consider if this is consistent with your initial interpretation of the sample points.", "_____no_output_____" ] ], [ [ "# Display sample log-data after having a PCA transformation applied\ndisplay(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))", "_____no_output_____" ] ], [ [ "### Implementation: Dimensionality Reduction\nWhen using principal component analysis, one of the main goals is to reduce the dimensionality of the data — in effect, reducing the complexity of the problem. Dimensionality reduction comes at a cost: Fewer dimensions used implies less of the total variance in the data is being explained. Because of this, the *cumulative explained variance ratio* is extremely important for knowing how many dimensions are necessary for the problem. Additionally, if a signifiant amount of variance is explained by only two or three dimensions, the reduced data can be visualized afterwards.\n\nIn the code block below, you will need to implement the following:\n - Assign the results of fitting PCA in two dimensions with `good_data` to `pca`.\n - Apply a PCA transformation of `good_data` using `pca.transform`, and assign the results to `reduced_data`.\n - Apply a PCA transformation of `log_samples` using `pca.transform`, and assign the results to `pca_samples`.", "_____no_output_____" ] ], [ [ "# Apply PCA by fitting the good data with only two dimensions\npca = PCA(n_components=2).fit(good_data)\n\n# Transform the good data using the PCA fit above\nreduced_data = pca.transform(good_data)\n\n# Transform log_samples using the PCA fit above\npca_samples = pca.transform(log_samples)\n\n# Create a DataFrame for the reduced data\nreduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])\nvr = np.cumsum(pca.explained_variance_ratio_)\nprint vr", "[ 0.44302505 0.70681723]\n" ] ], [ [ "### Observation\nRun the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it using only two dimensions. Observe how the values for the first two dimensions remains unchanged when compared to a PCA transformation in six dimensions.", "_____no_output_____" ] ], [ [ "# Display sample log-data after applying PCA transformation in two dimensions\ndisplay(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))", "_____no_output_____" ] ], [ [ "## Visualizing a Biplot\nA biplot is a scatterplot where each data point is represented by its scores along the principal components. The axes are the principal components (in this case `Dimension 1` and `Dimension 2`). In addition, the biplot shows the projection of the original features along the components. A biplot can help us interpret the reduced dimensions of the data, and discover relationships between the principal components and original features.\n\nRun the code cell below to produce a biplot of the reduced-dimension data.", "_____no_output_____" ] ], [ [ "# Create a biplot\nvs.biplot(good_data, reduced_data, pca)", "_____no_output_____" ] ], [ [ "### Observation\n\nOnce we have the original feature projections (in red), it is easier to interpret the relative position of each data point in the scatterplot. 
For instance, a point the lower right corner of the figure will likely correspond to a customer that spends a lot on `'Milk'`, `'Grocery'` and `'Detergents_Paper'`, but not so much on the other product categories. \n\nFrom the biplot, which of the original features are most strongly correlated with the first component? What about those that are associated with the second component? Do these observations agree with the pca_results plot you obtained earlier?", "_____no_output_____" ], [ "## Clustering\n\nIn this section, you will choose to use either a K-Means clustering algorithm or a Gaussian Mixture Model clustering algorithm to identify the various customer segments hidden in the data. You will then recover specific data points from the clusters to understand their significance by transforming them back into their original dimension and scale. ", "_____no_output_____" ], [ "### Question 6\n*What are the advantages to using a K-Means clustering algorithm? What are the advantages to using a Gaussian Mixture Model clustering algorithm? Given your observations about the wholesale customer data so far, which of the two algorithms will you use and why?*", "_____no_output_____" ], [ "K-Means clustering algorithms is computationally faster and is easier to understand. Gaussian Mixture Model (GMM) is a superset of K-Means , is more flexible and handles mixed cluster membership better. Likely to use K-Means algorithm because because it might make it easier to understand the wholesale customer data in multiple segments clustered using K-Means. \n\n\nGMM is a good classification algorithm for static non-time oriented data. GMM may not work well if the dimensions are high ( 6 or more). GMM works well with non-linear geometry , and does not bias to cluster size of specific structure. This allows GMM to any shape cluster. \n\nK-Means , however, is computationally faster and produces disjoint flat cluster in a unsupervised iterative method. Disadvantage with K-Means is there are fixed number of clusters and it is difficult to predict the value of K. \n\nIn GMM, the Gaussian blob is is allowed to be of different sizes and stretched in different directions. However, K-Means requires that each blob to be of a fixed size and completely symmetrical. The K-Means uses a lot of hard assignments and GMM has a lot of \nsoft assignment flexibility. In K-Means, there is no stretching in different directions like GMM. This means that the results in lower number of dimensions in GMM may turn out better than K-Means, however, K-Means will be faster for larger amounts of data. \n\n\n\nIn the wholesale customer data, there are 6 dimensions and we will use K-Means . ", "_____no_output_____" ], [ "### Implementation: Creating Clusters\nDepending on the problem, the number of clusters that you expect to be in the data may already be known. When the number of clusters is not known *a priori*, there is no guarantee that a given number of clusters best segments the data, since it is unclear what structure exists in the data — if any. However, we can quantify the \"goodness\" of a clustering by calculating each data point's *silhouette coefficient*. The [silhouette coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) for a data point measures how similar it is to its assigned cluster from -1 (dissimilar) to 1 (similar). 
Calculating the *mean* silhouette coefficient provides for a simple scoring method of a given clustering.\n\nIn the code block below, you will need to implement the following:\n - Fit a clustering algorithm to the `reduced_data` and assign it to `clusterer`.\n - Predict the cluster for each data point in `reduced_data` using `clusterer.predict` and assign them to `preds`.\n - Find the cluster centers using the algorithm's respective attribute and assign them to `centers`.\n - Predict the cluster for each sample data point in `pca_samples` and assign them `sample_preds`.\n - Import `sklearn.metrics.silhouette_score` and calculate the silhouette score of `reduced_data` against `preds`.\n - Assign the silhouette score to `score` and print the result.", "_____no_output_____" ] ], [ [ "# TODO: Apply your clustering algorithm of choice to the reduced data \nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\nreduced_samples = pd.DataFrame(pca_samples, columns = ['Dimension 1', 'Dimension 2'])\n\nclusterer = KMeans(n_clusters=2, random_state=42).fit(reduced_data)\n\n# Predict the cluster for each data point\npreds = clusterer.predict(reduced_data)\n\n# Find the cluster centers\ncenters = clusterer.cluster_centers_\n\n# Predict the cluster for each transformed sample data point\nsample_preds = clusterer.predict(reduced_samples)\n\n# Calculate the mean silhouette coefficient for the number of clusters chosen\nscore = silhouette_score(reduced_data, clusterer.labels_, metric='euclidean')\n\nprint(\"K-Means silhouette score: \",score)", "('K-Means silhouette score: ', 0.42628101546910835)\n" ] ], [ [ "### Question 7\n*Report the silhouette score for several cluster numbers you tried. Of these, which number of clusters has the best silhouette score?* ", "_____no_output_____" ], [ "Cluster of 2: 0.4219\n\nCluster of 4: 0.3326\n\nCluster of 6: 0.3654\n\nCluster of 8: 0.3644\n\nCluster of 10: 0.3505\n\nBest silhouette score was obtained for a cluster of 2\n", "_____no_output_____" ], [ "### Cluster Visualization\nOnce you've chosen the optimal number of clusters for your clustering algorithm using the scoring metric above, you can now visualize the results by executing the code block below. Note that, for experimentation purposes, you are welcome to adjust the number of clusters for your clustering algorithm to see various visualizations. The final visualization provided should, however, correspond with the optimal number of clusters. ", "_____no_output_____" ] ], [ [ "# Display the results of the clustering from implementation\nvs.cluster_results(reduced_data, preds, centers, pca_samples)", "_____no_output_____" ] ], [ [ "### Implementation: Data Recovery\nEach cluster present in the visualization above has a central point. These centers (or means) are not specifically data points from the data, but rather the *averages* of all the data points predicted in the respective clusters. For the problem of creating customer segments, a cluster's center point corresponds to *the average customer of that segment*. 
Since the data is currently reduced in dimension and scaled by a logarithm, we can recover the representative customer spending from these data points by applying the inverse transformations.\n\nIn the code block below, you will need to implement the following:\n - Apply the inverse transform to `centers` using `pca.inverse_transform` and assign the new centers to `log_centers`.\n - Apply the inverse function of `np.log` to `log_centers` using `np.exp` and assign the true centers to `true_centers`.\n", "_____no_output_____" ] ], [ [ "def ndprint(a, format_string ='{0:.2f}'):\n print [format_string.format(v,i) for i,v in enumerate(a)]\n\n# Inverse transform the centers\nlog_centers = pca.inverse_transform(centers)\n\n# Exponentiate the centers\ntrue_centers = np.exp(log_centers)\nprint \"centers:\"\n#display(true_centers)\nfor i,v in enumerate(true_centers):\n print v\n \n# Display the true centers\nsegments = ['Segment {}'.format(i) for i in range(0,len(centers))]\ntrue_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())\ntrue_centers.index = segments\ndisplay(true_centers)\ntrue_centers = true_centers.append(data.describe().ix['50%'])\ntrue_centers = true_centers.append(data.describe().ix['25%'])\ntrue_centers.plot(kind = 'bar', figsize = (16, 4))\n\n# had to re-drop to get relatively good range data (??)\nsome_good_data = data.drop(data.index[outliers]).reset_index(drop = True)\n\nprint \"mean:\"\nndprint(np.around(some_good_data.mean().values, decimals=1))\n\nprint \"median:\"\nndprint(np.around(some_good_data.median().values, decimals=1))\n\nprint \"\"\nprint \"Centers offset from mean\"\ndisplay(true_centers - np.around(some_good_data.mean().values))\n\nprint \"Centers offset from median\"\ndisplay(true_centers - np.around(some_good_data.median().values))\n", "centers:\n[ 8866.53752613 1896.60679852 2476.59086525 2088.23564999 293.89989089\n 681.28963817]\n[ 4005.0815051 7899.91306923 12103.82742157 952.34560465\n 4561.39226009 1035.53326638]\n" ] ], [ [ "### Question 8\nConsider the total purchase cost of each product category for the representative data points above, and reference the statistical description of the dataset at the beginning of this project. *What set of establishments could each of the customer segments represent?* \n**Hint:** A customer who is assigned to `'Cluster X'` should best identify with the establishments represented by the feature set of `'Segment X'`.", "_____no_output_____" ], [ "Customer segments are represented as the first segment 0 is the one with high Fresh but low on \nDetergents_Paper and Deli, but moderate Milk, Grocery and likeley going to a Restaurant.\n\nSegment 1 is very high on Grocery , low on Frozen and Deli so is likely going to be a Retail store. \n\nNow let's look at quartile, mean and median and comparing with centers.\n\nSegment 0:\nWith outliers removed, slightly above median in Fresh and Frozen. Much lower than median on \nGrocery but slightly lower in Detergents and Deli. \n\nSegment 1:\nWith outliers removed, above the median in Grocery, Milk and Detergents.\nReally below the mean in Fresh and Frozen. \n\nBased on this my choices are:\n\n- Segment 0: Restaurant , Cafes, Food prepare businesses\n- Segment 1: Retail stores, Grocery, supermarkets\n\n", "_____no_output_____" ], [ "### Question 9\n*For each sample point, which customer segment from* ***Question 8*** *best represents it? 
Are the predictions for each sample point consistent with this?*\n\nRun the code block below to find which cluster each sample point is predicted to be.", "_____no_output_____" ] ], [ [ "# Display the predictions\nfor i, pred in enumerate(sample_preds):\n print \"Sample point\", i, \"predicted to be in Cluster\", pred\n print 'The distance between sample point {} and center of cluster {}:'.format(i, pred)\n print (samples.iloc[i] - true_centers.iloc[pred])\n", "Sample point 0 predicted to be in Cluster 1\nThe distance between sample point 0 and center of cluster 1:\nFresh 12160.0\nMilk -3670.0\nGrocery -4509.0\nFrozen -751.0\nDetergents_Paper -558.0\nDelicatessen -979.0\ndtype: float64\nSample point 1 predicted to be in Cluster 0\nThe distance between sample point 1 and center of cluster 0:\nFresh -5858.0\nMilk -1376.0\nGrocery -1623.0\nFrozen 1382.0\nDetergents_Paper 655.0\nDelicatessen 46.0\ndtype: float64\nSample point 2 predicted to be in Cluster 0\nThe distance between sample point 2 and center of cluster 0:\nFresh 5888.0\nMilk -998.0\nGrocery -1095.0\nFrozen -323.0\nDetergents_Paper -238.0\nDelicatessen 68.0\ndtype: float64\n" ] ], [ [ "Segment 0 is very high on Detergent and Grocery and Segment 1 is very high on Fresh and fairly low on detergent. If we look at the sample points, the predictions of points 0 and 1 should be swapped. ", "_____no_output_____" ], [ "## Conclusion", "_____no_output_____" ], [ "In this final section, you will investigate ways that you can make use of the clustered data. First, you will consider how the different groups of customers, the ***customer segments***, may be affected differently by a specific delivery scheme. Next, you will consider how giving a label to each customer (which *segment* that customer belongs to) can provide for additional features about the customer data. Finally, you will compare the ***customer segments*** to a hidden variable present in the data, to see whether the clustering identified certain relationships.", "_____no_output_____" ], [ "### Question 10\nCompanies will often run [A/B tests](https://en.wikipedia.org/wiki/A/B_testing) when making small changes to their products or services to determine whether making that change will affect its customers positively or negatively. The wholesale distributor is considering changing its delivery service from currently 5 days a week to 3 days a week. However, the distributor will only make this change in delivery service for customers that react positively. *How can the wholesale distributor use the customer segments to determine which customers, if any, would react positively to the change in delivery service?* \n**Hint:** Can we assume the change affects all customers equally? How can we determine which group of customers it affects the most?", "_____no_output_____" ], [ "It is unclear from the data the frequency of the delivery for each product segment. \"Fresh\" is the highest in terms of purchase so that would be a good category to understand the change in delivery service with possibly a lower impact on profit margin, and based on the data they are probably more frequest buyers. \n\n\nHowever, if we would like to do A/B testing we should select small groups of \"sample customers\" from both Segment 0 and Segment 1 that look statistically significant. The remainder customers in each segment become the \"second variant group\".\nThe 3 day schedule is applied to the \"sample customers\" in small groups in each segment. 
The response of service is received and evaluated for these \"sample customers\" as well as the \"second variant group\". This will give us 2 separate categories in each segment. \n\nA/B testing is usually done in two variants, and we have two variant in each segment.\nThe response would help us determine a suitable group customers from the two variants\nwith those who respond positively. Because of the multiple segments, and two variants, \nit may not affect the customers equally , but you will probablistically obtain a \nlarger number of customers that are likely to respond positively or negatively to the \nchange. \n\n", "_____no_output_____" ], [ "### Question 11\nAdditional structure is derived from originally unlabeled data when using clustering techniques. Since each customer has a ***customer segment*** it best identifies with (depending on the clustering algorithm applied), we can consider *'customer segment'* as an **engineered feature** for the data. Assume the wholesale distributor recently acquired ten new customers and each provided estimates for anticipated annual spending of each product category. Knowing these estimates, the wholesale distributor wants to classify each new customer to a ***customer segment*** to determine the most appropriate delivery service. \n*How can the wholesale distributor label the new customers using only their estimated product spending and the* ***customer segment*** *data?* \n**Hint:** A supervised learner could be used to train on the original customers. What would be the target variable?", "_____no_output_____" ], [ "We would first use the clustering technique to create multiple segments of customers. Supervised learner could be used on each of these segments, so when a new customer joins he's be categorized based on product spending and type of purchase. Product type of purchase , frequency and product spending would be the target variables.\n", "_____no_output_____" ], [ "### Visualizing Underlying Distributions\n\nAt the beginning of this project, it was discussed that the `'Channel'` and `'Region'` features would be excluded from the dataset so that the customer product categories were emphasized in the analysis. By reintroducing the `'Channel'` feature to the dataset, an interesting structure emerges when considering the same PCA dimensionality reduction applied earlier to the original dataset.\n\nRun the code block below to see how each data point is labeled either `'HoReCa'` (Hotel/Restaurant/Cafe) or `'Retail'` the reduced space. In addition, you will find the sample points are circled in the plot, which will identify their labeling.", "_____no_output_____" ] ], [ [ "# Display the clustering results based on 'Channel' data\nvs.channel_results(reduced_data, outliers, pca_samples)", "_____no_output_____" ] ], [ [ "### Question 12\n*How well does the clustering algorithm and number of clusters you've chosen compare to this underlying distribution of Hotel/Restaurant/Cafe customers to Retailer customers? Are there customer segments that would be classified as purely 'Retailers' or 'Hotels/Restaurants/Cafes' by this distribution? Would you consider these classifications as consistent with your previous definition of the customer segments?*", "_____no_output_____" ], [ "The clustertered algorithm does a pretty good job of creating clusters compared to the underlying distribution of Hotel/Restaurant/Cafe to Retail Customers. 
The number of clusters created is in harmony with the distribution and matches the type of establishments identified in the early part of this exercise. The kinds of establishments considered early on are consistent with the underlying distribution as well.\n\nHowever, based on the last graph, some of the customers fall in different segments, e.g. Retailers in the Hotel/Restaurant/Cafe cluster. Although the classification is broadly consistent with the earlier segment definitions, there are some misclassifications, especially between the two segments. ", "_____no_output_____" ], [ "> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.", "_____no_output_____" ] ] ]
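The silhouette scores quoted for Question 7 above are reported without the cell that produced them; a minimal sketch of the loop that would generate them, assuming `reduced_data` (the 2-D PCA projection) and the scikit-learn imports from the clustering cell:

```python
# Minimal sketch: reproduce the Question 7 silhouette scores for several cluster counts.
# Assumes `reduced_data` from the dimensionality-reduction step is still in scope.
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

for n_clusters in [2, 4, 6, 8, 10]:
    clusterer = KMeans(n_clusters=n_clusters, random_state=42).fit(reduced_data)
    labels = clusterer.predict(reduced_data)
    score = silhouette_score(reduced_data, labels, metric='euclidean')
    print('n_clusters = {}: silhouette score = {:.4f}'.format(n_clusters, score))
```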
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
e7a4c821fe1a44a9fd522e92c4a014ce185dc9e3
70,665
ipynb
Jupyter Notebook
DATA PARTIONS Project.ipynb
phani-1995/Datascience
715268cb3634c75b621d57ac9557c6ec49e8e235
[ "MIT" ]
null
null
null
DATA PARTIONS Project.ipynb
phani-1995/Datascience
715268cb3634c75b621d57ac9557c6ec49e8e235
[ "MIT" ]
null
null
null
DATA PARTIONS Project.ipynb
phani-1995/Datascience
715268cb3634c75b621d57ac9557c6ec49e8e235
[ "MIT" ]
null
null
null
44.781369
8,804
0.428614
[ [ [ "pip install sklearn\n", "Collecting sklearn\n Downloading https://files.pythonhosted.org/packages/1e/7a/dbb3be0ce9bd5c8b7e3d87328e79063f8b263b2b1bfa4774cb1147bfcd3f/sklearn-0.0.tar.gz\nRequirement already satisfied: scikit-learn in c:\\anacondainterpreter\\lib\\site-packages (from sklearn) (0.21.3)\nRequirement already satisfied: joblib>=0.11 in c:\\anacondainterpreter\\lib\\site-packages (from scikit-learn->sklearn) (0.13.2)\nRequirement already satisfied: scipy>=0.17.0 in c:\\anacondainterpreter\\lib\\site-packages (from scikit-learn->sklearn) (1.3.1)\nRequirement already satisfied: numpy>=1.11.0 in c:\\anacondainterpreter\\lib\\site-packages (from scikit-learn->sklearn) (1.16.5)\nBuilding wheels for collected packages: sklearn\n Building wheel for sklearn (setup.py): started\n Building wheel for sklearn (setup.py): finished with status 'done'\n Created wheel for sklearn: filename=sklearn-0.0-py2.py3-none-any.whl size=1321 sha256=c7bf0dd24aa1959991ae24642ed642cb07ec9571fc4546125f698a347e77fc19\n Stored in directory: C:\\Users\\p#an!\\AppData\\Local\\pip\\Cache\\wheels\\76\\03\\bb\\589d421d27431bcd2c6da284d5f2286c8e3b2ea3cf1594c074\nSuccessfully built sklearn\nInstalling collected packages: sklearn\nSuccessfully installed sklearn-0.0\nNote: you may need to restart the kernel to use updated packages.\n" ], [ "import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_iris", "_____no_output_____" ], [ "df = pd.read_csv(\"D:\\\\projects\\\\Employee_Compensation.csv\")\ndf", "_____no_output_____" ], [ "df.head()\ndf.shape", "_____no_output_____" ], [ "x=df.drop('Salaries',axis=1)\nx", "_____no_output_____" ], [ "#The line test_size=0.2 suggests that the test data should be 10% of the dataset\n\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.1)\nx_train.head()", "_____no_output_____" ], [ "x_train.shape", "_____no_output_____" ], [ "x_test.head()", "_____no_output_____" ], [ "x_test.shape", "_____no_output_____" ], [ "###############################################################\n###############################################################\n######Let’s take another example. We’ll use the IRIS dataset this time.#####\n#########################################################################\n\niris= load_iris()\nx,y=iris.data,iris.target\nx_train,x_test,y_train,y_test=train_test_split(x,y,train_size = 0.5,test_size=0.5,random_state=123)\ny_test", "_____no_output_____" ], [ "y_train", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression as lm\nmodel=lm().fit(x_train,y_train)\npredictions=model.predict(x_test)\nimport matplotlib.pyplot as plt\nplt.scatter(y_test,predictions)\nplt.xlabel('True Values')\nplt.ylabel('Predictions')\nplt.show()", "_____no_output_____" ] ] ]
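Note that in the compensation example above, the target `y` is passed to `train_test_split` without ever being defined, so that cell raises a `NameError` as written. A minimal corrected sketch, assuming `Salaries` (the column dropped from `x`) is the intended target of the same CSV:

```python
# Minimal sketch: define the target before splitting (assumes the same CSV as above).
import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.read_csv("D:\\projects\\Employee_Compensation.csv")
X = df.drop('Salaries', axis=1)   # features: every column except the target
y = df['Salaries']                # target: the column dropped from X
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
print(X_train.shape, X_test.shape)
```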
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a4d94a08cbeae7ff52d32b820be105b7db0964
22,005
ipynb
Jupyter Notebook
example/paraphrase/load-paraphrase.ipynb
zulkiflizaki/malaya
2358081bfa43aad57d9415a99f64c68f615d0cc4
[ "MIT" ]
1
2021-07-28T07:15:21.000Z
2021-07-28T07:15:21.000Z
example/paraphrase/load-paraphrase.ipynb
ahmed3991/malaya
d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c
[ "MIT" ]
null
null
null
example/paraphrase/load-paraphrase.ipynb
ahmed3991/malaya
d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c
[ "MIT" ]
null
null
null
31.480687
815
0.572824
[ [ [ "# Paraphrase", "_____no_output_____" ], [ "<div class=\"alert alert-info\">\n\nThis tutorial is available as an IPython notebook at [Malaya/example/paraphrase](https://github.com/huseinzol05/Malaya/tree/master/example/paraphrase).\n \n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-warning\">\n\nThis module only trained on standard language structure, so it is not save to use it for local language structure.\n \n</div>", "_____no_output_____" ] ], [ [ "%%time\n\nimport malaya\nfrom pprint import pprint", "CPU times: user 5.15 s, sys: 1.03 s, total: 6.17 s\nWall time: 7.43 s\n" ], [ "malaya.paraphrase.available_transformer()", "INFO:root:tested on 1k paraphrase texts.\n" ] ], [ [ "### Load T5 models", "_____no_output_____" ], [ "```python\ndef transformer(model: str = 't2t', quantized: bool = False, **kwargs):\n \"\"\"\n Load Malaya transformer encoder-decoder model to generate a paraphrase given a string.\n\n Parameters\n ----------\n model : str, optional (default='t2t')\n Model architecture supported. Allowed values:\n\n * ``'t2t'`` - Malaya Transformer BASE parameters.\n * ``'small-t2t'`` - Malaya Transformer SMALL parameters.\n * ``'t5'`` - T5 BASE parameters.\n * ``'small-t5'`` - T5 SMALL parameters.\n\n quantized : bool, optional (default=False)\n if True, will load 8-bit quantized model. \n Quantized model not necessary faster, totally depends on the machine.\n\n Returns\n -------\n result: malaya.model.tf.Paraphrase class\n \"\"\"\n```", "_____no_output_____" ] ], [ [ "t5 = malaya.paraphrase.transformer(model = 't5', quantized = True)", "WARNING:root:Load quantized model will cause accuracy drop.\n" ] ], [ [ "### Paraphrase simple string\n\nWe only provide `greedy_decoder` method for T5 models,\n\n```python\n@check_type\ndef greedy_decoder(self, string: str, split_fullstop: bool = True):\n \"\"\"\n paraphrase a string. Decoder is greedy decoder with beam width size 1, alpha 0.5 .\n\n Parameters\n ----------\n string: str\n split_fullstop: bool, (default=True)\n if True, will generate paraphrase for each strings splitted by fullstop.\n\n Returns\n -------\n result: str\n \"\"\"\n```", "_____no_output_____" ] ], [ [ "string = \"Beliau yang juga saksi pendakwaan kesembilan berkata, ia bagi mengelak daripada wujud isu digunakan terhadap Najib.\"\npprint(string)", "('Beliau yang juga saksi pendakwaan kesembilan berkata, ia bagi mengelak '\n 'daripada wujud isu digunakan terhadap Najib.')\n" ], [ "pprint(t5.greedy_decoder([string]))", "['Ini juga bagi mengelakkan kekeliruan yang biasa berlaku terhadap Najib.']\n" ], [ "string = \"\"\"\nPELETAKAN jawatan Tun Dr Mahathir Mohamad sebagai Pengerusi Parti Pribumi Bersatu Malaysia (Bersatu) ditolak di dalam mesyuarat khas Majlis Pimpinan Tertinggi (MPT) pada 24 Februari lalu.\n\nJusteru, tidak timbul soal peletakan jawatan itu sah atau tidak kerana ia sudah pun diputuskan pada peringkat parti yang dipersetujui semua termasuk Presiden, Tan Sri Muhyiddin Yassin.\n\nBekas Setiausaha Agung Bersatu Datuk Marzuki Yahya berkata, pada mesyuarat itu MPT sebulat suara menolak peletakan jawatan Dr Mahathir.\n\n\"Jadi ini agak berlawanan dengan keputusan yang kita sudah buat. Saya tak faham bagaimana Jabatan Pendaftar Pertubuhan Malaysia (JPPM) kata peletakan jawatan itu sah sedangkan kita sudah buat keputusan di dalam mesyuarat, bukan seorang dua yang buat keputusan.\n\n\"Semua keputusan mesti dibuat melalui parti. 
Walau apa juga perbincangan dibuat di luar daripada keputusan mesyuarat, ini bukan keputusan parti.\n\n\"Apa locus standy yang ada pada Setiausaha Kerja untuk membawa perkara ini kepada JPPM. Seharusnya ia dibawa kepada Setiausaha Agung sebagai pentadbir kepada parti,\" katanya kepada Harian Metro.\n\nBeliau mengulas laporan media tempatan hari ini mengenai pengesahan JPPM bahawa Dr Mahathir tidak lagi menjadi Pengerusi Bersatu berikutan peletakan jawatannya di tengah-tengah pergolakan politik pada akhir Februari adalah sah.\n\nLaporan itu juga menyatakan, kedudukan Muhyiddin Yassin memangku jawatan itu juga sah.\n\nMenurutnya, memang betul Dr Mahathir menghantar surat peletakan jawatan, tetapi ditolak oleh MPT.\n\n\"Fasal yang disebut itu terpakai sekiranya berhenti atau diberhentikan, tetapi ini mesyuarat sudah menolak,\" katanya.\n\nMarzuki turut mempersoal kenyataan media yang dibuat beberapa pimpinan parti itu hari ini yang menyatakan sokongan kepada Perikatan Nasional.\n\n\"Kenyataan media bukanlah keputusan rasmi. Walaupun kita buat 1,000 kenyataan sekali pun ia tetap tidak merubah keputusan yang sudah dibuat di dalam mesyuarat. Kita catat di dalam minit apa yang berlaku di dalam mesyuarat,\" katanya.\n\"\"\"", "_____no_output_____" ], [ "import re\n\n# minimum cleaning, just simply to remove newlines.\ndef cleaning(string):\n string = string.replace('\\n', ' ')\n string = re.sub(r'[ ]+', ' ', string).strip()\n return string\n\nstring = cleaning(string)\nsplitted = malaya.text.function.split_into_sentences(string)", "_____no_output_____" ], [ "' '.join(splitted[:2])", "_____no_output_____" ], [ "t5.greedy_decoder([' '.join(splitted[:2])])", "_____no_output_____" ] ], [ [ "### Load Transformer\n\nTo load 8-bit quantized model, simply pass `quantized = True`, default is `False`.\n\nWe can expect slightly accuracy drop from quantized model, and not necessary faster than normal 32-bit float model, totally depends on machine.", "_____no_output_____" ] ], [ [ "model = malaya.paraphrase.transformer(model = 'small-t2t')\nquantized_model = malaya.paraphrase.transformer(model = 'small-t2t', quantized = True)", "WARNING:tensorflow:From /Users/huseinzolkepli/Documents/Malaya/malaya/function/__init__.py:112: The name tf.gfile.GFile is deprecated. 
Please use tf.io.gfile.GFile instead.\n\n" ] ], [ [ "#### Predict using greedy decoder\n\n```python\ndef greedy_decoder(self, strings: List[str], **kwargs):\n \"\"\"\n Paraphrase strings using greedy decoder.\n\n Parameters\n ----------\n strings: List[str]\n\n Returns\n -------\n result: List[str]\n \"\"\"\n```", "_____no_output_____" ] ], [ [ "' '.join(splitted[:2])", "_____no_output_____" ], [ "model.greedy_decoder([' '.join(splitted[:2])])", "_____no_output_____" ], [ "quantized_model.greedy_decoder([' '.join(splitted[:2])])", "_____no_output_____" ] ], [ [ "#### Predict using beam decoder\n\n```python\ndef beam_decoder(self, strings: List[str], **kwargs):\n \"\"\"\n Paraphrase strings using beam decoder, beam width size 3, alpha 0.5 .\n\n Parameters\n ----------\n strings: List[str]\n\n Returns\n -------\n result: List[str]\n \"\"\"\n```", "_____no_output_____" ] ], [ [ "model.beam_decoder([' '.join(splitted[:2])])", "_____no_output_____" ], [ "quantized_model.beam_decoder([' '.join(splitted[:2])])", "_____no_output_____" ] ], [ [ "#### Predict using nucleus decoder\n\n```python\ndef nucleus_decoder(self, strings: List[str], top_p: float = 0.7, **kwargs):\n \"\"\"\n Paraphrase strings using nucleus sampling.\n\n Parameters\n ----------\n strings: List[str]\n top_p: float, (default=0.7)\n cumulative distribution and cut off as soon as the CDF exceeds `top_p`.\n\n Returns\n -------\n result: List[str]\n \"\"\"\n```", "_____no_output_____" ] ], [ [ "model.nucleus_decoder([' '.join(splitted[:2])])", "_____no_output_____" ], [ "quantized_model.nucleus_decoder([' '.join(splitted[:2])], top_p = 0.5)", "_____no_output_____" ] ] ]
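The cells above only paraphrase the first two sentences of the article; a minimal sketch for running the whole article through the same model, sentence by sentence (the batch size here is an illustrative choice, not a library requirement):

```python
# Minimal sketch: paraphrase every sentence in `splitted` and rejoin the outputs.
# Assumes `model` (the loaded transformer) and `splitted` from the cells above.
paraphrased = []
batch_size = 4  # illustrative only
for i in range(0, len(splitted), batch_size):
    batch = splitted[i:i + batch_size]
    paraphrased.extend(model.greedy_decoder(batch))

print(' '.join(paraphrased))
```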
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e7a4daf19500adc34c55c79177842416a1e4be90
21,051
ipynb
Jupyter Notebook
2_assignment_1/1_PM_assignment_1.ipynb
MathildeBreton/TBS_portfolio_management_2022
ab39e182d2f63aa5a3b1f58a47cad9186a3fe065
[ "MIT" ]
null
null
null
2_assignment_1/1_PM_assignment_1.ipynb
MathildeBreton/TBS_portfolio_management_2022
ab39e182d2f63aa5a3b1f58a47cad9186a3fe065
[ "MIT" ]
null
null
null
2_assignment_1/1_PM_assignment_1.ipynb
MathildeBreton/TBS_portfolio_management_2022
ab39e182d2f63aa5a3b1f58a47cad9186a3fe065
[ "MIT" ]
3
2022-03-23T16:50:47.000Z
2022-03-26T19:10:47.000Z
38.344262
497
0.57888
[ [ [ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-mKbU9Yz9vaaQz6cr13SrvvMy_4hXkk7?usp=sharing)\n# <strong> Investment Management 1</strong>\n---\n#<strong> Assignment 1</strong>\n**You have to use this Colab notebook to complete the assignment. To get started, create a copy of the notebook and save it on your Google drive.**\n\n&nbsp;\n\n**Deadline:** See C@mpus.\n\n&nbsp;\n\n**Total:** 100 Points\n\n&nbsp;\n\n**Late submission penalty:** there is a penalty-free grace period of two hours past the deadline. Any work that is submitted between 2 hour and 24 hours past the deadline will receive a 20% grade deduction. No other work will be accepted after that. C@mpus submission time will be used, not your local computer time. You can submit your completed assignment as many times as required before the deadline. Consider submitting your work early.\n\n&nbsp;\n\nThis assignment is a warm up to get you used to the Colab/ Jupyter notebook environment used in the course, and also to help you acquaint yourself with Python and relevant Python libraries. The assignment must be completed individually. The TBS plagarism rules apply.\n\nBy the end of this assignment, you should be able to:\n\n* fetch or load financial time series data into Colab \n\n* load data into `pandas` dataframe\n\n* perform basic operations with `pandas` dataframes\n\n* perform EDA (exploratory data analysis) on a given dataset\n\n&nbsp;\n\n\nYou will need to use the `numpy` and `pandas` libraries for necessary data manipulation. For more information, see:\nhttps://numpy.org/doc/stable/reference/\n\nhttps://pandas.pydata.org/\n\nYou can use a financial data library of your choice to access historical asset prices. One example is `yfinance`. It is used to access the financial data available on Yahoo Finance. Other widely used libraries are `pandas_datareader`, `yahoo_fin`, `ffn` (highly recommended), and `PyNance`. You are also free to use any Python data visualisation library of your choice (default is `matplotlib`). Some of the available options include: `Seaborn`, `Bokeh`, `ggplot`, `pygal`, and `Plotly`.", "_____no_output_____" ], [ "##**What to submit**\n\nSubmit a PDF file containing your code, outputs, and write-up from parts 1-4. You can produce a PDF of your Colab file by going to `File >>> Print` and selecting `save as PDF`. See the <a href=\"https://github.com/mscouse/TBS_investment_management/blob/main/Python_workspace.pdf\">Python Workspace</a> document in the course GitHub repository for more information. **Do not submit any other data files produced by your code.**\n\n&nbsp;\n\nYou also need to provide a link to your completed Colab file in your submission - see the **\"Colab link\"** section below.\n\nPlease note that you have to use Google Colab to complete this assignment. If you want to use Jupyter Notebook, complete the assignment and upload your Jupyter Notebook file to Google Colab for submission. ", "_____no_output_____" ], [ "##**Colab link**\nBefore submitting your work, make sure to include a link to your colab file below.\n\n**Colab Link:** _ _ _ _ _ _ _ _ _ _ _ _", "_____no_output_____" ], [ "##**Part 1: Obtaining financial data [10 pt]**\n\nThe purpose of this section is to get you used to the basics of Python and the Colab notebook environment. This includes importing data and working with variables, lists, dataframes, and functions.\n\nYour code will be checked for clarity and efficiency. 
If you have trouble with this part of the assignment, please review the introductory Colab notebooks stored in the GitHub course repository.", "_____no_output_____" ], [ "###Part 1.1. Loading historical stock prices (6pt)\n\nUsing any Python financial data library (e.g. `yfinance`) download daily adjusted close prices for 5 U.S. stocks of your choice for the last 5 years and store them in a `pandas` DataFrame object named `stock_prices`. Only stocks that are current constituents of the S&P 500 should be considered.\n\n&nbsp;\n\nAs the financial data library you use is not pre-installed in Google Colab by default, make sure to install it first by executing the following code:\n```\n!pip install library_name\n```\n\nThe !pip install <package> command looks for the latest version of the package and installs it. This only needs to be done once per session.\n\nIf you are unable to install the required library to fetch the data, you can prepare a separate CSV file containing the necessary data and use the following code to read it into a `pandas` dataframe object:\n```\nfrom google.colab import files\nfiles.upload()\n```\nfollowed by:\n```\nimport pandas as pd\nstock_prices = pd.read_csv('filename.csv')\n```\nNote that `filename.csv` should be changed to the exact name of your CSV file. ", "_____no_output_____" ] ], [ [ "# step 1: install required libraries using \"!pip install\"\n# YOUR CODE HERE", "_____no_output_____" ], [ "# step 2: import required libraries using \"import\"\n# YOUR CODE HERE", "_____no_output_____" ], [ "# step 3: fetch historical asset prices\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "###Part 1.2. Obtaining data on risk-free asset (4pt)\n\nUsing a financial data library (e.g. `yfinance`) of your choice, obtain daily data on the U.S. risk-free (1-month Treasury Bill) rate for the last 5 years and store them in a `pandas` DataFrame object named `rf`.\n\nIf you are unable to obtain the risk-free data using your chosen data library, you can prepare a separate CSV file containing the necessary data and use the steps discussed above to read it into a `pandas` dataframe object `rf`.", "_____no_output_____" ] ], [ [ "# step 4: fetch historical risk-free rate\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "##**Part 2: Visualising historical asset prices [10pt]**\n\nIn this part of the assignment, you will be manipulating dataframes containing historical asset prices using Pandas, and visualising them using a Python plotting library of your choice. The purpose of these visualisations is to help you explore the data and identify any patterns. \n\nOne robust visualisation library you may want to consider is `Matplotlib`. It is one of the most popular, and certainly the most widely used, multi-platform data visualisation library built on NumPy arrays in Python. It is used to generate simple yet powerful visualisations with just a few lines of code. It can be used in both interactive and non-interactive scripts.\n\nMake sure you import the required libraries first.", "_____no_output_____" ], [ "###Part 2.1. Raw stock prices (4pt)\n\nPlot the adjusted daily close prices for your stocks on the same diagram using a Python data visualisation library of your choice (default is matplotlib). Use the historical price data stored in the `stock_prices` dataframe created earlier. ", "_____no_output_____" ] ], [ [ "# step 5: import required data visualisation library\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "###Part 2.1. 
Rebased stock prices (6pt)\n\nTo make comparing and plotting different asset price series together easier, we often \"rebase\" all prices to a given initial value - e.g. 100. \n\nIn this section, you need to rebase the adjusted close prices for your stocks and plot them on the same diagram using a visualisation library of your choice (default is matplotlib). Note that some financial data libraries have handy built-in functions to perform this kind of task. Have a look at the `ffn` library <a href=\"https://pmorissette.github.io/ffn/\"> documentation </a>. ", "_____no_output_____" ] ], [ [ "# step 6: import required data visualisation library\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "##**Part 3: Absolute return and risk measures [40pt]**\n\nIn this part of the assignment, you will work with basic financial calculations and functions, such as computing and compounding investment returns, calculating averages, and computing measures of investment risk.\n\nI suggest you use `pandas` dataframes to store all necessary data. Colab includes an extension that renders Pandas dataframes into interactive tables that can be filtered, sorted, and explored dynamically.\n\nThe extension can be enabled by executing `%load_ext google.colab.data_table` in any code cell and disabled with `%unload_ext google.colab.data_table`.", "_____no_output_____" ], [ "### 3.1. Stock returns (6pt)\n\nIn asset management, we are often interested in the returns of a given time series. Therefore, in this part of the assignment, you need to compute **daily**, **weekly**, and **monthly** **arithmetic and logarithmic** returns for each chosen stock and store them in separate `pandas` dataframe objects named `returns` and `log_returns`, respectively.\n\nMake sure to drop any missing values and display the first 5 lines of the resulting dataframes. ", "_____no_output_____" ] ], [ [ "# step 7: import required data visualisation library\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "### 3.2. Distribution of returns (5pt)\n\nCheck what the return distributions look like by plotting a histogram of daily returns calculated in the previous section. You can use any Python visualisation library of your choice.\n\nPlot returns distributions for both, arithmetic and logarithmic returns. Discuss whether there are significant differences between the two. Also, provide a short explanation on when and why we use log returns, rather than normal returns.", "_____no_output_____" ] ], [ [ "# step 8: import required data visualisation library\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "**Your response/ short explanation:** ________HERE_________\n", "_____no_output_____" ], [ "### 3.3. Correlation matrix (5pt)\n\nUsing daily arithmetic stock returns, compute pairwise correlations between your 5 assets and plot a correlation matrix. \n\n(optional) You may want to have a look at the `heatmap()` method in the `Seaborn` visualisation library. It allows you to create elegant correlation heatmaps easily. ", "_____no_output_____" ] ], [ [ "# step 9: import required data visualisation library\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "### 3.4. Cumulative returns (8pt)\n\nUsing the arithmetic daily returns, compute cumulative returns for each stock over the last 1–, 3-, and 5- year periods and display them as values. 
Once done, annualise the resulting cumulative daily returns for each stock and display them as well.", "_____no_output_____" ] ], [ [ "# step 10: import required data visualisation library\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "### 3.5. Arithmetic average returns (8pt)\n\nCompute arithmetic average daily returns for each stock, annualise them, and display the resulting values. \n\nAs there are typically 252 trading days in a year, to annualise a daily return $r_d$ we use:\n\n$$ (1+r_d)^{252} - 1$$", "_____no_output_____" ] ], [ [ "# step 11: import required data visualisation library\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "### 3.6. Standard deviation (8pt)\n\nUsing the stock returns calculated earlier, compute standard deviations of daily returns for each stock over the last 1–, 3-, and 5- year periods and display them.\n\nOnce done, repeat the calculation of standard deviations but using monthly returns instead. Display the resulting values.\n\nExplain what the best way to annualise standard deviations is.", "_____no_output_____" ] ], [ [ "# step 12: import required data visualisation library\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "**Your response/ short explanation:** ________HERE_________", "_____no_output_____" ], [ "##**Part 4: Risk-adjusted performance evaluation [40pt]**\n\nAs part of the course we considered several risk-adjusted performance evaluation measures. In this section of the assignment you are asked to compute one of them - the Sharpe ratio:\n\n\n $$Sharpe\\ ratio = \\frac{E[{r_p}-{r_f}]}{\\sqrt{[{r_p}-{r_f}]}}$$\n", "_____no_output_____" ], [ "### Part 4.1. Calculating the Sharpe measure [10pt]\n\nUsing previously calculated monthly stock returns and the corresponding risk-free interest rates, compute Sharpe ratios for your selected stocks for the last 1-, 3-, and 5-years. Annualise the calculated Sharpe measures and report them as values.", "_____no_output_____" ] ], [ [ "# step 13: import required data visualisation library\n# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "### Part 4.2. Sharpe measure function [30pt]\n\nDefine a new Python function `sharpe(ticker_1, ticker_2, ticker_3)` which:\n\n* accepts 3 stock tickers as the only arguments;\n* fetches historical daily prices for the 3 selected tickers over the last 3 years;\n* fetches U.S. treasury bill (1-month T-Bill rates) rates over the corresponding 3 year period;\n* computes daily returns and excess returns for each stock;\n* computes daily average excess returns for each stock;\n* computes standard deviations of excess daily returns for each stock;\n* compute Sharpe ratios based on the daily average excess returns and standard deviations of excess retunrs;\n* annualises the resulting Sharpe ratio (by multiplying the daily Sharpe by $\\sqrt[2]{252}$);\n* returns the annualised Sharpe ratios for the 3 stocks.\n\nAssume that all libraries required by your function are already preinstalled and imported (i.e. do not include any `import` statements within your function). However, make sure to import all the required libraries in the code cell below, directly before the function. 
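One possible shape for such a function is sketched here; it is not a model answer. It uses `yfinance` (one of the suggested libraries) for prices and, as a labelled simplification, a constant placeholder risk-free rate where the brief asks for the 1-month T-Bill series, so that part still needs to be swapped for the actual `rf` data from Part 1.2.

```python
# Sketch only: yfinance prices plus a constant placeholder risk-free rate.
import numpy as np
import yfinance as yf

def sharpe_sketch(ticker_1, ticker_2, ticker_3, rf_annual=0.02):
    """Annualised Sharpe ratios over the last 3 years.
    rf_annual is a placeholder; the assignment expects the 1-month T-Bill series."""
    tickers = [ticker_1, ticker_2, ticker_3]
    prices = yf.download(tickers, period='3y', auto_adjust=False)['Adj Close']
    returns = prices.pct_change().dropna()        # daily arithmetic returns
    rf_daily = (1 + rf_annual) ** (1 / 252) - 1   # convert the annual rate to daily
    excess = returns - rf_daily                   # daily excess returns per ticker
    return (excess.mean() / excess.std()) * np.sqrt(252)

print(sharpe_sketch('AAPL', 'MSFT', 'JPM'))
```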
", "_____no_output_____" ] ], [ [ "# step 14: install required libraries and import as needed\n\ndef sharpe(ticker_1, ticker_2, ticker_3):\n \"\"\"This function returns annualised Sharpe \n ratios for the entered tickers using last 3 years\n of stock data from Yahoo finance\"\"\"\n # YOUR CODE HERE\n # YOUR CODE HERE\n return # YOUR CODE HERE\n\n# execute your functions using AAPL, MSFT, and JPM as arguments", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7a4dcb2608ef9579af9b773e585c51d3b8e0d69
169,235
ipynb
Jupyter Notebook
12_case_study_of_evaluating_attention_in_COCO_dataset/codes/EDA_COCO_Dataset.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
2
2019-08-24T07:20:35.000Z
2020-03-27T08:16:59.000Z
12_case_study_of_evaluating_attention_in_COCO_dataset/codes/EDA_COCO_Dataset.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
null
null
null
12_case_study_of_evaluating_attention_in_COCO_dataset/codes/EDA_COCO_Dataset.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
3
2019-06-21T09:34:32.000Z
2019-09-19T10:43:07.000Z
89.071053
38,230
0.77614
[ [ [ "import numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\n%matplotlib inline\r\n\r\nimport torch\r\nimport torchvision\r\nimport torch.nn as nn\r\nimport torchvision.datasets as dset\r\nimport torchvision.transforms as transforms\r\nfrom torch.utils.data import Dataset,DataLoader\r\nfrom tqdm import tqdm as tqdm\r\nimport unicodedata\r\nimport pycocotools\r\n\r\nimport re\r\nimport random \r\nimport itertools\r\nimport time\r\n\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\ndir = \"/content/drive/MyDrive/cocodataset/\"", "_____no_output_____" ], [ "import os \r\nimport sys\r\nfrom pycocotools.coco import COCO\r\nimport urllib\r\nimport zipfile", "_____no_output_____" ], [ "os.makedirs('/content/opt' , exist_ok=True)\r\nos.chdir( '/content/opt' )\r\n!git clone 'https://github.com/cocodataset/cocoapi.git'", "fatal: destination path 'cocoapi' already exists and is not an empty directory.\n" ], [ "os.chdir('/content/opt/cocoapi')\r\n\r\n# Download the annotation : \r\nannotations_trainval2014 = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip'\r\nimage_info_test2014 = 'http://images.cocodataset.org/annotations/image_info_test2014.zip'\r\n\r\n\r\nurllib.request.urlretrieve(annotations_trainval2014 , filename = 'annotations_trainval2014.zip' )\r\nurllib.request.urlretrieve(image_info_test2014 , filename= 'image_info_test2014.zip' )\r\n", "_____no_output_____" ], [ "with zipfile.ZipFile('annotations_trainval2014.zip' , 'r') as zip_ref:\r\n zip_ref.extractall( '/content/opt/cocoapi' ) \r\n\r\ntry:\r\n os.remove( 'annotations_trainval2014.zip' )\r\n print('zip removed')\r\nexcept:\r\n None\r\n\r\nwith zipfile.ZipFile('image_info_test2014.zip' , 'r') as zip_ref:\r\n zip_ref.extractall( '/content/opt/cocoapi' ) \r\n\r\ntry:\r\n os.remove( 'image_info_test2014.zip' )\r\n print('zip removed')\r\nexcept:\r\n None\r\n", "zip removed\nzip removed\n" ], [ "import matplotlib.pyplot as plt \r\nimport skimage.io as io \r\nimport numpy as np \r\n%matplotlib inline", "_____no_output_____" ], [ "# #Pick a random annotation id and display img of that annotation :\r\n# ann_id = np.random.choice( ids )\r\n# img_id = coco.anns[ann_id]['image_id']\r\n# img = coco.loadImgs( img_id )[0]\r\n# url = img['coco_url']\r\n# print(url)\r\n# I = io.imread(url)\r\n# plt.imshow(I)\r\n\r\n# # Display captions for that annotation id :\r\n# ann_ids = coco_caps.getAnnIds( img_id )\r\n# print('Number of annotations i.e captions for the image: ' , ann_ids)\r\n# print()\r\n# anns = coco_caps.loadAnns( ann_ids )\r\n# coco_caps.showAnns(anns)", "_____no_output_____" ], [ "os.chdir('/content/opt/cocoapi')\r\n\r\ntrain2014 = 'http://images.cocodataset.org/zips/train2014.zip'\r\n#test2014 = 'http://images.cocodataset.org/zips/test2014.zip'\r\n#val2014 = 'http://images.cocodataset.org/zips/val2014.zip'\r\n\r\nurllib.request.urlretrieve( train2014 , 'train2014' )\r\n#urllib.request.urlretrieve( test2014 , 'test2014' )\r\n#urllib.request.urlretrieve( val2014 , 'val2014' )", "_____no_output_____" ], [ "os.chdir('/content/opt/cocoapi')\r\nwith zipfile.ZipFile( 'train2014' , 'r' ) as zip_ref:\r\n zip_ref.extractall( 'images' )\r\n\r\ntry:\r\n os.remove( 'train2014' )\r\n print('zip removed')\r\nexcept:\r\n None", "zip removed\n" ], [ "import glob\r\nfiles = glob.glob(\"/content/opt/cocoapi/images/train2014/*.jpg\",recursive=True)\r\nlen(files)", "_____no_output_____" ], [ "data_transform = 
transforms.Compose([transforms.Resize((224,224)),transforms.ToTensor()])\r\n\r\ncoco_train_caption = dset.CocoCaptions(root=\"/content/opt/cocoapi/images/train2014\",annFile=\"/content/opt/cocoapi/annotations/captions_train2014.json\",transform=data_transform)", "loading annotations into memory...\nDone (t=1.85s)\ncreating index...\nindex created!\n" ], [ "img_caption,target_caption = coco_train_caption[1]", "_____no_output_____" ], [ "data_transform = transforms.Compose([transforms.Resize((224,224)),transforms.ToTensor()])\r\n\r\ncoco_train_detection = dset.CocoDetection(root=\"/content/opt/cocoapi/images/train2014\",annFile=\"/content/opt/cocoapi/annotations/instances_train2014.json\")#,transform=data_transform)", "loading annotations into memory...\nDone (t=23.17s)\ncreating index...\nindex created!\n" ], [ "img_detection,target_detection = coco_train_detection[1]", "_____no_output_____" ], [ "img_caption.size()\r\noriginal_shape = np.array(img_detection).shape\r\nprint(original_shape)", "(426, 640, 3)\n" ], [ "def bbox_transform_coco2cv(b,wratio,hratio):\r\n b = [b[0]*wratio,b[1]*hratio,b[2]*wratio,b[3]*hratio]\r\n temp = [b[0],b[1]]\r\n temp.append(b[0]+b[2])\r\n temp.append(b[1]+b[3])\r\n for i in range(len(temp)):\r\n temp[i] = int(temp[i])\r\n temp2 = [temp[1],temp[0],temp[3],temp[2]]\r\n return np.array(temp).astype(int),temp2 #(x1,y1,x2,y2)", "_____no_output_____" ] ], [ [ "Preprocessing captions", "_____no_output_____" ] ], [ [ "# Turn a Unicode string to plain ASCII, thanks to\r\n# https://stackoverflow.com/a/518232/2809427\r\ndef unicodeToAscii(s):\r\n return ''.join(\r\n c for c in unicodedata.normalize('NFD', s)\r\n if unicodedata.category(c) != 'Mn'\r\n )\r\n\r\n# Lowercase, trim, and remove non-letter characters\r\ndef normalizeString(s):\r\n s = unicodeToAscii(s.lower().strip())\r\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\r\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\r\n return s", "_____no_output_____" ], [ "SOS_token = 1\r\nEOS_token = 2\r\nPAD_token = 0\r\nUNK_token = 3\r\n\r\nclass Vocabulary:\r\n \r\n def __init__(self,name):\r\n self.word2index = {\"SOS\":1,\"EOS\":2,\"UNK\":3,\"PAD\":0}\r\n self.index2word = {1:\"SOS\",2:\"EOS\",3:\"UNK\",0:\"PAD\"}\r\n self.word2count = {}\r\n self.nwords = 4\r\n \r\n def addSentence(self,sentence):\r\n for word in sentence.split(' '):\r\n self.addWord(word)\r\n \r\n def addWord(self,word):\r\n if word not in list(self.word2index.keys()):\r\n self.word2index[word] = self.nwords\r\n self.index2word[self.nwords] = word\r\n self.word2count[word] = 1\r\n self.nwords += 1\r\n else:\r\n self.word2count[word] += 1\r\n \r\n def save(self,word2index_dic = 'word2index_dic', index2word_dic = 'index2word_dic',\r\n word2count_dic = 'word2count_dic'):\r\n\r\n with open('/content/Save/'+word2index_dic+'.p', 'wb') as fp:\r\n pickle.dump(self.word2index, fp, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n with open('/content/Save/'+index2word_dic+'.p', 'wb') as fp:\r\n pickle.dump(self.index2word, fp, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n with open('/content/Save/'+word2count_dic+'.p', 'wb') as fp:\r\n pickle.dump(self.word2count, fp, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n def load(self, word2index_dic = 'word2index_dic', index2word_dic = 'index2word_dic',\r\n word2count_dic = 'word2count_dic'):\r\n \r\n with open('/content/Save/'+word2index_dic+'.p', 'rb') as fp:\r\n self.word2index = pickle.load(fp)\r\n \r\n with open('/content/Save/'+index2word_dic+'.p', 'rb') as fp:\r\n self.index2word = pickle.load(fp)\r\n \r\n with 
open('/content/Save/'+word2count_dic+'.p', 'rb') as fp:\r\n self.word2count = pickle.load(fp)\r\n \r\n self.nwords = len(self.word2index)\r\n\r\n \r\nvoc = Vocabulary('COCO_TRAIN')\r\n#voc.load()", "_____no_output_____" ], [ "import pickle\r\n# for _,target in tqdm.tqdm(coco_train_caption):\r\n# for sen in target:\r\n# voc.addSentence(normalizeString(sen))\r\n \r\n# voc.save()\r\nvoc.load()", "_____no_output_____" ], [ "\r\nclass COCO14Dataset(Dataset):\r\n def __init__(self,coco_caption,coco_detection,voc,transforms=None):\r\n self.coco_caption = coco_caption\r\n self.coco_detection = coco_detection\r\n self.voc = voc\r\n self.transforms = transforms\r\n def __len__(self):\r\n return len(self.coco_caption)\r\n def __getitem__(self,idx):\r\n img,target = self.coco_caption[idx]\r\n img_detection,detection_target = self.coco_detection[idx]\r\n \r\n \r\n original_shape = np.array(img_detection).shape\r\n\r\n lbl = normalizeString(random.choice(target))\r\n label = []\r\n for s in lbl.split(' '):\r\n if s in list(voc.word2count.keys()):\r\n label.append(voc.word2index[s])\r\n else:\r\n label.append(UNK_token)\r\n label = [SOS_token]+label +[EOS_token]\r\n \r\n bounding_box = []\r\n bounding_box_category = []\r\n wratio = (224*1.0)/original_shape[0]\r\n hratio = (224*1.0)/original_shape[1]\r\n\r\n for i in range(len(detection_target)):\r\n bnding_box = detection_target[i]['bbox']\r\n b_category = detection_target[i]['category_id']\r\n bbox_plt ,_ = bbox_transform_coco2cv(bnding_box,hratio,wratio)\r\n bounding_box.append(list(bbox_plt))\r\n bounding_box_category.append(b_category)\r\n\r\n\r\n return img, label,bounding_box,bounding_box_category", "_____no_output_____" ], [ "batch_size = 32\r\ntrain_dset = COCO14Dataset(coco_train_caption,coco_train_detection,voc,transforms=data_transform)\r\n\r\ndef collate_fn(batch):\r\n data = [item[0] for item in batch]\r\n images = torch.stack(data,0)\r\n \r\n label = [item[1] for item in batch]\r\n max_target_len = max([len(indexes) for indexes in label])\r\n padList = list(itertools.zip_longest(*label, fillvalue = 0))\r\n lengths = torch.tensor([len(p) for p in label])\r\n padVar = torch.LongTensor(padList)\r\n bounding_box = [item[2] for item in batch]\r\n bounding_box_category = [item[3] for item in batch]\r\n \r\n \r\n m = []\r\n for i, seq in enumerate(padVar):\r\n #m.append([])\r\n tmp = []\r\n for token in seq:\r\n if token == 0:\r\n tmp.append(int(0))\r\n else:\r\n tmp.append(1)\r\n m.append(tmp)\r\n m = torch.tensor(m)\r\n \r\n return images, padVar, m, max_target_len,bounding_box,bounding_box_category\r\n\r\ntrain_loader=DataLoader(train_dset,batch_size = batch_size, num_workers = 8,shuffle = False,\r\n collate_fn = collate_fn, drop_last=True)", "_____no_output_____" ] ], [ [ "## Exploratory Data Analysis on COCO datasets\r\n\r\n", "_____no_output_____" ] ], [ [ "import json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nimport pdb\nimport os\nfrom pycocotools.coco import COCO\nfrom skimage import io\n\nfont = {'family' : 'Arial',\n 'weight' : 'normal',\n 'size' : 12}\n\nmatplotlib.rc('font', **font)", "_____no_output_____" ] ], [ [ "## Setting the root directory and annotation json file", "_____no_output_____" ] ], [ [ "\n# src_root = '../../../../datasets/voc2012/'\n# src_subset = 'images/'\n# src_file = src_root+'annotations/instances_train2012.json'\n# src_desc = 'train2017_voc12' \n\nsrc_root = '../../../../datasets/coco/'\nsrc_subset = '/content/opt/cocoapi/images/train2014'\nsrc_file = 
'/content/opt/cocoapi/annotations/instances_train2014.json'\nsrc_desc = 'train2014_coco' # a name (identifier) for the dataset\nannotation_file = '/content/opt/cocoapi/annotations/captions_train2014.json'\n\ncoco_obj = COCO(src_file)", "loading annotations into memory...\nDone (t=20.30s)\ncreating index...\nindex created!\n" ], [ "# Reading the json file\nwith open(src_file, 'r') as f:\n root = json.load(f)\n\nroot.keys()", "_____no_output_____" ], [ "with open(annotation_file, 'r') as f:\r\n caption = json.load(f)\r\n\r\ncaption.keys()", "_____no_output_____" ], [ "# caption['annotations']", "_____no_output_____" ], [ "# root['annotations']\r\nroot['categories']", "_____no_output_____" ] ], [ [ "## Basic High Level Information", "_____no_output_____" ] ], [ [ "# Basic High Level Information\n\nn_images = len(root['images'])\nn_boxes = len(root['annotations'])\nn_categ = len(root['categories'])\n\n# height, width\nheights = [x['height'] for x in root['images']]\nwidths = [x['width'] for x in root['images']]\n\n\n\n\nprint('Dataset Name: ',src_desc)\nprint('Number of images: ',n_images)\nprint('Number of bounding boxes: ', n_boxes)\nprint('Number of classes: ', n_categ)\nprint('Max min avg height: ', max(heights), min(heights), int(sum(heights)/len(heights)))\nprint('Max min avg width: ', max(widths), min(widths), int(sum(widths)/len(widths)))", "Dataset Name: train2014_coco\nNumber of images: 82783\nNumber of bounding boxes: 604907\nNumber of classes: 80\nMax min avg height: 640 51 483\nMax min avg width: 640 59 578\n" ] ], [ [ "## Distribution of objects across images", "_____no_output_____" ] ], [ [ "# Objects per image distribution\n\nimg2nboxes = {} # mapping \"image id\" to \"number of boxes\"\n\nfor ann in root['annotations']:\n img_id = ann['image_id']\n \n if img_id in img2nboxes.keys():\n img2nboxes[img_id] += 1\n else:\n img2nboxes[img_id] = 1\n\nnboxes_list = list(img2nboxes.values())\n\nmin_nboxes = min(nboxes_list)\nmax_nboxes = max(nboxes_list)\navg_nboxes = int(sum(nboxes_list)/len(nboxes_list))", "_____no_output_____" ], [ "out = pd.cut(nboxes_list, bins=np.arange(0,max_nboxes+10,10), include_lowest=True)\n\ncounts = out.value_counts().values\nlabels = [(int(i.left),int(i.right)) for i in out.value_counts().index.categories]\n\ngraph_xind = [i[0] for i in labels] \nticks = [ '('+str(i[0])+','+ str(i[1])+')' for i in labels]\n\nplt.figure(figsize=(10,5))\nplt.bar(graph_xind, counts, tick_label=ticks, width=5)\nplt.xlabel('Number of objects')\nplt.ylabel('Number of images')\nplt.title('Number of objects distribution over the dataset')\nplt.show()\n\nprint(\"Number of images having atleast one box: \", len(nboxes_list))\nprint(\"Min number of boxes per image: \", min_nboxes)\nprint(\"Max number of boxes per image: \", max_nboxes)\nprint(\"Avg number of boxes per image: \", avg_nboxes)", "_____no_output_____" ] ], [ [ "## Class wise distribution of objects", "_____no_output_____" ] ], [ [ "# Class distribution\nclass2nboxes = {} \n\nfor ann in root['annotations']:\n cat_id = ann['category_id']\n \n if cat_id in class2nboxes.keys():\n class2nboxes[cat_id] += 1\n else:\n class2nboxes[cat_id] = 1\n\nsorted_c2nb = [(k,v)for k, v in sorted(class2nboxes.items(), reverse=True, key=lambda item: item[1])]\n\n# top 20 classes\ntop = min(len(sorted_c2nb),20)\n\n# to plot\ny = [i[1] for i in sorted_c2nb[:top]]\nx = [i[0] for i in sorted_c2nb[:top]]\n\nplt.figure(figsize=(10,5))\nplt.bar(np.arange(len(y)),y, width=0.5,tick_label=x,color='orange')\nplt.xlim(-0.5,len(y))\nplt.xlabel('Category 
Id')\nplt.ylabel('Number of boxes')\nplt.title('Class distribution (decreasing order)')\nplt.show()\n\n\n\ncateg_map = {x['id']: x['name'] for x in root['categories']}\nfor k in categ_map.keys():\n print(k,'->',categ_map[k])", "_____no_output_____" ] ], [ [ "## Class wise bounding box area distribution", "_____no_output_____" ] ], [ [ "# Class wise bounding box area distribution\n\nbbox_areas = {} # key: class index, value -> a list of bounding box areas\n\nfor ann in root['annotations']:\n area = ann['area']\n cat_id = ann['category_id']\n \n if area <= 0.0:\n continue\n \n if cat_id in bbox_areas.keys():\n bbox_areas[cat_id].append(area)\n else:\n bbox_areas[cat_id] = [area]\n\n\nbbox_avg_areas = []\n\nfor cat in bbox_areas.keys():\n areas = bbox_areas[cat]\n avg_area = int(sum(areas)/len(areas))\n \n bbox_avg_areas.append((cat,avg_area))\n\nbbox_avg_areas = sorted(bbox_avg_areas, key = lambda x: x[1])", "_____no_output_____" ], [ "top = min(10, len(bbox_avg_areas))\n\nplt.figure(figsize=(10,10))\n\ny = [item[1] for item in bbox_avg_areas[-top:]]\nx = [item[0] for item in bbox_avg_areas[-top:]]\n\ny2 = [item[1] for item in bbox_avg_areas[:top]]\nx2 = [item[0] for item in bbox_avg_areas[:top]]\n\nplt.subplot(211)\nplt.bar(np.arange(len(y)),y, width=0.5,tick_label=x,color='green')\nplt.xlim(-0.5,len(y))\nplt.xlabel('Category Id')\nplt.ylabel('Average bounding box area in pixel squared')\nplt.title('Top '+str(top)+' Classes with highest avg bounding box size')\n\n\nplt.subplot(212)\nplt.bar(np.arange(len(y2)),y2, width=0.5,tick_label=x2,color='red')\nplt.xlim(-0.5,len(y2))\nplt.xlabel('Category Id')\nplt.ylabel('Average bounding box area in pixel squared')\nplt.title('Top '+str(top)+' Classes with lowest avg bounding box size')\nplt.show()\n\n\n\n\ncateg_map = {x['id']: x['name'] for x in root['categories']}\nfor k in categ_map.keys():\n print(k,'->',categ_map[k])", "_____no_output_____" ], [ "# root['annotations']", "_____no_output_____" ] ], [ [ "## Class wise 'occurance' in Captions\r\n", "_____no_output_____" ] ], [ [ "# caption['annotations']", "_____no_output_____" ], [ "caption_list=[]\r\nfor x in caption['annotations']:\r\n caption_list.append(x['caption'])", "_____no_output_____" ], [ "caption_list[1:8]", "_____no_output_____" ], [ "len(caption_list)", "_____no_output_____" ], [ "caption_list[9]", "_____no_output_____" ], [ "if 'toilet' in caption_list[9]:\r\n print(\"bro\")", "bro\n" ], [ "class_list=[]\r\nfor x in root['categories']:\r\n class_list.append(x['name'])", "_____no_output_____" ], [ "# class_list", "_____no_output_____" ], [ "cnt_frequency=[]\r\nfor class_name in class_list:\r\n cnt=0;\r\n for caption in caption_list:\r\n if class_name in caption:\r\n cnt +=1\r\n cnt_frequency.append(cnt);\r\n print(class_name + ' : ' + str(cnt))", "person : 17269\nbicycle : 2086\ncar : 14819\nmotorcycle : 6428\nairplane : 4261\nbus : 12347\ntrain : 11163\ntruck : 5935\nboat : 5925\ntraffic light : 1666\nfire hydrant : 3439\nstop sign : 2866\nparking meter : 1096\nbench : 6682\nbird : 5269\ncat : 16140\ndog : 14582\nhorse : 8828\nsheep : 3606\ncow : 4583\nelephant : 7059\nbear : 7909\nzebra : 5890\ngiraffe : 8205\nbackpack : 587\numbrella : 6949\nhandbag : 64\ntie : 4108\nsuitcase : 1923\nfrisbee : 3842\nskis : 3986\nsnowboard : 3314\nsports ball : 2\nkite : 6329\nbaseball bat : 1332\nbaseball glove : 146\nskateboard : 9781\nsurfboard : 4904\ntennis racket : 2134\nbottle : 2012\nwine glass : 794\ncup : 2770\nfork : 1169\nknife : 1308\nspoon : 629\nbowl : 4575\nbanana : 4565\napple : 
1891\nsandwich : 4011\norange : 4316\nbroccoli : 2321\ncarrot : 1545\nhot dog : 1863\npizza : 8913\ndonut : 1983\ncake : 6693\nchair : 5922\ncouch : 4268\npotted plant : 183\nbed : 9649\ndining table : 451\ntoilet : 7268\ntv : 875\nlaptop : 6623\nmouse : 1137\nremote : 1856\nkeyboard : 2152\ncell phone : 3578\nmicrowave : 1292\noven : 2136\ntoaster : 224\nsink : 6118\nrefrigerator : 2449\nbook : 2690\nclock : 7059\nvase : 3909\nscissors : 1286\nteddy bear : 3373\nhair drier : 2\ntoothbrush : 827\n" ], [ "cat_list=[]\r\nfor k in categ_map.keys():\r\n cat_list.append(k)", "_____no_output_____" ], [ "word_cnt_list=[]\r\nfor k in cnt_frequency:\r\n word_cnt_list.append(k)", "_____no_output_____" ], [ "word_cnt_tuple_list=[]\r\nfor i in range(len(cat_list)):\r\n a = cat_list[i]\r\n b = word_cnt_list[i]\r\n word_cnt_tuple_list.append((a,b))\r\n\r\nword_cnt_tuple_list = sorted(word_cnt_tuple_list, key = lambda x: x[1])", "_____no_output_____" ], [ "top = min(10, len(word_cnt_tuple_list))\r\n\r\nplt.figure(figsize=(10,10))\r\n\r\ny = [item[1] for item in word_cnt_tuple_list[-top:]]\r\nx = [item[0] for item in word_cnt_tuple_list[-top:]]\r\n\r\ny2 = [item[1] for item in word_cnt_tuple_list[:top]]\r\nx2 = [item[0] for item in word_cnt_tuple_list[:top]]\r\n\r\nplt.subplot(211)\r\nplt.bar(np.arange(len(y)),y, width=0.5,tick_label=x,color='green')\r\nplt.xlim(-0.5,len(y))\r\nplt.xlabel('Category Id')\r\nplt.ylabel('nos. of captions')\r\nplt.title('Top '+str(top)+' Classes with highest occurance in captions')\r\n\r\n\r\nplt.subplot(212)\r\nplt.bar(np.arange(len(y2)),y2, width=0.5,tick_label=x2,color='red')\r\nplt.xlim(-0.5,len(y2))\r\nplt.xlabel('Category Id')\r\nplt.ylabel('nos. of captions')\r\nplt.title('Top '+str(top)+' Classes with lowest occurance in captions')\r\nplt.show()\r\n\r\n\r\ncateg_map = {x['id']: x['name'] for x in root['categories']}\r\nfor k in categ_map.keys():\r\n print(k,'->',categ_map[k])", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a4eb89e87bfe627e090c193efb0230b1e01c7f
66,215
ipynb
Jupyter Notebook
Lesson 07/exercise/lstm_spam_ham.ipynb
TrainingByPackt/Deep-Learning-for-Natural-Language-Processing
d9183b2a01fef044963e7ad967c6373b3887f0d1
[ "MIT" ]
29
2019-05-15T22:57:56.000Z
2022-03-17T02:11:33.000Z
Lesson 07/exercise/Exercise 27_LSTM_spam_ham.ipynb
TrainingByPackt/Deep-Learning-for-Natural-Language-Processing
d9183b2a01fef044963e7ad967c6373b3887f0d1
[ "MIT" ]
1
2021-02-07T22:52:55.000Z
2021-07-12T06:10:50.000Z
Lesson 07/exercise/lstm_spam_ham.ipynb
TrainingByPackt/Deep-Learning-for-Natural-Language-Processing
d9183b2a01fef044963e7ad967c6373b3887f0d1
[ "MIT" ]
42
2019-02-17T23:04:07.000Z
2022-01-16T05:47:32.000Z
30.097727
196
0.312437
[ [ [ "import pandas as pd\nimport numpy as np\nfrom keras.models import Model, Sequential\nfrom keras.layers import LSTM, Dense,Embedding\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing import sequence", "Using TensorFlow backend.\n" ], [ "df = pd.read_csv(\"spam.csv\", encoding=\"latin\")", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df = df[[\"v1\",\"v2\"]]", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df[\"v1\"].value_counts()", "_____no_output_____" ], [ "# get X as feature and Y as target ", "_____no_output_____" ], [ "lab_map = {\"ham\":0, \"spam\":1}", "_____no_output_____" ], [ "Y = df[\"v1\"].map(lab_map).values", "_____no_output_____" ], [ "Y", "_____no_output_____" ], [ "X = df[\"v2\"].values", "_____no_output_____" ], [ "X", "_____no_output_____" ], [ "# convert to sequences\n# pad sequence\n# train", "_____no_output_____" ], [ "max_words = 100\nmytokenizer = Tokenizer(nb_words=max_words,lower=True, split=\" \")", "/Users/tanuj.jain/anaconda3/envs/packt_env/lib/python3.6/site-packages/keras_preprocessing/text.py:178: UserWarning: The `nb_words` argument in `Tokenizer` has been renamed `num_words`.\n warnings.warn('The `nb_words` argument in `Tokenizer` '\n" ], [ "mytokenizer.fit_on_texts(X)", "_____no_output_____" ], [ "text_tokenized = mytokenizer.texts_to_sequences(X)", "_____no_output_____" ], [ "text_tokenized", "_____no_output_____" ], [ "max_len = 50\nsequences = sequence.pad_sequences(text_tokenized,maxlen=max_len)", "_____no_output_____" ], [ "sequences", "_____no_output_____" ], [ "# Build model", "_____no_output_____" ], [ "model = Sequential()\nmodel.add(Embedding(max_words, 20, input_length=max_len))\nmodel.add(LSTM(64))\nmodel.add(Dense(1, activation=\"sigmoid\"))\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])", "_____no_output_____" ], [ "model.fit(sequences,Y,batch_size=128,epochs=10,\n validation_split=0.2)", "Train on 4457 samples, validate on 1115 samples\nEpoch 1/10\n4457/4457 [==============================] - 2s 560us/step - loss: 0.5013 - acc: 0.8503 - val_loss: 0.3872 - val_acc: 0.8700\nEpoch 2/10\n4457/4457 [==============================] - 2s 362us/step - loss: 0.3598 - acc: 0.8649 - val_loss: 0.3011 - val_acc: 0.8700\nEpoch 3/10\n4457/4457 [==============================] - 2s 356us/step - loss: 0.2269 - acc: 0.9100 - val_loss: 0.1555 - val_acc: 0.9525\nEpoch 4/10\n4457/4457 [==============================] - 2s 354us/step - loss: 0.1369 - acc: 0.9560 - val_loss: 0.1162 - val_acc: 0.9570\nEpoch 5/10\n4457/4457 [==============================] - 2s 353us/step - loss: 0.1140 - acc: 0.9614 - val_loss: 0.1067 - val_acc: 0.9605\nEpoch 6/10\n4457/4457 [==============================] - 2s 356us/step - loss: 0.1062 - acc: 0.9628 - val_loss: 0.1026 - val_acc: 0.9587\nEpoch 7/10\n4457/4457 [==============================] - 2s 384us/step - loss: 0.0995 - acc: 0.9663 - val_loss: 0.0984 - val_acc: 0.9614\nEpoch 8/10\n4457/4457 [==============================] - 2s 422us/step - loss: 0.1005 - acc: 0.9663 - val_loss: 0.0987 - val_acc: 0.9632\nEpoch 9/10\n4457/4457 [==============================] - 2s 460us/step - loss: 0.0934 - acc: 0.9684 - val_loss: 0.0999 - val_acc: 0.9605\nEpoch 10/10\n4457/4457 [==============================] - 2s 401us/step - loss: 0.0890 - acc: 0.9695 - val_loss: 0.1011 - val_acc: 0.9641\n" ], [ "# Predict on new test data", "_____no_output_____" ], [ "inp_test_seq = \"WINNER! 
U win a 500 prize reward & free entry to FA cup final tickets! Text FA to 34212 to receive award\"", "_____no_output_____" ], [ "test_sequences = mytokenizer.texts_to_sequences(np.array([inp_test_seq]))\ntest_sequences_matrix = sequence.pad_sequences(test_sequences,maxlen=max_len)", "_____no_output_____" ], [ "model.predict(test_sequences_matrix)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a4eba8b905c481051c6cc353dd8ddc64497d8e
4,435
ipynb
Jupyter Notebook
notes/.ipynb_checkpoints/ScalaRep-checkpoint.ipynb
alf239/ProvingGround
c9f4de822be55bd1b6cb0f1959392205453e8d45
[ "MIT" ]
null
null
null
notes/.ipynb_checkpoints/ScalaRep-checkpoint.ipynb
alf239/ProvingGround
c9f4de822be55bd1b6cb0f1959392205453e8d45
[ "MIT" ]
null
null
null
notes/.ipynb_checkpoints/ScalaRep-checkpoint.ipynb
alf239/ProvingGround
c9f4de822be55bd1b6cb0f1959392205453e8d45
[ "MIT" ]
null
null
null
21.634146
300
0.556032
[ [ [ "## Scala Representations\n\nScala objects are integrated with HoTT by using wrappers, combinators and implicit based convenience methods. In this note we look at the basic representations. The main power of this is to provide automatically (through implicits) types and scala bindings for functions from the basic ones.\n\nA more advanced form of Scala representations also makes symbolic algebra simplifications. The basic form should be used, for example, for group presentations, where simplifications are not expected.\n", "_____no_output_____" ] ], [ [ "load.jar(\"/home/gadgil/code/ProvingGround/core/.jvm/target/scala-2.11/ProvingGround-Core-assembly-0.8.jar\")", "_____no_output_____" ], [ "import provingground._\nimport HoTT._\nimport ScalaRep._", "_____no_output_____" ] ], [ [ "We consider the type of Natural numbers formed from Integers. This is defined in ScalaRep as:\n\n```scala\ncase object NatInt extends ScalaTyp[Int]\n```\n\n**Warning:** This is an unsafe type, as Integers can overflow, and there is no checking for positivity.", "_____no_output_____" ] ], [ [ "NatInt", "_____no_output_____" ] ], [ [ "### Conversion using the term method\n\nThe term method converts a scala object, with scala type T say, into a Term, provided there is an implicit representation with scala type T.", "_____no_output_____" ] ], [ [ "import NatInt.rep\n1.term", "_____no_output_____" ] ], [ [ "### Functions to FuncTerms\n\nGiven the representation of Int, there are combinators that give representations of, for instance Int => Int => Int. Note also that the type of the resulting term is a type parameter of the scala representations, so we get a refined compile time type", "_____no_output_____" ] ], [ [ "val sum = ((n: Int) => (m: Int) => n + m).term", "_____no_output_____" ], [ "sum(1.term)(2.term)", "_____no_output_____" ], [ "val n = \"n\" :: NatInt\nsum(n)(2.term)", "_____no_output_____" ], [ "val s = lmbda(n)(sum(n)(2.term))", "_____no_output_____" ], [ "s(3.term)", "_____no_output_____" ] ], [ [ "We will also define the product", "_____no_output_____" ] ], [ [ "val prod = ((n : Int) => (m: Int) => n * m).term", "_____no_output_____" ], [ "prod(2.term)(4.term)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e7a4ec1eee4da523dbd71d1e0aa323dc3352cba2
192,393
ipynb
Jupyter Notebook
examples/notebooks/SCF_diagram.ipynb
gramaziokohler/integral_timber_joints
70e75a66e13b5ada580fcffc58879f5fcb8fce32
[ "MIT" ]
3
2021-09-16T13:08:32.000Z
2022-02-21T17:20:21.000Z
examples/notebooks/SCF_diagram.ipynb
gramaziokohler/integral_timber_joints
70e75a66e13b5ada580fcffc58879f5fcb8fce32
[ "MIT" ]
80
2021-09-06T09:55:38.000Z
2022-03-22T18:44:24.000Z
examples/notebooks/SCF_diagram.ipynb
gramaziokohler/integral_timber_joints
70e75a66e13b5ada580fcffc58879f5fcb8fce32
[ "MIT" ]
null
null
null
92.630236
30,760
0.791609
[ [ [ "# https://ipython.org/ipython-doc/3/config/extensions/autoreload.html\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "## Options", "_____no_output_____" ] ], [ [ "# parse options\nproblem = 'twelve_pieces_process.json' # 'pavilion_process.json' # 'twelve_pieces_process.json'\nproblem_subdir = 'results'\n\nrecompute_action_states = False\nload_external_movements = False", "_____no_output_____" ], [ "from collections import namedtuple\nPlanningArguments = namedtuple('PlanningArguments', ['problem', 'viewer', 'debug', 'diagnosis', 'id_only', 'solve_mode', 'viz_upon_found', \n 'save_now', 'write', 'plan_impacted', 'watch', 'step_sim', 'verbose'])\n# args = PlanningArguments(problem, viewer, debug, diagnosis, id_only, solve_mode, viz_upon_found, save_now, write, plan_impacted, watch, step_sim, verbose)", "_____no_output_____" ] ], [ [ "## Parse process from json", "_____no_output_____" ] ], [ [ "import os\nfrom termcolor import cprint\nimport pybullet_planning as pp\nfrom integral_timber_joints.planning.parsing import parse_process, save_process_and_movements, get_process_path, save_process", "_____no_output_____" ], [ "process = parse_process(problem, subdir=problem_subdir)\nresult_path = get_process_path(problem, subdir='results')\nif len(process.movements) == 0:\n cprint('No movements found in process, trigger recompute actions.', 'red')\n recompute_action_states = True\nif recompute_action_states:\n cprint('Recomputing Actions and States', 'cyan')\n recompute_action_states(process)", "\u001b[34mProcess json parsed from c:\\users\\harry\\dropbox (mit)\\code_ws_dropbox\\itj_ws\\integral_timber_joints\\external\\itj_design_study\\210128_RemodelFredPavilion\\results\\twelve_pieces_process.json\u001b[0m\n" ], [ "from integral_timber_joints.process import RoboticMovement\nmoves = process.get_movements_by_beam_id('b4')\n\ncnt = 0\nfor m in moves:\n if isinstance(m, RoboticMovement):\n cnt+=1\nprint('{}/{}'.format(cnt, len(moves)))", "45/72\n" ] ], [ [ "## read jsons", "_____no_output_____" ] ], [ [ "# from collections import defaultdict\nimport json\n\n# file_name = 'b4_runtime_data_w_TC_final_nonlinear.json'\nnotc_file_name = 'b4_runtime_data_No_TC_final_all.json'\ntc_file_name = 'b4_runtime_data_w_TC_nonlinear_40_trials.json'\ntc_file_name2 = 'b4_runtime_data_w_TC_final_linear.json'\n\n# b4_runtime_data_No_TC_21-07-06_11-54-15.json, on-going\n# 'b1_runtime_data_w_TC_21-07-06_07-35-29.json'\n#'b4_runtime_data_No_TC_21-07-06_00-04-45.json', 600 timeout, before bug fixed\n# 'b4_runtime_data_No_TC_21-07-05_19-59-42.json' 1800 timeout, before bug fixed\n\nruntime_data = {}\nwith open('figs/{}'.format(notc_file_name), 'r') as f:\n runtime_data['notc'] = json.load(f)\n\nruntime_data['tc'] = {}\nwith open('figs/{}'.format(tc_file_name), 'r') as f:\n runtime_data['tc'].update(json.load(f))\nwith open('figs/{}'.format(tc_file_name2), 'r') as f:\n runtime_data['tc'].update(json.load(f))\n \nprint(runtime_data['notc'].keys())\nprint(runtime_data['tc'].keys())", "dict_keys(['nonlinear', 'linear_forward', 'linear_backward'])\ndict_keys(['nonlinear', 'linear_forward', 'linear_backward'])\n" ] ], [ [ "## B4-Histogram", "_____no_output_____" ] ], [ [ "from collections import defaultdict\n\n# aggregate all success/failure trials\nagg_data = {'notc':{}, 'tc':{}}\n\nfor tc_flag in runtime_data:\n for solve_mode_ in runtime_data[tc_flag]:\n agg_data[tc_flag][solve_mode_] = defaultdict(list)\n cnt = 0\n for outer_trial_i, tdata in runtime_data[tc_flag][solve_mode_].items():\n for 
inner_trial_j_data in tdata.values():\n runtime_per_move = [sum(inner_trial_j_data['profiles'][mid]['plan_time']) \\\n for mid in inner_trial_j_data['profiles']]\n runtime_key = 'success' if inner_trial_j_data['success'] else 'failure'\n agg_data[tc_flag][solve_mode_]['history'].append((inner_trial_j_data['success'], sum(runtime_per_move)))\n# if cnt < sample_num:\n agg_data[tc_flag][solve_mode_][runtime_key].append(sum(runtime_per_move))\n cnt += 1", "_____no_output_____" ], [ "# agg_data['tc']", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nhistory = agg_data['tc']['linear_forward']['history'][:37]\nprint(len(history))\n\nax.plot(range(len(history)), [h[1] for h in history])\nax.scatter(range(len(history)), [h[1] for h in history], c=['g' if h[0] else 'r' for h in history])", "_____no_output_____" ], [ "import numpy as np\ntotal_attempts = 37\nplot_data = {'notc':{}, 'tc':{}}\nfor tc_flag in agg_data:\n for solve_mode, solve_data in agg_data[tc_flag].items():\n history = solve_data['history'][0:total_attempts]\n success_runs = [h[1] for h in history if h[0]]\n failed_runs = [h[1] for h in history if not h[0]]\n \n success_rate = len(success_runs) / len(history)\n success_mean = np.mean(success_runs)\n success_std = np.std(success_runs)\n failure_mean = np.mean(failed_runs)\n failure_std = np.std(failed_runs)\n\n plot_data[tc_flag][solve_mode] = {}\n plot_data[tc_flag][solve_mode]['total_attempts'] = len(history)\n plot_data[tc_flag][solve_mode]['success_rate'] = success_rate\n plot_data[tc_flag][solve_mode]['success_mean'] = success_mean\n plot_data[tc_flag][solve_mode]['success_std'] = success_std\n plot_data[tc_flag][solve_mode]['failure_mean'] = failure_mean\n plot_data[tc_flag][solve_mode]['failure_std'] = failure_std\n plot_data[tc_flag][solve_mode]['success_range'] = (success_mean-np.min(success_runs), np.max(success_runs)-success_mean) \\\n if success_runs else (0,0)\n plot_data[tc_flag][solve_mode]['failed_range'] = (failure_mean-np.min(failed_runs), np.max(failed_runs)-failure_mean)", "C:\\Users\\harry\\miniconda3\\envs\\itj\\lib\\site-packages\\numpy\\core\\fromnumeric.py:3373: RuntimeWarning: Mean of empty slice.\n out=out, **kwargs)\nC:\\Users\\harry\\miniconda3\\envs\\itj\\lib\\site-packages\\numpy\\core\\_methods.py:234: RuntimeWarning: Degrees of freedom <= 0 for slice\n keepdims=keepdims)\n" ], [ "plot_data", "_____no_output_____" ], [ "# use Helvetica font\n# https://felix11h.github.io/blog/matplotlib-tgheros\n\nfrom matplotlib import rcParams\nrcParams['font.family'] = 'sans-serif'\nrcParams['font.sans-serif'] = ['Arial']\n# rc(‘font’,**{‘family’:‘sans-serif’,‘sans-serif’:[‘Arial’]})", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\n\nconstriant_type = 'notc'\npp_data = plot_data[constriant_type]\nx = np.arange(len(pp_data)) # the label locations\nwidth = 0.3 # the width of the bars\nsuccess_green = '#caffbf'\nfailure_red = '#ffadad'\naverage_color = '#a0c4ff'\nscatter_size = 5\n\nfig, ax = plt.subplots(1,3,figsize=(14,4)) # plt.figaspect(2)\n\n# ! 
First figure\ns_height = 40\nrate_x = x\nrate_alpha = 1.0\nsuccess_heights = [(pp_data[s]['success_rate'])*s_height for s in pp_data]\nfailed_heights = [(1-pp_data[s]['success_rate'])*s_height for s in pp_data]\n\nrects1_1 = ax[0].bar(rate_x, success_heights, width, color=success_green, alpha=rate_alpha)\nrects1_2 = ax[0].bar(rate_x, failed_heights, width, bottom=success_heights, color=failure_red, alpha=rate_alpha)\nax[0].bar_label(rects1_1, labels=['{:.1f}%'.format(pp_data[s]['success_rate']*100) for s in pp_data], \n label_type='center') #padding=3)\nax[0].bar_label(rects1_2, labels=['{:.1f}%'.format((1-pp_data[s]['success_rate'])*100) for s in pp_data], \n label_type='center') #padding=3)\nax[0].set_ylabel('number of trials')\nax[0].set_xticks(x)\nax[0].set_xticklabels(pp_data)\n# ax[0].legend()\nax[0].set_title('Success rate')\n\nelinewidth = 0.5\necapsize = 2\n\n# ! Second Figure\nrects2 = ax[1].bar(x - width/2, [pp_data[s]['success_mean'] for s in pp_data], width,\n yerr=[[pp_data[s]['success_range'][0] for s in pp_data], [pp_data[s]['success_range'][1] for s in pp_data]], \n label='success runtime', error_kw={'elinewidth':elinewidth},\n color=success_green, ecolor='black', capsize=ecapsize)\n\nrects3 = ax[1].bar(x + width/2, [pp_data[s]['failure_mean'] for s in pp_data], width, \n yerr=[[pp_data[s]['failed_range'][0] for s in pp_data], [pp_data[s]['failed_range'][1] for s in pp_data]],\n label='failure runtime', error_kw={'elinewidth':elinewidth},\n color=failure_red, ecolor='black', capsize=ecapsize)\n\nax[1].set_ylabel('planning time (s)')\nax[1].set_xticks(x)\nax[1].set_xticklabels(pp_data)\nax[1].set_ylim([0,420])\nax[1].legend(loc='upper center')\nax[1].set_title('Average runtime for each attempt')\n# average time to obtain a successful result\n\n# ! 
Third Figure\ntimeout = 600*3\ndata_summary = {}\nfor solve_mode, solve_mode_data in runtime_data[constriant_type].items():\n runtime_per_trial = []\n for outer_trial_data in solve_mode_data.values():\n runtime_per_inner = []\n for inner_trial_j_data in outer_trial_data.values():\n runtime_per_inner.append(sum([sum(inner_trial_j_data['profiles'][mid]['plan_time']) \\\n for mid in inner_trial_j_data['profiles']]))\n runtime_per_trial.append(sum(runtime_per_inner))\n num_bts = [len(solve_mode_data[str(at)])-1 for at in range(len(solve_mode_data))]\n data_summary[solve_mode] = (np.average(runtime_per_trial), runtime_per_trial, num_bts)\nbars = ax[2].bar(x, [d[0] for _, d in data_summary.items()], width, align='center', zorder=1, color=average_color)\n\n# scatter points\nfor tx, rdata in zip(x, data_summary.values()):\n inner_runtimes = rdata[1]\n ax[2].scatter([tx for _ in inner_runtimes], inner_runtimes, c=['black' if rt < timeout else '#ef476f' \\\n for rt in inner_runtimes], s=scatter_size, zorder=2)\n# for t, bt in zip(inner_runtimes, rdata[2]):\n# ax[2].annotate(bt, (tx+0.05, t), fontsize=7)\n# timeout\nax[2].plot(x, [timeout for _ in x], c=failure_red, dashes=[6, 2], label='timeout', zorder=2, lw=1)\n\n# ax[2].set_ylim([0,1850])\nax[2].set_xticks(x)\nax[2].set_xticklabels(data_summary)\nax[2].set_ylabel('planning time (s)')\nax[2].set_title('Average runtime until success/timeout')\nax[2].set_ylim([0,2100])\nax[2].legend(loc='upper right')\n\nfig.tight_layout()\n# all: comparison between linear and nonlinear planning b4's all movements (xx robot movements)\n# a: success rate, b: runtime for each attempt, c: average runtime until success\n\nplt.savefig(os.path.join('figs','10_beam4_runtime_without_TC.svg'))\nplt.savefig(os.path.join('figs','10_beam4_runtime_without_TC.png'))\n\nplt.show()", "_____no_output_____" ], [ "# ! Third Figure\ntc_file_name = 'b4_runtime_data_w_TC_final_nonlinear.json'\ntc_file_name2 = 'b4_runtime_data_w_TC_final_linear.json'\n\ntc_runtime_data = {}\nwith open('figs/{}'.format(tc_file_name), 'r') as f:\n tc_runtime_data.update(json.load(f))\nwith open('figs/{}'.format(tc_file_name2), 'r') as f:\n tc_runtime_data.update(json.load(f))", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\n\nconstriant_type = 'tc'\npp_data = plot_data[constriant_type]\nx = np.arange(len(pp_data)) # the label locations\nwidth = 0.3 # the width of the bars\nsuccess_green = '#caffbf'\nfailure_red = '#ffadad'\naverage_color = '#a0c4ff'\nscatter_size = 5\n\nfig, ax = plt.subplots(1,3,figsize=(14,4)) # plt.figaspect(2)\n\n# ! First figure\ns_height = 40\nrate_x = x\nrate_alpha = 1.0\nsuccess_heights = [(pp_data[s]['success_rate'])*s_height for s in pp_data]\nfailed_heights = [(1-pp_data[s]['success_rate'])*s_height for s in pp_data]\n\nrects1_1 = ax[0].bar(rate_x, success_heights, width, color=success_green, alpha=rate_alpha)\nrects1_2 = ax[0].bar(rate_x, failed_heights, width, bottom=success_heights, color=failure_red, alpha=rate_alpha)\nax[0].bar_label(rects1_1, labels=['{:.1f}%'.format(pp_data[s]['success_rate']*100) for s in pp_data], \n label_type='center') #padding=3)\nax[0].bar_label(rects1_2, labels=['{:.1f}%'.format((1-pp_data[s]['success_rate'])*100) for s in pp_data], \n label_type='center') #padding=3)\nax[0].set_ylabel('number of trials')\nax[0].set_xticks(x)\nax[0].set_xticklabels(pp_data)\n# ax[0].legend()\nax[0].set_title('Success rate')\n\n# ! 
Second Figure\nrects2 = ax[1].bar(x - width/2, [pp_data[s]['success_mean'] for s in pp_data], width,\n yerr=[[pp_data[s]['success_range'][0] for s in pp_data], [pp_data[s]['success_range'][1] for s in pp_data]], \n label='success runtime', error_kw={'elinewidth':elinewidth},\n color=success_green, ecolor='black', capsize=ecapsize)\n\nrects3 = ax[1].bar(x + width/2, [pp_data[s]['failure_mean'] for s in pp_data], width, \n yerr=[[pp_data[s]['failed_range'][0] for s in pp_data], [pp_data[s]['failed_range'][1] for s in pp_data]],\n label='failure runtime', error_kw={'elinewidth':elinewidth},\n color=failure_red, ecolor='black', capsize=ecapsize)\n\nax[1].set_ylabel('planning time (s)')\nax[1].set_xticks(x)\nax[1].set_xticklabels(pp_data)\nax[1].set_ylim([0,420])\nax[1].legend(loc='upper center')\nax[1].set_title('Average runtime for each attempt')\n# average time to obtain a successful result\n\n# ! Third Figure\ntimeout = 600*3\ndata_summary = {}\nsolve_mode = 'nonlinear'\nsolve_mode_data = tc_runtime_data[solve_mode]\nruntime_per_trial = []\nfor outer_trial_data in solve_mode_data.values():\n runtime_per_inner = []\n for inner_trial_j_data in outer_trial_data.values():\n runtime_per_inner.append(sum([sum(inner_trial_j_data['profiles'][mid]['plan_time']) \\\n for mid in inner_trial_j_data['profiles']]))\n runtime_per_trial.append(sum(runtime_per_inner))\nnum_bts = [len(solve_mode_data[str(at)])-1 for at in range(len(solve_mode_data))]\ndata_summary[solve_mode] = (np.average(runtime_per_trial), runtime_per_trial, num_bts)\n\n# 455, 68\nlf_bts = 500\ndata_summary['linear_forward'] = (timeout, [timeout/lf_bts for i in range(lf_bts)], lf_bts)\nlb_bts = 500\ndata_summary['linear_backward'] = (timeout, [timeout/lb_bts for i in range(lb_bts)], lb_bts)\nbars = ax[2].bar(x, [d[0] for _, d in data_summary.items()], width, align='center', zorder=1, color=average_color)\n\n# scatter points\ntx = 0\nrdata = data_summary['nonlinear']\ninner_runtimes = rdata[1]\nax[2].scatter([tx for _ in inner_runtimes], inner_runtimes, c=['black' if rt < timeout else '#ef476f' \\\n for rt in inner_runtimes], s=scatter_size, zorder=2) # label\nax[2].scatter([1,2], [timeout, timeout], c='#ef476f', s=scatter_size, zorder=2)\n\n# for t, bt in zip(inner_runtimes, rdata[2]):\n# ax[2].annotate(bt, (tx+0.05, t), fontsize=7)\n# timeout\nax[2].plot(x, [timeout for _ in x], c=failure_red, dashes=[6, 2], label='timeout', zorder=2, lw=1)\n\nax[2].set_xticks(x)\nax[2].set_xticklabels(data_summary)\nax[2].set_ylabel('planning time (s)')\nax[2].set_title('Average runtime until success/timeout')\nax[2].set_ylim([0,2100])\nax[2].legend(loc='upper right')\n\nfig.tight_layout()\n# all: comparison between linear and nonlinear planning b4's all movements (xx robot movements)\n# a: success rate, b: runtime for each attempt, c: average runtime until success\n\nplt.savefig(os.path.join('figs','11_beam4_runtime_with_TC.svg'))\nplt.savefig(os.path.join('figs','11_beam4_runtime_with_TC.png'))\n\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\n\ntimeout = 600*3\n\nfig, ax = plt.subplots()\ndata_summary = {}\nfor solve_mode, solve_mode_data in runtime_data.items():\n runtime_per_trial = []\n for outer_trial_data in solve_mode_data.values():\n runtime_per_inner = []\n for inner_trial_j_data in outer_trial_data.values():\n runtime_per_inner.append(sum([sum(inner_trial_j_data['profiles'][mid]['plan_time']) \\\n for mid in inner_trial_j_data['profiles']]))\n runtime_per_trial.append(sum(runtime_per_inner))\n 
num_bts = [len(solve_mode_data[str(at)])-1 for at in range(len(solve_mode_data))]\n data_summary[solve_mode] = (np.average(runtime_per_trial), runtime_per_trial, num_bts)\n\nwidth = 0.35 # the width of the bars\nx_pos = np.arange(len(data_summary))\nbars = ax.bar(x_pos, [d[0] for _, d in data_summary.items()], width, align='center', zorder=1)\n\n# scatter points\nfor x, rdata in zip(x_pos, data_summary.values()):\n inner_runtimes = rdata[1]\n ax.scatter([x for _ in inner_runtimes], inner_runtimes, c='red', s=2.0, zorder=2) # label\n for t, bt in zip(inner_runtimes, rdata[2]):\n plt.annotate(bt, (x, t))\n\nax.plot(x_pos, [timeout for _ in x_pos], c='r', dashes=[6, 2], label='timeout', zorder=2)\n\nleg = ax.legend(loc='right')\nax.set_xticks(x_pos)\nax.set_xticklabels(data_summary)\nax.set_ylabel('runtime (s)')\n\n# import os\n# plt.savefig(os.path.join('figs',file_name.split('.json')[0]+'_hist.png'))\nfig.tight_layout()\n\nplt.show()", "_____no_output_____" ] ], [ [ "# All beam scatter plot", "_____no_output_____" ] ], [ [ "# from collections import defaultdict\nimport json\n\nfile_names = {'b{}'.format(bid) : 'b{}_runtime_data_w_TC_{}.json'.format(bid, '21-07-06_23-04-03') \\\n for bid in list(range(0,25)) + list(range(26,32))}\nfile_names.update(\n {'b{}'.format(bid) : 'b{}_runtime_data_w_TC_{}.json'.format(bid, '21-07-07_07-55-05') for bid in range(32,40)}\n)\n\nall_runtime_data = {}\nfor bid, fn in file_names.items():\n with open('figs/{}'.format(fn), 'r') as f:\n all_runtime_data[bid] = json.load(f)\n\nprint(all_runtime_data.keys())", "dict_keys(['b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10', 'b11', 'b12', 'b13', 'b14', 'b15', 'b16', 'b17', 'b18', 'b19', 'b20', 'b21', 'b22', 'b23', 'b24', 'b26', 'b27', 'b28', 'b29', 'b30', 'b31', 'b32', 'b33', 'b34', 'b35', 'b36', 'b37', 'b38', 'b39'])\n" ], [ "all_runtime_data['b1']['nonlinear'].keys()", "_____no_output_____" ], [ "# ['nonlinear', 'linear_forward', 'linear_backward']\nfor bid, beam_data in all_runtime_data.items():\n for solve_mode_ in beam_data:\n print('='*20)\n for i, tdata in beam_data[solve_mode_].items():\n print('{} | #{}-T#{}:'.format(bid, solve_mode_, i))\n sc = all([d['success'] for di, d in tdata.items()])\n\n total_runtime = []\n for i, trial_data in tdata.items():\n trial_profiles = trial_data['profiles']\n runtime_per_move = [sum(trial_profiles[mid]['plan_time']) for mid in trial_profiles]\n total_runtime.append(sum(runtime_per_move))\n tdata['total_runtime'] = sum(total_runtime)\n cprint('- {} - BT {} | time {:.2f}'.format(sc, len(tdata), sum(total_runtime)), 'green' if sc else 'red')\n print('---')", "====================\nb0 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 22.86\u001b[0m\n---\n====================\nb1 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 13.85\u001b[0m\n---\n====================\nb2 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 163.00\u001b[0m\n---\n====================\nb3 | #nonlinear-T#0:\n\u001b[31m- False - BT 3 | time 78.48\u001b[0m\n---\n====================\nb4 | #nonlinear-T#0:\n\u001b[31m- False - BT 9 | time 1271.01\u001b[0m\n---\n====================\nb5 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 58.98\u001b[0m\n---\n====================\nb6 | #nonlinear-T#0:\n\u001b[31m- False - BT 11 | time 1827.63\u001b[0m\n---\n====================\nb7 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 11.03\u001b[0m\n---\n====================\nb8 | #nonlinear-T#0:\n\u001b[31m- False - BT 4 | time 382.68\u001b[0m\n---\n====================\nb9 | 
#nonlinear-T#0:\n\u001b[31m- False - BT 3 | time 95.90\u001b[0m\n---\n====================\nb10 | #nonlinear-T#0:\n\u001b[31m- False - BT 4 | time 392.30\u001b[0m\n---\n====================\nb11 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 171.90\u001b[0m\n---\n====================\nb12 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 129.33\u001b[0m\n---\n====================\nb13 | #nonlinear-T#0:\n\u001b[31m- False - BT 11 | time 1552.85\u001b[0m\n---\n====================\nb14 | #nonlinear-T#0:\n\u001b[31m- False - BT 3 | time 175.17\u001b[0m\n---\n====================\nb15 | #nonlinear-T#0:\n\u001b[31m- False - BT 6 | time 380.87\u001b[0m\n---\n====================\nb16 | #nonlinear-T#0:\n\u001b[31m- False - BT 4 | time 530.10\u001b[0m\n---\n====================\nb17 | #nonlinear-T#0:\n\u001b[31m- False - BT 3 | time 99.48\u001b[0m\n---\n====================\nb18 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 79.31\u001b[0m\n---\n====================\nb19 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 19.87\u001b[0m\n---\n====================\nb20 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 159.99\u001b[0m\n---\n====================\nb21 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 31.12\u001b[0m\n---\n====================\nb22 | #nonlinear-T#0:\n\u001b[31m- False - BT 3 | time 77.90\u001b[0m\n---\n====================\nb23 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 61.55\u001b[0m\n---\n====================\nb24 | #nonlinear-T#0:\n\u001b[31m- False - BT 4 | time 339.08\u001b[0m\n---\n====================\nb26 | #nonlinear-T#0:\n\u001b[31m- False - BT 3 | time 190.71\u001b[0m\n---\n====================\nb27 | #nonlinear-T#0:\n\u001b[32m- True - BT 2 | time 78.56\u001b[0m\n---\n====================\nb28 | #nonlinear-T#0:\n\u001b[31m- False - BT 4 | time 435.24\u001b[0m\n---\n====================\nb29 | #nonlinear-T#0:\n\u001b[31m- False - BT 18 | time 1893.25\u001b[0m\n---\n====================\nb30 | #nonlinear-T#0:\n\u001b[31m- False - BT 4 | time 336.32\u001b[0m\n---\n====================\nb31 | #nonlinear-T#0:\n\u001b[31m- False - BT 3 | time 359.94\u001b[0m\n---\n====================\nb32 | #nonlinear-T#0:\n\u001b[31m- False - BT 8 | time 1134.93\u001b[0m\n---\n====================\nb33 | #nonlinear-T#0:\n\u001b[31m- False - BT 6 | time 332.68\u001b[0m\n---\n====================\nb34 | #nonlinear-T#0:\n\u001b[31m- False - BT 14 | time 1930.53\u001b[0m\n---\n====================\nb35 | #nonlinear-T#0:\n\u001b[31m- False - BT 4 | time 319.84\u001b[0m\n---\n====================\nb36 | #nonlinear-T#0:\n\u001b[31m- False - BT 9 | time 1802.49\u001b[0m\n---\n====================\nb37 | #nonlinear-T#0:\n\u001b[31m- False - BT 7 | time 717.19\u001b[0m\n---\n====================\nb38 | #nonlinear-T#0:\n\u001b[31m- False - BT 16 | time 1652.13\u001b[0m\n---\n====================\nb39 | #nonlinear-T#0:\n\u001b[31m- False - BT 12 | time 1355.28\u001b[0m\n---\n" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\n\ntimeout = 600*3\n\nfig, ax = plt.subplots()\ndata_summary = {}\nfor bid, beam_data in all_runtime_data.items():\nfor solve_mode, solve_mode_data in runtime_data.items():\n x = range(len(solve_mode_data))\n runtime_per_trial = [solve_mode_data[str(at)]['total_runtime'] for at in x]\n num_bts = [len(solve_mode_data[str(at)])-1 for at in x]\n data_summary[solve_mode] = (np.average(runtime_per_trial), runtime_per_trial, num_bts)\n\nx_pos = np.arange(len(data_summary))\nbars = ax.bar(x_pos, [d[0] for _, d in data_summary.items()], align='center', 
zorder=1)\n\n# scatter points\nfor x, rdata in zip(x_pos, data_summary.values()):\n inner_runtimes = rdata[1]\n ax.scatter([x for _ in inner_runtimes], inner_runtimes, c='red', s=2.0, zorder=2) # label\n for t, bt in zip(inner_runtimes, rdata[2]):\n plt.annotate(bt, (x, t))\n\nax.plot(x_pos, [timeout for _ in x_pos], c='r', dashes=[6, 2], label='timeout', zorder=2)\n\nleg = ax.legend(loc='right')\nax.set_xticks(x_pos)\nax.set_xticklabels(data_summary)\nax.set_ylabel('runtime (s)')\n\nplt.draw() # Draw the figure so you can find the positon of the legend.\n\nimport os\nplt.savefig(os.path.join('figs',file_name.split('.json')[0]+'_hist.png'))\n\nplt.show()", "_____no_output_____" ] ], [ [ "## All-beam scatter plot", "_____no_output_____" ] ], [ [ "# from collections import defaultdict\nimport json\n\nfile_names = {bid : 'b{}_runtime_data_w_TC_{}.json'.format(bid, '21-07-06_23-04-03') \\\n for bid in list(range(0,25)) + list(range(26,32))}\nfile_names.update(\n {bid : 'b{}_runtime_data_w_TC_{}.json'.format(bid, '21-07-07_07-55-05') for bid in range(32,40)}\n)\n\nfor bid in [6,29,36]:\n file_names[bid] = 'b{}_runtime_data_w_TC_21-07-08_17-21-22.json'.format(bid)\nfor bid in [34]:\n file_names[bid] = 'b{}_runtime_data_w_TC_21-07-07_21-58-12.json'.format(bid)\n# file_names[bid] = 'b{}_runtime_data_w_TC_21-07-07_07-55-05.json'.format(bid)\nfor bid in [25]:\n file_names[bid] = 'b{}_runtime_data_w_TC_21-07-08_21-12-56.json'.format(bid)\nfor bid in [4, 13, 32, 37, 38, 39]:\n file_names[bid] = 'b{}_runtime_data_w_TC_21-07-08_19-58-46.json'.format(bid)\n \nruntime_per_beam = {}\nfor bid, fn in file_names.items():\n runtime_data = {}\n with open('figs/{}'.format(fn), 'r') as f:\n runtime_data = json.load(f)\n\n # ['nonlinear', 'linear_forward', 'linear_backward']\n for solve_mode_ in runtime_data:\n print('='*20)\n for i, tdata in runtime_data[solve_mode_].items():\n print('b{} | #{}-T#{}:'.format(bid, solve_mode_, i))\n sc = any([d['success'] for di, d in tdata.items()])\n\n total_runtime = []\n for i, trial_data in tdata.items():\n trial_profiles = trial_data['profiles']\n runtime_per_move = [sum(trial_profiles[mid]['plan_time']) for mid in trial_profiles]\n total_runtime.append(sum(runtime_per_move))\n tdata['total_runtime'] = sum(total_runtime)\n # !\n runtime_per_beam[bid] = (sum(total_runtime), len(tdata))\n cprint('{} - BT {} | time {:.2f}'.format(sc, len(tdata), sum(total_runtime)), 'green' if sc else 'red')\n print('---')", "====================\nb0 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 22.86\u001b[0m\n---\n====================\nb1 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 13.85\u001b[0m\n---\n====================\nb2 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 163.00\u001b[0m\n---\n====================\nb3 | #nonlinear-T#0:\n\u001b[32mTrue - BT 3 | time 78.48\u001b[0m\n---\n====================\nb4 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 84.23\u001b[0m\n---\n====================\nb5 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 58.98\u001b[0m\n---\n====================\nb6 | #nonlinear-T#0:\n\u001b[32mTrue - BT 5 | time 259.35\u001b[0m\n---\n====================\nb7 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 11.03\u001b[0m\n---\n====================\nb8 | #nonlinear-T#0:\n\u001b[32mTrue - BT 4 | time 382.68\u001b[0m\n---\n====================\nb9 | #nonlinear-T#0:\n\u001b[32mTrue - BT 3 | time 95.90\u001b[0m\n---\n====================\nb10 | #nonlinear-T#0:\n\u001b[32mTrue - BT 4 | time 392.30\u001b[0m\n---\n====================\nb11 | 
#nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 171.90\u001b[0m\n---\n====================\nb12 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 129.33\u001b[0m\n---\n====================\nb13 | #nonlinear-T#0:\n\u001b[32mTrue - BT 10 | time 1512.13\u001b[0m\n---\n====================\nb14 | #nonlinear-T#0:\n\u001b[32mTrue - BT 3 | time 175.17\u001b[0m\n---\n====================\nb15 | #nonlinear-T#0:\n\u001b[32mTrue - BT 6 | time 380.87\u001b[0m\n---\n====================\nb16 | #nonlinear-T#0:\n\u001b[32mTrue - BT 4 | time 530.10\u001b[0m\n---\n====================\nb17 | #nonlinear-T#0:\n\u001b[32mTrue - BT 3 | time 99.48\u001b[0m\n---\n====================\nb18 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 79.31\u001b[0m\n---\n====================\nb19 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 19.87\u001b[0m\n---\n====================\nb20 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 159.99\u001b[0m\n---\n====================\nb21 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 31.12\u001b[0m\n---\n====================\nb22 | #nonlinear-T#0:\n\u001b[32mTrue - BT 3 | time 77.90\u001b[0m\n---\n====================\nb23 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 61.55\u001b[0m\n---\n====================\nb24 | #nonlinear-T#0:\n\u001b[32mTrue - BT 4 | time 339.08\u001b[0m\n---\n====================\nb26 | #nonlinear-T#0:\n\u001b[32mTrue - BT 3 | time 190.71\u001b[0m\n---\n====================\nb27 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 78.56\u001b[0m\n---\n====================\nb28 | #nonlinear-T#0:\n\u001b[32mTrue - BT 4 | time 435.24\u001b[0m\n---\n====================\nb29 | #nonlinear-T#0:\n\u001b[32mTrue - BT 5 | time 651.53\u001b[0m\n---\n====================\nb30 | #nonlinear-T#0:\n\u001b[32mTrue - BT 4 | time 336.32\u001b[0m\n---\n====================\nb31 | #nonlinear-T#0:\n\u001b[32mTrue - BT 3 | time 359.94\u001b[0m\n---\n====================\nb32 | #nonlinear-T#0:\n\u001b[32mTrue - BT 2 | time 153.30\u001b[0m\n---\n====================\nb33 | #nonlinear-T#0:\n\u001b[32mTrue - BT 6 | time 332.68\u001b[0m\n---\n====================\nb34 | #nonlinear-T#0:\n\u001b[32mTrue - BT 10 | time 1233.37\u001b[0m\n---\n====================\nb35 | #nonlinear-T#0:\n\u001b[32mTrue - BT 4 | time 319.84\u001b[0m\n---\n====================\nb36 | #nonlinear-T#0:\n\u001b[32mTrue - BT 3 | time 300.41\u001b[0m\n---\n====================\nb37 | #nonlinear-T#0:\n\u001b[32mTrue - BT 3 | time 213.51\u001b[0m\n---\n====================\nb38 | #nonlinear-T#0:\n\u001b[32mTrue - BT 4 | time 385.49\u001b[0m\n---\n====================\nb39 | #nonlinear-T#0:\n\u001b[32mTrue - BT 4 | time 347.66\u001b[0m\n---\n====================\nb25 | #nonlinear-T#0:\n\u001b[32mTrue - BT 10 | time 542.60\u001b[0m\n---\n" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\n\ntimeout = 600*3\n\nfig, ax = plt.subplots(figsize=(16,4))\nbeams = list(sorted(runtime_per_beam.keys()))\n# plt.scatter(beams, [runtime_per_beam[b][0] for b in beams], s=5)\nb_runtimes = [runtime_per_beam[b][0] for b in beams]\nbar_chart = ax.bar(np.array(beams)+1, b_runtimes, color=average_color) #, edgecolor='black'\nax.bar_label(bar_chart, fontsize=7, padding=3, fmt='%.f')\n\n# plt.plot(beams, [runtime_per_beam[b][0] for b in beams])\n# for b in beams:\n# plt.annotate(runtime_per_beam[b][1], (b, runtime_per_beam[b][0]))\n\n# plt.plot(beams, [timeout for _ in beams], c='r', label='timeout')\nax.set_xlabel('timber element sequence')\nax.set_ylabel('planning time (s)')\nax.set_xlim([0,41])\nax.set_ylim([0,1700])\n\n# import 
os\nplt.savefig(os.path.join('figs','12_all_beam_runtime.svg'))\nplt.savefig(os.path.join('figs','12_all_beam_runtime.png'))\n\nfig.tight_layout()\nplt.show()\n\n# all beams, until success, no timeout", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\n\ntimeout = 600*3\n\nfig, ax = plt.subplots()\nmarkers = ['o', '^', (5, 0)]\nmcolors = ['g', 'r', 'b']\nfor marker, mcolor, (solve_m, solve_mode_data) in zip(markers, mcolors, runtime_data.items()):\n x = range(len(solve_mode_data))\n runtime_per_trial = [solve_mode_data[str(at)]['total_runtime'] for at in x]\n num_bts = [len(solve_mode_data[str(at)])-1 for at in x]\n plt.scatter(x, runtime_per_trial, marker=marker, c=mcolor, label=solve_m)\n for i in x:\n plt.annotate(num_bts[i], (i, runtime_per_trial[i]))\n\nplt.plot(x, [timeout for _ in x], c='r', label='timeout')\n \nax.set_xlabel('random trials')\nax.set_ylabel('runtime(s)')\n# ax.set_title('Runtime by sovl')\nleg = ax.legend(loc='upper right')\n\nplt.draw() # Draw the figure so you can find the positon of the legend. \n\n# Get the bounding box of the original legend\nbb = leg.get_bbox_to_anchor().transformed(ax.transAxes.inverted())\n\n# Change to location of the legend. \nxOffset = 0.4\nbb.x0 += xOffset\nbb.x1 += xOffset\nleg.set_bbox_to_anchor(bb, transform = ax.transAxes)\n\n# import os\n# plt.savefig(os.path.join('figs',file_name.split('.json')[0]+'.png'))\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Single result", "_____no_output_____" ] ], [ [ "fn = 'b6_runtime_data_w_TC_21-07-07_21-58-12.json'\n\nsingle_runtime_data = {}\nwith open('figs/{}'.format(fn), 'r') as f:\n single_runtime_data = json.load(f)", "_____no_output_____" ], [ "# ['nonlinear', 'linear_forward', 'linear_backward']\n\nfor bid in [25]: # [4, 13, 32, 37, 38, 39]: #[6,25,29,34,36]:\n# fn = 'b{}_runtime_data_w_TC_21-07-08_19-58-46.json'.format(bid)\n fn = 'b{}_runtime_data_w_TC_21-07-08_21-12-56.json'.format(bid)\n \n single_runtime_data = {}\n with open('figs/{}'.format(fn), 'r') as f:\n single_runtime_data = json.load(f)\n\n beam_id = fn.split('_')[0]\n for solve_mode_ in single_runtime_data:\n print('='*20)\n for i, tdata in single_runtime_data[solve_mode_].items():\n print('{} | #{}-T#{}:'.format(beam_id, solve_mode_, i))\n sc = any([d['success'] for di, d in tdata.items()])\n\n total_runtime = []\n for i, trial_data in tdata.items():\n trial_profiles = trial_data['profiles']\n runtime_per_move = [sum(trial_profiles[mid]['plan_time']) for mid in trial_profiles]\n total_runtime.append(sum(runtime_per_move))\n # tdata['total_runtime'] = sum(total_runtime)\n cprint('{} - BT {} | time {:.2f}'.format(sc, len(tdata), sum(total_runtime)), 'green' if sc else 'red')\n print('---')", "====================\nb25 | #nonlinear-T#0:\n\u001b[32mTrue - BT 9 | time 542.60\u001b[0m\n---\n" ] ], [ [ "# Detailed diagram", "_____no_output_____" ] ], [ [ "from plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nfrom integral_timber_joints.process import RoboticFreeMovement, RoboticLinearMovement, RoboticClampSyncLinearMovement\n\n# solve_mode_ = 'linear_forward' # linear_backward | linear_forward | nonlinear\nbeam_id = file_name.split('_runtime_data')[0]\n\n# total_rows = 0\n# for i, d in runtime_data[solve_mode_].items():\n# total_rows += len(d)+1\nmax_inner_loop_displayed = 11\n\nfor solve_mode_ in runtime_data:\n for attempt_i, s_rdata in runtime_data[solve_mode_].items():\n if 'total_runtime' in s_rdata:\n del s_rdata['total_runtime']\n if len(s_rdata) > 
max_inner_loop_displayed:\n num_rows = max_inner_loop_displayed+1\n half = int(max_inner_loop_displayed/2)\n selected_inners = list(range(0,half)) + list(range(len(s_rdata)-half,len(s_rdata)))\n else:\n num_rows = len(s_rdata)+1\n selected_inners = list(range(len(s_rdata)))\n\n fig = make_subplots(rows=num_rows, cols=2)\n success = any([d['success'] for di, d in s_rdata.items()])\n total_runtime = []\n failed_m_id = []\n for i in s_rdata.keys():\n trial_data = s_rdata[i]\n trial_profiles = trial_data['profiles']\n mid_keys = sorted(trial_profiles.keys(), key=int)\n runtime_per_move = [sum(trial_profiles[mid]['plan_time']) for mid in mid_keys]\n total_runtime.append(sum(runtime_per_move))\n\n for mid in mid_keys:\n if not any(trial_profiles[mid]['plan_success']):\n movement = process.get_movement_by_movement_id(trial_profiles[mid]['movement_id'][0])\n m_color = '#ff1b6b' if isinstance(movement, RoboticFreeMovement) else '#45caff'\n failed_m_id.append((mid, movement.short_summary, m_color))\n break\n else:\n failed_m_id.append((-1, 'success!', '#00ff87'))\n\n if i in selected_inners or int(i) in selected_inners:\n success_colors = ['#99C24D' if any(trial_profiles[mid]['plan_success']) else '#F18F01' for mid in mid_keys]\n row_id = selected_inners.index(int(i))+1\n fig.append_trace(go.Scatter(x=mid_keys,\n y=runtime_per_move,\n mode='markers',\n marker_color=success_colors,\n text=[process.get_movement_by_movement_id(trial_profiles[mid]['movement_id'][0]).short_summary \\\n for mid in mid_keys], # hover text goes here\n name='#{}-feasibility'.format(i),\n ),\n row=row_id, col=1\n )\n\n fig.append_trace(go.Scatter(x=mid_keys,\n y=runtime_per_move,\n mode='markers',\n marker=dict(\n size=5,\n color=[trial_profiles[mid]['sample_order'][0] for mid in mid_keys], #set color equal to a variable\n colorscale='Viridis', # one of plotly colorscales\n showscale=True\n ),\n text=['S#{}-{}'.format(trial_profiles[mid]['sample_order'][0], process.get_movement_by_movement_id(trial_profiles[mid]['movement_id'][0]).short_summary) \\\n for mid in mid_keys], # hover text goes here\n name='#{}-sample order'.format(i),),\n row=row_id, col=2\n )\n if row_id == 1:\n fig.update_xaxes(title_text=\"m_id\",row=row_id, col=1)\n fig.update_yaxes(title_text=\"runtime(s)\",row=row_id, col=1)\n\n fig.append_trace(go.Scatter(x=list(range(len(s_rdata))),y=total_runtime), \n row=num_rows, col=1)\n fig.update_xaxes(title_text=\"trials\",row=num_rows, col=1)\n fig.update_yaxes(title_text=\"runtime(s)\",row=num_rows, col=1)\n\n fig.append_trace(go.Scatter(x=list(range(len(failed_m_id))),y=[int(tt[0]) for tt in failed_m_id],\n mode='markers',\n marker_color=[tt[2] for tt in failed_m_id],\n text=[tt[1] for tt in failed_m_id],\n ), row=num_rows, col=2)\n fig.update_xaxes(title_text=\"trials\",row=num_rows, col=2)\n fig.update_yaxes(title_text=\"failed_movement_id\",row=num_rows, col=2)\n\n\n title = \"figs/{}-{}-trail_{}_success-{}_BT-{}_time-{:.1f}\".format(beam_id, solve_mode_, \n attempt_i, success, len(s_rdata), sum(total_runtime))\n fig.update_layout(title=title)\n fig.write_html(title + \".html\")\n# fig.show()", "_____no_output_____" ], [ "len(failed_m_id)", "_____no_output_____" ] ], [ [ "# Save runtime data", "_____no_output_____" ] ], [ [ "runtime_data.keys()", "_____no_output_____" ] ], [ [ "## Start client", "_____no_output_____" ] ], [ [ "from integral_timber_joints.planning.robot_setup import load_RFL_world\nfrom integral_timber_joints.planning.run import set_initial_state\n\n# * Connect to path planning backend and 
initialize robot parameters\n# viewer or diagnosis or view_states or watch or step_sim,\nclient, robot, _ = load_RFL_world(viewer=False, verbose=False)\nset_initial_state(client, robot, process, disable_env=disable_env, reinit_tool=False)", "argv[0]=\nb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nx_railb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nrobot11_tool0b3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nrobot12_tool0b3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nrobot21_tool0b3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nrobot22_tool0b3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_baseb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_jawb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nclamp_jawb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_baseb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_jawb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nclamp_jawb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_baseb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial 
frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_jawb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nclamp_jawb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_baseb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_jawb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nclamp_jawb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_baseb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_jaw_lb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_jaw_rb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_baseb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_jaw_lb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ngripper_jaw_rb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\nNo inertial data for link, using mass=1, localinertiadiagonal = 1,1,1, identity local inertial frameb3Warning[examples/Importers/ImportURDFDemo/BulletUrdfImporter.cpp,126]:\ntoolchanger_base" ], [ "client.disconnect()", "_____no_output_____" ] ], [ [ "# Visualize traj", "_____no_output_____" ] ], [ [ "from integral_timber_joints.planning.state import set_state\nfrom integral_timber_joints.planning.visualization import visualize_movement_trajectory\n\naltered_ms = [process.get_movement_by_movement_id('A43_M2')]\nset_state(client, robot, process, process.initial_state)\nfor altered_m in altered_ms:\n visualize_movement_trajectory(client, robot, process, altered_m, step_sim=False, step_duration=0.05)", "===\nViz:\u001b[0m\n\u001b[33mNo traj found for RoboticLinearMovement(#A43_M2, 
Linear Approach 2 of 2 to place CL3 ('c2') in storage., traj 0)\n -- has_start_conf False, has_end_conf True\u001b[0m\nPress enter to continue\n" ] ], [ [ "# Disconnect client", "_____no_output_____" ] ], [ [ "client.disconnect()", "_____no_output_____" ] ], [ [ "# Plan only one movement", "_____no_output_____" ] ], [ [ "# if id_only:\n# beam_id = process.get_beam_id_from_movement_id(id_only)\n# process.get_movement_summary_by_beam_id(beam_id)", "_____no_output_____" ], [ "from integral_timber_joints.planning.stream import compute_free_movement, compute_linear_movement\nfrom integral_timber_joints.planning.solve import compute_movement\n\nchosen_m = process.get_movement_by_movement_id(id_only)\ncompute_movement(client, robot, process, chosen_m, options=lm_options, diagnosis=diagnosis)", "\u001b[36mRoboticLinearMovement(#A2_M1, Linear Advance to Final Frame of Beam ('b0'), traj 1)\u001b[0m\n\u001b[33mend conf FK inconsistent (0.00005 m) with given current frame in end state.\u001b[0m\n\u001b[33mBoth start/end confs are pre-specified, problem might be too stiff to be solved.\u001b[0m\n\u001b[34mOne-sided Cartesian planning : start conf set, forward mode\u001b[0m\n\tcartesian trial #0\n\u001b[32mPlan found by IterativeIK! After 0 path failure (by IterativeIK) over 1 samples.\u001b[0m\n" ], [ "from integral_timber_joints.planning.visualization import visualize_movement_trajectory\n\nwith pp.WorldSaver():\n visualize_movement_trajectory(client, robot, process, chosen_m, step_sim=True)", "===\nViz:\u001b[0m\n\u001b[32mRoboticLinearMovement(#A2_M1, Linear Advance to Final Frame of Beam ('b0'), traj 1)\u001b[0m\n" ] ], [ [ "# Debug", "_____no_output_____" ] ], [ [ "prev_m = process.get_movement_by_movement_id('A40_M6')\nstart_state = process.get_movement_start_state(prev_m)\nend_state = process.get_movement_end_state(prev_m)\n\n# v = end_state['robot'].current_frame.point - start_state['robot'].current_frame.point\n# list(v)\nset_state(client, robot, process, end_state)\nprint(end_state['tool_changer'].current_frame)\nprint(client.get_object_frame('^tool_changer$', scale=1e3)[75])", "{\n \"point\": [\n 16365.955352783203,\n 5373.7616539001465,\n 1185.3845119476318\n ],\n \"xaxis\": [\n -0.2580321229101535,\n 0.6278875467781188,\n 0.7342864918731894\n ],\n \"yaxis\": [\n -0.9661363350844321,\n -0.1677526279329609,\n -0.19606029136775213\n ]\n}\n{\n \"point\": [\n 16365.955352783203,\n 5373.7616539001465,\n 1185.3845119476318\n ],\n \"xaxis\": [\n -0.25803212291015387,\n 0.6278875467781186,\n 0.7342864918731893\n ],\n \"yaxis\": [\n -0.9661363350844319,\n -0.16775262793296133,\n -0.19606029136775227\n ]\n}\n" ], [ "client.set_robot_configuration(robot, end_state['robot'].kinematic_config)\nprint(client.get_object_frame('^tool_changer$', scale=1e3)[75])", "{\n \"point\": [\n 16365.961074829102,\n 5373.770236968994,\n 1185.3852272033691\n ],\n \"xaxis\": [\n -0.2580321229101535,\n 0.6278875467781188,\n 0.7342864918731894\n ],\n \"yaxis\": [\n -0.9661363350844321,\n -0.1677526279329609,\n -0.19606029136775213\n ]\n}\n" ], [ "from compas_fab_pychoreo.backend_features.pychoreo_configuration_collision_checker import PyChoreoConfigurationCollisionChecker\n\nset_state(client, robot, process, end_state, options=options)\n# set_state(client, robot, process, start_state, options=options)\npychore_collision_fn = PyChoreoConfigurationCollisionChecker(client)\n# end_state['robot'].kinematic_config\noptions['diagnosis'] = True\npychore_collision_fn.check_collisions(robot, prev_m.trajectory.points[-2], 
options=options)", "_____no_output_____" ], [ "tc_body = client.pychoreo_attachments['tool_changer']", "_____no_output_____" ], [ "from compas_fab_pychoreo.conversions import pose_from_frame, frame_from_pose\n\nframe_from_pose(pp.get_pose(75))", "_____no_output_____" ], [ "client.get_object_frame('^tool_changer$')", "_____no_output_____" ], [ "print(end_state['robot'])\nprint(end_state['tool_changer'])", "State: current frame: {\n \"point\": [\n 16365.989685058594,\n 5373.808860778809,\n 1185.4075193405151\n ],\n \"xaxis\": [\n -0.25802939931448104,\n 0.6277901217809272,\n 0.7343707456616834\n ],\n \"yaxis\": [\n -0.9661370648091927,\n -0.16763997964096333,\n -0.1961530250285612\n ]\n} | config: JointTrajectoryPoint((15.468, -4.130, -2.020, 2.159, -0.587, -2.805, 0.492, -2.039, 0.908), (2, 2, 2, 0, 0, 0, 0, 0, 0), (0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000), (0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000), (0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000), Duration(11, 0)) | attached to robot: False\nState: current frame: {\n \"point\": [\n 16366.001562499872,\n 5373.822840010225,\n 1185.408652972277\n ],\n \"xaxis\": [\n -0.2580290176609404,\n 0.6277482599146081,\n 0.7344066640622972\n ],\n \"yaxis\": [\n -0.9661371673033442,\n -0.16765452393882985,\n -0.19614008911467898\n ]\n} | config: None | attached to robot: True\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7a4f1768d35997795fab9cbcdd859abf0d0835e
18,098
ipynb
Jupyter Notebook
Copy_of_C3_W4_Lab_4_GradCam.ipynb
nafiul-araf/TensorFlow-Advanced-Techniques-Specialization
a69aa9139f266a2e601433339d97ca9146029378
[ "Apache-2.0" ]
null
null
null
Copy_of_C3_W4_Lab_4_GradCam.ipynb
nafiul-araf/TensorFlow-Advanced-Techniques-Specialization
a69aa9139f266a2e601433339d97ca9146029378
[ "Apache-2.0" ]
null
null
null
Copy_of_C3_W4_Lab_4_GradCam.ipynb
nafiul-araf/TensorFlow-Advanced-Techniques-Specialization
a69aa9139f266a2e601433339d97ca9146029378
[ "Apache-2.0" ]
null
null
null
35.90873
364
0.516797
[ [ [ "<a href=\"https://colab.research.google.com/github/nafiul-araf/TensorFlow-Advanced-Techniques-Specialization/blob/main/Copy_of_C3_W4_Lab_4_GradCam.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Ungraded Lab: GradCAM\n\nThis lab will walk you through generating gradient-weighted class activation maps (GradCAMs) for model predictions. \n- This is similar to the CAMs you generated before except:\n - GradCAMs uses gradients instead of the global average pooling weights to weight the activations.", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ] ], [ [ "%tensorflow_version 2.x\n\nimport warnings \nwarnings.filterwarnings(\"ignore\")\n\nimport os\nimport glob\nimport cv2\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom skimage.io import imread, imsave\nfrom skimage.transform import resize \nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.applications import vgg16\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.optimizers import SGD, Adam, RMSprop\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nimport tensorflow_datasets as tfds\nimport tensorflow_hub as hub\n\nimport imgaug as ia\nfrom imgaug import augmenters as iaa", "_____no_output_____" ] ], [ [ "## Download and Prepare the Dataset\n\nYou will use the Cats vs Dogs dataset again for this exercise. The following will prepare the train, test, and eval sets.", "_____no_output_____" ] ], [ [ "tfds.disable_progress_bar()\n\nsplits = ['train[:80%]', 'train[80%:90%]', 'train[90%:]']\n\n# load the dataset given the splits defined above\nsplits, info = tfds.load('cats_vs_dogs', with_info=True, as_supervised=True, split = splits)\n\n(train_examples, validation_examples, test_examples) = splits\n\nnum_examples = info.splits['train'].num_examples\nnum_classes = info.features['label'].num_classes", "_____no_output_____" ], [ "BATCH_SIZE = 32\nIMAGE_SIZE = (224, 224)\n\n# resizes the image and normalizes the pixel values\ndef format_image(image, label):\n image = tf.image.resize(image, IMAGE_SIZE) / 255.0\n return image, label\n\n# prepare batches\ntrain_batches = train_examples.shuffle(num_examples // 4).map(format_image).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)\nvalidation_batches = validation_examples.map(format_image).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)\ntest_batches = test_examples.map(format_image).batch(1)", "_____no_output_____" ] ], [ [ "## Modelling\n\nYou will use a pre-trained VGG16 network as your base model for the classifier. This will be followed by a global average pooling (GAP) and a 2-neuron Dense layer with softmax activation for the output. The earlier VGG blocks will be frozen and we will just fine-tune the final layers during training. 
These steps are shown in the utility function below.", "_____no_output_____" ] ], [ [ "def build_model():\n # load the base VGG16 model\n base_model = vgg16.VGG16(input_shape=IMAGE_SIZE + (3,), \n weights='imagenet', \n include_top=False)\n \n # add a GAP layer\n output = layers.GlobalAveragePooling2D()(base_model.output)\n\n # output has two neurons for the 2 classes (cats and dogs)\n output = layers.Dense(2, activation='softmax')(output)\n\n # set the inputs and outputs of the model\n model = Model(base_model.input, output)\n\n # freeze the earlier layers\n for layer in base_model.layers[:-4]:\n layer.trainable=False\n\n # choose the optimizer\n optimizer = tf.keras.optimizers.RMSprop(0.001)\n\n # configure the model for training\n model.compile(loss='sparse_categorical_crossentropy', \n optimizer=optimizer, \n metrics=['accuracy'])\n \n # display the summary\n model.summary()\n \n return model", "_____no_output_____" ], [ "model = build_model()", "_____no_output_____" ] ], [ [ "You can now train the model. This will take around 10 minutes to run.", "_____no_output_____" ] ], [ [ "EPOCHS = 3\nmodel.fit(train_batches,\n epochs=EPOCHS,\n validation_data=validation_batches)", "_____no_output_____" ] ], [ [ "## Model Interpretability\n\nLet's now go through the steps to generate the class activation maps. You will start by specifying the layers you want to visualize.\n", "_____no_output_____" ] ], [ [ "# select all the layers for which you want to visualize the outputs and store it in a list\noutputs = [layer.output for layer in model.layers[1:18]]\n\n# Define a new model that generates the above output\nvis_model = Model(model.input, outputs)\n\n# store the layer names we are interested in\nlayer_names = []\nfor layer in outputs:\n layer_names.append(layer.name.split(\"/\")[0])\n\n \nprint(\"Layers that will be used for visualization: \")\nprint(layer_names)", "_____no_output_____" ] ], [ [ "### Class activation maps (GradCAM)\n\nWe'll define a few more functions to output the maps. `get_CAM()` is the function highlighted in the lectures and takes care of generating the heatmap of gradient weighted features. 
`show_random_sample()` takes care of plotting the results.", "_____no_output_____" ] ], [ [ "def get_CAM(processed_image, actual_label, layer_name='block5_conv3'):\n model_grad = Model([model.inputs], \n [model.get_layer(layer_name).output, model.output])\n \n with tf.GradientTape() as tape:\n conv_output_values, predictions = model_grad(processed_image)\n\n # watch the conv_output_values\n tape.watch(conv_output_values)\n\n ## Use binary cross entropy loss\n ## actual_label is 0 if cat, 1 if dog\n # get prediction probability of dog\n # If model does well, \n # pred_prob should be close to 0 if cat, close to 1 if dog\n pred_prob = predictions[:,1] \n \n # make sure actual_label is a float, like the rest of the loss calculation\n actual_label = tf.cast(actual_label, dtype=tf.float32)\n \n # add a tiny value to avoid log of 0\n smoothing = 0.00001 \n \n # Calculate loss as binary cross entropy\n loss = -1 * (actual_label * tf.math.log(pred_prob + smoothing) + (1 - actual_label) * tf.math.log(1 - pred_prob + smoothing))\n print(f\"binary loss: {loss}\")\n \n # get the gradient of the loss with respect to the outputs of the last conv layer\n grads_values = tape.gradient(loss, conv_output_values)\n grads_values = K.mean(grads_values, axis=(0,1,2))\n \n conv_output_values = np.squeeze(conv_output_values.numpy())\n grads_values = grads_values.numpy()\n \n # weight the convolution outputs with the computed gradients\n for i in range(512): \n conv_output_values[:,:,i] *= grads_values[i]\n heatmap = np.mean(conv_output_values, axis=-1)\n \n heatmap = np.maximum(heatmap, 0)\n heatmap /= heatmap.max()\n \n del model_grad, conv_output_values, grads_values, loss\n \n return heatmap", "_____no_output_____" ], [ "def show_sample(idx=None):\n \n # if image index is specified, get that image\n if idx:\n for img, label in test_batches.take(idx):\n sample_image = img[0]\n sample_label = label[0]\n # otherwise if idx is not specified, get a random image\n else:\n for img, label in test_batches.shuffle(1000).take(1):\n sample_image = img[0]\n sample_label = label[0]\n \n sample_image_processed = np.expand_dims(sample_image, axis=0)\n \n activations = vis_model.predict(sample_image_processed)\n \n pred_label = np.argmax(model.predict(sample_image_processed), axis=-1)[0]\n \n sample_activation = activations[0][0,:,:,16]\n \n sample_activation-=sample_activation.mean()\n sample_activation/=sample_activation.std()\n \n sample_activation *=255\n sample_activation = np.clip(sample_activation, 0, 255).astype(np.uint8)\n \n heatmap = get_CAM(sample_image_processed, sample_label)\n heatmap = cv2.resize(heatmap, (sample_image.shape[0], sample_image.shape[1]))\n heatmap = heatmap *255\n heatmap = np.clip(heatmap, 0, 255).astype(np.uint8)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_HOT)\n converted_img = sample_image.numpy()\n super_imposed_image = cv2.addWeighted(converted_img, 0.8, heatmap.astype('float32'), 2e-3, 0.0)\n\n f,ax = plt.subplots(2,2, figsize=(15,8))\n\n ax[0,0].imshow(sample_image)\n ax[0,0].set_title(f\"True label: {sample_label} \\n Predicted label: {pred_label}\")\n ax[0,0].axis('off')\n \n ax[0,1].imshow(sample_activation)\n ax[0,1].set_title(\"Random feature map\")\n ax[0,1].axis('off')\n \n ax[1,0].imshow(heatmap)\n ax[1,0].set_title(\"Class Activation Map\")\n ax[1,0].axis('off')\n \n ax[1,1].imshow(super_imposed_image)\n ax[1,1].set_title(\"Activation map superimposed\")\n ax[1,1].axis('off')\n plt.tight_layout()\n plt.show()\n \n return activations", "_____no_output_____" ] ], [ [ "### 
Time to visualize the results", "_____no_output_____" ] ], [ [ "# Choose an image index to show, or leave it as None to get a random image\nactivations = show_sample(idx=None)", "_____no_output_____" ] ], [ [ "### Intermediate activations of layers\n\nYou can use the utility function below to visualize the activations in the intermediate layers you defined earlier. This plots the feature side by side for each convolution layer starting from the earliest layer all the way to the final convolution layer.", "_____no_output_____" ] ], [ [ "def visualize_intermediate_activations(layer_names, activations):\n assert len(layer_names)==len(activations), \"Make sure layers and activation values match\"\n images_per_row=16\n \n for layer_name, layer_activation in zip(layer_names, activations):\n nb_features = layer_activation.shape[-1]\n size= layer_activation.shape[1]\n\n nb_cols = nb_features // images_per_row\n grid = np.zeros((size*nb_cols, size*images_per_row))\n\n for col in range(nb_cols):\n for row in range(images_per_row):\n feature_map = layer_activation[0,:,:,col*images_per_row + row]\n feature_map -= feature_map.mean()\n feature_map /= feature_map.std()\n feature_map *=255\n feature_map = np.clip(feature_map, 0, 255).astype(np.uint8)\n\n grid[col*size:(col+1)*size, row*size:(row+1)*size] = feature_map\n\n scale = 1./size\n plt.figure(figsize=(scale*grid.shape[1], scale*grid.shape[0]))\n plt.title(layer_name)\n plt.grid(False)\n plt.axis('off')\n plt.imshow(grid, aspect='auto', cmap='viridis')\n plt.show()", "_____no_output_____" ], [ "visualize_intermediate_activations(activations=activations, \n layer_names=layer_names)", "_____no_output_____" ] ], [ [ "If you scroll all the way down to see the outputs of the final conv layer, you'll see that there are very few active features and these are mostly located in the face of the cat. This is the region of the image that your model looks at when determining the class.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
e7a4f1b050928f16d11fe5793399e859670a5b0c
234,929
ipynb
Jupyter Notebook
emsembling_predictions.ipynb
siyue-zhang/time-series-forecast-Darts
6f75ed0e1e7c7e8b8bcfa67aab3286b228a30ef0
[ "Apache-2.0" ]
null
null
null
emsembling_predictions.ipynb
siyue-zhang/time-series-forecast-Darts
6f75ed0e1e7c7e8b8bcfa67aab3286b228a30ef0
[ "Apache-2.0" ]
null
null
null
emsembling_predictions.ipynb
siyue-zhang/time-series-forecast-Darts
6f75ed0e1e7c7e8b8bcfa67aab3286b228a30ef0
[ "Apache-2.0" ]
null
null
null
659.912921
70,856
0.946729
[ [ [ "import sys\nimport time\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams[\"figure.figsize\"] = (15,6)\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom functools import reduce\n\nfrom darts import TimeSeries\nfrom darts.models import (\n NaiveSeasonal,\n NaiveDrift,\n ExponentialSmoothing,\n ARIMA,\n RegressionEnsembleModel,\n RegressionModel\n)\n\nfrom darts.metrics import mape, mase\nfrom darts.utils.statistics import check_seasonality, plot_acf, plot_residuals_analysis, plot_hist\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport logging\nlogging.disable(logging.CRITICAL)", "_____no_output_____" ], [ "df = pd.read_csv(\"./data/wet-bulb-temperature-hourly.csv\")\ndt = [datetime.strptime(df.iloc[n,0], \"%Y-%m-%d\")+timedelta(hours=int(df.iloc[n,1])-1) for n in range(df.shape[0])]\ndf.index = dt\ndf.drop([\"wbt_date\",\"wbt_time\"],axis=1,inplace=True)\nseries = TimeSeries.from_dataframe(df)\n_, series = series.split_before(pd.Timestamp('20160101'))\ntrain, val = series.split_before(pd.Timestamp('20200101'))\ntrain.plot(label='training')\nval.plot(label='validation')\nplt.legend();", "_____no_output_____" ] ], [ [ "## Ensembling several predictions\n\nEnsembling is about combining the forecasts produced by several models, in order to obtain a final – and hopefully better forecast.", "_____no_output_____" ] ], [ [ "models = [NaiveSeasonal(12), NaiveSeasonal(24), NaiveDrift()]\n\nmodel_predictions = [m.historical_forecasts(series,\n start=pd.Timestamp('20170101'),\n forecast_horizon=12,\n stride=12,\n last_points_only=False,\n verbose=True)\n for m in models]\n\nmodel_predictions = [reduce((lambda a, b: a.append(b)), model_pred) for model_pred in model_predictions]", "_____no_output_____" ], [ "model_predictions_stacked = model_predictions[0]\nfor model_prediction in model_predictions[1:]:\n model_predictions_stacked = model_predictions_stacked.stack(model_prediction)", "_____no_output_____" ], [ "\"\"\" We build the regression model, and tell it to use the current predictions\n\"\"\"\nregr_model = RegressionModel(lags=None, lags_future_covariates=[0])\n\n\"\"\" Our target series is what we want to predict (the actual data)\n It has to have the same time index as the features series:\n\"\"\"\nseries_target = series.slice_intersect(model_predictions[0])\n\n\"\"\" Here we backtest our regression model\n\"\"\"\nensemble_pred = regr_model.historical_forecasts(\n series=series_target, future_covariates=model_predictions_stacked,\n start=pd.Timestamp('20180101'), forecast_horizon=12, stride=12, verbose=True\n)", "_____no_output_____" ], [ "fig, ax = plt.subplots(2,2,figsize=(12,6))\nax = ax.ravel()\n\nfor i, m in enumerate(models):\n series.plot(label='actual', ax=ax[i])\n model_predictions[i].plot(label=str(m), ax=ax[i])\n\n # intersect last part, to compare all the methods over the duration of the ensemble forecast\n model_pred = model_predictions[i].slice_intersect(ensemble_pred)\n\n mape_model = mape(series, model_pred)\n ax[i].set_title('\\nMAPE: {:.2f}%'.format(mape_model))\n ax[i].legend()\n\nseries.plot(label='actual', ax=ax[3])\nensemble_pred.plot(label='Ensemble', ax=ax[3])\nax[3].set_title('\\nMAPE, ensemble: {:.2f}%'.format(mape(series, ensemble_pred)))\nax[3].legend()\n\nprint('\\nRegression coefficients for the individual models:')\nfor i, m in enumerate(models):\n print('Learned coefficient for {}: {:.2f}'.format(m, regr_model.model.coef_[i]))\nplt.tight_layout();", "_____no_output_____" ] ], [ [ "## 
RegressionEnsembleModel approach", "_____no_output_____" ] ], [ [ "ensemble_model = RegressionEnsembleModel(\n forecasting_models=[NaiveSeasonal(12), NaiveSeasonal(24), NaiveDrift()],\n regression_train_n_points=12)\n\nensemble_model.fit(train)\nensemble_pred = ensemble_model.predict(48)\n\nseries.plot(label='actual')\nensemble_pred.plot(label='Ensemble forecast')\nplt.title('MAPE = {:.2f}%'.format(mape(ensemble_pred, series)))\nplt.legend();", "_____no_output_____" ], [ "val[:48].plot(label='actual')\nensemble_pred.plot(label='Ensemble forecast')\nplt.title('MAPE = {:.2f}%'.format(mape(ensemble_pred, series)))\nplt.legend();", "_____no_output_____" ] ], [ [ "### Only Naive model", "_____no_output_____" ] ], [ [ "naive_model = NaiveSeasonal(K=24)\nnaive_model.fit(train)\nnaive_forecast = naive_model.predict(48)\n\nval[:48].plot(label='actual')\nnaive_forecast.plot(label='Navie forecast')\nplt.title('MAPE = {:.2f}%'.format(mape(naive_forecast, series)))\nplt.legend();", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
e7a4ff15f419f7b4e3c0ed657e3b1423f8de8a8c
16,570
ipynb
Jupyter Notebook
01_Getting_&_Knowing_Your_Data/World Food Facts/Exercises_with_solutions.ipynb
iamoespana92/pandas_exercises
afb095d1f96a95d9d8f7457acf324c8dc14b7650
[ "BSD-3-Clause" ]
null
null
null
01_Getting_&_Knowing_Your_Data/World Food Facts/Exercises_with_solutions.ipynb
iamoespana92/pandas_exercises
afb095d1f96a95d9d8f7457acf324c8dc14b7650
[ "BSD-3-Clause" ]
null
null
null
01_Getting_&_Knowing_Your_Data/World Food Facts/Exercises_with_solutions.ipynb
iamoespana92/pandas_exercises
afb095d1f96a95d9d8f7457acf324c8dc14b7650
[ "BSD-3-Clause" ]
null
null
null
29.642218
228
0.420459
[ [ [ "# Ex1 - Getting and knowing your Data\nCheck out [World Food Facts Exercises Video Tutorial](https://youtu.be/_jCSK4cMcVw) to watch a data scientist go through the exercises", "_____no_output_____" ], [ "### Step 1. Go to https://www.kaggle.com/openfoodfacts/world-food-facts/data", "_____no_output_____" ], [ "### Step 2. Download the dataset to your computer and unzip it.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "### Step 3. Use the tsv file and assign it to a dataframe called food", "_____no_output_____" ] ], [ [ "food = pd.read_csv('~/Desktop/en.openfoodfacts.org.products.tsv', sep='\\t')", "//anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py:2717: DtypeWarning: Columns (0,3,5,19,20,24,25,26,27,28,36,37,38,39,48) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n" ] ], [ [ "### Step 4. See the first 5 entries", "_____no_output_____" ] ], [ [ "food.head()", "_____no_output_____" ] ], [ [ "### Step 5. What is the number of observations in the dataset?", "_____no_output_____" ] ], [ [ "food.shape #will give you both (observations/rows, columns)", "_____no_output_____" ], [ "food.shape[0] #will give you only the observations/rows number", "_____no_output_____" ] ], [ [ "### Step 6. What is the number of columns in the dataset?", "_____no_output_____" ] ], [ [ "print(food.shape) #will give you both (observations/rows, columns)\nprint(food.shape[1]) #will give you only the columns number\n\n#OR\n\nfood.info() #Columns: 163 entries", "(356027, 163)\n163\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 356027 entries, 0 to 356026\nColumns: 163 entries, code to water-hardness_100g\ndtypes: float64(107), object(56)\nmemory usage: 442.8+ MB\n" ] ], [ [ "### Step 7. Print the name of all the columns.", "_____no_output_____" ] ], [ [ "food.columns", "_____no_output_____" ] ], [ [ "### Step 8. What is the name of 105th column?", "_____no_output_____" ] ], [ [ "food.columns[104]", "_____no_output_____" ] ], [ [ "### Step 9. What is the type of the observations of the 105th column?", "_____no_output_____" ] ], [ [ "food.dtypes['-glucose_100g']", "_____no_output_____" ] ], [ [ "### Step 10. How is the dataset indexed?", "_____no_output_____" ] ], [ [ "food.index", "_____no_output_____" ] ], [ [ "### Step 11. What is the product name of the 19th observation?", "_____no_output_____" ] ], [ [ "food.values[18][7]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7a5043913a47bfb0014ab19b597cc44d99f4f30
31,288
ipynb
Jupyter Notebook
Events/ETSS June 2016/DevOps Networking Model/Testing Your Changes.ipynb
manpowertw/leafspine-ops
59309bed802e6d11c7c801f893adfd9b188222b6
[ "Apache-2.0" ]
32
2016-05-24T23:32:02.000Z
2021-11-17T07:53:50.000Z
Events/ETSS June 2016/DevOps Networking Model/Testing Your Changes.ipynb
manpowertw/leafspine-ops
59309bed802e6d11c7c801f893adfd9b188222b6
[ "Apache-2.0" ]
5
2016-09-25T15:55:02.000Z
2018-09-06T10:54:45.000Z
Events/ETSS June 2016/DevOps Networking Model/Testing Your Changes.ipynb
manpowertw/leafspine-ops
59309bed802e6d11c7c801f893adfd9b188222b6
[ "Apache-2.0" ]
34
2016-03-02T17:37:07.000Z
2021-11-17T07:54:04.000Z
40.792699
4,523
0.469413
[ [ [ "# Testing is good\n\nTesting is one of the most important things we can do in our infrastructure to make sure that things are configured the way we expect them to be and that the system as a whole is operating the way we want it to.\n\nIn a world of dynamic protocols that are **designed** to continue to operate in the face of multiple failures, it's always good to make sure that you know when the system has gone through a failure. \n\nIf routing works the way it's supposed to, you may not even be aware you have a failure until the last bandaid finally falls off and you have a total meltdown.\n\nMore importantly, Testing can be used to help gain confidence in your changes, not just for you, but for your peers, managers, and the business who depends on the network to get things done.\n\nWe're going to start as usual by grabbing all the imports we need. \n\n** *Note: I'm going to fly through some of these steps as I've covered them pretty thouroughly in previous blogs, please feel free to ask/comment if there's something that you'd like me to explain in further detail* **", "_____no_output_____" ] ], [ [ "from pyhpeimc.auth import *\nfrom pyhpeimc.plat.icc import *\nfrom pyhpeimc.plat.device import *\nimport jtextfsm as textfsm\nimport yaml\n#import githubuser\nimport mygithub #file not in github repo\nauth = IMCAuth(\"http://\", \"10.101.0.203\", \"8080\", \"admin\", \"admin\")", "_____no_output_____" ] ], [ [ "First we've got to grab the devID of the switch we wish to test ", "_____no_output_____" ] ], [ [ "devid = get_dev_details('10.101.0.221', auth.creds, auth.url)['id']\ndevid", "_____no_output_____" ] ], [ [ "Now we need to create the list of commands that we want to gather their output. For this example, we want to make sure that OSPF, as a system, is still working. So we want to gather the **display ospf peer** output so that we can tkae a look at the peers and make sure that all expected peers are still present and in a **Full/BDR** state.", "_____no_output_____" ] ], [ [ "cmd_list = ['display ospf peer']", "_____no_output_____" ] ], [ [ "Now that we've got the command list, we're going to use the **run_dev_cmd** function from the **pyhpeimc** library to gather this for the devid of the switch we specified above.\n\nWe'll also take a quick look at the contents of the **contents** key of the object that is returned by the run_dev_cmd function.", "_____no_output_____" ] ], [ [ "raw_text_data = run_dev_cmd(devid, cmd_list, auth.creds, auth.url)\nraw_text_data['content']", "_____no_output_____" ] ], [ [ "Just to be sure, we'll print this out to make sure that this is the response we would actually expect for this command from that specific device OS.", "_____no_output_____" ] ], [ [ "print (raw_text_data['content'])", "\t OSPF Process 1 with Router ID 10.101.0.221\r\n Neighbor Brief Information\r\n\r\n Area: 0.0.0.0\r\n Router ID Address Pri Dead-Time Interface State\r\n 10.101.16.1 10.101.0.1 1 36 Vlan1 Full/BDR\r\n 10.101.16.1 10.101.15.1 1 32 Vlan15 Full/BDR\r\n 10.20.1.1 10.20.1.1 1 37 GE2/0/23 Full/BDR\n" ] ], [ [ "We will now run this through a TextFSM template to transform this string into some structured text which will make it much easier to deal with. 
", "_____no_output_____" ] ], [ [ "template = open(\"./Textfsm/Templates/displayospf.textfsm\")\nre_table = textfsm.TextFSM(template)\nfsm_results = re_table.ParseText(raw_text_data['content'])\nospf_peer = [ { 'area': i[0], 'router_id' :i[1], 'address':i[2], 'pri' :i[3], 'deadtime': i[4], 'interface': i[5], 'state': i[6]} for i in fsm_results]\nprint ( \"There are currently \" + str(len(ospf_peer)) + ' OSPF peers on this device')\nospf_peer[0]", "There are currently 4 OSPF peers on this device\n" ] ], [ [ "Now that we've got an object with all the OSPF Peers in them, let's write some quick code to see if the one specific peer, 10.20.1.1, is present in the OSPF peer table and if it's current state is Full/BDR. This will let us know that the OSPF peer we expect to be in the table is, in fact, still in the table and in the FULL/BDR state which tells us there's a pretty good chance it's passing traffic.\n\nI've also added an **else** clause to ", "_____no_output_____" ] ], [ [ "for peer in ospf_peer:\n if (peer['address']) == '10.20.1.1' and peer['state'] == \"Full/BDR\":\n print ( peer['address'] + \" was the peer I was looking for and it's Full\")\n else:\n print (peer['address'] + ' was not the peer I was looking for')", "10.101.0.1 was not the peer I was looking for\n10.101.15.1 was not the peer I was looking for\n10.20.1.1 was the peer I was looking for and it's Full\n was not the peer I was looking for\n" ] ], [ [ "# Checking IP Routes\n\nWhat about checking the routing table of a remote peer?\n", "_____no_output_____" ] ], [ [ "cmd_list = ['display ip routing-table']", "_____no_output_____" ], [ "raw_text_data = run_dev_cmd(devid, cmd_list, auth.creds, auth.url)\nraw_text_data['content']", "_____no_output_____" ], [ "print (raw_text_data['content'])", "Routing Tables: Public\r\n\tDestinations : 58\tRoutes : 59\r\n\r\nDestination/Mask Proto Pre Cost NextHop Interface\r\n\r\n0.0.0.0/0 Static 60 0 10.101.0.1 Vlan1\r\n10.3.10.0/24 Direct 0 0 10.3.10.1 Vlan20\r\n10.3.10.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.3.0/24 Direct 0 0 10.10.3.1 Vlan3\r\n10.10.3.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.10.0/24 Direct 0 0 10.10.10.1 Vlan10\r\n10.10.10.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.11.0/24 Direct 0 0 10.10.11.1 Vlan11\r\n10.10.11.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.12.0/24 Direct 0 0 10.10.12.1 Vlan12\r\n10.10.12.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.13.0/24 Direct 0 0 10.10.13.1 Vlan13\r\n10.10.13.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.50.0/24 Direct 0 0 10.10.50.1 Vlan500\r\n10.10.50.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.101.0/24 Direct 0 0 10.10.101.1 Vlan101\r\n10.10.101.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.102.0/24 Direct 0 0 10.10.102.1 Vlan102\r\n10.10.102.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.103.0/24 Direct 0 0 10.10.103.1 Vlan103\r\n10.10.103.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.105.0/24 Direct 0 0 10.10.105.1 Vlan105\r\n10.10.105.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.106.0/24 Direct 0 0 10.10.106.1 Vlan106\r\n10.10.106.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.201.0/24 Direct 0 0 10.10.201.1 Vlan201\r\n10.10.201.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.10.203.0/24 Direct 0 0 10.10.203.1 Vlan203\r\n10.10.203.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.20.1.0/24 Direct 0 0 10.20.1.254 GE2/0/23\r\n10.20.1.254/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.20.10.0/24 Direct 0 0 10.20.10.1 GE1/0/22\r\n10.20.10.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.101.0.0/24 Direct 0 0 10.101.0.221 Vlan1\r\n10.101.0.221/32 Direct 0 0 127.0.0.1 
InLoop0\r\n10.101.2.0/24 Direct 0 0 10.101.2.1 Vlan2\r\n10.101.2.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.101.15.0/24 Direct 0 0 10.101.15.254 Vlan15\r\n10.101.15.254/32 Direct 0 0 127.0.0.1 InLoop0\r\n10.101.16.0/24 OSPF 10 2 10.101.0.1 Vlan1\r\n OSPF 10 2 10.101.15.1 Vlan15\r\n10.102.1.0/24 Direct 0 0 10.102.1.2 Vlan50\r\n10.102.1.2/32 Direct 0 0 127.0.0.1 InLoop0\r\n127.0.0.0/8 Direct 0 0 127.0.0.1 InLoop0\r\n127.0.0.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n172.16.2.0/24 Direct 0 0 172.16.2.1 Vlan2000\r\n172.16.2.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n172.16.3.0/24 Direct 0 0 172.16.3.1 Vlan2001\r\n172.16.3.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n172.16.3.10/32 Direct 0 0 127.0.0.1 InLoop0\r\n172.16.4.0/24 Direct 0 0 172.16.4.1 Vlan2002\r\n172.16.4.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n172.16.5.0/24 Direct 0 0 172.16.5.1 Vlan2003\r\n172.16.5.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n172.16.6.0/24 Direct 0 0 172.16.6.1 Vlan2004\r\n172.16.6.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n172.16.7.0/24 Direct 0 0 172.16.7.1 Vlan2007\r\n172.16.7.1/32 Direct 0 0 127.0.0.1 InLoop0\r\n192.168.1.221/32 Direct 0 0 127.0.0.1 InLoop0\r\n\n" ], [ "template = open(\"./Textfsm/Templates/displayiproutingtable.textfsm\")\nre_table = textfsm.TextFSM(template)\nfsm_results = re_table.ParseText(raw_text_data['content'])\nip_routes = [ { 'DestinationMask': i[0], 'Proto' :i[1], 'Pre':i[2], 'Cost' :i[3], 'NextHop': i[4], 'Interface': i[5]} for i in fsm_results]\nip_routes[0]", "_____no_output_____" ], [ "for route in ip_routes:\n if route['DestinationMask'] == \"10.20.10.0/24\":\n print (json.dumps(route, indent =4))\n", "{\n \"Proto\": \"Direct\",\n \"Interface\": \"GE1/0/22\",\n \"NextHop\": \"10.20.10.1 \",\n \"Pre\": \"0\",\n \"DestinationMask\": \"10.20.10.0/24\",\n \"Cost\": \"0\"\n}\n" ] ], [ [ "# Checking VLANs", "_____no_output_____" ] ], [ [ "devid = get_dev_details('10.20.10.10', auth.creds, auth.url)['id']\ndevid", "_____no_output_____" ], [ "cmd_list = ['display vlan brief']", "_____no_output_____" ], [ "raw_text_data = run_dev_cmd(devid, cmd_list, auth.creds, auth.url)\nraw_text_data['content']", "_____no_output_____" ], [ "print (raw_text_data['content'])", "Brief information about all VLANs:\r\r\nSupported Minimum VLAN ID: 1\r\r\nSupported Maximum VLAN ID: 4094\r\r\nDefault VLAN ID: 1\r\r\nVLAN ID Name Port\r\r\n1 default FGE1/0/49 FGE1/0/50 FGE1/0/51 \r\r\n FGE1/0/52 XGE1/0/1 XGE1/0/2 \r\r\n XGE1/0/3 XGE1/0/4 XGE1/0/5 \r\r\n XGE1/0/6 XGE1/0/7 XGE1/0/8 \r\r\n XGE1/0/9 XGE1/0/10 XGE1/0/11 \r\r\n XGE1/0/12 XGE1/0/13 XGE1/0/14 \r\r\n XGE1/0/15 XGE1/0/16 XGE1/0/17 \r\r\n XGE1/0/18 XGE1/0/19 XGE1/0/20 \r\r\n XGE1/0/21 XGE1/0/22 XGE1/0/23 \r\r\n XGE1/0/24 XGE1/0/25 XGE1/0/26 \r\r\n XGE1/0/27 XGE1/0/28 XGE1/0/29 \r\r\n XGE1/0/30 XGE1/0/31 XGE1/0/32 \r\r\n XGE1/0/33 XGE1/0/34 XGE1/0/35 \r\r\n XGE1/0/36 XGE1/0/37 XGE1/0/38 \r\r\n XGE1/0/39 XGE1/0/40 XGE1/0/41 \r\r\n XGE1/0/42 XGE1/0/43 XGE1/0/44 \r\r\n XGE1/0/45 XGE1/0/46 XGE1/0/47 \r\r\n XGE1/0/48 \r\r\n\r\r \r5 DoesntBelong \r\n" ], [ "template = open(\"./TextFSM/Templates/displayvlanbrief.textfsm\")\nre_table = textfsm.TextFSM(template)\nfsm_results = re_table.ParseText(raw_text_data['content'])", "_____no_output_____" ], [ "fsm_results\ndev_vlans = [ {'vlanId': i[0], 'vlanName' : i[1]} for i in fsm_results]\ndev_vlans", "_____no_output_____" ] ], [ [ "# Checking our work\n\nNow that we've captured the VLANs present on the device, we can easily compare this back to the GITHUB YAML file where we originally defined what VLANs should be on the device.\n\nFirst we'll create the 
git_vlans object from the file vlans.yaml directly from GITHUB. ", "_____no_output_____" ] ], [ [ "gitauth = mygithub.gitcreds() #you didn't think I was going to give you my password did you?\ngit_vlans = yaml.load(requests.get('https://raw.githubusercontent.com/netmanchris/Jinja2-Network-Configurations-Scripts/master/vlans.yaml', auth=gitauth).text)", "_____no_output_____" ] ], [ [ "## Cleaning up a bit\n\nIf we take a look at the gitvlans variable, we can see it's a little too deep for what we want to do. \nWe're going to perform two transformations on the data here to get it to where we want it to be\n- grab the contents of the git_vlans['vlans'] key which is just the list of vlans.\n- use the .pop() method on each of the vlans to get rid of the **vlanStatus** key which we don't want here\n\nFor the ", "_____no_output_____" ] ], [ [ "git_vlans = git_vlans['vlans']\nfor vlan in git_vlans:\n vlan.pop('vlanStatus')\ngit_vlans", "_____no_output_____" ] ], [ [ "## Comparing git_vlans and dev_vlans\n\nNow that we've got two different list which contain a vlan dictionary for each VLAN with the exact same keys, we can do a boolean magic to see if each of the VLANs are present in the other objects. \n\nWe'll first do this by comparing to see if all of the VLANs from the **git_vlans** object **are** actually on the device. The git_vlans objects was loaded from a YAML file on github where we defined what VLANS **should** be on the device, remember?", "_____no_output_____" ] ], [ [ "for vlan in git_vlans:\n if vlan in dev_vlans:\n print (vlan['vlanId'] + \" is there\")\n elif vlan not in dev_vlans:\n print (devv['vlanId'] + \" is not there\")", "1 is there\n2 is there\n3 is there\n10 is there\n" ] ], [ [ "## Comparing dev_vlans to git_vlans\n\nYou didn't think we were done did you?\n\nFor the last step here, we'll do the exact opposite to see if all of the vlans that are actually present on the device are also defined in the vlans.yaml file on github. We want to make sure that no body snuck in and configured a VLAN in our production environment when we weren't looking, right?", "_____no_output_____" ] ], [ [ "for vlan in dev_vlans:\n if vlan in git_vlans:\n print ( \"VLAN \" + vlan['vlanId'] + \" should be there\")\n elif vlan not in git_vlans:\n print ( \"\\nSomebody added VLAN \" + vlan['vlanId'] + \" when we weren't looking. \\n \\nGo slap them please.\\n\\n\")", "VLAN 1 should be there\nVLAN 2 should be there\nVLAN 3 should be there\n\nSomebody added VLAN 5 when we weren't looking. \n \nGo slap them please.\n\n\nVLAN 10 should be there\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7a5084c404b6dbdc2e362143599a683f213e56f
124,071
ipynb
Jupyter Notebook
CTC2019_tutorial.ipynb
genenetwork/Teaching_CTC2019
04ef275ecaa35975dd9a46ba3e34b3c564ecedfd
[ "CC0-1.0" ]
null
null
null
CTC2019_tutorial.ipynb
genenetwork/Teaching_CTC2019
04ef275ecaa35975dd9a46ba3e34b3c564ecedfd
[ "CC0-1.0" ]
null
null
null
CTC2019_tutorial.ipynb
genenetwork/Teaching_CTC2019
04ef275ecaa35975dd9a46ba3e34b3c564ecedfd
[ "CC0-1.0" ]
null
null
null
219.206714
47,948
0.890522
[ [ [ "# Mapping QTL in BXD mice using R/qtl2\n\n[Karl Broman](https://kbroman.org)\n[<img style=\"display:inline-block;\" src=\"https://orcid.org/sites/default/files/images/orcid_16x16(1).gif\">](https://orcid.org/0000-0002-4914-6671),\n[Department of Biostatistics & Medical Informatics](https://www.biostat.wisc.edu), \n[University of Wisconsin&ndash;Madison](https://www.wisc.edu)\n\nOur aim in this tutorial is to demonstrate how to map quantitative trait loci (QTL) in the BXD mouse recombinant inbred lines using the [R/qtl2](https://kbroman.org/qtl2) software. We will first show how to download BXD phenotypes from [GeneNetwork2](http://gn2.genenetwork.org) using its API, via the R package [R/GNapi](https://github.com/rqtl/GNapi). At the end, we will use the [R/qtl2browse](https://github.com/rqtl/qtl2browse) package to display genome scan results using the [Genetics Genome Browser](https://github.com/chfi/purescript-genome-browser).", "_____no_output_____" ], [ "## Acquiring phenotypes with the GeneNetwork API\n\nWe will first use the [GeneNetwork2](http://gn2.genenetwork.org) API to acquire BXD phenotypes to use for mapping. We will use the R package [R/GNapi](https://github.com/rqtl/GNapi). \n\nWe first need to install the package, which is not available on [CRAN](https://cran.r-project.org), but is available via a private repository.\n\n```r\ninstall.packages(\"GNapi\", repos=\"http://rqtl.org/qtl2cran\")\n```\n\nWe then load the package using `library()`.", "_____no_output_____" ] ], [ [ "install.packages(\"GNapi\", repos=\"http://rqtl.org/qtl2cran\")\n ", "Installing package into ‘/srv/rlibs’\n(as ‘lib’ is unspecified)\n" ], [ "library(GNapi)", "_____no_output_____" ] ], [ [ "The [R/GNapi](https://github.com/kbroman/GNapi) has a variety of functions. For an overview, see [its vignette](http://kbroman.org/GNapi/GNapi.html). Here we will just do one thing: use the function `get_pheno()` to grab BXD phenotype data. You provide a data set and a phenotype. Phenotype 10038 concerns \"habituation\", measured as a difference in locomotor activity between day 1 and day 3 in a 5 minute test trial. ", "_____no_output_____" ] ], [ [ "phe <- get_pheno(\"BXD\", \"10038\")\nhead(phe)", "_____no_output_____" ] ], [ [ "We will use just the column \"value\", but we need to include the strain names so that R/qtl2 can line up these phenotypes with the genotypes.", "_____no_output_____" ] ], [ [ "pheno <- setNames(phe$value, phe$sample_name)\nhead(pheno)", "_____no_output_____" ] ], [ [ "## Acquire genotype data with R/qtl2\n\nWe now want to get genotype data for the BXD panel. We first need to install the [R/qtl2](https://kbroman.org/qtl2) package. As with R/GNapi, it is not available on CRAN, but rather is distributed via a private repository.\n\n```r\ninstall.packages(\"qtl2\", repos=\"http://rqtl.org/qtl2cran\")\n```\n\nWe then load the package with `library()`.", "_____no_output_____" ] ], [ [ "install.packages(\"qtl2\", repos=\"http://rqtl.org/qtl2cran\")", "Installing package into ‘/srv/rlibs’\n(as ‘lib’ is unspecified)\nalso installing the dependencies ‘bit’, ‘prettyunits’, ‘bit64’, ‘blob’, ‘DBI’, ‘pkgconfig’, ‘plogr’, ‘yaml’, ‘data.table’, ‘RSQLite’, ‘RcppEigen’\n\n" ], [ "library(qtl2)", "_____no_output_____" ] ], [ [ "R/qtl2 uses a special file format for QTL data ([described here](https://kbroman.org/qtl2/assets/vignettes/input_files.html)). 
There are a variety of sample datasets [on Github](https://github.com/rqtl/qtl2data), including genotypes for the [mouse BXD lines](https://github.com/rqtl/qtl2data/tree/master/BXD), taken from [GeneNetwork2](http://gn2.genenetwork.org). We'll load those data directly into R using the function `read_cross2()`.", "_____no_output_____" ] ], [ [ "bxd_file <- \"https://raw.githubusercontent.com/rqtl/qtl2data/master/BXD/bxd.zip\"\nbxd <- read_cross2(bxd_file)", "Warning message in recode_geno(sheet, genotypes):\n“117497 genotypes treated as missing: \"H\"”" ] ], [ [ "We get a warning message about heterozygous genotypes being omitted. A number of the newer BXD lines have considerable heterozygosity. But these lines weren't among those phenotyped in the data we downloaded above, and so we don't need to worry about it here.\n\nThe data are read into the object `bxd`, which has class `\"cross2\"`. It contains the genotypes and well as genetic and physical marker maps. There are also phenotype data (which we will ignore).\n\nWe can get a quick summary of the dataset with `summary()`. For reasons that I don't understand, it gets printed as a big mess within this Jupyter notebook, and so here we need to surround it with `print()` to get the intended output.", "_____no_output_____" ] ], [ [ "print( summary(bxd) )", "Object of class cross2 (crosstype \"risib\")\n\nTotal individuals 198\nNo. genotyped individuals 198\nNo. phenotyped individuals 198\nNo. with both geno & pheno 198\n\nNo. phenotypes 5806\nNo. covariates 0\nNo. phenotype covariates 1\n\nNo. chromosomes 20\nTotal markers 7320\n\nNo. markers by chr:\n 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 X \n636 583 431 460 470 449 437 319 447 317 375 308 244 281 247 272 291 250 310 193 \n" ] ], [ [ "The first step in QTL analysis is to calculate genotype probabilities at putative QTL positions across the genome, conditional on the observed marker data. This allows us that consider positions between the genotyped markers and to allow for the presence of genotyping errors.\n\nFirst, we need to define the positions that we will consider. We will take the observed marker positions and insert a set of \"pseudomarkers\" (marker-like positions that are not actually markers). We do this with the function `insert_pseudomarkers()`. We pull the genetic map (`gmap`) out of the `bxd` data as our basic map; `step=0.2` and `stepwidth=\"max\"` mean to insert pseudomarkers so that no two adjacent markers or pseudomarkers are more than 0.2 cM apart. That is, in any marker interval that is greater than 0.2 cM, we will insert one or more evenly spaced pseudomarkers, so that the intervals between markers and pseudomarkers are no more than 0.2 cM.", "_____no_output_____" ] ], [ [ "gmap <- insert_pseudomarkers(bxd$gmap, step=0.2, stepwidth=\"max\")", "_____no_output_____" ] ], [ [ "We will be interested in results with respect to the physical map (in Mbp), and so we need to create a corresponding map that includes the pseudomarker positions. We do this with the function `interp_map()`, which uses linear interpolation to get estimated positions for the inserted pseudomarkers.", "_____no_output_____" ] ], [ [ "pmap <- interp_map(gmap, bxd$gmap, bxd$pmap)", "_____no_output_____" ] ], [ [ "We can now proceed with calculating genotype probabilities for all BXD strains at all markers and pseudomarkers, conditional on the observed marker genotypes and assuming a 0.2% genotyping error rate. 
We use the [Carter-Falconer](https://doi.org/10.1007/BF02996226) map function to convert between cM and recombination fractions; it assumes a high degree of crossover interference, appropriate for the mouse.", "_____no_output_____" ] ], [ [ "pr <- calc_genoprob(bxd, gmap, error_prob=0.002, map_function=\"c-f\")", "_____no_output_____" ] ], [ [ "In the QTL analysis, we will fit a linear mixed model to account for polygenic background effects. We will use the \"leave one chromosome out\" (LOCO) method for this. When we scan a chromosome for a QTL, we include a polygenic term with a kinship matrix derived from all other chromosomes. \n\nWe first need to calculate this set of kinship matrices, which we do with the function `calc_kinship()`. The second argument, `\"loco\"`, indicates that we want to calculate a vector of kinship matrices, each derived from the genotype probabilities but leaving one chromosome out.", "_____no_output_____" ] ], [ [ "k <- calc_kinship(pr, \"loco\")", "_____no_output_____" ] ], [ [ "Now, finally, we're ready to perform the genome scan, which we do with the function `scan1()`. It takes the genotype probabilities and a set of phenotypes (here, just one phenotype). If kinship matrices are provided (here, as `k`), the scan is performed using a linear mixed model. To make the calculations faster, the residual polygenic variance is first estimated without including any QTL effect and is then taking to be fixed and known during the scan.", "_____no_output_____" ] ], [ [ "out <- scan1(pr, pheno, k)", "_____no_output_____" ] ], [ [ "The output of `scan1()` is a matrix of LOD scores; the rows are marker/pseudomarker positions and the columns are phenotypes. We can plot the results using `plot.scan1()`, and we can just use `plot()` because it uses the class of its input to determine what plot to make.\n\nHere I'm using the package [repr](https://cran.r-project.org/package=repr) to control the height and width of the plot that's created. I installed it with `install.packages(\"repr\")`. You can ignore that part, if you want.", "_____no_output_____" ] ], [ [ "library(repr)\noptions(repr.plot.height=4, repr.plot.width=8)\npar(mar=c(5.1, 4.1, 0.6, 0.6))\nplot(out, pmap)", "_____no_output_____" ] ], [ [ "There's a clear QTL on chromosome 8. We can make a plot of just that chromosome with the argument `chr=15`.", "_____no_output_____" ] ], [ [ "plot(out, pmap, chr=15)", "_____no_output_____" ] ], [ [ "Let's create a plot of the phenotype vs the genotype at the inferred QTL. We first need to identify the QTL location, which we can do using `max()`. We then use `maxmarg()` to get inferred genotypes at the inferred QTL.", "_____no_output_____" ] ], [ [ "mx <- max(out, pmap)\ng_imp <- maxmarg(pr, pmap, chr=mx$chr, pos=mx$pos, return_char=TRUE)", "_____no_output_____" ] ], [ [ "We can use `plot_pxg()` to plot the phenotype as a function of QTL genotype. We use `swap_axes=TRUE` to have the phenotype on the x-axis and the genotype on the y-axis, rather than the other way around. Here we see that the BB and DD genotypes are completely separated, phenotypically. ", "_____no_output_____" ] ], [ [ "par(mar=c(5.1, 4.1, 0.6, 0.6))\nplot_pxg(g_imp, pheno, swap_axes=TRUE, xlab=\"Habituation phenotype\")", "_____no_output_____" ] ], [ [ "## Browse genome scan results with Genetics Genome Browser\n\nThe [Genetics Genome Browser](https://github.com/chfi/purescript-genome-browser) is a fast, lightweight, [purescript]-based genome browser developed for browsing GWAS or QTL analysis results. 
We'll use the R package [R/qtl2browse](https://github.com/rqtl/qtl2browse) to view our QTL mapping results in the GGB.\n\nWe first need to install the R/qtl2browse package, again from a private [CRAN](https://cran.r-project.org)-like repository.\n\n```r\ninstall.packages(\"qtl2browse\", repos=\"http://rqtl.org/qtl2cran\")\n```\n\nWe then load the package and use its one function, `browse()`, which takes the `scan1()` output and corresponding physical map (in Mbp). This will open the Genetics Genome Browser in a separate tab in your web browser.", "_____no_output_____" ] ], [ [ "library(qtl2browse)\nbrowse(out, pmap)", "_____no_output_____" ] ] ]
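A rough sketch (my own notation, not text taken from R/qtl2) of the model behind the genome scan above: with the LOCO kinship matrix $K$ for the chromosome being scanned, `scan1()` fits at each marker/pseudomarker position

$$y = X\beta + u + \epsilon, \qquad u \sim N(0,\ \sigma_g^2 K), \qquad \epsilon \sim N(0,\ \sigma_e^2 I),$$

where $y$ is the phenotype vector and $X$ holds an intercept plus the QTL genotype probabilities at that position; the LOD score compares this fit to the same model with the QTL term dropped.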
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7a50e61c8e412715d082fb9e3d1e67273f79cdd
192,218
ipynb
Jupyter Notebook
courses/dl1/lesson3-rossman.ipynb
linbojin/fastai
e666e42796da2e9b6625cecd48b923d00a1ede93
[ "Apache-2.0" ]
1
2019-04-30T07:59:57.000Z
2019-04-30T07:59:57.000Z
courses/dl1/lesson3-rossman.ipynb
linbojin/fastai
e666e42796da2e9b6625cecd48b923d00a1ede93
[ "Apache-2.0" ]
null
null
null
courses/dl1/lesson3-rossman.ipynb
linbojin/fastai
e666e42796da2e9b6625cecd48b923d00a1ede93
[ "Apache-2.0" ]
2
2019-01-13T16:32:55.000Z
2020-07-02T17:42:05.000Z
34.226852
14,748
0.406518
[ [ [ "# Structured and time series data", "_____no_output_____" ], [ "This notebook contains an implementation of the third place result in the Rossman Kaggle competition as detailed in Guo/Berkhahn's [Entity Embeddings of Categorical Variables](https://arxiv.org/abs/1604.06737).\n\nThe motivation behind exploring this architecture is it's relevance to real-world application. Most data used for decision making day-to-day in industry is structured and/or time-series data. Here we explore the end-to-end process of using neural networks with practical structured data problems.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n%reload_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "from fastai.structured import *\nfrom fastai.column_data import *\nnp.set_printoptions(threshold=50, edgeitems=20)\n\nPATH='data/rossmann/'", "_____no_output_____" ] ], [ [ "## Create datasets", "_____no_output_____" ], [ "In addition to the provided data, we will be using external datasets put together by participants in the Kaggle competition. You can download all of them [here](http://files.fast.ai/part2/lesson14/rossmann.tgz).\n\nFor completeness, the implementation used to put them together is included below.", "_____no_output_____" ] ], [ [ "def concat_csvs(dirname):\n path = f'{PATH}{dirname}'\n filenames=glob(f\"{PATH}/*.csv\")\n\n wrote_header = False\n with open(f\"{path}.csv\",\"w\") as outputfile:\n for filename in filenames:\n name = filename.split(\".\")[0]\n with open(filename) as f:\n line = f.readline()\n if not wrote_header:\n wrote_header = True\n outputfile.write(\"file,\"+line)\n for line in f:\n outputfile.write(name + \",\" + line)\n outputfile.write(\"\\n\")", "_____no_output_____" ], [ "# concat_csvs('googletrend')\n# concat_csvs('weather')", "_____no_output_____" ] ], [ [ "Feature Space:\n* train: Training set provided by competition\n* store: List of stores\n* store_states: mapping of store to the German state they are in\n* List of German state names\n* googletrend: trend of certain google keywords over time, found by users to correlate well w/ given data\n* weather: weather\n* test: testing set", "_____no_output_____" ] ], [ [ "table_names = ['train', 'store', 'store_states', 'state_names', \n 'googletrend', 'weather', 'test']", "_____no_output_____" ] ], [ [ "We'll be using the popular data manipulation framework `pandas`. 
Among other things, pandas allows you to manipulate tables/data frames in python as one would in a database.\n\nWe're going to go ahead and load all of our csv's as dataframes into the list `tables`.", "_____no_output_____" ] ], [ [ "tables = [pd.read_csv(f'{PATH}{fname}.csv', low_memory=False) for fname in table_names]", "_____no_output_____" ], [ "from IPython.display import HTML, display", "_____no_output_____" ] ], [ [ "We can use `head()` to get a quick look at the contents of each table:\n* train: Contains store information on a daily basis, tracks things like sales, customers, whether that day was a holdiay, etc.\n* store: general info about the store including competition, etc.\n* store_states: maps store to state it is in\n* state_names: Maps state abbreviations to names\n* googletrend: trend data for particular week/state\n* weather: weather conditions for each state\n* test: Same as training table, w/o sales and customers\n", "_____no_output_____" ] ], [ [ "for t in tables: display(t.head())", "_____no_output_____" ] ], [ [ "This is very representative of a typical industry dataset.\n\nThe following returns summarized aggregate information to each table accross each field.", "_____no_output_____" ] ], [ [ "for t in tables: display(DataFrameSummary(t).summary())", "_____no_output_____" ] ], [ [ "## Data Cleaning / Feature Engineering", "_____no_output_____" ], [ "As a structured data problem, we necessarily have to go through all the cleaning and feature engineering, even though we're using a neural network.", "_____no_output_____" ] ], [ [ "train, store, store_states, state_names, googletrend, weather, test = tables", "_____no_output_____" ], [ "len(train),len(test)", "_____no_output_____" ] ], [ [ "We turn state Holidays to booleans, to make them more convenient for modeling. We can do calculations on pandas fields using notation very similar (often identical) to numpy.", "_____no_output_____" ] ], [ [ "train.StateHoliday = train.StateHoliday!='0'\ntest.StateHoliday = test.StateHoliday!='0'", "_____no_output_____" ] ], [ [ "`join_df` is a function for joining tables on specific fields. By default, we'll be doing a left outer join of `right` on the `left` argument using the given fields for each table.\n\nPandas does joins using the `merge` method. The `suffixes` argument describes the naming convention for duplicate fields. We've elected to leave the duplicate field names on the left untouched, and append a \"\\_y\" to those on the right.", "_____no_output_____" ] ], [ [ "def join_df(left, right, left_on, right_on=None, suffix='_y'):\n if right_on is None: right_on = left_on\n return left.merge(right, how='left', left_on=left_on, right_on=right_on, \n suffixes=(\"\", suffix))", "_____no_output_____" ] ], [ [ "Join weather/state names.", "_____no_output_____" ] ], [ [ "weather = join_df(weather, state_names, \"file\", \"StateName\")", "_____no_output_____" ] ], [ [ "In pandas you can add new columns to a dataframe by simply defining it. We'll do this for googletrends by extracting dates and state names from the given data and adding those columns.\n\nWe're also going to replace all instances of state name 'NI' to match the usage in the rest of the data: 'HB,NI'. This is a good opportunity to highlight pandas indexing. We can use `.loc[rows, cols]` to select a list of rows and a list of columns from the dataframe. 
In this case, we're selecting rows w/ statename 'NI' by using a boolean list `googletrend.State=='NI'` and selecting \"State\".", "_____no_output_____" ] ], [ [ "googletrend['Date'] = googletrend.week.str.split(' - ', expand=True)[0]\ngoogletrend['State'] = googletrend.file.str.split('_', expand=True)[2]\ngoogletrend.loc[googletrend.State=='NI', \"State\"] = 'HB,NI'", "_____no_output_____" ] ], [ [ "The following extracts particular date fields from a complete datetime for the purpose of constructing categoricals.\n\nYou should *always* consider this feature extraction step when working with date-time. Without expanding your date-time into these additional fields, you can't capture any trend/cyclical behavior as a function of time at any of these granularities. We'll add to every table with a date field.", "_____no_output_____" ] ], [ [ "add_datepart(weather, \"Date\", drop=False)\nadd_datepart(googletrend, \"Date\", drop=False)\nadd_datepart(train, \"Date\", drop=False)\nadd_datepart(test, \"Date\", drop=False)", "_____no_output_____" ] ], [ [ "The Google trends data has a special category for the whole of Germany - we'll pull that out so we can use it explicitly.", "_____no_output_____" ] ], [ [ "trend_de = googletrend[googletrend.file == 'Rossmann_DE']", "_____no_output_____" ] ], [ [ "Now we can outer join all of our data into a single dataframe. Recall that in outer joins, every time a value in the joining field on the left table does not have a corresponding value on the right table, the corresponding row in the new table has Null values for all right table fields. One way to check that all records are consistent and complete is to check for Null values post-join, as we do here.\n\n*Aside*: Why not just do an inner join?\nIf you are assuming that all records are complete and match on the field you desire, an inner join will do the same thing as an outer join. However, in the event you are wrong or a mistake is made, an outer join followed by a null-check will catch it. (Comparing before/after # of rows for inner join is equivalent, but requires keeping track of before/after row #'s. 
Outer join is easier.)", "_____no_output_____" ] ], [ [ "store = join_df(store, store_states, \"Store\")\nlen(store[store.State.isnull()])", "_____no_output_____" ], [ "joined = join_df(train, store, \"Store\")\njoined_test = join_df(test, store, \"Store\")\nlen(joined[joined.StoreType.isnull()]),len(joined_test[joined_test.StoreType.isnull()])", "_____no_output_____" ], [ "joined = join_df(joined, googletrend, [\"State\",\"Year\", \"Week\"])\njoined_test = join_df(joined_test, googletrend, [\"State\",\"Year\", \"Week\"])\nlen(joined[joined.trend.isnull()]),len(joined_test[joined_test.trend.isnull()])", "_____no_output_____" ], [ "joined = joined.merge(trend_de, 'left', [\"Year\", \"Week\"], suffixes=('', '_DE'))\njoined_test = joined_test.merge(trend_de, 'left', [\"Year\", \"Week\"], suffixes=('', '_DE'))\nlen(joined[joined.trend_DE.isnull()]),len(joined_test[joined_test.trend_DE.isnull()])", "_____no_output_____" ], [ "joined = join_df(joined, weather, [\"State\",\"Date\"])\njoined_test = join_df(joined_test, weather, [\"State\",\"Date\"])\nlen(joined[joined.Mean_TemperatureC.isnull()]),len(joined_test[joined_test.Mean_TemperatureC.isnull()])", "_____no_output_____" ], [ "for df in (joined, joined_test):\n for c in df.columns:\n if c.endswith('_y'):\n if c in df.columns: df.drop(c, inplace=True, axis=1)", "_____no_output_____" ] ], [ [ "Next we'll fill in missing values to avoid complications with `NA`'s. `NA` (not available) is how Pandas indicates missing values; many models have problems when missing values are present, so it's always important to think about how to deal with them. In these cases, we are picking an arbitrary *signal value* that doesn't otherwise appear in the data.", "_____no_output_____" ] ], [ [ "for df in (joined,joined_test):\n df['CompetitionOpenSinceYear'] = df.CompetitionOpenSinceYear.fillna(1900).astype(np.int32)\n df['CompetitionOpenSinceMonth'] = df.CompetitionOpenSinceMonth.fillna(1).astype(np.int32)\n df['Promo2SinceYear'] = df.Promo2SinceYear.fillna(1900).astype(np.int32)\n df['Promo2SinceWeek'] = df.Promo2SinceWeek.fillna(1).astype(np.int32)", "_____no_output_____" ] ], [ [ "Next we'll extract features \"CompetitionOpenSince\" and \"CompetitionDaysOpen\". 
Note the use of `apply()` in mapping a function across dataframe values.", "_____no_output_____" ] ], [ [ "for df in (joined,joined_test):\n df[\"CompetitionOpenSince\"] = pd.to_datetime(dict(year=df.CompetitionOpenSinceYear, \n month=df.CompetitionOpenSinceMonth, day=15))\n df[\"CompetitionDaysOpen\"] = df.Date.subtract(df.CompetitionOpenSince).dt.days", "_____no_output_____" ] ], [ [ "We'll replace some erroneous / outlying data.", "_____no_output_____" ] ], [ [ "for df in (joined,joined_test):\n df.loc[df.CompetitionDaysOpen<0, \"CompetitionDaysOpen\"] = 0\n df.loc[df.CompetitionOpenSinceYear<1990, \"CompetitionDaysOpen\"] = 0", "_____no_output_____" ] ], [ [ "We add \"CompetitionMonthsOpen\" field, limiting the maximum to 2 years to limit number of unique categories.", "_____no_output_____" ] ], [ [ "for df in (joined,joined_test):\n df[\"CompetitionMonthsOpen\"] = df[\"CompetitionDaysOpen\"]//30\n df.loc[df.CompetitionMonthsOpen>24, \"CompetitionMonthsOpen\"] = 24\njoined.CompetitionMonthsOpen.unique()", "_____no_output_____" ] ], [ [ "Same process for Promo dates.", "_____no_output_____" ] ], [ [ "for df in (joined,joined_test):\n df[\"Promo2Since\"] = pd.to_datetime(df.apply(lambda x: Week(\n x.Promo2SinceYear, x.Promo2SinceWeek).monday(), axis=1).astype(pd.datetime))\n df[\"Promo2Days\"] = df.Date.subtract(df[\"Promo2Since\"]).dt.days", "_____no_output_____" ], [ "for df in (joined,joined_test):\n df.loc[df.Promo2Days<0, \"Promo2Days\"] = 0\n df.loc[df.Promo2SinceYear<1990, \"Promo2Days\"] = 0\n df[\"Promo2Weeks\"] = df[\"Promo2Days\"]//7\n df.loc[df.Promo2Weeks<0, \"Promo2Weeks\"] = 0\n df.loc[df.Promo2Weeks>25, \"Promo2Weeks\"] = 25\n df.Promo2Weeks.unique()", "_____no_output_____" ], [ "joined.to_feather(f'{PATH}joined')\njoined_test.to_feather(f'{PATH}joined_test')", "_____no_output_____" ] ], [ [ "## Durations", "_____no_output_____" ], [ "It is common when working with time series data to extract data that explains relationships across rows as opposed to columns, e.g.:\n* Running averages\n* Time until next event\n* Time since last event\n\nThis is often difficult to do with most table manipulation frameworks, since they are designed to work with relationships across columns. As such, we've created a class to handle this type of data.\n\nWe'll define a function `get_elapsed` for cumulative counting across a sorted dataframe. Given a particular field `fld` to monitor, this function will start tracking time since the last occurrence of that field. When the field is seen again, the counter is set to zero.\n\nUpon initialization, this will result in datetime na's until the field is encountered. This is reset every time a new store is seen. We'll see how to use this shortly.", "_____no_output_____" ] ], [ [ "def get_elapsed(fld, pre):\n day1 = np.timedelta64(1, 'D')\n last_date = np.datetime64()\n last_store = 0\n res = []\n\n for s,v,d in zip(df.Store.values,df[fld].values, df.Date.values):\n if s != last_store:\n last_date = np.datetime64()\n last_store = s\n if v: last_date = d\n res.append(((d-last_date).astype('timedelta64[D]') / day1))\n df[pre+fld] = res", "_____no_output_____" ] ], [ [ "We'll be applying this to a subset of columns:", "_____no_output_____" ] ], [ [ "columns = [\"Date\", \"Store\", \"Promo\", \"StateHoliday\", \"SchoolHoliday\"]", "_____no_output_____" ], [ "df = train[columns]", "_____no_output_____" ], [ "df = test[columns]", "_____no_output_____" ] ], [ [ "Let's walk through an example.\n\nSay we're looking at School Holiday. 
We'll first sort by Store, then Date, and then call `get_elapsed('SchoolHoliday', 'After')`. This will:\n* Be applied to every row of the dataframe, in order of store and date\n* Add to the dataframe the number of days since the last School Holiday\n* If we sort in the other direction, count the days until the next School Holiday.", "_____no_output_____" ] ], [ [ "fld = 'SchoolHoliday'\ndf = df.sort_values(['Store', 'Date'])\nget_elapsed(fld, 'After')\ndf = df.sort_values(['Store', 'Date'], ascending=[True, False])\nget_elapsed(fld, 'Before')", "_____no_output_____" ] ], [ [ "We'll do this for two more fields.", "_____no_output_____" ] ], [ [ "fld = 'StateHoliday'\ndf = df.sort_values(['Store', 'Date'])\nget_elapsed(fld, 'After')\ndf = df.sort_values(['Store', 'Date'], ascending=[True, False])\nget_elapsed(fld, 'Before')", "_____no_output_____" ], [ "fld = 'Promo'\ndf = df.sort_values(['Store', 'Date'])\nget_elapsed(fld, 'After')\ndf = df.sort_values(['Store', 'Date'], ascending=[True, False])\nget_elapsed(fld, 'Before')", "_____no_output_____" ] ], [ [ "We're going to set the active index to Date.", "_____no_output_____" ] ], [ [ "df = df.set_index(\"Date\")", "_____no_output_____" ] ], [ [ "Then set null values from elapsed field calculations to 0.", "_____no_output_____" ] ], [ [ "columns = ['SchoolHoliday', 'StateHoliday', 'Promo']", "_____no_output_____" ], [ "for o in ['Before', 'After']:\n    for p in columns:\n        a = o+p\n        df[a] = df[a].fillna(0).astype(int)", "_____no_output_____" ] ], [ [ "Next we'll demonstrate window functions in pandas to calculate rolling quantities.\n\nHere we're sorting by date (`sort_index()`) and counting the number of events of interest (`sum()`) defined in `columns` in the following week (`rolling()`), grouped by Store (`groupby()`). We do the same in the opposite direction.", "_____no_output_____" ] ], [ [ "bwd = df[['Store']+columns].sort_index().groupby(\"Store\").rolling(7, min_periods=1).sum()", "_____no_output_____" ], [ "fwd = df[['Store']+columns].sort_index(ascending=False\n                      ).groupby(\"Store\").rolling(7, min_periods=1).sum()", "_____no_output_____" ] ], [ [ "Next we want to drop the Store indices grouped together in the window function.\n\nOften in pandas, there is an option to do this in place. 
This is time and memory efficient when working with large datasets.", "_____no_output_____" ] ], [ [ "bwd.drop('Store',1,inplace=True)\nbwd.reset_index(inplace=True)", "_____no_output_____" ], [ "fwd.drop('Store',1,inplace=True)\nfwd.reset_index(inplace=True)", "_____no_output_____" ], [ "df.reset_index(inplace=True)", "_____no_output_____" ] ], [ [ "Now we'll merge these values onto the df.", "_____no_output_____" ] ], [ [ "df = df.merge(bwd, 'left', ['Date', 'Store'], suffixes=['', '_bw'])\ndf = df.merge(fwd, 'left', ['Date', 'Store'], suffixes=['', '_fw'])", "_____no_output_____" ], [ "df.drop(columns,1,inplace=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "It's usually a good idea to back up large tables of extracted / wrangled features before you join them onto another one, that way you can go back to it easily if you need to make changes to it.", "_____no_output_____" ] ], [ [ "df.to_feather(f'{PATH}df')", "_____no_output_____" ], [ "df = pd.read_feather(f'{PATH}df')", "/home/jhoward/anaconda3/lib/python3.6/site-packages/numpy/lib/arraysetops.py:463: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n mask |= (ar1 == a)\n" ], [ "df[\"Date\"] = pd.to_datetime(df.Date)", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "joined = join_df(joined, df, ['Store', 'Date'])", "_____no_output_____" ], [ "joined_test = join_df(joined_test, df, ['Store', 'Date'])", "_____no_output_____" ] ], [ [ "The authors also removed all instances where the store had zero sale / was closed. We speculate that this may have cost them a higher standing in the competition. One reason this may be the case is that a little exploratory data analysis reveals that there are often periods where stores are closed, typically for refurbishment. Before and after these periods, there are naturally spikes in sales that one might expect. 
By ommitting this data from their training, the authors gave up the ability to leverage information about these periods to predict this otherwise volatile behavior.", "_____no_output_____" ] ], [ [ "joined = joined[joined.Sales!=0]", "_____no_output_____" ] ], [ [ "We'll back this up as well.", "_____no_output_____" ] ], [ [ "joined.reset_index(inplace=True)\njoined_test.reset_index(inplace=True)", "_____no_output_____" ], [ "joined.to_feather(f'{PATH}joined')\njoined_test.to_feather(f'{PATH}joined_test')", "_____no_output_____" ] ], [ [ "We now have our final set of engineered features.\n\nWhile these steps were explicitly outlined in the paper, these are all fairly typical feature engineering steps for dealing with time series data and are practical in any similar setting.", "_____no_output_____" ], [ "## Create features", "_____no_output_____" ] ], [ [ "joined = pd.read_feather(f'{PATH}joined')\njoined_test = pd.read_feather(f'{PATH}joined_test')", "_____no_output_____" ], [ "joined.head().T.head(40)", "_____no_output_____" ] ], [ [ "Now that we've engineered all our features, we need to convert to input compatible with a neural network.\n\nThis includes converting categorical variables into contiguous integers or one-hot encodings, normalizing continuous features to standard normal, etc...", "_____no_output_____" ] ], [ [ "cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen',\n 'Promo2Weeks', 'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',\n 'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw',\n 'SchoolHoliday_fw', 'SchoolHoliday_bw']\n\ncontin_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC',\n 'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h', \n 'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE',\n 'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday']\n\nn = len(joined); n", "_____no_output_____" ], [ "dep = 'Sales'\njoined = joined[cat_vars+contin_vars+[dep, 'Date']].copy()", "_____no_output_____" ], [ "joined_test[dep] = 0\njoined_test = joined_test[cat_vars+contin_vars+[dep, 'Date', 'Id']].copy()", "_____no_output_____" ], [ "for v in cat_vars: joined[v] = joined[v].astype('category').cat.as_ordered()", "_____no_output_____" ], [ "apply_cats(joined_test, joined)", "_____no_output_____" ], [ "for v in contin_vars:\n joined[v] = joined[v].fillna(0).astype('float32')\n joined_test[v] = joined_test[v].fillna(0).astype('float32')", "_____no_output_____" ] ], [ [ "We're going to run on a sample.", "_____no_output_____" ] ], [ [ "idxs = get_cv_idxs(n, val_pct=150000/n)\njoined_samp = joined.iloc[idxs].set_index(\"Date\")\nsamp_size = len(joined_samp); samp_size", "_____no_output_____" ] ], [ [ "To run on the full dataset, use this instead:", "_____no_output_____" ] ], [ [ "samp_size = n\njoined_samp = joined.set_index(\"Date\")", "_____no_output_____" ] ], [ [ "We can now process our data...", "_____no_output_____" ] ], [ [ "joined_samp.head(2)", "_____no_output_____" ], [ "df, y, nas, mapper = proc_df(joined_samp, 'Sales', do_scale=True)\nyl = np.log(y)", "_____no_output_____" ], [ "joined_test = joined_test.set_index(\"Date\")", "_____no_output_____" ], [ "df_test, _, nas, mapper = proc_df(joined_test, 'Sales', do_scale=True, skip_flds=['Id'],\n mapper=mapper, na_dict=nas)", "_____no_output_____" ], [ "df.head(2)", "_____no_output_____" ] ], [ [ "In time series data, 
cross-validation is not random. Instead, our holdout data is generally the most recent data, as it would be in real application. This issue is discussed in detail in [this post](http://www.fast.ai/2017/11/13/validation-sets/) on our web site.\n\nOne approach is to take the last 25% of rows (sorted by date) as our validation set.", "_____no_output_____" ] ], [ [ "train_ratio = 0.75\n# train_ratio = 0.9\ntrain_size = int(samp_size * train_ratio); train_size\nval_idx = list(range(train_size, len(df)))", "_____no_output_____" ] ], [ [ "An even better option for picking a validation set is using the exact same length of time period as the test set uses - this is implemented here:", "_____no_output_____" ] ], [ [ "val_idx = np.flatnonzero(\n (df.index<=datetime.datetime(2014,9,17)) & (df.index>=datetime.datetime(2014,8,1)))", "_____no_output_____" ], [ "val_idx=[0]", "_____no_output_____" ] ], [ [ "## DL", "_____no_output_____" ], [ "We're ready to put together our models.\n\nRoot-mean-squared percent error is the metric Kaggle used for this competition.", "_____no_output_____" ] ], [ [ "def inv_y(a): return np.exp(a)\n\ndef exp_rmspe(y_pred, targ):\n targ = inv_y(targ)\n pct_var = (targ - inv_y(y_pred))/targ\n return math.sqrt((pct_var**2).mean())\n\nmax_log_y = np.max(yl)\ny_range = (0, max_log_y*1.2)", "_____no_output_____" ] ], [ [ "We can create a ModelData object directly from out data frame.", "_____no_output_____" ] ], [ [ "md = ColumnarModelData.from_data_frame(PATH, val_idx, df, yl.astype(np.float32), cat_flds=cat_vars, bs=128,\n test_df=df_test)", "_____no_output_____" ] ], [ [ "Some categorical variables have a lot more levels than others. Store, in particular, has over a thousand!", "_____no_output_____" ] ], [ [ "cat_sz = [(c, len(joined_samp[c].cat.categories)+1) for c in cat_vars]", "_____no_output_____" ], [ "cat_sz", "_____no_output_____" ] ], [ [ "We use the *cardinality* of each variable (that is, its number of unique values) to decide how large to make its *embeddings*. 
Each level will be associated with a vector with length defined as below.", "_____no_output_____" ] ], [ [ "emb_szs = [(c, min(50, (c+1)//2)) for _,c in cat_sz]", "_____no_output_____" ], [ "emb_szs", "_____no_output_____" ], [ "m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),\n 0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)\nlr = 1e-3", "_____no_output_____" ], [ "m.lr_find()", "_____no_output_____" ], [ "m.sched.plot(100)", "_____no_output_____" ] ], [ [ "### Sample", "_____no_output_____" ] ], [ [ "m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),\n 0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)\nlr = 1e-3", "_____no_output_____" ], [ "m.fit(lr, 3, metrics=[exp_rmspe])", "_____no_output_____" ], [ "m.fit(lr, 5, metrics=[exp_rmspe], cycle_len=1)", "_____no_output_____" ], [ "m.fit(lr, 2, metrics=[exp_rmspe], cycle_len=4)", "_____no_output_____" ] ], [ [ "### All", "_____no_output_____" ] ], [ [ "m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),\n 0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)\nlr = 1e-3", "_____no_output_____" ], [ "m.fit(lr, 1, metrics=[exp_rmspe])", "_____no_output_____" ], [ "m.fit(lr, 3, metrics=[exp_rmspe])", "_____no_output_____" ], [ "m.fit(lr, 3, metrics=[exp_rmspe], cycle_len=1)", "_____no_output_____" ] ], [ [ "### Test", "_____no_output_____" ] ], [ [ "m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),\n 0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)\nlr = 1e-3", "_____no_output_____" ], [ "m.fit(lr, 3, metrics=[exp_rmspe])", "_____no_output_____" ], [ "m.fit(lr, 3, metrics=[exp_rmspe], cycle_len=1)", "_____no_output_____" ], [ "m.save('val0')", "_____no_output_____" ], [ "m.load('val0')", "_____no_output_____" ], [ "x,y=m.predict_with_targs()", "_____no_output_____" ], [ "exp_rmspe(x,y)", "_____no_output_____" ], [ "pred_test=m.predict(True)", "_____no_output_____" ], [ "pred_test = np.exp(pred_test)", "_____no_output_____" ], [ "joined_test['Sales']=pred_test", "_____no_output_____" ], [ "csv_fn=f'{PATH}tmp/sub.csv'", "_____no_output_____" ], [ "joined_test[['Id','Sales']].to_csv(csv_fn, index=False)", "_____no_output_____" ], [ "FileLink(csv_fn)", "_____no_output_____" ] ], [ [ "## RF", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor", "_____no_output_____" ], [ "((val,trn), (y_val,y_trn)) = split_by_idx(val_idx, df.values, yl)", "_____no_output_____" ], [ "m = RandomForestRegressor(n_estimators=40, max_features=0.99, min_samples_leaf=2,\n n_jobs=-1, oob_score=True)\nm.fit(trn, y_trn);", "_____no_output_____" ], [ "preds = m.predict(val)\nm.score(trn, y_trn), m.score(val, y_val), m.oob_score_, exp_rmspe(preds, y_val)", "_____no_output_____" ] ] ]
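As a small aside, here is a worked illustration of the embedding-size rule used above (`min(50, (c+1)//2)`), with hypothetical cardinalities rather than the real ones from this dataset:

```python
# Sketch: embedding width implied by the rule above for a few made-up cardinalities.
for c in [4, 8, 53, 1116]:
    print(c, min(50, (c + 1) // 2))
# 4 -> 2, 8 -> 4, 53 -> 27, 1116 -> 50 (the cap of 50 kicks in)
```

Low-cardinality variables get narrow embedding vectors, while very high-cardinality ones such as Store hit the cap of 50 dimensions.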
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7a510ae1295eabe960366002c4620d946c7a77b
102,988
ipynb
Jupyter Notebook
Create_Dataset_MarlonFranco.ipynb
marlonrcfranco/DatasetMarlon
d558dae5df078a77806b4a364bd5356c2bc94b07
[ "Apache-2.0" ]
9
2020-03-29T21:32:15.000Z
2021-12-02T22:09:59.000Z
Create_Dataset_MarlonFranco.ipynb
marlonrcfranco/DatasetMarlon
d558dae5df078a77806b4a364bd5356c2bc94b07
[ "Apache-2.0" ]
null
null
null
Create_Dataset_MarlonFranco.ipynb
marlonrcfranco/DatasetMarlon
d558dae5df078a77806b4a364bd5356c2bc94b07
[ "Apache-2.0" ]
2
2021-01-20T05:49:30.000Z
2021-12-20T13:38:55.000Z
44.334051
13,938
0.392424
[ [ [ "<a href=\"https://colab.research.google.com/github/marlonrcfranco/soyforecast/blob/master/Create_Dataset_MarlonFranco.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Dataset Marlon\n## Soybean, CBOT Soybean Futures + ( Global Historical Climatology Network (GHCN) filtered by USDA-NASS-soybeans-production_bushels-2015)\n\n### Soybean, CBOT Soybean Futures\n- https://blog.quandl.com/api-for-commodity-data\n- http://www.quandl.com/api/v3/datasets/CHRIS/CME_S1/\n\n### Global Historical Climatology Network (GHCN)\n- https://www.ncdc.noaa.gov/data-access/land-based-station-data/land-based-datasets/global-historical-climatology-network-ghcn\n- FTP: ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/\n\n### USDA-NASS-soybeans-production_bushels-2015\n- https://usda-reports.nautilytics.com/?crop=soybeans&statistic=production_dollars&year=2007\n- https://www.nass.usda.gov/Data_Visualization/index.php\n\n\n\n\n\n\n\nhttps://github.com/aaronpenne/get_noaa_ghcn_data.git\n", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport pandas as pd\nimport numpy as np\nimport os\nfrom six.moves import urllib\nfrom ftplib import FTP\nfrom io import StringIO\nfrom IPython.display import clear_output\nfrom functools import reduce\nimport tarfile\nimport subprocess\n#subprocess.run([\"ls\", \"-l\"])\nimport zipfile\nimport shutil # move files\nimport psutil\n\n\n# Load the Drive helper and mount\nfrom google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ] ], [ [ "## Defines", "_____no_output_____" ] ], [ [ "ROOT_PATH = \"drive/My Drive/TCC/\"\nDATASETS_PATH = ROOT_PATH + \"datasets/\"\nSOYBEAN_PATH = DATASETS_PATH + \"CBOTSoybeanFutures/\"\nWEATHER_PATH = DATASETS_PATH + \"GHCN_Data/\"\nSOYBEAN_URL = \"http://www.quandl.com/api/v3/datasets/CHRIS/CME_S1/data.csv\"\nUSDA_PATH = \"datasets/USDA-NASS-soybeans-production_bushels-2015/\"\n\nWEATHER_PATH_DRIVE_ZIP = WEATHER_PATH + \"data/zip/\"\nWEATHER_PATH_DRIVE_CSV = WEATHER_PATH + \"data/csv/\"\nFIXED_STATE_FILE = WEATHER_PATH + \"fixed_states.txt\"\nCALCULATED_STATE_FILE = WEATHER_PATH + \"calculated_states.txt\"\n\nDOWNLOADED_STATIONS_FILE = WEATHER_PATH + \"downloaded_stations.txt\"\nDOWNLOADED_STATIONS_FILE_TEMP = DOWNLOADED_STATIONS_FILE\n\n\nplt.rcParams[\"figure.figsize\"] = [19,15]\nplt.rcParams.update({'font.size': 27})\n", "_____no_output_____" ], [ "# Create directories\n# and initial files\n\nif not os.path.exists(SOYBEAN_PATH):\n os.makedirs(SOYBEAN_PATH)\n\nif not os.path.exists(WEATHER_PATH_DRIVE_ZIP):\n os.makedirs(WEATHER_PATH_DRIVE_ZIP)\n\nif not os.path.exists(WEATHER_PATH_DRIVE_CSV):\n os.makedirs(WEATHER_PATH_DRIVE_CSV)\n\nif not os.path.exists(DOWNLOADED_STATIONS_FILE):\n open(DOWNLOADED_STATIONS_FILE,'a').close()\n\nif not os.path.exists(DOWNLOADED_STATIONS_FILE_TEMP):\n open(DOWNLOADED_STATIONS_FILE_TEMP,'a').close()\n\nif not os.path.exists(FIXED_STATE_FILE):\n open(FIXED_STATE_FILE,'a').close()\n\nif not os.path.exists(CALCULATED_STATE_FILE):\n open(CALCULATED_STATE_FILE,'a').close()\n", "_____no_output_____" ] ], [ [ "#### https://github.com/aaronpenne/get_noaa_ghcn_data.git", "_____no_output_____" ], [ "##### 
https://github.com/aaronpenne/get_noaa_ghcn_data/blob/master/get_station_id.py\n\n", "_____no_output_____" ] ], [ [ "# -*- coding: utf-8 -*-\n\"\"\"\nSearches list of stations via user input to find the station ID.\nAuthor: Aaron Penne\n------------------------------\nVariable Columns Type\n------------------------------\nID 1-11 Character\nLATITUDE 13-20 Real\nLONGITUDE 22-30 Real\nELEVATION 32-37 Real\nSTATE 39-40 Character\nNAME 42-71 Character\nGSN FLAG 73-75 Character\nHCN/CRN FLAG 77-79 Character\nWMO ID 81-85 Character\n------------------------------\n\"\"\"\n\nimport sys\nimport pandas as pd\nfrom ftplib import FTP\nimport os\n\noutput_dir = os.path.relpath('output')\nif not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\nftp_path_dly = '/pub/data/ghcn/daily/'\nftp_path_dly_all = '/pub/data/ghcn/daily/all/'\nftp_filename = 'ghcnd-stations.txt'\n\ndef connect_to_ftp():\n ftp_path_root = 'ftp.ncdc.noaa.gov'\n\n # Access NOAA FTP server\n ftp = FTP(ftp_path_root)\n message = ftp.login() # No credentials needed\n print(message)\n return ftp\n\ndef get_station_id(ftp, search_term):\n '''\n Get stations file\n '''\n ftp_full_path = os.path.join(ftp_path_dly, ftp_filename)\n local_full_path = os.path.join(output_dir, ftp_filename)\n if not os.path.isfile(local_full_path):\n with open(local_full_path, 'wb+') as f:\n ftp.retrbinary('RETR ' + ftp_full_path, f.write)\n\n '''\n Get user search term\n '''\n query = search_term\n query = query.upper()\n print(\"> Query: '\"+query+\"'\")\n\n '''\n Read stations text file using fixed-width-file reader built into pandas\n '''\n # http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_fwf.html\n dtype = {'STATION_ID': str,\n 'LATITUDE': str,\n 'LONGITUDE': str,\n 'ELEVATION': str,\n 'STATE': str,\n 'STATION_NAME': str,\n 'GSN_FLAG': str,\n 'HCN_CRN_FLAG': str,\n 'WMO_ID': str}\n names = ['STATION_ID', 'LATITUDE', 'LONGITUDE', 'ELEVATION', 'STATE', 'STATION_NAME', 'GSN_FLAG', 'HCN_CRN_FLAG', 'WMO_ID']\n widths = [11, # Station ID\n 9, # Latitude (decimal degrees)\n 10, # Longitude (decimal degrees)\n 7, # Elevation (meters)\n 3, # State (USA stations only)\n 31, # Station Name\n 4, # GSN Flag\n 4, # HCN/CRN Flag\n 6] # WMO ID\n df = pd.read_fwf(local_full_path, widths=widths, names=names, dtype=dtype, header=None)\n\n '''\n Replace missing values (nan, -999.9)\n '''\n df['STATE'] = df['STATE'].replace('nan', '--')\n df['GSN_FLAG'] = df['GSN_FLAG'].replace('nan', '---')\n df['HCN_CRN_FLAG'] = df['GSN_FLAG'].replace('nan', '---')\n df = df.replace(-999.9, float('nan'))\n\n try:\n '''\n Get query results, but only the columns we care about\n '''\n print('Searching records...')\n matches = df['STATION_ID'].str.contains(query)\n df = df.loc[matches, ['STATION_ID', 'LATITUDE', 'LONGITUDE', 'ELEVATION', 'STATE', 'STATION_NAME']]\n df.reset_index(drop=True, inplace=True)\n\n '''\n Get file sizes of each station's records to augment results\n '''\n #print('Getting file sizes...', end='')\n #print(df.index)\n #ftp.voidcmd('TYPE I') # Needed to avoid FTP error with ftp.size()\n #count=0\n #last = ''\n #for i in list(df.index):\n # count = count + 1\n # print('.', end='')\n # ftp_dly_file = ftp_path_dly + 'all/' + df.loc[i, 'STATION_ID'] + '.dly'\n # #print(df.loc[i, 'STATION_ID'], end='')\n # df.loc[i, 'SIZE'] = round(ftp.size(ftp_dly_file)/1000) # Kilobytes\n # #print('size: %d KB' %round(ftp.size(ftp_dly_file)/1000))\n # actual = \" %.1f%% \" % ((count/df.index.size)*100)\n # if (actual != last):\n # clear_output()\n # last = actual\n # 
#print(\"%.2f%% \" %((count/df.index.size)*100), end='')\n # print('Getting file sizes...')\n # print(str(actual) + ' ['+ str(count) + ' of ' + str(df.index.size) + ']')\n \n print()\n print()\n\n '''\n Sort by size then by rounded lat/long values to group geographic areas and show stations with most data\n '''\n df_sort = df.round(0)\n #df_sort.sort_values(['LATITUDE', 'LONGITUDE', 'SIZE'], ascending=False, inplace=True)\n df_sort.sort_values(['LATITUDE', 'LONGITUDE'], ascending=False, inplace=True)\n df = df.loc[df_sort.index]\n df.reset_index(drop=True, inplace=True)\n \n except:\n print('Station not found')\n traceback.print_exc(file=sys.stdout)\n ftp.quit()\n sys.exit()\n \n '''\n Print headers and values to facilitate reading\n '''\n #selection = 'Index'\n #station_id = 'Station_ID '\n #lat = 'Latitude'\n #lon = 'Longitude'\n #state = 'State'\n #name = 'Station_Name '\n #size = ' File_Size'\n # Format output to be pretty, hopefully there is a prettier way to do this.\n #print('{: <6}{: <31}{: <6}({: >8},{: >10}){: >13}'.format(selection, name, state, lat, lon, size))\n #print('-'*5 + ' ' + '-'*30 + ' ' + '-'*5 + ' ' + '-'*21 + ' ' + '-'*12)\n #for i in list(df.index):\n # print('{: 4}: {: <31}{: <6}({: >8},{: >10}){: >10} Kb'.format(i,\n # df.loc[i,'STATION_NAME'],\n # df.loc[i,'STATE'],\n # df.loc[i,'LATITUDE'],\n # df.loc[i,'LONGITUDE'],\n # df.loc[i,'SIZE']))\n\n# '''\n# Get user selection\n# '''\n# try:\n# query = input('Enter selection (ex. 001, 42): ')\n# query = int(query)\n# except:\n# print('Please enter valid selection (ex. 001, 42)')\n# ftp.quit()\n# sys.exit()\n\n #station_id = df.loc[query, 'STATION_ID']\n station_id = df\n return station_id\n\n\ndef get_station(ftp=None, search_term='US'):\n close_after = False\n if ftp==None:\n ftp = connect_to_ftp()\n close_after = True\n \n station_id = get_station_id(ftp,search_term)\n #print(station_id)\n \n if close_after:\n ftp.quit()\n \n return (station_id)\n ", "_____no_output_____" ] ], [ [ "#####https://github.com/aaronpenne/get_noaa_ghcn_data/blob/master/get_dly.py\n", "_____no_output_____" ] ], [ [ "\n\"\"\"\nGrabs .dly file from the NOAA GHCN FTP server, parses, and reshapes to have one\nday per row and element values in the columns. Writes output as CSV.\nAuthor: Aaron Penne\n.dly Format In (roughly): .csv Format Out (roughly):\n------------------------- --------------------------\nMonth1 PRCP Day1 Day2 ... Day31 Day1 PRCP SNOW\nMonth1 SNOW Day1 Day2 ... Day31 Day2 PRCP SNOW\nMonth2 PRCP Day1 Day2 ... Day31 Day3 PRCP SNOW\nMonth2 SNOW Day1 Day2 ... Day31 Day4 PRCP SNOW\nStarting with 5 core elements (per README)\n PRCP = Precipitation (tenths of mm)\n SNOW = Snowfall (mm)\n SNWD = Snow depth (mm)\n TMAX = Maximum temperature (tenths of degrees C)\n TMIN = Minimum temperature (tenths of degrees C)\nICD:\n ------------------------------\n Variable Columns Type\n ------------------------------\n ID 1-11 Character\n YEAR 12-15 Integer\n MONTH 16-17 Integer\n ELEMENT 18-21 Character\n VALUE1 22-26 Integer\n MFLAG1 27-27 Character\n QFLAG1 28-28 Character\n SFLAG1 29-29 Character\n VALUE2 30-34 Integer\n MFLAG2 35-35 Character\n QFLAG2 36-36 Character\n SFLAG2 37-37 Character\n . . .\n . . .\n . . 
.\n VALUE31 262-266 Integer\n MFLAG31 267-267 Character\n QFLAG31 268-268 Character\n SFLAG31 269-269 Character\n ------------------------------\n\"\"\"\n\nimport pandas as pd\nfrom ftplib import FTP\nfrom io import StringIO\nimport os\n\nftp_path_dly_all = '/pub/data/ghcn/daily/all/'\n\ndef connect_to_ftp():\n \"\"\"\n Get FTP server and file details\n \"\"\"\n ftp_path_root = 'ftp.ncdc.noaa.gov'\n # Access NOAA FTP server\n ftp = FTP(ftp_path_root)\n message = ftp.login() # No credentials needed\n #print(message)\n return ftp\n\n# Marlon Franco\ndef disconnect_to_ftp(ftp_connection):\n return ftp_connection.quit()\n\ndef get_flags(s):\n \"\"\"\n Get flags, replacing empty flags with '_' for clarity (' S ' becomes '_S_')\n \"\"\"\n m_flag = s.read(1)\n m_flag = m_flag if m_flag.strip() else '_'\n q_flag = s.read(1)\n q_flag = q_flag if q_flag.strip() else '_'\n s_flag = s.read(1)\n s_flag = s_flag if s_flag.strip() else '_'\n return [m_flag + q_flag + s_flag]\n\ndef create_dataframe(element, dict_element):\n \"\"\"\n Make dataframes out of the dicts, make the indices date strings (YYYY-MM-DD)\n \"\"\"\n element = element.upper()\n df_element = pd.DataFrame(dict_element)\n # Add dates (YYYY-MM-DD) as index on df. Pad days with zeros to two places\n df_element.index = df_element['YEAR'] + '-' + df_element['MONTH'] + '-' + df_element['DAY'].str.zfill(2)\n df_element.index.name = 'DATE'\n # Arrange columns so ID, YEAR, MONTH, DAY are at front. Leaving them in for plotting later - https://stackoverflow.com/a/31396042\n for col in ['DAY', 'MONTH', 'YEAR', 'ID']:\n df_element = move_col_to_front(col, df_element)\n # Convert numerical values to float\n df_element.loc[:,element] = df_element.loc[:,element].astype(float)\n return df_element\n\ndef move_col_to_front(element, df):\n element = element.upper()\n cols = df.columns.tolist()\n cols.insert(0, cols.pop(cols.index(element)))\n df = df.reindex(columns=cols)\n return df\n\ndef dly_to_csv(ftp, station_id, output_dir, save_dly):\n #output_dir = os.path.relpath('output')\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n ftp_filename = station_id + '.dly'\n\n # Write .dly file to stream using StringIO using FTP command 'RETR'\n s = StringIO()\n ftp.retrlines('RETR ' + ftp_path_dly_all + ftp_filename, s.write)\n s.seek(0)\n\n # Write .dly file to dir to preserve original # FIXME make optional?\n if (save_dly):\n with open(os.path.join(output_dir, ftp_filename), 'wb+') as f:\n ftp.retrbinary('RETR ' + ftp_path_dly_all + ftp_filename, f.write)\n\n # Move to first char in file\n s.seek(0)\n\n # File params\n num_chars_line = 269\n num_chars_metadata = 21\n\n element_list = ['PRCP', 'SNOW', 'SNWD', 'TMAX', 'TMIN']\n\n '''\n Read through entire StringIO stream (the .dly file) and collect the data\n '''\n all_dicts = {}\n element_flag = {}\n prev_year = '0000'\n i = 0\n while True:\n i += 1\n\n '''\n Read metadata for each line (one month of data for a particular element per line)\n '''\n id_station = s.read(11)\n year = s.read(4)\n month = s.read(2)\n day = 0\n element = s.read(4)\n\n # If this is blank then we've reached EOF and should exit loop\n if not element:\n break\n\n '''\n Print status\n '''\n if year != prev_year:\n #print('Year {} | Line {}'.format(year, i))\n prev_year = year\n\n '''\n Loop through each day in rest of row, break if current position is end of row\n '''\n while s.tell() % num_chars_line != 0:\n day += 1\n # Fill in contents of each dict depending on element type in current row\n if day == 1:\n try:\n first_hit = 
element_flag[element]\n except:\n element_flag[element] = 1\n all_dicts[element] = {}\n all_dicts[element]['ID'] = []\n all_dicts[element]['YEAR'] = []\n all_dicts[element]['MONTH'] = []\n all_dicts[element]['DAY'] = []\n all_dicts[element][element.upper()] = []\n all_dicts[element][element.upper() + '_FLAGS'] = []\n\n value = s.read(5)\n flags = get_flags(s)\n if value == '-9999':\n continue\n all_dicts[element]['ID'] += [station_id]\n all_dicts[element]['YEAR'] += [year]\n all_dicts[element]['MONTH'] += [month]\n all_dicts[element]['DAY'] += [str(day)]\n all_dicts[element][element.upper()] += [value]\n all_dicts[element][element.upper() + '_FLAGS'] += flags\n\n '''\n Create dataframes from dict\n '''\n all_dfs = {}\n for element in list(all_dicts.keys()):\n all_dfs[element] = create_dataframe(element, all_dicts[element])\n\n '''\n Combine all element dataframes into one dataframe, indexed on date.\n '''\n # pd.concat automagically aligns values to matching indices, therefore the data is date aligned!\n list_dfs = []\n for df in list(all_dfs.keys()):\n list_dfs += [all_dfs[df]]\n df_all = pd.concat(list_dfs, axis=1, sort=False)\n df_all.index.name = 'MM/DD/YYYY'\n\n '''\n Remove duplicated/broken columns and rows\n '''\n # https://stackoverflow.com/a/40435354\n df_all = df_all.loc[:,~df_all.columns.duplicated()]\n df_all = df_all.loc[df_all['ID'].notnull(), :]\n\n '''\n Output to CSV, convert everything to strings first\n '''\n # NOTE: To open the CSV in Excel, go through the CSV import wizard, otherwise it will come out broken\n df_out = df_all.astype(str)\n df_out.to_csv(os.path.join(output_dir, station_id + '.csv'))\n #print('\\nOutput CSV saved to: {}'.format(os.path.join(output_dir, station_id + '.csv')))\n\ndef get_weather_data(ftp=None, station_id='USR0000CCHC',output_dir=WEATHER_PATH, save_dly=False):\n close_after = False\n if ftp==None:\n ftp = connect_to_ftp()\n close_after = True\n \n dly_to_csv(ftp, station_id,output_dir, save_dly)\n if close_after:\n ftp.quit()", "_____no_output_____" ] ], [ [ "## Fetch Data", "_____no_output_____" ] ], [ [ "def fetch_soybean_data(soybean_url=SOYBEAN_URL, soybean_path=SOYBEAN_PATH):\n if not os.path.isdir(soybean_path):\n os.makedirs(soybean_path)\n csv_path = os.path.join(soybean_path, \"soybeans.csv\")\n urllib.request.urlretrieve(soybean_url, csv_path)\n\ndef fetch_weather_data(contains='US', weather_path=WEATHER_PATH_DRIVE_CSV, save_dly=False, how_much=100):\n conn = connect_to_ftp()\n weather = get_station(conn,search_term=contains) # List all stations from USA\n downloaded_stations = \"\"\n with open(DOWNLOADED_STATIONS_FILE_TEMP,\"r+\") as f:\n downloaded_stations = f.read()\n count = 0\n count2 = 0\n total = weather['STATION_ID'].size\n amount_of_data = total * how_much/100\n last = ''\n for station in weather['STATION_ID']:\n print('.',end='')\n count += 1\n actual = \"%.2f%% \" %((count/total)*100)\n actual_partial = \"%.2f%% \" %((count2/amount_of_data)*100)\n if (station+'.csv' not in downloaded_stations):\n if (count2 > amount_of_data):\n print('download completed: ['+str(count2)+' of '+str(amount_of_data)+'], total = '+str(total))\n return True\n count2 += 1\n print('get ', end='')\n get_weather_data(conn, station, weather_path, save_dly)\n print('done')\n downloaded_stations += station+'.csv\\r\\n'\n with open(DOWNLOADED_STATIONS_FILE_TEMP,\"a+\") as f:\n f.write(station+'.csv\\r\\n')\n else:\n print(',',end='')\n if (actual != last):\n clear_output()\n last = actual\n print('Getting '+str(how_much)+'% of weather data from 
GHCN ftp containing \\''+contains+'\\' in STATION_ID...')\n print('PARTIAL: '+str(actual_partial) + ' ['+ str(count2) + ' of ' + str(amount_of_data) + ']')\n print('TOTAL: '+str(actual) + ' ['+ str(count) + ' of ' + str(total) + ']')\n\n disconnect_to_ftp(conn)\n print('Final: download completed: ['+str(count2)+' of '+str(amount_of_data)+'], total = '+str(total))\n return True\n", "_____no_output_____" ], [ "# Update the local temp control file\n#!echo \"$DOWNLOADED_STATIONS_FILE\" > \"$DOWNLOADED_STATIONS_FILE_TEMP\" .\n\n#fetch_weather_data(how_much=0.54) #0.54% of total amount of data\nfetch_weather_data()\n\n# Update the control file\n!echo \"$DOWNLOADED_STATIONS_FILE_TEMP\" > \"$DOWNLOADED_STATIONS_FILE\" .", "_____no_output_____" ] ], [ [ "### Check the number of downloaded station's csv file\n\n", "_____no_output_____" ] ], [ [ "weather = get_station(search_term='US') # List all stations from USA\n \nprint(\"# of stations in GHCN FTP: \", end=\"\")\nprint(str(weather['STATION_ID'].size))\n\nprint(\"# of downloaded csv files: \", end=\"\")\n!find \"$WEATHER_PATH_DRIVE_CSV\" -type f | wc -l\n\nprint(\"# of downloaded stations in control file: \", end=\"\")\nwith open(DOWNLOADED_STATIONS_FILE) as f:\n num_lines = sum(1 for _ in f.readlines())\n print(str(num_lines))\n", "> Query: 'US'\nSearching records...\n\n\n# of stations in GHCN FTP: 0\n# of downloaded csv files: 13924\n# of downloaded stations in control file: 13924\n" ], [ "def force_update_control_file():\n directory = os.path.join(WEATHER_PATH_DRIVE_CSV)\n with open(DOWNLOADED_STATIONS_FILE,\"w\") as f:\n for root,dirs,files in os.walk(directory):\n for file in files:\n print('.',end='')\n if file.endswith(\".csv\"):\n f.write(file+'\\r\\n')\n\nforce_update_control_file()", "............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
...................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
..............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................." ] ], [ [ "# Get 'US' Stations\n", "_____no_output_____" ] ], [ [ "newfile = ''\nwith open(PROJECT_PATH+'ghcnd-stations-us.txt', 'r') as f: \n for line in f.readlines():\n line_list = line.split(' ')\n station = line_list[0]\n newfile += station\n for word in line_list:\n if (len(word) > 1):\n if (word[0].isalpha() and word!=station):\n state = word\n newfile += ','+state+'\\n'\n break\n \nprint(newfile)\n\nwith open(PROJECT_PATH+'ghcnd-stations-us.csv', 'w+') as f: \n f.write(newfile)", "_____no_output_____" ] ], [ [ "# Organize Stations by State", "_____no_output_____" ] ], [ [ "def organize_stations_by_state():\n f1='' #stations_not_dowloaded\n csv_path = WEATHER_PATH_DRIVE_CSV\n with open(WEATHER_PATH+'ghcnd-stations-us.csv', 'r') as f:\n for line in f:\n station = line.split(',')[0]\n state = line.split(',')[1].rstrip()\n # Create target Directory if don't exist\n if not os.path.exists(csv_path+state):\n os.mkdir(csv_path+state)\n print(\"Directory \" , csv_path+state , \" Created \")\n #else:\n #\tprint(\"Directory \" , \"csv/\"+state , \" already exists\")\n if not os.path.exists(csv_path+station+\".csv\"):\n print(\".\", end='')\n f1+=station+\"\\n\"\n else:\n os.rename(csv_path+station+\".csv\", csv_path+state+\"/\"+station+\".csv\")\n with open(WEATHER_PATH+'stations_not_dowloaded.csv', 'w+') as f: \n f.write(f1)", "_____no_output_____" ], [ "!ls drive/My\\ Drive/TCC/", "drive ghcnd-stations-us.csv ghcnd-stations-us.txt sample_data\n" ], [ "sLength = len(df1['TMAX'])", "_____no_output_____" ], [ " df1['e'] = p.Series(np.random.randn(sLength), index=df1.index)", "_____no_output_____" ] ], [ [ "# Fix columns", "_____no_output_____" ] ], [ [ "def fix_columns(df):\n for column in df:\n if column in ('ID','TMAX','TMIN','TAVG','PRCP'):\n pass\n else:\n #print(' deleting ',column, end='')\n del(df[column])\n if 'TMAX' not in df:\n #print(' creating TMAX... ', end='')\n #sLength = sizes['TMAX']\n df['TMAX'] = pd.Series(0, index=df.index)\n if 'TMIN' not in df:\n #print(' creating TMIN... 
', end='')\n #sLength = sizes['TMIN']\n df['TMIN'] = pd.Series(0, index=df.index)\n if 'TAVG' not in df:\n #print(' creating TAVG... ', end='')\n #sLength = sizes['TAVG']\n df['TAVG'] = pd.Series(0, index=df.index)\n if 'PRCP' not in df:\n #print(' creating PRCP... ')\n #sLength = sizes['PRCP']\n df['PRCP'] = pd.Series(0, index=df.index)\n df=df.fillna(method='ffill')\n ", "_____no_output_____" ], [ "df_ref = load_single_csv(CSV_PATH+'WA/USS0017B04S.csv')\nsizes = {'TMAX':len(df_ref['TMAX']),'TMIN':len(df_ref['TMIN']),'TAVG':len(df_ref['TAVG']),'PRCP':len(df_ref['PRCP'])}\n ", "_____no_output_____" ], [ "def fix_dataframes(folder=''):\n root_path = CSV_PATH+folder+'/'\n print(root_path)\n count=0\n count2=10\n total=0\n for root, dirs, files in os.walk(root_path):\n total=len(files)\n for file in files:\n if '.csv' in file:\n station=file.strip('.csv')\n #print(station)\n path = os.path.join(root, file)\n df = load_single_csv(path)\n fix_columns(df)\n new_path = os.path.join(PROJECT_PATH+'new/'+folder+'/', file)\n # Create target Directory if don't exist\n if not os.path.exists(PROJECT_PATH+'new/'+folder+'/'):\n os.mkdir(PROJECT_PATH+'new/'+folder+'/')\n print(\"Directory \" , PROJECT_PATH+'new/'+folder+'/' , \" Created \")\n df.to_csv(new_path)\n if count2 == 70:\n count2=0\n actual = \"%.2f%% \" %((count/total)*100)\n clear_output()\n print('Fixing ',folder,' stations... ',actual,' (',str(count),' of ',str(total),')')\n count+=1\n count2+=1\n print('Done: %.2f%% ' %((count/total)*100))\n return True", "_____no_output_____" ], [ "fixed_states = \"\"\nwith open(FIXED_STATE_FILE, \"r+\") as f:\n fixed_states = f.read()\n\nprint('Already fixed:',fixed_states) \n \nfor root, dirs, files in os.walk(CSV_PATH):\n total=len(dirs)\n for state in dirs:\n if (state not in fixed_states):\n if(fix_dataframes(state)):\n fixed_states+= state\n with open(FIXED_STATE_FILE,\"a\") as f:\n f.write(state+'\\r\\n')", "Fixing DE stations... 
54.55% ( 60 of 110 )\nDone: 100.00% \ndrive/My Drive/TCC/datasets/GHCN_Data/data/csv/DC/\nDirectory drive/My Drive/TCC/datasets/GHCN_Data/data/new/DC/ Created \nDone: 100.00% \ndrive/My Drive/TCC/datasets/GHCN_Data/data/csv/UM/\nDirectory drive/My Drive/TCC/datasets/GHCN_Data/data/new/UM/ Created \nDone: 100.00% \ndrive/My Drive/TCC/datasets/GHCN_Data/data/csv/PI/\nDirectory drive/My Drive/TCC/datasets/GHCN_Data/data/new/PI/ Created \nDone: 100.00% \n" ], [ "fix_dataframes('CA')", "_____no_output_____" ], [ "df1 = load_single_csv('drive/My Drive/TCC/datasets/GHCN_Data/data/csv/TX/US1TXAC0002.csv')\ndf2 = load_single_csv('drive/My Drive/TCC/datasets/GHCN_Data/data/new/TX/US1TXAC0002.csv')", "_____no_output_____" ], [ "df1.tail()", "_____no_output_____" ], [ "df2.tail()", "_____no_output_____" ] ], [ [ "# Load Data", "_____no_output_____" ] ], [ [ "def load_soybean_data(soybean_path=SOYBEAN_PATH):\n csv_path = os.path.join(soybean_path, \"soybeans.csv\")\n print(csv_path)\n return pd.read_csv(csv_path)\n\ndef load_single_csv(csv_path):\n #print(csv_path)\n df = pd.read_csv(csv_path,low_memory=False)\n df.set_index(['MM/DD/YYYY','YEAR','MONTH','DAY'], inplace=True)\n return df\n\ndef extract_zip(dir_name=WEATHER_PATH_DRIVE_ZIP,destination_dir=WEATHER_PATH_DRIVE_CSV):\n for item in os.listdir(dir_name): # loop through items in dir\n if item.endswith(\".zip\"): # check for \".zip\" extension\n print(\"Extracting \"+str(item), end=\"\")\n #file_name = os.path.abspath(item) # get full path of files\n file_name = dir_name+item # get full path of files\n zip_ref = zipfile.ZipFile(file_name) # create zipfile object\n zip_ref.extractall(destination_dir) # extract file to dir\n zip_ref.close() # close file\n print(\"... OK!\")\n #os.remove(file_name) # delete zipped file\n print(\"Extraction complete!\")\n\ndef load_weather_data(weather_path=WEATHER_PATH_DRIVE_CSV,from_zip=False):\n if from_zip:\n extract_zip()\n data_frames=[]\n #first=True\n directory = os.path.join(weather_path,\"\")\n print(directory)\n for root,dirs,files in os.walk(directory):\n print(directory+\".\")\n for file in files:\n print(\".\")\n if file.endswith(\".csv\"):\n csv_path = os.path.join(weather_path, file)\n df = load_single_csv(csv_path)\n #Rename Columns\n #df=df.drop(columns=['ID'])\n #station = file.replace('.csv','')\n #for column in df.columns:\n # if(column not in ['MM/DD/YYYY','YEAR','MONTH','DAY']):\n # df.rename(columns={column: station +'-'+ column}, inplace=True)\n #print(station +'-'+ column)\n #Append to list\n data_frames.append(df)\n #if (first):\n # data_frames = df\n # first=False\n #else:\n # data_frames = pd.merge(data_frames, df, on=['MM/DD/YYYY','YEAR','MONTH','DAY'], how='left')\n return data_frames\n #return pd.concat(data_frames, axis=1)\n \ndef load_usda_data(usda_path=USDA_PATH):\n csv_path = os.path.join(usda_path, \"data.csv\")\n print(csv_path)\n return pd.read_csv(csv_path, thousands=',')\n", "_____no_output_____" ] ], [ [ "# Calculate Mean and Standard Deviation for each state\n", "_____no_output_____" ] ], [ [ "def save_csv(df,name,path):\n #print('Saving DataFrame in ',path)\n # Create target Directory if don't exist\n if not os.path.exists(path):\n os.mkdir(path)\n print(\"Directory \" , path , \" Created \")\n df.to_csv(path+name)\n \ndef read_log(file_path):\n files_processed = \"\"\n if not os.path.exists(file_path):\n with open(file_path,'w+') as f:\n files_processed = f.read()\n else :\n with open(file_path,'r+') as f:\n files_processed = f.read()\n return files_processed\n\ndef 
write_log(file_path,content):\n with open(file_path,'w+') as f:\n f.write(content)\n\ndef calculate(daf):\n print('TMAX',end='')\n daf['TMAX_mean'] = daf[[col for col in daf.columns if 'TMAX' in col ]].mean(1)\n print(' Ok mean')\n daf['TMAX_std'] = daf[[col for col in daf.columns if 'TMAX' in col ]].std(1)\n print(' Ok std')\n #daf = daf.drop(columns=['TMAX'])\n\n print(' OK\\nTMIN', end='')\n daf['TMIN_mean'] = daf[[col for col in daf.columns if 'TMIN' in col ]].mean(1)\n print(' Ok mean')\n daf['TMIN_std'] = daf[[col for col in daf.columns if 'TMIN' in col ]].std(1)\n print(' Ok std')\n #daf = daf.drop(columns=['TMIN'])\n\n print(' OK\\nTAVG', end='')\n daf['TAVG_mean'] = daf[[col for col in daf.columns if 'TAVG' in col ]].mean(1)\n print(' Ok mean')\n daf['TAVG_std'] = daf[[col for col in daf.columns if 'TAVG' in col ]].std(1)\n print(' Ok std')\n #daf = daf.drop(columns=['TAVG'])\n\n print(' OK\\nPRCP', end='')\n daf['PRCP_mean'] = daf[[col for col in daf.columns if 'PRCP' in col ]].mean(1)\n print(' Ok mean')\n daf['PRCP_std'] = daf[[col for col in daf.columns if 'PRCP' in col ]].std(1)\n print(' Ok std')\n #daf = daf.drop(columns=['PRCP'])\n daf = daf.drop(columns=[col for col in daf.columns if col not in ['MM/DD/YYYY','YEAR','MONTH','DAY','TMAX_mean','TMAX_std','TMIN_mean','TMIN_std','TAVG_mean','TAVG_std','PRCP_mean','PRCP_std']])\n print(' OK')\n daf=daf.fillna(0)\n return daf", "_____no_output_____" ], [ "def calculate_mean(folder=''):\n root_path = WEATHER_PATH_DRIVE_CSV+folder+'/'\n new_path = os.path.join(WEATHER_PATH+'new/'+folder+'/','')\n file_path = new_path+folder+'.txt'\n if not os.path.exists(new_path):\n os.mkdir(new_path)\n print(\"Directory \" , new_path , \" Created \")\n files_processed = read_log(file_path)\n print(root_path)\n n=0\n count=0\n count2=70\n count_to_save=0\n count_to_reset=0\n total=0\n already_readed=False\n for root, dirs, files in os.walk(root_path):\n total=len(files)\n for file in files:\n if '.csv' in file:\n station=file.strip('.csv')\n if (station not in files_processed):\n path = os.path.join(root, file)\n df = load_single_csv(path)\n df = df.drop(columns=['ID'])\n if not already_readed:\n try:\n daf = load_single_csv(new_path+folder+'_tmp.csv')\n except:\n daf = df\n already_readed=True\n daf = pd.concat([daf,df], axis=1)\n if count_to_save == 100:\n count_to_save=0\n print('saving')\n save_csv(daf,folder+'_tmp',new_path)\n write_log(file_path,files_processed)\n print('saved')\n count_to_save+=1\n files_processed+=station+'\\r\\n'\n del df\n if count2 == 70:\n count2=0\n actual = \"%.2f%% \" %((count/total)*100)\n clear_output()\n process = psutil.Process(os.getpid())\n print('RAM usage: %.2f GB' %((process.memory_info().rss) / 1e9))\n print('Loading ',folder,' stations in DataFrames... ',actual,' (',str(count),' of ',str(total),')')\n count+=1\n count2+=1\n save_csv(daf,folder+'_tmp',new_path)\n write_log(file_path,files_processed)\n print('Load done: %.2f%% ' %((count/total)*100))\n if(\"Done\" not in files_processed):\n daf = load_single_csv(new_path+folder+'_tmp.csv')\n daf = calculate(daf)\n new_file_name = state+str(n)+'.csv'\n print('Saving ', new_file_name)\n save_csv(daf,new_file_name,new_path)\n print('Done saving ',new_file_name)\n write_log(file_path,files_processed+\"Done\\r\\n\")\n print('Done!')\n os.remove(new_path+folder+'_tmp.csv')\n else :\n print('Already processed.')\n return True\n ", "_____no_output_____" ], [ "calculate_mean('FL')", "RAM usage: 10.82 GB\nLoading FL stations in DataFrames... 
99.35% ( 1680 of 1691 )\nLoad done: 100.00% \nTMAX Ok mean\n Ok std\n OK\nTMIN Ok mean\n Ok std\n OK\nTAVG Ok mean\n Ok std\n OK\nPRCP Ok mean\n Ok std\n OK\nDone!\n" ], [ "\ncalculated_states = \"\"\nwith open(CALCULATED_STATE_FILE, \"r+\") as f:\n calculated_states = f.read()\nprint('Already calculated:\\n[\\n',calculated_states,']\\n') \n\nfor root, dirs, files in os.walk(WEATHER_PATH_DRIVE_CSV):\n total=len(dirs)\n for state in dirs:\n if (state not in calculated_states):\n if(calculate_mean(state)):\n calculated_states+= state\n with open(CALCULATED_STATE_FILE,\"a\") as f:\n f.write(state+'\\r\\n')\n", "Loading TX stations in DataFrames... 3.53% ( 174 of 4933 )\n" ], [ "", "_____no_output_____" ] ], [ [ "### Make the Date column as index", "_____no_output_____" ] ], [ [ "soybeans.Date = pd.to_datetime(soybeans.Date)\nsoybeans.set_index('Date', inplace=True)", "_____no_output_____" ], [ "soybeans.head()\n", "_____no_output_____" ], [ "soybeans.tail()", "_____no_output_____" ], [ "plt.plot(soybeans.index, soybeans.Settle)\nplt.title('CBOT Soybean Futures',fontsize=27)\nplt.ylabel('Price (0.01 $USD)',fontsize=27)\nplt.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%d'))\nplt.show()", "_____no_output_____" ], [ "usda = load_usda_data()\n", "_____no_output_____" ] ], [ [ "## Filter soybeans by the year 2015:", "_____no_output_____" ] ], [ [ "mask = (soybeans['Date'] > '2015-01-01') & (soybeans['Date'] <= '2015-12-31')\nsoybeans = soybeans.loc[mask]", "_____no_output_____" ], [ "mask = (soybeans['Date'] > '2014-01-01')\nsoybeans = soybeans.loc[mask]", "_____no_output_____" ] ], [ [ "## Filter weather by the most productive states \n ", "_____no_output_____" ] ], [ [ "weather = weather.query(\"state in ('IA','IL','MN','NE','IN','OH','SD','ND','MO','AR','KS','MS','MI','WI','KY','TN','LA','NC','PA','VA','MD','AL','GA','NY','OK','SC','DE','NJ','TX','WV','FL')\")", "_____no_output_____" ] ], [ [ "## Plot map data\n", "_____no_output_____" ] ], [ [ "stations = pd.read_csv('US_stations.csv')\n#stations.set_index(['LATITUDE','LONGITUDE'], inplace=True)\nstations.index.names", "_____no_output_____" ], [ "#stations.drop_duplicates(subset=['LATITUDE','LONGITUDE'])\nstations.plot(kind=\"scatter\", x=\"LONGITUDE\", y=\"LATITUDE\",fontsize=27,figsize=(20,15))\nplt.title(\"Meteorological stations in the USA's most soy producing regions\", fontsize=27)\nplt.gca().yaxis.set_major_locator(plt.NullLocator())\nplt.gca().xaxis.set_major_formatter(plt.NullFormatter())\nplt.axis('off')\nplt.show()", "_____no_output_____" ], [ "weather.drop_duplicates(subset=['latitude','longitude']).plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\",fontsize=27,figsize=(20,15))\nplt.title(\"Meteorological stations in the USA's most soy producing regions\", fontsize=27)\nplt.gca().yaxis.set_major_locator(plt.NullLocator())\nplt.gca().xaxis.set_major_formatter(plt.NullFormatter())\nplt.axis('off')\nplt.show()", "_____no_output_____" ] ], [ [ "# Group data by date (daily)\n\nMédia das medidas horárias para o avgtemp, mintemp e maxtemp", "_____no_output_____" ] ], [ [ "weather = weather.groupby(['date'], as_index=False)['date','mintemp','maxtemp','avgtemp'].mean()\nweather.head()", "_____no_output_____" ], [ "weather.date = pd.to_datetime(weather.date)\nweather.set_index('date', inplace=True)", "_____no_output_____" ] ], [ [ "## Join datasets (soybeans + weather)", "_____no_output_____" ] ], [ [ "dtMarlon = soybeans.join(weather)", "_____no_output_____" ], [ "dtMarlon.head()", "_____no_output_____" ] ], [ [ "## 
Histograms", "_____no_output_____" ] ], [ [ "soybeans.hist(bins=50, figsize=(20,15))\nplt.show()", "_____no_output_____" ], [ "weather.hist(bins=50, figsize=(20,15))\nplt.show()", "_____no_output_____" ], [ "dtMarlon.hist(bins=50, figsize=(20,15))\nplt.show()", "_____no_output_____" ] ], [ [ "## Time Series", "_____no_output_____" ] ], [ [ "plt.plot(soybeans.index, soybeans.Settle)\nplt.title('CBOT Soybean Futures',fontsize=27)\nplt.ylabel('Price (0.01 $USD)',fontsize=27)\nplt.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%d'))\nplt.show()", "_____no_output_____" ], [ "plt.plot(weather.index, weather.avgtemp)\nplt.title('2015 USA Weather Avg, Max, Min')\nplt.ylabel('Avg. Temp. (°F)');\nplt.show()", "_____no_output_____" ], [ "fig = plt.figure()\nax1 = fig.add_subplot(111)\nax1.plot(dtMarlon.index, dtMarlon.avgtemp, 'g-')\nax1.set_ylabel('Avg. Temp. (°F)')\n\nax2 = ax1.twinx()\nax2.plot(dtMarlon.index, dtMarlon.Settle, 'b-')\nax2.set_ylabel('Price per bushel (0.01 $USD)')\nax2.yaxis.set_major_formatter(mticker.FormatStrFormatter('%0.01d'))\nplt.title('2015 USA Weather Avg and CBOT Soybean Futures')\nplt.show()", "_____no_output_____" ] ], [ [ "## Missing values for weather in days where we have soy quotes", "_____no_output_____" ] ], [ [ "dtMarlon.query('avgtemp.isnull()')", "_____no_output_____" ], [ "weather.query(\"date=='2015-06-05' or date=='2015-06-04' or date=='2015-06-03' or date=='2015-06-02' or date=='2015-06-01'\")", "_____no_output_____" ], [ "soybeans.query(\"Date=='2015-06-05' or Date=='2015-06-04' or Date=='2015-06-03' or Date=='2015-06-02' or Date=='2015-06-01'\")", "_____no_output_____" ] ], [ [ "## Filling missing values with method 'ffil'\nThis propagate non-null values forward", "_____no_output_____" ] ], [ [ "dtMarlon = dtMarlon.fillna(method='ffill')", "_____no_output_____" ] ], [ [ "## Correlation", "_____no_output_____" ] ], [ [ "dtMarlon.corr()", "_____no_output_____" ], [ "dtMarlon.diff().corr()", "_____no_output_____" ], [ "pd.plotting.autocorrelation_plot(dtMarlon)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7a511a9dadc4cb57243ca92f9b1965ebe083198
6,577
ipynb
Jupyter Notebook
07-Neural_networks_e_deep_learning/python/1-rede_neural_iris.ipynb
CaioHenriqueMachado/data-science-practices
aaf65515e03249cee761cab22ba7931a68f46451
[ "MIT" ]
2
2020-11-29T21:21:01.000Z
2020-12-03T21:29:06.000Z
07-Neural_networks_e_deep_learning/python/1-rede_neural_iris.ipynb
CaioHenriqueMachado/data-science-practices
aaf65515e03249cee761cab22ba7931a68f46451
[ "MIT" ]
2
2020-12-02T02:58:21.000Z
2020-12-19T02:54:12.000Z
07-Neural_networks_e_deep_learning/python/.ipynb_checkpoints/1-rede_neural_iris-checkpoint.ipynb
CaioHenriqueMachado/data-science-practices
aaf65515e03249cee761cab22ba7931a68f46451
[ "MIT" ]
null
null
null
34.984043
1,204
0.599057
[ [ [ "# Formação Cientista de Dados\n# Redes neurais artificiais com keras", "_____no_output_____" ], [ "# Importação das bibliotecas\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import np_utils\nimport numpy as np", "_____no_output_____" ], [ "# Carregamento da base de dados e criação dos previsores (variáveis independentes - X) e da classe (variável dependente - y)\nbase = datasets.load_iris()\nprevisores = base.data\nclasse = base.target\nclasse", "_____no_output_____" ], [ "# Transformação da classe para o formato \"dummy\", pois temos uma rede neural com 3 neurônios na camada de saída\nclasse_dummy = np_utils.to_categorical(classe)\nclasse_dummy", "_____no_output_____" ], [ "# Divisão da base de dados entre treinamento e teste (30% para testar e 70% para treinar)\nX_treinamento, X_teste, y_treinamento, y_teste = train_test_split(previsores,\n classe_dummy,\n test_size = 0.3,\n random_state = 0)", "_____no_output_____" ], [ "# Criação da estrutura da rede neural com a classe Sequential (sequência de camadas)\nmodelo = Sequential()\n#primeira camada oculta, 5 neuronios, 4 neuronios de entrada\nmodelo.add(Dense(units = 5, input_dim = 4))\n#segunda camada oculta\nmodelo.add(Dense(units = 4))\n# Função softmax porque temos um problema de classificação com mais de duas classes \n#(é gerada uma probabilidade em cada neurônio)\nmodelo.add(Dense(units = 3, activation = 'softmax'))", "_____no_output_____" ], [ "# Visualização da estrutura da rede neural\nmodelo.summary()", "_____no_output_____" ], [ "# Configuração dos parâmetros da rede neural (adam = algoritmo para atualizar os pesos e loss = cálculo do erro)\nmodelo.compile(optimizer = 'adam', loss = 'categorical_crossentropy',\n metrics = ['accuracy'])\n# Treinamento, dividindo a base de treinamento em uma porção para validação (validation_data)\nmodelo.fit(X_treinamento, y_treinamento, epochs = 1000,\n validation_data = (X_teste, y_teste))", "_____no_output_____" ], [ "# Previsões e mudar a variável para True ou False de acordo com o threshold 0.5\nprevisoes = modelo.predict(X_teste)\nprevisoes = (previsoes > 0.5)\nprevisoes", "_____no_output_____" ], [ "# Como é um problema com três saídas, precisamos buscar a posição que possui o maior valor (são retornados 3 valores)\ny_teste_matrix = [np.argmax(t) for t in y_teste]\ny_previsao_matrix = [np.argmax(t) for t in previsoes]", "_____no_output_____" ], [ "# Geração da matriz de confusão\nconfusao = confusion_matrix(y_teste_matrix, y_previsao_matrix)\nconfusao", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a519fb8249b440c43b2a760cea305b98fecd6b
13,444
ipynb
Jupyter Notebook
jupyter/dAnalysis/b_numpy_class/Ex01_dnarray.ipynb
WoolinChoi/test
a0f9c8ecc63443acaae61d744eecec6c943d3a26
[ "MIT" ]
null
null
null
jupyter/dAnalysis/b_numpy_class/Ex01_dnarray.ipynb
WoolinChoi/test
a0f9c8ecc63443acaae61d744eecec6c943d3a26
[ "MIT" ]
1
2021-03-30T09:01:47.000Z
2021-03-30T09:01:47.000Z
jupyter/dAnalysis/b_numpy_class/Ex01_dnarray.ipynb
WoolinChoi/test
a0f9c8ecc63443acaae61d744eecec6c943d3a26
[ "MIT" ]
1
2019-12-06T18:21:10.000Z
2019-12-06T18:21:10.000Z
19.178317
73
0.388872
[ [ [ "# numpy\n\n\n- 행렬 / 선형대수 / 통계 패키지\n\n- 머신러닝의 이론적 백그라운드는 선형대수와 통계로 이루어져 있다\n\n- 사이킷런 같은 머신러닝 패키지가 넘파이 기반으로 되어 있다\n\n\n * 머신러닝 알고리즘이나 사이파이와 같은 과학, 통계 지원용 패키지를 직접 만드는 개발이 아니라면\n 넘파이를 상세하기 알 필요는 없다지만, 넘파이를 이해하는 것이 파이썬 기반의 데이타분석과 머신러닝에 중요하다\n \n * 넘파이가 데이타 핸들링에 효율적으로 쉽고 편하고 할 수 없다.\n 그러나 데이타 핸들링에 주로 사용하는 판다스도 많은 부분이 넘파이를 기반으로 만들어져 있다.\n \n * ndarray \n - 넘파이 기반 데이타 타입\n - np.array()\n \n * 자료형 정리\n - 파이썬 (list / tuple)\n - numpy의 ndarray\n - pandas의 DataFrame / series", "_____no_output_____" ] ], [ [ "import numpy as np\n\nlist_1 = [1, 2, 3]\nlist_2 = [9, 8, 7]\n\narr = np.array([list_1, list_2])\narr\nprint(type(arr))\nprint(arr.shape)\nprint(arr.ndim)", "<class 'numpy.ndarray'>\n(2, 3)\n2\n" ], [ "# 두번째행에 두번째 열의 값을 100 지정\narr[[1], 1] = 100\narr", "_____no_output_____" ], [ "arr1 = np.array([1, 2, 3])\narr2 = np.array([[1, 2, 3]])\n\nprint(arr1.shape) # 1차원 튜플로 3개의 요소를 가짐\nprint(arr2.shape) # 1행 3열의 요소를 가진 2차원\n\n# 차원수 확인\nprint(arr1.ndim, \"차원\")\nprint(arr2.ndim, \"차원\")", "(3,)\n(1, 3)\n1 차원\n2 차원\n" ] ], [ [ "### 자료형", "_____no_output_____" ] ], [ [ "# 자료형 확인\nprint(type(arr))\n\n# 요소의 자료형 확인\nprint(arr.dtype)", "<class 'numpy.ndarray'>\nint32\n" ], [ "# 요소의 자료형 변경\narr2 = arr.astype(np.float64)\nprint(arr2.dtype)", "float64\n" ], [ "list1 = [1, 2, 3.6]\nprint(type(list1))\n# ndarray 변경\nlist1 = np.array(list1)\nprint(type(list1))\n\n# 요소\nlist1.astype(np.float64)\nprint(list1.dtype)", "<class 'list'>\n<class 'numpy.ndarray'>\nfloat64\n" ], [ "list2 = [1, 2.3, 'python']\nprint(type(list2))\n# ndarray 변경\nlist2 = np.array(list2)\nprint(type(list2)) \n\n# 요소\nprint(list2.dtype) # U32", "<class 'list'>\n<class 'numpy.ndarray'>\n<U32\n" ], [ " # [결과] <U11 : Unicode 문자열 ", "_____no_output_____" ] ], [ [ "### nparray를 편리하게 생성\n\n* arange() : 범위를 이용한 배열 만들기\n* zeros() : 0으로 채우는 배열 만들기\n* ones() : 1로 채우는 배열 만들기", "_____no_output_____" ] ], [ [ "a = np.arange(10)\nprint(a)\n\nb = np.arange(1, 11)\nprint(b)\n\nc = np.arange(1, 11, 3)\nprint(c)", "[0 1 2 3 4 5 6 7 8 9]\n[ 1 2 3 4 5 6 7 8 9 10]\n[ 1 4 7 10]\n" ], [ "a2 = np.zeros((5, 5)) # 기본자료형: float\na2", "_____no_output_____" ], [ "a3 = np.ones((3, 4), dtype='int32')\na3", "_____no_output_____" ] ], [ [ "### ndarray의 차원과 크기를 변경 : reshape()", "_____no_output_____" ] ], [ [ "arr = np.arange(10)\nprint(arr)\nprint(arr.shape)\nprint(arr.ndim) # 차원으로 확인 권장", "[0 1 2 3 4 5 6 7 8 9]\n(10,)\n1\n" ], [ "arr2 = arr.reshape(2, 5) # 2행 5열로 차원크기 변경\nprint(arr2)\nprint(arr2.shape)\nprint(arr2.ndim)", "[[0 1 2 3 4]\n [5 6 7 8 9]]\n(2, 5)\n2\n" ], [ "# -1 적용\narr = np.arange(20)\nprint(arr)\n\narr2 = arr.reshape(5, -1)\nprint(arr2)\n\narr3 = arr.reshape(-1, 2)\nprint(arr3)", "[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19]\n[[ 0 1 2 3]\n [ 4 5 6 7]\n [ 8 9 10 11]\n [12 13 14 15]\n [16 17 18 19]]\n[[ 0 1]\n [ 2 3]\n [ 4 5]\n [ 6 7]\n [ 8 9]\n [10 11]\n [12 13]\n [14 15]\n [16 17]\n [18 19]]\n" ] ], [ [ "### 인덱싱: 특정 데이타 추출", "_____no_output_____" ] ], [ [ "#------------------------------------------ (1) 단일값 출력\nimport numpy as np\narr = np.arange(1, 11)\nprint(arr)\n\n## 세번째 요소 추출 ( 0부터 인덱스)\nprint(arr[3])\n\n## 뒤에서 세번째 요소 추출 ( 뒤에서 인덱스는 -1부터)\nprint(arr[-3])", "[ 1 2 3 4 5 6 7 8 9 10]\n4\n8\n" ], [ "## 1부터 9까지 nparray를 만들고 3행 3열 2차원 구조로 변경한후\n## 두번째 행의 세번째 열의 값 추출\narr = np.arange(1, 10)\nprint(arr)\n\narr2 = arr.reshape(3, 3)\nprint(arr2.ndim)\nprint(arr2)\nprint(arr2[[1], 2])\n", "[1 2 3 4 5 6 7 8 9]\n2\n[[1 2 3]\n [4 5 6]\n [7 8 9]]\n[6]\n" ], [ "#------------------------------------------ (2) 슬라이싱 (:)\narr = np.arange(1, 10)\nprint(arr)\n\n# 2번째부터 
4번째까지의 요소 추출\nprint(arr[1:4])\n\n# 2번째부터 마지막까지 요소 추출\nprint(arr[1:])\n\n# 처음부터 4번째까지 요소 추출\nprint(arr[0:4])\n\n", "[1 2 3 4 5 6 7 8 9]\n[2 3 4]\n[2 3 4 5 6 7 8 9]\n[1 2 3 4]\n" ], [ "# 2차원 배열에서 생성\n'''\n 1 2 3\n 4 5 6\n 7 8 9\n'''\n\narr = np.arange(1, 10)\nndarr = arr.reshape(3,3)\nprint(ndarr)\n \n## 그림에서 1, 2, 4, 5 추출\nprint(ndarr[[0, 1], :2])\n\n## 그림에서 4, 5, 6, 7, 8, 9 추출\nprint(ndarr[[1, 2], :])\n\n\n## 그림에서 2, 3, 5, 6 추출\nprint(ndarr[[0, 1], 1:])\n\n## 그림에서 1, 4 추출\nprint(ndarr[[0, 1], :1])\n\n## 그림에서 전체 요소 추출\nprint(ndarr[:, :])\nprint(ndarr[::])\nprint(ndarr)\n", "[[1 2 3]\n [4 5 6]\n [7 8 9]]\n[[1]\n [4]]\n" ], [ "# 2차원 ndarray에서 뒤의 오는 인덱스가 없으면 1차원으로 반환\n# 3차원 ndarray에서 뒤의 오는 인덱스가 없으면 2차원으로 반환\nprint(ndarr[1][1])\nprint(ndarr[1]) # 1차원 ndarray", "5\n[4 5 6]\n" ], [ "# 슬라이싱[:]과 인덱스선택을 결합\n'''\n 1 2 3\n 4 5 6\n 7 8 9\n'''\n\nimport numpy as np\narr = np.arange(1, 10)\nndarr = arr.reshape(3,3)\nprint(ndarr)\n\n## 그림에서 1, 2, 4, 5 추출\nprint(ndarr[[0, 1], :2])\nprint(ndarr[:2, :2])\n# print(ndarr[[0, 1], [0, 1]])\n\n## 그림에서 4, 5, 6, 7, 8, 9 추출\nprint(ndarr[[1, 2], :3])\n\n## 그림에서 2, 3, 5, 6 추출\nprint(ndarr[[0, 1], 1:3])\n\n## 그림에서 1, 4 추출\nprint(ndarr[[0 ,1], :1])\n\n## 그림에서 3, 6 추출\nprint(ndarr[[0, 1], 2:])\n\n\n# 지금은 너무 간단하다고 여기지만 추후에 데이타에서 많이 헷갈려서 여기서 시간 소요를 많이 한다", "[[1 2 3]\n [4 5 6]\n [7 8 9]]\n[[1 2]\n [4 5]]\n[[1 2]\n [4 5]]\n[[4 5 6]\n [7 8 9]]\n[[2 3]\n [5 6]]\n[[1]\n [4]]\n[[3]\n [6]]\n" ], [ "#------------------------------------------ (3) 블린인덱싱\n# 조건 필터링과 검색을 같이 하기에 자주 사용\n\narr = np.arange(1, 10)\nndarr = arr.reshape(3,3)\nprint(ndarr)\n\n# 5보다 큰 요소들 추출\nprint(ndarr > 5)\n\n# 8값 요소를 88로 변경\nndarr[[2], 1] = 88\nndarr", "[[1 2 3]\n [4 5 6]\n [7 8 9]]\n[[False False False]\n [False False True]\n [ True True True]]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7a52f1d341f4c88e8fa96f26a0a49b554de1184
7,720
ipynb
Jupyter Notebook
NOTEBOOKS/aws_iot_mqtt.ipynb
SebastianRoll/NDC-MicroPython
b8a366b02b5eb18b6f2a37f35050bf62cc95dae9
[ "MIT" ]
1
2018-09-29T12:28:11.000Z
2018-09-29T12:28:11.000Z
NOTEBOOKS/aws_iot_mqtt.ipynb
SebastianRoll/NDC-MicroPython
b8a366b02b5eb18b6f2a37f35050bf62cc95dae9
[ "MIT" ]
null
null
null
NOTEBOOKS/aws_iot_mqtt.ipynb
SebastianRoll/NDC-MicroPython
b8a366b02b5eb18b6f2a37f35050bf62cc95dae9
[ "MIT" ]
1
2019-06-09T17:05:51.000Z
2019-06-09T17:05:51.000Z
58.931298
1,213
0.664896
[ [ [ "%%bash\npip install AWSIoTPythonSDK", "Collecting AWSIoTPythonSDK\n Downloading https://files.pythonhosted.org/packages/e4/af/2cc14c4c9a0e1397f0f6c571277f997936f88c2e2c6a3a388640a200f829/AWSIoTPythonSDK-1.3.1.tar.gz (70kB)\nBuilding wheels for collected packages: AWSIoTPythonSDK\n Running setup.py bdist_wheel for AWSIoTPythonSDK: started\n Running setup.py bdist_wheel for AWSIoTPythonSDK: finished with status 'done'\n Stored in directory: /home/sebastian/.cache/pip/wheels/bb/15/55/cb48293eb01350e3474c8cd830fd0efd1f0e1c1874deb3a40e\nSuccessfully built AWSIoTPythonSDK\nInstalling collected packages: AWSIoTPythonSDK\nSuccessfully installed AWSIoTPythonSDK-1.3.1\n" ], [ "# Import SDK packages\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\n\n\n# For certificate based connection\nmyMQTTClient = AWSIoTMQTTClient(\"black\")\n# For Websocket connection\n# myMQTTClient = AWSIoTMQTTClient(\"myClientID\", useWebsocket=True)\n# Configurations\n# For TLS mutual authentication\nmyMQTTClient.configureEndpoint(\"av8niy97wewyw.iot.eu-west-1.amazonaws.com\", 8883)\n# For Websocket\n# myMQTTClient.configureEndpoint(\"YOUR.ENDPOINT\", 443)\nmyMQTTClient.configureCredentials(\"/home/sebastian/PycharmProjects/webstep-office-sensors/sensors/aws-root-ca.txt\", \n \"/home/sebastian/PycharmProjects/webstep-office-sensors/sensors/black/7d76855907-private.pem.key\", \n \"/home/sebastian/PycharmProjects/webstep-office-sensors/sensors/black/7d76855907-certificate.pem.crt\")\n# For Websocket, we only need to configure the root CA\n# myMQTTClient.configureCredentials(\"YOUR/ROOT/CA/PATH\")\nmyMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing\nmyMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz\nmyMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec\nmyMQTTClient.configureMQTTOperationTimeout(5) # 5 sec", "_____no_output_____" ], [ "myMQTTClient.connect()\nmyMQTTClient.publish(\"black\", '{\"b\": \"myPayload\"}', 0)\n#myMQTTClient.subscribe(\"black\", 1, customCallback)\n#myMQTTClient.unsubscribe(\"myTopic\")\nmyMQTTClient.disconnect()", "Connect timed out\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
e7a5349e385fd2f6068c425764159365e6fac791
11,229
ipynb
Jupyter Notebook
notebook/Intro to PixieDust.ipynb
jordangeorge/pixiedust
00e49f4e8ba44cc248685146d3ad7e2d04ac6cd4
[ "Apache-2.0" ]
null
null
null
notebook/Intro to PixieDust.ipynb
jordangeorge/pixiedust
00e49f4e8ba44cc248685146d3ad7e2d04ac6cd4
[ "Apache-2.0" ]
null
null
null
notebook/Intro to PixieDust.ipynb
jordangeorge/pixiedust
00e49f4e8ba44cc248685146d3ad7e2d04ac6cd4
[ "Apache-2.0" ]
1
2018-04-13T18:25:52.000Z
2018-04-13T18:25:52.000Z
27.057831
574
0.599163
[ [ [ "# Hello PixieDust!\nThis sample notebook provides you with an introduction to many features included in PixieDust. You can find more information about PixieDust at https://ibm-watson-data-lab.github.io/pixiedust/. To ensure you are running the latest version of PixieDust uncomment and run the following cell. Do not run this cell if you installed PixieDust locally from source and want to continue to run PixieDust from source.", "_____no_output_____" ] ], [ [ "#!pip install --user --upgrade pixiedust", "_____no_output_____" ] ], [ [ "# Import PixieDust\nRun the following cell to import the PixieDust library. You may need to restart your kernel after importing. Follow the instructions, if any, after running the cell. Note: You must import PixieDust every time you restart your kernel.", "_____no_output_____" ] ], [ [ "import pixiedust", "_____no_output_____" ] ], [ [ "# Enable the Spark Progress Monitor\nPixieDust includes a Spark Progress Monitor bar that lets you track the status of your Spark jobs. You can find more info at https://ibm-watson-data-lab.github.io/pixiedust/sparkmonitor.html. Run the following cell to enable the Spark Progress Monitor:", "_____no_output_____" ] ], [ [ "pixiedust.enableJobMonitor();", "_____no_output_____" ] ], [ [ "# Example use of the PackageManager\nYou can use the PackageManager component of Pixiedust to install and uninstall maven packages into your notebook kernel without editing configuration files. This component is essential when you run notebooks from a hosted cloud environment and do not have access to the configuration files. You can find more info at https://ibm-watson-data-lab.github.io/pixiedust/packagemanager.html. Run the following cell to install the GraphFrame package. You may need to restart your kernel after installing new packages. Follow the instructions, if any, after running the cell. ", "_____no_output_____" ] ], [ [ "pixiedust.installPackage(\"graphframes:graphframes:0.1.0-spark1.6\")\nprint(\"done\")", "_____no_output_____" ] ], [ [ "Run the following cell to print out all installed packages:", "_____no_output_____" ] ], [ [ "pixiedust.printAllPackages()", "_____no_output_____" ] ], [ [ "# Example use of the display() API\nPixieDust lets you visualize your data in just a few clicks using the display() API. You can find more info at https://ibm-watson-data-lab.github.io/pixiedust/displayapi.html. 
The following cell creates a DataFrame and uses the display() API to create a bar chart:", "_____no_output_____" ] ], [ [ "sqlContext=SQLContext(sc)\nd1 = sqlContext.createDataFrame(\n[(2010, 'Camping Equipment', 3),\n (2010, 'Golf Equipment', 1),\n (2010, 'Mountaineering Equipment', 1),\n (2010, 'Outdoor Protection', 2),\n (2010, 'Personal Accessories', 2),\n (2011, 'Camping Equipment', 4),\n (2011, 'Golf Equipment', 5),\n (2011, 'Mountaineering Equipment',2),\n (2011, 'Outdoor Protection', 4),\n (2011, 'Personal Accessories', 2),\n (2012, 'Camping Equipment', 5),\n (2012, 'Golf Equipment', 5),\n (2012, 'Mountaineering Equipment', 3),\n (2012, 'Outdoor Protection', 5),\n (2012, 'Personal Accessories', 3),\n (2013, 'Camping Equipment', 8),\n (2013, 'Golf Equipment', 5),\n (2013, 'Mountaineering Equipment', 3),\n (2013, 'Outdoor Protection', 8),\n (2013, 'Personal Accessories', 4)],\n[\"year\",\"zone\",\"unique_customers\"])\n\ndisplay(d1)", "_____no_output_____" ] ], [ [ "# Example use of the Scala bridge\nData scientists working with Spark may occasionaly need to call out to one of the hundreds of libraries available on spark-packages.org which are written in Scala or Java. PixieDust provides a solution to this problem by letting users directly write and run scala code in its own cell. It also lets variables be shared between Python and Scala and vice-versa. You can find more info at https://ibm-watson-data-lab.github.io/pixiedust/scalabridge.html.", "_____no_output_____" ], [ "Start by creating a python variable that we'll use in scala:", "_____no_output_____" ] ], [ [ "python_var = \"Hello From Python\"\npython_num = 10", "_____no_output_____" ] ], [ [ "Create scala code that use the python_var and create a new variable that we'll use in Python:", "_____no_output_____" ] ], [ [ "%%scala\nprintln(python_var)\nprintln(python_num+10)\nval __scala_var = \"Hello From Scala\"", "_____no_output_____" ] ], [ [ "Use the __scala_var from python:", "_____no_output_____" ] ], [ [ "print(__scala_var)", "_____no_output_____" ] ], [ [ "# Sample Data\nPixieDust includes a number of sample data sets. You can use these sample data sets to start playing with the display() API and other PixieDust features. You can find more info at https://ibm-watson-data-lab.github.io/pixiedust/loaddata.html. Run the following cell to view the available data sets:", "_____no_output_____" ] ], [ [ "pixiedust.sampleData()", "_____no_output_____" ] ], [ [ "# Example use of sample data\nTo use sample data locally run the following cell to install required packages. You may need to restart your kernel after running this cell.", "_____no_output_____" ] ], [ [ "pixiedust.installPackage(\"com.databricks:spark-csv_2.10:1.5.0\")\npixiedust.installPackage(\"org.apache.commons:commons-csv:0\")", "_____no_output_____" ] ], [ [ "Run the following cell to get the first data set from the list. This will return a DataFrame and assign it to the variable d2:", "_____no_output_____" ] ], [ [ "d2 = pixiedust.sampleData(1)", "_____no_output_____" ] ], [ [ "Pass the sample data set (d2) into the display() API:", "_____no_output_____" ] ], [ [ "display(d2)", "_____no_output_____" ] ], [ [ "You can also download data from a CSV file into a DataFrame which you can use with the display() API:", "_____no_output_____" ] ], [ [ "d3 = pixiedust.sampleData(\"https://openobjectstore.mybluemix.net/misc/milliondollarhomes.csv\")", "_____no_output_____" ] ], [ [ "# PixieDust Log\nPixieDust comes complete with logging to help you troubleshoot issues. 
You can find more info at https://ibm-watson-data-lab.github.io/pixiedust/logging.html. To access the log run the following cell:", "_____no_output_____" ] ], [ [ "% pixiedustLog -l debug", "_____no_output_____" ] ], [ [ "# Environment Info.\nThe following cells will print out information related to your notebook environment.", "_____no_output_____" ] ], [ [ "%%scala\nval __scala_version = util.Properties.versionNumberString", "_____no_output_____" ], [ "import platform\nprint('PYTHON VERSON = ' + platform.python_version())\nprint('SPARK VERSON = ' + sc.version)\nprint('SCALA VERSON = ' + __scala_version)", "_____no_output_____" ] ], [ [ "# More Info.\nFor more information about PixieDust check out the following:\n#### PixieDust Documentation: https://ibm-watson-data-lab.github.io/pixiedust/index.html\n#### PixieDust GitHub Repo: https://github.com/ibm-watson-data-lab/pixiedust", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
e7a53fa05d7b47d06f89fd73dc64a851b09c016c
352,199
ipynb
Jupyter Notebook
01_twoDim.ipynb
YanniPapandreou/statFEM
189ddbb9c2f5a363d6e7e2f62a893cb3706e45bb
[ "Apache-2.0" ]
1
2022-02-04T09:26:33.000Z
2022-02-04T09:26:33.000Z
01_twoDim.ipynb
YanniPapandreou/statFEM
189ddbb9c2f5a363d6e7e2f62a893cb3706e45bb
[ "Apache-2.0" ]
null
null
null
01_twoDim.ipynb
YanniPapandreou/statFEM
189ddbb9c2f5a363d6e7e2f62a893cb3706e45bb
[ "Apache-2.0" ]
null
null
null
237.651147
182,888
0.909457
[ [ [ "# default_exp twoDim", "_____no_output_____" ], [ "#hide\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\nimport matplotlib.colorbar as colorbar\nimport matplotlib.tri as tri\nplt.rcParams['figure.figsize'] = (10,6)\nimport sympy; sympy.init_printing()\n# code for displaying matrices nicely\ndef display_matrix(m):\n display(sympy.Matrix(m))", "_____no_output_____" ] ], [ [ "# twoDim\n> Code for a 2-D problem.", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.showdoc import *", "_____no_output_____" ] ], [ [ "# 2 dimensional case (PDE)\n\nWe consider the following 2-D problem:\n\n$$\\nabla\\cdot\\left(\\kappa(x)\\nabla u(x)\\right)=f(x) \\quad\\forall x\\in D=[0,1]^{2}$$\n$$u(x)=0\\quad\\forall x\\in\\partial D$$\nwhere here $f$ is again a random forcing term, assumed to be a GP in this work.", "_____no_output_____" ], [ "## Variational formulation\n\nThe variational formulation is given by:\n\n$$a(u,v)=L(v)$$\n\nwhere:\n\n$$a(u,v)=\\int_{D}\\nabla u\\cdot\\left(\\kappa\\nabla u\\right)dx$$\n\nand\n\n$$L(v)=\\int_{D}fvdx$$", "_____no_output_____" ], [ "We will make the following choices for $\\kappa,f$:\n\n$$\\kappa(x)=1$$\n\n$$f\\sim\\mathcal{G}\\mathcal{P}(\\bar{f},k_{f})$$\n\n$$\\bar{f}(x)=1$$\n\n$$ k_{f}(x,y) = \\sigma_f^{2}\\exp\\left(-\\frac{\\|x-y\\|^2}{2l_f^2}\\right)$$\n\n$$ \\sigma_{f} = 0.1$$\n\n$$ l_f = 0.4 $$\n\nwhere $\\|\\cdot\\|$ is the usual Euclidean norm.", "_____no_output_____" ], [ "Since we do not have access to a suitable Green's function for this problem, we will have to estimate the rate of convergence of the statFEM prior and posterior by comparing them on a sequence of refined meshes. More details on this will follow later. Thus, we need similar code as for the 1-D problem.", "_____no_output_____" ], [ "## statFEM prior mean\n\nWe will again utilise FEniCS to obtain the statFEM prior mean. 
For this purpose, we create a function `mean_assembler` which will assemble the mean for the statFEM prior.", "_____no_output_____" ] ], [ [ "#export\nfrom dolfin import *\nimport numpy as np\nfrom scipy import integrate\nfrom scipy.spatial.distance import cdist\nfrom scipy.linalg import sqrtm\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.linalg import spsolve\nfrom scipy.interpolate import interp1d\nfrom joblib import Parallel, delayed\nimport multiprocessing\n\n# code to assemble the mean for a given mesh size\ndef mean_assembler(h,f_bar):\n \"This function assembles the mean for the statFEM prior for our 2-D problem\"\n # get size of the grid\n J = int(np.round(1/h))\n\n # set up the mesh and function space for FEM\n mesh = UnitSquareMesh(J,J)\n V = FunctionSpace(mesh,'Lagrange',1)\n\n # set up boundary condition\n def boundary(x, on_boundary):\n return on_boundary\n\n bc = DirichletBC(V, 0.0, boundary)\n # set up the functions κ and f\n κ = Constant(1.0)\n f = f_bar\n\n # set up the bilinear form for the variational problem\n u = TrialFunction(V)\n v = TestFunction(V)\n a = inner(κ*grad(u),grad(v))*dx\n\n # set up the linear form\n L = f*v*dx\n\n # solve the variational problem\n μ = Function(V)\n solve(a == L, μ, bc)\n\n return μ", "_____no_output_____" ] ], [ [ "`mean_assembler` takes in the mesh size `h` and the mean function `f_bar` for the forcing and computes the mean of the approximate statFEM prior, returning this as a FEniCS function.", "_____no_output_____" ], [ "> Important: `mean_assembler` requires `f_bar` to be represented as a FEniCS function/expression/constant.", "_____no_output_____" ], [ "Let's check that this is working:", "_____no_output_____" ] ], [ [ "h = 0.1\nf_bar = Constant(1.0)\nμ = mean_assembler(h,f_bar)\nμ", "_____no_output_____" ], [ "# check the type of μ\nassert type(μ) == function.function.Function", "_____no_output_____" ] ], [ [ "Let's plot $\\mu$:", "_____no_output_____" ] ], [ [ "#hide_input\n# use FEniCS to plot μ\nplot(μ)\nplt.xlabel(r'$x$')\nplt.ylabel(r'$y$')\nplt.title(r'Plot of statFEM mean for $h=%.2f$'%h)\nplt.show()", "_____no_output_____" ] ], [ [ "## statFEM prior covariance\n\nWe will also utilise FEniCS again to obtain an approximation of our statFEM covariance function.\n\nThe statFEM covariance can be approximated as follows:\n\n$$c_u^{\\text{FEM}}(x,y)\\approx\\sum_{i,j=1}^{J}\\varphi_{i}(x)Q_{ij}\\varphi_{j}(y)$$\n\nwhere $Q=A^{-1}MC_{f}M^{T}A^{-T}$ and where the $\\{\\varphi_{i}\\}_{i=1}^{J}$ are the FE basis functions corresponding to the interior nodes of our domain.\n\nwith $C_f$ being the kernel matrix of $f$ (evaluated on the FEM grid).\n\nAs we will be comparing the statFEM covariance functions for finer and finer FE mesh sizes we will need to be able to assemble the statFEM covariance function on a grid. As discussed in <a href=\"/statFEM/oneDim.html#Difference-between-true-prior-covariance-and-statFEM-prior-covariance\"><code>oneDim</code></a>, we can assemble such covariance matrices in a very efficient manner. The code remains largely the same as in the 1-D case and so we do not go into as much detail here.", "_____no_output_____" ], [ "We start by creating a function `kernMat` which assembles the covariance matrix corresponding to a covariance function `k` on a grid `grid`.", "_____no_output_____" ] ], [ [ "#export\ndef kernMat(k,grid,parallel=True,translation_inv=False):\n \"Function to compute the covariance matrix $K$ corresponding to the covariance kernel $k$ on a grid. 
This matrix has $ij$-th entry $K_{ij}=k(x_i,x_j)$ where $x_i$ is the $i$-th point of the grid.\"\n # get the length of the grid\n n = grid.shape[0]\n # preallocate an n x n array of zeros to hold the cov matrix\n K = np.zeros((n,n))\n\n # check if the cov matrix should be computed in parallel\n if parallel:\n # compute the cov matrix in parallel by computing the upper triangular part column by column\n # set up function to compute the ith column of the upper triangular part:\n def processInput(i):\n return np.array([k(grid[i,:],grid[j,:]) for j in range(i,n)])\n \n # get the number of cpu cores present and compute the upper triangular columns in parallel\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(processInput)(i) for i in range(n))\n\n # store the results in the appropriate positions in K\n #for (i,v) in enumerate(results[0:n-1]):\n for (i,v) in enumerate(results): # is this correct???\n K[i,i:] = v\n \n # only the upper triangular part has been formed, so use the symmetry of the cov mat to get full K:\n K = K + K.T - np.diag(K.diagonal())\n return K\n elif translation_inv:\n # reshape grid so that it has correct dimensions\n grid = grid.reshape(n,-1)\n \n # compute the distance matrix D\n D = cdist(grid,grid)\n\n # evaluate the kernel function using D\n K = k(D)\n return K\n else:\n # compute the cov mat using a nested for loop\n for i in range(n):\n for j in range(i,n):\n K[i,j] = k(grid[i,:],grid[j,:])\n K = K + K.T - np.diag(K.diagonal())\n return K ", "_____no_output_____" ] ], [ [ "> Note: This function takes in two optional boolean arguments `parallel` and `translation_inv`. The first of these specifies whether or not the cov matrix should be computed in parallel and the second specifies whether or not the cov kernel is translation invariant. If it is, the covariance matrix is computed more efficiently using the `cdist` function from scipy.", "_____no_output_____" ], [ "Let's quickly test if this function is working, by computing the cov matrix for white noise, which has kernel function $k(x,y)=\\delta(x-y)$. 
For a grid of length $N$ this should be the $N\\times N$ identity matrix.", "_____no_output_____" ] ], [ [ "# set up the kernel function\n# set up tolerance for comparison\ntol = 1e-16\ndef k(x,y):\n if (np.abs(x-y) < tol).all():\n # x == y within the tolerance\n return 1.0\n else:\n # x != y within the tolerance\n return 0.0\n\n# set up grid\nn = 21\nx_range = np.linspace(0,1,n)\ngrid = np.array([[x,y] for x in x_range for y in x_range])\nN = len(grid) # get length of grid (N=n^2)\nK = kernMat(k,grid,True,False) # parallel mode\n\n# check that this is the N x N identity matrix\nassert (K == np.eye(N)).all()", "_____no_output_____" ] ], [ [ "We now create a function `BigPhiMat` to utilise FEniCS to efficiently compute the matrix $\\boldsymbol{\\Phi}$ defined above.", "_____no_output_____" ] ], [ [ "#export\ndef BigPhiMat(J,grid):\n \"Function to compute the $\\Phi$ matrix.\"\n # create the FE mesh and function space\n mesh = UnitSquareMesh(J,J)\n V = FunctionSpace(mesh,'Lagrange',1)\n # get the tree for the mesh\n tree = mesh.bounding_box_tree()\n # set up a function to compute the ith column of Phi corresponding to the ith grid point\n def Φ(i):\n x = grid[i]\n cell_index = tree.compute_first_entity_collision(Point(*x))\n cell = Cell(mesh,cell_index)\n cell_global_dofs = V.dofmap().cell_dofs(cell_index)\n vertex_coordinates = cell.get_vertex_coordinates()\n cell_orientation = cell.orientation()\n data = V.element().evaluate_basis_all(x,vertex_coordinates,cell_orientation)\n return (data,cell_global_dofs,i*np.ones_like(cell_global_dofs))\n # compute all the columns of Phi using the function above\n res = [Φ(i) for i in range(len(grid))]\n # assemble the sparse matrix Phi using the results\n data = np.hstack([res[i][0] for i in range(len(grid))])\n row = np.hstack([res[i][1] for i in range(len(grid))])\n col = np.hstack([res[i][2] for i in range(len(grid))])\n return csr_matrix((data,(row,col)),shape=(V.dim(),len(grid)))", "_____no_output_____" ] ], [ [ "`BigPhiMat` takes in two arguments: `J`, which controls the FE mesh size ($h=1/J^{2}$), and `grid` which is the grid in the definition of $\\boldsymbol{\\Phi}$. `BigPhiMat` returns $\\boldsymbol{\\Phi}$ as a sparse `csr_matrix` for memory efficiency.", "_____no_output_____" ], [ "> Note: Note that since FEniCS works with the FE functions corresponding to all the FE dofs and our statFEM cov matix only uses the FE functions corresponding to non-boundary dofs we need to account for this in the code. 
See the source code for `BigPhiMat` to see how this is done.", "_____no_output_____" ], [ "We now create a function `cov_asssembler` which assembles the approximate FEM covariance matrix on the grid.", "_____no_output_____" ] ], [ [ "#export\n# function to assemble the fem covariance\ndef cov_assembler(J,k_f,grid,parallel,translation_inv):\n \"Function to assemble the approximate FEM covariance matrix on the reference grid.\"\n \n # set up mesh and function space\n mesh = UnitSquareMesh(J,J)\n V = FunctionSpace(mesh,'Lagrange',1)\n \n # set up FE grid\n x_grid = V.tabulate_dof_coordinates()\n \n # set up boundary condition\n def boundary(x, on_boundary):\n return on_boundary\n \n bc = DirichletBC(V, 0.0, boundary)\n \n # get the boundary and interior dofs\n bc_dofs = bc.get_boundary_values().keys()\n first, last = V.dofmap().ownership_range()\n all_dofs = range(last - first)\n interior_dofs = list(set(all_dofs) - set(bc_dofs))\n bc_dofs = list(set(bc_dofs))\n \n # set up the function κ\n κ = Constant(1.0)\n\n # get the mass and stiffness matrices as sparse csr_matrices\n u = TrialFunction(V)\n v = TestFunction(V)\n \n mass_form = u*v*dx\n a = inner(κ*grad(u),grad(v))*dx\n \n M = assemble(mass_form)\n A = assemble(a)\n M = as_backend_type(M).mat()\n A = as_backend_type(A).mat()\n M = csr_matrix(M.getValuesCSR()[::-1],shape=M.size)\n A = csr_matrix(A.getValuesCSR()[::-1],shape=A.size)\n \n # extract the submatrices corresponding to the interior dofs\n M = M[interior_dofs,:][:,interior_dofs]\n A = A[interior_dofs,:][:,interior_dofs]\n \n # get the forcing cov matrix on the interior nodes of the grid\n Σ_int = kernMat(k_f,x_grid[interior_dofs],parallel,translation_inv)\n \n # form the matrix Q in the defintion of the approximate FEM cov mat\n # Note: overwrite Σ_int for memory efficiency.\n# Σ_int = M @ Σ_int @ M.T\n Σ_int = Σ_int @ M.T\n Σ_int = M @ Σ_int\n\n Σ_int = spsolve(A,Σ_int)\n Σ_int = spsolve(A,Σ_int.T).T\n \n # ensure Σ_int is symmetric\n Σ_int = 0.5*(Σ_int + Σ_int.T)\n \n # get big phi matrix on the grid (extracting only the rows corresponding to the\n # interior dofs)\n Phi = BigPhiMat(J,grid)[interior_dofs,:]\n #print(\"Computed Phi\")\n \n # assemble cov mat on grid using Phi and Σ_int\n Σ = Phi.T @ Σ_int @ Phi\n \n # ensure Σ is symmetric and return\n Σ = 0.5*(Σ + Σ.T)\n return Σ", "_____no_output_____" ] ], [ [ "`cov_assembler` takes in several arguments which are explained below:\n\n- `J`: controls the FE mesh size ($h=1/J^{2})$\n- `k_f`: the covariance function for the forcing $f$\n- `grid`: the reference grid where the FEM cov matrix should be computed on\n- `parallel`: boolean argument indicating whether the intermediate computation of $C_f$ should be done in parallel \n- `translation_inv`: boolean argument indicating whether the intermediate computation of $C_f$ should be computed assuming `k_f` is translation invariant or not", "_____no_output_____" ], [ "As a quick demonstration that the code is working, we will the statFEM cov matrix for a relatively coarse grid.", "_____no_output_____" ] ], [ [ "# set up kernel function for forcing\nf_bar = Constant(1.0)\n\nl_f = 0.4\nσ_f = 0.1\n\ndef k_f(x):\n return (σ_f**2)*np.exp(-(x**2)/(2*(l_f**2)))\n\n# set up grid\nn = 21\nx_range = np.linspace(0,1,n)\ngrid = np.array([[x,y] for x in x_range for y in x_range])\n\n# get the statFEM grid for a particular choice of J\nJ = 10\nΣ = cov_assembler(J,k_f,grid,False,True)", "_____no_output_____" ] ], [ [ "Let's plot a heatmap of the statFEM cov matrix:", "_____no_output_____" ] ], [ [ 
"#hide_input\nsns.heatmap(Σ,cbar=True,\n annot=False,\n xticklabels=False,\n yticklabels=False,\n cmap=cm.viridis)\nplt.title('Heat map of statFEM covariance matrix')\nplt.show()", "_____no_output_____" ] ], [ [ "> Note: that the banded structure in the above statFEM covariance matrix is due to the internal ordering of the FE grid in FEniCS.", "_____no_output_____" ], [ "## statFEM posterior mean\n\nThe statFEM posterior from incorporating sensor readings has the same form as given in <a href=\"/statFEM/oneDim.html#Posterior-from-incorporating-sensor-readings\"><code>oneDim</code></a>. We will thus require very similar code as to the 1-D case. We start by creating a function `m_post` which evaluates the posterior mean at a given point.", "_____no_output_____" ] ], [ [ "#export\ndef m_post(x,m,c,v,Y,B):\n \"This function evaluates the posterior mean at the point $x$.\"\n m_vect = np.array([m(y_i) for y_i in Y]).flatten()\n c_vect = c(x).flatten()\n \n # compute the update term\n update = c_vect @ np.linalg.solve(B,m_vect-v)\n \n # return m_post\n return (m(x) - update)", "_____no_output_____" ] ], [ [ "`m_post` takes in several arguments which are explained below:\n\n- `x`: point where the posterior mean will be evaluated\n- `m`: function which computes the prior mean at a given point y\n- `c`: function which returns the vector (c(x,y)) for y in Y (note: c is the prior covariance function)\n- `v`: vector of noisy sensor readings\n- `Y`: vector of sensor locations\n- `B`: the matrix $\\epsilon^{2}I+C_Y$ to be inverted in order to obtain the posterior", "_____no_output_____" ], [ "We now require code to generate samples from a GP with mean $m$ and cov function $k$ on a grid. We write the function `sample_gp` for this purpose.", "_____no_output_____" ] ], [ [ "#export\ndef sample_gp(n_sim,m,k,grid,par,trans,tol=1e-9):\n \"Function to sample a GP with mean $m$ and cov $k$ on a grid.\"\n \n # get length of grid\n d = len(grid)\n \n # construct mean vector\n μ = np.array([m(x) for x in grid]).reshape(d,1)\n \n # construct covariance matrix\n Σ = kernMat(k,grid,parallel=par,translation_inv=trans)\n \n # construct the cholesky decomposition Σ = GG^T\n # we add a small diagonal perturbation to Σ to ensure it \n # strictly positive definite\n G = np.linalg.cholesky(Σ + tol * np.eye(d))\n \n # draw iid standard normal random vectors\n Z = np.random.normal(size=(d,n_sim))\n \n # construct samples from GP(m,k)\n Y = G@Z + np.tile(μ,n_sim)\n \n # return the sampled fields\n return Y", "_____no_output_____" ] ], [ [ "`sample_gp` takes in several arguments which are explained below:\n\n- `n_sim`: number of trajectories to be sampled\n- `m`: mean function for the GP\n- `k`: cov function for the GP\n- `grid`: grid of points on which to sample the GP\n- `par`: boolean argument indicating whether the computation of the cov matrix should be done in parallel\n- `trans`: boolean argument indicating whether the computation of the cov matrix should be computed assuming `k` is translation invariant or not\n- `tol`: controls the size of the tiny diagonal perturbation added to cov matrix to ensure it is strictly positive definite (defaults to `1e-9`)", "_____no_output_____" ], [ "As a quick demonstration that the code is working lets generate 2 realisations of white noise, using the kernel `k` from one of the previous tests and plot a heatmap of these random fields side-by-side.", "_____no_output_____" ] ], [ [ "#hide_input\nn = 41\nx_range = np.linspace(0,1,n)\ngrid = np.array([[x,y] for x in x_range for y in 
x_range])\n\n# set up mean\ndef m(x):\n return 0.0\n\nnp.random.seed(23534)\nsamples = sample_gp(2,m,k,grid,True,False)\n\nsample_1 = samples[:,0].flatten()\nsample_2 = samples[:,1].flatten()\n\nvmin = min(sample_1.min(),sample_2.min())\nvmax = max(sample_1.max(),sample_2.max())\ncmap = cm.jet\nnorm = colors.Normalize(vmin=vmin,vmax=vmax)\n\nx = grid[:,0].flatten()\ny = grid[:,1].flatten()\ntriang = tri.Triangulation(x,y)\n\nplt.rcParams['figure.figsize'] = (12,6)\nfig, axs = plt.subplots(ncols=3, gridspec_kw=dict(width_ratios=[4,4,0.2]))\naxs[0].tricontourf(triang,sample_1.flatten(),cmap=cmap)\naxs[1].tricontourf(triang,sample_2.flatten(),cmap=cmap)\ncb = colorbar.ColorbarBase(axs[2],cmap=cmap,norm=norm)\nfig.suptitle('Realisations of white-noise fields')\nplt.show()", "_____no_output_____" ] ], [ [ "Let's also quickly generate 2 realisations for the kernel `k_f` above:", "_____no_output_____" ] ], [ [ "#hide_input\nnp.random.seed(534)\nsamples = sample_gp(2,m,k_f,grid,False,True)\n\nsample_1 = samples[:,0].flatten()\nsample_2 = samples[:,1].flatten()\n\nvmin = min(sample_1.min(),sample_2.min())\nvmax = max(sample_1.max(),sample_2.max())\ncmap = cm.jet\nnorm = colors.Normalize(vmin=vmin,vmax=vmax)\n\nx = grid[:,0].flatten()\ny = grid[:,1].flatten()\ntriang = tri.Triangulation(x,y)\n\nplt.rcParams['figure.figsize'] = (12,6)\nfig, axs = plt.subplots(ncols=3, gridspec_kw=dict(width_ratios=[4,4,0.2]))\naxs[0].tricontourf(triang,sample_1.flatten(),cmap=cmap)\naxs[1].tricontourf(triang,sample_2.flatten(),cmap=cmap)\ncb = colorbar.ColorbarBase(axs[2],cmap=cmap,norm=norm)\nfig.suptitle(r'Realisations of random fields with covariance $k_f$')\nplt.show()", "_____no_output_____" ] ], [ [ "The next bit of code we require is code to generate noisy sensor readings from our system. 
We write the function `gen_sensor` for this purpose.", "_____no_output_____" ] ], [ [ "#export\ndef gen_sensor(ϵ,m,k,Y,J,par,trans,tol=1e-9,require=False):\n \"Function to generate noisy sensor readings of the solution u on a sensor grid Y.\"\n \n # get number of sensors from the sensor grid Y\n s = len(Y)\n \n # create FEM space and grid\n mesh = UnitSquareMesh(J,J)\n V = FunctionSpace(mesh,'Lagrange',1)\n grid = V.tabulate_dof_coordinates()\n \n # sample a single f on the grid\n f_sim = sample_gp(1,m,k,grid,par=par,trans=trans,tol=tol)\n \n # set up a FEM function for this realisation\n f = Function(V)\n f.vector().set_local(f_sim.flatten())\n \n # use FENICS to find the corresponding solution u\n # set up boundary condition\n def boundary(x, on_boundary):\n return on_boundary\n\n bc = DirichletBC(V, 0.0, boundary)\n \n # set up the function κ\n κ = Constant(1.0)\n \n # set up the bilinear form for the variational problem\n u = TrialFunction(V)\n v = TestFunction(V)\n a = inner(κ*grad(u),grad(v))*dx\n \n # set up the linear form\n L = f*v*dx\n \n # solve the variational problem\n u_sol = Function(V)\n solve(a == L, u_sol, bc)\n \n # get solution on grid Y:\n u_Y = np.array([u_sol(y_i) for y_i in Y])\n \n # add N(0,ϵ^2) to each evaluation point\n u_S = u_Y + ϵ*np.random.normal(size=s)\n \n if require:\n return u_S, f_sim, u_sol\n else:\n return u_S", "_____no_output_____" ] ], [ [ "`gen_sensor` takes in several arguments which are explained below:\n\n- `ϵ`: controls the amount of sensor noise\n- `m`: mean function for the forcing f\n- `k`: cov function for the forcing f\n- `Y`: vector of sensor locations\n- `J`: controls the FE mesh size ($h=1/J^{2}$)\n- `par`: boolean argument indicating whether the computation of the forcing cov matrix should be done in parallel\n- `trans`: boolean argument indicating whether the computation of the forcing cov matrix should be computed assuming `k` is translation invariant or not\n- `tol`: controls the size of the tiny diagonal perturbation added to forcing cov matrix to ensure it is strictly positive definite (defaults to `1e-9`)\n- `require` : boolean argument indicating whether or not to also return the realisation of the forcing `f_sim` and the FEniCS solution `u_sol` (defaults to `False`)", "_____no_output_____" ], [ "> Warning: Since we do not have access to the true solution we must use FEniCS to get the solution for our system. 
Thus, one must choose a small enough `J` in `gen_sensor` above to ensure we get realistic noisy sensor readings.", "_____no_output_____" ], [ "Let's demonstrate that this code is working, by generating $s=25$ sensor observations with the sensors equally space in the domain $D$.", "_____no_output_____" ] ], [ [ "# set up mean function for forcing\ndef m_f(x):\n return 1.0\n\n# set up sensor grid and sensor noise level\nϵ = 0.2\ns = 25\ns_sqrt = int(np.round(np.sqrt(s)))\nY_range = np.linspace(0.01,0.99,s_sqrt)\nY = np.array([[x,y] for x in Y_range for y in Y_range])\nJ_fine = 100 # FE mesh size to compute solution on\n\n# generate the sensor observations\nnp.random.seed(235)\nv_dat = gen_sensor(ϵ,m_f,k_f,Y,J_fine,False,True)", "_____no_output_____" ], [ "#export\nclass MyExpression(UserExpression):\n \"Class to allow users to user their own functions to create a FEniCS UserExpression.\"\n def eval(self, value, x):\n value[0] = self.f(x)\n def value_shape(self):\n return ()", "_____no_output_____" ], [ "show_doc(MyExpression,title_level=4)", "_____no_output_____" ] ], [ [ "We now require code which will create the matrix $C_Y,h$ and the function $\\mathbf{c}^{(h)}$ required for the statFEM posterior mean. We will create the function `fem_cov_assembler_post` for this purpose. ", "_____no_output_____" ] ], [ [ "#export\ndef fem_cov_assembler_post(J,k_f,Y,parallel,translation_inv):\n \"Function to create the matrix $C_{Y,h}$ and the vector function $c^{(h)}$ required for the statFEM posterior mean.\"\n \n # set up mesh and function space\n mesh = UnitSquareMesh(J,J)\n V = FunctionSpace(mesh,'Lagrange',1)\n tree = mesh.bounding_box_tree()\n \n # set up grid\n x_grid = V.tabulate_dof_coordinates()\n \n # set up boundary condition\n def boundary(x, on_boundary):\n return on_boundary\n \n bc = DirichletBC(V, 0.0, boundary)\n \n # get the boundary and interior dofs\n bc_dofs = bc.get_boundary_values().keys()\n first, last = V.dofmap().ownership_range()\n all_dofs = range(last - first)\n interior_dofs = list(set(all_dofs) - set(bc_dofs))\n bc_dofs = list(set(bc_dofs))\n \n # set up the function κ\n κ = Constant(1.0)\n\n # get the mass and stiffness matrices\n u = TrialFunction(V)\n v = TestFunction(V)\n \n mass_form = u*v*dx\n a = inner(κ*grad(u),grad(v))*dx\n \n M = assemble(mass_form)\n A = assemble(a)\n M = as_backend_type(M).mat()\n A = as_backend_type(A).mat()\n M = csr_matrix(M.getValuesCSR()[::-1],shape=M.size)\n A = csr_matrix(A.getValuesCSR()[::-1],shape=A.size)\n \n # extract the submatrices corresponding to the interior dofs\n M = M[interior_dofs,:][:,interior_dofs]\n A = A[interior_dofs,:][:,interior_dofs]\n \n # get the forcing cov matrix on the interior nodes of the grid\n Σ_int = kernMat(k_f,x_grid[interior_dofs],parallel,translation_inv)\n\n # form the matrix Q in the defintion of the approximate FEM cov mat\n # Note: overwrite Σ_int for memory efficiency\n Σ_int = M @ Σ_int @ M.T\n\n Σ_int = spsolve(A,Σ_int)\n Σ_int = spsolve(A,Σ_int.T).T\n \n # ensure Σ_int is symmetric\n Σ_int = 0.5*(Σ_int + Σ_int.T)\n \n # get big phi matrix on the sensor grid (only need the interior dofs)\n Phi = BigPhiMat(J,Y)[interior_dofs,:]\n\n # assemble the FEM cov mat on the sensor grid and ensure it is symmetric\n Σ_s = Phi.T @ Σ_int @ Phi\n Σ_s = 0.5*(Σ_s + Σ_s.T)\n \n # set up function to yield the vector (c(x,y)) for y in Y\n def Φ(x):\n cell_index = tree.compute_first_entity_collision(Point(*x))\n cell_global_dofs = V.dofmap().cell_dofs(cell_index)\n cell = Cell(mesh, cell_index)\n vertex_coordinates 
= cell.get_vertex_coordinates()\n cell_orientation = cell.orientation()\n data = V.element().evaluate_basis_all(x,vertex_coordinates,cell_orientation)\n col = np.zeros_like(cell_global_dofs)\n res = csr_matrix((data,(cell_global_dofs,col)),shape=(V.dim(),1))[interior_dofs,:]\n return res\n \n def c_fem(x):\n return Φ(x).T @ Σ_int @ Phi\n \n #return Σ_s and c_fem\n return Σ_s, c_fem", "_____no_output_____" ] ], [ [ "`fem_cov_assembler_post` takes in several arguments which are explained below:\n\n- `J`: controls the FE mesh size ($h=1/J^2$)\n- `k_f`: the covariance function for the forcing $f$\n- `Y`: vector of sensor locations\n- `parallel`: boolean argument indicating whether the computation of the forcing cov mat should be done in parallel\n- `translation_inv`: boolean argument indicating whether the computation of the forcing cov mat should be computed assuming `k_f` is translation invariant or not", "_____no_output_____" ], [ "With all of this code in place we can now finally write the function `m_post_fem_assmebler` which will assemble the statFEM posterior mean function.", "_____no_output_____" ] ], [ [ "#export\ndef m_post_fem_assembler(J,f_bar,k_f,ϵ,Y,v_dat,par=False,trans=True):\n \"Function to assemble the statFEM posterior mean function.\"\n \n # get number of sensors\n s = len(Y)\n \n # set up mesh and function space\n mesh = UnitSquareMesh(J,J)\n V = FunctionSpace(mesh,'Lagrange',1)\n \n # set up boundary condition\n def boundary(x, on_boundary):\n return on_boundary\n \n bc = DirichletBC(V, 0.0, boundary)\n \n # set up the functions κ and f\n κ = Constant(1.0)\n f = f_bar\n \n # set up the bilinear form for the variational problem\n u = TrialFunction(V)\n v = TestFunction(V)\n a = inner(κ*grad(u),grad(v))*dx\n \n # set up linear form\n L = f*v*dx\n \n # solve the variational problem\n μ_fem = Function(V)\n solve(a == L, μ_fem, bc)\n \n # use fem_cov_assembler_post to obtain cov mat on sensor grid and function to compute vector\n # (c(x,y)) for y in Y\n C_fem_s, c_fem = fem_cov_assembler_post(J,k_f,Y,parallel=par,translation_inv=trans)\n \n # form B_fem_s by adding noise contribution\n C_fem_s += (ϵ**2)*np.eye(s)\n \n # assemble function to compute posterior mean and return\n def m_post_fem(x):\n return m_post(x,μ_fem,c_fem,v_dat,Y,C_fem_s)\n \n return m_post_fem", "_____no_output_____" ] ], [ [ "`m_post_fem_assembler` takes in several arguments which are explained below:\n\n- `J`: controls the FE mesh size ($h=1/J^{2}$)\n- `f_bar`: the mean function for the forcing $f$\n- `k_f`: the covariance function for the forcing $f$\n- `ϵ`: controls the amount of sensor noise\n- `Y`: vector of sensor locations\n- `v_dat`: vector of noisy sensor observations\n- `par`: boolean argument passed to `fem_cov_assembler_post`'s argument `parallel` (defaults to `False`)\n- `trans`: boolean argument passed to `fem_cov_assembler_post`'s argument `translation_inv` (defaults to `True`)", "_____no_output_____" ], [ "Let's quickly check that this function is working.", "_____no_output_____" ] ], [ [ "J = 20\nf_bar = Constant(1.0)\nm_post_fem = m_post_fem_assembler(J,f_bar,k_f,ϵ,Y,v_dat)\n# compute posterior mean at a location x in D\nx = np.array([0.3,0.1])\nm_post_fem(x)", "_____no_output_____" ] ], [ [ "## statFEM posterior covariance\n\nThe form of the statFEM posterior covariance remains the same as given in <a href=\"/statFEM/oneDim.html#Posterior-covariance\"><code>oneDim</code></a>. Thus, we require very similar code as to the 1-D case. 
We start by creating a function `c_post` which evaluates the posterior covariance at a given point.", "_____no_output_____" ] ], [ [ "#export\ndef c_post(x,y,c,Y,B):\n \"This function evaluates the posterior covariance at $(x,y)$\"\n \n # compute vectors c_x and c_y:\n c_x = np.array([c(x,y_i) for y_i in Y])\n c_y = np.array([c(y_i,y) for y_i in Y])\n \n # compute update term\n update = c_x @ np.linalg.solve(B,c_y)\n \n # return c_post\n return (c(x,y) - update)", "_____no_output_____" ] ], [ [ "`c_post` takes in several arguments which are explained below:\n\n- `x`,`y`: points to evaluate the covariance at\n- `c`: function which returns the prior covariance at any given pair $(x,y)$\n- `Y`: vector of sensor locations\n- `B`: the matrix $\\epsilon^{2}I+C_{Y}$ to be inverted in order to obtain the posterior", "_____no_output_____" ], [ "To compare the statFEM covariance matrices for finer and finer FE mesh sizes we will require some more code. First we create a function `post_fem_cov_assembler` which helps us to quickly assemble the statFEM posterior covariance matrix as explained in <a href=\"/statFEM/oneDim.html#Difference-between-posterior-covariances\"><code>oneDim</code></a>.", "_____no_output_____" ] ], [ [ "#export\ndef post_fem_cov_assembler(J,k_f,grid,Y,parallel,translation_inv):\n \"Function which assembles the matrices $Σ_X$ ,$Σ_{XY}$, and $Σ_Y$ required for the statFEM posterior covariance.\"\n\n # set up mesh and function space\n mesh = UnitSquareMesh(J,J)\n V = FunctionSpace(mesh,'Lagrange',1)\n \n # set up grid\n x_grid = V.tabulate_dof_coordinates()\n \n # set up boundary condition\n def boundary(x, on_boundary):\n return on_boundary\n \n bc = DirichletBC(V, 0.0, boundary)\n \n # get the boundary and interior dofs\n bc_dofs = bc.get_boundary_values().keys()\n first, last = V.dofmap().ownership_range()\n all_dofs = range(last - first)\n interior_dofs = list(set(all_dofs) - set(bc_dofs))\n bc_dofs = list(set(bc_dofs))\n \n # set up the function κ\n κ = Constant(1.0)\n\n # get the mass and stiffness matrices\n u = TrialFunction(V)\n v = TestFunction(V)\n \n mass_form = u*v*dx\n a = inner(κ*grad(u),grad(v))*dx\n \n M = assemble(mass_form)\n A = assemble(a)\n M = as_backend_type(M).mat()\n A = as_backend_type(A).mat()\n M = csr_matrix(M.getValuesCSR()[::-1],shape=M.size)\n A = csr_matrix(A.getValuesCSR()[::-1],shape=A.size)\n \n # extract the submatrices corresponding to the interior dofs\n M = M[interior_dofs,:][:,interior_dofs]\n A = A[interior_dofs,:][:,interior_dofs]\n \n # get the forcing cov matrix on the interior nodes of the grid\n Σ_int = kernMat(k_f,x_grid[interior_dofs],parallel,translation_inv)\n\n # form the matrix Q in the defintion of the approximate FEM cov mat\n # Note: overwrite Σ_int for memory efficiency\n Σ_int = M @ Σ_int @ M.T\n\n Σ_int = spsolve(A,Σ_int)\n Σ_int = spsolve(A,Σ_int.T).T\n \n # ensure Σ_int is symmetric\n Σ_int = 0.5*(Σ_int + Σ_int.T)\n \n # get big phi matrix on the grid (only need the interior nodes)\n Phi_grid = BigPhiMat(J,grid)[interior_dofs,:]\n \n # get big phi matrix on the sensor grid (only need the interior nodes)\n Phi_Y = BigPhiMat(J,Y)[interior_dofs,:]\n \n # assemble the FEM cov mat on the sensor grid using Σ_int and Phi_Y\n Σ_Y = Phi_Y.T @ Σ_int @ Phi_Y\n \n # assemble the FEM cov mat on the grid using Σ_int and Phi_grid\n Σ_X = Phi_grid.T @ Σ_int @ Phi_grid\n \n # assemble cross term matrix (with ijth entry c(x_i,y_j))\n Σ_XY = Phi_grid.T @ Σ_int @ Phi_Y\n \n # return these sigma matrices\n return Σ_Y, Σ_X, Σ_XY", 
"_____no_output_____" ] ], [ [ "`post_fem_cov_assembler` takes in several arguments which are explained below:\n\n- `J`: controls the FE mesh size ($h=1/J^2$)\n- `k_f`: the covariance function for the forcing $f$\n- `grid`: the fixed reference grid $\\{x_{i}\\}_{i=1}^{N}$ on which to assemble the posterior cov mat\n- `Y`: vector of sensor locations.\n- `parallel`: boolean argument indicating whether the computation of the forcing cov mat should be done in parallel\n- `translation_inv`: boolean argument indicating whether the computation of the forcing cov mat should be computed assuming `k_f` is translation invariant or not", "_____no_output_____" ], [ "Finally, we create the function `c_post_fem_assembler` which assembles the statFEM posterior cov mat on the reference grid using the matrices `post_fem_cov_assembler` returns.", "_____no_output_____" ] ], [ [ "#export\ndef c_post_fem_assembler(J,k_f,grid,Y,ϵ,par,trans):\n \"Function to assemble the statFEM posterior cov mat on a reference grid specified by grid.\"\n # use post_fem_cov_assembler to get the sigma matrices needed for posterior cov mat\n Σ_Y, Σ_X, Σ_XY = post_fem_cov_assembler(J,k_f,grid,Y,parallel=par,translation_inv=trans)\n \n # create the matrix B (store in Σ_Y for memory efficiency)\n s = len(Y) # number of sensor points\n Σ_Y += (ϵ**2)*np.eye(s)\n \n #form the posterior cov matrix (store in Σ_X for memory efficiency)\n Σ_X -= Σ_XY @ np.linalg.solve(Σ_Y,Σ_XY.T)\n \n return Σ_X", "_____no_output_____" ], [ "#hide\nfrom nbdev.export import notebook2script; notebook2script()", "Converted 00_oneDim.ipynb.\nConverted 01_twoDim.ipynb.\nConverted index.ipynb.\nConverted oneDim_prior_results.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
e7a5435527ade2806fd6056b81b2ce36af7b6aa1
73,843
ipynb
Jupyter Notebook
otros/06_reg_lineal.ipynb
anakarinarm/TallerModNum
36e54897f23b5ac70f125eba5e1ad7055871df4a
[ "Apache-2.0" ]
3
2022-02-14T20:30:06.000Z
2022-02-14T21:23:11.000Z
otros/06_reg_lineal.ipynb
anakarinarm/TallerModNum
36e54897f23b5ac70f125eba5e1ad7055871df4a
[ "Apache-2.0" ]
null
null
null
otros/06_reg_lineal.ipynb
anakarinarm/TallerModNum
36e54897f23b5ac70f125eba5e1ad7055871df4a
[ "Apache-2.0" ]
null
null
null
208.596045
23,368
0.91035
[ [ [ "## Regresión lineal\n\n**Temas Selectos de Modelación Numérica** <br>\nFacultad de Ciencias, UNAM <br>\nSemestre 2021-2\n\nEn este notebook aprenderemos como hacer una regresión lineal por el método de mínimos cuadrados y por el método matricial. \n\nNo olvides resolver los ejercicios de tarea al final del notebook. Entrega tu solución en un notebook en la carpeta de Classroom con el nombre `apellido_nombre_tarea05.ipynb`.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## 1. Mínimos cuadrados \n\nAjuste de rectas de la forma $y=mx+b$ por mínimos cuadrados. La idea detrás de este método es que queremos encontrar la pendiente $m$ y la ordenada al origen $b$ que nos dan la recta que minimiza la suma de los cuadrados de las distancias entre los puntos (digamos, datos) y la recta ajustada:\n\n![min_cuadrados](min_cuadrados.png)\n\nEs decir, que cuando sumemos el cuadrado de todas las distancias de los puntos a la recta (líneas azules) el valor que obtengamos sea el más pequeño posible (este tipo de problemas se llaman problemas de optimización). \n\nSe pueden ajustar todo tipo de funciones por este método, no sólo rectas, pero para el caso particular de la recta, se obtiene que la pendiente y la ordenada al origen de la recta que minimiza el cuadrado de las distancias se calcula como:\n\n\\begin{align}\n\\tag{1}\nm =\\frac{N \\sum(x_iy_i) − \\sum x_i\\sum y_i}{N \\sum(x_i^2) − (\\sum x_i)^2} \\label{eq1}\\\\\n\\end{align}\n\n\\begin{align}\nb = \\frac{\\sum y_i − m \\sum x_i}{N} \\tag{2}\n\\end{align}\n\nen donde $N$ es el número de mediciones o puntos, $x_i$, $y_i$ son las mediciones y las sumas ($\\sum$) son sobre todas las mediciones.\n\n**OJO**: Debido a que no es necesario graficar los datos para realizar un ajuste por mínimos cuadrados, se puede caer en errores graves como tratar de ajustar una recta a un conjunto de mediciones cuya relación no es lineal. Por eso **es muy importante graficar** los datos y asegurarse de que la relación entre las variables es lineal antes de aplicar el método de mínimos cuadrados. \n\nSiguiendo las ecuaciones anteriores definamos una función `reg_lineal` que calcule a pendiente y ordenada al origen de la recta que mejor se ajusta a los \"datos\" usando el método de mínimos cuadrados:", "_____no_output_____" ] ], [ [ "def reg_lineal(X,Y):\n '''Esta función calcula la pendiente y la ordenada al origen de la recta y=mx+b por mínimos \n cuadrados a partir de los vectores de mediciones X y Y.\n Input: \n X - arreglo de numpy 1D\n Y - arreglo de numpy 1D del mismo tamaño que X.\n Output:\n m, b : Escalares, la pendiente y ordenada al origen.\n '''\n N = len(X) # numero de valores del vector X\n sum_xy = np.sum(X*Y) # suma de todos los Xi*Yi\n sum_x = np.sum(X) # suma de todas las X\n sum_y = np.sum(Y) # suma de todas las Y\n sum_x2 = np.sum(X**2) # suma de todas las Xˆ2\n\n m = ((N*sum_xy) - (sum_x*sum_y)) / ((N*sum_x2) - (sum_x**2))\n b = (sum_y - (m*sum_x)) / N\n return(m, b)", "_____no_output_____" ] ], [ [ "Probemos nuestra función que calcula la regresión lineal. 
"Let's test our function that computes the linear regression. To do this, we generate a vector X and a vector f(X)=Y as follows:", "_____no_output_____" ] ], [ [ "X = np.linspace(1,10,10)\nY = 1 + 2*X + 1*np.random.randn(1) # f(x)=y=1+2x+d \n\nplt.plot(X,Y,'o')\nplt.xlabel('x')\nplt.ylabel('y=f(x)')\nplt.show()", "_____no_output_____" ] ], [ [ "Now we can test the `reg_lineal` function using X and Y:", "_____no_output_____" ] ], [ [ "m, b = reg_lineal(X,Y)\nprint('The slope m is %f and the intercept b is %f' %(m,b))\nY2 = m*X+b\n\nplt.plot(X,Y,'o', label='Y')\nplt.plot(X,Y2,'-',label='regression')\nplt.xlabel('x')\nplt.ylabel('y=f(x)')\nplt.legend()\nplt.show()", "The slope m is 2.000000 and the intercept b is 2.756253\n" ] ], [ [ "## 2. Matrix method\n(Note: the material in this section was taken from the blog [cmdlinetips](https://cmdlinetips.com/2020/03/linear-regression-using-matrix-multiplication-in-python-using-numpy/)) \n\nWe can also carry out linear regressions using the matrix method. Recall that in a linear regression we want to fit our data, observations, etc. with the linear model $$y=\\beta_0+\\beta_1X+\\epsilon$$ and estimate the model parameters $\\beta_0$ and $\\beta_1$, which are the intercept and the slope, respectively.\n\nWe can combine the \"predictor variables\", in this case X, into a matrix that has a column vector full of ones (which multiplies $\\beta_0$) and X (which multiplies $\\beta_1$):", "_____no_output_____" ] ], [ [ "X_mat = np.vstack((np.ones(len(X)), X)).T # in this case we use the vstack function\n                                          # and the T (transpose) method to get the\n                                          # appropriate dimensions", "_____no_output_____" ] ], [ [ "With a little linear algebra, and with the goal of minimizing the mean squared error of the system of linear equations, we find that the value of the parameters $\\hat{\\beta}=(\\beta_0, \\beta_1)$ can be computed as:\n\n$$\\hat{\\beta}=(X^T. X)^{-1}. X^T. Y$$", "_____no_output_____" ], [ "We can implement this equation using the matrix-inverse and matrix-multiplication functions from numpy's linear algebra module `linalg`:", "_____no_output_____" ] ], [ [ "beta = np.linalg.inv(X_mat.T.dot(X_mat)).dot(X_mat.T).dot(Y)\n\nprint('The slope beta_1 is %f and the intercept beta_0 is %f' %(beta[1], beta[0]))", "The slope beta_1 is 2.000000 and the intercept beta_0 is 2.756253\n" ] ], [ [ "which are the same values for the slope and the intercept that we found using the `reg_lineal` function. Now let us use these parameters to estimate the values of Y:", "_____no_output_____" ] ], [ [ "Y_mat = X_mat.dot(beta)\n\nplt.plot(X,Y,'o', label='Y')\nplt.plot(X,Y_mat,'-',label='matrix method regression')\nplt.xlabel('x')\nplt.ylabel('y=f(x)')\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "## 3. Example using both methods", "_____no_output_____" ] ], [ [ "X = np.linspace(1,10)\nY = 1 + 2*X + 3*X**2 + 20*np.random.randn(1)\n\n# Matrix method - now we have an extra term, X^2\nX_mat = np.vstack((np.ones(len(X)), X, X**2)).T\nbeta = np.linalg.inv(X_mat.T.dot(X_mat)).dot(X_mat.T).dot(Y)\nY_mat = X_mat.dot(beta)\n\n# reg_lineal function\nm, b = reg_lineal(X,Y)\nY2 = m*X+b\n\nplt.plot(X,Y,'o',label='data')\nplt.plot(X,Y_mat, label='Matrix method')\nplt.plot(X,Y2, label='Least squares - straight line')\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ 
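"Before drawing a conclusion from the figure, here is an optional numerical complement (an illustrative sketch; it only uses the variables `Y`, `Y_mat` and `Y2` defined in the previous cell): comparing the residual sum of squares of the two fits makes the difference between them explicit.", "_____no_output_____" ] ], [ [ "# Optional, illustrative comparison of the two fits from the previous cell:\n# residual sum of squares (smaller means a closer fit to these data).\nrss_matrix = np.sum((Y - Y_mat)**2)  # matrix method including the X**2 term\nrss_line = np.sum((Y - Y2)**2)       # straight-line fit from reg_lineal\n\nprint('RSS, matrix method (with X**2 term): %.2f' % rss_matrix)\nprint('RSS, straight line (reg_lineal): %.2f' % rss_line)", "_____no_output_____" ] ], [ [ 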
"Oops, clearly fitting a straight line to these data was not a good idea. That is why it is important to visually inspect the data we want to fit.", "_____no_output_____" ], [ "### Homework exercises:\n\n1. Derive the expressions for the intercept and the slope of a straight line fitted by the least-squares method. (Hand in the derivation in a separate file).\n\n2. Write a function that computes the regression for a degree-3 polynomial. In a notebook, define the function and test it using the degree-3 polynomial $y = 2x^3+5x^2-11x+7$.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7a548a52df61f835714ab013ed3afeda0031636
72,898
ipynb
Jupyter Notebook
tutorials/W3D4_DeepLearning1/student/W3D4_Tutorial1.ipynb
florgf88/course-content
35268525be4cfb79814a5932dddee80bda95e722
[ "CC-BY-4.0" ]
2
2021-05-12T02:19:05.000Z
2021-05-12T13:49:29.000Z
tutorials/W3D4_DeepLearning1/student/W3D4_Tutorial1.ipynb
pattanaikay/course-content
b9c79974109a279121e6875cdcd2e69f39aeb2fb
[ "CC-BY-4.0", "BSD-3-Clause" ]
1
2020-08-26T10:44:11.000Z
2020-08-26T10:44:11.000Z
tutorials/W3D4_DeepLearning1/student/W3D4_Tutorial1.ipynb
pattanaikay/course-content
b9c79974109a279121e6875cdcd2e69f39aeb2fb
[ "CC-BY-4.0", "BSD-3-Clause" ]
1
2021-05-02T10:03:07.000Z
2021-05-02T10:03:07.000Z
49.056528
1,248
0.633227
[ [ [ "<a href=\"https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D4_DeepLearning1/student/W3D4_Tutorial1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Neuromatch Academy: Week 3, Day 4, Tutorial 1\n# Deep Learning: Decoding Neural Responses\n\n**Content creators**: Jorge A. Menendez, Carsen Stringer\n\n**Content reviewers**: Roozbeh Farhoodi, Madineh Sarvestani, Kshitij Dwivedi, Spiros Chavlis, Ella Batty, Michael Waskom\n", "_____no_output_____" ], [ "---\n# Tutorial Objectives\nIn this tutorial, we'll use deep learning to decode stimulus information from the responses of sensory neurons. Specifically, we'll look at the activity of ~20,000 neurons in mouse primary visual cortex responding to oriented gratings recorded in [this study](https://www.biorxiv.org/content/10.1101/679324v2.abstract). Our task will be to decode the orientation of the presented stimulus from the responses of the whole population of neurons. We could do this in a number of ways, but here we'll use deep learning. Deep learning is particularly well-suited to this problem for a number of reasons:\n* The data are very high-dimensional: the neural response to a stimulus is a ~20,000 dimensional vector. Many machine learning techniques fail in such high dimensions, but deep learning actually thrives in this regime, as long as you have enough data (which we do here!).\n* As you'll be able to see below, different neurons can respond quite differently to stimuli. This complex pattern of responses will, therefore, require non-linear methods to be decoded, which we can easily do with non-linear activation functions in deep networks.\n* Deep learning architectures are highly flexible, meaning we can easily adapt the architecture of our decoding model to optimize decoding. Here, we'll focus on a single architecture, but you'll see that it can easily be modified with few changes to the code.\n\nMore concretely, our goal will be learn how to:\n* Build a deep feed-forward network using PyTorch\n* Evaluate the network's outputs using PyTorch built-in loss functions\n* Compute gradients of the loss with respect to each parameter of the network using automatic differentiation\n* Implement gradient descent to optimize the network's parameters\n\nThis tutorial will take up the first full session (equivalent to two tutorials on other days).", "_____no_output_____" ] ], [ [ "#@title Video 1: Decoding from neural data using feed-forward networks in pytorch\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"SlrbMvvBOzM\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtu.be/\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "---\n# Setup\n", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch import optim\n\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt", "_____no_output_____" ], [ "#@title Data retrieval and loading\nimport hashlib\nimport requests\n\nfname = \"W3D4_stringer_oribinned1.npz\"\nurl = \"https://osf.io/683xc/download\"\nexpected_md5 = \"436599dfd8ebe6019f066c38aed20580\"\n\nif not os.path.isfile(fname):\n try:\n r = requests.get(url)\n except requests.ConnectionError:\n print(\"!!! Failed to download data !!!\")\n else:\n if r.status_code != requests.codes.ok:\n print(\"!!! 
Failed to download data !!!\")\n elif hashlib.md5(r.content).hexdigest() != expected_md5:\n print(\"!!! Data download appears corrupted !!!\")\n else:\n with open(fname, \"wb\") as fid:\n fid.write(r.content)", "_____no_output_____" ], [ "#@title Figure Settings\n%config InlineBackend.figure_format = 'retina'\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle\")", "_____no_output_____" ], [ "#@title Helper Functions\n\ndef load_data(data_name=fname, bin_width=1):\n \"\"\"Load mouse V1 data from Stringer et al. (2019)\n\n Data from study reported in this preprint:\n https://www.biorxiv.org/content/10.1101/679324v2.abstract\n\n These data comprise time-averaged responses of ~20,000 neurons\n to ~4,000 stimulus gratings of different orientations, recorded\n through Calcium imaginge. The responses have been normalized by\n spontanous levels of activity and then z-scored over stimuli, so\n expect negative numbers. They have also been binned and averaged\n to each degree of orientation.\n\n This function returns the relevant data (neural responses and\n stimulus orientations) in a torch.Tensor of data type torch.float32\n in order to match the default data type for nn.Parameters in\n Google Colab.\n\n This function will actually average responses to stimuli with orientations\n falling within bins specified by the bin_width argument. This helps\n produce individual neural \"responses\" with smoother and more\n interpretable tuning curves.\n\n Args:\n bin_width (float): size of stimulus bins over which to average neural\n responses\n\n Returns:\n resp (torch.Tensor): n_stimuli x n_neurons matrix of neural responses,\n each row contains the responses of each neuron to a given stimulus.\n As mentioned above, neural \"response\" is actually an average over\n responses to stimuli with similar angles falling within specified bins.\n stimuli: (torch.Tensor): n_stimuli x 1 column vector with orientation\n of each stimulus, in degrees. 
This is actually the mean orientation\n of all stimuli in each bin.\n\n \"\"\"\n with np.load(data_name) as dobj:\n data = dict(**dobj)\n resp = data['resp']\n stimuli = data['stimuli']\n\n if bin_width > 1:\n # Bin neural responses and stimuli\n bins = np.digitize(stimuli, np.arange(0, 360 + bin_width, bin_width))\n stimuli_binned = np.array([stimuli[bins == i].mean() for i in np.unique(bins)])\n resp_binned = np.array([resp[bins == i, :].mean(0) for i in np.unique(bins)])\n else:\n resp_binned = resp\n stimuli_binned = stimuli\n\n # Return as torch.Tensor\n resp_tensor = torch.tensor(resp_binned, dtype=torch.float32)\n stimuli_tensor = torch.tensor(stimuli_binned, dtype=torch.float32).unsqueeze(1) # add singleton dimension to make a column vector\n\n return resp_tensor, stimuli_tensor\n\n\ndef plot_data_matrix(X, ax):\n \"\"\"Visualize data matrix of neural responses using a heatmap\n\n Args:\n X (torch.Tensor or np.ndarray): matrix of neural responses to visualize\n with a heatmap\n ax (matplotlib axes): where to plot\n\n \"\"\"\n\n cax = ax.imshow(X, cmap=mpl.cm.pink, vmin=np.percentile(X, 1), vmax=np.percentile(X, 99))\n cbar = plt.colorbar(cax, ax=ax, label='normalized neural response')\n\n ax.set_aspect('auto')\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef identityLine():\n \"\"\"\n Plot the identity line y=x\n \"\"\"\n ax = plt.gca()\n lims = np.array([ax.get_xlim(), ax.get_ylim()])\n minval = lims[:, 0].min()\n maxval = lims[:, 1].max()\n equal_lims = [minval, maxval]\n ax.set_xlim(equal_lims)\n ax.set_ylim(equal_lims)\n line = ax.plot([minval, maxval], [minval, maxval], color=\"0.7\")\n line[0].set_zorder(-1)\n\ndef get_data(n_stim, train_data, train_labels):\n \"\"\" Return n_stim randomly drawn stimuli/resp pairs\n\n Args:\n n_stim (scalar): number of stimuli to draw\n resp (torch.Tensor):\n train_data (torch.Tensor): n_train x n_neurons tensor with neural\n responses to train on\n train_labels (torch.Tensor): n_train x 1 tensor with orientations of the\n stimuli corresponding to each row of train_data, in radians\n\n Returns:\n (torch.Tensor, torch.Tensor): n_stim x n_neurons tensor of neural responses and n_stim x 1 of orientations respectively\n \"\"\"\n n_stimuli = train_labels.shape[0]\n istim = np.random.choice(n_stimuli, n_stim)\n r = train_data[istim] # neural responses to this stimulus\n ori = train_labels[istim] # true stimulus orientation\n\n return r, ori\n\ndef stimulus_class(ori, n_classes):\n \"\"\"Get stimulus class from stimulus orientation\n\n Args:\n ori (torch.Tensor): orientations of stimuli to return classes for\n n_classes (int): total number of classes\n\n Returns:\n torch.Tensor: 1D tensor with the classes for each stimulus\n\n \"\"\"\n bins = np.linspace(0, 360, n_classes + 1)\n return torch.tensor(np.digitize(ori.squeeze(), bins)) - 1 # minus 1 to accomodate Python indexing\n\ndef plot_decoded_results(train_loss, test_labels, predicted_test_labels):\n \"\"\" Plot decoding results in the form of network training loss and test predictions\n\n Args:\n train_loss (list): training error over iterations\n test_labels (torch.Tensor): n_test x 1 tensor with orientations of the\n stimuli corresponding to each row of train_data, in radians\n predicted_test_labels (torch.Tensor): n_test x 1 tensor with predicted orientations of the\n stimuli from decoding neural network\n\n \"\"\"\n\n # Plot results\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))\n\n # Plot the training loss over iterations of GD\n ax1.plot(train_loss)\n\n # Plot true stimulus 
orientation vs. predicted class\n ax2.plot(stimuli_test.squeeze(), predicted_test_labels, '.')\n\n ax1.set_xlim([0, None])\n ax1.set_ylim([0, None])\n ax1.set_xlabel('iterations of gradient descent')\n ax1.set_ylabel('negative log likelihood')\n ax2.set_xlabel('true stimulus orientation ($^o$)')\n ax2.set_ylabel('decoded orientation bin')\n ax2.set_xticks(np.linspace(0, 360, n_classes + 1))\n ax2.set_yticks(np.arange(n_classes))\n class_bins = [f'{i * 360 / n_classes: .0f}$^o$ - {(i + 1) * 360 / n_classes: .0f}$^o$' for i in range(n_classes)]\n ax2.set_yticklabels(class_bins);\n\n # Draw bin edges as vertical lines\n ax2.set_ylim(ax2.get_ylim()) # fix y-axis limits\n for i in range(n_classes):\n lower = i * 360 / n_classes\n upper = (i + 1) * 360 / n_classes\n ax2.plot([lower, lower], ax2.get_ylim(), '-', color=\"0.7\", linewidth=1, zorder=-1)\n ax2.plot([upper, upper], ax2.get_ylim(), '-', color=\"0.7\", linewidth=1, zorder=-1)\n\n plt.tight_layout()", "_____no_output_____" ] ], [ [ "---\n# Section 1: Load and visualize data\n\nIn the next cell, we have provided code to load the data and plot the matrix of neural responses.\n\nNext to it, we plot the tuning curves of three randomly selected neurons.", "_____no_output_____" ] ], [ [ "#@title\n\n#@markdown Execute this cell to load and visualize data\n\n# Load data\nresp_all, stimuli_all = load_data() # argument to this function specifies bin width\nn_stimuli, n_neurons = resp_all.shape\n\nprint(f'{n_neurons} neurons in response to {n_stimuli} stimuli')\n\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(2 * 6, 5))\n\n# Visualize data matrix\nplot_data_matrix(resp_all[:100, :].T, ax1) # plot responses of first 100 neurons\nax1.set_xlabel('stimulus')\nax1.set_ylabel('neuron')\n\n# Plot tuning curves of three random neurons\nineurons = np.random.choice(n_neurons, 3, replace=False) # pick three random neurons\nax2.plot(stimuli_all, resp_all[:, ineurons])\nax2.set_xlabel('stimulus orientation ($^o$)')\nax2.set_ylabel('neural response')\nax2.set_xticks(np.linspace(0, 360, 5))\n\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "We will split our data into a training set and test set. In particular, we will have a training set of orientations (`stimuli_train`) and the corresponding responses (`resp_train`). Our testing set will have held-out orientations (`stimuli_test`) and the corresponding responses (`resp_test`).", "_____no_output_____" ] ], [ [ "#@title\n#@markdown Execute this cell to split into training and test sets\n\n# Set random seeds for reproducibility\nnp.random.seed(4)\ntorch.manual_seed(4)\n\n# Split data into training set and testing set\nn_train = int(0.6 * n_stimuli) # use 60% of all data for training set\nishuffle = torch.randperm(n_stimuli)\nitrain = ishuffle[:n_train] # indices of data samples to include in training set\nitest = ishuffle[n_train:] # indices of data samples to include in testing set\nstimuli_test = stimuli_all[itest]\nresp_test = resp_all[itest]\nstimuli_train = stimuli_all[itrain]\nresp_train = resp_all[itrain]", "_____no_output_____" ] ], [ [ "---\n# Section 2: Deep feed-forward networks in *pytorch* \n\nWe'll now build a simple deep neural network that takes as input a vector of neural responses and outputs a single number representing the decoded stimulus orientation.\n\nTo keep things simple, we'll build a deep network with **one** hidden layer. 
See the appendix for a deeper discussion of what this choice entails, and when one might want to use deeper/shallower and wider/narrower architectures.\n\nLet $\\mathbf{r}^{(n)} = \\begin{bmatrix} r_1^{(n)} & r_2^{(n)} & \\ldots & r_N^{(n)} \\end{bmatrix}^T$ denote the vector of neural responses (of neurons $1, \\ldots, N$) to the $n$th stimulus. The network we will use is described by the following set of equations:\n\\begin{align}\n \\mathbf{h}^{(n)} &= \\mathbf{W}^{in} \\mathbf{r}^{(n)} + \\mathbf{b}^{in}, && [\\mathbf{W}^{in}: M \\times N], \\\\\n y^{(n)} &= \\mathbf{W}^{out} \\mathbf{h}^{(n)} + \\mathbf{b}^{out}, && [\\mathbf{W}^{out}: 1 \\times M],\n\\end{align}\nwhere $y^{(n)}$ denotes the scalar output of the network: the decoded orientation of the $n$th stimulus. \n\nThe $M$-dimensional vector $\\mathbf{h}^{(n)}$ denotes the activations of the **hidden layer** of the network. \n\n<p align=\"center\">\n <img src=\"https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/static/one-layer-network.png?raw=true\" width=\"450\" />\n</p>\n\nThe blue components of this diagram denote the **parameters** of the network, which we will later optimize with gradient descent. These include all the weights and biases $\\mathbf{W}^{in}, \\mathbf{b}^{in}, \\mathbf{W}^{out}, \\mathbf{b}^{out}$.\n\n", "_____no_output_____" ], [ "### Section 2.1: Introduction to PyTorch\n\nHere, we'll use the **PyTorch** package to build, run, and train deep networks of this form in Python. There are two core components to the PyTorch package: \n\n1. The first is the `torch.Tensor` data type used in PyTorch. `torch.Tensor`'s are effectively just like a `numpy` arrays, except that they have some important attributes and methods needed for automatic differentiation (to be discussed below). They also come along with infrastructure for easily storing and computing with them on GPU's, a capability we won't touch on here but which can be really useful in practice.\n\n2. The second core ingredient is the PyTorch `nn.Module` class. This is the class we'll use for constructing deep networks, so that we can then easily train them using built-in PyTorch functions. Keep in my mind that `nn.Module` classes can actually be used to build, run, and train any model -- not just deep networks!\n\n The next cell contains code for building the deep network we defined above using the `nn.Module` class. It contains three key ingredients:\n\n * `__init__()` method to initialize its parameters, like in any other Python class. In this case, it takes two arguments:\n * `n_inputs`: the number of input units. This should always be set to the number of neurons whose activities are being decoded (i.e. the dimensionality of the input to the network). \n * `n_hidden`: the number of hidden units. This is a parameter that we are free to vary in deciding how to build our network. See the appendix for a discussion of how this architectural choice affects the computations the network can perform.\n\n * `nn.Linear` modules, which are built-in PyTorch classes containing all the weights and biases for a given network layer (documentation [here](https://pytorch.org/docs/master/generated/torch.nn.Linear.html)). 
This class takes two arguments to initialize:\n * \\# of inputs to that layer\n * \\# of outputs from that layer\n\n For the input layer, for example, we have:\n * \\# of inputs = \\# of neurons whose responses are to be decoded ($N$, specified by `n_inputs`)\n * \\# of outputs = \\# of hidden layer units ($M$, specified by `n_hidden`)\n \n PyTorch will initialize all weights and biases randomly.\n\n * `forward()` method, which takes as argument an input to the network and returns the network output. In our case, this comprises computing the output $y$ from a given input $\\mathbf{r}$ using the above two equations. See the next cell for code implementing this computation using the built-in PyTorch `nn.Linear` classes.", "_____no_output_____" ] ], [ [ "class DeepNet(nn.Module):\n \"\"\"Deep Network with one hidden layer\n\n Args:\n n_inputs (int): number of input units\n n_hidden (int): number of units in hidden layer\n\n Attributes:\n in_layer (nn.Linear): weights and biases of input layer\n out_layer (nn.Linear): weights and biases of output layer\n\n \"\"\"\n\n def __init__(self, n_inputs, n_hidden):\n super().__init__() # needed to invoke the properties of the parent class nn.Module\n self.in_layer = nn.Linear(n_inputs, n_hidden) # neural activity --> hidden units\n self.out_layer = nn.Linear(n_hidden, 1) # hidden units --> output\n\n def forward(self, r):\n \"\"\"Decode stimulus orientation from neural responses\n\n Args:\n r (torch.Tensor): vector of neural responses to decode, must be of\n length n_inputs. Can also be a tensor of shape n_stimuli x n_inputs,\n containing n_stimuli vectors of neural responses\n\n Returns:\n torch.Tensor: network outputs for each input provided in r. If\n r is a vector, then y is a 1D tensor of length 1. If r is a 2D\n tensor then y is a 2D tensor of shape n_stimuli x 1.\n\n \"\"\"\n h = self.in_layer(r) # hidden representation\n y = self.out_layer(h)\n return y", "_____no_output_____" ] ], [ [ "The next cell contains code for initializing and running this network. We use it to decode stimulus orientation from a vector of neural responses to the very first stimulus. Note that when the initialized network class is called as a function on an input (e.g. `net(r)`), its `.forward()` method is called. This is a special property of the `nn.Module` class.\n\nNote that the decoded orientations at this point will be nonsense, since the network has been initialized with random weights. 
Below, we'll learn how to optimize these weights for good stimulus decoding.", "_____no_output_____" ] ], [ [ "# Set random seeds for reproducibility\nnp.random.seed(1)\ntorch.manual_seed(1)\n\n# Initialize a deep network with M=200 hidden units\nnet = DeepNet(n_neurons, 200)\n\n# Get neural responses (r) to and orientation (ori) to one stimulus in dataset\nr, ori = get_data(1, resp_train, stimuli_train) # using helper function get_data\n\n# Decode orientation from these neural responses using initialized network\nout = net(r) # compute output from network, equivalent to net.forward(r)\n\nprint('decoded orientation: %.2f degrees' % out)\nprint('true orientation: %.2f degrees' % ori)", "_____no_output_____" ] ], [ [ "---\n### Section 2.2: Activation functions", "_____no_output_____" ] ], [ [ "#@title Video 2: Nonlinear activation functions\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"JAdukDCQALA\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtu.be/\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "Note that the deep network we constructed above comprises solely **linear** operations on each layer: each layer is just a weighted sum of the elements in the previous layer. It turns out that linear hidden layers like this aren't particularly useful, since a sequence of linear transformations is actually essentially the same as a single linear transformation. We can see this from the above equations by plugging in the first one into the second one to obtain\n\\begin{equation}\n y^{(n)} = \\mathbf{W}^{out} \\left( \\mathbf{W}^{in} \\mathbf{r}^{(n)} + \\mathbf{b}^{in} \\right) + \\mathbf{b}^{out} = \\mathbf{W}^{out}\\mathbf{W}^{in} \\mathbf{r}^{(n)} + \\left( \\mathbf{W}^{out}\\mathbf{b}^{in} + \\mathbf{b}^{out} \\right)\n\\end{equation}\nIn other words, the output is still just a weighted sum of elements in the input -- the hidden layer has done nothing to change this.\n\nTo extend the set of computable input/output transformations to more than just weighted sums, we'll incorporate a **non-linear activation function** in the hidden units. This is done by simply modifying the equation for the hidden layer activations to be\n\\begin{equation}\n \\mathbf{h}^{(n)} = \\phi(\\mathbf{W}^{in} \\mathbf{r}^{(n)} + \\mathbf{b}^{in})\n\\end{equation}\nwhere $\\phi$ is referred to as the activation function. Using a non-linear activation function will ensure that the hidden layer performs a non-linear transformation of the input, which will make our network much more powerful (or *expressive*, cf. appendix). In practice, deep networks *always* use non-linear activation functions.\n\n", "_____no_output_____" ], [ "#### Exercise 1: Nonlinear Activations \n\nCreate a new class `DeepNetReLU` by modifying our above deep network model to use a non-linear activation function. We'll use the linear rectification function:\n\\begin{equation}\n \\phi(x) = \n \\begin{cases}\n x & \\text{if } x > 0 \\\\\n 0 & \\text{else}\n \\end{cases}\n\\end{equation}\nwhich can be implemented in PyTorch using `torch.relu()`. 
Hidden layers with this activation function are typically referred to as \"**Re**ctified **L**inear **U**nits\", or **ReLU**'s.\n\nInitialize this network with 20 hidden units and run on an example stimulus.\n\n**Hint**: you only need to modify the `forward()` method of the above `DeepNet()` class.\n", "_____no_output_____" ] ], [ [ "class DeepNetReLU(nn.Module):\n\n def __init__(self, n_inputs, n_hidden):\n super().__init__() # needed to invoke the properties of the parent class nn.Module\n self.in_layer = nn.Linear(n_inputs, n_hidden) # neural activity --> hidden units\n self.out_layer = nn.Linear(n_hidden, 1) # hidden units --> output\n\n def forward(self, r):\n\n ############################################################################\n ## TO DO for students: write code for computing network output using a\n ## rectified linear activation function for the hidden units\n # Fill out function and remove\n raise NotImplementedError(\"Student exercise: complete DeepNetReLU forward\")\n ############################################################################\n\n h = ...\n y = ...\n\n return y\n\n\n# Set random seeds for reproducibility\nnp.random.seed(1)\ntorch.manual_seed(1)\n\n# Get neural responses (r) to and orientation (ori) to one stimulus in dataset\nr, ori = get_data(1, resp_train, stimuli_train)\n\n# Uncomment to test your class\n\n# Initialize deep network with M=20 hidden units and uncomment lines below\n# net = DeepNetReLU(...)\n\n# Decode orientation from these neural responses using initialized network\n# net(r) is equivalent to net.forward(r)\n# out = net(r)\n\n# print('decoded orientation: %.2f degrees' % out)\n# print('true orientation: %.2f degrees' % ori)", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_5bdc2033.py)\n\n", "_____no_output_____" ], [ "You should see that the decoded orientation is 0.13 $^{\\circ}$ while the true orientation is 139.00 $^{\\circ}$.", "_____no_output_____" ], [ "---\n# Section 3: Loss functions and gradient descent\n", "_____no_output_____" ] ], [ [ "#@title Video 3: Loss functions & gradient descent\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"aEtKpzEuviw\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtu.be/\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "### Section 3.1: Loss functions\n\nBecause the weights of the network are currently randomly chosen, the outputs of the network are nonsense: the decoded stimulus orientation is nowhere close to the true stimulus orientation. We'll shortly write some code to change these weights so that the network does a better job of decoding.\n\nBut to do so, we first need to define what we mean by \"better\". One simple way of defining this is to use the squared error\n\\begin{equation}\n L = (y - \\tilde{y})^2\n\\end{equation}\nwhere $y$ is the network output and $\\tilde{y}$ is the true stimulus orientation. When the decoded stimulus orientation is far from the true stimulus orientation, $L$ will be large. We thus refer to $L$ as the **loss function**, as it quantifies how *bad* the network is at decoding stimulus orientation.\n\nPyTorch actually carries with it a number of built-in loss functions. The one corresponding to the squared error is called `nn.MSELoss()`. 
This will take as arguments a **batch** of network outputs $y_1, y_2, \\ldots, y_P$ and corresponding target outputs $\\tilde{y}_1, \\tilde{y}_2, \\ldots, \\tilde{y}_P$, and compute the **mean squared error (MSE)**\n\\begin{equation}\n L = \\frac{1}{P}\\sum_{n=1}^P \\left(y^{(n)} - \\tilde{y}^{(n)}\\right)^2\n\\end{equation}\n\n", "_____no_output_____" ], [ "#### Exercise 2: Computing MSE \n\n\nEvaluate the mean squared error for a deep network with $M=20$ rectified linear units, on the decoded orientations from neural responses to 20 random stimuli.", "_____no_output_____" ] ], [ [ "# Set random seeds for reproducibility\nnp.random.seed(1)\ntorch.manual_seed(1)\n\n# Initialize a deep network with M=20 hidden units\nnet = DeepNetReLU(n_neurons, 20)\n\n# Get neural responses to first 20 stimuli in the data set\nr, ori = get_data(20, resp_train, stimuli_train)\n\n# Decode orientation from these neural responses\nout = net(r)\n\n###################################################\n## TO DO for students: evaluate mean squared error\n###################################################\n\n# Initialize PyTorch mean squared error loss function (Hint: look at nn.MSELoss)\nloss_fn = ...\n\n# Evaluate mean squared error\nloss = ...\n\n# Uncomment once above is filled in\n# print('mean squared error: %.2f' % loss)", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_0e539ef5.py)\n\n", "_____no_output_____" ], [ "You should see a mean squared error of 42943.75.", "_____no_output_____" ], [ "---\n### Section 3.2: Optimization with gradient descent\n\nOur goal is now to modify the weights to make the mean squared error loss $L$ as small as possible over the whole data set. To do this, we'll use the **gradient descent (GD)** algorithm, which consists of iterating three steps:\n1. **Evaluate the loss** on the training data,\n```\nout = net(train_data)\nloss = loss_fn(out, train_labels)\n```\nwhere `train_data` are the network inputs in the training data (in our case, neural responses), and `train_labels` are the target outputs for each input (in our case, true stimulus orientations).\n2. **Compute the gradient of the loss** with respect to each of the network weights. In PyTorch, we can do this with one line of code:\n```\nloss.backward()\n```\nThis command tells PyTorch to compute the gradients of the quantity stored in the variable `loss` with respect to each network parameter using [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation). These gradients are then stored behind the scenes (see appendix for more details).\n3. **Update the network weights** by descending the gradient. In Pytorch, we can do this using built-in optimizers. We'll use the `optim.SGD` optimizer (documentation [here](https://pytorch.org/docs/stable/optim.html#torch.optim.SGD)) which updates parameters along the negative gradient, scaled by a learning rate (see appendix for details). 
To initialize this optimizer, we have to tell it\n * which parameters to update, and\n * what learning rate to use\n\n For example, to optimize *all* the parameters of a network `net` using a learning rate of .001, the optimizer would be initialized as follows\n ```\n optimizer = optim.SGD(net.parameters(), lr=.001)\n ```\n where `.parameters()` is a method of the `nn.Module` class that returns a [Python generator object](https://wiki.python.org/moin/Generators) over all the parameters of that `nn.Module` class (in our case, $\\mathbf{W}^{in}, \\mathbf{b}^{in}, \\mathbf{W}^{out}, \\mathbf{b}^{out}$).\n \n After computing all the parameter gradients in step 2, we can then update each of these parameters using the `.step()` method of this optimizer,\n ```\n optimizer.step()\n ```\n This single line of code will extract all the gradients computed with `.backward()` and execute the SGD updates for each parameter given to the optimizer. Note that this is true no matter how big/small the network is, allowing us to use the same two lines of code to perform the gradient descent updates for any deep network model built using PyTorch.\n\nFinally, an important detail to remember is that the gradients of each parameter need to be cleared before calling `.backward()`, or else PyTorch will try to accumulate gradients across iterations. This can again be done using built-in optimizers via the method `zero_grad()`, as follows:\n```\noptimizer.zero_grad()\n```\n\nPutting all this together, each iteration of the GD algorithm will contain a block of code that looks something like this:\n```\nGet outputs from network\nEvaluate loss\n\n# Compute gradients\noptimizer.zero_grad() # clear gradients\nloss.backward()\n\n# Update weights\noptimizer.step()\n```\n\nIn the next exercise, we'll give you a code skeleton for implementing the GD algorithm. Your job will be to fill in the blanks.\n\nFor the mathematical details of the GD algorithm, see the appendix. Note, in particular, that here we are using the gradient descent algorithm, rather than the more commonly used *stochastic* gradient descent algorithm. See the appendix for a more detailed discussion of how these differ and when one might need to use the stochastic variant.", "_____no_output_____" ], [ "#### Exercise 3: Gradient descent in PyTorch\n\nComplete the function `train()` that uses the gradient descent algorithm to optimize the weights of a given network. This function takes as input arguments\n* `net`: the PyTorch network whose weights to optimize\n* `loss_fn`: the PyTorch loss function to use to evaluate the loss\n* `train_data`: the training data to evaluate the loss on (i.e. neural responses to decode)\n* `train_labels`: the target outputs for each data point in `train_data` (i.e. true stimulus orientations)\n\nWe will then train a neural network on our data and plot the loss (mean squared error) over time. 
When we run this function, behind the scenes PyTorch is actually changing the parameters inside this network to make the network better at decoding, so its weights will now be different than they were at initialization.\n\n\n**Hint:** all the code you need for doing this is provided in the above description of the GD algorithm.", "_____no_output_____" ] ], [ [ "def train(net, loss_fn, train_data, train_labels, n_iter=50, learning_rate=1e-4):\n \"\"\"Run gradient descent to opimize parameters of a given network\n\n Args:\n net (nn.Module): PyTorch network whose parameters to optimize\n loss_fn: built-in PyTorch loss function to minimize\n train_data (torch.Tensor): n_train x n_neurons tensor with neural\n responses to train on\n train_labels (torch.Tensor): n_train x 1 tensor with orientations of the\n stimuli corresponding to each row of train_data, in radians\n n_iter (int): number of iterations of gradient descent to run\n learning_rate (float): learning rate to use for gradient descent\n\n Returns:\n (list): training loss over iterations\n\n \"\"\"\n\n # Initialize PyTorch SGD optimizer\n optimizer = optim.SGD(net.parameters(), lr=learning_rate)\n\n # Placeholder to save the loss at each iteration\n track_loss = []\n\n # Loop over epochs (cf. appendix)\n for i in range(n_iter):\n\n ######################################################################\n ## TO DO for students: fill in missing code for GD iteration\n raise NotImplementedError(\"Student exercise: write code for GD iterations\")\n ######################################################################\n\n # Evaluate loss using loss_fn\n out = ... # compute network output from inputs in train_data\n loss = ... # evaluate loss function\n\n # Compute gradients\n ...\n\n # Update weights\n ...\n\n # Store current value of loss\n track_loss.append(loss.item()) # .item() needed to transform the tensor output of loss_fn to a scalar\n\n # Track progress\n if (i + 1) % (n_iter // 5) == 0:\n print(f'iteration {i + 1}/{n_iter} | loss: {loss.item():.3f}')\n\n return track_loss\n\n# Set random seeds for reproducibility\nnp.random.seed(1)\ntorch.manual_seed(1)\n\n# Initialize network\nnet = DeepNetReLU(n_neurons, 20)\n\n# Initialize built-in PyTorch MSE loss function\nloss_fn = nn.MSELoss()\n\n# Run GD on data\n#train_loss = train(net, loss_fn, resp_train, stimuli_train)\n\n# Plot the training loss over iterations of GD\n#plt.plot(train_loss)\nplt.xlim([0, None])\nplt.ylim([0, None])\nplt.xlabel('iterations of gradient descent')\nplt.ylabel('mean squared error')\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_8f827dbe.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D4_DeepLearning1/static/W3D4_Tutorial1_Solution_8f827dbe_1.png>\n\n", "_____no_output_____" ], [ "---\n# Section 4: Evaluating model performance\n\n", "_____no_output_____" ], [ "## Section 4.1: Generalization performance with test data\n\nNote that gradient descent is essentially an algorithm for fitting the network's parameters to a given set of training data. Selecting this training data is thus crucial for ensuring that the optimized parameters **generalize** to unseen data they weren't trained on. 
In our case, for example, we want to make sure that our trained network is good at decoding stimulus orientations from neural responses to any orientation, not just those in our data set.\n\nTo ensure this, we have split up the full data set into a **training set** and a **testing set**. In Exercise 3, we trained a deep network by optimizing the parameters on a training set. We will now evaluate how good the optimized parameters are by using the trained network to decode stimulus orientations from neural responses in the testing set. Good decoding performance on this testing set should then be indicative of good decoding performance on the neurons' responses to any other stimulus orientation. This procedure is commonly used in machine learning (not just in deep learning)and is typically referred to as **cross-validation**.\n\nWe will compute the MSE on the test data and plot the decoded stimulus orientations as a function of the true stimulus.\n", "_____no_output_____" ] ], [ [ "#@title\n#@markdown Execute this cell to evaluate and plot test error\n\nout = net(resp_test) # decode stimulus orientation for neural responses in testing set\nori = stimuli_test # true stimulus orientations\ntest_loss = loss_fn(out, ori) # MSE on testing set (Hint: use loss_fn initialized in previous exercise)\n\nplt.plot(ori, out.detach(), '.') # N.B. need to use .detach() to pass network output into plt.plot()\nidentityLine() # draw the identity line y=x; deviations from this indicate bad decoding!\nplt.title('MSE on testing set: %.2f' % test_loss.item()) # N.B. need to use .item() to turn test_loss into a scalar\nplt.xlabel('true stimulus orientation ($^o$)')\nplt.ylabel('decoded stimulus orientation ($^o$)')\naxticks = np.linspace(0, 360, 5)\nplt.xticks(axticks)\nplt.yticks(axticks)\nplt.show()", "_____no_output_____" ] ], [ [ "**PyTorch Note**:\n\nAn important thing to note in the code snippet for plotting the decoded orientations is the `.detach()` method. The PyTorch `nn.Module` class is special in that, behind the scenes, each of the variables inside it are linked to each other in a computational graph, for the purposes of automatic differentiation (the algorithm used in `.backward()` to compute gradients). As a result, if you want to do anything that is not a `torch` operation to the parameters or outputs of an `nn.Module` class, you'll need to first \"detach\" it from its computational graph. This is what the `.detach()` method does. In this hidden code above, we need to call it on the outputs of the network so that we can plot them with the `plt.plot()` function.", "_____no_output_____" ], [ "---\n## (Bonus) Section 4.2: Model criticism\n\nPlease move to the Summary and visit this section only if you have time after completing all non-bonus material! 
\n\nLet's now take a step back and think about how our model is succeeding/failing and how to improve it.", "_____no_output_____" ] ], [ [ "#@title\n#@markdown Execute this cell to plot decoding error\n\nout = net(resp_test) # decode stimulus orientation for neural responses in testing set\nori = stimuli_test # true stimulus orientations\nerror = out - ori # decoding error\n\n\nplt.plot(ori, error.detach(), '.') # plot decoding error as a function of true orientation (make sure all arguments to plt.plot() have been detached from PyTorch network!)\n\n# Plotting\nplt.xlabel('true stimulus orientation ($^o$)')\nplt.ylabel('decoding error ($^o$)')\nplt.xticks(np.linspace(0, 360, 5))\nplt.yticks(np.linspace(-360, 360, 9))\nplt.show()", "_____no_output_____" ] ], [ [ "### Think \n\nIn the cell below, we will plot the *decoding error* for each neural response in the testing set. The decoding error is defined as the decoded stimulus orientation minus true stimulus orientation\n\\begin{equation}\n \\text{decoding error} = y^{(n)} - \\tilde{y}^{(n)}\n\\end{equation}\n\nIn particular, we plot decoding error as a function of the true stimulus orientation.\n\n\n * Are some stimulus orientations harder to decode than others?\n * If so, in what sense? Are the decoded orientations for these stimuli more variable and/or are they biased?\n * Can you explain this variability/bias? What makes these stimulus orientations different from the others?\n * (Will be addressed in next exercise) Can you think of a way to modify the deep network in order to avoid this?", "_____no_output_____" ], [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_3ccf8501.py)\n\n", "_____no_output_____" ], [ "### (Advanced Bonus) Exercise 4: Improving the loss function \nAs illustrated in the previous exercise, the squared error is not a good loss function for circular quantities like angles, since two angles that are very close (e.g. $1^o$ and $359^o$) might actually have a very large squared error.\n\nHere, we'll avoid this problem by changing our loss function to treat our decoding problem as a **classification problem**. Rather than estimating the *exact* angle of the stimulus, we'll now aim to construct a decoder that classifies the stimulus into one of $C$ classes, corresponding to different bins of angles of width $b = \\frac{360}{C}$. The true class $\\tilde{y}^{(n)}$ of stimulus $i$ is now given by\n\\begin{equation}\n \\tilde{y}^{(n)} =\n \\begin{cases}\n 1 &\\text{if angle of stimulus $n$ is in the range } [0, b] \\\\\n 2 &\\text{if angle of stimulus $n$ is in the range } [b, 2b] \\\\\n 3 &\\text{if angle of stimulus $n$ is in the range } [2b, 3b] \\\\\n \\vdots \\\\\n C &\\text{if angle of stimulus $n$ is in the range } [(C-1)b, 360]\n \\end{cases}\n\\end{equation}\n\nWe have a helper function `stimulus_class` that will extract `n_classes` stimulus classes for us from the stimulus orientations.", "_____no_output_____" ], [ "To decode the stimulus class from neural responses, we'll use a deep network that outputs a $C$-dimensional vector of probabilities $\\mathbf{p} = \\begin{bmatrix} p_1, p_2, \\ldots, p_C \\end{bmatrix}^T$, corresponding to the estimated probabilities of the stimulus belonging to each class $1, 2, \\ldots, C$. \n\nTo ensure the network's outputs are indeed probabilities (i.e. 
they are positive numbers between 0 and 1, and sum to 1), we'll use a [softmax function](https://en.wikipedia.org/wiki/Softmax_function) to transform the real-valued outputs from the hidden layer into probabilities. Letting $\\sigma(\\cdot)$ denote this softmax function, the equations describing our network are\n\\begin{align}\n \\mathbf{h}^{(n)} &= \\phi(\\mathbf{W}^{in} \\mathbf{r}^{(n)} + \\mathbf{b}^{in}), && [\\mathbf{W}^{in}: M \\times N], \\\\\n \\mathbf{p}^{(n)} &= \\sigma(\\mathbf{W}^{out} \\mathbf{h}^{(n)} + \\mathbf{b}^{out}), && [\\mathbf{W}^{out}: C \\times M],\n\\end{align}\nThe decoded stimulus class is then given by that assigned the highest probability by the network:\n\\begin{equation}\n y^{(n)} = \\underset{i}{\\arg\\max} \\,\\, p_i\n\\end{equation}\nThe softmax function can be implemented in PyTorch simply using `torch.softmax()`.\n\nOften *log* probabilities are easier to work with than actual probabilities, because probabilities tend to be very small numbers that computers have trouble representing. We'll therefore actually use the logarithm of the softmax as the output of our network,\n\\begin{equation}\n \\mathbf{l}^{(n)} = \\log \\left( \\mathbf{p}^{(n)} \\right)\n\\end{equation}\nwhich can implemented in PyTorch together with the softmax via an `nn.LogSoftmax` layer. The nice thing about the logarithmic function is that it's *monotonic*, so if one probability is larger/smaller than another, then its logarithm is also larger/smaller than the other's. We therefore have that\n\\begin{equation}\n y^{(n)} = \\underset{i}{\\arg\\max} \\,\\, p_i^{(n)} = \\underset{i}{\\arg\\max} \\, \\log p_i^{(n)} = \\underset{i}{\\arg\\max} \\,\\, l_i^{(n)}\n\\end{equation}\n\nSee the next cell for code for constructing a deep network with one hidden layer that of ReLU's that outputs a vector of log probabilities.", "_____no_output_____" ] ], [ [ "# Deep network for classification\nclass DeepNetSoftmax(nn.Module):\n \"\"\"Deep Network with one hidden layer, for classification\n\n Args:\n n_inputs (int): number of input units\n n_hidden (int): number of units in hidden layer\n n_classes (int): number of outputs, i.e. number of classes to output\n probabilities for\n\n Attributes:\n in_layer (nn.Linear): weights and biases of input layer\n out_layer (nn.Linear): weights and biases of output layer\n\n \"\"\"\n\n def __init__(self, n_inputs, n_hidden, n_classes):\n super().__init__() # needed to invoke the properties of the parent class nn.Module\n self.in_layer = nn.Linear(n_inputs, n_hidden) # neural activity --> hidden units\n self.out_layer = nn.Linear(n_hidden, n_classes) # hidden units --> outputs\n self.logprob = nn.LogSoftmax(dim=1) # probabilities across columns should sum to 1 (each output row corresponds to a different input)\n\n def forward(self, r):\n \"\"\"Predict stimulus orientation bin from neural responses\n\n Args:\n r (torch.Tensor): n_stimuli x n_inputs tensor with neural responses to n_stimuli\n\n Returns:\n torch.Tensor: n_stimuli x n_classes tensor with predicted class probabilities\n\n \"\"\"\n h = torch.relu(self.in_layer(r))\n logp = self.logprob(self.out_layer(h))\n return logp", "_____no_output_____" ] ], [ [ "What should our loss function now be? Ideally, we want the probabilities outputted by our network to be such that the probability of the true stimulus class is high. 
One way to formalize this is to say that we want to maximize the *log* probability of the true stimulus class $\\tilde{y}^{(n)}$ under the class probabilities predicted by the network,\n\\begin{equation}\n \\log \\left( \\text{predicted probability of stimulus } n \\text{ being of class } \\tilde{y}^{(n)} \\right) = \\log p^{(n)}_{\\tilde{y}^{(n)}} = l^{(n)}_{\\tilde{y}^{(n)}}\n\\end{equation}\nTo turn this into a loss function to be *minimized*, we can then simply multiply it by -1: maximizing the log probability is the same as minimizing the *negative* log probability. Summing over a batch of $P$ inputs, our loss function is then given by\n\\begin{equation}\n L = -\\sum_{n=1}^P \\log p^{(n)}_{\\tilde{y}^{(n)}} = -\\sum_{n=1}^P l^{(n)}_{\\tilde{y}^{(n)}}\n\\end{equation}\nIn the deep learning community, this loss function is typically referred to as the **cross-entropy**, or **negative log likelihood**. The corresponding built-in loss function in PyTorch is `nn.NLLLoss()` (documentation [here](https://pytorch.org/docs/master/generated/torch.nn.CrossEntropyLoss.html)).\n\nIn the next cell, we've provided most of the code to train and test a network to decode stimulus orientations via classification, by minimizing the negative log likelihood. Fill in the missing pieces.\n\nOnce you've done this, have a look at the plotted results. Does changing the loss function from mean squared error to a classification loss solve our problems? Note that errors may still occur -- but are these errors as bad as the ones that our network above was making?", "_____no_output_____" ] ], [ [ "def decode_orientation(n_classes, train_data, train_labels, test_data, test_labels):\n \"\"\" Initialize, train, and test deep network to decode binned orientation from neural responses\n\n Args:\n n_classes (scalar): number of classes in which to bin orientation\n train_data (torch.Tensor): n_train x n_neurons tensor with neural\n responses to train on\n train_labels (torch.Tensor): n_train x 1 tensor with orientations of the\n stimuli corresponding to each row of train_data, in radians\n test_data (torch.Tensor): n_test x n_neurons tensor with neural\n responses to train on\n test_labels (torch.Tensor): n_test x 1 tensor with orientations of the\n stimuli corresponding to each row of train_data, in radians\n\n Returns:\n (list, torch.Tensor): training loss over iterations, n_test x 1 tensor with predicted orientations of the\n stimuli from decoding neural network\n \"\"\"\n\n # Bin stimulus orientations in training set\n train_binned_labels = stimulus_class(train_labels, n_classes)\n\n ##############################################################################\n ## TODO for students: fill out missing pieces below to initialize, train, and\n # test network\n # Fill out function and remove\n raise NotImplementedError(\"Student exercise: complete decode_orientation function\")\n ##############################################################################\n\n # Initialize network\n net = ... # use M=20 hidden units\n\n # Initialize built-in PyTorch MSE loss function\n loss_fn = nn.NLLLoss()\n\n # Run GD on training set data, using learning rate of 0.1\n train_loss = ...\n\n # Decode neural responses in testing set data\n out = ...\n out_labels = np.argmax(out.detach(), axis=1) # predicted classes\n\n return train_loss, out_labels\n\n\n# Set random seeds for reproducibility\nnp.random.seed(1)\ntorch.manual_seed(1)\n\nn_classes = 12 # start with 12, then (bonus) try making this as big as possible! 
does decoding get worse?\n\n# Uncomment below to test your function\n\n# Initialize, train, and test network\n#train_loss, predicted_test_labels = decode_orientation(n_classes, resp_train, stimuli_train, resp_test, stimuli_test)\n\n# Plot results\n#plot_decoded_results(train_loss, stimuli_test, predicted_test_labels)", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_b22595d5.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=1134 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D4_DeepLearning1/static/W3D4_Tutorial1_Solution_b22595d5_2.png>\n\n", "_____no_output_____" ], [ "---\n# Summary\n\nWe have now covered a number of common and powerful techniques for applying deep learning to decoding from neural data, some of which are common to almost any machine learning problem:\n* Building and training deep networks using the **PyTorch** `nn.Module` class and built-in **optimizers**\n* Choosing and evaluating **loss functions**\n* Testing a trained model on unseen data via **cross-validation**, by splitting the data into a **training set and testing set**\n\nAn important aspect of this tutorial was the `train()` function we wrote in exercise 6. Note that it can be used to train *any* network to minimize *any* loss function (cf. advanced exercise) on *any* training data. This is the power of using PyTorch to train neural networks and, for that matter, **any other model**! There is nothing in the `nn.Module` class that forces us to use `nn.Linear` layers that implement neural network operations. You can actually put anything you want inside the `.__init__()` and `.forward()` methods of this class. As long as its parameters and computations involve only `torch.Tensor`'s, and the model is differentiable, you'll then be able to optimize the parameters of this model in exactly the same way we optimized the deep networks here.\n\nWhat kinds of conclusions can we draw from these sorts of analyses? If we can decode the stimulus well from visual cortex activity, that means that there is information about this stimulus available in visual cortex. Whether or not the animal uses that information to make decisions is not determined from an analysis like this. In fact mice perform poorly in orientation discrimination tasks compared to monkeys and humans, even though they have information about these stimuli in their visual cortex. Why do you think they perform poorly in orientation discrimination tasks?\n\nSee this paper for some potential hypotheses (https://www.biorxiv.org/content/10.1101/679324v2), but this is totally an open question!", "_____no_output_____" ], [ "---\n# Appendix", "_____no_output_____" ], [ "## Neural network *depth*, *width* and *expressivity*\n\nTwo important architectural choices that always have to be made when constructing deep feed-forward networks like those used here are\n* the number of hidden layers, or the network's *depth*\n* the number of units in each layer, or the layer *widths*\n\nHere, we restricted ourselves to networks with a single hidden layer with a width of $M$ units, but it is easy to see how this code could be adapted to arbitrary depths. 
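For example, here is a minimal sketch of a two-hidden-layer variant of the `DeepNetReLU` class defined earlier in this tutorial (the class name and the second hidden layer are illustrative additions, not part of the tutorial's exercises, and assume the `torch`/`nn` imports used in the earlier cells):\n```\nclass DeepNetReLUTwoLayer(nn.Module):\n    # Sketch: same pattern as DeepNetReLU, with one extra hidden layer\n    def __init__(self, n_inputs, n_hidden1, n_hidden2):\n        super().__init__()\n        self.in_layer = nn.Linear(n_inputs, n_hidden1)    # inputs --> first hidden layer\n        self.mid_layer = nn.Linear(n_hidden1, n_hidden2)  # first --> second hidden layer\n        self.out_layer = nn.Linear(n_hidden2, 1)          # second hidden layer --> output\n\n    def forward(self, r):\n        h1 = torch.relu(self.in_layer(r))\n        h2 = torch.relu(self.mid_layer(h1))\n        return self.out_layer(h2)\n```\n\n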
Adding another hidden layer simply requires adding another `nn.Linear` module to the `__init__()` method and incorporating it into the `.forward()` method.\n\nThe depth and width of a network determine the set of input/output transormations that it can perform, often referred to as its *expressivity*. The deeper and wider the network, the more *expressive* it is; that is, the larger the class of input/output transformations it can compute. In fact, it turns out that an infinitely wide *or* infinitely deep networks can in principle [compute (almost) *any* input/output transformation](https://en.wikipedia.org/wiki/Universal_approximation_theorem).\n\nA classic mathematical demonstration of the power of depth is given by the so-called [XOR problem](https://medium.com/@jayeshbahire/the-xor-problem-in-neural-networks-50006411840b#:~:text=The%20XOr%2C%20or%20%E2%80%9Cexclusive%20or,value%20if%20they%20are%20equal.). This toy problem demonstrates how even a single hidden layer can drastically expand the set of input/output transformations a network can perform, relative to a shallow network with no hidden layers. The key intuition is that the hidden layer allows you to represent the input in a new format, which can then allow you to do almost anything you want with it. The *wider* this hidden layer, the more flexibility you have in this representation. In particular, if you have more hidden units than input units, then the hidden layer representation of the input is higher-dimensional than the raw data representation. This higher dimensionality effectively gives you more \"room\" to perform arbitrary computations in. It turns out that even with just this one hidden layer, if you make it wide enough you can actually approximate any input/output transformation you want. See [here](http://neuralnetworksanddeeplearning.com/chap4.html) for a neat visual demonstration of this.\n\nIn practice, however, it turns out that increasing depth seems to grant more expressivity with fewer units than increasing width does (for reasons that are not well understood). It is for this reason that truly *deep* networks are almost always used in machine learning, which is why this set of techniques is often referred to as *deep* learning.\n\nThat said, there is a cost to making networks deeper and wider. The bigger your network, the more parameters (i.e. weights and biases) it has, which need to be optimized! The extra expressivity afforded by higher width and/or depth thus carries with it (at least) two problems:\n* optimizing more parameters usually requires more data\n* a more highly parameterized network is more prone to overfit to the training data, so requires more sophisticated optimization algorithms to ensure generalization", "_____no_output_____" ], [ "## Gradient descent equations\n\nHere we provide the equations for the three steps of the gradient descent algorithm, as applied to our decoding problem:\n\n1. **Evaluate the loss** on the training data. For a mean squared error loss, this is given by\n\\begin{equation}\n L = \\frac{1}{P}\\sum_{n=1}^P (y^{(n)} - \\tilde{y}^{(n)})^2\n\\end{equation}\nwhere $y^{(n)}$ denotes the stimulus orientation decoded from the population response $\\mathbf{r}^{(n)}$ to the $n$th stimulus in the training data, and $\\tilde{y}^{(n)}$ is the true orientation of that stimulus. $P$ denotes the total number of data samples in the training set. 
In the syntax of our `train()` function above, $\\mathbf{r}^{(n)}$ is given by `train_data[n, :]` and $\\tilde{y}^{(n)}$ by `train_labels[n]`.\n\n2. **Compute the gradient of the loss** with respect to each of the network weights. In our case, this entails computing the quantities\n\\begin{equation}\n \\frac{\\partial L}{\\partial \\mathbf{W}^{in}}, \\frac{\\partial L}{\\partial \\mathbf{b}^{in}}, \\frac{\\partial L}{\\partial \\mathbf{W}^{out}}, \\frac{\\partial L}{\\partial \\mathbf{b}^{out}}\n\\end{equation}\nUsually, we would require lots of math in order to derive each of these gradients, and lots of code to compute them. But this is where PyTorch comes to the rescue! Using a cool technique called [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation), PyTorch automatically calculates these gradients when the `.backward()` function is called.\n\n More specifically, when this function is called on a particular variable (e.g. `loss`, as above), PyTorch will compute the gradients with respect to each network parameter. These are computed and stored behind the scenes, and can be accessed through the `.grad` attribute of each of the network's parameters. As we saw above, however, we actually never need to look at or call these gradients when implementing gradient descent, as this can be taken care of by PyTorch's built-in optimizers, like `optim.SGD`.\n\n3. **Update the network weights** by descending the gradient:\n\\begin{align}\n \\mathbf{W}^{in} &\\leftarrow \\mathbf{W}^{in} - \\alpha \\frac{\\partial L}{\\partial \\mathbf{W}^{in}} \\\\\n \\mathbf{b}^{in} &\\leftarrow \\mathbf{b}^{in} - \\alpha \\frac{\\partial L}{\\partial \\mathbf{b}^{in}} \\\\\n \\mathbf{W}^{out} &\\leftarrow \\mathbf{W}^{out} - \\alpha \\frac{\\partial L}{\\partial \\mathbf{W}^{out}} \\\\\n \\mathbf{b}^{out} &\\leftarrow \\mathbf{b}^{out} - \\alpha \\frac{\\partial L}{\\partial \\mathbf{b}^{out}}\n\\end{align}\nwhere $\\alpha$ is called the **learning rate**. This **hyperparameter** of the SGD algorithm controls how far we descend the gradient on each iteration. It should be as large as possible so that fewer iterations are needed, but not too large so as to avoid parameter updates from skipping over minima in the loss landscape.\n\nWhile the equations written down here are specific to the network and loss function considered in this tutorial, the code provided above for implementing these three steps is completely general: no matter what loss function or network you are using, exactly the same commands can be used to implement these three steps.", "_____no_output_____" ], [ "## *Stochastic* gradient descent (SGD) vs. gradient descent (GD)\n\nIn this tutorial, we used the gradient descent algorithm, which differs in a subtle yet very important way from the more commonly used **stochastic gradient descent (SGD)** algorithm. The key difference is in the very first step of each iteration, where in the GD algorithm we evaluate the loss *at every data sample in the training set*. In SGD, on the other hand, we evaluate the loss only at a random subset of data samlpes from the full training set, called a **mini-batch**. At each iteration, we randomly sample a mini-batch to perform steps 1-3 on. All the above equations still hold, but now the $P$ data samples $\\mathbf{r}^{(n)}, \\tilde{y}^{(n)}$ denote a mini-batch of $P$ random samples from the training set, rather than the whole training set.\n\nThere are several reasons why one might want to use SGD instead of GD. 
The first is that the training set might be too big, so that we can't actually evaluate the loss on every single data sample in it. In this case, GD is simply infeasible, so we have no choice but to turn to SGD, which bypasses the restrictive memory demands of GD by sub-sampling the training set into smaller mini-batches (a short sketch of this sampling step is given below).\n\nBut, even when GD is feasible, SGD turns out to be generally better. The stochasticity induced by the extra random sampling step in SGD effectively adds some noise in the search for local minima of the loss function. This can be really useful for avoiding potential local minima, and can help ensure that whatever minimum is converged to is a good one. This is particularly important when networks are wider and/or deeper, in which case the large number of parameters can lead to overfitting.\n\nHere, we used only GD because (1) it is simpler, and (2) it suffices for the problem being considered here. Because we have so many neurons in our data set, decoding is not too challenging and doesn't require a particularly deep or wide network. The small number of parameters in our deep networks therefore can be optimized without a problem using GD.", "_____no_output_____" ] ] ]
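A minimal sketch of the mini-batch sampling step described in the SGD discussion above (assuming `net`, `loss_fn`, `optimizer`, `train_data` and `train_labels` are defined as in the tutorial; the batch size is an illustrative value, not one used in the exercises):\n```\nbatch_size = 64                        # illustrative mini-batch size\nn_train = train_data.shape[0]\nshuffle_idx = torch.randperm(n_train)  # random order of training samples\nfor start in range(0, n_train, batch_size):\n    idx = shuffle_idx[start:start + batch_size]  # indices for this mini-batch\n    out = net(train_data[idx])                   # outputs for the mini-batch only\n    loss = loss_fn(out, train_labels[idx])       # loss evaluated on the mini-batch\n    optimizer.zero_grad()                        # clear old gradients\n    loss.backward()                              # compute gradients\n    optimizer.step()                             # update parameters\n```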
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7a54b5b84e9fc39dc6d4355390aacdcc70170bf
13,381
ipynb
Jupyter Notebook
martin/TimeSeriesModelling.ipynb
marhoy/Koopen
50b45a1bc4e6b5a157758e3091925405a28920ab
[ "CC-BY-4.0" ]
1
2021-03-19T14:40:35.000Z
2021-03-19T14:40:35.000Z
martin/TimeSeriesModelling.ipynb
marhoy/Koopen
50b45a1bc4e6b5a157758e3091925405a28920ab
[ "CC-BY-4.0" ]
null
null
null
martin/TimeSeriesModelling.ipynb
marhoy/Koopen
50b45a1bc4e6b5a157758e3091925405a28920ab
[ "CC-BY-4.0" ]
null
null
null
26.083821
119
0.552425
[ [ [ "%load_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "import math\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nimport patsy\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom statsmodels.tsa.ar_model import AutoReg\n\nplt.rcParams[\"figure.dpi\"] = 100\n\ndef rms(s: pd.Series) -> float:\n return np.sqrt((s**2).mean())", "_____no_output_____" ] ], [ [ "# Load data", "_____no_output_____" ] ], [ [ "from sensor import create_raw_data_file\ncreate_raw_data_file()", "_____no_output_____" ], [ "# Read all data from parquet file\ndata = pd.read_parquet(\"raw_data_all.parquet\")\n\n# For simplicity, select sensor 3\ndata = data[data[\"sensor\"] == \"node_03\"]\n\n# Replace 0-measurements with missing\ndata.loc[data[\"Leq\"] == 0, \"Leq\"] = None\n\n# For simplicity, downsample to 10 minutes\ndata = data.resample(\"1min\").median()\n\n# Forward-fill missing values\ndata = data.fillna(method=\"ffill\")\n\n# Add some extra columns\ndata[\"hour\"] = data.index.hour\ndata[\"dow\"] = data.index.dayofweek\ndata[\"workday\"] = (data.index.dayofweek < 5).astype(int)\ndata[\"doy\"] = data.index.dayofyear\ndata[\"week\"] = data.index.week\ndata[\"workhour\"] = data[\"hour\"].isin(range(6,21))*data[\"hour\"]\n\ndata.head()", "_____no_output_____" ], [ "fig = px.line(data, y=\"Leq\", title=f\"Raw data resampled to {data.index.freq.n} minutes\", color=\"week\")\nfig.show()", "_____no_output_____" ], [ "decomposed = sm.tsa.seasonal_decompose(data[\"Leq\"], period=pd.Timedelta(\"24hours\") // data.index.freq)\nfig = decomposed.plot()\nfig.set_size_inches(10,10)", "_____no_output_____" ], [ "decomposed = sm.tsa.seasonal_decompose(data[\"Leq\"], period=pd.Timedelta(\"1W\") // data.index.freq)\nfig = decomposed.plot()\nfig.set_size_inches(10,10)", "_____no_output_____" ], [ "# Split in training and test\ntrain = data[data[\"week\"].isin([7, 8, 9, 10, 11])]\ntest = data[data[\"week\"].isin([12, 13, 14, 15])]\ntrain_test = pd.concat([train, test])\ntrain_test.loc[train.index, \"dataset\"] = \"train\"\ntrain_test.loc[test.index, \"dataset\"] = \"test\"", "_____no_output_____" ] ], [ [ "# Linear model", "_____no_output_____" ] ], [ [ "model_formula = \"C(dow) + C(hour):C(dow)\" # week\"\n# model_formula = \"C(workday) + C(workhour):C(workday)\"\n\nlinmodel = smf.ols(formula=f\"Leq ~ {model_formula}\", data=train).fit()\nlinmodel_resid = train_test[\"Leq\"] - linmodel.predict(train_test)\nlinmodel.summary()", "_____no_output_____" ], [ "fig = make_subplots(rows=2, cols=1, shared_xaxes=True)\nfig.add_trace(go.Scatter(x=train.index, y=train[\"Leq\"], name=\"Train\"), row=1, col=1)\nfig.add_trace(go.Scatter(x=test.index, y=test[\"Leq\"], name=\"Test\"), row=1, col=1)\nfig.add_trace(go.Scatter(x=train_test.index, y=linmodel.predict(train_test), name=\"Model\"), row=1, col=1)\n\nfig.add_trace(go.Scatter(x=train_test.index, y=linmodel_resid, name=\"Residual\"), row=2, col=1)\n\nfig.update_layout(\n title=\"Linear model results\",\n width=1000,\n height=800,\n hovermode=\"x\"\n)", "_____no_output_____" ], [ "linmodel_resid.groupby(train_test[\"dataset\"]).apply(rms)", "_____no_output_____" ] ], [ [ "# ARX model", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(2, 1, figsize=(10, 10))\nfig = sm.graphics.tsa.plot_acf(linmodel.resid, lags=30, ax=ax[0])\nfig = sm.graphics.tsa.plot_pacf(linmodel.resid, lags=30, ax=ax[1])", 
"_____no_output_____" ], [ "exog_train = patsy.dmatrix(model_formula, train)\nexog_test = patsy.dmatrix(model_formula, test)", "_____no_output_____" ], [ "lags = math.ceil(pd.Timedelta(\"4min\") / train.index.freq)\nlags", "_____no_output_____" ], [ "arxmodel = AutoReg(endog=train[\"Leq\"], lags=lags, exog=exog_train).fit()\narxmodel_pred = pd.concat([\n arxmodel.predict(),\n arxmodel.predict(start=test.index[0], end=test.index[-1], exog_oos=exog_test)\n])\narxmodel_resid = train_test[\"Leq\"] - arxmodel_pred", "_____no_output_____" ], [ "fig = make_subplots(rows=2, cols=1, shared_xaxes=True)\nfig.add_trace(go.Scatter(x=train.index, y=train[\"Leq\"], name=\"Train\"), row=1, col=1)\nfig.add_trace(go.Scatter(x=test.index, y=test[\"Leq\"], name=\"Test\"), row=1, col=1)\nfig.add_trace(go.Scatter(x=train_test.index, y=arxmodel_pred, name=\"Model\"), row=1, col=1)\n\nfig.add_trace(go.Scatter(x=train_test.index, y=arxmodel_resid, name=\"Residual\"), row=2, col=1)\n\nfig.update_layout(\n title=\"ARX model results\",\n width=1000,\n height=800,\n hovermode=\"x\"\n)", "_____no_output_____" ], [ "arxmodel_resid.groupby(train_test[\"dataset\"]).apply(rms)", "_____no_output_____" ] ], [ [ "# Model comparison", "_____no_output_____" ] ], [ [ "fig = make_subplots(rows=1, cols=1, shared_xaxes=True)\nfig.add_trace(go.Scatter(x=train_test.index, y=train_test[\"Leq\"], name=\"Measured\"), row=1, col=1)\nfig.add_trace(go.Scatter(x=train_test.index, y=linmodel.predict(train_test), name=\"LinModel\"), row=1, col=1)\nfig.add_trace(go.Scatter(x=train_test.index, y=arxmodel_pred, name=\"ARXModel\"), row=1, col=1)\nfig.update_layout(\n title=\"Model comparison\",\n hovermode=\"x\"\n)", "_____no_output_____" ] ], [ [ "# ARX with dynamic forecasting", "_____no_output_____" ] ], [ [ "exog = patsy.dmatrix(model_formula, train_test)\nmodel = AutoReg(endog=train_test[\"Leq\"], lags=lags, exog=exog).fit()", "_____no_output_____" ], [ "forecast_period = pd.Timedelta(\"3hour\")\n\nt = train_test.index[0] + forecast_period\npreds = pd.Series(dtype=float)\nwhile t < train_test.index[-1] - forecast_period:\n preds = preds.append(model.predict(start=t, end=t + forecast_period, dynamic=t))\n t += forecast_period", "_____no_output_____" ], [ "fig = make_subplots(rows=1, cols=1, shared_xaxes=True)\nfig.add_trace(go.Scatter(x=train_test.index, y=train_test[\"Leq\"], name=\"Measured\"), row=1, col=1)\nfig.add_trace(go.Scatter(x=preds.index, y=preds, name=\"ARX Forecast\"), row=1, col=1)\nfig.update_layout(\n title=\"Dynamic ARX forecasting\",\n hovermode=\"x\"\n)", "_____no_output_____" ], [ "resid = train_test[\"Leq\"] - preds\nrms(resid)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7a55e1b519cf3f0dff477586e1ee8fcf26f34c7
52,657
ipynb
Jupyter Notebook
Notebooks/PY0101EN-1-1-Types.ipynb
tibonobo/Coursera_IBM_DataScience
d099df5a6e686b9100ff3741f2d9c8038339a9b3
[ "MIT" ]
null
null
null
Notebooks/PY0101EN-1-1-Types.ipynb
tibonobo/Coursera_IBM_DataScience
d099df5a6e686b9100ff3741f2d9c8038339a9b3
[ "MIT" ]
null
null
null
Notebooks/PY0101EN-1-1-Types.ipynb
tibonobo/Coursera_IBM_DataScience
d099df5a6e686b9100ff3741f2d9c8038339a9b3
[ "MIT" ]
null
null
null
24.277086
716
0.52673
[ [ [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <a href=\"https://cocl.us/topNotebooksPython101Coursera\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/TopAd.png\" width=\"750\" align=\"center\">\n </a>\n</div>", "_____no_output_____" ], [ "<a href=\"https://cognitiveclass.ai/\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png\" width=\"200\" align=\"center\">\n</a>", "_____no_output_____" ], [ "<h1>Python - Writing Your First Python Code!</h1>", "_____no_output_____" ], [ "<p><strong>Welcome!</strong> This notebook will teach you the basics of the Python programming language. Although the information presented here is quite basic, it is an important foundation that will help you read and write Python code. By the end of this notebook, you'll know the basics of Python, including how to write basic commands, understand some basic types, and how to perform simple operations on them.</p> ", "_____no_output_____" ], [ "<h2>Table of Contents</h2>\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ul>\n <li>\n <a href=\"#hello\">Say \"Hello\" to the world in Python</a>\n <ul>\n <li><a href=\"version\">What version of Python are we using?</a></li>\n <li><a href=\"comments\">Writing comments in Python</a></li>\n <li><a href=\"errors\">Errors in Python</a></li>\n <li><a href=\"python_error\">Does Python know about your error before it runs your code?</a></li>\n <li><a href=\"exercise\">Exercise: Your First Program</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#types_objects\">Types of objects in Python</a>\n <ul>\n <li><a href=\"int\">Integers</a></li>\n <li><a href=\"float\">Floats</a></li>\n <li><a href=\"convert\">Converting from one object type to a different object type</a></li>\n <li><a href=\"bool\">Boolean data type</a></li>\n <li><a href=\"exer_type\">Exercise: Types</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#expressions\">Expressions and Variables</a>\n <ul>\n <li><a href=\"exp\">Expressions</a></li>\n <li><a href=\"exer_exp\">Exercise: Expressions</a></li>\n <li><a href=\"var\">Variables</a></li>\n <li><a href=\"exer_exp_var\">Exercise: Expression and Variables in Python</a></li>\n </ul>\n </li>\n </ul>\n <p>\n Estimated time needed: <strong>25 min</strong>\n </p>\n</div>\n\n<hr>", "_____no_output_____" ], [ "<h2 id=\"hello\">Say \"Hello\" to the world in Python</h2>", "_____no_output_____" ], [ "When learning a new programming language, it is customary to start with an \"hello world\" example. As simple as it is, this one line of code will ensure that we know how to print a string in output and how to execute code within cells in a notebook.", "_____no_output_____" ], [ "<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n[Tip]: To execute the Python code in the code cell below, click on the cell to select it and press <kbd>Shift</kbd> + <kbd>Enter</kbd>.\n</div>\n<hr/>", "_____no_output_____" ] ], [ [ "# Try your first Python output\n\nprint('Hello, Python!')", "Hello, Python!\n" ] ], [ [ "After executing the cell above, you should see that Python prints <code>Hello, Python!</code>. Congratulations on running your first Python code!", "_____no_output_____" ], [ "<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n [Tip:] <code>print()</code> is a function. 
You passed the string <code>'Hello, Python!'</code> as an argument to instruct Python on what to print.\n</div>\n<hr/>", "_____no_output_____" ], [ "<h3 id=\"version\">What version of Python are we using?</h3>", "_____no_output_____" ], [ "<p>\n There are two popular versions of the Python programming language in use today: Python 2 and Python 3. The Python community has decided to move on from Python 2 to Python 3, and many popular libraries have announced that they will no longer support Python 2.\n</p>\n<p>\n Since Python 3 is the future, in this course we will be using it exclusively. How do we know that our notebook is executed by a Python 3 runtime? We can look in the top-right hand corner of this notebook and see \"Python 3\".\n</p>\n<p>\n We can also ask directly Python and obtain a detailed answer. Try executing the following code:\n</p>", "_____no_output_____" ] ], [ [ "# Check the Python Version\n\nimport sys\nprint(sys.version)", "3.6.6 | packaged by conda-forge | (default, Oct 12 2018, 14:43:46) \n[GCC 7.3.0]\n" ] ], [ [ "<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n [Tip:] <code>sys</code> is a built-in module that contains many system-specific parameters and functions, including the Python version in use. Before using it, we must explictly <code>import</code> it.\n</div>\n<hr/>", "_____no_output_____" ], [ "<h3 id=\"comments\">Writing comments in Python</h3>", "_____no_output_____" ], [ "<p>\n In addition to writing code, note that it's always a good idea to add comments to your code. It will help others understand what you were trying to accomplish (the reason why you wrote a given snippet of code). Not only does this help <strong>other people</strong> understand your code, it can also serve as a reminder <strong>to you</strong> when you come back to it weeks or months later.</p>\n\n<p>\n To write comments in Python, use the number symbol <code>#</code> before writing your comment. When you run your code, Python will ignore everything past the <code>#</code> on a given line.\n</p>", "_____no_output_____" ] ], [ [ "# Practice on writing comments\n\nprint('Hello, Python!') # This line prints a string\n# print('Hi')", "Hello, Python!\n" ] ], [ [ "<p>\n After executing the cell above, you should notice that <code>This line prints a string</code> did not appear in the output, because it was a comment (and thus ignored by Python).\n</p>\n<p>\n The second line was also not executed because <code>print('Hi')</code> was preceded by the number sign (<code>#</code>) as well! Since this isn't an explanatory comment from the programmer, but an actual line of code, we might say that the programmer <em>commented out</em> that second line of code.\n</p>", "_____no_output_____" ], [ "<h3 id=\"errors\">Errors in Python</h3>", "_____no_output_____" ], [ "<p>Everyone makes mistakes. For many types of mistakes, Python will tell you that you have made a mistake by giving you an error message. It is important to read error messages carefully to really understand where you made a mistake and how you may go about correcting it.</p>\n<p>For example, if you spell <code>print</code> as <code>frint</code>, Python will display an error message. 
Give it a try:</p>", "_____no_output_____" ] ], [ [ "# Print string as error message\n\nfrint(\"Hello, Python!\")", "_____no_output_____" ] ], [ [ "<p>The error message tells you: \n<ol>\n <li>where the error occurred (more useful in large notebook cells or scripts), and</li> \n <li>what kind of error it was (NameError)</li> \n</ol>\n<p>Here, Python attempted to run the function <code>frint</code>, but could not determine what <code>frint</code> is since it's not a built-in function and it has not been previously defined by us either.</p>", "_____no_output_____" ], [ "<p>\n You'll notice that if we make a different type of mistake, by forgetting to close the string, we'll obtain a different error (i.e., a <code>SyntaxError</code>). Try it below:\n</p>", "_____no_output_____" ] ], [ [ "# Try to see build in error message\n\nprint(\"Hello, Python!)", "_____no_output_____" ] ], [ [ "<h3 id=\"python_error\">Does Python know about your error before it runs your code?</h3>", "_____no_output_____" ], [ "Python is what is called an <em>interpreted language</em>. Compiled languages examine your entire program at compile time, and are able to warn you about a whole class of errors prior to execution. In contrast, Python interprets your script line by line as it executes it. Python will stop executing the entire program when it encounters an error (unless the error is expected and handled by the programmer, a more advanced subject that we'll cover later on in this course).", "_____no_output_____" ], [ "Try to run the code in the cell below and see what happens:", "_____no_output_____" ] ], [ [ "# Print string and error to see the running order\n\nprint(\"This will be printed\")\nfrint(\"This will cause an error\")\nprint(\"This will NOT be printed\")", "This will be printed\n" ] ], [ [ "<h3 id=\"exercise\">Exercise: Your First Program</h3>", "_____no_output_____" ], [ "<p>Generations of programmers have started their coding careers by simply printing \"Hello, world!\". You will be following in their footsteps.</p>\n<p>In the code cell below, use the <code>print()</code> function to print out the phrase: <code>Hello, world!</code></p>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \nprint(\"Hello, world!\")", "Hello, world!\n" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n\nprint(\"Hello, world!\")\n\n-->", "_____no_output_____" ], [ "<p>Now, let's enhance your code with a comment. In the code cell below, print out the phrase: <code>Hello, world!</code> and comment it with the phrase <code>Print the traditional hello world</code> all in one line of code.</p>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \nprint(\"Hello, world!\") # Print the traditional hello world", "Hello, world!\n" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n\nprint(\"Hello, world!\") # Print the traditional hello world\n\n-->\n", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"types_objects\" align=\"center\">Types of objects in Python</h2>", "_____no_output_____" ], [ "<p>Python is an object-oriented language. There are many different types of objects in Python. Let's start with the most common object types: <i>strings</i>, <i>integers</i> and <i>floats</i>. Anytime you write words (text) in Python, you're using <i>character strings</i> (strings for short). The most common numbers, on the other hand, are <i>integers</i> (e.g. 
-1, 0, 100) and <i>floats</i>, which represent real numbers (e.g. 3.14, -42.0).</p>", "_____no_output_____" ], [ "<a align=\"center\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/TypesObjects.png\" width=\"600\">\n</a>", "_____no_output_____" ], [ "<p>The following code cells contain some examples.</p>", "_____no_output_____" ] ], [ [ "# Integer\n\n11", "_____no_output_____" ], [ "# Float\n\n2.14", "_____no_output_____" ], [ "# String\n\n\"Hello, Python 101!\"", "_____no_output_____" ] ], [ [ "<p>You can get Python to tell you the type of an expression by using the built-in <code>type()</code> function. You'll notice that Python refers to integers as <code>int</code>, floats as <code>float</code>, and character strings as <code>str</code>.</p>", "_____no_output_____" ] ], [ [ "# Type of 12\n\ntype(12)", "_____no_output_____" ], [ "# Type of 2.14\n\ntype(2.14)", "_____no_output_____" ], [ "# Type of \"Hello, Python 101!\"\n\ntype(\"Hello, Python 101!\")", "_____no_output_____" ] ], [ [ "<p>In the code cell below, use the <code>type()</code> function to check the object type of <code>12.0</code>.", "_____no_output_____" ] ], [ [ "# Write your code below. Don't forget to press Shift+Enter to execute the cell\ntype(12.0)", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n\ntype(12.0)\n\n-->", "_____no_output_____" ], [ "<h3 id=\"int\">Integers</h3>", "_____no_output_____" ], [ "<p>Here are some examples of integers. Integers can be negative or positive numbers:</p>", "_____no_output_____" ], [ "<a align=\"center\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/TypesInt.png\" width=\"600\">\n</a>", "_____no_output_____" ], [ "<p>We can verify this is the case by using, you guessed it, the <code>type()</code> function:", "_____no_output_____" ] ], [ [ "# Print the type of -1\n\ntype(-1)", "_____no_output_____" ], [ "# Print the type of 4\n\ntype(4)", "_____no_output_____" ], [ "# Print the type of 0\n\ntype(0)", "_____no_output_____" ] ], [ [ "<h3 id=\"float\">Floats</h3> ", "_____no_output_____" ], [ "<p>Floats represent real numbers; they are a superset of integer numbers but also include \"numbers with decimals\". There are some limitations when it comes to machines representing real numbers, but floating point numbers are a good representation in most cases. You can learn more about the specifics of floats for your runtime environment, by checking the value of <code>sys.float_info</code>. This will also tell you what's the largest and smallest number that can be represented with them.</p>\n\n<p>Once again, can test some examples with the <code>type()</code> function:", "_____no_output_____" ] ], [ [ "# Print the type of 1.0\n\ntype(1.0) # Notice that 1 is an int, and 1.0 is a float", "_____no_output_____" ], [ "# Print the type of 0.5\n\ntype(0.5)", "_____no_output_____" ], [ "# Print the type of 0.56\n\ntype(0.56)", "_____no_output_____" ], [ "# System settings about float type\n\nsys.float_info", "_____no_output_____" ] ], [ [ "<h3 id=\"convert\">Converting from one object type to a different object type</h3>", "_____no_output_____" ], [ "<p>You can change the type of the object in Python; this is called typecasting. For example, you can convert an <i>integer</i> into a <i>float</i> (e.g. 
2 to 2.0).</p>\n<p>Let's try it:</p>", "_____no_output_____" ] ], [ [ "# Verify that this is an integer\n\ntype(2)", "_____no_output_____" ] ], [ [ "<h4>Converting integers to floats</h4>\n<p>Let's cast integer 2 to float:</p>", "_____no_output_____" ] ], [ [ "# Convert 2 to a float\n\nfloat(2)", "_____no_output_____" ], [ "# Convert integer 2 to a float and check its type\n\ntype(float(2))", "_____no_output_____" ] ], [ [ "<p>When we convert an integer into a float, we don't really change the value (i.e., the significand) of the number. However, if we cast a float into an integer, we could potentially lose some information. For example, if we cast the float 1.1 to integer we will get 1 and lose the decimal information (i.e., 0.1):</p>", "_____no_output_____" ] ], [ [ "# Casting 1.1 to integer will result in loss of information\n\nint(1.1)", "_____no_output_____" ] ], [ [ "<h4>Converting from strings to integers or floats</h4>", "_____no_output_____" ], [ "<p>Sometimes, we can have a string that contains a number within it. If this is the case, we can cast that string that represents a number into an integer using <code>int()</code>:</p>", "_____no_output_____" ] ], [ [ "# Convert a string into an integer\n\nint('1')", "_____no_output_____" ] ], [ [ "<p>But if you try to do so with a string that is not a perfect match for a number, you'll get an error. Try the following:</p>", "_____no_output_____" ] ], [ [ "# Convert a string into an integer with error\n\nint('1 or 2 people')", "_____no_output_____" ] ], [ [ "<p>You can also convert strings containing floating point numbers into <i>float</i> objects:</p>", "_____no_output_____" ] ], [ [ "# Convert the string \"1.2\" into a float\n\nfloat('1.2')", "_____no_output_____" ] ], [ [ "<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n [Tip:] Note that strings can be represented with single quotes (<code>'1.2'</code>) or double quotes (<code>\"1.2\"</code>), but you can't mix both (e.g., <code>\"1.2'</code>).\n</div>\n<hr/>", "_____no_output_____" ], [ "<h4>Converting numbers to strings</h4>", "_____no_output_____" ], [ "<p>If we can convert strings to numbers, it is only natural to assume that we can convert numbers to strings, right?</p>", "_____no_output_____" ] ], [ [ "# Convert an integer to a string\n\nstr(1)", "_____no_output_____" ] ], [ [ "<p>And there is no reason why we shouldn't be able to make floats into strings as well:</p> ", "_____no_output_____" ] ], [ [ "# Convert a float to a string\n\nstr(1.2)", "_____no_output_____" ] ], [ [ "<h3 id=\"bool\">Boolean data type</h3>", "_____no_output_____" ], [ "<p><i>Boolean</i> is another important type in Python. An object of type <i>Boolean</i> can take on one of two values: <code>True</code> or <code>False</code>:</p>", "_____no_output_____" ] ], [ [ "# Value true\n\nTrue", "_____no_output_____" ] ], [ [ "<p>Notice that the value <code>True</code> has an uppercase \"T\". The same is true for <code>False</code> (i.e. you must use the uppercase \"F\").</p>", "_____no_output_____" ] ], [ [ "# Value false\n\nFalse", "_____no_output_____" ] ], [ [ "<p>When you ask Python to display the type of a boolean object it will show <code>bool</code> which stands for <i>boolean</i>:</p> ", "_____no_output_____" ] ], [ [ "# Type of True\n\ntype(True)", "_____no_output_____" ], [ "# Type of False\n\ntype(False)", "_____no_output_____" ] ], [ [ "<p>We can cast boolean objects to other data types. 
If we cast a boolean with a value of <code>True</code> to an integer or float we will get a one. If we cast a boolean with a value of <code>False</code> to an integer or float we will get a zero. Similarly, if we cast a 1 to a Boolean, you get a <code>True</code>. And if we cast a 0 to a Boolean we will get a <code>False</code>. Let's give it a try:</p> ", "_____no_output_____" ] ], [ [ "# Convert True to int\n\nint(True)", "_____no_output_____" ], [ "# Convert 1 to boolean\n\nbool(1)", "_____no_output_____" ], [ "# Convert 0 to boolean\n\nbool(0)", "_____no_output_____" ], [ "# Convert True to float\n\nfloat(True)", "_____no_output_____" ] ], [ [ "<h3 id=\"exer_type\">Exercise: Types</h3>", "_____no_output_____" ], [ "<p>What is the data type of the result of: <code>6 / 2</code>?</p>", "_____no_output_____" ] ], [ [ "# Write your code below. Don't forget to press Shift+Enter to execute the cell\ntype(6/2)", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\ntype(6/2) # float\n-->", "_____no_output_____" ], [ "<p>What is the type of the result of: <code>6 // 2</code>? (Note the double slash <code>//</code>.)</p>", "_____no_output_____" ] ], [ [ "# Write your code below. Don't forget to press Shift+Enter to execute the cell\ntype(6//2)", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\ntype(6//2) # int, as the double slashes stand for integer division \n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"expressions\">Expression and Variables</h2>", "_____no_output_____" ], [ "<h3 id=\"exp\">Expressions</h3>", "_____no_output_____" ], [ "<p>Expressions in Python can include operations among compatible types (e.g., integers and floats). For example, basic arithmetic operations like adding multiple numbers:</p>", "_____no_output_____" ] ], [ [ "# Addition operation expression\n\n43 + 60 + 16 + 41", "_____no_output_____" ] ], [ [ "<p>We can perform subtraction operations using the minus operator. In this case the result is a negative number:</p>", "_____no_output_____" ] ], [ [ "# Subtraction operation expression\n\n50 - 60", "_____no_output_____" ] ], [ [ "<p>We can do multiplication using an asterisk:</p>", "_____no_output_____" ] ], [ [ "# Multiplication operation expression\n\n5 * 5", "_____no_output_____" ] ], [ [ "<p>We can also perform division with the forward slash:", "_____no_output_____" ] ], [ [ "# Division operation expression\n\n25 / 5", "_____no_output_____" ], [ "# Division operation expression\n\n25 / 6", "_____no_output_____" ] ], [ [ "<p>As seen in the quiz above, we can use the double slash for integer division, where the result is rounded to the nearest integer:", "_____no_output_____" ] ], [ [ "# Integer division operation expression\n\n25 // 5", "_____no_output_____" ], [ "# Integer division operation expression\n\n25 // 6", "_____no_output_____" ] ], [ [ "<h3 id=\"exer_exp\">Exercise: Expression</h3>", "_____no_output_____" ], [ "<p>Let's write an expression that calculates how many hours there are in 160 minutes:", "_____no_output_____" ] ], [ [ "# Write your code below. Don't forget to press Shift+Enter to execute the cell\nhours= 160/60", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n160/60 \n# Or \n160//60\n-->", "_____no_output_____" ], [ "<p>Python follows well accepted mathematical conventions when evaluating mathematical expressions. 
In the following example, Python adds 30 to the result of the multiplication (i.e., 120).", "_____no_output_____" ] ], [ [ "# Mathematical expression\n\n30 + 2 * 60", "_____no_output_____" ] ], [ [ "<p>And just like mathematics, expressions enclosed in parentheses have priority. So the following multiplies 32 by 60.", "_____no_output_____" ] ], [ [ "# Mathematical expression\n\n(30 + 2) * 60", "_____no_output_____" ] ], [ [ "<h3 id=\"var\">Variables</h3>", "_____no_output_____" ], [ "<p>Just like with most programming languages, we can store values in <i>variables</i>, so we can use them later on. For example:</p>", "_____no_output_____" ] ], [ [ "# Store value into variable\n\nx = 43 + 60 + 16 + 41", "_____no_output_____" ] ], [ [ "<p>To see the value of <code>x</code> in a Notebook, we can simply place it on the last line of a cell:</p>", "_____no_output_____" ] ], [ [ "# Print out the value in variable\n\nx", "_____no_output_____" ] ], [ [ "<p>We can also perform operations on <code>x</code> and save the result to a new variable:</p>", "_____no_output_____" ] ], [ [ "# Use another variable to store the result of the operation between variable and value\n\ny = x / 60\ny", "_____no_output_____" ] ], [ [ "<p>If we save a value to an existing variable, the new value will overwrite the previous value:</p>", "_____no_output_____" ] ], [ [ "# Overwrite variable with new value\n\nx = x / 60\nx", "_____no_output_____" ] ], [ [ "<p>It's a good practice to use meaningful variable names, so you and others can read the code and understand it more easily:</p>", "_____no_output_____" ] ], [ [ "# Name the variables meaningfully\n\ntotal_min = 43 + 42 + 57 # Total length of albums in minutes\ntotal_min", "_____no_output_____" ], [ "# Name the variables meaningfully\n\ntotal_hours = total_min / 60 # Total length of albums in hours \ntotal_hours", "_____no_output_____" ] ], [ [ "<p>In the cells above we added the length of three albums in minutes and stored it in <code>total_min</code>. We then divided it by 60 to calculate total length <code>total_hours</code> in hours. You can also do it all at once in a single expression, as long as you use parenthesis to add the albums length before you divide, as shown below.</p>", "_____no_output_____" ] ], [ [ "# Complicate expression\n\ntotal_hours = (43 + 42 + 57) / 60 # Total hours in a single expression\ntotal_hours", "_____no_output_____" ] ], [ [ "<p>If you'd rather have total hours as an integer, you can of course replace the floating point division with integer division (i.e., <code>//</code>).</p>", "_____no_output_____" ], [ "<h3 id=\"exer_exp_var\">Exercise: Expression and Variables in Python</h3>", "_____no_output_____" ], [ "<p>What is the value of <code>x</code> where <code>x = 3 + 2 * 2</code></p>", "_____no_output_____" ] ], [ [ "# Write your code below. Don't forget to press Shift+Enter to execute the cell\nx = 3 + 2 * 2\nprint(x)\nx:8\nprint(x)", "7\n7\n" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n7\n-->\n", "_____no_output_____" ], [ "<p>What is the value of <code>y</code> where <code>y = (3 + 2) * 2</code>?</p>", "_____no_output_____" ] ], [ [ "# Write your code below. 
Don't forget to press Shift+Enter to execute the cell\ny = (3 + 2) * 2\ny", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n10\n-->", "_____no_output_____" ], [ "<p>What is the value of <code>z</code> where <code>z = x + y</code>?</p>", "_____no_output_____" ] ], [ [ "# Write your code below. Don't forget to press Shift+Enter to execute the cell\nz = x + y\nz", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n17\n-->", "_____no_output_____" ], [ "<hr>\n<h2>The last exercise!</h2>\n<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href=\"https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/\" target=\"_blank\">this article</a> to learn how to share your work.\n<hr>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<h2>Get IBM Watson Studio free of charge!</h2>\n <p><a href=\"https://cocl.us/bottemNotebooksPython101Coursera\"><img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png\" width=\"750\" align=\"center\"></a></p>\n</div>", "_____no_output_____" ], [ "<h3>About the Authors:</h3> \n<p><a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\" target=\"_blank\">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>", "_____no_output_____" ], [ "Other contributors: <a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>", "_____no_output_____" ], [ "<hr>\n<p>Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href=\"https://cognitiveclass.ai/mit-license/\">MIT License</a>.</p>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7a55f0c2bd071644ac7c432c00095ef5bd06626
789
ipynb
Jupyter Notebook
Course 4. Deploying Machine Learning Models in Production/2.4 Data Preprocessing.ipynb
The-AI-Book/MLOps
12f05b2e6cfd3a6683da70533ada5f455d66bbce
[ "MIT" ]
null
null
null
Course 4. Deploying Machine Learning Models in Production/2.4 Data Preprocessing.ipynb
The-AI-Book/MLOps
12f05b2e6cfd3a6683da70533ada5f455d66bbce
[ "MIT" ]
null
null
null
Course 4. Deploying Machine Learning Models in Production/2.4 Data Preprocessing.ipynb
The-AI-Book/MLOps
12f05b2e6cfd3a6683da70533ada5f455d66bbce
[ "MIT" ]
null
null
null
18.348837
96
0.523447
[ [ [ "# Week 2. Data Preprocessing\n---\n\n<img src = \"https://i.gyazo.com/74d06174c92c6c31709a4c30e7b94ac5.png\" width = \"600px\">", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
e7a56479c94aad82d55084dd4331fad08fd6fc95
26,155
ipynb
Jupyter Notebook
assignment_applied_modeling_2.ipynb
justin-hsieh/DS-Unit-2-Applied-Modeling
0570505558983c5f8a26ebed7093f5e9fee1d5f1
[ "MIT" ]
1
2019-08-21T16:59:08.000Z
2019-08-21T16:59:08.000Z
assignment_applied_modeling_2.ipynb
justin-hsieh/DS-Unit-2-Applied-Modeling
0570505558983c5f8a26ebed7093f5e9fee1d5f1
[ "MIT" ]
null
null
null
assignment_applied_modeling_2.ipynb
justin-hsieh/DS-Unit-2-Applied-Modeling
0570505558983c5f8a26ebed7093f5e9fee1d5f1
[ "MIT" ]
null
null
null
99.448669
16,220
0.800612
[ [ [ "<a href=\"https://colab.research.google.com/github/justin-hsieh/DS-Unit-2-Applied-Modeling/blob/master/assignment_applied_modeling_2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Lambda School Data Science, Unit 2: Predictive Modeling\n\n# Applied Modeling, Module 2\n\nYou will use your portfolio project dataset for all assignments this sprint.\n\n## Assignment\n\nComplete these tasks for your project, and document your work.\n\n- [ ] Plot the distribution of your target. \n - Regression problem: Is your target skewed? Then, log-transform it.\n - Classification: Are your classes imbalanced? Then, don't use just accuracy. And try `class_balance` parameter in scikit-learn.\n- [ ] Continue to clean and explore your data. Make exploratory visualizations.\n- [ ] Fit a model. Does it beat your baseline?\n- [ ] Share at least 1 visualization on Slack.\n\nYou need to complete an initial model today, because the rest of the week, we're making model interpretation visualizations.\n\n\n## Reading\n\n### Today\n- [imbalance-learn](https://github.com/scikit-learn-contrib/imbalanced-learn)\n- [Learning from Imbalanced Classes](https://www.svds.com/tbt-learning-imbalanced-classes/)\n- [Machine Learning Meets Economics](http://blog.mldb.ai/blog/posts/2016/01/ml-meets-economics/)\n- [ROC curves and Area Under the Curve explained](https://www.dataschool.io/roc-curves-and-auc-explained/)\n- [The philosophical argument for using ROC curves](https://lukeoakdenrayner.wordpress.com/2018/01/07/the-philosophical-argument-for-using-roc-curves/)\n\n\n### Yesterday\n- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _\"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score.\"_\n- [How Shopify Capital Uses Quantile Regression To Help Merchants Succeed](https://engineering.shopify.com/blogs/engineering/how-shopify-uses-machine-learning-to-help-our-merchants-grow-their-business)\n- [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), **by Lambda DS3 student** Michael Brady. 
His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook.\n- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)\n- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video\n- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415)\n\n\n\n\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport seaborn as sns\n#import plotly.express as px\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_absolute_error,r2_score,mean_squared_error\nfrom sklearn.model_selection import train_test_split\n#import category_encoders as ce\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline", "_____no_output_____" ], [ "df = pd.read_csv('/content/openpowerlifting.csv')\ndrops = ['Squat4Kg', 'Bench4Kg', 'Deadlift4Kg','Country','Place','Squat1Kg',\n 'Squat2Kg','Squat3Kg','Bench1Kg','Bench2Kg','Bench3Kg','Deadlift1Kg',\n 'Deadlift2Kg','Deadlift3Kg']\ndf = df.drop(columns=drops)\ndf.dropna(inplace=True)\ndf.shape", "_____no_output_____" ], [ "X = df.drop(columns='Best3SquatKg')\ny = df['Best3SquatKg']\n\nXtrain, X_test, ytrain,y_test = train_test_split(X,y, test_size=0.40, \n random_state=42)\n\nX_train, X_val, y_train,y_val = train_test_split(Xtrain,ytrain, test_size=0.25,\n random_state=42)\n\nX_train.shape, X_test.shape, X_val.shape", "_____no_output_____" ], [ "model = LinearRegression()\n\nfeatures = ['Sex','Equipment','Age','BodyweightKg','Best3BenchKg','Best3DeadliftKg']\n\nX = X_train[features].replace({'M':0,'F':1,'Raw':2,'Single-ply':3,\n 'Wraps':4,'Multi-ply':5})\ny = y_train\n\nmodel.fit(X,y)\n\ny_pred = model.predict(X_val[features].replace({'M':0,'F':1,'Raw':2,'Single-ply':3,\n 'Wraps':4,'Multi-ply':5}))\nprint('Validation Accuracy', r2_score(y_pred, y_val))\nprint('Mean Absolute Error:', mean_absolute_error(y_val, y_pred)) ", "Validation Accuracy 0.8523288497456892\nMean Absolute Error: 16.220872523786927\n" ], [ "plt.scatter(y_val, y_pred)\nplt.show()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
e7a5670c6a6239d751103f5f54c6de234aaa2e5f
4,786
ipynb
Jupyter Notebook
QueueLL.ipynb
souravgopal25/Data-Structure-Algorithm-Nanodegree
d9f86440bd7802cf38fd1c61468c77cd195c0f9b
[ "MIT" ]
2
2020-05-06T07:00:27.000Z
2020-05-06T07:00:31.000Z
QueueLL.ipynb
souravgopal25/Data-Structure-Algorithm-Nanodegree
d9f86440bd7802cf38fd1c61468c77cd195c0f9b
[ "MIT" ]
null
null
null
QueueLL.ipynb
souravgopal25/Data-Structure-Algorithm-Nanodegree
d9f86440bd7802cf38fd1c61468c77cd195c0f9b
[ "MIT" ]
null
null
null
29.726708
262
0.440242
[ [ [ "<a href=\"https://colab.research.google.com/github/souravgopal25/Data-Structure-Algorithm-Nanodegree/blob/master/QueueLL.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "#QUEUE USING LinkedList\n", "_____no_output_____" ] ], [ [ "class Node:\n def __init__(self,value):\n self.value=value\n self.next=None", "_____no_output_____" ], [ "class Queue:\n \n def __init__(self):\n self.head = None\n self.tail = None\n self.num_elements = 0\n \n def enqueue(self, value):\n new_node = Node(value)\n if self.head is None:\n self.head = new_node\n self.tail = self.head\n else:\n self.tail.next = new_node # add data to the next attribute of the tail (i.e. the end of the queue)\n self.tail = self.tail.next # shift the tail (i.e., the back of the queue)\n self.num_elements += 1\n \n def dequeue(self):\n if self.is_empty():\n return None\n value=self.head.value\n self.head=self.head.next\n self.num_elements-=1\n return value\n \n def size(self):\n return self.num_elements\n \n def is_empty(self):\n return self.num_elements == 0\n ", "_____no_output_____" ], [ "# Setup\nq = Queue()\nq.enqueue(1)\nq.enqueue(2)\nq.enqueue(3)\n\n# Test size\nprint (\"Pass\" if (q.size() == 3) else \"Fail\")\n\n# Test dequeue\nprint (\"Pass\" if (q.dequeue() == 1) else \"Fail\")\n\n# Test enqueue\nq.enqueue(4)\nprint (\"Pass\" if (q.dequeue() == 2) else \"Fail\")\nprint (\"Pass\" if (q.dequeue() == 3) else \"Fail\")\nprint (\"Pass\" if (q.dequeue() == 4) else \"Fail\")\nq.enqueue(5)\nprint (\"Pass\" if (q.size() == 1) else \"Fail\")", "Pass\nPass\nPass\nPass\nPass\nPass\n" ] ], [ [ "## Time Complexity\n\nSo what's the time complexity of adding or removing things from our queue here?\n\nWell, when we use `enqueue`, we simply create a new node and add it to the tail of the list. And when we `dequeue` an item, we simply get the value from the head of the list and then shift the `head` variable so that it refers to the next node over.\n\nBoth of these operations happen in constant time—that is, they have a time-complexity of O(1).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
e7a57df360143e417616126632f34ff50fb12459
102,424
ipynb
Jupyter Notebook
Analysis Intro.ipynb
kadnan/IntroTechnicalAnalysis
f641dac47b004732ba1255b9a443e5050bc30f78
[ "MIT" ]
null
null
null
Analysis Intro.ipynb
kadnan/IntroTechnicalAnalysis
f641dac47b004732ba1255b9a443e5050bc30f78
[ "MIT" ]
null
null
null
Analysis Intro.ipynb
kadnan/IntroTechnicalAnalysis
f641dac47b004732ba1255b9a443e5050bc30f78
[ "MIT" ]
null
null
null
318.086957
56,684
0.909513
[ [ [ "# All Imports\nimport yfinance as yf\nimport talib as ta\nimport pandas as pd\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# Facebook Historial Data\nfb = yf.Ticker(\"FB\")\ndf = fb.history(start=\"2022-01-03\")", "_____no_output_____" ], [ "plt.style.use('fivethirtyeight')\ndf['MA'] = ta.SMA(df['Close'],timeperiod=5)\ndf['EMA'] = ta.EMA(df['Close'], timeperiod = 5)\ndf[['Close','MA','EMA']].plot(figsize=(8,8))\nplt.show()", "_____no_output_____" ], [ "# RSI\ndf['RSI'] = ta.RSI(df['Close'],14)\ndf['RSI'].plot(figsize=(8,8),marker='o')\ndf.tail(10)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
e7a57ef314894d2fb9f39323b1131ac6306ee199
3,311
ipynb
Jupyter Notebook
downloader/dsspGet.ipynb
thautwarm/BioInfoPlus
b3f404d3f13cfccf4694cfba586ac7efcbf5bf29
[ "Apache-2.0" ]
null
null
null
downloader/dsspGet.ipynb
thautwarm/BioInfoPlus
b3f404d3f13cfccf4694cfba586ac7efcbf5bf29
[ "Apache-2.0" ]
1
2017-10-31T15:27:03.000Z
2018-02-08T18:38:38.000Z
downloader/dsspGet.ipynb
thautwarm/BioInfoPlus
b3f404d3f13cfccf4694cfba586ac7efcbf5bf29
[ "Apache-2.0" ]
null
null
null
22.678082
169
0.507098
[ [ [ "import urllib ", "_____no_output_____" ], [ "print(dir(urllib))", "['__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', 'error', 'parse', 'request', 'response']\n" ], [ "class Env:\n pageUrl = r\"ftp://ftp.cbi.pku.edu.cn/pub/database/DSSP/20130820\" #数据目录地址\n toStoragePath = r\"H:\\BioDatas\\DSSP\" #数据压缩包下载路径\n toUpZippedPath=r\"H:\\BioDatas\\DSSP\\UnZipped\" #解压路径", "_____no_output_____" ], [ "directory = urllib.request.urlopen(Env.pageUrl)\ndirs = directory.read().decode(\"utf-8\") # 包含所有压缩文件名的字符串。", "_____no_output_____" ], [ "import re", "_____no_output_____" ], [ "# 提取文件名\nmatcher = re.compile(\"([\\w]+\\.dssp\\.gz)\") \n# Test Regex Expr.\nfileNames = matcher.findall(dirs)", "_____no_output_____" ], [ "import gzip\ndef storeOneFile(filename):\n url = f\"{Env.pageUrl}/{filename}\"\n try: \n content = urllib.request.urlopen(url).read()\n except:\n print(f\"error find: in storing processing. filename : {filename}\")\n return\n \n zipFile = f\"{Env.toStoragePath}/{filename}\"\n unZipFile = f'{Env.toUpZippedPath}/{filename.replace(\".gz\",\"\")}'\n \n with open(zipFile,\"wb\") as f:\n f.write(content)\n outUnZippedFile = gzip.GzipFile(zipFile)\n \n with open(unZipFile,'w+',encoding='utf-8') as f:\n f.write(outUnZippedFile.read().decode(\"utf-8\"))\n outUnZippedFile.close()", "_____no_output_____" ], [ "# test : storeOneFile(\"101m.dssp.gz\")\nfor task in map(storeOneFile, fileNames): pass", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7a595bf09c2586a527cc6aeccdf8e240438f68e
536,333
ipynb
Jupyter Notebook
MFPHistogramRmaxEFF.ipynb
tommychinwj/HI_characterization
e39c584cf44b7201bf4fe250b22c1dbfbe31791f
[ "BSD-3-Clause" ]
null
null
null
MFPHistogramRmaxEFF.ipynb
tommychinwj/HI_characterization
e39c584cf44b7201bf4fe250b22c1dbfbe31791f
[ "BSD-3-Clause" ]
null
null
null
MFPHistogramRmaxEFF.ipynb
tommychinwj/HI_characterization
e39c584cf44b7201bf4fe250b22c1dbfbe31791f
[ "BSD-3-Clause" ]
null
null
null
3,047.346591
531,492
0.961196
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "mfp_neutral_region_size = np.load('/lustre/aoc/projects/hera/wchin/mfp_neutral_region_size.npy')\nmfp_size_probabilities = np.load('/lustre/aoc/projects/hera/wchin/mfp_size_probabilities.npy')", "_____no_output_____" ], [ "bin_num_mfp = int(1e3)\niteration_mfp = int(1e8)", "_____no_output_____" ] ], [ [ "## Vary: Rmax, EFF, constant: x_HI, z. x_HI error: 1e-2%", "_____no_output_____" ] ], [ [ "R_BUBBLE_MAXES = np.linspace(30, 0.225, 9)\nHII_EFF_FACTORS = np.array(\n [19.04625, \n 19.511249999999997, \n 20.23875, \n 21.085, \n 22.655000000000012, \n 25.779375, \n 32.056640625, \n 56.6734375, \n 5291.5]\n)\nredshifts = np.array([6]*len(R_BUBBLE_MAXES))\n\ntotal_neutral_fractions = np.array([0.19999881, 0.19998097, 0.20000417, 0.20001106, 0.19998624,\n 0.20001978, 0.19999591, 0.19998911, 0.19998213])", "_____no_output_____" ], [ "color='w'\n\npercent=0.475\n\nmfp_maxRs = np.zeros(len(mfp_size_probabilities))\n\nfig = plt.figure(dpi=500, facecolor='#404040')\nax = fig.gca()\nfor spine in ax.spines.values(): # figure color\n spine.set_edgecolor(color)\n \nfor i in np.array([0, 5, 6, 7]):\n \n mfp_maxRs[i] = mfp_neutral_region_size[np.argmax(mfp_size_probabilities[i])]\n \n plt.plot(\n mfp_neutral_region_size[:int(percent*bin_num_mfp)], \n mfp_size_probabilities[i][:int(percent*bin_num_mfp)], \n '-',\n label=f'$R_{{max}}={R_BUBBLE_MAXES[i]:.2f}, \\zeta={HII_EFF_FACTORS[i]:.2f}, \\\npeak={mfp_maxRs[i]:.2f}Mpc$'\n )\nplt.legend(fancybox=True, framealpha=0)\nplt.tick_params(color=color, labelcolor=color)\nplt.xlabel('$R$ (Mpc)', color=color)\nplt.ylabel('$R\\mathrm{d}P/\\mathrm{d}R$', color=color)\nplt.title(f'Mean Free Path method', color=color)\n# plt.rcParams['font.size'] = font\n# plt.yscale('log')\nplt.show()", "_____no_output_____" ], [ "color='white'\npercent=0.475\n\nmfp_maxRs = np.zeros(len(mfp_size_probabilities))\n\nplt.rcParams['figure.figsize'] = [10, 6]\nfor i, mfp_size_probability in enumerate(mfp_size_probabilities):\n \n mfp_maxRs[i] = mfp_neutral_region_size[np.argmax(mfp_size_probability)]\n \n plt.plot(\n mfp_neutral_region_size[:int(percent*bin_num_mfp)], \n mfp_size_probability[:int(percent*bin_num_mfp)], \n '-',\n label=f'Rmax={R_BUBBLE_MAXES[i]:.2f}, EFF={HII_EFF_FACTORS[i]:.2f}, \\\nx_HI={total_neutral_fractions[i]*100:.1f}%, \\\nmaxR={mfp_maxRs[i]:.2f}'\n )\nplt.legend(prop={'size': 15}, fancybox=True, framealpha=0)\nplt.tick_params(color=color, labelcolor=color)\nplt.xlabel('$R$ (Mpc)', color=color)\nplt.ylabel('$R\\mathrm{d}P/\\mathrm{d}R$', color=color)\nplt.title(f'Our Boxes, MFP method: Vary: Rmax, EFF, constant: x_HI, z={redshifts[0]} ({iteration_mfp:.0e} iterations)', color=color)\n# plt.rcParams['font.size'] = 18\n# plt.yscale('log')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7a5a92501faf7f5c11b84602176453ac4bdd13f
2,336
ipynb
Jupyter Notebook
notebooks/summarize-oldbird-evaluation.ipynb
BirdVox/bv_context_adaptation
9bd446326ac927d72f7c333eac07ee0490fc3127
[ "MIT" ]
5
2018-10-17T21:17:26.000Z
2019-06-14T01:48:29.000Z
notebooks/summarize-oldbird-evaluation.ipynb
BirdVox/bv_context_adaptation
9bd446326ac927d72f7c333eac07ee0490fc3127
[ "MIT" ]
null
null
null
notebooks/summarize-oldbird-evaluation.ipynb
BirdVox/bv_context_adaptation
9bd446326ac927d72f7c333eac07ee0490fc3127
[ "MIT" ]
3
2018-12-22T00:04:43.000Z
2021-06-09T20:02:28.000Z
22.247619
76
0.538099
[ [ [ "import itertools\nimport os\nimport sys\n\nsys.path.append(\"../src\")\nimport localmodule", "_____no_output_____" ], [ "# Define constants\nmodels_dir = localmodule.get_models_dir()\noldbird_models_dir = os.path.join(models_dir, \"oldbird\")\nfolds = localmodule.fold_units()\nodfs = [\"thrush\", \"tseep\", \"merged\"]\nclip_suppressor_modes = [\"clip-suppressor\", \"no-clip-suppressor\"]", "_____no_output_____" ], [ "# UPDATE ME\niterator_product = itertools.product(odfs, clip_suppressor_modes)\n# for odf_str, clip_suppressor_str in iterator_product:\nodf_str = \"merged\"\nclip_suppressor_str = \"no-clip-suppressor\"\n\n# UPDATE ME\nfold = folds[0]\n# for fold in folds:\ntest_units = fold[0]\ntraining_units = fold[1]\nval_units = fold[2]\n\n# Oracle mode\ntest_unit_str = test_units[0]\ntest_unit_dir = os.path.join(oldbird_models_dir, test_unit_str)\npredictions_dir = \"_\".join([\"predictions\", clip_suppressor_str])\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
e7a5aeef21a675320069edc82e5766416c683f47
894,570
ipynb
Jupyter Notebook
notebooks/03_EY_challenge1_v1/EY_Challenge1_Getting_started_v1.ipynb
aogeodh/aogeodh-cube-in-a-box
01ffbd17bf23a37511d668edc80f1b5c759207e8
[ "MIT" ]
null
null
null
notebooks/03_EY_challenge1_v1/EY_Challenge1_Getting_started_v1.ipynb
aogeodh/aogeodh-cube-in-a-box
01ffbd17bf23a37511d668edc80f1b5c759207e8
[ "MIT" ]
null
null
null
notebooks/03_EY_challenge1_v1/EY_Challenge1_Getting_started_v1.ipynb
aogeodh/aogeodh-cube-in-a-box
01ffbd17bf23a37511d668edc80f1b5c759207e8
[ "MIT" ]
null
null
null
772.512953
285,508
0.949435
[ [ [ "# Challenge 1 - Getting started <img align=\"right\" src=\"../Supplementary_data/EY_logo.png\" style=\"margin:0px 50px\">\n\nWelcome to the 2020 NextWave Data Science Challenge! Thank you for taking part in the private challenge and helping us test and improve the student experience.\n\nWe have prepared a small initial dataset for you to start flexing your data science muscles. We are hoping you will be able to open and view some data, create a basic solution to the problem, and submit your results via the EY Data Science platform.", "_____no_output_____" ], [ "## Registering for the challenge and getting the data\n\nPrior to running this notebook, make sure you have:\n* **Created a profile** on the [EY Data Science Platform](http://datascience.cognistreamer.com/)\n* **Registered** for the \"NextWave Bushfire Challenge Phase 1 - Detect fire edges in airborne image\" on the Platform\n* **Downloaded and extracted** the \"Challenge1_v1.zip\" file under \"Additional data\" from the Challenge page on the Platform\n* **Uploaded** the contents of the .zip file into your jupyter environment, in the \"03_EY_challenge1\" folder.\n\nYour folder structure should look like the below:\n\n<img src=\"../Supplementary_data/EY_Challenge1_Getting_started/folder_structure.png\">", "_____no_output_____" ], [ "To check you have executed this correct, execute the code cell below and compare the contents of your current working directory (where this notebook is executing from) to the image above. You should see:\n\n* `/home/jovyan/03_EY_challenge1` showing you are working in the \"03_EY_challenge1\" folder.\n\n* `['.ipynb_checkpoints', 'EY_Challenge1_Getting_started.ipynb', 'input_linescan', 'test.csv', 'tgt_mask', 'train.csv', 'world']` showing the contents of the folder.", "_____no_output_____" ] ], [ [ "import os\nprint(os.getcwd())\nprint(os.listdir())", "/home/jovyan/test_nb_4/03_EY_challenge1_v1\n['.ipynb_checkpoints', 'EY_Challenge1_Getting_started_v1.ipynb', 'input_linescan', 'sample_submission.csv', 'test_v1.csv', 'tgt_mask', 'tmp.tif', 'train_v1.csv', 'world']\n" ] ], [ [ "## A quick word on the data\n\nThe initial dataset is organised into the following folder structure:\n<ul>\n <li><b>input_linescan</b>: these are images of fires taken from a plane. They are simple .jpg files, not georeferenced in space.</li>\n <li><b>tgt_mask</b>: these are masks which align to the linescan images. They have been manually drawn based on the linescan images.</li>\n <li><b>world</b>: these are strings of numbers called \"world\" files used for georeferencing the linescan and mask files. They put the .jpg files 'in context' with respect to a Coordinate Reference System (CRS).</li>\n </ul>\n \nThere are 25 linescan and associated world images, however only 20 masks. Your task is to use the 20 linescan/mask pairs to train a model or process which can produce a mask for the remaining 5 linescans with no mask. 
", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport os\nimport numpy as np\nimport pandas as pd\n\nimport datacube\nimport rasterio\nimport matplotlib.pyplot as plt\n\nfrom skimage import io\nfrom skimage import data\nfrom skimage import color\nfrom skimage import morphology\nfrom skimage import segmentation\nfrom skimage import measure\n\nfrom affine import Affine\nfrom rasterio.plot import show, show_hist", "_____no_output_____" ] ], [ [ "### Import input variable: aerial linescan images", "_____no_output_____" ] ], [ [ "file_stem = 'JORDAN 244 P1_201901291522_MGA94_55'\nraster_filename = 'input_linescan/' + file_stem + '.jpg'\nworld_filename = 'world/' + file_stem + '.bqw'", "_____no_output_____" ], [ "src = rasterio.open(raster_filename, mode='r+')\nsrc.read()", "Dataset has no geotransform set. The identity matrix may be returned.\n" ] ], [ [ " ### Contextualise raster data in space by providing a Coordinate Reference System (CRS) and transform function", "_____no_output_____" ] ], [ [ "show(src.read())", "_____no_output_____" ] ], [ [ "Note that this raster data is just a table of values, it is not pinned to a particular location, or \"georeferenced\". For this we need a CRS and an affine transformation function.\n\n1. CRS: 'epsg:28355' is a useful CRS for Australia, otherwise known as GDA94 / MGA zone 55. https://epsg.io/28355\n2. Affine transformation function: we also need a transformation function to descibe the how to transform our raster data into the relevant CRS. This includes the location, scale and rotation of our raster data. These values can be found in the world files ending in '.bqw' files of the same name. https://en.wikipedia.org/wiki/World_file", "_____no_output_____" ] ], [ [ "a, d, b, e, c, f = np.loadtxt(world_filename) # order depends on convention\ntransform = Affine(a, b, c, d, e, f)\ncrs = rasterio.crs.CRS({\"init\": \"epsg:28355\"}) # \"epsg:4326\" WGS 84, or whatever CRS you know the image is in\n\nsrc.transform = transform\nsrc.crs = crs", "_____no_output_____" ], [ "show(src.read(), transform=src.transform)", "_____no_output_____" ] ], [ [ "Note that the coordinates of the image are now shown in the 'epsg:28355' CRS. This means our data is no longer just an image, but an observation of a particular location.", "_____no_output_____" ], [ "### Compare against the target mask\nEach linescan/mask pair share the same transform (found in the world file of the same name), so we can reuse the transform defined above to view the target for this particular linescan.", "_____no_output_____" ] ], [ [ "mask_filename = 'tgt_mask/' + file_stem + '.jpg'", "_____no_output_____" ], [ "tgt = rasterio.open(mask_filename, mode='r+')\n\ntgt.transform = transform\ntgt.crs = crs\n\nshow(tgt.read(), transform=tgt.transform)", "_____no_output_____" ] ], [ [ "Plotting the linescan and the mask together allows us to take a look at how they compare.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, 1, figsize=(10,10))\n\nshow(src.read(), transform=src.transform, ax=ax)\nshow(tgt.read(), transform=src.transform, ax=ax, alpha=0.5)", "_____no_output_____" ] ], [ [ "### Understanding the linescan files", "_____no_output_____" ] ], [ [ "src.read().shape", "_____no_output_____" ] ], [ [ "We can see that there are three channgels in the raster image file: red, green and blue. 
If we show these individually we can see the image is similar for all three channels", "_____no_output_____" ] ], [ [ "r = src.read(1)\ng = src.read(2)\nb = src.read(3)\n\nfig, (axr, axg, axb) = plt.subplots(1,3, figsize=(21,7))\nshow(r, ax=axr, cmap='Reds', title='red channel', transform=src.transform)\nshow(g, ax=axg, cmap='Greens', title='green channel', transform=src.transform)\nshow(b, ax=axb, cmap='Blues', title='blue channel', transform=src.transform)\nplt.show()", "_____no_output_____" ] ], [ [ "A histogram of each channel shows that the distribution of the values of the three channels is also similar, with a slightly higher red channel count at the high end of the distribution. The dynamic range of each channel is 8 bits, so the values vary between 0 and 255, with 0 meaning the camera sensor received no light and 255 meaning the camera received the maximum amout of light that can be recorded.", "_____no_output_____" ] ], [ [ "show_hist(\n src.read(), bins=50, lw=0.0, stacked=False, alpha=0.3,\n histtype='stepfilled', title=\"Histogram by channel\")", "_____no_output_____" ] ], [ [ "For the red channel, the histogram shows two clusters of values, below which the data is mostly noise, and above which the signal is clearer.", "_____no_output_____" ], [ "### Preprocess raster", "_____no_output_____" ], [ "Before we can extract meaninful information from the raster image, we need to clean up noise in the image and make the signal clearer. Based on the histogram above, we could suggest a threshold in the red channel of 100 to mask the data and remove the noise.", "_____no_output_____" ] ], [ [ "threshold = 100\n\nr[r < threshold] = 0\ng[g < threshold] = 0\nb[b < threshold] = 0\n\nfig, (axr, axg, axb) = plt.subplots(1,3, figsize=(21,7))\nshow(r, ax=axr, cmap='Reds', title='red channel')\nshow(g, ax=axg, cmap='Greens', title='green channel')\nshow(b, ax=axb, cmap='Blues', title='blue channel')\n\nfor ax in (axr, axg, axb):\n ax.set_axis_off()\n\nplt.show()", "_____no_output_____" ] ], [ [ "A number of further cleansing operations have been applied below. You can experiment with different strategies including machine learning and feature engineering to find an optimal process.", "_____no_output_____" ] ], [ [ "r = src.read(1)\nthreshold = 100\nr[r < threshold] = 0\n\nlum = color.rgb2gray(r)\nmask1 = morphology.remove_small_objects(lum, 50)\nmask2 = morphology.remove_small_holes(mask1, 5)\nmask3 = morphology.opening(mask2, morphology.disk(3))\n\nfig, ax_arr = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(20, 10))\nax1, ax2, ax3, ax4 = ax_arr.ravel()\n\nax1.imshow(r, cmap='Reds')\nax1.set_title(\"Thresholded image - red channel greater than \" + str(threshold))\n\nax2.imshow(mask1, cmap=\"gray\")\nax2.set_title(\"Mask1 - small objects removed\")\n\nax3.imshow(mask2, cmap=\"gray\")\nax3.set_title(\"Mask2 - small holes removed\")\n\nax4.imshow(mask3, cmap=\"gray\")\nax4.set_title(\"Mask3 - disk + opening\")\n\nfor ax in ax_arr.ravel():\n ax.set_axis_off()\n\nplt.tight_layout()\nplt.show()", "The behavior of rgb2gray will change in scikit-image 0.19. Currently, rgb2gray allows 2D grayscale image to be passed as inputs and leaves them unmodified as outputs. Starting from version 0.19, 2D arrays will be treated as 1D images with 3 channels.\nAny labeled images will be returned as a boolean array. Did you mean to use a boolean array?\n" ] ], [ [ "At this point, our mask is just a table of values, it is not georeferenced. 
We can write this array into a temporary rasterio dataset so that we can query it in context with the source linescan image.", "_____no_output_____" ] ], [ [ "# convert boolean mask to integers\nmask = mask3.astype(np.uint8)\n\n# create a temporary dataset for storing the array\ntemp = rasterio.open(\n 'tmp.tif',\n mode='w+',\n driver='GTiff',\n height=mask.shape[0],\n width=mask.shape[1],\n count=1,\n dtype=mask.dtype,\n crs=src.crs,\n transform=src.transform)\n\n# copy the array into the opened dataset\ntemp.write(mask, 1)", "_____no_output_____" ], [ "show(temp.read(), transform=src.transform)", "_____no_output_____" ], [ "temp.close()", "_____no_output_____" ] ], [ [ "Now, the mask is georeferenced. To understand more about the rasterio.open() function, uncomment and run the cell below.", "_____no_output_____" ] ], [ [ "# help(rasterio.open)", "_____no_output_____" ] ], [ [ "Once we are happy with our preprocessing steps, we can create a function to pass each image to directly.", "_____no_output_____" ] ], [ [ "def get_mask(img, thresh):\n r = img.read(1)\n r[r < thresh] = 0\n lum = color.rgb2gray(r)\n mask1 = morphology.remove_small_objects(lum, 50)\n mask2 = morphology.remove_small_holes(mask1, 5)\n mask3 = morphology.opening(mask2, morphology.disk(3))\n mask3[mask3 > 0 ] = 255\n return mask3.astype(np.uint8)", "_____no_output_____" ], [ "mask = get_mask(src, 90)\nshow(mask, transform=src.transform, cmap='binary_r')", "The behavior of rgb2gray will change in scikit-image 0.19. Currently, rgb2gray allows 2D grayscale image to be passed as inputs and leaves them unmodified as outputs. Starting from version 0.19, 2D arrays will be treated as 1D images with 3 channels.\nAny labeled images will be returned as a boolean array. Did you mean to use a boolean array?\n" ] ], [ [ "### Making a submission\nFor the five linescans where there is no mask provided, you must first create a mask, and then return True or False for a specific set of coordinates, where True indicates that coordinate is on fire, and False indicates it is not.\n\nThe \"test.csv\" file provides a list of 1000 coordinates that are required to be classified for each of these five linescans. For this part of the challenge, you can ignore the dateTimeLocal column as we are not working with timestamps yet. Note that the coordinates are denoted in the CRS mentioned above, epsg:28355.", "_____no_output_____" ] ], [ [ "test = pd.read_csv('test_v1.csv', index_col='id')\ntest.head()", "_____no_output_____" ] ], [ [ "The index method allows you to pass in a set of x and y coordinates and return the row and column of a rasterio dataset which is georeferenced in space. 
We can then index the dataset using this row and col to return the value at that address.", "_____no_output_____" ] ], [ [ "# get the red band of the dataset only\nred = src.read(1)\n\n# get the coordinates of the centre of the dataset\nx, y = (src.bounds.left + src.width // 2 , src.bounds.top - src.height // 2)\n\n# get the row and column indicies that correspond to the centre of the dataset\nrow, col = src.index(x, y)\n\n# get the value at that address\nred[row, col]", "_____no_output_____" ] ], [ [ "Now we will iterate over the test set of linescan images, and iterate over the test coordinates required in each image, filling the 'onFire' column of the 'test' dataframe with the results of the masking process we have developed.", "_____no_output_____" ] ], [ [ "fnames = test.stem.unique()\nfnames", "_____no_output_____" ], [ "for file_stem in fnames:\n \n # open the raster file and georeference with the corresponding world file\n raster_filename = 'input_linescan/' + file_stem + '.jpg'\n world_filename = 'world/' + file_stem + '.bqw'\n src = rasterio.open(raster_filename, mode='r+')\n a, d, b, e, c, f = np.loadtxt(world_filename) # order depends on convention\n transform = Affine(a, b, c, d, e, f)\n crs = rasterio.crs.CRS({\"init\": \"epsg:28355\"}) # \"epsg:4326\" WGS 84, or whatever CRS you know the image is in\n src.transform = transform\n src.crs = crs\n \n # create a mask using the process we developed earlier. For this example, provide the same threshold for all linescans\n mask = get_mask(src, 100)\n\n # create a temporary dataset for storing the array\n temp = rasterio.open(\n 'tmp.tif',\n mode='w+',\n driver='GTiff',\n height=mask.shape[0],\n width=mask.shape[1],\n count=1,\n dtype=mask.dtype,\n crs=src.crs,\n transform=src.transform)\n\n # copy the array into the opened dataset\n temp.write(mask, 1)\n\n # iterate over the coordinates that are required for testing in the current linescan file\n for idx, ob in test.loc[test.stem==file_stem].iterrows():\n row, col = temp.index(ob.x, ob.y)\n result = temp.read(1)[row, col]\n test.loc[(test.stem==file_stem) & (test.x==ob.x) & (test.y==ob.y), 'target'] = result\n \n temp.close()", "Dataset has no geotransform set. The identity matrix may be returned.\nThe behavior of rgb2gray will change in scikit-image 0.19. Currently, rgb2gray allows 2D grayscale image to be passed as inputs and leaves them unmodified as outputs. Starting from version 0.19, 2D arrays will be treated as 1D images with 3 channels.\nAny labeled images will be returned as a boolean array. Did you mean to use a boolean array?\n" ], [ "test.to_csv('sample_submission.csv', columns = ['target'])\ntest.head()", "_____no_output_____" ] ], [ [ "Submit your file to the EY Data Science platform for grading.", "_____no_output_____" ], [ "## Important note for participants in the private challenge\n\nOne of the purposes of the private challenge is to estimate the amount of Azure credits students will need to complete the challenge. Please help us do this by following the instructions [here](https://github.com/EY-Data-Science-Program/2020-bushfire-challenge/wiki/Credit-consumption-by-resource-group:-Azure) to capture your current credit consumption. 
Doing this each time you submit results will help us understand what stage of development students can hope to reach with a given amount of credits.", "_____no_output_____" ], [ "***\n## Additional information\n\n**License:** The code in this notebook is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0). \nDigital Earth Australia data is licensed under the [Creative Commons by Attribution 4.0](https://creativecommons.org/licenses/by/4.0/) license.\n\n**Contact:** If you need assistance, please post a question in the [Troubleshooting EY Data Science Program MS Teams Channel](https://teams.microsoft.com/l/channel/19%3a90804a73cb5a4159a60693c41a8820d2%40thread.tacv2/Troubleshooting?groupId=f6acd945-fed9-4db4-bed8-414988473a36&tenantId=5b973f99-77df-4beb-b27d-aa0c70b8482c) or on the [Open Data Cube Slack channel](http://slack.opendatacube.org/) or on the [GIS Stack Exchange](https://gis.stackexchange.com/questions/ask?tags=open-data-cube) using the `open-data-cube` tag (you can view previously asked questions [here](https://gis.stackexchange.com/questions/tagged/open-data-cube)).\nIf you would like to report an issue with this notebook, you can file one on [Github](https://github.com/GeoscienceAustralia/dea-notebooks).\n\n**Last modified:** October 2020", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
e7a5af57aa09281fc4215bf9a94fd549766a7f2d
5,838
ipynb
Jupyter Notebook
Training.py.ipynb
sudoberlin/chatbot
07c99c65393190f9e20106006e1ba71b8f9539b2
[ "MIT" ]
3
2019-10-06T21:04:24.000Z
2020-05-14T22:56:47.000Z
Training.py.ipynb
sudoberlin/chatbot
07c99c65393190f9e20106006e1ba71b8f9539b2
[ "MIT" ]
null
null
null
Training.py.ipynb
sudoberlin/chatbot
07c99c65393190f9e20106006e1ba71b8f9539b2
[ "MIT" ]
null
null
null
57.80198
205
0.468996
[ [ [ "# training the seq2seq model", "_____no_output_____" ] ], [ [ "batch_index_check_training_loss = 100\nbatch_index_check_validation_loss = ((len(training_questions)) // batch_size // 2) - 1\ntotal_training_loss_error = 0\nlist_validation_loss_error = []\nearly_stopping_check = 0\nearly_stopping_stop = 1000\ncheckpoint = \"chatbot_weights.ckpt\"\nsession.run(tf.global_variables_initializer())\nfor epoch in range (1, epochs+1):\n for batch_index, (padded_questions_in_batch, padded_answers_in_batch) in enumerate(split_into_batches(training_questions, training_answers, batch_size)):\n starting_time = time.time()\n _, batch_training_loss_error = session.run([optimizer_gradient_clipping, loss_error], {inputs: padded_questions_in_batch,\n targets: padded_answers_in_batch, \n lr: learning_rate,\n sequence_length: padded_answers_in_batch.shape[1],\n keep_prob: keep_probability})\n total_training_loss_error += batch_training_loss_error\n ending_time = time.time()\n batch_time = ending_time - starting_time\n if batch_index % batch_index_check_training_loss == 0:\n print('Epoch: {:>3}/{}, Batch: {:>4}/{}, Training Loss Error: {:>6.3f}, Training Time on 100 Batches: {:d} seconds' .format(epoch,\n epochs,\n batch_index,\n len(training_questions) // batch_size,\n total_training_loss_error / batch_index_check_training_loss,\n int(batch_time * batch_index_check_training_loss)))\n total_training_loss_error = 0\n if batch_index % batch_index_check_validation_loss == 0 and batch_index > 0:\n total_training_loss_error = 0\n starting_time = time.time()\n for batch_index_validation, (padded_questions_in_batch, padded_answers_in_batch) in enumerate(split_into_batches(validation_questions, validation_answers, batch_size)):\n batch_validation_loss_error = session.run(loss_error, {inputs: padded_questions_in_batch,\n targets: padded_answers_in_batch,\n lr: learning_rate,\n sequence_length: padded_answers_in_batch.shape[1],\n keep_prob: 1})\n total_validation_loss_error += batch_validation_loss_error\n ending_time = time.time()\n batch_time = ending_time - starting_time\n average_validation_loss_error = total_validation_loss_error / (len(validation_questions) / batch_size)\n print('Validation Loss Error: {:>6.3f}, Batch Validation Time: {:d} seconds'.format(average_validation_loss_error, int(batch_time)))\n learning_rate *= learning_rate_decay\n if learning_rate < min_learning_rate:\n learning_rate = min_learning_rate\n list_validation_loss_error.append(average_validation_loss_error)\n if average_validation_loss_error <= min(list_validation_loss_error):\n print('I speak better now!!')\n early_stopping_check = 0\n saver = tf.train.Saver()\n saver.save(session, checkpoint)\n else:\n print('Sorry I do not speak better, I need to practice more.')\n early_stopping_check += 1\n if early_stopping_check == early_stopping_stop:\n break\n if early_stopping_check == early_stopping_stop:\n print('My apologies, I cannot speak better anymore. This is the best I can do!.')\n break\nprint('Game Over')\n ", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]