Dataset Viewer (First 5GB)
Auto-converted to Parquet
Search is not available for this dataset
text
stringlengths
1.61k
105M
id
stringlengths
23
24
file_path
stringclasses
45 values
{ "cells": [ { "cell_type": "markdown", "id": "568549f2", "metadata": { "papermill": { "duration": 0.012488, "end_time": "2022-06-27T22:23:48.970824", "exception": false, "start_time": "2022-06-27T22:23:48.958336", "status": "completed" }, "tags": [] }, "source": [ "# How to load data for the MiFit data files" ] }, { "cell_type": "code", "execution_count": 1, "id": "9d92cf80", "metadata": { "execution": { "iopub.execute_input": "2022-06-27T22:23:49.008165Z", "iopub.status.busy": "2022-06-27T22:23:49.006269Z", "iopub.status.idle": "2022-06-27T22:23:49.016452Z", "shell.execute_reply": "2022-06-27T22:23:49.015499Z", "shell.execute_reply.started": "2022-06-27T22:16:03.895150Z" }, "papermill": { "duration": 0.032467, "end_time": "2022-06-27T22:23:49.016791", "exception": true, "start_time": "2022-06-27T22:23:48.984324", "status": "failed" }, "tags": [] }, "outputs": [ { "ename": "SyntaxError", "evalue": "invalid syntax (497870201.py, line 1)", "output_type": "error", "traceback": [ "\u001b[0;36m File \u001b[0;32m\"/tmp/ipykernel_19/497870201.py\"\u001b[0;36m, line \u001b[0;32m1\u001b[0m\n\u001b[0;31m pip install xlrd\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n" ] } ], "source": [ "pip install xlrd\n", "\n", "import xlrd\n", "from xlrd.sheet import ctype_text \n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "from pathlib import Path, PureWindowsPath\n", "\n", "import os.path\n", "from os.path import join, dirname, abspath\n", "\n", "import numpy as np # linear algebra\n", "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n", "\n", "import datetime" ] }, { "cell_type": "markdown", "id": "557b4703", "metadata": { "papermill": { "duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending" }, "tags": [] }, "source": [ "# Load dataset\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "2cd004f3", "metadata": { "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "execution": { "iopub.execute_input": "2022-06-27T22:16:08.852040Z", "iopub.status.busy": "2022-06-27T22:16:08.851834Z", "iopub.status.idle": "2022-06-27T22:16:08.893454Z", "shell.execute_reply": "2022-06-27T22:16:08.892583Z", "shell.execute_reply.started": "2022-06-27T22:16:08.852016Z" }, "papermill": { "duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending" }, "tags": [] }, "outputs": [], "source": [ "# This Python 3 environment comes with many helpful analytics libraries installed\n", "# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n", "# For example, here's several helpful packages to load\n", "\n", "# Input data files are available in the read-only \"../input/\" directory\n", "# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n", "\n", "import os\n", "for dirname, _, filenames in os.walk('/kaggle/input'):\n", " for filename in filenames:\n", " print(os.path.join(dirname, filename))\n", "\n", "# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n", "# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session\n", "\n", "data_path = \"../input/mi-fit-dailylife-heart-rate/\"" ] }, { "cell_type": "code", "execution_count": null, "id": "e0e9e0bd", "metadata": { "execution": { "iopub.execute_input": "2022-06-27T22:16:27.808440Z", "iopub.status.busy": "2022-06-27T22:16:27.808216Z", "iopub.status.idle": "2022-06-27T22:16:27.825807Z", "shell.execute_reply": "2022-06-27T22:16:27.824934Z", "shell.execute_reply.started": "2022-06-27T22:16:27.808417Z" }, "papermill": { "duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending" }, "tags": [] }, "outputs": [], "source": [ "def wrap_up_data(datout):\n", " out = np.array(datout)\n", " HR = out[:,:][0] # string\n", " timestamp = out[:,:][1] # string\n", " date = out[:,0][2] # string\n", " wc_time = out[:,:][3] # string wallclock time\n", "\n", " Ldat = len(HR)\n", "\n", " # Convert string values to float\n", " HR_float = []\n", " for i in range(0, Ldat):\n", " HR_float.append(np.float32(HR[i]))\n", " \n", " H = []\n", " MI = []\n", " S_MS = []\n", " for i in range(0, Ldat):\n", " TS = np.float32(timestamp[i]) # convert timestamp from a string to a float\n", " TS_sec = TS/1000; # Recorded UNIX timestamp in milliseconds, convert to seconds\n", " # full_datetime = datetime.datetime.fromtimestamp(TS_sec).strftime('%Y-%m-%d %H:%M:%S.%f')\n", "\n", " # datatime conversion returns a string, need to convert it back to a float\n", " H = H + [np.float32(datetime.datetime.fromtimestamp(TS_sec).strftime('%H'))]\n", " MI = MI + [np.float32(datetime.datetime.fromtimestamp(TS_sec).strftime('%M'))]\n", " S_MS = S_MS + [np.float32(datetime.datetime.fromtimestamp(TS_sec).strftime('%S.%f'))]\n", "\n", " # Convert list to an array\n", " H = np.reshape(H, (Ldat,1))\n", " MI = np.reshape(MI, (Ldat,1))\n", " S_MS = np.reshape(S_MS, (Ldat,1))\n", "\n", " # The method of appending makes each entry in a list, we want all entries in a single list/array\n", " # So, resample H, MI, S_MS such that all entries in a single list/array\n", " Hvec = []\n", " MIvec = []\n", " S_MSvec = []\n", " for i in range(0, Ldat):\n", " Hvec = Hvec + [H[:][i]]\n", " MIvec = MIvec + [MI[:][i]]\n", " S_MSvec = S_MSvec + [S_MS[:][i]]\n", "\n", " # Convert list to an array\n", " Hvec = np.reshape(H, (Ldat,1))\n", " MIvec = np.reshape(MI, (Ldat,1))\n", " S_MSvec = np.reshape(S_MS, (Ldat,1))\n", "\n", " # Cumulative time vector in seconds using [H, MI, S_MS]\n", " cum_s = Hvec*(60/1)*(60/1) + MIvec*(60/1) + S_MSvec\n", " \n", " # ----------------------------------------------\n", " # Ensure that all time starts from 0\n", " stnew = np.argmin(cum_s)\n", " cum_s = np.ravel(cum_s[stnew::])\n", " HR_float = np.ravel(HR_float[stnew::])\n", " Hvec = np.ravel(Hvec[stnew::])\n", " MIvec = np.ravel(MIvec[stnew::])\n", " S_MSvec = np.ravel(S_MSvec[stnew::])\n", " # ----------------------------------------------\n", " \n", " # ----------------------------------------------\n", " plt.plot(cum_s, HR_float)\n", " plt.xlabel('Time (s)')\n", " plt.ylabel('Heart Rate')\n", " plt.show()\n", " # ----------------------------------------------\n", " \n", " # ----------------------------------------------\n", " # Plot time data for dividing the HR data\n", " fig, (ax0, ax1, ax2) = plt.subplots(3)\n", " fig.suptitle('Time data')\n", " ax0.plot(Hvec[:], 'b-', label='hours')\n", " ax0.set_ylabel('hour')\n", "\n", " ax1.plot(MIvec[:], 'r--', label='mins')\n", " ax1.set_ylabel('mins')\n", "\n", " ax2.plot(S_MSvec[:], 'g:', label='sec_ms')\n", " ax2.set_ylabel('secs')\n", " \n", " plt.legend(loc='best')\n", " plt.tight_layout()\n", " plt.show()\n", " # ----------------------------------------------\n", " \n", " return HR_float, cum_s, Hvec, MIvec, S_MSvec, date" ] }, { "cell_type": "code", "execution_count": null, "id": "b8307dd9", "metadata": { "execution": { "iopub.execute_input": "2022-06-27T22:16:39.201029Z", "iopub.status.busy": "2022-06-27T22:16:39.200782Z", "iopub.status.idle": "2022-06-27T22:16:39.211678Z", "shell.execute_reply": "2022-06-27T22:16:39.210977Z", "shell.execute_reply.started": "2022-06-27T22:16:39.201001Z" }, "papermill": { "duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending" }, "tags": [] }, "outputs": [], "source": [ "# Created by Jamilah Foucher, February 11, 2021 \n", "\n", "# Purpose: The purpose of this function is to open and extract the cell contents of an excel file \n", "\n", "# Input VARIABLES:\n", "# (1) file_to_open is the full path name and file with extention as a string \n", "# ie: file_to_open = 'data_path' + 'filename' + '.file_suffix'\n", "# \n", "# Output VARIABLES:\n", "# (1) dataout is a matrix of the cells in the .xls file\n", "# \n", "# *** NOTE: There is a weird formatting bug with xlrd. You have to open each .xls file, push save, and close for this code to work. If you do not do this, xlrd.open_workbook will not be able to open the file. ***\n", "\n", "\n", "def load_xls_file(file_to_open):\n", " \n", " # ----------------------------------------------\n", " # Get data file name\n", " # ----------------------------------------------\n", " print(\"Opening file : \" + str(file_to_open))\n", " # ----------------------------------------------\n", " \n", " # ----------------------------------------------\n", " # Load unknown rows and columns from an excel file\n", " # ----------------------------------------------\n", " workbook = xlrd.open_workbook(file_to_open)\n", " \n", " # List sheet names, and pull a sheet by name\n", " sheet_names = workbook.sheet_names()\n", " first_sheet = workbook.sheet_by_name(sheet_names[0]) # sheet by name\n", " \n", " ooo = []\n", " cell_str = []\n", " for nrows in range(0, first_sheet.nrows):\n", " cell_str = [first_sheet.cell(nrows, j) for j in range(0, first_sheet.ncols)]\n", " ooo.append(cell_str)\n", " \n", " # Numerical:\n", " # Load all data except the header, in a vector containing a vector for each column of data\n", " datout = []\n", " cell_num = []\n", " for ncols in range(0, first_sheet.ncols):\n", " cell_num = [first_sheet.cell_value(j, ncols) for j in range(1, first_sheet.nrows)]\n", " datout.append(cell_num)\n", " \n", " workbook.release_resources()\n", " del workbook\n", " \n", " temp = np.array(datout)\n", " print('Overall size = ', temp.shape)\n", " # ----------------------------------------------\n", " \n", " return datout" ] }, { "cell_type": "code", "execution_count": null, "id": "521404f4", "metadata": { "execution": { "iopub.execute_input": "2022-06-27T22:16:41.090179Z", "iopub.status.busy": "2022-06-27T22:16:41.089967Z", "iopub.status.idle": "2022-06-27T22:18:24.045562Z", "shell.execute_reply": "2022-06-27T22:18:24.044878Z", "shell.execute_reply.started": "2022-06-27T22:16:41.090157Z" }, "papermill": { "duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending" }, "scrolled": true, "tags": [] }, "outputs": [], "source": [ "# Step 0: Read in data : If a .h5 file does not exist or adding NEW DATA, run this section\n", "fsuffix = \"_export.xls\"\n", "\n", "# There is a weird formatting bug: You have to open each file, push save, and close.\n", "# If you do not do this, xlrd.open_workbook will not be able to open the file.\n", "X = []\n", "Y_val = []\n", " \n", "for month in range(2):\n", " if month == 0:\n", " # Novembre\n", " # lost at the moment : \"d02m11y20\", \"d03m11y20\", \"d04m11y20\", \"d05m11y20\", \"d06m11y20\",\n", " monthname = 'nov2021'\n", " dater = [\"1_d07m11y20\", \"2_d13m11y20\", \"3_d15m11y20\", \"4_d17m11y20\", \"5_d18m11y20\", \"6_d19m11y20\", \n", " \"7_d20m11y20\", \"8_d22m11y20\", \"9_d24m11y20\", \"10_d27m11y20\", \"11_d30m11y20\"]\n", " elif month == 1:\n", " # Decembre\n", " monthname = 'dec2021'\n", " dater = [\"12_d02m12y20\", \"13_d03m12y20\", \"14_d04m12y20\", \"15_d05m12y20\", \"16_d13m12y20\", \"17_d14m12y20\", \n", " \"18_d15m12y20\", \"19_d17m12y20\", \"20_d28m12y20\"]\n", "\n", " \n", " for i in range(0, len(dater)):\n", " #file_to_open = join(data_path, i, fsuffix)\n", " # OR\n", " file_to_open = data_path + str(dater[i]) + fsuffix\n", "\n", " datout = load_xls_file(file_to_open)\n", "\n", " HR_float, cum_s, Hvec, MIvec, S_MSvec, date = wrap_up_data(datout)\n", " \n", " # These are accumulated across all datasets\n", " for j in range(len(HR_float)):\n", " X = X + [HR_float[j]]\n", " Y_val = Y_val + [Hvec[j]]" ] }, { "cell_type": "markdown", "id": "9459459c", "metadata": { "papermill": { "duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending" }, "tags": [] }, "source": [ "This dataset does not have a ylabel. So you need to make a label by clustering the data." ] }, { "cell_type": "code", "execution_count": null, "id": "732841f2", "metadata": { "papermill": { "duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending" }, "tags": [] }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.12" }, "papermill": { "default_parameters": {}, "duration": 14.168429, "end_time": "2022-06-27T22:23:49.839357", "environment_variables": {}, "exception": true, "input_path": "__notebook__.ipynb", "output_path": "__notebook__.ipynb", "parameters": {}, "start_time": "2022-06-27T22:23:35.670928", "version": "2.3.3" } }, "nbformat": 4, "nbformat_minor": 5 }
0099/474/99474386.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
"{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"id\": \"05cbe7b2\",\n \"metadata\": (...TRUNCATED)
0099/474/99474404.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
"{\"metadata\":{\"kernelspec\":{\"language\":\"python\",\"display_name\":\"Python 3\",\"name\":\"pyt(...TRUNCATED)
0099/474/99474781.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"id\": \"967b75(...TRUNCATED)
0099/474/99474799.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
"{\"metadata\":{\"kernelspec\":{\"language\":\"python\",\"display_name\":\"Python 3\",\"name\":\"pyt(...TRUNCATED)
0099/475/99475040.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
"{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"id\": \"3e88741f\",\n \"metadata\": (...TRUNCATED)
0099/475/99475098.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"id\": \"e09df1(...TRUNCATED)
0099/475/99475385.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"id\": \"bb9c8c(...TRUNCATED)
0099/475/99475617.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
"{\n \"cells\": [\n {\n \"cell_type\": \"raw\",\n \"id\": \"5fd6541d\",\n \"metadata\": {\n (...TRUNCATED)
0099/476/99476131.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"id\": \"2fdced(...TRUNCATED)
0099/476/99476332.ipynb
s3://data-agents/kaggle-outputs/sharded/015_00099.jsonl.gz
End of preview. Expand in Data Studio
README.md exists but content is empty.
Downloads last month
25