| column | dtype | length / range |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 6 to 14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6 to 260 |
| max_stars_repo_name | stringlengths | 6 to 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 41 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24, nullable |
| max_issues_repo_path | stringlengths | 6 to 260 |
| max_issues_repo_name | stringlengths | 6 to 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 41 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24, nullable |
| max_forks_repo_path | stringlengths | 6 to 260 |
| max_forks_repo_name | stringlengths | 6 to 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 41 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24, nullable |
| avg_line_length | float64 | 2 to 1.04M |
| max_line_length | int64 | 2 to 11.2M |
| alphanum_fraction | float64 | 0 to 1 |
| cells | list | |
| cell_types | list | |
| cell_type_groups | list | |

hexsha: cb0e019df829a2846e3cc02083e88a14df32166e
size: 14,064
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: preprocessing/1.SpeakingSound_audio_classification.ipynb
max_stars_repo_name: kyle-bong/Realtime_Voice_Activity_Detection
max_stars_repo_head_hexsha: 9ea2d6187ec157051163ac83bed3f7135a3bb802
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2022-02-10T01:38:27.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-25T04:50:17.000Z
max_issues_repo_path: preprocessing/1.SpeakingSound_audio_classification.ipynb
max_issues_repo_name: kyle-bong/Realtime_Voice_Activity_Detection
max_issues_repo_head_hexsha: 9ea2d6187ec157051163ac83bed3f7135a3bb802
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: preprocessing/1.SpeakingSound_audio_classification.ipynb
max_forks_repo_name: kyle-bong/Realtime_Voice_Activity_Detection
max_forks_repo_head_hexsha: 9ea2d6187ec157051163ac83bed3f7135a3bb802
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 31.604494
max_line_length: 102
alphanum_fraction: 0.400526
cells:
[ [ [ "# classificate speaking_audio files", "_____no_output_____" ] ], [ [ "\n# ๊ท ํ˜•์žˆ๊ฒŒ ๊ตฌ์„ฑํ•˜๊ธฐ\n # 1. ์„ฑ๋ณ„ 50:50\n # 2. ์ง€์—ญ 25:25:25:25\n # ๊ฐ ์ง€์—ญ๋ณ„๋กœ ๋‚จ 10, ์—ฌ 10๋ช…\n # ์ด 80๋ช….\n\n\nimport os\nimport shutil\nimport random\nfrom typing_extensions import final", "_____no_output_____" ], [ "A = [] # ๊ฐ•์›\nB = [] # ์„œ์šธ/๊ฒฝ๊ธฐ\nC = [] # ๊ฒฝ์ƒ\nD = [] # ์ „๋ผ\nE = [] # ์ œ์ฃผ(ํ˜„์žฌ ์—†์Œ)\nF = [] # ์ถฉ์ฒญ(ํ˜„์žฌ ์—†์Œ)\nG = [] # ๊ธฐํƒ€(ํ˜„์žฌ ์—†์Œ)", "_____no_output_____" ], [ "region = ['A', 'B', 'C', 'D', 'E', 'F', 'G']", "_____no_output_____" ], [ "# ๊ฐ ํŒŒ์ผ๋“ค์„ ์ง€์—ญ๋ณ„๋กœ ๋ถ„๋ฅ˜ํ•ฉ๋‹ˆ๋‹ค.\n\n# ๋…ธ์ธ ์Œ์„ฑ ๋ฐ์ดํ„ฐ์…‹์ด ์žˆ๋Š” ๋””๋ ‰ํ† ๋ฆฌ\nbasic_path = os.path.join('../Dataset_audio/old_total')\n\nfor i in region:\n os.makedirs(basic_path + '/' + i)\n\nfor (path, dir, files) in os.walk(basic_path):\n for filename in files:\n ext = os.path.splitext(filename)[-1]\n if ext == '.wav':\n if os.path.splitext(filename)[0][-1] == 'A':\n A.append(filename)\n shutil.move(\n os.path.join(path, filename),\n os.path.join(basic_path, 'A', filename)\n )\n elif os.path.splitext(filename)[0][-1] == 'B':\n B.append(filename)\n shutil.move(\n os.path.join(path, filename),\n os.path.join(basic_path, 'B', filename)\n )\n elif os.path.splitext(filename)[0][-1] == 'C':\n C.append(filename)\n shutil.move(\n os.path.join(path, filename),\n os.path.join(basic_path, 'C', filename)\n )\n elif os.path.splitext(filename)[0][-1] == 'D':\n D.append(filename)\n shutil.move(\n os.path.join(path, filename),\n os.path.join(basic_path, 'D', filename)\n )\n elif os.path.splitext(filename)[0][-1] == 'E':\n E.append(filename)\n shutil.move(\n os.path.join(path, filename),\n os.path.join(basic_path, 'E', filename)\n )\n elif os.path.splitext(filename)[0][-1] == 'F':\n F.append(filename)\n shutil.move(\n os.path.join(path, filename),\n os.path.join(basic_path, 'F', filename)\n )\n elif os.path.splitext(filename)[0][-1] == 'G':\n G.append(filename)\n shutil.move(\n os.path.join(path, filename),\n os.path.join(basic_path, 'G', filename)\n )\n\nfor i in [A, B, C, D, E, F, G]:\n print('file_num: ', len(i))", "file_num: 100588\nfile_num: 8857\nfile_num: 7882\nfile_num: 17703\nfile_num: 0\nfile_num: 0\nfile_num: 0\n" ], [ "# ์ง€์—ญ๋ณ„๋กœ ๋‚˜๋ˆˆ ํŒŒ์ผ์„ ์„ฑ๋ณ„๋กœ ๋‚˜๋ˆ•๋‹ˆ๋‹ค.\nM=[]\nF=[]\n\nfor i in region:\n print(i)\n for (path, dir, files) in os.walk(os.path.join(basic_path, i)):\n for filename in files:\n ext = os.path.splitext(filename)[-1]\n if ext == '.wav':\n if os.path.splitext(filename)[0][-6] == 'M':\n #print(filename, 'M')\n M.append(filename)\n try:\n os.mkdir(\n os.path.join(basic_path, i, 'M')\n )\n except FileExistsError:\n pass\n shutil.move(\n os.path.join(basic_path, i, filename),\n os.path.join(basic_path, i, 'M', filename)\n )\n elif os.path.splitext(filename)[0][-6] == 'F':\n #print(filename, 'F')\n F.append(filename)\n try:\n os.mkdir(\n os.path.join(basic_path, i, 'F')\n )\n except FileExistsError:\n pass\n shutil.move(\n os.path.join(basic_path, i, filename),\n os.path.join(basic_path, i, 'F', filename)\n )\n else:\n print('Cannot find gender')", "A\nB\nC\nD\nE\nF\nG\n" ], [ "# 3. 
๊ฐ ์ง€์—ญ๋ณ„๋กœ ์ตœ๋Œ€ ๋‚จ 100, ์—ฌ 100๋ช…์˜ ํ™”์ž๋ฅผ ์„ ์ •ํ•ฉ๋‹ˆ๋‹ค.\n# ๋žœ๋ค ์„ ์ •\n# random.sample(list, n_sample)\n\ntarget_path = os.path.join('../Dataset_audio/old_total')\n\ndef speaker_select(target_path):\n region = ['A', 'B', 'C', 'D', 'E', 'F']\n gender = ['M', 'F']\n result = []\n for i in region:\n for g in gender:\n print(i, '-', g)\n try:\n by_gender_files = os.listdir(os.path.join(target_path, i, g))\n by_gender_speaker = [file[:6] for file in by_gender_files]\n selected_speaker = random.sample(by_gender_speaker, 100)\n result.append(selected_speaker)\n print('num of selected_speaker: ', len(list(set(selected_speaker))))\n except FileNotFoundError:\n pass\n return result\n\nselected_speakers = speaker_select(target_path)", "A - M\nnum of selected_speaker: 24\nA - F\nnum of selected_speaker: 34\nB - M\nnum of selected_speaker: 1\nB - F\nnum of selected_speaker: 5\nC - M\nnum of selected_speaker: 2\nC - F\nnum of selected_speaker: 2\nD - M\nnum of selected_speaker: 5\nD - F\nnum of selected_speaker: 6\nE - M\nE - F\nF - M\nF - F\n" ], [ "# file select\ntarget_path = r'../Dataset_audio/old_total'\ndef file_select(target_path, selected_speakers):\n err_count = []\n region = ['A', 'B', 'C', 'D', 'E', 'F']\n for i in region:\n print(i)\n for (path, dir, files) in os.walk(os.path.join(target_path, i)):\n for filename in files:\n# ext = os.path.splitext(filename)[-1]\n# if ext == '.wav':\n speaker = filename[:6]\n g = os.path.splitext(filename)[0][-6]\n for x in selected_speakers:\n if speaker in x:\n #print('he/she is selected speaker.')\n if g == 'M':\n #print('{} is male'.format(speaker))\n try:\n os.makedirs(\n os.path.join(target_path, i, 'selected_M', speaker)\n )\n except:\n pass\n shutil.copy(\n os.path.join(target_path, i, 'M', filename),\n os.path.join(target_path, i, 'selected_M', speaker, filename)\n )\n elif g == 'F':\n #print('{} is female'.format(speaker))\n try:\n os.makedirs(\n os.path.join(target_path, i, 'selected_F', speaker)\n )\n except:\n pass\n shutil.copy(\n os.path.join(target_path, i, 'F', filename),\n os.path.join(target_path, i, 'selected_F', speaker, filename)\n )\n else:\n print('cannot found gender')\n err_count.append(filename)\n print(err_count)\n\nfile_select(target_path, selected_speakers)", "A\nB\nC\nD\nE\nF\n[]\n" ], [ "# selected_folders์— ์žˆ๋Š” ํŒŒ์ผ ์ฐพ๊ธฐ\n# ํ•œ ํ™”์ž๋‹น ์ตœ๋Œ€ 30๊ฐœ์”ฉ\ntarget_path = r'../Dataset_audio/old_total'\nselected_folders = ['selected_M', 'selected_F']\n\ndef finding_selected_files(folder_name_list):\n filenames_random = []\n for i in region:\n for (path, dir, files) in os.walk(target_path + '/' + i):\n #print('current path:', path)\n #print('curren dir:', dir)\n if path.split('/')[-2] in folder_name_list:\n filenames = []\n for filename in files:\n #print('filename: ', filename)\n ext = os.path.splitext(filename)[-1]\n if ext == '.wav':\n filenames.append(filename)\n\n filenames_random += random.sample(filenames, min(len(filenames), 30)) #์ตœ๋Œ€ 30\n \n return filenames_random\n\nselected_files = finding_selected_files(selected_folders)", "_____no_output_____" ], [ "len(selected_files)", "_____no_output_____" ], [ "# ๋žœ๋ค์œผ๋กœ ์„ ํƒํ•œ ํŒŒ์ผ์„ ๋ณต์‚ฌํ•˜๊ธฐ\nspeaking_path = r'../Dataset_audio/Speaking'\n\ndef final_selected_files(new_path, filename_list):\n target_path = r'../Dataset_audio/old_total'\n for (path, dir, files) in os.walk(target_path):\n for filename in files:\n if filename in filename_list:\n try:\n shutil.copy(\n os.path.join(path, filename),\n os.path.join(new_path, filename)\n )\n 
#print(os.path.join(path, filename))\n #print(os.path.join(new_path, filename), 'copied')\n except FileNotFoundError:\n pass\nfinal_selected_files(speaking_path, selected_files)", "_____no_output_____" ], [ "len(os.listdir(r'../Dataset_audio/Speaking'))", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]

hexsha: cb0e01f305630545705c8bf2cfe50982b8976b13
size: 92,993
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: module3-make-explanatory-visualizations/LS_DS_123_Make_Explanatory_Visualizations_Assignment.ipynb
max_stars_repo_name: maiormarso/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
max_stars_repo_head_hexsha: 422a2ce373c13c96a305f37b1fbd25892714d975
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: module3-make-explanatory-visualizations/LS_DS_123_Make_Explanatory_Visualizations_Assignment.ipynb
max_issues_repo_name: maiormarso/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
max_issues_repo_head_hexsha: 422a2ce373c13c96a305f37b1fbd25892714d975
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: module3-make-explanatory-visualizations/LS_DS_123_Make_Explanatory_Visualizations_Assignment.ipynb
max_forks_repo_name: maiormarso/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
max_forks_repo_head_hexsha: 422a2ce373c13c96a305f37b1fbd25892714d975
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 234.239295
max_line_length: 33,520
alphanum_fraction: 0.882744
cells:
[ [ [ "<a href=\"https://colab.research.google.com/github/maiormarso/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/module3-make-explanatory-visualizations/LS_DS_123_Make_Explanatory_Visualizations_Assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# ASSIGNMEN\n### 1) Replt\n\nicate the lesson code. I recommend that you [do not copy-paste](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit).\n\nGet caught up to where we got our example in class and then try and take things further. How close to \"pixel perfect\" can you make the lecture graph?\n\nOnce you have something that you're proud of, share your graph in the cohort channel and move on to the second exercise.\n\n### 2) Reproduce another example from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/).\n\n**WARNING**: There are a lot of very custom graphs and tables at the above link. I **highly** recommend not trying to reproduce any that look like a table of values or something really different from the graph types that we are already familiar with. Search through the posts until you find a graph type that you are more or less familiar with: histogram, bar chart, stacked bar chart, line chart, [seaborn relplot](https://seaborn.pydata.org/generated/seaborn.relplot.html), etc. Recreating some of the graphics that 538 uses would be a lot easier in Adobe photoshop/illustrator than with matplotlib. \n\n- If you put in some time to find a graph that looks \"easy\" to replicate you'll probably find that it's not as easy as you thought. \n\n- If you start with a graph that looks hard to replicate you'll probably run up against a brick wall and be disappointed with your afternoon.\n\n\n\n\n\n\n\n\n\n\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\nplt.style.use('fivethirtyeight')\nimport numpy as np\nimport pandas as pd\n\n\nfake = pd.Series([38,3,2,1,2,4,6,5,5,33])\nfig = plt.figure()\n\nfig.patch.set(facecolor='white')\n\n\nax = ax=fake.plot.bar(color='C1', width=0.9)\nax.set(facecolor='White')\n\n\nplt.xlabel('xlabel')\nplt.ylabel('')\n\n\nax.text(x=-1.8, y=44, s='An Inconvenient',\nfontweight='bold', fontsize= 12)\nax.text(x=-1.8, y=41.5, s='IMDb ratings', fontsize=11)\n\nax.set_ylabel('percent y label', fontsize=9, fontweight='bold')\nax.set_xlabel('Rating', fontsize=9, fontweight='bold', labelpad=10)\n\n\nax.set_xticklabels([1,2,3,4,5,6,7,8,9,10], rotation=0) \nax.set_yticks(range(0,50,10))\nfmt='%.0f%%'\n\nxticks = mtick.FormatStrFormatter(fmt)\nax.text(x=-1.8, y=44, s=\"An Inconvenient Sequel: 'Truth To Power' is divisive\", fontsize=12, fontweight='bold')\nax.text(x=-1.8, y=41.5, s='IMBb ratings for the film as of Aug 29', fontsize=11)\nax.set_yticklabels(range(0,50,10))\nfmt='%.0f%%'\nxticks = mtick.FormatStrFormatter(fmt)\nax.yaxis.set_major_formatter(xticks)\nplt.plot();", "_____no_output_____" ], [ "\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib.ticker as mtick\n\nplt.style.use('fivethirtyeight')\nfake =pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33],\n index=range(1,11))\n#generate the figure\nfig = plt.figure()\nfig.patch.set(facecolor='palegreen')\n\n# generate the axes (center section) for the plot\nax = ax=fake.plot.bar(color='C1', width=0.9)\nax.set(facecolor='White')\n\n#11:am\n#matplotlib.pyplot.text\nax.text(x=-1.8, y=45, s=\"An Inconvenient Sequel: 'Truth 
To Power' is divisive\", fontsize=12, fontweight='bold')\nax.text(x=-1.8, y=42.5, s='IMBb ratings for the film as of Aug 29', fontsize=11)\nax.text(x=-.8, y=39.3, s='%', fontsize=11, color='gray')\nax.set_ylabel('Percent of total votes', fontsize=9, fontweight='bold')\nax.set_xlabel('Rating', fontsize=9, fontweight='bold', labelpad=10)\n\n# fix our tick lables\nax.set_xticklabels([1,2,3,4,5,6,7,8,9,10], color='gray', rotation=0) # (range(1,11))\nax.set_yticks(range(0,50,10))\nax.set_yticklabels(range(0, 50 , 10), color='gray',)\n# fmt='%.0f%%'\n# xticks = mtick.FormatStrFormatter(fmt)\n# ax.yaxis.set_major_formatter(xticks)\n\n\nplt.show()", "_____no_output_____" ], [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(=\"darkgrid\")\nimport datetime", "_____no_output_____" ], [ "Qwelian Tanner helped me with the code", "_____no_output_____" ], [ "approve = [.455,.500,.488,.537,.549,.560,.543,.574,.536,.567,.556,.575,.543,.532,.538,.543,.520,.516,.524,.537,.520,.525,.521,.558,.536,.532,.530,.526,.528,.526,.537,.539]\ndisapprove = [.413,.438,.447,.400,.391,.381,.396,.366,.395,.375,.382,.364,.402,.415,.405,.403,.420,.424,.421,.399,.425,.422,.424,.398,.419,.416,.420,.424,.424,.430,.421,.415] \ndates = ['Jan23','Feb20','Mar11','Apr4','May21','Jun11','Jul1','Aug7','Sep21','Oct26','Nov28','Dec16','Jan17','Feb15','Mar18','Apr21','May10','Jun23','Aug16','Sep14','Oct8','Nov9','Dec9','Jan29','Feb19','Mar18','Apr17','May8','Jun14','Juj22','Aug18','Sep11']\n\nnumdays = 32\nbase = datetime.datetime.today()\ndates = [base - datetime.timedelta(days=x) for x in range(numdays)]\n \nfill = {\n 'dates': dates,\n 'approval': approve, \n 'dissaproval': disapprove, \n \n}\n \n# Calling DataFrame constructor on list \ndf1 = pd.DataFrame(data=fill)\ndf1.head(1) ", "_____no_output_____" ], [ "\ndf1.plot(x=\"dates\", y=[\"approval\", \"dissaproval\"], kind=\"line\");\n", "_____no_output_____" ] ], [ [ "# STRETCH OPTIONS\n\n### 1) Reproduce one of the following using the matplotlib or seaborn libraries:\n\n- [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) \n- [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) \n- or another example of your choice!\n\n### 2) Make more charts!\n\nChoose a chart you want to make, from [Visual Vocabulary - Vega Edition](http://ft.com/vocabulary).\n\nFind the chart in an example gallery of a Python data visualization library:\n- [Seaborn](http://seaborn.pydata.org/examples/index.html)\n- [Altair](https://altair-viz.github.io/gallery/index.html)\n- [Matplotlib](https://matplotlib.org/gallery.html)\n- [Pandas](https://pandas.pydata.org/pandas-docs/stable/visualization.html)\n\nReproduce the chart. [Optionally, try the \"Ben Franklin Method.\"](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) If you want, experiment and make changes.\n\nTake notes. Consider sharing your work with your cohort!", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]

hexsha: cb0e0c49f9c040a21c2b62c3c46273d40d0dcfef
size: 918,918
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: jupyter-notebooks/exploratory_analysis_and_SARIMAX_fitting.ipynb
max_stars_repo_name: maxnolte/river_forecast
max_stars_repo_head_hexsha: 719d4d44bff37142f01c636158ea6157d661fb69
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-10-26T22:26:42.000Z
max_stars_repo_stars_event_max_datetime: 2020-10-26T22:26:42.000Z
max_issues_repo_path: jupyter-notebooks/exploratory_analysis_and_SARIMAX_fitting.ipynb
max_issues_repo_name: maxnolte/river_forecast
max_issues_repo_head_hexsha: 719d4d44bff37142f01c636158ea6157d661fb69
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2020-08-31T19:21:52.000Z
max_issues_repo_issues_event_max_datetime: 2020-08-31T19:21:52.000Z
max_forks_repo_path: jupyter-notebooks/exploratory_analysis_and_SARIMAX_fitting.ipynb
max_forks_repo_name: maxnolte/river_forecast
max_forks_repo_head_hexsha: 719d4d44bff37142f01c636158ea6157d661fb69
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 702.536697
max_line_length: 144,792
alphanum_fraction: 0.945753
cells:
[ [ [ "## Exploratory data analysis of Dranse discharge data\n\nSummary: The data is stationary even without differencing, but ACF and PACF plots show that an hourly first order difference and a periodic 24h first order difference is needed for SARIMA fitting.\n\nNote: Final fitting done in Google Colab due to memory constraints - this notebook will throw some errors\n\n## SARIMAX model fitting\n\n### 1.) Loading the river flow (discharge) data", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ], [ "from river_forecast.training_data_access import get_combined_flow", "_____no_output_____" ], [ "flow_df = get_combined_flow()", "_____no_output_____" ], [ "plt.plot(flow_df.index, flow_df)", "_____no_output_____" ] ], [ [ "### Exploratory Analysis", "_____no_output_____" ] ], [ [ "subset_df = flow_df.loc[:]\nsubset_df['year'] = subset_df.index.year\nsubset_df['offset_datetime'] = subset_df.index + pd.DateOffset(year=2019)", "/home/mnolte/anaconda3/lib/python3.7/site-packages/pandas/core/arrays/datetimes.py:692: PerformanceWarning: Non-vectorized DateOffset being applied to Series or DatetimeIndex\n PerformanceWarning,\n" ], [ "sns.set(style=\"whitegrid\")", "_____no_output_____" ], [ "sns.set(rc={'figure.figsize':(15, 8)})\nax = sns.lineplot(x='offset_datetime', y='discharge', hue='year', data=subset_df, markers='')\nimport matplotlib.dates as mdates\nmyFmt = mdates.DateFormatter('%b')\nax.get_xaxis().set_major_formatter(myFmt)\nax.set_xlabel('Month')\nax.set_ylabel('Discharge (m^3/s)')", "_____no_output_____" ] ], [ [ "### train-test split", "_____no_output_____" ] ], [ [ "import statsmodels.api as sm", "_____no_output_____" ], [ "train = flow_df.loc[flow_df.index < pd.to_datetime('2019-01-01 00:00:00')]\ntest = flow_df.loc[(flow_df.index >= pd.to_datetime('2019-01-01 00:00:00')) & (flow_df.index < pd.to_datetime('2019-07-01 00:00:00'))]", "_____no_output_____" ], [ "fig, ax = plt.subplots()\ntrain.plot(ax=ax, label='train')\ntest.plot(ax=ax, label='test')\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "### Time series stationarity analysis", "_____no_output_____" ] ], [ [ "import statsmodels.formula.api as smf\nimport statsmodels.tsa.api as smt\nimport statsmodels.api as sm\nimport scipy.stats as scs", "_____no_output_____" ], [ "def tsplot(y, lags=None, figsize=(12, 7), style='bmh'):\n \"\"\"\n Plot time series, its ACF and PACF, calculate Dickeyโ€“Fuller test\n \n -> Adapted from https://gist.github.com/DmitrySerg/14c1af2c1744bb9931d1eae6d9713b21\n \n y - timeseries\n lags - how many lags to include in ACF, PACF calculation\n \"\"\"\n if not isinstance(y, pd.Series):\n y = pd.Series(y)\n \n with plt.style.context(style): \n fig = plt.figure(figsize=figsize)\n layout = (2, 2)\n ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)\n acf_ax = plt.subplot2grid(layout, (1, 0))\n pacf_ax = plt.subplot2grid(layout, (1, 1))\n \n y.plot(ax=ts_ax)\n t_statistic, p_value = sm.tsa.stattools.adfuller(y)[:2]\n ts_ax.set_title('Time Series Analysis Plots\\n Dickey-Fuller: p={0:.5f}'.format(p_value))\n smt.graphics.plot_acf(y, lags=lags, ax=acf_ax)\n smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax)\n plt.tight_layout()", "_____no_output_____" ] ], [ [ "#### Augmenteded Dicky-Fuller to check for stationarity\n", "_____no_output_____" ] ], [ [ "flow = flow_df['discharge']\nflow_diff_1 = (flow - flow.shift(1)).dropna()\nflow_diff_1_24 = (flow_diff_1 - 
flow_diff_1.shift(24)).dropna()\nflow_diff_24 = (flow - flow.shift(24)).dropna()", "_____no_output_____" ], [ " tsplot(flow, lags=24*5, figsize=(12, 7))", "_____no_output_____" ], [ " tsplot(flow_diff_1, lags=24*5, figsize=(12, 7))", "_____no_output_____" ], [ " tsplot(flow_diff_1_24, lags=24*7, figsize=(12, 7))", "_____no_output_____" ], [ " tsplot(flow_diff_1_24, lags=12, figsize=(12, 7))", "_____no_output_____" ] ], [ [ "#### Fitting SARIMAX", "_____no_output_____" ] ], [ [ "train['discharge'].plot()", "_____no_output_____" ], [ "from statsmodels.tsa.statespace.sarimax import SARIMAX\n### Crashed again upon completion, make sure the time series is ok -> computation moved to Colab\n\n# Create a SARIMAX model\nmodel = SARIMAX(train['discharge'], order=(4,1,1), seasonal_order=(0,1,1,24))\n# p - try 0, 1, 2, 3, 4; q is cleary one. Q is clearly 1, P is tapering off: 0.", "_____no_output_____" ], [ "# Fit the model\nresults = model.fit()", "_____no_output_____" ], [ "import pickle", "_____no_output_____" ], [ "pickle.dump(results.params, open('../models/sarimax_211_011-24_model-parameters.pkl', 'wb'))\n### # load model\n### loaded = ARIMAResults.load('model.pkl')", "_____no_output_____" ], [ "results = pickle.load(open('../models/sarimax_211_011-24_model.pkl', 'rb'))", "_____no_output_____" ], [ "pwd", "_____no_output_____" ], [ "# Print the results summary\nprint(results.summary())", " SARIMAX Results \n==========================================================================================\nDep. Variable: discharge No. Observations: 26304\nModel: SARIMAX(4, 1, 1)x(0, 1, 1, 24) Log Likelihood -51111.394\nDate: Wed, 08 Jul 2020 AIC 102236.789\nTime: 16:52:10 BIC 102294.024\nSample: 01-01-2016 HQIC 102255.270\n - 12-31-2018 \nCovariance Type: opg \n==============================================================================\n coef std err z P>|z| [0.025 0.975]\n------------------------------------------------------------------------------\nar.L1 0.5842 0.059 9.911 0.000 0.469 0.700\nar.L2 -0.2164 0.020 -10.875 0.000 -0.255 -0.177\nar.L3 0.0708 0.008 8.444 0.000 0.054 0.087\nar.L4 -0.0682 0.004 -17.258 0.000 -0.076 -0.060\nma.L1 -0.2464 0.059 -4.171 0.000 -0.362 -0.131\nma.S.L24 -0.7444 0.002 -375.686 0.000 -0.748 -0.741\nsigma2 2.8613 0.008 344.781 0.000 2.845 2.878\n===================================================================================\nLjung-Box (Q): 666.23 Jarque-Bera (JB): 316722.26\nProb(Q): 0.00 Prob(JB): 0.00\nHeteroskedasticity (H): 0.89 Skew: 0.10\nProb(H) (two-sided): 0.00 Kurtosis: 20.01\n===================================================================================\n\nWarnings:\n[1] Covariance matrix calculated using the outer product of gradients (complex-step).\n" ], [ "results", "_____no_output_____" ] ], [ [ "#### Plotting the forecast", "_____no_output_____" ] ], [ [ "# Generate predictions\none_step_forecast = results.get_prediction(start=-48)\n\n# Extract prediction mean\nmean_forecast = one_step_forecast.predicted_mean\n\n# Get confidence intervals of predictions\nconfidence_intervals = one_step_forecast.conf_int()\n\n# Select lower and upper confidence limits\nlower_limits = confidence_intervals.loc[:, 'lower discharge']\nupper_limits = confidence_intervals.loc[:, 'upper discharge']\n", "_____no_output_____" ], [ "# plot the dranse data\n\n# plot your mean predictions\nplt.plot(mean_forecast.index, mean_forecast, color='r', label='forecast')\n\n# shade the area between your confidence limits\nplt.fill_between(lower_limits.index, lower_limits, \n 
upper_limits, color='pink')\n\n# set labels, legends and show plot\nplt.xlabel('Date')\nplt.ylabel('Discharge')\nplt.title('hourly forecaset')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "# Generate predictions\ndynamic_forecast = results.get_prediction(start=-6, dynamic=True)\n\n# Extract prediction mean\nmean_forecast = dynamic_forecast.predicted_mean\n\n# Get confidence intervals of predictions\nconfidence_intervals = dynamic_forecast.conf_int(alpha=0.32) # 95 percent confidence interval\n\n# Select lower and upper confidence limits\nlower_limits = confidence_intervals.loc[:,'lower discharge']\nupper_limits = confidence_intervals.loc[:,'upper discharge']\n", "_____no_output_____" ], [ "# plot your mean predictions\nplt.plot(mean_forecast.index, mean_forecast, color='r', label='forecast')\n\n# shade the area between your confidence limits\nplt.fill_between(lower_limits.index, lower_limits, \n upper_limits, color='pink', alpha=0.5)\n\n# set labels, legends and show plot\nplt.xlabel('Date')\nplt.ylabel('Discharge')\nplt.title('dynamic forecast')\nplt.legend()", "_____no_output_____" ] ], [ [ "#### Finding the best model manually", "_____no_output_____" ] ], [ [ "# Create empty list to store search results\norder_aic_bic=[]\n\n# Loop over p values from 0-2\nfor p in range(0, 5):\n print(p)\n # create and fit ARMA(p,q) model\n model = SARIMAX(train['discharge'], order=(p,1,1), seasonal_order=(0,1,1,24))\n # p - try 0, 1, 2, 3, 4; q is cleary one. Q is clearly 1, P is tapering off: 0.\n results = model.fit()\n \n # Append order and results tuple\n order_aic_bic.append((p,results.aic, results.bic))", "0\n1\n2\n3\n4\n" ], [ "# Construct DataFrame from order_aic_bic\norder_df = pd.DataFrame(order_aic_bic, \n columns=['p', 'AIC', 'BIC'])\n\n# Print order_df in order of increasing AIC\nprint(order_df.sort_values('AIC'))\n\n# Print order_df in order of increasing BIC\nprint(order_df.sort_values('BIC'))", " p AIC BIC\n4 4 102236.788642 102294.024320\n3 3 102311.143591 102360.202744\n1 1 102320.155001 102352.861103\n2 2 102322.067849 102362.950476\n0 0 102324.493738 102349.023315\n p AIC BIC\n4 4 102236.788642 102294.024320\n0 0 102324.493738 102349.023315\n1 1 102320.155001 102352.861103\n3 3 102311.143591 102360.202744\n2 2 102322.067849 102362.950476\n" ], [ "# Create the 4 diagostics plots\nresults.plot_diagnostics()\nplt.show()\n\n# Print summary\nprint(results.summary())", "_____no_output_____" ] ], [ [ "### Forecasting", "_____no_output_____" ] ], [ [ "results.forecast(steps=6)", "_____no_output_____" ], [ "resB.forecast(steps=6)", "/home/mnolte/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:583: ValueWarning: No supported index is available. 
Prediction results will be given with an integer index beginning at `start`.\n ValueWarning)\n" ], [ "import river_forecast.api_data_access\nimport importlib, sys", "_____no_output_____" ], [ "importlib.reload(river_forecast.api_data_access)", "_____no_output_____" ], [ "rivermap_data = river_forecast.api_data_access.RivermapDataRetriever()", "_____no_output_____" ], [ "recent_flow_df = rivermap_data.get_latest_river_flow(n_days=3, station='Dranse')", "_____no_output_____" ], [ "recent_flow_df", "_____no_output_____" ], [ "modelB = SARIMAX(recent_flow_df.iloc[:2].asfreq('h'), order=(4,1,1), seasonal_order=(0,1,1,24))\nresB = modelB.smooth(results.params)", "_____no_output_____" ], [ "resB.forecast(steps=6)", "_____no_output_____" ], [ "from river_forecast.api_data_access import RivermapDataRetriever\ndata = RivermapDataRetriever().get_standard_dranse_data()\n", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "import importlib\nimport river_forecast.forecast\nimportlib.reload(river_forecast.forecast)", "_____no_output_____" ], [ "sf = river_forecast.forecast.SARIMAXForecast()", "_____no_output_____" ], [ "sf.generate_prediction_plot(data)", "_____no_output_____" ], [ "sf.dynamic_forecast(data)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]

hexsha: cb0e0c57c5f2650a5836a56116b82fd11d8226a8
size: 2,762
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: jhub-spawner-client/plot_isotonic_regression.ipynb
max_stars_repo_name: zach-schoenberger/CS6460
max_stars_repo_head_hexsha: 9e4d99ac5c0a1192f5b856b06357cca4170c1a03
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: jhub-spawner-client/plot_isotonic_regression.ipynb
max_issues_repo_name: zach-schoenberger/CS6460
max_issues_repo_head_hexsha: 9e4d99ac5c0a1192f5b856b06357cca4170c1a03
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: jhub-spawner-client/plot_isotonic_regression.ipynb
max_forks_repo_name: zach-schoenberger/CS6460
max_forks_repo_head_hexsha: 9e4d99ac5c0a1192f5b856b06357cca4170c1a03
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-06-22T07:54:13.000Z
max_forks_repo_forks_event_max_datetime: 2020-06-22T07:54:13.000Z
avg_line_length: 51.148148
max_line_length: 1,387
alphanum_fraction: 0.591238
cells:
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Isotonic Regression\n\n\nAn illustration of the isotonic regression on generated data. The\nisotonic regression finds a non-decreasing approximation of a function\nwhile minimizing the mean squared error on the training data. The benefit\nof such a model is that it does not assume any form for the target\nfunction such as linearity. For comparison a linear regression is also\npresented.\n\n\n", "_____no_output_____" ] ], [ [ "print(__doc__)\n\n# Author: Nelle Varoquaux <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# License: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.isotonic import IsotonicRegression\nfrom sklearn.utils import check_random_state\n\nn = 100\nx = np.arange(n)\nrs = check_random_state(0)\ny = rs.randint(-50, 50, size=(n,)) + 50. * np.log1p(np.arange(n))\n\n# #############################################################################\n# Fit IsotonicRegression and LinearRegression models\n\nir = IsotonicRegression()\n\ny_ = ir.fit_transform(x, y)\n\nlr = LinearRegression()\nlr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression\n\n# #############################################################################\n# Plot result\n\nsegments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]\nlc = LineCollection(segments, zorder=0)\nlc.set_array(np.ones(len(y)))\nlc.set_linewidths(np.full(n, 0.5))\n\nfig = plt.figure()\nplt.plot(x, y, 'r.', markersize=12)\nplt.plot(x, y_, 'g.-', markersize=12)\nplt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')\nplt.gca().add_collection(lc)\nplt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')\nplt.title('Isotonic regression')\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]

hexsha: cb0e1592902bd68a178e8c9f868a71d2457f090b
size: 10,422
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: examples/archive/package_management/subpackage_graph.ipynb
max_stars_repo_name: lipteck/pymedphys
max_stars_repo_head_hexsha: 6e8e2b5db8173eafa6006481ceeca4f4341789e0
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2020-02-04T03:21:20.000Z
max_stars_repo_stars_event_max_datetime: 2020-04-11T14:17:53.000Z
max_issues_repo_path: prototyping/dependency-tree/old-prototypes/subpackage_graph.ipynb
max_issues_repo_name: SimonBiggs/pymedphys
max_issues_repo_head_hexsha: 83f02eac6549ac155c6963e0a8d1f9284359b652
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 6
max_issues_repo_issues_event_min_datetime: 2020-10-06T15:36:46.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-27T05:15:17.000Z
max_forks_repo_path: prototyping/dependency-tree/old-prototypes/subpackage_graph.ipynb
max_forks_repo_name: SimonBiggs/pymedphys
max_forks_repo_head_hexsha: 83f02eac6549ac155c6963e0a8d1f9284359b652
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-12-20T14:14:00.000Z
max_forks_repo_forks_event_max_datetime: 2020-12-20T14:14:00.000Z
avg_line_length: 23.84897
max_line_length: 149
alphanum_fraction: 0.489925
cells:
[ [ [ "%load_ext autoreload\n%autoreload 2\n\nfrom pymedphys_monomanage.tree import PackageTree\nimport networkx as nx\nfrom copy import copy", "_____no_output_____" ], [ "package_tree = PackageTree('../../packages')", "_____no_output_____" ], [ "package_tree.package_dependencies_digraph", "_____no_output_____" ], [ "package_tree.roots", "_____no_output_____" ], [ "modules = list(package_tree.digraph.neighbors('pymedphys_analysis'))\nmodules", "_____no_output_____" ], [ "internal_packages = copy(package_tree.roots)\ninternal_packages.remove('pymedphys')", "_____no_output_____" ], [ "module_paths = [\n item\n for package in internal_packages\n for item in package_tree.digraph.neighbors(package)\n]\n\nmodules = {\n item: os.path.splitext(item)[0].replace(os.sep, '.')\n for item in module_paths\n}\n\nmodules", "_____no_output_____" ], [ "module_digraph = nx.DiGraph()\n\n\n\n", "_____no_output_____" ], [ "dependencies = {\n module.replace(os.sep, '.'): [\n '.'.join(item.split('.')[0:2])\n for item in\n package_tree.descendants_dependencies(module)['internal_module'] + package_tree.descendants_dependencies(module)['internal_package']\n ]\n for module in modules.keys()\n}\n\ndependencies", "_____no_output_____" ], [ "dependents = {\n key: [] for key in dependencies.keys()\n}\nfor key, values in dependencies.items():\n for item in values:\n dependents[item].append(key)\n \ndependents", "_____no_output_____" ], [ "current_modules = [\n item.replace(os.sep, '.')\n for item in package_tree.digraph.neighbors('pymedphys_analysis')\n]\ncurrent_modules", "_____no_output_____" ], [ "def remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n else:\n return text\n", "_____no_output_____" ], [ "graphed_module = 'pymedphys_monomanage'\n\n\ncurrent_modules = [\n item.replace(os.sep, '.')\n for item in package_tree.digraph.neighbors(graphed_module)\n]\ncurrent_modules\n\n\ndef simplify(text):\n text = remove_prefix(text, \"{}.\".format(graphed_module))\n text = remove_prefix(text, 'pymedphys_')\n \n return text", "_____no_output_____" ], [ "current_modules", "_____no_output_____" ], [ "module_internal_relationships = {\n module.replace(os.sep, '.'): [\n '.'.join(item.split('.')[0:2])\n for item in\n package_tree.descendants_dependencies(module)['internal_module']\n ]\n for module in package_tree.digraph.neighbors(graphed_module)\n}\n\nmodule_internal_relationships", "_____no_output_____" ], [ "dag = nx.DiGraph()\n\nfor key, values in module_internal_relationships.items():\n dag.add_node(key)\n dag.add_nodes_from(values)\n edge_tuples = [\n (key, value) for value in values\n ]\n dag.add_edges_from(edge_tuples)\n \n\ndag.edges()\n", "_____no_output_____" ], [ "def get_levels(dag):\n\n topological = list(nx.topological_sort(dag))\n\n level_map = {}\n for package in topological[::-1]:\n depencencies = nx.descendants(dag, package)\n levels = {0}\n for dependency in depencencies:\n try:\n levels.add(level_map[dependency])\n except KeyError:\n pass\n max_level = max(levels)\n level_map[package] = max_level + 1\n\n levels = {\n level: []\n for level in range(max(level_map.values()) + 1)\n }\n for package, level in level_map.items():\n levels[level].append(package)\n \n return levels\n \nlevels = get_levels(dag)\nlevels", "_____no_output_____" ], [ "nodes = \"\"\n\nfor level in range(max(levels.keys()) + 1):\n if levels[level]:\n trimmed_nodes = [\n simplify(node) for node in levels[level]\n ]\n\n grouped_packages = '\"; \"'.join(trimmed_nodes)\n nodes += \"\"\"\n {{ rank = same; 
\"{}\"; }}\n \"\"\".format(grouped_packages)\n \nprint(nodes)", "_____no_output_____" ], [ "edges = \"\"\ncurrent_packages = \"\"\n\ncurrent_dependents = set()\ncurrent_dependencies = set()\n\n\nfor module in current_modules:\n module_repr = simplify(module)\n current_packages += '\"{}\";\\n'.format(module_repr)\n \n for dependency in dependencies[module]:\n simplified = simplify(dependency)\n edges += '\"{}\" -> \"{}\";\\n'.format(module_repr, simplified)\n if not dependency in current_modules:\n current_dependencies.add(simplified)\n \n for dependent in dependents[module]:\n simplified = simplify(dependent)\n edges += '\"{}\" -> \"{}\";\\n'.format(simplified, module_repr)\n if not dependent in current_modules:\n current_dependents.add(simplified)\n \n\nexternal_ranks = \"\"\nif current_dependents:\n grouped_dependents = '\"; \"'.join(current_dependents)\n external_ranks += '{{ rank = same; \"{}\"; }}\\n'.format(grouped_dependents)\n\nif current_dependencies:\n grouped_dependencies = '\"; \"'.join(current_dependencies)\n external_ranks += '{{ rank = same; \"{}\"; }}\\n'.format(grouped_dependencies)\n \nprint(edges)", "_____no_output_____" ], [ "dot_file_contents = \"\"\"\n strict digraph {{\n rankdir = LR;\n \n \n {}\n \n subgraph cluster_0 {{\n {}\n label = \"{}\";\n style = dashed;\n {}\n }}\n \n {}\n }}\n\"\"\".format(external_ranks, current_packages, graphed_module, nodes, edges)\nprint(dot_file_contents)", "_____no_output_____" ], [ "\tsubgraph cluster_0 {\n\t\tstyle=filled;\n\t\tcolor=lightgrey;\n\t\tnode [style=filled,color=white];\n\t\ta0 -> a1 -> a2 -> a3;\n\t\tlabel = \"process #1\";\n\t}", "_____no_output_____" ], [ "package_tree.descendants_dependencies('pymedphys_monomanage/parse')", "_____no_output_____" ], [ "package_tree.imports", "_____no_output_____" ], [ "list(package_tree.digraph.nodes)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]

hexsha: cb0e1c7de42096a8ce5addcad3cfde600630e78a
size: 641,458
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: SongTidy/FinalTidy/final_tidy_yewon_ver02.ipynb
max_stars_repo_name: jyw7664/M5_Idol_lyrics
max_stars_repo_head_hexsha: 9cec56ba92344355182674bd25b6c42b9f0a763a
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2018-09-04T14:06:26.000Z
max_stars_repo_stars_event_max_datetime: 2021-08-30T02:45:58.000Z
max_issues_repo_path: SongTidy/FinalTidy/final_tidy_yewon_ver02.ipynb
max_issues_repo_name: jyw7664/M5_Idol_lyrics
max_issues_repo_head_hexsha: 9cec56ba92344355182674bd25b6c42b9f0a763a
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 4
max_issues_repo_issues_event_min_datetime: 2018-09-05T05:47:19.000Z
max_issues_repo_issues_event_max_datetime: 2018-09-14T12:28:42.000Z
max_forks_repo_path: SongTidy/FinalTidy/final_tidy_yewon_ver02.ipynb
max_forks_repo_name: jyw7664/M5_Idol_lyrics
max_forks_repo_head_hexsha: 9cec56ba92344355182674bd25b6c42b9f0a763a
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 12
max_forks_repo_forks_event_min_datetime: 2018-09-04T15:55:55.000Z
max_forks_repo_forks_event_max_datetime: 2021-08-20T02:47:20.000Z
avg_line_length: 45.966177
max_line_length: 173
alphanum_fraction: 0.351708
cells:
[ [ [ "##### M5_Idol_lyrics/SongTidy ํด๋”์˜ ์ „์ฒ˜๋ฆฌ ipnb์„ ์ด์ •๋ฆฌํ•˜๊ณ , ์ž˜๋ชป๋œ ์ฝ”๋“œ๋ฅผ ์ˆ˜์ •ํ•œ ๋…ธํŠธ๋ถ", "_____no_output_____" ], [ "### ๊ฐ€์‚ฌ ๋ฐ์ดํ„ฐ(song_tidy01) ์ „์ฒ˜๋ฆฌ\n**df = pd.read_csv('rawdata/song_data_raw_ver01.csv')**<br>\n**!!!!!!!!!!!!!์ˆœ์„œ๋กœ df(๋ฒˆํ˜ธ)๋กœ ์ง€์ •!!!!!!!!!!!!!**\n1. Data20180915/song_data_raw_ver01.csv ๋ฐ์ดํ„ฐ๋กœ ์‹œ์ž‘ํ•จ (ํ‚ค์Šค์žˆ๋Š”์ง€์ฒดํฌ)\n - ์ œ๋ชฉ์— ๋ฆฌ๋ฏน์Šค,๋ผ์ด๋ธŒ,inst,์˜์ผ์ค‘,ver ์ธ ํ–‰\n - ์•จ๋ฒ”์— ๋‚˜๊ฐ€์ˆ˜, ๋ถˆ๋ช…, ๋ณต๋ฉด์ธ ํ–‰\n - ํƒ€์ดํ‹€, ๊ฐ€์‚ฌ, ์•จ๋ฒ”์— ํžˆ๋ผ๊ฐ€๋‚˜/๊ฐ€ํƒ€์นด๋‚˜๊ฐ€ ๋“ค์–ด๊ฐ„ ํ–‰\n - is_title์ด nan์ธ ํ–‰์„ '์ˆ˜๋ก๊ณก'์œผ๋กœ ๋ณ€๊ฒฝ\n - ๊ฐ€์‚ฌ์—\\r\\r\\n์„ ๊ณต๋ฐฑ์œผ๋กœ ๋ณ€๊ฒฝ\n\n2. ํžˆ๋ผ๊ฐ€๋‚˜/๊ฐ€ํƒ€์นด๋‚˜๋ฅผ ์ œ๊ฑฐํ•œ ํ›„์—๋„ ์ผ๋ณธ์–ด ๊ฐ€์‚ฌ๊ฐ€ ํ•œ๊ธ€๋กœ ํฌํ•จ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ<br>--> contains๋กœ ํ™•์ธํ•œ๋’ค ํ–‰์ œ๊ฑฐ ๋ฐ˜๋ณต\n3. ๊ฐ€์‚ฌ๊ฐ€ ๋ชจ๋‘ ์˜์–ด, ์ค‘๊ตญ์–ด์ธ ๊ฒฝ์šฐ<br>--> ๊ฐ€์‚ฌ์— ํ•œ๊ธ€์ด ํ•˜๋‚˜๋„ ๋“ค์–ด๊ฐ€์ง€ ์•Š์€ ํ–‰ ์ œ๊ฑฐ\n4. creator์นผ๋Ÿผ์„ lyricist, composer, arranger๋กœ ๋‚˜๋ˆ” --> creator_tidy_kavin_ver02.ipynb์ฐธ๊ณ <br>\n**์—ฌ๊ธฐ์„œ df4.to_csv('tidydata/tidy01.csv', index=False) ๋กœ ์ค‘๊ฐ„ ์ €์žฅ**\n\n5. ์ค‘๋ณต๋…ธ๋ž˜(๋„์–ด์“ฐ๊ธฐ,๋Œ€์†Œ๋ฌธ์ž,ํ”ผ์ฒ˜๋ง์— ์˜ํ•œ)๋ฅผ ์ œ๊ฑฐ --> song_tidy_yoon_ver01.ipynb์ฐธ๊ณ <br>\n **!!!ํ™•์ธํ•ด๋ณด๋‹ˆ ํฌ๋กค๋ง์‹œ์— ๋ฐœ๋งค์ผ์ˆœ์„œ๋Œ€๋กœ ๋‹ด๊ธฐ์ง€ ์•Š์•˜์Œ. sort by 'artist', 'release_date'๋กœ ์ฃผ๊ณ  ์ค‘๋ณต์ œ๊ฑฐํ•˜๊ธฐ.**<br>**์—ฌ๊ธฐ์„œ df5.to_csv('tidydata/song_tidy01.csv', index=False) ๋กœ ์ €์žฅ**\n-----------------\n### ์ž‘์‚ฌ์ž‘๊ณก ๋ฐ์ดํ„ฐ(lyricist_tidy01) ์ „์ฒ˜๋ฆฌ\n**๋‹ค์‹œ ์ „ํŒŒ์ผ๋กœ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ df6 = pd.read_csv('tidydata/tidy01.csv')**\n6. creator๊ฐ€ ์—†๋Š” ํ–‰ ์ œ๊ฑฐ\n7. ์ค‘๋ณต๋…ธ๋ž˜(๋„์–ด์“ฐ๊ธฐ,๋Œ€์†Œ๋ฌธ์ž,ํ”ผ์ฒ˜๋ง์— ์˜ํ•œ)๋ฅผ ์ œ๊ฑฐ<br>\n**์—ฌ๊ธฐ์„œ df7.to_csv('tidydata/lyricist_tidy01.csv', index=False) ๋กœ ์ €์žฅ**", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport re", "_____no_output_____" ], [ "df1 = pd.read_csv('C:/Users/pje17/Desktop/Lyricsis/M5_Idol_lyrics/Data/Data20180921/song_data_raw_20180921_ver02.csv')\ndf1.head()", "_____no_output_____" ], [ "# ํ‚ค์Šค์—†์Œํ™•์ธ\ndf1[df1['artist'] == 'ํ‚ค์Šค']", "_____no_output_____" ], [ "df1.shape", "_____no_output_____" ], [ "# ์ธ๋ฑ์Šค์นผ๋Ÿผ ๋“œ๋ž\ndf1 = df1.drop(df1.columns[0], axis=1)\ndf1", "_____no_output_____" ], [ "# ๊ฐ€์‚ฌ ์ •๋ณด ์—†๋Š” ํ–‰ ๋“œ๋ž\ndf1 = df1[df1.lyrics.notnull()]\ndf1.shape", "_____no_output_____" ], [ "# ๊ณต๋ฐฑ ๋ฐ ์ค„๋ฐ”๊ฟˆ ๋ฐ”๊ฟ”์ฃผ๊ธฐ\ndf1['lyrics'] = df1['lyrics'].str.replace(r'\\r\\r\\r\\n|\\r\\r\\n','<br>')\ndf1['creator'] = df1['creator'].str.replace(r'\\r|\\n',' ')\ndf1", "C:\\Users\\pje17\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \nC:\\Users\\pje17\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n" ], [ "# is_title์ด nan์ผ ๋•Œ = '์ˆ˜๋ก๊ณก' \ndf1['is_title'] = df1['is_title'].fillna('์ˆ˜๋ก๊ณก')\ndf1", 
"C:\\Users\\pje17\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \n" ], [ "# ์ œ๋ชฉ์— ๋ฆฌ๋ฏน์Šค,๋ผ์ด๋ธŒ,inst,์˜์ผ์ค‘,ver ์ธ ํ–‰ ์ œ๊ฑฐ\ndf1 = df1[df1.title.str.contains(r'\\(.*\\s*([Rr]emix|[Mm]ix|[Ll]ive|[Ii]nst|[Cc]hn|[Jj]ap|[Ee]ng|[Vv]er)\\s*.*\\)') == False]", "C:\\Users\\pje17\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: UserWarning: This pattern has match groups. To actually get the groups, use str.extract.\n \n" ], [ "# ์•จ๋ฒ”์— ๋‚˜๊ฐ€์ˆ˜, ๋ถˆ๋ช…, ๋ณต๋ฉด์ธ ํ–‰ ์ œ๊ฑฐ\ndf1 = df1[df1.album.str.contains(r'(๊ฐ€์ˆ˜๋‹ค|๋ถˆํ›„์˜|๋ณต๋ฉด๊ฐ€์™•)') == False]", "C:\\Users\\pje17\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: UserWarning: This pattern has match groups. To actually get the groups, use str.extract.\n \n" ], [ "# ํƒ€์ดํ‹€์— ํžˆ๋ผ๊ฐ€๋‚˜/๊ฐ€ํƒ€์นด๋‚˜๊ฐ€ ๋“ค์–ด๊ฐ„ ํ–‰ ์‚ญ์ œ\ndf1 = df1[df1.title.str.contains(u'[\\u3040-\\u309F\\u30A0-\\u30FF\\u31F0-\\u31FF]+') == False]\ndf1", "_____no_output_____" ], [ "# ์•ˆ์ง€์›Œ์ง„ ํ–‰์ด ์žˆ์Œ\ndf1.loc[df1['lyrics'].str.contains((u'[\\u3040-\\u309F\\u30A0-\\u30FF\\u31F0-\\u31FF]+'), regex=True)]", "_____no_output_____" ], [ "# ํ•œ ๋ฒˆ ๋” ์‚ญ์ œ\ndf1 = df1[df1.lyrics.str.contains(u'[\\u3040-\\u309F\\u30A0-\\u30FF\\u31F0-\\u31FF]+') == False]\ndf1", "_____no_output_____" ], [ "# ๋‹ค ์‚ญ์ œ ๋œ ๊ฒƒ ํ™•์ธ\ndf1.loc[df1['lyrics'].str.contains((u'[\\u3040-\\u309F\\u30A0-\\u30FF\\u31F0-\\u31FF]+'), regex=True)]", "_____no_output_____" ], [ "df1.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 13569 entries, 0 to 18091\nData columns (total 9 columns):\ntitle 13569 non-null object\nartist 13569 non-null object\nalbum 13569 non-null object\nrelease_date 13569 non-null object\nsong_genre 13569 non-null object\nis_title 13569 non-null object\nlike 13569 non-null object\ncreator 11030 non-null object\nlyrics 13569 non-null object\ndtypes: object(9)\nmemory usage: 1.0+ MB\n" ] ], [ [ "## --------------------2๋ฒˆ ์ „์ฒ˜๋ฆฌ ์‹œ์ž‘--------------------", "_____no_output_____" ] ], [ [ "# ํžˆ๋ผ๊ฐ€๋‚˜/๊ฐ€ํƒ€์นด๋‚˜๋ฅผ ์ œ๊ฑฐํ•œ ํ›„์—๋„ ์ผ๋ณธ์–ด ๊ฐ€์‚ฌ๊ฐ€ ํ•œ๊ธ€๋กœ ํฌํ•จ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ ์ „์ฒ˜๋ฆฌ\ndf2 = df1[df1.lyrics.str.contains(r'(์™€ํƒ€์‹œ|ํ˜ผํ† |์•„๋…ธํžˆ|ํ˜ผ๋˜|๋งˆ์„ผ|์—๊ฐ€์ด|ํžˆํ† ์ธ |ํ›„ํƒ€์ธ |๋งˆ์น˜๋…ธ|๋ชฌ๋‹ค์ด|๋งˆ์—๋…ธ|์•„๋ฉ”๊ฐ€)') == False]", "C:\\Users\\pje17\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: UserWarning: This pattern has match groups. To actually get the groups, use str.extract.\n \n" ], [ "df2= df2[df2.lyrics.str.contains(r'(ํžˆ์นด๋ฆฌ|๋ฏธ๋ผ์ด|์˜ค๋‚˜์ง€|์ถ‹|์นด๋ผ๋‹ค|ํฅ|์ฆ›๋˜|๋‚˜์บ‡|ํ† ๋‚˜๋ฆฌ|๋ชป๋˜|๋Ž…์™€|์ฝ”์ด|ํžˆํ† ๋ฆฌ|๋ง›์Šค๊ตฌ|ํ›„ํƒ€๋ฆฌ|์ผ€์‹œํ‚ค|์Ÿˆ๋‚˜์ด|์ž‡์ŠŒ|์ด์ธ ๋ชจ|์•„ํƒ€๋ผ|๋ด์ƒค|์ฆˆ์ฟ |์—๊ฐ€์˜ค|์†Œ๋ผ์˜ค|๋‚œํ…Œ|๊ณ ๋ฉ˜๋„ค|์•„์ด์‹œํ…Œ|๋‹คํ‚ค์‹œ|์œ ๋ฉ”|์ž‡ํƒ„๋‹ค|์†Œ๋ ˆ|๋ฐ”์‡ผ)') == False]", "C:\\Users\\pje17\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: UserWarning: This pattern has match groups. 
To actually get the groups, use str.extract.\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "df2= df2[df2.lyrics.str.contains(r'(ํ‚ค๋ฏธ๋‹ˆ|๋ณด์ฟ |์„ธ์นด์ด|๋„์ฝ”๋ฐ|์ฆ›ํ† |์†Œ๋ฐ”๋‹ˆ|๋ฐ”์‡ผ|๋ ˆ๋ฃจ|์Šค๋ฒ ํ…Œ|ํƒ“ํ…Œ|์‹ฏํ…Œ|์š”์ฟ )') == False]", "C:\\Users\\pje17\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: UserWarning: This pattern has match groups. To actually get the groups, use str.extract.\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "# 450๊ณก ์ œ๊ฑฐ\ndf2.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 13119 entries, 0 to 18091\nData columns (total 9 columns):\ntitle 13119 non-null object\nartist 13119 non-null object\nalbum 13119 non-null object\nrelease_date 13119 non-null object\nsong_genre 13119 non-null object\nis_title 13119 non-null object\nlike 13119 non-null object\ncreator 10928 non-null object\nlyrics 13119 non-null object\ndtypes: object(9)\nmemory usage: 1.0+ MB\n" ] ], [ [ "## --------------------3๋ฒˆ ์ „์ฒ˜๋ฆฌ ์‹œ์ž‘--------------------", "_____no_output_____" ] ], [ [ "# ํ•œ๊ธ€์ด ํ•œ๊ธ€์ž๋ผ๋„ ๋‚˜์˜ค๋Š” ๊ฒƒ๋งŒ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. \n# 469๊ณก ์ œ๊ฑฐ\ndf3 = df2[df2.lyrics.str.contains(r'[๊ฐ€-ํžฃ]+') == True]\ndf3.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 12667 entries, 0 to 18091\nData columns (total 9 columns):\ntitle 12667 non-null object\nartist 12667 non-null object\nalbum 12667 non-null object\nrelease_date 12667 non-null object\nsong_genre 12667 non-null object\nis_title 12667 non-null object\nlike 12667 non-null object\ncreator 10726 non-null object\nlyrics 12667 non-null object\ndtypes: object(9)\nmemory usage: 989.6+ KB\n" ] ], [ [ "## --------------------4๋ฒˆ ์ „์ฒ˜๋ฆฌ ์‹œ์ž‘--------------------", "_____no_output_____" ] ], [ [ "# creator์นผ๋Ÿผ์„ lyricist, composer, arranger๋กœ ๋‚˜๋ˆ„๊ธฐ\ndf4 = df3.copy()", "_____no_output_____" ], [ "# ๋ฆฌ์ธ๋ฑ์Šคํ•ด์ค˜์•ผ ์•ˆ๋ฐ€๋ฆผ\ndf4 = df4.reset_index(drop=True)", "_____no_output_____" ], [ "# ์ „์ฒ˜๋ฆฌ ํ•จ์ˆ˜\ndef preprocess(text):\n splitArr = list(filter(None, re.split(\"(์ž‘์‚ฌ)|(์ž‘๊ณก)|(ํŽธ๊ณก)\", text)))\n\n lyricist = []\n composer = []\n arranger = []\n \n lyricist.clear()\n composer.clear()\n arranger.clear()\n\n i = 0\n for i in range(0, len(splitArr)):\n if splitArr[i] == \"์ž‘์‚ฌ\":\n lyricist.append(splitArr[i-1].strip())\n elif splitArr[i] == \"์ž‘๊ณก\":\n composer.append(splitArr[i-1].strip())\n elif splitArr[i] == \"ํŽธ๊ณก\":\n arranger.append(splitArr[i-1].strip())\n i = i + 1\n result = [', '.join(lyricist), ', '.join(composer), ', '.join(arranger)]\n return result", "_____no_output_____" ], [ "# ํ–‰๋งˆ๋‹ค ์ž‘์‚ฌ/์ž‘๊ณก/ํŽธ๊ณก๊ฐ€ ์ „์ฒ˜๋ฆฌ ๊ฒฐ๊ณผ ๋ณด๊ธฐ\npreprocess(df4.creator[0])", "_____no_output_____" ], [ "# song ๋ฐ์ดํ„ฐํ”„๋ ˆ์ž„ ์ „์ฒ˜๋ฆฌํ•จ์ˆ˜ ์ด์šฉํ•˜์—ฌ ์ „์ฒ˜๋ฆฌ ํ›„ dataframe ์ถ”๊ฐ€๋กœ ๋งŒ๋“ค๊ธฐ\ni = 0\nlyricist = []\ncomposer = []\narranger = []\n\nlyricist.clear()\ncomposer.clear()\narranger.clear()\n \nfor i in range(0, len(df4)):\n try:\n lyricist.append(str(preprocess(df4.creator[i])[0]))\n composer.append(str(preprocess(df4.creator[i])[1]))\n arranger.append(str(preprocess(df4.creator[i])[2]))\n except:\n lyricist.append('')\n composer.append('')\n arranger.append('')\n \npreprocessing_result = pd.DataFrame({\"lyricist\" : lyricist, \"composer\" : composer, \"arranger\" : arranger})", "_____no_output_____" ], [ "# ์ธ๋ฑ์Šค 3 ์€ ๋ฆฌ๋ฏน์Šค๋ผ ์ œ๊ฑฐ๋˜์–ด์„œ ์•ˆ๋ณด์ž„\npreprocessing_result.head()", "_____no_output_____" ], [ "# ๋‘ ๊ฐœ์˜ ๋ฐ์ดํ„ฐํ”„๋ ˆ์ž„ ๊ธธ์ด๊ฐ€ ๊ฐ™์€์ง€ ํ™•์ธ\nlen(df4) == 
len(preprocessing_result)", "_____no_output_____" ], [ "# ๋‘ ๊ฐœ์˜ ๋ฐ์ดํ„ฐํ”„๋ ˆ์ž„ ํ•ฉ์น˜๊ธฐ\ndf4 = pd.concat([df4, preprocessing_result], axis=1)", "_____no_output_____" ], [ "df4", "_____no_output_____" ], [ "# ์—ฌ๊ธฐ์„œ df4.to_csv('tidydata/tidy01.csv', index=False) ๋กœ ์ค‘๊ฐ„ ์ €์žฅ\ndf4.to_csv('tidy03.csv', index=False)", "_____no_output_____" ] ], [ [ "## --------------------5๋ฒˆ ์ „์ฒ˜๋ฆฌ ์‹œ์ž‘--------------------", "_____no_output_____" ] ], [ [ "df5 = df4.copy()", "_____no_output_____" ], [ "# ๋ฐœ๋งค์ผ์ด ๋„๊ฐ’์ธ ๊ณก์ด ๋‚จ์ง€ ์•Š๋„๋ก ๋„๊ฐ’์ธ ๊ฒƒ๋“ค์€ ๋ฏธ๋ž˜์˜ ๋‚ ์งœ๋กœ ์ฑ„์›Œ์ค€๋‹ค.\nd = {'':'2019.01.01','-':'2019.01.01'}\ndf5['release_date'] = df5['release_date'].replace(d)", "_____no_output_____" ], [ "# ํ™•์ธํ•ด๋ณด๋‹ˆ ํฌ๋กค๋ง์‹œ์— ๋ฐœ๋งค์ผ์ˆœ์„œ๋Œ€๋กœ ๋‹ด๊ธฐ์ง€ ์•Š์•˜์Œ\n# !!!!!!sort by 'artist', 'release_date'๋กœ ์ฃผ๊ณ  ์ค‘๋ณต์ œ๊ฑฐํ•˜๊ธฐ\ndf5 = df5.sort_values(by=['artist', 'release_date'])", "_____no_output_____" ], [ "# ์ค‘๋ณต๋…ธ๋ž˜(๋„์–ด์“ฐ๊ธฐ,๋Œ€์†Œ๋ฌธ์ž,ํ”ผ์ฒ˜๋ง์— ์˜ํ•œ)๋ฅผ ์ œ๊ฑฐ\n# ์ œ๋ชฉ์˜ ๊ณต๋ฐฑ(๋„์–ด์“ฐ๊ธฐ)๋ฅผ ๋ชจ๋‘ ์ œ๊ฑฐํ•œ๋‹ค \ndf5['title'] = df5['title'].str.replace(r' ', '')", "_____no_output_____" ], [ "# ์ œ๋ชฉ์˜ ์˜์–ด ๋ถ€๋ถ„์„ ์ „๋ถ€ ์†Œ๋ฌธ์ž๋กœ ๋ฐ”๊ฟ”์ค€๋‹ค\ndf5['title'] = df5['title'].str.lower()", "_____no_output_____" ], [ "# ๊ทธ๋ฆฌ๊ณ  ๋‹ค์‹œ ์ค‘๋ณต๊ฐ’์„ ์ œ๊ฑฐํ•ด์ค€๋‹ค. \n# !!!!!!๊ฐ€์žฅ ์˜ค๋ž˜๋œ๋…ธ๋ž˜๊ฐ€ ์œ„๋กœ ์˜ฌ๋ผ์˜ค๋ฏ€๋กœ keep='first'๋กœ ์ฃผ๊ธฐ\ndf5 = df5.drop_duplicates(['artist', 'title'], keep='first')", "_____no_output_____" ], [ "# ์ค‘๋ณต ๊ฐ’์„ ์ฐ์–ด๋ณด๋‹ˆ ์ž˜ ์ง€์›Œ์กŒ๋‹ค! (๋„์–ด์“ฐ๊ธฐ ์ œ๊ฑฐ ํ…Œ์ŠคํŠธ)\ndf5[df5['title'] == '๊ฒฐํ˜ผ ํ•˜์ง€๋งˆ']", "_____no_output_____" ], [ "df5[df5['title'] == '๊ฒฐํ˜ผํ•˜์ง€๋งˆ']", "_____no_output_____" ], [ "# ์ค‘๋ณต ๊ฐ’์„ ์ฐ์–ด๋ณด๋‹ˆ ์ž˜ ์ง€์›Œ์กŒ๋‹ค! (์˜์–ด ๋Œ€->์†Œ๋ฌธ์ž ๋ณ€ํ™˜ ํ…Œ์ŠคํŠธ)\ndf5[df5['title'] == '์–ด์ด\\(UH-EE\\)']", "_____no_output_____" ], [ "df5[df5['title'].str.contains('์–ด์ด\\(uh-ee\\)')]", "_____no_output_____" ], [ "# ์ œ๋ชฉ ์—ด์„ ์ƒˆ๋กœ ๋งŒ๋“ค์–ด์„œ \ndf5['t'] = df5['title']", "_____no_output_____" ], [ "# ๊ด„ํ˜ธ ์•ˆ์˜ ๋ถ€๋ถ„์„ ์—†์•ค๋‹ค.\ndf5.t = df5.t.str.replace(r'\\(.*?\\)','')", "_____no_output_____" ], [ "# ์ƒˆ๋กœ ๋งŒ๋“  ์—ด์˜ ์ค‘๋ณต๊ฐ’์„ ์ œ๊ฑฐํ•œ๋‹ค.\ndf5 = df5.drop_duplicates(['artist', 't'], keep='first')", "_____no_output_____" ], [ "# ์ƒˆ๋กœ ๋งŒ๋“  ์—ด์„ ๋‹ค์‹œ ์ง€์›Œ์ค€๋‹ค.\ndf5 = df5.drop('t', axis = 1)", "_____no_output_____" ], [ "# ์ค‘๋ณต ๊ฐ’์„ ์ฐ์–ด๋ณด๋‹ˆ ์ž˜ ์ง€์›Œ์กŒ๋‹ค! 
(ํ•˜๋‚˜๋งŒ ๋‚จ์Œ) (ํ”ผ์ฒ˜๋ง ๋‹ค๋ฅธ ๋ฒ„์ „ ์ œ๊ฑฐ ํ…Œ์ŠคํŠธ)\ndf5[df5['title'].str.contains('highwaystar')]", "_____no_output_____" ], [ "df5.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 9536 entries, 12516 to 12426\nData columns (total 12 columns):\ntitle 9536 non-null object\nartist 9536 non-null object\nalbum 9536 non-null object\nrelease_date 9536 non-null object\nsong_genre 9536 non-null object\nis_title 9536 non-null object\nlike 9536 non-null object\ncreator 8431 non-null object\nlyrics 9536 non-null object\nlyricist 9536 non-null object\ncomposer 9536 non-null object\narranger 9536 non-null object\ndtypes: object(12)\nmemory usage: 968.5+ KB\n" ], [ "df5[df5['title'] == 'ํ•ด์„๋‚จ๋…€']", "_____no_output_____" ], [ "# ๋‚จ์•„์žˆ๋Š” 2019๋…„ ๊ณก์€ ์˜คํˆฌ์•Œ ๋ฟ\ndf5[df5['release_date'] == '2019.01.01']", "_____no_output_____" ], [ "d = {'2019.01.01':'2002.07.19'}\ndf5['release_date'] = df5['release_date'].replace(d)", "_____no_output_____" ], [ "# ์—ฌ๊ธฐ์„œ df5.to_csv('tidydata/song_tidy01.csv', index=False) ๋กœ ์ €์žฅ\ndf5.to_csv('song_tidy03.csv', index=False)", "_____no_output_____" ] ], [ [ "## ๊ฐ€์‚ฌ ๋ฐ์ดํ„ฐ ์ „์ฒ˜๋ฆฌ ๋", "_____no_output_____" ], [ "------------------------------", "_____no_output_____" ], [ "## ์ž‘์‚ฌ์ž‘๊ณก ๋ฐ์ดํ„ฐ ์ „์ฒ˜๋ฆฌ ์‹œ์ž‘--------------------6๋ฒˆ ์ „์ฒ˜๋ฆฌ ์‹œ์ž‘--------------------", "_____no_output_____" ] ], [ [ "# ์œ„์˜ df4๋ฅผ ํŒŒ์ผ๋กœ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ df6 = pd.read_csv('tidydata/tidy01.csv')\ndf6 = pd.read_csv('tidy03.csv')", "_____no_output_____" ], [ "df6", "_____no_output_____" ], [ "# ํ™•์ธํ•ด๋ณด๋‹ˆ ํฌ๋กค๋ง์‹œ์— ๋ฐœ๋งค์ผ์ˆœ์„œ๋Œ€๋กœ ๋‹ด๊ธฐ์ง€ ์•Š์•˜์Œ\n# !!!!!!sort by 'artist', 'release_date'๋กœ ์ฃผ๊ณ  ์‹œ์ž‘ํ•˜๊ธฐ\ndf6 = df6.sort_values(by=['artist', 'release_date'])", "_____no_output_____" ], [ "df6", "_____no_output_____" ], [ "# creator๊ฐ€ ์—†๋Š” ํ–‰ ์ œ๊ฑฐ\ndf6 = df6[pd.notnull(df6['creator'])]\ndf6", "_____no_output_____" ] ], [ [ "## --------------------7๋ฒˆ ์ „์ฒ˜๋ฆฌ ์‹œ์ž‘--------------------", "_____no_output_____" ] ], [ [ "# ์ค‘๋ณต๋…ธ๋ž˜(๋„์–ด์“ฐ๊ธฐ,๋Œ€์†Œ๋ฌธ์ž,ํ”ผ์ฒ˜๋ง์— ์˜ํ•œ)๋ฅผ ์ œ๊ฑฐ << ์œ„ ๊ณผ์ • ๋ฐ˜๋ณต\ndf7 = df6.copy()", "_____no_output_____" ], [ "# ์ œ๋ชฉ์˜ ๊ณต๋ฐฑ(๋„์–ด์“ฐ๊ธฐ)๋ฅผ ๋ชจ๋‘ ์ œ๊ฑฐํ•œ๋‹ค \ndf7['title'] = df7['title'].str.replace(r' ', '')", "_____no_output_____" ], [ "# ์ œ๋ชฉ์˜ ์˜์–ด ๋ถ€๋ถ„์„ ์ „๋ถ€ ์†Œ๋ฌธ์ž๋กœ ๋ฐ”๊ฟ”์ค€๋‹ค\ndf7['title'] = df7['title'].str.lower()", "_____no_output_____" ], [ "# ๊ทธ๋ฆฌ๊ณ  ๋‹ค์‹œ ์ค‘๋ณต๊ฐ’์„ ์ œ๊ฑฐํ•ด์ค€๋‹ค. \ndf7 = df7.drop_duplicates(['artist', 'title'], keep='first')", "_____no_output_____" ], [ "# ์ œ๋ชฉ ์—ด์„ ์ƒˆ๋กœ ๋งŒ๋“ค์–ด์„œ \ndf7['t'] = df7['title']", "_____no_output_____" ], [ "# ๊ด„ํ˜ธ ์•ˆ์˜ ๋ถ€๋ถ„์„ ์—†์•ค๋‹ค.\ndf7.t = df7.t.str.replace(r'\\(.*?\\)','')", "_____no_output_____" ], [ "# ์ƒˆ๋กœ ๋งŒ๋“  ์—ด์˜ ์ค‘๋ณต๊ฐ’์„ ์ œ๊ฑฐํ•œ๋‹ค.\ndf7 = df7.drop_duplicates(['artist', 't'], keep='first')", "_____no_output_____" ], [ "# ์ƒˆ๋กœ ๋งŒ๋“  ์—ด์„ ๋‹ค์‹œ ์ง€์›Œ์ค€๋‹ค.\ndf7 = df7.drop('t', axis = 1)", "_____no_output_____" ], [ "# ์ค‘๋ณต ๊ฐ’์„ ์ฐ์–ด๋ณด๋‹ˆ ์ž˜ ์ง€์›Œ์กŒ๋‹ค! 
(ํ•˜๋‚˜๋งŒ ๋‚จ์Œ) (ํ”ผ์ฒ˜๋ง ๋‹ค๋ฅธ ๋ฒ„์ „ ์ œ๊ฑฐ ํ…Œ์ŠคํŠธ)\ndf7[df7['title'].str.contains('highwaystar')]", "_____no_output_____" ], [ "# ๊ฐ€์‚ฌ๋ฐ์ดํ„ฐ์™€ ๋‹ค๋ฅด๊ฒŒ ์ „์ฒ˜๋ฆฌ ๋˜์—ˆ์Œ ํ™•์ธ\ndf7.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 8563 entries, 12516 to 12426\nData columns (total 12 columns):\ntitle 8563 non-null object\nartist 8563 non-null object\nalbum 8563 non-null object\nrelease_date 8563 non-null object\nsong_genre 8563 non-null object\nis_title 8563 non-null object\nlike 8563 non-null object\ncreator 8563 non-null object\nlyrics 8563 non-null object\nlyricist 8489 non-null object\ncomposer 8516 non-null object\narranger 7397 non-null object\ndtypes: object(12)\nmemory usage: 869.7+ KB\n" ], [ "# ์—ฌ๊ธฐ์„œ df7.to_csv('tidydata/lyricist_tidy01.csv', index=False) ๋กœ ์ €์žฅ\ndf7.to_csv('lyricist_tidy03.csv', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]

hexsha: cb0e28dc4eaa20a06a188215f9bd8a3c4b6471ac
size: 3,174
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Python/Python - Assignment 5 Exception Handelling.ipynb
max_stars_repo_name: harshit-saraswat/DL_CV_NLP-Assignments
max_stars_repo_head_hexsha: 8ffa45e1c17d94b48382be1c92168e7c82fb04d6
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Python/Python - Assignment 5 Exception Handelling.ipynb
max_issues_repo_name: harshit-saraswat/DL_CV_NLP-Assignments
max_issues_repo_head_hexsha: 8ffa45e1c17d94b48382be1c92168e7c82fb04d6
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Python/Python - Assignment 5 Exception Handelling.ipynb
max_forks_repo_name: harshit-saraswat/DL_CV_NLP-Assignments
max_forks_repo_head_hexsha: 8ffa45e1c17d94b48382be1c92168e7c82fb04d6
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 23
max_line_length: 203
alphanum_fraction: 0.521739
cells:
[ [ [ "## Q1. WAF to compute 5/0 and use try-except to catch exceptions", "_____no_output_____" ] ], [ [ "try:\n res=5/0\n print(res)\nexcept ZeroDivisionError:\n print(\"Zero Division Error occurred\")", "Zero Division Error occurred\n" ] ], [ [ "## Q2. Implement a Python program to generate all sentences where subject is in [\"Americans\", \"Indians\"] and verb is in [\"Play\", \"watch\"] and the object is in [\"Baseball\",\"cricket\"].\n\n### Hint: Subject,Verb and Object should be declared in the program as shown below.\nsubjects=[\"Americans \",\"Indians\"]<br>\nverbs=[\"play\",\"watch\"]<br>\nobjects=[\"Baseball\",\"Cricket\"]<br>\n\n### Output should come as below:\nAmericans play Baseball.<br>\nAmericans play Cricket.<br>\nAmericans watch Baseball.<br>\nAmericans watch Cricket.<br>\nIndians play Baseball.<br>\nIndians play Cricket.<br>\nIndians watch Baseball.<br>\nIndians watch Cricket.<br>", "_____no_output_____" ] ], [ [ "import itertools\n\nsubjects=[\"Americans\",\"Indians\"]\nverbs=[\"play\",\"watch\"]\nobjects=[\"Baseball\",\"Cricket\"]\nfinalList=[subjects,verbs,objects]\n\nsentenceList=list(itertools.product(*finalList))\nfor sentence in sentenceList:\n print(\" \".join(sentence),end='.\\n')", "Americans play Baseball.\nAmericans play Cricket.\nAmericans watch Baseball.\nAmericans watch Cricket.\nIndians play Baseball.\nIndians play Cricket.\nIndians watch Baseball.\nIndians watch Cricket.\n" ], [ "lst=[-1,2,3,45,-23]\nfilt=[x for x in lst if x>0 ]\nfilt", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb0e29f053e475dec8cb81bc600633a5701f3e30
3,199
ipynb
Jupyter Notebook
example/stemmer/load-stemmer.ipynb
fossabot/Malaya
c645b0eb8b826224c0fa8f8d339631ce465a80f4
[ "MIT" ]
null
null
null
example/stemmer/load-stemmer.ipynb
fossabot/Malaya
c645b0eb8b826224c0fa8f8d339631ce465a80f4
[ "MIT" ]
null
null
null
example/stemmer/load-stemmer.ipynb
fossabot/Malaya
c645b0eb8b826224c0fa8f8d339631ce465a80f4
[ "MIT" ]
null
null
null
17.772222
121
0.490778
[ [ [ "import malaya", "_____no_output_____" ] ], [ [ "## Use Sastrawi stemmer", "_____no_output_____" ] ], [ [ "malaya.stem.sastrawi('saya tengah berjalan')", "_____no_output_____" ], [ "malaya.stem.sastrawi('saya tengah berjalankan sangat-sangat')", "_____no_output_____" ], [ "malaya.stem.sastrawi('menarik')", "_____no_output_____" ] ], [ [ "## Load deep learning model\n\nI really not suggest you to use this model. Use Sastrawi instead. We are adding our own rules into Sastrawi stemmer", "_____no_output_____" ] ], [ [ "stemmer = malaya.stem.deep_model()", "_____no_output_____" ], [ "stemmer.stem('saya tengah berjalankan sangat-sangat')", "_____no_output_____" ], [ "stemmer.stem('saya sangat sukakan awak')", "_____no_output_____" ], [ "stemmer.stem('saya sangat suakkan awak')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb0e2ad487adc35d3ba520e485ed9618b5d4c40e
64,295
ipynb
Jupyter Notebook
matplotlib/show_error.ipynb
subhadarship/visualization
b1df7e5041662c580a70b79020ed8f8eb444f064
[ "MIT" ]
1
2021-05-28T09:34:59.000Z
2021-05-28T09:34:59.000Z
matplotlib/show_error.ipynb
subhadarship/visualization
b1df7e5041662c580a70b79020ed8f8eb444f064
[ "MIT" ]
null
null
null
matplotlib/show_error.ipynb
subhadarship/visualization
b1df7e5041662c580a70b79020ed8f8eb444f064
[ "MIT" ]
null
null
null
378.205882
33,748
0.940213
[ [ [ "import matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ], [ "# create data\nx = np.array(range(1, 50 + 1)) # 50 different x-axis values (say epochs)\nys = np.random.rand(50, 2000) # 2000 records for each x-axis value (say, accuracies for 2000 runs for each epoch)\n\n# compute means\ny_means = np.mean(ys, axis=1)\n\n# compute standard deviations\ny_stds = np.std(ys, axis=1)", "_____no_output_____" ] ], [ [ "## Style A", "_____no_output_____" ] ], [ [ "# plot\nplt.figure(figsize=(6, 4), dpi=160)\nplt.errorbar(x, y_means, y_stds, color='xkcd:red')\nplt.show()", "_____no_output_____" ] ], [ [ "## Style B", "_____no_output_____" ] ], [ [ "# plot\nplt.figure(figsize=(6, 4), dpi=160)\nplt.plot(x, y_means, color='xkcd:red')\nplt.fill_between(x, y_means - y_stds, y_means + y_stds, facecolor='xkcd:red', \n edgecolor='None', alpha=0.3)\nplt.show()", "_____no_output_____" ], [ "\n\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb0e52043c7c33294bb01d18980f1d5633762811
26,193
ipynb
Jupyter Notebook
sem01_intro/sem01_intro.ipynb
Vinley123/dap_2021_spring
d95c539be097a73aab5831224abeefeb15359db2
[ "MIT" ]
null
null
null
sem01_intro/sem01_intro.ipynb
Vinley123/dap_2021_spring
d95c539be097a73aab5831224abeefeb15359db2
[ "MIT" ]
null
null
null
sem01_intro/sem01_intro.ipynb
Vinley123/dap_2021_spring
d95c539be097a73aab5831224abeefeb15359db2
[ "MIT" ]
null
null
null
24.185596
858
0.547436
[ [ [ "<center>\n<img src=\"http://i0.kym-cdn.com/photos/images/original/000/234/765/b7e.jpg\" height=\"400\" width=\"400\">\n</center>\n\n# ะŸะตั€ะฒะพะผัƒ ัะตะผะธะฝะฐั€ัƒ ะฟั€ะธะณะพั‚ะพะฒะธั‚ัŒัั", "_____no_output_____" ], [ " __ะะฐัˆะฐ ั†ะตะปัŒ ะฝะฐ ัะตะณะพะดะฝั:__ \n\n* ะ—ะฐะฟัƒัั‚ะธั‚ัŒ ะฐะฝะฐะบะพะฝะดัƒ ะฑั‹ัั‚ั€ะตะต, ั‡ะตะผ ะทะฐ 20 ะผะธะฝัƒั‚. \n* ะŸะพะฟั€ะพะฑะพะฒะฐั‚ัŒ python ะฝะฐ ะฒะบัƒั\n* ะ ะตัˆะธั‚ัŒ ะฟะฐั€ัƒ ะฟั€ะพัั‚ั‹ั… ะทะฐะดะฐั‡ ะธ ะทะฐะปะธั‚ัŒ ะธั… ะฒ ะฏะฝะดะตะบั.ะšะพะฝั‚ะตัั‚", "_____no_output_____" ], [ "## 0. ะšัƒะดะฐ ัั‚ะพ ั ะฟะพะฟะฐะป? \n\n__Jupyter Notebook__ - ัั‚ะพ ัˆั‚ัƒะบะฐ ะดะปั ะธะฝั‚ะตั€ะฐะบะบั‚ะธะฒะฝะพะณะพ ะทะฐะฟัƒัะบะฐ ะบะพะดะฐ ะฒ ะฑั€ะฐัƒะทะตั€ะต. ะœะฝะพะณะพ ะณะดะต ะธัะฟะพะปัŒะทัƒัŽั‚. ะœะพะถะฝะพ ะฟะธัะฐั‚ัŒ ะบะพะด, ะฒั‹ะฟะพะปะฝัั‚ัŒ ะตะณะพ ะธ ัะผะพั‚ั€ะตั‚ัŒ ะฝะฐ ั€ะตะทัƒะปัŒั‚ะฐั‚.\n\nะะฐะฟะธัˆะธ ะฒ ัั‡ะตะนะบะต ะฝะธะถะต `2 + 2` ะธ ะฝะฐะถะผะธ ะฝะฐ ะบะฝะพะฟะบะธ __Shift__ ะธ __Enter__ ะพะดะฝะพะฒั€ะตะผะตะฝะฝะพ. ะขะฒะพะน ะบะพะด ะฒั‹ะฟะพะปะฝะธั‚ัั ะธ ั‚ั‹ ัƒะฒะธะดะธัˆัŒ ะพั‚ะฒะตั‚. ะ”ะฐะปัŒัˆะต ั‚ะฐะบ ะธ ะฑัƒะดะตะผ ะฟะธัะฐั‚ัŒ ะบะพะด. ", "_____no_output_____" ] ], [ [ "# ะฝะฐะฟะธัˆะธ ะบะพะด ะฟั€ัะผะพ ั‚ัƒั‚ ะฒะผะตัั‚ะพ ั‚ั€ั‘ั… ั‚ะพั‡ะตะบ\n2+2", "_____no_output_____" ] ], [ [ "> ะ•ั‰ั‘ ัƒ ัั‡ะตะตะบ ะฑั‹ะฒะฐะตั‚ ั€ะฐะทะฝั‹ะน ั‚ะธะฟ. ะ’ ัั‚ะพะน ั‡ะฐัั‚ะธ ัะตะผะธะฝะฐั€ะฐ ะฒะฐัˆ ัะตะผะธะฝะฐั€ะธัั‚ ะฝะตะผะฝะพะณะพ ะฟะพัƒั‡ะธั‚ ะฒะฐั ั€ะฐะฑะพั‚ะฐั‚ัŒ ะฒ ั‚ะตั‚ั€ะฐะดะบะฐั… ั Markdown. \n\n", "_____no_output_____" ], [ "### Markdown\n\n- [10-ะผะธะฝัƒั‚ะฝั‹ะน ัƒั€ะพะบ ะฟะพ ัะธะฝั‚ะฐะบัะธััƒ](https://www.markdowntutorial.com/)\n- [ะšะพั€ะพั‚ะบะธะน ะณะฐะนะด ะฟะพ ัะธะฝั‚ะฐะบัะธััƒ](https://guides.github.com/features/mastering-markdown/)", "_____no_output_____" ], [ "## 1. Python ะบะฐะบ ะบะฐะปัŒะบัƒะปัั‚ะพั€\n\nะœะพะถะฝะพ ัะบะปะฐะดั‹ะฒะฐั‚ัŒ, ัƒะผะฝะพะถะฐั‚ัŒ, ะดะตะปะธั‚ัŒ ะธ ั‚ะฐะบ ะดะฐะปะตะต...", "_____no_output_____" ] ], [ [ "2+2", "_____no_output_____" ], [ "4 * 7", "_____no_output_____" ], [ "3 * (2 + 5)", "_____no_output_____" ], [ "5 ** 1", "_____no_output_____" ], [ "5 / 2", "_____no_output_____" ], [ "5 // 2 #ะงะตะปะพะฒะตะบ ะฟั€ะพั‚ะธะฒ ะผะฐัˆะธะฝั‹: ั€ะฐัƒะฝะด 1, ัƒะณะฐะดะฐะนั‚ะต, ั‡ั‚ะพ ะฟะพะปัƒั‡ะธั‚ัั?", "_____no_output_____" ], [ "5 % 2 #ะฐ ั‚ัƒั‚?", "_____no_output_____" ] ], [ [ "ะšะฐะบ ะพะฑัั‚ะพัั‚ ะดะตะปะฐ ั ะดั€ัƒะณะธะผะธ ะพะฟะตั€ะฐั†ะธัะผะธ? ะŸะพะฟั€ะพะฑัƒะตะผ ะธะทะฒะปะตั‡ัŒ ะบะฒะฐะดั€ะฐั‚ะฝั‹ะน ะบะพั€ะตะฝัŒ:", "_____no_output_____" ] ], [ [ "1+1\nsqrt(4)", "_____no_output_____" ] ], [ [ "ะ˜ะทะฒะปะตั‡ะตะฝะธะต ะบะฒะฐะดั€ะฐั‚ะฝะพะณะพ ะบะพั€ะฝั ะฝะต ะฒั…ะพะดะธั‚ ะฒ ะบะพะผะฟะปะตะบั‚ ะผะฐั‚ะตะผะฐั‚ะธั‡ะตัะบะธั… ะพะฟะตั€ะฐั†ะธะน, ะดะพัั‚ัƒะฟะฝั‹ั… ะฒ Python ะฟะพ ัƒะผะพะปั‡ะฐะฝะธัŽ, ะฟะพัั‚ะพะผัƒ ะฒะผะตัั‚ะพ ะพั‚ะฒะตั‚ะฐ ะผั‹ ะฟะพะปัƒั‡ะธะปะธ ะบะฐะบัƒัŽ-ั‚ะพ ะฝะตะฟะพะฝัั‚ะฝัƒัŽ ั€ัƒะณะฐะฝัŒ. \n\nะญั‚ะฐ ะฝะตะฟะพะฝัั‚ะฝะฐั ั€ัƒะณะฐะฝัŒ ะฝะฐะทั‹ะฒะฐะตั‚ัั ะธัะบะปัŽั‡ะตะฝะธะตะผ, ะบะพะณะดะฐ-ะฝะธะฑัƒะดัŒ ะผั‹ ะฝะฐัƒั‡ะธะผัั ะธั… ะพะฑั€ะฐะฑะฐั‚ั‹ะฒะฐั‚ัŒ, ะฐ ัะตะนั‡ะฐั ะพะฑั€ะฐั‚ะธะผ ะฒะฝะธะผะฐะฝะธะต ะฝะฐ ะฟะพัะปะตะดะฝัŽัŽ ัั‚ั€ะพั‡ะบัƒ: `NameError: name 'sqrt' is not defined` โ€” ั‚ะพ ะตัั‚ัŒ ยซั ะฝะต ะฟะพะฝะธะผะฐัŽ, ั‡ั‚ะพ ั‚ะฐะบะพะต sqrtยป. ะžะดะฝะฐะบะพ, ะฝะต ะฒัั‘ ั‚ะฐะบ ะฟะปะพั…ะพ: ัะพะพั‚ะฒะตั‚ัั‚ะฒัƒัŽั‰ะฐั ั„ัƒะฝะบั†ะธั ะตัั‚ัŒ ะฒ ะผะพะดัƒะปะต math. ะงั‚ะพะฑั‹ ะตะน ะฒะพัะฟะพะปัŒะทะพะฒะฐั‚ัŒัั, ะฝัƒะถะฝะพ ะธะผะฟะพั€ั‚ะธั€ะพะฒะฐั‚ัŒ ัั‚ะพั‚ ะผะพะดัƒะปัŒ. 
ะญั‚ะพ ะผะพะถะฝะพ ัะดะตะปะฐั‚ัŒ ั€ะฐะทะฝั‹ะผะธ ัะฟะพัะพะฑะฐะผะธ.", "_____no_output_____" ] ], [ [ "import math\nmath.sqrt(4)", "_____no_output_____" ] ], [ [ "ะŸะพัะปะต ั‚ะพะณะพ, ะบะฐะบ ะผะพะดัƒะปัŒ `math` ะธะผะฟะพั€ั‚ะธั€ะพะฒะฐะฝ, ะฒั‹ ะผะพะถะตั‚ะต ัƒะทะฝะฐั‚ัŒ, ะบะฐะบะธะต ะตั‰ั‘ ะฒ ะฝั‘ะผ ะตัั‚ัŒ ั„ัƒะฝะบั†ะธะธ. ะ’ __IPython Notebook__ ะดะปั ัั‚ะพะณะพ ะดะพัั‚ะฐั‚ะพั‡ะฝะพ ะฒะฒะตัั‚ะธ ะธะผั ะผะพะดัƒะปั, ะฟะพัั‚ะฐะฒะธั‚ัŒ ั‚ะพั‡ะบัƒ ะธ ะฝะฐะถะฐั‚ัŒ ะบะฝะพะฟะบัƒ __ยซTabยป__. ะ’ะพั‚, ะฝะฐะฟั€ะธะผะตั€, ัะธะฝัƒั:", "_____no_output_____" ] ], [ [ "math.sin(0)", "_____no_output_____" ] ], [ [ "ะŸั€ะธะฒะตะดะตะฝะฝั‹ะน ัะธะฝั‚ะฐะบัะธั ะผะพะถะตั‚ ะพะบะฐะทะฐั‚ัŒัั ะฝะตัƒะดะพะฑะฝั‹ะผ, ะตัะปะธ ะฒะฐะผ ั‡ะฐัั‚ะพ ะฟั€ะธั…ะพะดะธั‚ัั ะฒั‹ะทั‹ะฒะฐั‚ัŒ ะบะฐะบะธะต-ั‚ะพ ะผะฐั‚ะตะผะฐั‚ะธั‡ะตัะบะธะต ั„ัƒะฝะบั†ะธะธ. ะงั‚ะพะฑั‹ ะฝะต ะฟะธัะฐั‚ัŒ ะบะฐะถะดั‹ะน ั€ะฐะท ัะปะพะฒะพ ยซmathยป, ะผะพะถะฝะพ ะธะผะฟะพั€ั‚ะธั€ะพะฒะฐั‚ัŒ ะธะท ะผะพะดัƒะปั ะบะพะฝะบั€ะตั‚ะฝั‹ะต ั„ัƒะฝะบั†ะธะธ.", "_____no_output_____" ] ], [ [ "from math import sqrt\nsqrt(4)", "_____no_output_____" ] ], [ [ "ะขะฐะบะถะต ะผะพะถะฝะพ ะฟะพะดะณั€ัƒะทะธั‚ัŒ ะบะฐะบะพะน-ะฝะธะฑัƒะดัŒ ะผะพะดัƒะปัŒ ะธะปะธ ะฟะฐะบะตั‚, ะฝะพ ะฟั€ะธ ัั‚ะพะผ ะธะทะผะตะฝะธั‚ัŒ ัƒ ะฝะตะณะพ ะฝะฐะทะฒะฐะฝะธะต ะฝะฐ ะฑะพะปะตะต ะบะพั€ะพั‚ะบะพะต ะธ ะฟะพะปัŒะทะพะฒะฐั‚ัŒัั ะธะผ.", "_____no_output_____" ] ], [ [ "import math as mh\nmh.sqrt(4)", "_____no_output_____" ] ], [ [ "## 2. ะŸะตั€ะตะผะตะฝะฝั‹ะต\n\nะŸะพะฝัั‚ะธะต ยซะฟะตั€ะตะผะตะฝะฝะพะนยป ะฒ ะฟั€ะพะณั€ะฐะผะผะธั€ะพะฒะฐะฝะธะธ ะฟะพั…ะพะถะต ะฝะฐ ะฐะฝะฐะปะพะณะธั‡ะฝะพะต ะฟะพะฝัั‚ะธะต ะฒ ะผะฐั‚ะตะผะฐั‚ะธะบะต. ะŸะตั€ะตะผะตะฝะฝะฐั โ€” ัั‚ะพ ัั‡ะตะนะบะฐ ะฟะฐะผัั‚ะธ, ะพะฑะพะทะฝะฐั‡ะฐะตะผะฐั ะบะฐะบะธะผ-ั‚ะพ ะธะผะตะฝะตะผ. ะ’ ัั‚ะพะน ัั‡ะตะนะบะต ะผะพะณัƒั‚ ั…ั€ะฐะฝะธั‚ัŒัั ั‡ะธัะปะฐ, ัั‚ั€ะพะบะธ ะธ ะฑะพะปะตะต ัะปะพะถะฝั‹ะต ะพะฑัŠะตะบั‚ั‹. ะœั‹ ะฟะพะบะฐ ะฟะพั€ะฐะฑะพั‚ะฐะตะผ ะฝะตะผะฝะพะถะบะพ ั ั‡ะธัะปะพะฒั‹ะผะธ ะฟะตั€ะตะผะตะฝะฝั‹ะผะธ.", "_____no_output_____" ] ], [ [ "x = 4\ny=1\ny\nx", "_____no_output_____" ], [ "x = x + 2\nx", "_____no_output_____" ], [ "x", "_____no_output_____" ] ], [ [ "ะ ั‡ั‚ะพ ะฑัƒะดะตั‚ ะฒ $x$, ะตัะปะธ ะทะฐะฟัƒััั‚ะธั‚ัŒ ัั‡ะตะนะบัƒ ะตั‰ั‘ ั€ะฐะท? ", "_____no_output_____" ], [ "## 3. ะขะธะฟั‹\n\nะŸะพะฟั€ะพะฑัƒะตะผ ะทะฐะฟะธัะฐั‚ัŒ ั‡ะธัะปะฐ ะฟะพ-ั€ะฐะทะฝะพะผัƒ", "_____no_output_____" ] ], [ [ "4 * 42", "_____no_output_____" ], [ "'4' * 42 #ะงะตะปะพะฒะตะบ ะฟั€ะพั‚ะธะฒ ะผะฐัˆะธะฝั‹: ั€ะฐัƒะฝะด 2, ัƒะณะฐะดะฐะนั‚ะต, ั‡ั‚ะพ ะฟะพะปัƒั‡ะธั‚ัั?", "_____no_output_____" ] ], [ [ "ะ”ะปั ะบะฐะถะดะพะณะพ ั‚ะธะฟะฐ ะฐั€ะธั„ะผะตั‚ะธะบะฐ ั€ะฐะฑะพั‚ะฐะตั‚ ะฟะพ-ัะฒะพะตะผัƒ!", "_____no_output_____" ] ], [ [ "a = 'ั‘ะถ'\nb = 'ะธะบ'\na + b + a +a", "_____no_output_____" ], [ "type(4)", "_____no_output_____" ], [ "c = 3\ntype (c)", "_____no_output_____" ], [ "type('4')", "_____no_output_____" ], [ "type(4.0)", "_____no_output_____" ], [ "type(True)", "_____no_output_____" ] ], [ [ "- `str` - ั‚ะตะบัั‚ะพะฒั‹ะน\n- `int` - ั†ะตะปะพั‡ะธัะปะตะฝะฝั‹ะน\n- `float` - ั‡ะธัะปะพ ั ะฟะปะฐะฒะฐัŽั‰ะตะน ะทะฐะฟัั‚ะพะน (ะพะฑั‹ั‡ะฝะพะต ะดะตะนัั‚ะฒะธั‚ะตะปัŒะฝะพะต ั‡ะธัะปะพ)\n- `bool` - ะฑัƒะปะตะฒะฐ ะฟะตั€ะตะผะตะฝะฝะฐั", "_____no_output_____" ], [ "ะ˜ะฝะพะณะดะฐ ะผะพะถะฝะพ ะฟะตั€ะตั…ะพะดะธั‚ัŒ ะพั‚ ะพะดะฝะพะณะพ ั‚ะธะฟะฐ ะฟะตั€ะตะผะตะฝะฝะพะน ะบ ะดั€ัƒะณะพะผัƒ.", "_____no_output_____" ] ], [ [ "x = '42'\nprint(type(x))\n\nx = int(x)\nprint(type(x))", "<class 'str'>\n<class 'int'>\n" ] ], [ [ "ะ ะธะฝะพะณะดะฐ ะฝะตั‚. 
ะ’ะบะปัŽั‡ะฐะนั‚ะต ะปะพะณะธะบัƒ :)", "_____no_output_____" ] ], [ [ "x = 'ะ›ัŽะบ, ั ั‚ะฒะพะน ะพั‚ะตั†'\nprint(type(x))\n\nx = int(x)\nprint(type(x))", "<class 'str'>\n" ] ], [ [ "ะ‘ัƒะปะตะฒั‹ ะฟะตั€ะตะผะตะฝะฝั‹ะต ะฒะพะทะฝะธะบะฐัŽั‚ ะฟั€ะธ ั€ะฐะทะฝั‹ั… ัั€ะฐะฒะฝะตะฝะธัั…, ะธั… ะผั‹ ะฑัƒะดะตะผ ะฐะบั‚ะธะฒะฝะพ ะธัะฟะพะปัŒะทะพะฒะฐั‚ัŒ ะฝะฐ ัะปะตะดัƒัŽั‰ะตะผ ัะตะผะธะฝะฐั€ะต.", "_____no_output_____" ] ], [ [ "2 + 2 == 4", "_____no_output_____" ], [ "2 + 2 == 5", "_____no_output_____" ], [ "x = 5\nx < 8", "_____no_output_____" ] ], [ [ "## 4. ะ’ะตั‰ะตัั‚ะฒะตะฝะฝั‹ะต ั‡ะธัะปะฐ ะธ ะฟะพะณั€ะตัˆะฝะพัั‚ะธ\n\nะ’ะตั‰ะตัั‚ะฒะตะฝะฝั‹ะต ั‡ะธัะปะฐ ะฒ ะฟั€ะพะณั€ะฐะผะผะธั€ะพะฒะฐะฝะธะธ ะฝะต ั‚ะฐะบ ะฟั€ะพัั‚ั‹. ะ’ะพั‚, ะฝะฐะฟั€ะธะผะตั€, ะฟะพัั‡ะธั‚ะฐะตะผ ัะธะฝัƒั ั‡ะธัะปะฐ $\\pi$:", "_____no_output_____" ] ], [ [ "from math import pi, sin\nsin(pi) #ะดัƒะผะฐะตั‚ะต, ะฟะพะปัƒั‡ะธั‚ัั 0? ะฅะฐ-ั…ะฐ!", "_____no_output_____" ] ], [ [ "ะะตะฟะพะฝัั‚ะฝั‹ะน ะพั‚ะฒะตั‚? ะ’ะพ-ะฟะตั€ะฒั‹ั…, ัั‚ะพ ั‚ะฐะบ ะฝะฐะทั‹ะฒะฐะตะผะฐั [ะบะพะผะฟัŒัŽั‚ะตั€ะฝะฐั ั„ะพั€ะผะฐ ัะบัะฟะพะฝะตะฝั†ะธะฐะปัŒะฝะพะน ะทะฐะฟะธัะธ ั‡ะธัะตะป.](https://ru.wikipedia.org/wiki/ะญะบัะฟะพะฝะตะฝั†ะธะฐะปัŒะฝะฐั_ะทะฐะฟะธััŒ#.D0.9A.D0.BE.D0.BC.D0.BF.D1.8C.D1.8E.D1.82.D0.B5.D1.80.D0.BD.D1.8B.D0.B9_.D1.81.D0.BF.D0.BE.D1.81.D0.BE.D0.B1_.D1.8D.D0.BA.D1.81.D0.BF.D0.BE.D0.BD.D0.B5.D0.BD.D1.86.D0.B8.D0.B0.D0.BB.D1.8C.D0.BD.D0.BE.D0.B9_.D0.B7.D0.B0.D0.BF.D0.B8.D1.81.D0.B8) ะžะฝะฐ ัƒะดะพะฑะฝะฐ, ะตัะปะธ ะฝัƒะถะฝะพ ัƒะผะตั‚ัŒ ะทะฐะฟะธัั‹ะฒะฐั‚ัŒ ะพั‡ะตะฝัŒ ะฑะพะปัŒัˆะธะต ะธะปะธ ะพั‡ะตะฝัŒ ะผะฐะปะตะฝัŒะบะธะต ั‡ะธัะปะฐ:`1.2E2` ะพะทะฝะฐั‡ะฐะตั‚ `1.2โ‹…102`, ั‚ะพ ะตัั‚ัŒ `1200`, ะฐ `2.4e-3` โ€” ั‚ะพ ะถะต ัะฐะผะพะต, ั‡ั‚ะพ `2.4โ‹…10โˆ’3=00024`. \n\nะ ะตะทัƒะปัŒั‚ะฐั‚, ะฟะพัั‡ะธั‚ะฐะฝะฝั‹ะน Python ะดะปั $\\sin \\pi$, ะธะผะตะตั‚ ะฟะพั€ัะดะพะบ `10โˆ’16` โ€” ัั‚ะพ ะพั‡ะตะฝัŒ ะผะฐะปะตะฝัŒะบะพะต ั‡ะธัะปะพ, ะฑะปะธะทะบะพะต ะบ ะฝัƒะปัŽ. ะŸะพั‡ะตะผัƒ ะฝะต ยซะฝะฐัั‚ะพัั‰ะธะนยป ะฝะพะปัŒ? ะ’ัะต ะฒั‹ั‡ะธัะปะตะฝะธั ะฒ ะฒะตั‰ะตัั‚ะฒะตะฝะฝั‹ั… ั‡ะธัะปะฐั… ะดะตะปะฐัŽั‚ัั ะบะพะผะฟัŒัŽั‚ะตั€ะพะผ ั ะฝะตะบะพั‚ะพั€ะพะน ะพะณั€ะฐะฝะธั‡ะตะฝะฝะพะน ั‚ะพั‡ะฝะพัั‚ัŒัŽ, ะฟะพัั‚ะพะผัƒ ะทะฐั‡ะฐัั‚ัƒัŽ ะฒะผะตัั‚ะพ ยซั‡ะตัั‚ะฝั‹ั…ยป ะพั‚ะฒะตั‚ะพะฒ ะฟะพะปัƒั‡ะฐัŽั‚ัั ั‚ะฐะบะธะต ะฟั€ะธะฑะปะธะถะตะฝะฝั‹ะต. ะš ัั‚ะพะผัƒ ะฝะฐะดะพ ะฑั‹ั‚ัŒ ะณะพั‚ะพะฒั‹ะผ.", "_____no_output_____" ] ], [ [ "#ะงะตะปะพะฒะตะบ ะฟั€ะพั‚ะธะฒ ะผะฐัˆะธะฝั‹: ั€ะฐัƒะฝะด 3, ัƒะณะฐะดะฐะนั‚ะต, ั‡ั‚ะพ ะฟะพะปัƒั‡ะธั‚ัั?\n0.4 - 0.3 == 0.1", "_____no_output_____" ], [ "0.4 - 0.3", "_____no_output_____" ] ], [ [ "ะšะพะณะดะฐ ัั€ะฐะฒะฝะธะฒะฐะตั‚ะต ะฒะตั‰ะตัั‚ะฒะตะฝะฝั‹ะต ั‡ะธัะปะฐ ะฑัƒะดัŒั‚ะต ะพัั‚ะพั€ะพะถะฝะตะต. ", "_____no_output_____" ], [ "## 5. ะ’ะฒะพะด ะธ ะฒั‹ะฒะพะด\n\nะ ะฐะฑะพั‚ะฐ ะฒ Jupyter ั€ะตะดะบะพ ั‚ั€ะตะฑัƒะตั‚ ะฟะธัะฐั‚ัŒ ะบะพะด, ะบะพั‚ะพั€ั‹ะน ัะฐะผ ะฟะพ ัะตะฑะต ะทะฐะฟั€ะฐัˆะธะฒะฐะตั‚ ะดะฐะฝะฝั‹ะต ั ะบะปะฐะฒะธะฐั‚ัƒั€ั‹, ะฝะพ ะดะปั ะดั€ัƒะณะธั… ะฟั€ะธะปะพะถะตะฝะธะน (ะธ ะฒ ั‡ะฐัั‚ะฝะพัั‚ะธ ะดะปั ะดะพะผะฐัˆะฝะธั… ั€ะฐะฑะพั‚) ัั‚ะพ ะผะพะถะตั‚ ะฟะพั‚ั€ะตะฑะพะฒะฐั‚ัŒัั. ะš ั‚ะพะผัƒ ะถะต, ะฝะฐะฟะธัะฐะฝะธะต ะธะฝั‚ะตั€ะฐะบั‚ะธะฒะฝั‹ั… ะฟั€ะธะปะพะถะตะฝะธะน ัะฐะผะพ ะฟะพ ัะตะฑะต ะทะฐะฑะฐะฒะฝะพะต ะทะฐะฝัั‚ะธะต. ะะฐะฟะธัˆะตะผ, ะฝะฐะฟั€ะธะผะตั€, ะฟั€ะพะณั€ะฐะผะผัƒ, ะบะพั‚ะพั€ะฐั ะทะดะพั€ะพะฒะฐะตั‚ัั ั ะฝะฐะผะธ ะฟะพ ะธะผะตะฝะธ.", "_____no_output_____" ] ], [ [ "name = input(\"ะ’ะฒะตะดะธั‚ะต ะฒะฐัˆะต ะธะผั: \")\nprint(\"ะŸั€ะธะฒะตั‚,\",name)", "_____no_output_____" ], [ "name", "_____no_output_____" ] ], [ [ "ะงั‚ะพ ะทะดะตััŒ ะฟั€ะพะธะทัˆะปะพ? 
ะ’ ะฟะตั€ะฒะพะน ัั‚ั€ะพั‡ะบะต ะผั‹ ะธัะฟะพะปัŒะทะพะฒะฐะปะธ ั„ัƒะฝะบั†ะธัŽ `input`. ะžะฝะฐ ะฒั‹ะฒะตะปะฐ ะฝะฐ ัะบั€ะฐะฝ ัั‚ั€ะพั‡ะบัƒ, ะบะพั‚ะพั€ัƒัŽ ะตะน ะฟะตั€ะตะดะฐะปะธ (ะพะฑัะทะฐั‚ะตะปัŒะฝะพ ะฒ ะบะฐะฒั‹ั‡ะบะฐั…) ะธ ะทะฐะฟั€ะพัะธะปะฐ ะพั‚ะฒะตั‚ ั ะบะปะฐะฒะธะฐั‚ัƒั€ั‹. ะฏ ะตะณะพ ะฒะฒั‘ะป, ัƒะบะฐะทะฐะฒ ัะฒะพั‘ ะธะผั. ะŸะพัะปะต ั‡ะตะณะพ `input` ะฒะตั€ะฝัƒะปะฐ ัั‚ั€ะพั‡ะบัƒ ั ะธะผะตะฝะตะผ ะธ ะฟั€ะธัะฒะพะธะปะฐ ะตั‘ ะฟะตั€ะตะผะตะฝะฝะพะน `name`.\n\nะŸะพัะปะต ัั‚ะพะณะพ ะฒะพ ะฒั‚ะพั€ะพะน ัั‚ั€ะพะบะต ะฑั‹ะปะฐ ะฒั‹ะทะฒะฐะฝะฐ ั„ัƒะฝะบั†ะธั `print` ะธ ะตะน ะฑั‹ะปะธ ะฟะตั€ะตะดะฐะฝั‹ ะดะฒะต ัั‚ั€ะพั‡ะบะธ โ€” \"ะŸั€ะธะฒะตั‚,\" ะธ ั‚ะพ, ั‡ั‚ะพ ั…ั€ะฐะฝะธะปะพััŒ ะฒ ะฟะตั€ะตะผะตะฝะฝะพะน `name` ะคัƒะฝะบั†ะธั `print` ะฒั‹ะฒะตะปะฐ ัั‚ะธ ะดะฒะต ัั‚ั€ะพั‡ะบะธ ะฟะพัะปะตะดะพะฒะฐั‚ะตะปัŒะฝะพ, ั€ะฐะทะดะตะปะธะฒ ะฟั€ะพะฑะตะปะพะผ. ะ—ะฐะผะตั‚ะธะผ, ั‡ั‚ะพ ะฒ ะฟะตั€ะตะผะตะฝะฝะพะน `name` ะฟะพ-ะฟั€ะตะถะฝะตะผัƒ ะปะตะถะธั‚ ั‚ะฐ ัั‚ั€ะพั‡ะบะฐ, ะบะพั‚ะพั€ัƒัŽ ะผั‹ ะฒะฒะตะปะธ ั ะบะปะฐะฒะธะฐั‚ัƒั€ั‹.", "_____no_output_____" ], [ "ะŸะพะฟั€ะพะฑัƒะตะผ ั‚ะตะฟะตั€ัŒ ะฝะฐะฟะธัะฐั‚ัŒ ะฟั€ะพะณั€ะฐะผะผัƒ ยซัƒะดะฒะพะธั‚ะตะปัŒยป. ะžะฝะฐ ะดะพะปะถะฝะฐ ะฑัƒะดะตั‚ ะฟั€ะธะฝะธะผะฐั‚ัŒ ะฝะฐ ะฒั…ะพะด ั‡ะธัะปะพ, ัƒะดะฒะฐะธะฒะฐั‚ัŒ ะตะณะพ ะธ ะฒะพะทะฒั€ะฐั‰ะฐั‚ัŒ ั€ะตะทัƒะปัŒั‚ะฐั‚.", "_____no_output_____" ] ], [ [ "x = input(\"ะ’ะฒะตะดะธั‚ะต ะบะฐะบะพะต-ะฝะธะฑัƒะดัŒ ั‡ะธัะปะพ: \")\ny = x * 2\nprint(y)", "_____no_output_____" ] ], [ [ "ะงั‚ะพ-ั‚ะพ ะฟะพัˆะปะพ ะฝะต ั‚ะฐะบ. ะงั‚ะพ ะธะผะตะฝะฝะพ? ะšะฐะบ ัั‚ะพ ะธัะฟั€ะฐะฒะธั‚ัŒ?", "_____no_output_____" ], [ "## 6. ะฃั‡ะธะผัั ะดั€ัƒะถะธั‚ัŒ ั ะฟะพะธัะบะพะฒะธะบะฐะผะธ\n\n__ะ—ะฐะดะฐั‡ะฐ:__ ั ั…ะพั‡ัƒ ัะณะตะฝะตั€ะธั€ะพะฒะฐั‚ัŒ ั€ะฐะฝะดะพะผะฝะพะต ั‡ะธัะปะพ, ะฝะพ ั ะฝะต ะทะฝะฐัŽ ะบะฐะบ ัั‚ะพ ัะดะตะปะฐั‚ัŒ. \n\nะ’ ัั‚ะพะผ ะผะตัั‚ะต ะฒะฐัˆ ัะตะผะธะฝะฐั€ะธัั‚ ัะพะฒะตั€ัˆะธั‚ ัะผะตั€ั‚ะตะปัŒะฝั‹ะน ะฝะพะผะตั€. ะžะฝ ะทะฐะณัƒะณะปะธั‚ ัƒ ะฒะฐั ะฝะฐ ะณะปะฐะทะฐั… ะบะฐะบ ัะณะตะฝะตั€ะธั€ะพะฒะฐั‚ัŒ ัะปัƒั‡ะฐะนะฝะพะต ั‡ะธัะปะพ ะธ ะฝะฐะนะดั‘ั‚ ะบะพะด ะดะปั ัั‚ะพะณะพ.", "_____no_output_____" ] ], [ [ "# ะœะตัั‚ะตั‡ะบะพ ะดะปั ะผะฐะปะตะฝัŒะบะพะณะพ ั‡ัƒะดะฐ", "_____no_output_____" ] ], [ [ "ะฃะฒะธะดะตะปะธ ั‡ัƒะดะพ? ะ”ะฐะฒะฐะนั‚ะต ะดะพะณะพะฒะพั€ะธะผัั, ั‡ั‚ะพ ะฒั‹ ะฝะต ะฑัƒะดะตั‚ะต ัั‚ะตัะฝัั‚ัŒัั ะณัƒะณะปะธั‚ัŒ ะฝัƒะถะฝั‹ะต ะฒะฐะผ ะบะพะผะฐะฝะดั‹ ะธ ะธัะบะฐั‚ัŒ ะพั‚ะฒะตั‚ั‹ ะฝะฐ ัะฒะพะธ ะฒะพะฟั€ะพัั‹ ะฒ ะธะฝั‚ะตั€ะฝะตั‚ะต. ะ•ัะปะธ ัƒะถ ัะพะฒัะตะผ ะฝะต ะฒั‹ั…ะพะดะธั‚, ะทะฐะดะฐะฒะฐะนั‚ะต ะธั… ะฒ ะฝะฐัˆ ั‡ะฐั‚ ั‚ะตั…ะฝะธั‡ะตัะบะพะน ะฟะพะดะดะตั€ะถะบะธ ะฒ ะขะตะปะตะณั€ะฐะผ. ", "_____no_output_____" ], [ "## 7. ะšะพะฝั‚ะตัั‚\n\nะฏะฝะดะตะบั.ะšะพะฝั‚ะตัั‚ - ัั‚ะพ ัะธัั‚ะตะผะฐ ะดะปั ะฐะฒั‚ะพะผะฐั‚ะธั‡ะตัะบะพะณะพ ั‚ะตัั‚ะธั€ะพะฒะฐะฝะธั ะบะพะดะฐ. ะ’ั‹ ะฑัƒะดะตั‚ะต ัั‚ะฐะปะบะธะฒะฐั‚ัŒัั ั ะฝะตะน ะฒ ั‚ะตั‡ะตะฝะธะต ะฒัะตะณะพ ะฝะฐัˆะตะณะพ ะบัƒั€ัะฐ. ะ”ะฐะฒะฐะนั‚ะต ะฟะพะฟั€ะพะฑัƒะตะผ ะฟะพั€ะฐะฑะพั‚ะฐั‚ัŒ ั ะฝะตะน ะธ ะฟะตั€ะตะฝะตัั‘ะผ ั‚ัƒะดะฐ ั€ะตัˆะตะฝะธะต ะทะฐะดะฐั‡ะบะธ ั ะฒะฒะพะดะพะผ ะธะผะตะฝะธ. \n\n> ะ—ะฐะดะฐั‡ะธ ะดะปั ะฟะตั€ะฒะพะณะพ ัะตะผะธะฝะฐั€ะฐ ะดะพัั‚ัƒะฟะฝั‹ ั‚ัƒั‚: https://official.contest.yandex.ru/contest/24363/enter/\n\nะ’ัั‘ ะพัั‚ะฐะฒัˆะตะตัั ะฒั€ะตะผั ะผั‹ ะฑัƒะดะตะผ ั€ะตัˆะฐั‚ัŒ ะทะฐะดะฐั‡ะธ ะธะท ะบะพะฝั‚ะตัั‚ะฐ. ะ ะตะบะพะผะตะฝะดัƒะตะผั‹ะน ัะฟะธัะพะบ: B,H,O,X, ะปัŽะฑั‹ะต ะดั€ัƒะณะธะต :)", "_____no_output_____" ] ], [ [ "### โ•ฐ( อกยฐ อœส– อกยฐ )ใคโ–ฌโ–ฌฮนโ•โ•โ•โ•โ•โ•โ• bzzzzzzzzzz\n# will the code be with you", "_____no_output_____" ] ], [ [ "## 8. 
ะ”ะทะตะฝ Python ะธ PEP-8\n\nะšะฐะบ ะผั‹ ัƒะฒะธะดะตะปะธ ะฒั‹ัˆะต, ะบะพะผะฐะฝะดะฐ `import` ะฟะพะทะฒะพะปัะตั‚ ะฟะพะดะณั€ัƒะทะธั‚ัŒ ั€ะฐะทะปะธั‡ะฝั‹ะต ะฟะฐะบะตั‚ั‹ ะธ ะผะพะดัƒะปะธ. ะžะดะธะฝ ะธะท ะผะพะดัƒะปะตะน, ะบะพั‚ะพั€ั‹ะน ะพะฑัะทะฐั‚ะตะปัŒะฝะพ ะฝัƒะถะฝะพ ะฟะพะดะณั€ัƒะทะธั‚ัŒ ะฝะฐ ะฟะตั€ะฒะพะน ะถะต ะฟะฐั€ะต - ัั‚ะพ ะผะพะดัƒะปัŒ `this`", "_____no_output_____" ] ], [ [ "import this", "The Zen of Python, by Tim Peters\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n" ] ], [ [ "ะ ะฐะทั€ะฐะฑะพั‚ั‡ะธะบะธ ัะทั‹ะบะฐ Python ะฟั€ะธะดะตั€ะถะธะฒะฐัŽั‚ัั ะพะฟั€ะตะดะตะปั‘ะฝะฝะพะน ั„ะธะปะพัะพั„ะธะธ ะฟั€ะพะณั€ะฐะผะผะธั€ะพะฒะฐะฝะธั, ะฝะฐะทั‹ะฒะฐะตะผะพะน ยซThe Zen of Pythonยป (ยซะ”ะทะตะฝ ะŸะธั‚ะพะฝะฐยป, ะธะปะธ ยซะ”ะทะตะฝ ะŸะฐะนั‚ะพะฝะฐยป). ะ’ั‹ัˆะต ะผั‹ ะฒั‹ะฒะตะปะธ ะฝะฐ ัะบั€ะฐะฝ ะธะผะตะฝะฝะพ ะตั‘. ะ˜ะทัƒั‡ะธั‚ะต ัั‚ัƒ ั„ะธะปะพัะพั„ะธัŽ ะธ ั‚ะพะถะต ะฝะฐั‡ะฝะธั‚ะต ะตั‘ ะฟั€ะธะดะตั€ะถะธะฒะฐั‚ัŒัั.\n\nะ‘ะพะปะตะต ั‚ะพะณะพ, ะผั‹ ั€ะตะบะพะผะตะฝะดัƒะตะผ ะฒะฐะผ ะธะทัƒั‡ะธั‚ัŒ [ัั‚ะฐะนะปะณะฐะนะด ะฟะพ ะฝะฐะฟะธัะฐะฝะธัŽ ะบะพะดะฐ PEP 8.](https://pythonworld.ru/osnovy/pep-8-rukovodstvo-po-napisaniyu-koda-na-python.html) ะ’ ะบะพะฝั†ะต ะบัƒั€ัะฐ ะฒะฐั ะฑัƒะดะตั‚ ะถะดะฐั‚ัŒ ะดะพะผะฐัˆะบะฐ ั ะบะพะด-ั€ะตะฒัŒัŽ. ะ’ ะฝะตะน ะผั‹ ะฑัƒะดะตะผ ั‚ั€ะตะฑะพะฒะฐั‚ัŒ, ั‡ั‚ะพะฑั‹ ะฒั‹ ะฟั€ะธะดะตั€ะถะธะฒะฐะปะธััŒ PEP 8. ะšะพะด ะดะพะปะถะตะฝ ะฑั‹ั‚ัŒ ั‡ะธั‚ะฐะตะผั‹ะผ :) ", "_____no_output_____" ], [ "## ะ’ะฐัˆะต ะทะฐะดะฐะฝะธะต\n\n- ะ”ะพั€ะตัˆะฐั‚ัŒ ะฒะทะต ะทะฐะดะฐั‡ะธ ะธะท [ะฟะตั€ะฒะพะณะพ ะบะพะฝั‚ะตัั‚ะฐ.](https://official.contest.yandex.ru/contest/24363/enter/) ะžะฑั€ะฐั‚ะธั‚ะต ะฒะฝะธะผะฐะฝะธะต, ั‡ั‚ะพ ะพะฝะธ ะฟะพะปะฝะพัั‚ัŒัŽ ัะพะพั‚ะฒะตัั‚ะฒัƒัŽั‚ [ะฟะตั€ะฒะพะน ะฝะตะดะตะปะต](https://www.coursera.org/learn/python-osnovy-programmirovaniya/home/week/1) ั€ะตะบะพะผะตะฝะดะพะฒะฐะฝะฝะพะณะพ ะฒะฐะผ ะบัƒั€ัะฐ ั Coursera. ะœะพะถะฝะพ ั€ะตัˆะฐั‚ัŒ ะธั… ะฒ ะบะพะฝั‚ะตัั‚ะต, ะผะพะถะฝะพ ะฝะฐ ะบัƒั€ัะตั€ะต. ะšะฐะบ ะฒะฐะผ ะฑัƒะดะตั‚ ัƒะดะพะฑะฝะตะต. ะŸะพัั‚ะฐั€ะฐะนั‚ะตััŒ ั€ะตัˆะธั‚ัŒ ั…ะพั‚ัะฑั‹ ะฟะพะปะพะฒะธะฝัƒ ะธั… ะฝะธั….\n- ะ’ ะบะฐั‡ะตัั‚ะฒะต ะฐะปัŒั‚ะตั€ะฝะฐั‚ะธะฒั‹ ะผั‹ ะผะพะถะตั‚ะต ะฟะพะฟั€ะพะฑะพะฒะฐั‚ัŒ ะฟะพั€ะตัˆะฐั‚ัŒ [ะฟะพั…ะพะถะธะต ะทะฐะดะฐั‡ะธ ั pythontutor](https://pythontutor.ru/lessons/inout_and_arithmetic_operations/)\n\nะ ะตัˆะตะฝะธะต ัั‚ะธั… ะทะฐะดะฐะฝะธะน ะฝะฐะผะธ ะฝะธะบะฐะบ ะฝะต ะฟั€ะพะฒะตั€ัะตั‚ัั. ะžะฝะธ ะฝัƒะถะฝั‹ ะดะปั ะฟั€ะฐะบั‚ะธะบะธ, ั‡ั‚ะพะฑั‹ ะฒะฟะพัะปะตะดัั‚ะฒะธะธ ะฒะฐะผ ะฑั‹ะปะพ ะปะตะณั‡ะต ั€ะตัˆะฐั‚ัŒ ะดะพะผะฐัˆะฝะธะต ั€ะฐะฑะพั‚ั‹ ะธ ัะฐะผะพัั‚ะพัั‚ะตะปัŒะฝั‹ะต ั€ะฐะฑะพั‚ั‹. \n\n> ะ‘ะพะปะตะต ั‚ะพะณะพ, ะผะพะถะฝะพ ะฟั€ะพัะธั‚ัŒ ั€ะฐะทะฑะธั€ะฐั‚ัŒ ะฝะฐ ัะตะผะธะฝะฐั€ะฐั… ะธ ะบะพะฝััƒะปัŒั‚ะฐั†ะธัั… ะทะฐะดะฐั‡ะธ, ะบะพั‚ะพั€ั‹ะต ะฒั‹ ะฝะต ัะผะพะณะปะธ ั€ะตัˆะธั‚ัŒ ะปะธะฑะพ ะฝะต ะฟะพะฝัะปะธ. 
ะžะฑ ัั‚ะพะผ ะผะพะถะฝะพ ะฟะพะฟั€ะพัะธั‚ัŒ ัะฒะพะตะณะพ ัะตะผะธะฝะฐั€ะธัั‚ะฐ ะธะปะธ ะฐััะธัั‚ะตะฝั‚ะพะฒ. ", "_____no_output_____" ], [ "![](https://raw.githubusercontent.com/FUlyankin/stickers/master/2-itog_stickers/zen.png)", "_____no_output_____" ], [ " ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
cb0e561facecdb584e3d38ecc95520e214c03341
18,236
ipynb
Jupyter Notebook
notebooks/C2. Random Forests - Data Exploration.ipynb
cmiller8/LearnDataScience
7dcc895a3b5ef895ba0e85993a4de47a22636fed
[ "BSD-2-Clause" ]
1
2018-06-03T21:20:01.000Z
2018-06-03T21:20:01.000Z
notebooks/C2. Random Forests - Data Exploration.ipynb
cmiller8/LearnDataScience
7dcc895a3b5ef895ba0e85993a4de47a22636fed
[ "BSD-2-Clause" ]
null
null
null
notebooks/C2. Random Forests - Data Exploration.ipynb
cmiller8/LearnDataScience
7dcc895a3b5ef895ba0e85993a4de47a22636fed
[ "BSD-2-Clause" ]
null
null
null
48.759358
436
0.604299
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb0e59444e8c587a7221d65104602e02ba7902ee
1,877
ipynb
Jupyter Notebook
02_quick_recap_of_scala/03_scala_repl_or_jupyter_notebook.ipynb
itversity/spark-scala
5bb49725a2471816c71e177eb90da560800e5435
[ "MIT" ]
null
null
null
02_quick_recap_of_scala/03_scala_repl_or_jupyter_notebook.ipynb
itversity/spark-scala
5bb49725a2471816c71e177eb90da560800e5435
[ "MIT" ]
null
null
null
02_quick_recap_of_scala/03_scala_repl_or_jupyter_notebook.ipynb
itversity/spark-scala
5bb49725a2471816c71e177eb90da560800e5435
[ "MIT" ]
4
2021-06-03T15:23:11.000Z
2021-12-05T01:49:31.000Z
22.082353
108
0.553543
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb0e634e7a18e036d87a8b956eddbf4830355e43
6,947
ipynb
Jupyter Notebook
Data Science/Image Processing/Resizer IT.ipynb
Alissonfelipe1234/JupyterNotebooks
ba732e40bbaef70d9ddaae312b72616a5aa8b98b
[ "Unlicense" ]
null
null
null
Data Science/Image Processing/Resizer IT.ipynb
Alissonfelipe1234/JupyterNotebooks
ba732e40bbaef70d9ddaae312b72616a5aa8b98b
[ "Unlicense" ]
null
null
null
Data Science/Image Processing/Resizer IT.ipynb
Alissonfelipe1234/JupyterNotebooks
ba732e40bbaef70d9ddaae312b72616a5aa8b98b
[ "Unlicense" ]
null
null
null
26.822394
227
0.542968
[ [ [ "## Imports:", "_____no_output_____" ] ], [ [ "from PIL import Image, ImageOps\nimport cv2\nfrom google_images_download import google_images_download ", "_____no_output_____" ], [ "solicitor = google_images_download.googleimagesdownload()\n\narguments = {\"keywords\":\"consorcio feliz\", \"aspect_ratio\":\"square\", \"color_type\":\"transparent\", \"limit\":7,\"output_directory\":\"images\", \"no_directory\":True}\npaths = solicitor.download(arguments)", "\nItem no.: 1 --> Item name = consorcio feliz\nEvaluating...\nStarting Download...\nCompleted Image ====> 1.consorcio_site.banner.b57de633fbb34701.62616e6e6572735f4b415f4d41494f2e706e67.png\nCompleted Image ====> 2.consorcio_site.banner.948ec30cc8c55333.62616e6e6572735f474f4c5f4d41494f2e706e67.png\nURLError on an image...trying next one... Error: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: Hostname mismatch, certificate is not valid for 'blog.consorcioluiza.com.br'. (_ssl.c:1076)>\nCompleted Image ====> 3.banner-carro-e-moto-simulador-de-consorcio.png\nCompleted Image ====> 4.img-consorcio-govesa.png\nCompleted Image ====> 5.ademilar-santa-felicidade-belo-horizonte-4.png\nCompleted Image ====> 6.logo.png\nCompleted Image ====> 7.logo-triunfo.png\n\nErrors: 1\n\n" ] ], [ [ "## Instantiating the variables", "_____no_output_____" ] ], [ [ "imagePath = \"topo.png\"\ndinamic_image = Image.open(imagePath)\n\nnew_width = 386\nnew_height = 390", "_____no_output_____" ] ], [ [ "## Processing:", "_____no_output_____" ] ], [ [ "image1 = dinamic_image.resize((new_width, new_height), Image.NEAREST) # use nearest neighbour\nimage2 = dinamic_image.resize((new_width, new_height), Image.BILINEAR) # linear interpolation in a 2x2 environment\nimage3 = dinamic_image.resize((new_width, new_height), Image.BICUBIC) # cubic spline interpolation in a 4x4 environment\nimage4 = dinamic_image.resize((new_width, new_height), Image.ANTIALIAS) # best down-sizing filter", "_____no_output_____" ] ], [ [ "## Save", "_____no_output_____" ] ], [ [ "ext = \".png\"\nimage1.save(\"top\" + ext) #Bad result for my tests But smaller size\nimage2.save(\"top2\" + ext) #Averange result and size\nimage3.save(\"top3\" + ext) #Best result\nimage4.save(\"top4\" + ext) #Best result too\n#dinamic_image.save(\"original\" + ext) #convert original to png", "_____no_output_____" ], [ "#LESS CODE\nimagePath = \"original.png\"\ndinamic_image = Image.open(imagePath)\n\nnew_width = 100\nnew_height = 100\nimage4 = dinamic_image.resize((new_width, new_height), Image.ANTIALIAS)\nimage4.save(imagePath) #Best result too", "_____no_output_____" ], [ "#USE TO RETANGULAR FROM SQUARE IMAGE\n\nfrom PIL import Image, ImageOps\nimport cv2\n\ndesired_size = 333\nim_pth = \"path/img.png\"\n\nim = Image.open(im_pth)\nprint(im.mode)\nold_size = im.size \n\nratio = float(desired_size)/max(old_size)\n\nnew_size = tuple([int(x) for x in old_size])\n\nim = im.resize(new_size, Image.ANTIALIAS)\n\nnew_im = Image.new(\"RGB\", (desired_size, desired_size))\nnew_im.paste(im, ((desired_size-new_size[0])//2,\n (desired_size-new_size[1])//2))\n\ndelta_w = desired_size - new_size[0]\ndelta_h = desired_size - new_size[1]\npadding = (delta_w//2, delta_h//2, delta_w-(delta_w//2), delta_h-(delta_h//2))\nprint(padding)\nnew_im = ImageOps.expand(im, padding, fill=000000)\nim_final = new_im.resize((new_width, new_height), Image.BICUBIC) \nim_final.save(im_pth)", "RGBA\n(557, 672, 557, 672)\n" ] ], [ [ "## transform white in transparent", "_____no_output_____" ] ], [ [ "def white_cleaner():\n 
from PIL import Image\n\n    img = Image.open('img.png')\n    img = img.convert(\"RGBA\")\n\n    pixdata = img.load()\n\n    width, height = img.size\n    # turn pure-white pixels fully transparent\n    for y in range(height):\n        for x in range(width):\n            if pixdata[x, y] == (255, 255, 255, 255):\n                pixdata[x, y] = (255, 255, 255, 0)\n\n    img.save(\"img2.png\", \"PNG\")", "_____no_output_____" ], [ "from PIL import Image\n\nimg = Image.open('logo.jpeg')\nimg = img.convert(\"RGBA\")\n\npixdata = img.load()\n\nwidth, height = img.size\nfor y in range(height):\n    for x in range(width):\n        if pixdata[x, y] == (255, 255, 255, 255):\n            pixdata[x, y] = (255, 255, 255, 0)\n\nimg.save(\"logo.png\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb0e6b980b77a028f9faf0034f0545e1f6087aea
77,123
ipynb
Jupyter Notebook
src/notebooks/121-line-chart-customization.ipynb
s-lasch/The-Python-Graph-Gallery
1df060780e5e9cf763815581aad15da20f5a4213
[ "0BSD" ]
1
2022-01-28T09:36:36.000Z
2022-01-28T09:36:36.000Z
src/notebooks/121-line-chart-customization.ipynb
preguza/The-Python-Graph-Gallery
4645ec59eaa6b8c8e2ff4eee86516ee3a7933b4d
[ "0BSD" ]
null
null
null
src/notebooks/121-line-chart-customization.ipynb
preguza/The-Python-Graph-Gallery
4645ec59eaa6b8c8e2ff4eee86516ee3a7933b4d
[ "0BSD" ]
null
null
null
339.748899
16,812
0.936711
[ [ [ "## Custom Line Color", "_____no_output_____" ], [ "To custom color, just use the `color` argument! \n\nNote that you can add **transparency** to the color with the `alpha` argument (0=transparent, 1=opaque).", "_____no_output_____" ] ], [ [ "# Libraries and data\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\ndf=pd.DataFrame({'x_values': range(1,11), 'y_values': np.random.randn(10) })\n\n# Draw plot\nplt.plot( 'x_values', 'y_values', data=df, color='skyblue')\nplt.show()\n\n# Draw line chart by modifiying transparency of the line\nplt.plot( 'x_values', 'y_values', data=df, color='skyblue', alpha=0.3)\n\n# Show plot\nplt.show()", "_____no_output_____" ] ], [ [ "## Custom Line Style", "_____no_output_____" ], [ "You can choose between different **line styles** with the `linestyle` argument.", "_____no_output_____" ] ], [ [ "# Libraries and data\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\ndf=pd.DataFrame({'x_values': range(1,11), 'y_values': np.random.randn(10) })\n\n# Draw line chart with dashed line\nplt.plot( 'x_values', 'y_values', data=df, linestyle='dashed')\n\n# Show graph\nplt.show()", "_____no_output_____" ] ], [ [ "The following examples show different types of line styles.", "_____no_output_____" ] ], [ [ "plt.plot( [1,1.1,1,1.1,1], linestyle='-' , linewidth=4)\nplt.text(1.5, 1.3, \"linestyle = '-' \", horizontalalignment='left', size='medium', color='C0', weight='semibold')\nplt.plot( [2,2.1,2,2.1,2], linestyle='--' , linewidth=4 )\nplt.text(1.5, 2.3, \"linestyle = '--' \", horizontalalignment='left', size='medium', color='C1', weight='semibold')\nplt.plot( [3,3.1,3,3.1,3], linestyle='-.' , linewidth=4 )\nplt.text(1.5, 3.3, \"linestyle = '-.' \", horizontalalignment='left', size='medium', color='C2', weight='semibold')\nplt.plot( [4,4.1,4,4.1,4], linestyle=':' , linewidth=4 )\nplt.text(1.5, 4.3, \"linestyle = ':' \", horizontalalignment='left', size='medium', color='C3', weight='semibold')\nplt.axis('off')\nplt.show()", "_____no_output_____" ] ], [ [ "## Custom Line Width", "_____no_output_____" ], [ "Finally you can custom the **line width** as well using `linewidth` argument.", "_____no_output_____" ] ], [ [ "# Libraries and data\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\ndf=pd.DataFrame({'x_values': range(1,11), 'y_values': np.random.randn(10) })\n\n# Modify line width of the graph\nplt.plot( 'x_values', 'y_values', data=df, linewidth=22)\n\n# Show graph\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb0e7315d698209292a9ef5a71153b17634a5d21
292,028
ipynb
Jupyter Notebook
Reddit_Sentiment_Assignment.ipynb
ebayandelger/MSDS600
7bec34531f3fc661a0bfdfe42a974fcfd8f1f2c4
[ "MIT" ]
null
null
null
Reddit_Sentiment_Assignment.ipynb
ebayandelger/MSDS600
7bec34531f3fc661a0bfdfe42a974fcfd8f1f2c4
[ "MIT" ]
null
null
null
Reddit_Sentiment_Assignment.ipynb
ebayandelger/MSDS600
7bec34531f3fc661a0bfdfe42a974fcfd8f1f2c4
[ "MIT" ]
null
null
null
227.790952
175,647
0.783593
[ [ [ "# This handy piece of code changes Jupyter Notebooks margins to fit your screen.\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:95% !important; }</style>\"))", "_____no_output_____" ] ], [ [ "## Be sure you've installed the praw and tqdm libraries. If you haven't you can run the line below. Node.js in required to install the jupyter widgets in a few cells. These two cells can take a while to run and won't show progress; you can also run the commands in the command prompt (without the !) to see the progress as it installs.\n\nIf conda is taking a long time, you might try the mamba installer: https://github.com/TheSnakePit/mamba\n`conda install -c conda-forge mamba -y`\nThen installing packages with mamba should be done from the command line (console or terminal).", "_____no_output_____" ] ], [ [ "!conda install tqdm praw nodejs -y", "Collecting package metadata (current_repodata.json): ...working... done" ] ], [ [ "Install the jupyter widget to enable tqdm to work with jupyter lab:", "_____no_output_____" ] ], [ [ "!jupyter labextension install @jupyter-widgets/jupyterlab-manager", "An error occured.\nValueError: Please install Node.js and npm before continuing installation. You may be able to install Node.js from your package manager, from conda, or directly from the Node.js website (https://nodejs.org).\nSee the log file for details: C:\\Users\\ENKHBA~1\\AppData\\Local\\Temp\\jupyterlab-debug-b79fsi1d.log\n" ] ], [ [ "# Scrape Reddit Comments for a Sentiment Analysis - Assignment\n### Go through the notebook and complete the code where prompted\n##### This assignment was adapted from a number of sources including: http://www.storybench.org/how-to-scrape-reddit-with-python/ and https://towardsdatascience.com/scraping-reddit-data-1c0af3040768", "_____no_output_____" ] ], [ [ "# Import all the necessary libraries\nimport praw # Import the Praw library: https://praw.readthedocs.io/en/latest/code_overview/reddit_instance.html\nimport pandas as pd # Import Pandas library: https://pandas.pydata.org/\nimport datetime as dt # Import datetime library\nimport matplotlib.pyplot as plt # Import Matplot lib for plotting\nfrom tqdm.notebook import tqdm # progress bar used in loops\n\nimport credentials as cred # make sure to enter your API credentials in the credentials.py file", "_____no_output_____" ] ], [ [ "# Prompt\n### In the cell below, enter your client ID, client secret, user agent, username, and password in the appropitate place withing the quotation marks", "_____no_output_____" ] ], [ [ "reddit = praw.Reddit(client_id = 'A0HZsCKBk-P50g',\n client_secret = '9vAJO7D2uhGh-I7wpVNDI2JEwm0',\n username = 'baymsds', \n password = '2003@@PUujin', \n user_agent = 'msds')", "_____no_output_____" ] ], [ [ "# Prompt\n## In the cell below, enter a subreddit you which to compare the sentiment of the post comments, decide how far back to pull posts, and how many posts to pull comments from.\n## We will be comparing two subreddits, so think of a subject where a comparison might be interesting (e.g. 
if there are two sides to an issue which may show up in the sentiment analysis as positive and negative scores).", "_____no_output_____" ] ], [ [ "number_of_posts = 200\ntime_period = 'all' # use posts from all time\n\n# .top() can use the time_period argument\n# subreddit = reddit.subreddit('').top(time_filter=time_period, limit=number_of_posts)\n\nsubreddit = reddit.subreddit('GlobalWarming').hot(limit=number_of_posts)\n\n# Create an empty list to store the data\nsubreddit_comments = []\n\n# go through each post in our subreddit and put the comment body and id in our dictionary\n# the value for 'total' here needs to match 'limit' in reddit.subreddit().top()\nfor post in tqdm(subreddit, total=number_of_posts):\n submission = reddit.submission(id=post)\n submission.comments.replace_more(limit=0) # This line of code expands the comments if โ€œload more commentsโ€ and โ€œcontinue this threadโ€ links are encountered\n for top_level_comment in submission.comments: \n subreddit_comments.append(top_level_comment.body) # add the comment to our list of comments", "_____no_output_____" ], [ "# View the comments.\nprint(subreddit_comments)", "['Part 2 would probably go something like this. \\n\\n\"Ok so he\\'s ill, but the important thing is that it\\'s not my fault\"', 'This is perfect and needs to be made into a film.', 'This actually was well written and so much truth', \"That's actually a wonderful allegory. Thanks for posting!\", 'I love your writing', \" Hey, he might not actually be ill tho. There is still a chance he's not.\\n There is still a chance everyone is lying about climate change.\", \"> Is it already too late to do anything. \\n\\nNot necessarily. It is likely too late to stave off +2C, but that doesn't mean we can't do anything about the problem. It just means that we'll have to kick our adaptation into high gear because the window of possibility for mitigation has closed.\\n\\n> Is everything i want to accomplish in life impossible? \\n\\nNo. Go to school to be an engineer, because they will be absolutely KEY in designing resilient infrastructure. You will also have considerable job security, because the demand for your skills will only increase as things worsen.\\n\\n> Will i see the end of humanity?! \\n\\nNo. Whoever is telling you that you will is blowing smoke up your tuchus. Geopolitically speaking, things could get quite problematic as developing nations are destabilized and climate refugees are mobilized and displaced en masse, but the species will not die out as a result of near-term climate change. We survived the last ice age. We'll make it through. It will be painful, sure, but not fatal.\\n\\n> Is everything i do meaningless? \\n\\nNot if you dedicate your life to living sustainably, your career to on-the-ground skills, and your energy to promoting awareness among your peers, friends, and family.\\n\\n> Iโ€™m really scared. \\n\\nFear is like a rocking chair. Sure, it gives you something to do, but it won't actually *get* you anywhere. Channel your fear into positive changes in your life. You control the amount of energy you put into making the world a better place, and global warming doesn't change the fact that you cannot control the world around you. You may be hit by a bus tomorrow on your way to school. Is that a reason not to wake up tomorrow? Of course not.\\n\\n> Is this the end? \\n\\nNo.\\n\\n> are there no other options to save earth?\\n\\nOf course there are options. Take a deep breath. The planet will make it through just fine. 
It's been a flaming hellscape and a frozen ball of ice, and yet life found a way. We might enter a period of destabilized ecosystems and political unrest, but you won't die in a fiery inferno tomorrow, or the next day, or the next day. Take it step by step and build a life you can be proud of. After a while, you might actually change the people around you for the better.\", \"Don't be scared. I do not see any value in that, and it is something you can control. \\n\\nFeel empowered by your awareness. For example, I have convinced my fiance that we have no right thinking about having kids. I am prioritizing land ownership and practicing horticulture. Also, many musical instruments don't require power, which is helpful if things get absurdly bad. \\n\\nKnowing possible future conditions can be very empowering.\", \"I'm 33. I feel the same way.\", 'YOU DEAD BOY', \"I think that in our lifetimes, it's going to make things worse, but it won't kill us all. Species extinctions make us all poorer, since a world of diverse species is better, and we'll have to live with that. Some areas will flood, and the people living there will have to go elsewhere. A lot of places will have hotter summers and more and more violent storms. We can live with all of that, it's just not as good.\", 'Iโ€™m 2 years older than you and I came to know about climate change when I was your age and I was very scared like you. Iโ€™m here to tell you that NO humanity is not ending with you! all your work is NOT a meaningless and this is NOT the end. Although much damage has been done already, we canโ€™t lose hope here. I commend you actually for having the will to at Least learn about climate change. Youโ€™re better off than people who just deny it! There are solutions around us in the nature giving us signs to look at them and use them. And yes as cheesy as it sounds โ€˜change is possibleโ€™!! I believe youth like us should use our voice and talk about the climate issues and demand solutions and well initiate change!! I donโ€™t want to give your false affirmations and tell you everything is going to be okay because it will not be okay if we donโ€™t act now! So donโ€™t lose hope and try and start looking for solutions around you and search. It will surely help you!', 'Yeah my 15 year old son told me the same thing, so Iโ€™m definitely watching this post...', \"Just stop.\\n\\nI'm 41 and live in Sweden whatever tens of meter above the sea level and so far I haven't noticed just about shit.\\n\\nThe coral reefs seem to have it worst at the moment?\\n\\nIf necessary we could run nuclear plants or use renewable energy to remove CO2. As for whatever we'd want to use the resources that's a different story.\\n\\nI don't think human kind is doomed from it. Yet. Life may change with it but humans have had that impact for quite some time now.\\n\\nOf course we should try to act better but I don't think you should kinda worry if you can't have a life because of it.\", \"Basically (no '?'). Most likely. No. Yes. Not yet. It's complicated.\\n\\n&#x200B;\\n\\nFor more precise answers: are you on the 'winning' side of capital ?\", 'This is a pretty typical disaster mate... Just a large forest fire.', 'Siberia. Just saying.', '[removed]', 'Why attack on meat industry, and animal lifes. 
while oil industry fuck up whole world and create wars and destruction every where.', \"Pigs don't emit greenhouse gasses!\\n\\nOnly Ruminants, like cattle, do.\\n\\nSo Bacon and Pork chops and BBQ ribs are fine.\\n\\nJust avoid the steaks and burgers.\", 'I am Chinese, so my English is relatively poor. As other replies said,\\xa0the approach will not reduce carbon emissions by 50%. But if it can be implemented as a policy, the reduction in carbon emissions can still be expected.\\n\\nIf implemented, there will be other effects. For example, people will wonder why the government has set such a policy. With the popularity of \"Friday for Future\" action, people may learn more about global warming.\\n\\nGlobal warming will lead to reduced food production and reduced fresh water. A smaller population will have less competition and less injustice.', 'It is already a thing. Lots of people are saying this. \\nSome people in 1st world countries are using very flawed logic though....\\n\"Its OK for me to buy lots of new stuff, commute to work and fly on holiday as much as I like if we have less children. I won\\'t mind at all if in 30 years time old people outnumber the young and there aren\\'t enough taxpayers to support me or our society when I\\'m old and want to retire...\"', \"We need less billionaires, poor don't have much output\", \"It's already expensive to have children so there already is a huge financial incentive not to.\", \"The population increase is a result of our ability to burn fossil fuel which means more energy, more efficient means of production and thus a better capacity to support more population and denser population as well. The population increased naturally as we could support more people and as each individual was able to consume more.\\n\\nYou cannot really convince or force an entire race to stop having kids but you can convince or entice people to stop burning oil. This will lead to a natural decrease in consumption, population and emissions.\\n\\nForcing people not to have kids will be a band aid solution because that's not the root of the problem. Even if you get 50% you have not addressed the fact that the remaining ones use single use plastic and burn oil and are still polluting\", 'Population decline has to be low, otherwise you get a rapidly aging population that canโ€™t be supported by the productivity of the working people. Many western nations already have decreasing domestic populations that are supplemented with immigration, which isnโ€™t good for the immigrant countries of origin like in Africa or Eastern Europe because the immigrants tend to be the people who can afford to move, which draws money, labor, and knowledge out of those nations.\\n\\nAlso if you reduced the population by half, you wouldnโ€™t necessarily reduce the consumption or pollution by half. For example, an oil or coal power plant producing power for electric cars will be more efficient than thousands of little gasoline engines for cars, but a small population wouldnโ€™t really justify a power plantโ€™s cost so even though they use less fossil fuels overall, itโ€™s still more per person. Itโ€™s like the term โ€œeconomies of scaleโ€.', 'One thought i have read recently i found very intersting: Education of women in developing countries can help tackle climate change. The growth in population we see in many poor countries is mostly because women there are not meant or able to have a career therefore their only goal is to have children/marry. 
130 Million girls today are denied education and it believed, that a 100% enrollment of girls in all countries could decrease population by nearly 900 million people by 2050. So your thought isnt stupid, but it certainly is a hard thing to achieve.', \"It's a combination of both, not just population reduction but also proactive climate mitigation actions like investment in renewables.\\n\\nThis is the crux of the ongoing debate when it comes to climate treaties. Most of the pollution comes from wealthy nations from the first world yet developing nations are criticized for growing their economies and their population by using fossil fuels. No one can dictate how much each nation can grow in terms of population. \\n\\nWhat really needs to happen is for the developed world to give developing world the technologies to bypass the use of fossil fuels to grow their economies and to help girls get an education and careers in the developing world. Wealthier families generally have fewer children. This is why there has been a general push to move developing countries out of poverty. It's all interlinked so we need to stop looking through the nationalistic lens and return to the global perspective.\", 'โ€œHow to decarbonize Americaโ€: https://podcasts.apple.com/us/podcast/the-ezra-klein-show/id1081584611?i=1000489267291\\n\\nBasically he says if we aggressively convert everything to electric (vehicles, home heating) we donโ€™t have to change our lifestyles and weโ€™ll pay $1-2k less per year.', \"You bring up an uncomfortable truth that most people don't want to entertain. And when you think about it, that reaction makes sense. Many biologists believe that the drive to reproduce is the fundamental force that drives all human activity (all life on Earth, really). It is a hard instinct to turn against.\\n\\nBut really, over time, there is not another option that will yield nearly as much benefit when it comes to slowing global warming. The naysayers in this thread can't argue the simple logic that fewer people on Earth = less energy consumption and reduced Co2 emissions inherently. And frankly, if not for our absolutely massive population boom over the past 200 years, global warming would not be nearly as a big a threat today as it currently is.\\n\\nReally, citizens should be pushing for this to happen now, while they still have some kind of chance at helping to shape the legislation to make this cutback in reproduction as ethical as possible. Because if we wait too long, governments won't decide to incentivize not having kids - they'll just enforce but having kids, probably with some kind of lottery system.\", 'Less people is less emissions? Sure. But the amount and which people is where this gets complicated:\\nBasically, 50% less people โ‰  50% less emissions. The people who make the most emissions are a lot less than 50% of us. \\n\\nWe all have a different footprint. (Do you own a car? Do you take private jets? How big is your house? Are you the CEO of Amazon?)', \"Every week we have new genius popping up with exactly the same stupid idea. Can't we just ban these posts?\", \"I already have a massive incentive to not have kids, at least in the US: They're expensive. Just _having_ the kid is prohibitively expensive. 
Which is why my wife and I have no plans on having any.\", 'The problem is that the changes are in the **probability** of events occurring, so it\\'s hard to point to an individual event and say \"This wouldn\\'t have happened but for Global Warming\".\\n\\nYou have to look at statistical changes in frequency or intensity that are consistent with global warming, like the greater strength of Atlantic hurricanes, or frequency of California wildfires.\\n\\nThat said, some things are unprecedented, like loss of polar ice.', 'Extreme weather events occur from time to time even without warming. The key is when they occur too often or set new records. For example, sucessive years of hot weather have caused huge areas of coral reef to die around Australia. This summer there was a record highest temperature in Siberia, rsing temperatures caused huge fires where there used to be permafrost.', \"And we'll probably still be told to turn them off at every stop during 100+ degree days.\", 'Political activism. Obviously reducing your carbon footprint is the ethical thing to do, but success is going to depend on government policy. So politics is the most important front in the battle to save the future.', \"I like the reply here about political activism, but let's be real here. Without boatloads of money or fame, our voice and our vote lack meaningful agency. \\n\\nIts absolutely the 'right' way to approach things. But in terms of effecting change, I think the only realistic way for the people to move the needle is by looking at coordinating a range of non-violent activism (sit-ins, blocking traffic) to destruction of corporate property. \\n\\nSo since these groups do not advertise themselves if they do exist, you could start one, or start small by finding an Extinction Rebellion group to join. \\n\\nAm I wrong here?\", 'Consume less animal products, plant some trees, have compost pile in home if you can', \"Similar age, similar climate anxiety. If you're in the US, try out this tool to see some projections. [US climate projections](https://www.climate.gov/maps-data/data-snapshots/averagemaxtemp-decade-LOCA-rcp85-2030-07-00?theme=Projections)\\nEither way, I recommend the UN reports from 2019.\\n\\nOn a personal note, I find listening to experts extremely helpful. I tune in for live broadcasts and read all the reports I can. I like to know what's going on & feel like I'm doing all the right things for my family -- but also I have friends who know how bad this is and choose to not be as in touch for their own sanity. I don't think there's a wrong way to feel about something like this.\", 'You should come to terms with this now. Get therapy, or find others to talk about this with. Placing any emotional value on the future is not a good strategy in 2020.', 'dont put too much faith into anything other than maybe global temperature increase and/or co2 ppm, co2e ppm. there is no way to know what exact effect it will have on your life as an โ€˜individualโ€™. we can loosely predict the effects to our **civilisation**. \\nas a user of r/collapse, if you ever go there especially discount any โ€œโ€predictionsโ€โ€ if you can even call it that. NTHE is thrown about constantly and measured posts saying that BOE wont be this or next year, etc. ridiculous, dont listen to redditors in general , not even me', \"I know somebody who studied climate science who told me climate predictions are just that: predictions, models. 
Not even the smartest climate scientist in the world knows for sure what the world will look like in 20, 50 or 100 years. Humans causing the climate to change is a new phenomenon, it's not like we have previous experiences to compare this with.\\n\\nPersonally, I can't deal with climate reports anymore. I've cared for so long while most people around me are still indifferent. I do what I can and I do worry about the future sometimes, but it's not like that wouldn't have been the case if climate change wasn't a thing. Illness, death, natural disasters have always been a part of the human experience. The only major thing I still worry about is having kids - looking at climate change reports and still deciding to have them seems cruel.\", 'Very', \"There are plenty of scenarios and none look good. But not having ice on poles isn't looking good. From more extreme temperatures, being more cold in colder zones and more heat in hotter zones, to more heat absorption from having less of a reflective surface on the planet no bounce sun light, from the release of even worse gases locked in by ice, to the ocean acidification, to global ocean conveyor belt stopping, breaking food chains, and whatever more we don't know, I would say it is really normal you're feeling depressed. I know I was feeling depressed myself over this. And I was reading stuff everyday, sharing stuff everyday, but the thing is, while personal mindset of ourselves have to act, global political changes have to happen. So you should be very worried. I don't understand why people can sometimes stand up for animal rights, human rights, huge protest over a single and some times simple thing, but for what makes everything for us, our world, everybody ignore it in general.\\nBut life goes on, we have to live right now, you may even die before you can see real changes. It can help to ease your mind, to develop some survival skills. I've learned to brew beer!\\nNew technology is coming up everyday.\\nI think we are at a point of something really big to come, as the next step we will face it will be a global change. Covid-19 has showed us we aren't prepared to big things, but we are learning and we will have to change. A global energy solution would most certainly help.\\nSometime I feel hopeful, other times depressed. But when you're walking on a thin line, maybe it's just better to don't look down.\\nSorry for the confusing speech\", '/r/ClimatePreparation', \"You should have a healthy appreciation for the present. It's not going to get much better than this\", 'I know this is really to discuss Global Warming, but as someone pretty close in age (24) and battling depression and anxiety I can 100% relate. I wholeheartedly do my best to force my mentality to steer towards an ideology something like this. \"Worry about what you have control over and can actively work on to obtain betterment or progress in.\" Let\\'s say the world as we know it ends in the next 20 years. Is there anything you can actively work on to change or stop this? I mean if you want to reallllyyyy get into it. You could \"waste\" a bunch of your money on supplies, weapons, storage facility, bunker etc. that\\'s entirely up to you. Moving more towards the subreddits topic in particular. There are things you can attempt to do that will minimize your environmental footprint. Recycle, buy electric over gas powered things. I bought an electric mower and am saving up for an electric/hybrid car. 
Bike or walk to places you visit in your immediate vicinity instead of driving, if you do. Use public transportation if it\\'s available to you. Carpool with friends or co-workers if this would be viable for you. Try to eat less meat. You don\\'t have to go full vegetarian or vegan, but I believe any bit helps. On a larger scale though, it\\'s debatable if these sort of efforts will really bear any meaningful impact. It\\'s pretty set in stone that major corporations are not globally advocating or making changes to go green. Many experts believe that it is already too late or getting very close to being too late. Any switches now would only dampen the snowballing effect, but it\\'s already happening. The evidence is far too great to realistically say otherwise. I hope this maybe helped in some way. Hope you have a nice day!\\n\\nEdit: spelling, re-wordings.', \"if you're that concerned, get a gun and shoot every c02 molecule you see. every bit helps.\", 'Probably we will be fine for at least 40-50 years.', 'We absolutely can, it will take an massive industrial undertaking to make it work.\\n\\nFor example, Brian von Herzen is a researcher who with his group is developing floating seaweed deployment platforms that can grow in the open ocean. With a few simple technologies, seaweed can be grown, plankton can be stimulated, and I believe personally that it could also work as a floating habitat for fish. It would be one of the many ways to heal our planet by in this case removing lots of carbon from the atmosphere. The seaweed could be harvested, and the excess could be put back into the ocean where it would sink about 1 km below the waterline, thereby taking carbon out of the atmospheric carbon cycle for 150-1000 years. \\n\\nIt would have to be done on a massive scale, but it avoids multiple problems of growing things on land. \\n\\nThere are many possibilities, they all take work. Its up to humanity if it wants to do the work or not.', 'Yes because they are going to experience the worst of the climate effects. They have no choice but to adapt. The excuses are running out for the deniers as the evidence of global warming becomes more common each year.', 'The best thing Gen Z can do to stop climate change is to not have any kids.', 'No, when weโ€™re ยซin chargeยป itโ€™s too late. And do not underestimate how much people change as they age. A lot of people will have earned money and inherited wealth, so now their priorities has changed.', 'No. Carbon in the ocean has locked in temperature increases for at least 1000 years. All we can do is hope to adapt.', \"while they can certainly make changes, with the release of methane starting in the north, the beginning of the end is upon us. electric vehicles, end of fossil fuels etc. won't stop the methane releases.\", 'I am a biology graduate and I am pretty worried. What is your plan', 'Industrial design engineer here. Tell me your plan', '[this!!](https://giphy.com/gifs/crossover-noah-moses-1lkF5OJeezodO)', 'Computer science student here. Tell me if I can help', 'Ref:\\n\\nAbrupt CO2 release to the atmosphere under glacial and early interglacial climate conditions\\n\\nhttps://science.sciencemag.org/content/369/6506/1000.abstract', '> Carbon dioxide stays in our atmosphere for 300 to potentially thousands of years. 
Methane doesnโ€™t: Approximately 95% of it breaks down in the atmosphere in about 10 years, with a small portion eventually converting to carbon dioxide.\\n\\nThis ignores the problem entirely and paints it as if methane doesnโ€™t contribute much at all, which is actually what they explicitly state in the very next paragraph. It doesnโ€™t matter that methane lasts roughly a decade, because itโ€™s still there for a decade and is being continuously replenished by cows. You wouldnโ€™t say that hair canโ€™t be long because all the hair follicles end up falling out, would you?\\n\\n> ...the amount of food produced per cow varies immensely by country and that affects global cattle emissions. Less productive cows mean you need more cows for the same amount of food produced, hence higher emissions per pound of food... Cattle only contribute to 4% of U.S. total emissions, much less than overall global statistics.\\n\\nThis is highly misleading. The most efficient method of livestock production by far is animal farming. Despite this, itโ€™s getting more a more pushback for ethical and public health concerns (as it should). This article is advocating for grazing, which would end up with more emissions for the same production.\\nThe US statistic doesnโ€™t paint an accurate picture either. With this, I would assume cattle donโ€™t contribute much to emissions at all, but thatโ€™s not the case. The US is one of the most developed countries and outputs incredibly large amounts, well beyond what it ought to. On top of that, agriculture makes up 10% of the nationโ€™s emissions according to the EPA. Thus, if the 4% figure is to be trusted, 40% of that sector is solely made up by cattle.\\n\\n\\n\\nThe rest of the article seems to be a mixed bag of truths and half truths. The bias is very clear and Iโ€™m not particularly interested in what was said.', \"Lowering methane levels does not have 'A strong cooling effect', or indeed, any cooling effect at all - it simply has less of a warming effect - like turning your gas ring down low, heats your beans slower!\", 'Parts of Florida have been leveled many times. They just rebuild with their government subsidized home owner insurance every year. \\n\\nStronger buildings and a seawall would be a long term solution. Buy that cost more in the short term and thats all people look at.', 'Shit holes of America', 'Short answer: No.\\n\\n\\n\\n\\nLong answer: Noooooo, but everything helps\\n\\n\\n\\n^Like ^being ^vegan', 'Cycling to work is way better than an electric car, but even if everybody cycled, that is still not enough. \\n\\nEvery time we buy something, lots of people used energy to make it. \\n\\nEvery time my countries military invade somewhere, or even practice invading somewhere, they burn a lot of fuel. \\n\\nCement production produces a lot of CO2.\\n\\nCattle farmers chop down forests to make room for more cows.\\n\\nEtc', 'Electric vehicles would not make a significant impact because not only will trucking be a major part of transportation emissions (electric trucks are a lot farther out technologically than cars), but that also just puts a greater reliance on electricity rather than gas. While that is more efficient, youโ€™ll never get even close to net 0 emissions like that.\\n\\nPublic transportation and urban planning should be the primary focus imo', \"Don't be guilted into believing it's your fault. It's big companies doing 70% of it. 
Everything helps, but we should focus on regulating those corporations.\", 'https://www.youtube.com/watch?v=26qzmw_xG58', \"Ev helps. I drive everyday. The ev helps on the transportation side.\\n\\nI generate electricity from solar panels. We've had our panels for 4 years now and it's reduced carbon to the equivalent of planting 225 trees so far. I don't have a backyard that large. That helps on the electricity generation front.\\n\\nI try to eat less red meat. That helps on the ag front. \\n\\nI try to support organizations like Ikea and ms and AAPL and Ecosia which have pledged to be carbon neutral or plant trees in ecosia's case so that helps on the industry front. \\n\\nNo silver bullet but everything helps your fellow human.\", 'You are leaving out natural emissions from forests, farms and oceans. These contribute a great deal of CO2.', \"No, electricity still requires power plants which are fueled by things like fossil fuels. What's more, batteries contain rare earth metals that require extraction (usually fueled by fossil fuels) and often leave lots of toxic waste (not global warming but still environmentally damaging). More cars means more roads, which requires construction, etc.\\n\\nIf everyone switched to bicycles on the other hand...\", 'No one really knows, but for the record, climate scientists long ago were worried that their genuine findings were too \"doom and gloom\" for people to take seriously, so they watered their message down. Called it \"Global Warming\" instead of \"Runaway Greenhouse Effect\" because it sounded less scary. Then when people called them alarmist anyway they changed it to \"Climate Change.\" I don\\'t think their strategy has worked.\\n\\nBut to at least give you something valuable, NASA and the IPCC are probably the most up-to-date resources for global warming data. If it seems \"doom and gloom,\" it\\'s because it is doom and gloom, not because of some spin.', 'dont take stock of specific predictions, its pretty impossible to actually know what we will see other than maybe carbon ppm and temperature', \"Not all carbon footprints are the same. Leaving aside the ethical implications (and there are major ethical implications -- see China's former one child policy for how this works in practice) it's worth considering where emissions actually come from. I know you said only effecting certain countries, so let's take a look at: \\n\\nChina = 10.06GT \\nThe US = 5.41GT\\nIndia = 2.65GT\\n(GT = Metric gigatons)\\n\\nThe average number of children for these countries is:\\nChina: 1.6 (Families are currently encouraged to have 2)\\nThe US: 1.9\\nIndia: 2.27\\n\\nAll these numbers to say: in the countries where there are high emissions people already aren't having as many children. The emissions (and wealth) are not spread around evenly within these populations, so limiting the population wouldn't be enough to reduce the emissions.\", 'Half of the countries in the world already average less than 2 children per woman, and the birth rates in other countries are going down. Many demographers believe that there will be an average of less than 2 children per woman globally by the end of this century. It could happen as early as the year 2060.', 'The problem is more that rich countries pollute 5 or 6 times per person more than poor countries. 
You are penalizing the folks who pollute only a tiny bit.', \"I don't remember such a policy reducing the emissions in China.\", 'Why only per woman?', '[deleted]', 'This deprives one of the most fundamental human rights and essentially grants a pass to the rich western nations which have already benefited from their polluting hay day while punishing the poor countries. Thereโ€™s no way in hell they could even afford to handle a declining population that would come from 2 per woman.', 'That sounds very sexist. It is only fair if you make it 2 pr. person.', \"I'm ok with that. I only had one kid. I mean, I don't think I'd want to bring any more into this world given how things are going anyway.\", 'How about: A maximum of one biological child. If somebody wants more children they have to adopt. There are countless of children waiting to be adopted anyway.', \"I am 100% for a maximum of 2 billion humans on earth at any given point in time, but it is a bit harder to sell someone the idea that they shouldn't exist. Lol\", 'Great in theory. Many European countries have a declining birth rate because they have the wealth to do it. Education, contraception, medical support, legal support, financial support, etc. Bit different in countries without any of that. Imagine if Italians were just told not to have sex anymore to limit the risk of getting pregnant. Can you imagine Joey from friends keeping it in his pants?', 'Max one children per couple', \"If you haven't already seen it, this is worth watching: [https://www.gapminder.org/videos/dont-panic-the-facts-about-population/](https://www.gapminder.org/videos/dont-panic-the-facts-about-population/) \\n\\nIn it, Hans Rosling explains that as a whole, we have reached peak child, meaning the number of children in the world has stopped increasing.\\n\\nWe're currently at almost 8 billion people. Because we are living longer, world population is expected to peak at about 11 billion around the year 2100. By then, it's projected that there will be an average of about 2 children per woman; currently, it's about 2.5 children per woman.\\n\\nThere's no need to mandate what's already happening naturally, and it's certainly unethical to make the mandate in other countries that don't have good access to education, health care, and birth control.\\n\\nI suppose if every country allowed a maximum of 1 child per woman, that would keep our population at par for now. Good luck with that.\\n\\nAnd as others have pointed out, more children per women doesn't necessarily correlate to higher emissions.\", 'Two is one too many.', 'Ref:\\n\\nSea-ice-free Arctic during the Last Interglacial supports fast future loss\\n\\nhttps://www.nature.com/articles/s41558-020-0865-2', '1) It\\'s important to stress that this article conflates loss of Arctic sea-ice with loss of Arctic **summer** sea-ice. \"...complete loss of Arctic sea-ice...\" means something entirely different from what is stated in the actual study, \"...a fast retreat of future Arctic summer sea ice.\"\\n\\n2) The term \"free\" in regards to Arctic sea-ice doesn\\'t actually mean what people assume it to mean. 
When discussing Arctic sea-ice \"free\" actually means an ice-extent of less than one million square kilometers \\n\\nThe Intergovernmental Panel on Climate Change (IPCC) concluded it was likely that the Arctic would be reliably ice-free in September before 2050, assuming high future greenhouse-gas emissions (where ‘reliably ice-free’ means five consecutive years with less than a million square kilometres of sea ice). Individual years will be ice-free sometime earlier – in the 2020s, 2030s or 2040s – depending on both future greenhouse gas emissions and the natural erratic fluctuations.^[1](https://nerc.ukri.org/latest/publications/planetearth/aut15-ice-free/)', '2020 just keeps getting worse huh...', '!RemindMe 2035', \"Won't get better till we Dump Trump!\", \"Lol, we can't even make Americans agree to put on a mask. This world is doomed.\", 'You can see this happening in real time on [earth.nullschool.net](https://earth.nullschool.net) \\n\\nPut on Ocean overlay, then select SSTA (Sea Surface Temp Anomaly)', 'What? Summary please', \"Nobody has overlooked Nuclear. It's probably useful in some contexts, but it's also incredibly capital intensive, generates political opposition everywhere it goes, and has the potential to contribute to proliferation of radioactive material into places we don't want it.\", \"Impossible to read this block of words. From what I understood you're talking about nuclear energy. \\n\\nYou're not the only person in the world that knows about it. Sorry to disappoint you...\", 'Let\\'s go for Dyson sphere', 'When ice that\\'s floating melts, the water level isn\\'t affected, but if ice that\\'s currently on land melts, water levels will rise (and there\\'s a lot of ice in places like Greenland). Also, water has its highest density at 4 degrees Celsius, after that it expands, so if the sea gets above 4 degrees, thermal expansion will cause further sea level rise.', 'Yep that’s the main contributor. Also thermal expansion of the oceans from warmer conditions plays a role.', 'I believe thermal expansion is the main factor.', 'It comes from glaciers on land, melts and runs into the ocean', 'Think of it this way: what happens when you boil a pot of water? Molecules in the liquid get excited. As a result, liquid expands.', \"Water expands when it's hotter. Doesn't have much to do with landlocked ice, but still true.\", 'I read somewhere water levels near the north pole will drop when the ice melts.\\n\\nThe explanation is that ice attracts water, in a bowl of water with an ice cube, water will be higher around the ice cube.\\n\\n Is this true? And how big is this effect?', \"How do you get that? It's up to 3°C in several places, nowhere is it below normal temps. \\n\\nIf it gets cooler in some areas over a year or two, bear in mind the polar vortex has shifted more towards Greenland now that the North Pole is melted.\\n\\nI hope you're not trying to imply the world's biosphere isn't gaining heat, because that would be disingenuous at best.\", 'Yes', \"Regardless of how long methane remains in the atmosphere, we know two things: 1) Methane is a much more effective greenhouse gas than carbon 2) Methane concentration in the atmosphere is rising rapidly. We can therefore conclude that Methane is a significant greenhouse gas even if it doesn't stay in the atmosphere very long.\", \"Methane DOES cause warming for hundreds of years, because you don't just emit one burst of methane and then never do it again.
This is just propaganda, and the guy posting it is cringy as hell.\", 'They are cutting rainforest killing its inhabitants to provide pasture for those methane emitters. \\nIf this is not enough: it takes much more land to grow animal calories vs same amount of plant calories.', 'Anyone else notice the โ€œopinionโ€ in the title (of the screenshot) and not even watch it?', \"I think that this is a very useful video. It explains that methane from farming is part of a natural cycle, only stays in the atmosphere for 10 years, and is eventually converted to CO2 and reabsorbed by plants before being eaten by a new generation of cows and sheep. Methane concentration is higher than ever and is warming the planet. This is a bad thing, but it is only a TEMPORARY bad thing, because changes in farming and eating habits could make it all go away in 10 years. \\n\\nUnfortunately the other kind of global warming which comes from using fossil fuels to heat homes, generate electricity, fuel cars and make cement will.. last... 1000.... years!!!! It's a permanent problem with no cheap or simple solution!!! \\n\\nTherefore changes to fossil fuel use need to happen immediately, because the CO2 we all emit from using these fuels will still be here in the year 3000. However, if we all carry on eating meat until 2040, then go vegan as the effects of climate change get more obvious, the methane will all have vanished by 2050 and the earth will cool down a bit. \\n\\nSo when people tell me that the best thing I can do for the planet is to stop eating meat, they are misguided because they are prioritising a temporary problem over a permanant problem.\", \"Eastern Siberian Sea shelf. It's only really started. That dark red spot will slowly grow.\", \"Antarctica would be a horrible place to farm solar energy because of the angle at which the sun will be is always low, and there's 4 months of complete night. Large variations in power output are not good for a grid. How would you even build or maintain a solar farm there? That's not even the hardest part though, because you'd have transport the energy. The shortest point between Antarctica and Chile is 1000km. South Africa is 4000km away, and both Australia and New Zealand are 2600km away. That's not feasible. \\n\\n\\nThis also completely ignores the environmental and health disasters that would result. Even ignoring the problems of ozone depletion, a solar farm could be devastation to local wildlife if you just plop it down without consideration to the effects.\", 'Fuck', 'Double fuck', 'cool. didnt the IPCC have this in the ar5 as โ€˜low confidenceโ€™ by 2100? cool.', 'Yes, yes we should! I am very curious if thorium reactors could really be made to work.', 'Nuclear power plants are fairly safe compared to the alternatives. Think of all the environmental damage and human deaths from e.g. the oil industry or the coal industry. Unfortunately building a nuclear plant is very very expensive. Many of the originals were funded by governments to make nukes. To build modern ones for commercial profit is not easy, and the electricity they produce is more expensive than wind turbines. See this [https://en.wikipedia.org/wiki/Areva](https://en.wikipedia.org/wiki/Areva) and this [https://en.wikipedia.org/wiki/Hinkley\\\\_Point\\\\_C\\\\_nuclear\\\\_power\\\\_station](https://en.wikipedia.org/wiki/Hinkley_Point_C_nuclear_power_station) for more details', 'Yes! 
Itโ€™s impossible for a country to rely on renewables entirely (without massive battery banks which all use lots of finite resources). Countries should base their power off what renewables they can get the most out of for their geography then have nuclear to top it up in case of a dip in the supply or spike in demand.\\n\\nIn the Netflix doc on bill gates it details the evolution of the nuclear tech itself, us now being able to reuse the waste matter and other amazing developments. But they spoke a lot about how the huge problem for nuclear is PR. \\n\\nYou think nuclear, you think bomb, you think meltdown. One thing governments are good at is propaganda, it would be in everyoneโ€™s interest to shift the opinion on nuclear to being viable and beneficial, that is something that really needs to be tackled by governments in the world but theyโ€™re mostly all still reaping the rewards from FF so I doubt they will change until that goes away first. :/', 'No because Fukushima.', 'Nothing about that \"rebutts\" overpopulation. They\\'re separate, but connected issues.', 'Must needed area to focus.', '> The doubling in CO2 per capita between 1945 and 1980 is US, Europe, Japan, Soviet bloc. In each case energy intensity, coal, oil are the keys not population growth which is mainly in what we then called โ€œ3rd worldโ€.\\n\\nhttps://twitter.com/adam_tooze/status/1283392242225418240', '[Global Warming & Climate Change Myths](https://www.skepticalscience.com/argument.php)', \"Poe's Law is strong with this one\", 'Video Link [https://youtu.be/-VDoBSH4-C8](https://youtu.be/-VDoBSH4-C8)', 'commercial air travel and relying on planes in the global supply chains, one of our biggest mistakes', 'Did they ever start taxing airplane fuel for international flights?', 'Video Link\\nhttps://youtu.be/-14m7tKeOo4', \"Going vegan is the least we can do. It's the biggest difference to reduce emissions, pollution, deforestation, water use, species extinctions and a whole lot of suffering of innocent animals. Its something we can do with out waiting for politicians to get the memo.\\ufeff https://www.theguardian.com/environment/2018/may/31/avoiding-meat-and-dairy-is-single-biggest-way-to-reduce-your-impact-on-earth\", 'Sorry but this video is not great. You do have some valid points but you don\\'t make good arguments for them.\\n\\nSecond 35 onward:\"One of the most discouraging things when talking about the environmental issue is the lack of immediate interest from the public. Various public poles about climate change give a strong indication that more than half of the world think it as a problem for the future.\"\\n\\nYou show three polls:\\n\\n1) No source, likely just about New Zealand parties. Not the public. Not the world.\\n\\n69% think it is an urgent problem, directly contradicting your statement.\\n\\n&#x200B;\\n\\n2) No source, directly contradicts your statement.\\n\\n11% see it as a problem for the future in this graph in 2019. No information on the country or anything.\\n\\n&#x200B;\\n\\n3) 35% of people think the threat of climate change is generally exaggerated, the rest believes it is correct or underestimated. That is not exactly the topic but the data again contradicts the statement.\\n\\nI\\'m not going through the entire video but this just jumped out to me and I thought I\\'d let you know.', 'And people like my dad and similar baby boomers: \"I don\\'t give a shit\"\\n\\nMy dad has literally said that to me with a straight face, it\\'s sad', 'Kinda scary! 
Literally just last night I had a nightmare that the Earths temperature was going to reach 73 degrees Celsius. My arms and my familyโ€™s skin all began peel and blister as the sun rose. It seemed so real! Then I see this article...', \"There could still be a chance to limit the extent of the devastation to various non-extinction event levels, and we need to continue to try for that as much as possible. However, after 25 years of environmental activism I've concluded it's become nearly certain humanity will suffer unprecedented turmoil and hardship soon, and it will likely only get worse and worse with each coming decade. \\n\\nWhat I've chosen to do is not to fall into despair but to shift much of my energy to prepare to help others in this coming crisis. I've moved to a place that won't be hit as hard and I'm establishing sustainable living practices and various resources to help climate refugees and community members when it gets bad.\\n\\nPerhaps most importantly I'm studying meditation and psychology practices that could help manage despair and hopelessness in the face of this. In short we need people who will help others maintain stable outer lives and stable inner lives in the face of chaos and seeming hopelessness.\\n\\nThere's no telling whats coming but i hope and work for the best and prepare for the worst.\", \"Chemical engineer here. Yes, there is hope, but it depends on politics. We have technology ready to fix pretty much all of it, we just need regulation to kickstart the economies of scale.\\n\\nNobody has any way of telling how much of society will collapse, if at all. 2030 is a very pessimistic view, they are just speculating and full of shit imo. People are very powerful and can endure/persevere IF we decide to. We have to get serious, unite. I think of it like WWII. The US fucking *mobilized* like never before in history. But it took a shocking event to do so. Now that we have increasingly shocking events like crazy storms, fires, videos of rivers flowing off of greenland, acres of dead coral, etc., I think it's starting to feel real for more people all the time. There's hope. And it's not binary. Even if it will get bad, anything we do will help it from getting even worse.\", '>by 2030โ€™s, There will be a collapse of civilization due to climate change. \\n\\nWhere have you read that?', \"It's fair to note that it doesn't look good.\\n\\nCivilization won't collapse overall. Parts of the world will (and have) collapse. But HSBC and JP Morgan Chase will keep maintaining their accounts. And most cities will still keep collecting the rubbish.\", 'Yes. There is always hope.\\nIf you want to see some action, come over to /r/climateactionplan', \"I dont know much about this subject, but from the things I've heard/read, and my relatively short experience observing how humans cant really seem to agree on anything and that humans normally dont react until until something bad happens, I feel that space exploration may be our only hope in a couple of decades. I think colonizing Mars should be a bigger priority right now, because that job will heavily rely on earth being around for a long time.\", \"There isn't going to be civilizational collapse due to warming. 
There are going to be problems, though.\\n\\nLet's consider what the effects are going to be (the real observable effects instead of hysteria):\\n\\nHeavier, more concentrated rains.\\n\\nExpansion of tornado alley.\\n\\nCoastal flooding.\\n\\nThe first one means agriculture is going to have to adapt to crops potentially been rained out.\\n\\nThe middle one means stronger basements and storm doors on most buildings in the area.\\n\\nThe third one is the tough one: Think New Orleans scale disasters on coastal cities.\", 'What a beautiful place to live. \\n\\nNot trying to make light of the horrible situation.', \"It's happening everywhere for the past 50 years.\", '>\\tglobal surface temperatures tied with a 12-month high from August 2015-September 2016. \\n\\n>\\tWith records from half the year now available, it is likely that 2020 will be the warmest year on record. (Calc by @hausfath) [sic]\\n\\nhttps://twitter.com/carbonbrief/status/1280396592760205312?s=21', \"I'm really excited for all the extra CO2 and methane that this will release /s\", 'that is so concerning jfc', 'this is the most depressing on land effect of global warming ive seen by far', 'Kinda ironic, I see humanity using fossil fuels as a fire spreading through a forest. It was slow at first, but it keeps accelerating.', 'Itโ€™s sadly looking like a golf course', 'Siberia is over 5 million square miles. By comparison, USA is 3.8 million square miles.\\n\\n.\\n\\nImagine a continent bigger than the USA all of a sudden giving off the most potent and damaging greenhouse gases (methane) than the entire USA.\\n\\n.\\n\\nThatโ€™s what this is like.', 'The simple answer is that fusion is always 30 years in the future and it has been for decades. \\n\\nTo add to that, solar, wind and cheaper storage is fast making nuclear uneconomical.', 'It may be better. Maybe. I think though you should read [this](https://grist.org/climate-energy/fusion-wont-save-us-from-climate-change/) first. Lemme know what you think.', 'Same reason we ain\\'t talking about dyson spheres...\\n\\nIf the world was Charlie Brown, Fusion research would be played by Lucy. In the last 70 years there has not been a decade when some group of scientists wouldn\\'t stand up an claim that fusion is less than dozen years away. In Europe we are building the ITER and got the Wendelstain 7x \"stellarator\" breaking records and performing above expectations. Even if both these projects end up perfectly succesfully, we are still decades from commercially viable systems.\\n\\n\\nLuckilly, nuclear energy potential is far from exhausted. Since the end of Cold war nuclear industry is going through rapid development because it can finally do what it was ment to do - produce energy - not bombs. The examples are molten salt reactors popping out all around the world (China has at least 3), Canadian Terrestrial Energy is already negotiating a commercial setup for their IMSR. Another great candidate is american NUSCALE - who are building small high pressure reactors without \"user replaceable\" fuel - when the fuel runs out, you just send the reactor back.. \\n\\nAll these are at least several orders of magnitude safer than anything currently in commercial use and unlike fusion - they already work.....\\n(edit: my english)', \"Aren't they building iter\", 'Source?', 'Where is your science?', 'Link to study can be found [here](https://www.nature.com/articles/s41467-020-16970-7#citeas). \\n\\nReference:\\n\\n>\\tPerkins-Kirkpatrick, S.E., Lewis, S.C. 
Increasing trends in regional heatwaves. Nat Commun 11, 3357 (2020). https://doi.org/10.1038/s41467-020-16970-7', '#arewegreatyet', 'Ultra-high temperatures were recorded before we started filling the atmosphere with various gases. Evidence suggests that ~100 million years ago the half a year frozen wasteland I live in was a tropical rainforest. \\n\\nWe need to quit worrying about climate change and start fixing the problems destroying our world. Reducing the amount of plastic in the ocean is a good place to start.', 'Refrigeration is just moving heat from one place to another.\\n\\nsorry\\n\\nWell, unless you can figure out a way to vent it to space :)\\n\\nEdit: hmmm vent is the wrong word; I think transmit would be more apropos.\\n\\nMaybe you can figure out a way to transmit the heat out of our atmosphere through conduction like possibly hang a long copper/aluminum/whatever rod \"vertically\" on the dark side of earth. :)\\n\\nMy real idea for fixing global warming unfortunately has potential military applications soooo Imma wait to see just how stupid the \\'Murikkkun voting public truly is.\\n\\nPLEASE somebody demand these erections be closely monitored by the U.N.', '&#x200B;\\n\\nHere is the link: \\n\\n[https://www.theguardian.com/business/2020/jun/29/uk-ministers-send-mixed-messages-over-climate-commitments-says-fund-manager](https://www.theguardian.com/business/2020/jun/29/uk-ministers-send-mixed-messages-over-climate-commitments-says-fund-manager)', 'I canโ€™t tell if this is negative', 'lol no shit', 'It probably will... People and industries will see the changes that happened during those months and will maybe lower their CO2 emissions.', 'No quite the opposite, global warming threatens to make some places uninhabitable due to extreme heat.:\\nhttps://en.m.wikipedia.org/wiki/2019_heat_wave_in_India_and_Pakistan', 'I will combine two of my comments in another thread to answer this. \\n\\n>\\tIt is 30 times more likely to occur now than before the industrial revolution because of the higher concentration of carbon dioxide (a greenhouse gas) in the atmosphere. As greenhouse gas concentrations increase **heatwaves of similar intensity are projected to become even more frequent,** perhaps occurring as regularly as every other year by the 2050s. The Earthโ€™s surface temperature has risen by 1 ยฐC since the pre-industrial period (1850-1900) and UK temperatures have risen by a similar amount. [sic]\\n\\n~ [Met Office](https://www.metoffice.gov.uk/weather/learn-about/weather/types-of-weather/temperature/heatwave)\\n\\n>\\tGlobal warming is increasing the frequency, duration and intensity of heatwaves.\\n\\n>>\\tIf we contine to rely heavily on fossil fuels, **extreme** heatwaves will become the norm across most of the world by the late 21^st century.^^1 [sic]\\n\\n~ [skepticalscience](https://www.skepticalscience.com/heatwaves-past-global-warming-climate-change-basic.htm)\\n\\n***\\n\\n^^1 Dim Coumou^1 and Alexander Robinson^1,2,3\\nPublished 14 August 2013 โ€ข 2013 IOP Publishing Ltd\\nEnvironmental Research Letters, Volume 8, Number 3', 'it varies. its global warming because totally it will go up, though some places will be colder', 'Yesnt and no but maybe', '[https://www.youtube.com/watch?v=Z4bSxb5THm4](https://www.youtube.com/watch?v=Z4bSxb5THm4)', 'Some cold places will get colder and some will get hotter. The same goes for hot places. 
I\\'m purposefully not using the term \"countries\" here because climate change doesn\\'t respect borders, so some countries, like Russia, will experience all of the above.\\n\\nJust off the top of my head it will depend on altitude, proximity to the ocean, warm oceanic currents, local landscape... That\\'s why it\\'s important to be skeptical of Fox News or Steven Crowder presenting a bombshell report about one iceberg gaining mass in some recent time period as a proof that global warming is a hoax.', 'Nice video haha', \"Uhh what, I'm sorry but this is horrifying\", '\"Temperatures reached +38ยฐC within the Arctic Circle on Saturday, 17ยฐC hotter than normal for 20 June. \\\\#GlobalHeating is accelerating, and some parts of the world are heating a lot faster than others. \\n\\nThe \\\\#RaceToZero emissions is a race for survival. \\n\\nDataviz via @ScottDuncanWX \" \\n \\n>posted by @UNFCCC \\n ___ \\n \\nmedia in tweet: https://video.twimg.com/ext_tw_video/1274949754997350406/pu/pl/XlCZBoWfkEaJX3x4.m3u8?tag=10', 'Kill off About 12 countries', 'https://www.nature.com/articles/s41467-020-16941-y', 'this guy charles down the block is. all he does is eat beans and be farting all the time. total jerk if you ask me.', 'Everyone is responsable', 'Rusty Cage backround music', 'Double negatives are not the unworst language to use.\\n\\n*Based on the current resource consumption rates and best estimate of technological rate growth our study shows that we have very low probability, less than 10% in most optimistic estimate, to survive without facing a catastrophic collapse.*', 'The hell does that mean', 'Nero porcupine', 'So no hope? Well in that case just start rioting, kill your self, no point in doing anything', 'I have a feeling that some of the members are beings pessimistic because they actually want global warming to kill us', \"I'm interested in the data on how much less greenhouse gas emissions were.\\nThe problem is that this was just 3 months of lockdown in Europe and the USA, so it might not be visible in the weather patterns.\", \"First thing that would have to happen is the technique being advocated has to be proven to capture carbon efficiently, store it indefinitely, and have more than modest carbon offsets (more carbon must be captured than produced).\\n\\nSo far, there aren't many methods that can achieve these goals and they can't be scaled up because the geologic conditions and existing infrastructure only exists in limited and remote areas. You can only transport captured carbon so far before the carbon gasses emitted will exceed the carbon captured.\", 'No, because that would be like saying โ€œAlright, weโ€™re going to sacrifice ourselves because the government canโ€™t stand against the billionairesโ€. Effectively giving them a free pass. Never should the working class be the ones paying for this.', \"Depends on how the system works. If it's carbon capture to energy consumption ratio is right, I would give it a try.\\n\\nDisclaimer:\\n-I am not an expert these are just some thoughts that came up my mind-\\n\\nBut it also needs to be way more efficient than trees, forests etc. In conclusion to that I guess, that it has to be either very big or suck air in like a vacuum cleaner, which would cause noise and more energy loss. \\nI think another problem would be that you need a loot of environmental data to find a place for this thing in order to get the best carbon capture ratio as possible. 
\\nThere are also other bad greenhouse gasses that cause much more harm to the environment than carbon does. \\nAnd in the end: What would be the impact of it if, let's say, we get the money, fundig etc. to get a Million of these?\", 'I d k', \"You can't really take this serious, or do you? You really believe that maybe one good side of excess of carbon surpasses the hundreds of downsides. Not to mention the author of this 'scientific' paper is an industrial consultant and climate change denier.\", 'Before commenting here, *please* read the whole paper.', 'Try /r/datasets.', 'Have you really been far even as decided to use even go want to do look more like?', 'No.', 'That literally made no sense but\\nPositive news????', 'Idk much about science but logically further we are from sun the colder it would be. But if we keep up the CO2 emission then greenhouse effect wouldnt dissapear. So im not sure if it would change anthing in the long run.', \"Consider the mass of the planet and how much force would be required to move it. That is your baseline energy requirement.\\n\\n&#x200B;\\n\\nAlso consider that if you were somehow able to change the orbital trajectory, it would likely just change the eccentricity of the orbit. You'd have to apply the exact amount of force at the exact moment several times throughout the year to extend the orbit. This would also extend the orbital period (longer year) and may affect rotation as well (increase or decrease daily periods). \\n\\n\\ntl;dr no, it is not feasible nor would it be a good method.\", \"Go play KSP for a little while. It's $40 at the humble store right now, and it goes on sale fairly regularly.\\n\\nOnce you've got an idea of how orbits work, and how orbital mechanics work, you'll realize just how much energy it takes to change something's orbit... and that's for object with masses in the metric ton range.\\n\\nThe earth has a mass of approx 5,972,200,000,000,000,000,000 metric tons. \\n\\nThe biggest reason we have a global warming issue is energy consumption. 
Gasoline, coal, natural gas, fossil fuels in general, they are why we are having problems with climate change.\\n\\nThe energy involved in implementing your idea is simply astronomical, and any benefits from the solution will be FAR outweighed by the side effects of that energy use.\", 'It would not affect global warming but it would most likely change the times of day for each continent and country this is a simple answer but it would really not change anything if humans donโ€™t stop burning fossil fuels in the next 5-10 years we are all doomed', 'Whoโ€™s โ€œyโ€™allโ€ and where did you read that ~~people~~ scientists want global warming to end us?\\n\\nAnyway, here are some solutions Iโ€™ve found that you may be interested in:\\n\\n* [NatGeo](https://www.nationalgeographic.com/environment/global-warming/global-warming-solutions/)\\n\\n* [NASA](https://climate.nasa.gov/faq/16/is-it-too-late-to-prevent-climate-change/)\\n\\n* [Scientific American ](https://www.scientificamerican.com/article/10-solutions-for-climate-change/)\\n\\n* [BBC](https://www.bbc.com/future/article/20181102-what-can-i-do-about-climate-change) (one of the more lay person friendly sites Iโ€™ve seen)\\n\\nEDIT: clarification.', 'Globox', 'When I say yโ€™all is the people who are like โ€œ oh my Jesus, in like 3 days or 5 years global warming will kill usโ€ cough cough Greta thunberg', 'And what does that mean', 'This is a forum full of people that have already woken up.', \"Watching Americans' response to Coronavirus helped me to understand this as well. \\n\\nIf a pandemic can't convince us to save lives, then a much longer-timeframed catastrophe isn't going to make us do anything either. \\n\\nWe're fucked. I feel sad for anyone under the age of 20 at this point. I'm glad I don't have kids. I'll likely die before shit gets too bad. Thank god.\", \"Chemical engineer here. It's more a willpower problem than a technical one at this point. \\n\\nIf we decide to buckle down and actually get started, we can do pretty much anything we want. I'm less qualified on talking about people, but I think people are too reactive instead of proactive. We'll probably need to see a lot more negative effects before enough people get serious.\", 'Hey maybe one in those asteroids will do the trick?', 'u/crow-of-darkness\\n\\nHang out on /r/collapse \\n\\nThat sounds like your jam', 'The thing is that if the virus stunts global warming, we die from the virus, but if the virus stops, we die from warming', 'Nice', 'We are so fucked the clathrate gun has fired and has turned into a methane gusher thousands of miles across.', '[Here](https://youtu.be/3sqdyEpklFU) is a video showcasing Global Warming from 1880 all the time at up to 2019.', '\"Anomalous warmth stretching from western Siberia to the central \\\\#Arctic basin over the last month ๐Ÿ”ฅ \" \\n \\n>posted by @ZLabe \\n ___ \\n \\nmedia in tweet: https://i.imgur.com/M58PYRm.jpg', \"I live in Siberia, this year's May was somewhere between the usual June and July in terms of temperatures. A handful of days nearly reached 30ยฐC. Usually, it's not too uncommon to have some snowfall in May, with temperatures around 10-15 degrees. It's mad. Nothing like that has happened in the 30 years that I've lived here. [Here's the 10 years average day/night temperatures compared to the 2020 ones (grayed out, below the average values) for each day of May](https://i.imgur.com/kmvNHNc.png).\\n\\nThere were also 2 storms. 
Nothing like the hurricanes of the Caribbean but you have to realize that this kind of weather used to be extremely uncommon around here and neither the infrastructure nor people are prepared for it. [Roofs go flying](https://youtu.be/y8FRXkGPZhA) when the wind picks up.\\n\\nIf this sort of thing becomes regular, I don't know how humanity's gonna fare.\", 'Correction: CO2e levels have reached highest level ever since Miocene. The human species appeared after the end of Miocene.', 'The fleet of container ships traveling between China and the US and Europe emit as much CO2 as all the autos in the US. Why not convert these ships to nuclear power?', 'Half the planet is on lockdown. This is proof we can do absolutely nothing to stop this now. Might as well accept it', 'The news is not completely true.\\n\\nKindly follow the [Link](https://www.thequint.com/news/webqoof/old-images-passed-off-to-show-massive-fire-in-uttarakhand-forests-fact-check)', 'Unfortunately, this has been true for years. For factual climate change info, itโ€™s better to follow /r/climate.', \"I wouldn't rule out the possibility that these are paid operators with many alts\", \"Yup, have been for years. There's the same grito of 5 or 6 users who are on climateskeptic and climatechange. The mods are from climateskeptic, etc. It's a garbage honeypot sub.\", \"It's run by climate skeptics, you say?\\n\\nThat's a nice way of putting it, I would even say incorrect.\\n\\nIt's run by climate denialists, that I had the misfortune of conversing with back in the day. As the voice of science and reason, I am now of course banned on both of his (the main mod's) subs. He was also incredibly rude and resorted to personal attacks against me.\\n\\nThere are some decent people who know their stuff on r/climatechange, but not many. As a European with atmospheric science background, I just gave up - many of them argue in bad faith, some seem to be loonies across the pond on that sub (no offense meant, I have many clever friends from there too, but the most opposition to climate change science is from the US). Thinking specifically about people linking Judith Curry, wattsupwiththat, Heartland Institute, and some other denialists' bs.\\n\\nI would say the best course of action would be to ignore them, instead of even give them the attention they want, but because the sub name gives them so much exposure, it might be a decent idea to show how faulty their reasoning and science background is. Not sure how else to deal with biased mods, with horrible intentions about the most important subject in the history of mankind.\", '>\" In his own words, he does not believe climate change poses a great danger to humanity\\'s survival and future. \"\\n\\nThat\\'s considered denial now? Do you believe humans can live on Mars soon, and if so, why not also on a warmer Earth? \\n\\n\\nEdit: Why downvote this? I\\'m on your side.', 'How do you get from skepticism to denial to fossil fuel funding and tinfoil hats in 10 seconds? I find the discussion most fruitful on r/climatechange and consider r/climate and r/climateskeptics to be quite similar echo chambers of different sides of the argument. If you only want to hear what you believe you can stick in your echo chambers - or then you can expose yourself to conversation about the science. To be fair r/climate is fairly ok but the atmosphere is definitely less tolerant to debate. \\n\\nHow do you even define a skeptic? The way you do it here is anyone with an optimistic outlook of the future as it sounds. 
If only negativity is to be allowed I recommend r/collapse', \"Hasn't the concentration of CO2 been highest ever every day for a few decades now?\", \"> In April 2020 the average concentration of CO2 in the atmosphere was 416.21 parts per million (ppm), the highest since measurements began in Hawaii in 1958.\\n\\n> Furthermore, ice core records indicate that such levels have not been seen in the last 800,000 years.\\n\\nIt's time for politicians and voters to wake up and make global warming the number one priority for every government in every major country.\", 'Youtuber Simon Clark has a video on the subject, and he said that likely this year will be warmer. Also, due to the damage caused to the economy, some countries are considering rolling back on their environmental programs', \"Yeah, it will be warmer. the climate isn't a switch we can collectively turn on or off. We still need to do more to reduce greenhouse gases.\", ' Global warming is so yesterday, welcome to Grand Solar Minimum.\\n\\n&#x200B;\\n\\n[https://www.youtube.com/watch?v=FqcL1JGlA2I&feature=share](https://www.youtube.com/watch?v=FqcL1JGlA2I&feature=share)', 'Check out the [ProjectVesta video](https://youtu.be/X5m3an3f5S0)', 'Fossil fuel industry propaganda.', 'r/jokes is that way -->', \"This is why you'll never enter the circle of knowledge, it's impossible for a square peg.\", 'Where does the water come from to sustain the trees?', 'I like the formal suit as a the tree-planting dress code.', 'Even during coronavirus, we have not reduced co2 emissions below the threshold for the planet to heal. The planet is still warming at an increasing rate, even with our reduced emissions.', \"I think what this unplanned global experiment shows is that improving the planet's condition doesn't like *solely* in the hands of regular people. That's one part but the bigger element is governments and corporations completely changing how they operate.\", \"It's like getting punched in the face less hard. The situation isn't really improved until you can get the person to stop hitting you altogether\", 'So your saying weโ€™re fucked', \"The effects of COVID-19 are the only thing making this seem remotely attainable. I like to think it's a wake up call but my concern is that all the people currently sheltering in place, etc. are just waiting to explode back to their normal activity levels which, in turn, will raise our emissions right back up. In fact, I've heard various rumblings of the YOLO mindset that makes me fear of attaining even higher levels of emissions by way of expanded travelling, activities, etc.\\n\\nI SO wish we had leadership across the globe with a genuine interest in working to solve this crisis but that's not currently the case. We have some concerned leaders but far too many that are either indifferent or even in explicit defiance of our best interests here. Sigh.\", 'No thanks. We are not a smart species. We will take the various catastrophes and then cut our emissions by more than 7.6%.\\n\\n-Humanity', 'https://grist.org/climate-energy/current-global-warming-is-just-part-of-a-natural-cycle/', 'Muay thai or jujitsu. Guns also work', 'I ask them what would be causing temperature to rise in recent decades. The temperature does naturally fluctuate, all climate researchers agree on that. 
But based on the measurements of the natural factors that can influence mean global temperature, it should not be increasing currently.\\n\\nFor example, solar activity canโ€™t explain the warming since 1950, because [solar inputs haven\\'t been increasing](https://imgur.com/vf3jTHY) during that time. Theyโ€™ve been generally steady or decreasing, while temperature has risen.\\n\\nSimilarly, [the recent trend in cosmic rays is the opposite of what it would need to be, to cause warming](https://imgur.com/llsqkSB).\\n\\nEven \"skeptic\" blogs like [wattsupwiththat](https://wattsupwiththat.com/2019/12/12/deep-solar-minimum-on-the-verge-of-an-historic-milestone/) have acknowledged these recent trends in solar activity and cosmic rays.\\n\\nCyclical variations in the orbit and tilt of the Earth relative to the Sun are known as the [Milankovitch cycles](https://en.wikipedia.org/wiki/Milankovitch_cycles), which are understood to be an important driver of the glacial/interglacial cycle. But the Milankovitch cycles canโ€™t explain the recent warming - they are currently in a phase that should be leading to a slight cooling trend, if anything. [Here\\'s a graph that shows the Milankovitch forcing for the past 20,000 years](https://imgur.com/a/50Sotae). As you can see, it peaked around 10K years ago, corresponding to the warming that took us out of the last glacial period. It has been decreasing for thousands of years since then. (Source data available through [this page](https://biocycle.atmos.colostate.edu/shiny/Milankovitch/).)\\n\\nAnd so forth. If you take into account all of the measurements of natural factors, they can\\'t explain the recent warming trend. But it is well explained, when human-caused factors (such as the increase in CO2) are taken into account. [See here for a graph that illustrates this](https://www.gfdl.noaa.gov/wp-content/uploads/pix/model_development/climate_modeling/climatemodelingforcing3.jpg).', \"First off, you'll never convince them, you can't reason someone out of a position they didn't reason themselves into.\\n\\nWe disagree with idiocy in the hopes it sways the undecided.\\n\\nMy advice, don't bother when it's just you two, be prepared to destroy them when there's an audience.\\n\\nFirst rule of global heating, CO2 holds in heat. We spew it. He can try to say co2 doesn't trap heat, simply read up on it, he'll be wrong, you'll be right. Make them defend idiocy.\", \"Maybe the point is that capitalism won't save us. If you live in the U.S. capitalism - regardless of what Bernie Sanders and AOC espouse - is the state religion. After 9-11, Bush told people to go buy something, Obama bailed out the largest banks and Trump is cool with people dying as long as the economy looks rosey. Buying more shit and using it as an indulgence to paper over our unnecessary SUV or government subsized meat will never reach where we need to be to reduce methane, CO2 and other greenhouse gases. We have too many people striving for the same standard of living. At some point all the systems break. We can't physically have 8 billion people on the planet eating a diet that requires 49 billion acres of land (we only have 4 billion arable acres), but the standard American diet requires the 49 figure. Buying shit, be it the right car, solar panels or windmill or whatever may feel good, but ultimately your best bets are to 1) Go Vegan / plant based, 2) Not have children and 3) not fly internationally. Everything from COViD only added up to a tiny percentage of global warming emissions change. 
We are fu#$d.\\nBefore you attack, I have solar panels, and am following numbers 1 and 2 above. I just think most of the Green Movement has been co-opted by capitalism and won't change anything. It's almost in their mantra, they never asked anyone to change. Sort your trash sure, but give up meat or not have a brood, never.\", ' I have read several fact checks in this \"documantary\" and its an evil hit piece against renewable energy.', \"Capitalism won't save us. Buying solar panels or Teslas isn't going to do it. People need to make actual sacrifices. Give up meat, stop having kids, live on less.\", 'What is your goal?\\n\\nIs it not having a climate catastrophe? Please tell me what bad thing you think is going to happen and where that bad thing will hit hardest.\\n\\nOr is it some sort of destruction of human civilization for the sake of it?', \"It's unavoidable at this point - the only path forward is to actively remove greenhouse gases from the atmosphere and somehow sequester them. I foresee huge solar-powered machines that collect methane and co2 from the air and use large amounts of energy to solidify them for burying or perhaps blasting into space if that becomes affordable and environmentally-friendly one day.\", 'At least the rich and their children will die with us plebs.', 'Failure by design. We need a complete restructuring.', 'My ass. Been eating a lot of Hummus I had stocked in the fridge.', \"10 billion isn't enough but still a good initiative\", 'Are you paid by eyeball seconds?\\n\\nWhat a frustratingly inefficient way to display the data!', 'How far is this truthfull based on what science? I understand that there is a possibility that we are going back to the ice age but why does Russia have the hottest place on the plannet? Methane lakes? Or just in the right geo location?', 'The environment is all fucked up because of corporate greed and unfettered capitalism. Sure, there has always been some kind of sense of reuse and recycling among the lower economic class that survived things like the great depression and wwii rationing, but this sounds like a grumpy old boomer railing against those dang disrespectful millenials and their pokemons and vidya games and that Swedish rascal Greta whatshername.\\n\\nTake ur heart meds, old man, and keep ur social distance.', 'The old lady seems to be forgetting that it was their generation that allowed things to become the way they are now in the first place. They are the ones that allowed capitalism to dictate our way of life.', \"Society Moves on and as it advances the amount that will be sacrificed will increase too, the only realistic way we can solve this is if we are to find ways to technologically *limit* or even possibly *extract* carbon entering or already in the air. The blame game isn't going to work anymore, we can only improve and that can only happen if we can come together and start solving this problem as a whole not like some children who argue over the TV show but by the time they decide both shows are over.\", 'Ha ha ha.... This is pretty embarrassing.', 'Let me ask you, where does that concrete come from? It comes from the land, so the net weight change of the land is zero. Besides, the Earth weighs roughly 6,000,000,000,000,000,000,000,000 kg, a change of 6,600,000,000 is not even a drop in the bucket. 
\\n\\nItโ€™s true that the earth is warmed, for all practical purposes, entirely by solar radiation, so if the temperature is going up or down, the sun is a reasonable place to seek the cause.\\n\\nTurns out itโ€™s more complicated than one might think to detect and measure changes in the amount or type of sunshine reaching the earth. Detectors on the ground are susceptible to all kinds of interference from the atmosphere โ€” after all, one cloud passing overhead can cause a shiver on an otherwise warm day, but not because the sun itself changed. The best way to detect changes in the output of the sun โ€” versus changes in the radiation reaching the earthโ€™s surface through clouds, smoke, dust, or pollution โ€” is by taking readings from space.\\n\\nThis is a job for satellites. According to PMOD at the World Radiation Center there has been no increase in solar irradiance since at least 1978, when satellite observations began. This means that for the last 40+ years, while the temperature has been rising fastest, the sun has not changed.\\n\\nThere has been work done reconstructing the solar irradiance record over the last century, before satellites were available. According to the Max Planck Institute, where this work is being done, there has been no increase in solar irradiance since around 1940. This reconstruction does show an increase in the first part of the 20th century, which coincides with the warming from around 1900 until the 1940s. Itโ€™s not enough to explain all the warming from those years, but it is responsible for a large portion. See this chart of observed temperature, modeled temperature, and variations in the major forcings that contributed to 20th century climate. https://en.wikipedia.org/wiki/File:Climate_Change_Attribution.png\\n\\nRealClimate has a couple of detailed discussions on what we can conclude about solar forcing and how science reached those conclusions.\\nhttp://www.realclimate.org/index.php/archives/2005/07/the-lure-of-solar-forcing/\\nhttp://www.realclimate.org/index.php/archives/2005/08/did-the-sun-hit-record-highs-over-the-last-few-decades/', 'Co2', 'Can you give the source of this article ?', \"The main problem isn't the warming per se, it's the rapid change. A rapid cooling would also be very bad. If I had to pick, I'd prefer warming over cooling.\", 'And we already see that in effect...', 'Nobody likes smart people anyway.', 'Ah what...?', \"People are on quarantine. That means there is very little cars, planes, ships moving around. That means less co2 emissions. 
Not to mention a lot of industry has stopped that's less pollution moving around etc\\n..\", 'I\\'m pretty sure it will NOT help here is why\\n\\nConsidering:\\n\\n- all the carbon emission and pollution levels are currently falling\\n\\n- governments have agreed to reducing carbon emissions as an essential step to protecting our environment\\n\\n- investment in green energy and environmental policy was growing before the crisis\\n\\nI feel that the reduction in pollution during the covid will allow governments to hit emission reduction targets and pollution reduction targets without addressing the core of the problem (it\\'s a side effect of the quarantine, not because our economy is more \"green\").\\nThat coupled with an economic crisis will shift priorities away from continuing to invest in renewable energies, sustainability and enforcing strict environmental policies which has always been more prevalent in times where the economy is doing good.\\nIt\\'s understandable, people will care less about the future of the planet and long term sustainability if they suffer from an economic crisis that makes it harder for them to sustain their immediate needs.', '90 percent of carbon emissions come from the richest ten percent of people, and it probably will be mostly the poorer people that will suffer. meaning corona killing poor people will do little to nothing to slow climate change. if it only killed billionaires on the other hand...', 'please by all means you be the first to volunteer.', \"This analysis is a middle and upper class privilege. It's not the op-ed writers who will die, that duty goes to the impoverished who contributed the least to global warming. Even though I wouldn't recommend mass human extinction to solve global warming, if the population that died were the ones doing the polluting this line of reasoning would at least be coherent. But that is not the case and the sentiment only shows that smarmy columnists want the poor to die for them.\", \"That's just a temporary solution, people will reproduce again.\", 'Thanos has joined the chat.', 'Air quality here in South Africa has already started to increase', \"Prob doesn't account for extreme environmental rollback \\n\\nAnd myriad other extreme reactions from Power\\n\\nBut worth having the deep think\", \"We draw the line at advocating deaths. I'm not aware of any climate movement ever that advocated for deaths of people. Well, unless we count Koch brothers as people... The whole point of an anthropocentric climate movement is to prevent the death and destruction that catastrophic climate change would cause. \\n\\nAny and all claims of preventing overpopulation I've ever seen (not counting deranged lunatics on the internet) talked about family planning and birth control as ways of empowering people to not have children if they don't wish to or (in the more extreme cases) talking about things like the China one child policy as a possible solution. \\n\\nI'm really just unaware of anyone saying it would be a good idea to just straight up murder a bunch of poor people in order to save the climate.\", \"It would be a huge win if the 30% are killed in the more developed countries. \\n\\nIf it kills mainly in poor countries such as the ones in Africa or East Asia, it won't change the current situation. \\n\\nMost of the CO2 emissions are produced to make goods for western countries from what I know\", '[deleted]', 'There are too many people on Earth already, fact.
They can decide not to reproduce like rabbits, or be chopped down by a huge epidemic, which is inevitable in a mono-species world. Alas, choosing not to reproduce too fast is contrary to the sense of life -passing on your genes. It takes more than intelligence.\\n\\nOn a side note, less people is good for wealth equality. Now workers are treated as disposable, many are in bullshit job positions. Plague history shows how surviving working class was rewarded.', \"It can be WW3 or something. I think it will be. 'Cause our resources are running low. It's inevitable. The humankind has to suffer. :(\", \"Spraying water into the atmosphere and raising the humidity level would actually slightly increase the world's temperature because humid air retains more heat than dry air. Additionally, it takes electricity to pump water, so you'd be using more electricity which generally means more global warming.\", 'Cool story, high entertainment potential.', \"That's the sad thing, isn't it? People think that you are fear-mongering, while it's a real issue.\", 'The best thing you can do is call or write your government representatives.', 'Recently. Oh brother.', \"If you really want to tell people I think you have to accept that some people will always think you're fear mongering. Not talking about it because of other people's feelings that don't believe it in the first place will never help to solve the issue. I agree with the other commenter though that right now is the best thing that could be happening for the planet. It won't last forever, but maybe when things begin to slowly return to normal it will have changed the way people think and people will finally realize that maybe there are some important things besides themselves\", 'Earth is healing herself at a rapid pace right now. Like aggressively fast. Due to the planet shutting down from Corona .', \"Sounds like a ton of dependencies on others and companies. You'll need zoning for your bunker and your geothermal system uses a ton of electricity to circulate. Solar panels are only rated for maybe 25-30 years before efficiency falls below acceptable levels. \\n\\nThink simpler. Less moving parts, less technology reliant upon electricity. Focus on the basics- shelter, food and water.\\n\\nAlso, others may try to take it from you. Including military and government.\\n\\nTry over at r/preppers\", \"You might want a waterproofed underground LIVING & SLEEPING space, for when air temperatures become intolerable, and there's not enough power for air conditioning. Also useful in case of \\ntornadoes.\\n\\nA Motte and Bailey construction would be good to provide earth insulation, yet still be above possible flooding from heacy rains.\\nReflective coating or solar-panels on all non-North facing surfaces.\\n\\nLarge openable panels on roof, to dump heat into space at night.\", \"I've thought a ton about just going it alone with me and my family in a little bunker like you describe. I do feel like its not super safe, and roving marauders will eventually stop by your house. I feel like a group of like minded people would be smart to form a community.\\n\\nyour thoughts?\", 'my plan is to die as painlessly as i can.', \"My plans for a 2 degree temperature rise. \\n\\n1. pay lots and lots of taxes so that the government can improve the infrastructure to cope with more extreme weather.\\n2. Mourn the lost wildlife that couldn't adapt to rapid change.\\n3. Welcome the migrants who have come to my country to avoid really bad weather where they used to live.\\n4. 
Wish that people had taken CO2 more seriously\\n\\nI'm not trying to be dismissive of the problems of climate change, but before preparing for a scenario out of a mad max film, please bear in mind...\\n\\na. people happily live in the sahara desert, where it is very hot and dry.\\n\\nb. people happily live in Holland, where the land is literally below sea level.\", 'Iโ€™m just gonna refer to global warming as climate change since cc includes the actual effects of warming. Anyways, hereโ€™s a collection of things you should be worried about (enjoy!):\\n\\n-rising sea levels as you mentioned which will cause flooding of coastal cities (itโ€™s important to note that 40% of the worldโ€™s population lives within 100 km of a coast so itโ€™s not a risk for โ€œa relatively small number of humansโ€) [source](https://sedac.ciesin.columbia.edu/es/papers/Coastal_Zone_Pop_Method.pdf) \\n\\n-extreme weather events such as heatwaves which lead to wildfires and drought (impacts agriculture) also heavy precipitation and blizzards\\n\\n-now in terms of the environment it causes effects such as ocean acidification (which badly harms the ocean life) and once again wildfires and drought (negatively impacts flora and fauna) \\n\\n-if youโ€™re still not feeling it w the above points then Iโ€™ll mention how even if youโ€™re somehow someplace where youโ€™re not experiencing the above, youโ€™ll experience it through an increase in prices of certain foods/goods (which would go on to threaten food/goods availability), higher tax rates, health problems, and for sure major (negative) impact on the economy \\n\\n-Iโ€™m probably forgetting some things rn because itโ€™s a little late but these are the main ideas\\n\\nThis still may not seem like much to you and thatโ€™s fine because itโ€™s hard to imagine this happening to our planet. Climate change is unprecedented and itโ€™s truly the greatest test of our humanity. As for our species it has the potential to wipe us out unless we take major action (Including the poor other living creatures :( ). The planet will still exist regardless because we (hopefully) canโ€™t blow it up and physically remove it. It will definitely be inhabitable without question (at least for our type). \\n\\nSorry if this doesnโ€™t make sense but I hope this answers your questions in some capacity and please ask more if this doesnโ€™t convince you!', 'Shifts in climate are projected to lead to the collapse of several of the major food growing regions on earth leading to famine on a scale never before experienced, mainly in Africa and Asia.', \"Gases dissolve in ocean water at greater rates if the planet / ocean is cooler. This means greater oxygen availability for the fauna / flora in the ocean. This flora is actually what creates breathable air for the landlubbers. Increasing temperature means less oxygen in the water. When this suffocates the organisms responsible for creating breathable O2, you will see a massive collapse of the ecosystem.\\n\\nCO2 is weird because it'll form H2CO3 (carbonic acid) and will dissolve in greater rates and pushes out even more CO2 as the acidification then further dissolves the chalk (CaCO3 - basically sequestered CO2) at the bottom of the ocean. Acidification then causes bleaching of corals and massive changes to the ocean environment. This will further exacerbate the situation. 
As there's more CO2 in the air, the more it dissolves into the water (Henry's law).\\n\\nWe're parasites in the grand scheme of things and when these ecosystems collapse, that's going to do the true damage. Less arable, farmable land. Less available clean water. Less hospitable living areas on the planet. Hell, if the CO2 concentration in the atmosphere increases past a certain point, you will not be able to leave your house for very long as you won't be able to breathe the air without getting loopy. 78% N2 / 21% O2 and trace other gases is the composition of the planet. If you fuck with that O2 number too much, you put everything in jeopardy. Including the way the planet keeps cool.\\n\\nI suggest you read a chemistry book or just literally any science text book. It's like you don't know how Henry's law works, or what an ecosystem is, and only understand what's been spat on the news (ocean level rise because glaciers are melting...4HEAD) I feel like you can't grasp basic concepts and are asking questions like this. It's not even hate, I just feel sad because this is likely the level of understanding the general populace has. No understanding of scale, no understanding of pretty basic science principles, no understanding of interconnectedness of systems, no understanding of stimulus / response, no understanding of time scales, and just general poor background in any sort of mathematics and / or science.\", 'Small Numbers of People?... think again... All major cities in the entire world are located by large water bodies. Geography check!', 'i think you underestimate the amout of people that life in coastal cities and island in the world that will be homeless due to rising sea levels. but its not just that, it will also get hotter and there will be places were it will just be to hot to live. Additionally droughts will be much more servere and it will be harder to grow food. Also there will be more extreme and more regular extrme weather events.\\n\\nso all these things will make places uninhabitable, and the people that lost their houses will become refugees and must go somewhere else. \\n\\nif you look at refugees crisis in europe had about 1 million refugees and caused alot of trouble. \\nthe number of refugees that loose their homes due global warming will be closer to 1 billion. \\nso where are these people gonna go?', 'War. There will be massive wars over migration and resources as resources become more scarce. We saw how poorly the world handled migration from the Middle East following the Arab spring....imagine if every person living near the equator moved north or south to escape the heat, wild fires, expanding desertification, and general unpleasantness. There will be lots of wars started because of climate change. The DoD plans their strategy with this assumption baked in.', \" My plans for a 2 degree temperature rise.\\n\\n1. pay lots and lots of taxes so that the government can improve the infrastructure to cope with more extreme weather.\\n2. Pay extra for food, to cover the cost of farmers changing crops or moving location to suit the new climate.\\n3. Mourn the lost wildlife that couldn't adapt to rapid change.\\n4. Welcome the migrants who have come to my country to avoid really bad weather where they used to live.\\n5. Wish that people had taken CO2 more seriously\\n\\nI'm not trying to be dismissive of the problems of climate change, but for those imagining a scenario out of a mad max film, please bear in mind...\\n\\na. 
people happily live in the Sahara desert, where it is very hot and dry.\\n\\nb. people happily live in Holland, where the land is literally below sea level.\\n\\nc. rising temperatures and CO2 are bad for some crops, but good for others.\", 'Are you a human being?', \"It comes from over consumption of land and resources.\\n\\nPopulation growth is related to those things but it isn't a root cause. \\n\\nOver consumption of land and resources is the best locus for the direct cause of global warming.\\n\\nInterestingly global warming is just one of several effects that locus is causing. The rest are bad too.\", 'I saw this earlier and I looked at the sources electroverse and the global temperature twitter account, needless to say when I looked at them further I felt no need to see where their sympathies laid.', \"Please don't be an insufferable prick to them. It will only confirm their suspicion of the world lying to them. Just give civil arguments and eventually they won't be able to defend their point anymore. They'll start shouting at you and that's the sweet taste of victory\", 'Gee, thanks, that\\'s, um, quite the website. I went to the main page and gagged. Antivax and \"COVID 19 won\\'t hurt you\" interspersed with worst winter ever. \\n\\nIt\\'s sad to see the chemtrails claim another victim.', 'Climate change? Cover the earth in 800M Wales and the problem is solved.', \"Isn't global warming from co2 a chinese myth and truly the sun is getting hotter and that's the reason why the globe is warming.\", 'I NEED TO KNOW WHAT IS THIS', 'Really its just capiralism shutting down for a bit', 'You can get near real time from NASA, [https://earthdata.nasa.gov/earth-observation-data/near-real-time/hazards-and-disasters/air-quality](https://earthdata.nasa.gov/earth-observation-data/near-real-time/hazards-and-disasters/air-quality)', 'There is a good app: https://apps.apple.com/de/app/airvisual-air-quality-forecast/id1048912974?l=en', 'World made by hand?', \"I picked up a book at the op-shop recently, the blurb sounded like this but haven't read it yet, the title is 'end of days'\", '1) None of your claims have any figures. Using words like โ€œhugeโ€ when thereโ€™s no reference frame at all doesnโ€™t mean much of anything.\\n\\n2) None of your claims have sources.\\n\\n3) Youre unfairly comparing the fuel cost of nuclear power to the construction cost of solar and wind. All the concrete and steel still needs to be shipped. The technicians and operators need to drive to the plant.\\n\\n4) I donโ€™t know if you even know about this, but there isnโ€™t a lot of risk with nuclear power, itโ€™s the [safest source of energy.](https://www.statista.com/statistics/494425/death-rate-worldwide-by-energy-source/) Think of it like a plane vs a car. Plane crashes are a catastrophe while car crashes are โ€œjustโ€ a tragedy. But cars crash more often to the point that theyโ€™re far outpace planes.\\n\\nIโ€™m sorry but after this it just seems like youโ€™re addressing nonsense arguments and itโ€™s making me cringe to even hear it, Iโ€™m tapping out\\n\\nGeneral improvements:\\nSunglasses makes the communication less personal, making you seem insincere and untrustworthy.\\nEstablishing yourself politically and separating yourself from the โ€œliberals and progressivesโ€ alienates yourself from liberals and progressives.', 'No offense but I donโ€™t think anyoneโ€™s going to watch the whole thing. You need to be quick to the point and have an ordered structure already on your head. 
If thereโ€™s nothing visually striking, it really has no basis being more than 5 minutes. People on the internet have short attention spans.\\n\\nedit: Iโ€™ll go through it anyway', 'True', 'I want to cry.', \"Well that's at least kinda funny I guess\", \"Let's not forget that a green new deal is the only way we can save this planet! ๐Ÿ’š๐ŸŒ๐ŸŒŽ๐ŸŒ\", 'Get involved with your local movements and see what they have planned next month. They need your help to plan and to participate!', \"This article is 16 years old and we still haven't done $h#*. Shameful. Our leaders should be tried and convicted Nuremberg style for the suffering and death they're responsible for.\", 'The report says that Britain will have a \"Siberian\" climate by 2020. Maybe the report was kept secret because it was badly done.', 'I donโ€™t understand how the world leaders are just going to let them do this. They live on this planet too. All the money in the universe isnโ€™t going to protect them.', \"Change is necessary but suggesting mass murder makes you no better. It's taking the same act you are condemning. If you truly want to elicit change then you need to actually BE different.\", \"Rule 1:\\n\\nDon't piss into to the wind. Especially a hot dry wind that's getting hotter and dryer all the time. You'll just end up all wet.\", 'I don\\'t want to get all gloom and doom here, but the author of this article fails to address several \"apocalyptic\" scenarios, and displays some ignorance about the scenarios he does address. For example in discussing climate change the author completely ignores the negative effects that increased atmospheric CO2 appears to be having (such as on [ocean acidity](https://www.pmel.noaa.gov/co2/story/What+is+Ocean+Acidification%3F) and on [C3 plant health](https://cosmosmagazine.com/biology/long-term-experiment-shows-we-ve-got-it-wrong-on-photosynthesis)), and frames the issue as a matter of temperature. And then he incorrectly suggests the only problems caused by increasing temperatures are consequences like sea level rise, when in fact there are several other consequences, such as (but not limited to) declining insect fertility ([source 1](https://www.bbc.com/news/science-environment-38559336), [source 2](https://www.bbc.com/news/science-environment-46194383)). The author\\'s failure to adequately address the issues doesn\\'t bode well for their conclusions.', 'So will roaches and thatโ€™s what we have to look forward to in the future. Thousand year wars between humanity and versatile radioactive beetles. \\n\\nThe futures looking bright, innit folks?', 'So, a plague saves the planet?', 'First and foremost, look at where you are getting your information from. YouTube videos made by random people online is not a valid source. Iโ€™ve noticed that a lot in climate skeptics is their lack of education; not to offend you but when they see a clickbait article from a publisher online in a social media site, they tend to follow it more and more. But going back to the topic, an overwhelming majority of scientists all agree that climate change is real and itโ€™s a threat to all societies. Even Exxon Mobil scientists were made aware of this issue, years ago but did nothing to raise concerns simply due to corporate interests. Of that slim minority of scientists who do reject climate change, are all backed by corporations to serve in their financial interests. 
My environmental geography professor has gone over the harmful impact misinformation has done to society, especially on this topic, so I ask that you take a look at what actual accredited scientists say on this matter, while looking at valid sources. I would recommend using โ€˜Google Scholarlyโ€™ and simply search up climate change to get all the statistics, reports, and analysis on climate change. The sources come from independent agencies, universities, and so on so forth.', '>They often claim that global warming is caused by other factors such as gravitational pull\\n\\nHow would \"gravitational pull\" affect mean global temperature?\\n\\n> the sun\\n\\nSolar activity canโ€™t explain the warming since 1950, because [solar inputs haven\\'t been increasing](https://imgur.com/vf3jTHY) during that time. Theyโ€™ve been generally steady or decreasing, while temperature has risen.\\n\\nSimilarly, [the recent trend in cosmic rays is the opposite of what it would need to be, to cause warming](https://imgur.com/llsqkSB).\\n\\nEven \"skeptic\" blogs like [wattsupwiththat](https://wattsupwiththat.com/2019/12/12/deep-solar-minimum-on-the-verge-of-an-historic-milestone/) have acknowledged these recent trends in solar activity and cosmic rays.\\n\\n>it\\'s just a fluctuation in a large cycle of warming and cooling\\n\\nCyclical variations in the orbit and tilt of the Earth relative to the Sun are known as the [Milankovitch cycles](https://en.wikipedia.org/wiki/Milankovitch_cycles), which are understood to be an important driver of the glacial/interglacial cycle. But the Milankovitch cycles canโ€™t explain the recent warming - they are currently in a phase that should be leading to a slight cooling trend, if anything. [Here\\'s a graph that shows the Milankovitch forcing for the past 20,000 years](https://imgur.com/a/50Sotae). As you can see, it peaked around 10K years ago, corresponding to the warming that took us out of the last glacial period. It has been decreasing for thousands of years since then. (Source data available through [this page](https://biocycle.atmos.colostate.edu/shiny/Milankovitch/).)\\n\\n>green house gases are only made up of .3% percent Carbon dioxide\\n\\nCO2 has now been raised from around 270 ppm of the dry atmosphere before we started adding to it, to above 400 ppm (0.04%) now. That sounds like a very small percentage, but keep in mind that [more than 99.9% of the dry atmosphere is composed of gases that are not greenhouse gases](https://en.wikipedia.org/wiki/Atmosphere_of_Earth#Composition). So even as a trace gas, CO2 is the second-biggest contributor to the greenhouse effect, after water vapor, accounting for between [9 and 26% of the total effect](https://en.wikipedia.org/wiki/Greenhouse_gas#Impacts_on_the_overall_greenhouse_effect).\\n\\n>while water vapor makes up 3%, and is more effective at trapping in heat radiation\\n\\nYes, water vapor makes up the biggest portion of the total greenhouse effect. [But it acts as a feedback, not a forcing](https://www.yaleclimateconnections.org/2008/02/common-climate-misconceptions-the-water-vapor-feedback-2/). It amplifies cooling and warming signals from other causes, including the increase in CO2.\\n\\nI recommend starting with [skepticalscience.com](https://skepticalscience.com/) to find well-sourced answers to most of the common \"skeptic\" arguments.', \"Youtube, or even random internet website, isn't a good way to seek information. 
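For the "0.04% of the atmosphere" point made a few comments up, the arithmetic is simple enough to spell out. The figures below are the round numbers quoted in that comment (roughly 270 ppm pre-industrial, above 400 ppm now), not a precise dataset.

```python
# Back-of-the-envelope arithmetic for CO2 as a share of dry air, using the
# round figures quoted in the comment above (not precise measurements).
preindustrial_ppm = 270
current_ppm = 400

print(f"CO2 today: {current_ppm / 1e6:.4%} of dry air")                              # ~0.04%
print(f"Increase since pre-industrial: {current_ppm / preindustrial_ppm - 1:.0%}")   # ~48%
print(f"Share of dry air that is not CO2: {1 - current_ppm / 1e6:.3%}")              # ~99.96%
```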
It's very easy to take diagrams, numbers etc out of context and make them say what you want.\\n\\nA good way to inform yourself is for example reading IPCC's reports : [https://www.ipcc.ch/report/ar5/syr/](https://www.ipcc.ch/report/ar5/syr/) .\\n\\nA thing to keep in mind is that a very large part of people talking about global warming don't know what they're talking about.\\n\\nThe reality is that if you look at all the scientific publications about global warming, a vast majority come to the conclusion that the actual global warming is caused by the human activity. \\n\\nThose people are specialists, and know far better than random people on internet.\\n\\nAn other thing to see is that global warming isn't even the only issue caused by CO2 emission.\\n\\nCO2 dissolve in water and produce carbonic acid. This action change the pH of water and will cause important problems for underwater life.\\n\\nThe conclusion is that we have all interest in reducing our CO2 consumption.\", 'Try this podcast https://www.criticalfrequency.org/drilled', \"The most persuasive climate deniers are good at honestly telling PART of the story about the climate in a way which casts doubt on the argument for global warming, but they don't mention crucial facts which undermine their argument. \\n\\n&#x200B;\\n\\nFor example, it is true to post evidence showing that over thousands and millions of years the climate was only affected by the sun, volcanoes, ocean currents etc and not directly by CO2. However, this is deliberately misleading because until recently nobody was burning millions of tons of coal and oil and changing the balance of the atmosphere.\", \"You shouldn't believe a scientist, or two, or more.\\nWhat you should believe is the scientific consensus, ie the result of the majority of studies.\\nAround 97 to 99% of these studies are unanimous : global warming is real, and caused by human.\\nAlso you shouldn't try to understand exactly how and why, like you wouldn't try to be a specialist in molecular physic, or biology, or mathematics.\\nBut you should always thrust the scientific consensus.\", 'so there have been some good answers already but here is a graph that shows the correlation of c02 and average temperature. https://herdsoft.com/climate/widget/image.php?width=600&height=400&title=&temp_axis=Temperature%2BAnomaly%2B%28C%29&co2_axis=CO2%2BConcentration%2B%28ppm%29', \"You should check your sources, I was also brainwashed by skeptics back when I was in schooll. Now that I'm in uni and study something related to tbe field, I can tell that the claims that you often see on youtube and on blogs doesn't really make sense. You shouldn't get your information from scientists directly, becase they are people, and people can lie an be misleading. My recomendation is to see what scientific institutions have to say about it. Try looking what NASA, NOAA, IPCC, or the US Army Corps of Engineers, and you'll have a better picture of what the science actually says.\\nIf you are looking for a direct debunking of many of the claims made by skeptics I can't recomend you enough the youtuber potholer54.\\n[https://youtu.be/ugwqXKHLrGk](https://youtu.be/ugwqXKHLrGk)\", \"If you'd like to read a book that presents the evidence, Hansen's *Storms of My Grandchildren* is great. 
It focuses on basic physics and geological history, instead of just asking you to trust computer models.\\n\\nThere are chapters on the politics and what Hansen thinks we should do, so if you want you could just focus on a few chapters in the middle that go through the science. Just from reading that you'll easily see through a lot of the nonsense posted on youtube.\\n\\nHansen is the NASA scientist who testified to Congress about climate change back in 1988.\", 'Hey guys how about we donโ€™t attack this person. \\n\\nWhy not give him some good books to read or studies to look at?\\n\\nTry and Inconvenient Truth. \\n\\nMy only personal opinion is that changing to stop global warming will be harder than to maintain the status quo. Many people wish to maintain the systems in place at current because those very systems make them rich without refused to the effects it has on the planet, or itโ€™s peoples.', 'For me back in the 2000s when I was reading scientific papers on climate science and trying to figure out whether the arguments of the skeptics held any water, the most convincing evidence for me that global warming was happening and was caused by greenhouse gasses was the scientific data showing that the upper atmosphere of the earth was dramatically cooling. Here is one article that lays out some of the information:\\n\\n[http://www.theclimateconsensus.com/content/satellite-data-show-a-cooling-trend-in-the-upper-atmosphere-so-much-for-global-warming-right](http://www.theclimateconsensus.com/content/satellite-data-show-a-cooling-trend-in-the-upper-atmosphere-so-much-for-global-warming-right)\\n\\nThe upper atmosphere is cooling dramatically. There is no weather up there to cause fluctuations in temperature and the data is very clear. The cooling is causing the upper atmosphere to shrink. NASA noticed that satellites could fly lower without experiencing drag.\\n\\nWhy would a cooling upper atmosphere be evidence of global warming? Such cooling was a prediction of the scientists who argued that a rising greenhouse effect due to human emissions was warming the earth. Increasing greenhouse gasses trap heat in the lower atmosphere so there is less heat radiating to space and warming the upper atmosphere. In the 2000s, many sincere skeptics abandoned their skeptical stance when they saw the data showing a dramatic cooling of the upper atmosphere.\\n\\nIf global warming was caused by the sun, you wouldn\\'t see the lower atmosphere warming while the upper atmosphere was cooling.\\n\\nYou shouldn\\'t get all your information from one side. You lack the scientific training to find the problems with the skeptics arguments, so you will be tend to be persuaded by arguments that seem convincing.\\n\\nSee this youtube playlist by user Potholer54, science journalist Peter Hadfield:\\n\\n[https://www.youtube.com/playlist?list=PL82yk73N8eoX-Xobr\\\\_TfHsWPfAIyI7VAP](https://www.youtube.com/playlist?list=PL82yk73N8eoX-Xobr_TfHsWPfAIyI7VAP)\\n\\nPotholer54 lays out some of the basic evidence for human caused global warming and counters the arguments of the youtube climate skeptics.\\n\\nAlso check out [https://skepticalscience.com/](https://skepticalscience.com/). The website show the fallacies in the arguments presented by skeptics. It\\'s a never ending task. 
As soon as one argument is put to rest, skeptics come up with another \"reason\" why global warming isn\\'t happening, isn\\'t caused by humans, or isn\\'t dangerous.\\n\\nSkeptics arguments change over time because they are not arguing in good faith. They are looking to cast doubt on the scientific consensus about global warming rather than seeking to understand the scientific evidence.\\n\\nDo you really think scientific institutions all over the globe that have proclaimed that the earth is warming dangerously and that the cause are human emissions of greenhouse gasses are part of a vast left-wing conspiracy? It\\'s more plausible that the few individuals arguing the on the other side are on the payroll of fossil fuel companies. Many of them are, in fact, as you noted, indirectly receiving payments from industry.\\n\\nRegarding your concern about whether increasing concentrations of CO2 could cause a an increasing greenhouse effect, consider that a the greenhouse effect due to CO2 is settled science following a paper published in 1969. See the bottom of this post:\\n\\n[https://skepticalscience.com/basics\\\\_one.html](https://skepticalscience.com/basics_one.html)\\n\\nHow does increasing CO2 cause an increased greenhouse effect when water vapor is so much more important? CO2 and water work together to create a greenhouse effect because they have different and complementary absorption spectra. Radiation that would escape through water molecules is blocked by CO2. Water vapor is like a thick crocheted blanket with holes that let some heat escape. CO2 is like a thin blanket on top of the thick blanket that traps the heat that would have otherwise escaped. We\\'re getting into the weeds a little, but here\\'s a paper about this:\\n\\n[https://rmets.onlinelibrary.wiley.com/doi/pdf/10.1002/wea.2072](https://rmets.onlinelibrary.wiley.com/doi/pdf/10.1002/wea.2072)\\n\\nIncreasing CO2 warms the earth a little. The little extra warmth increases the amount of water vapor in the atmosphere since warm air holds more water than cold. The increase in water vapor causes the earth to warm more. Increasing CO2 causes a positive feedback from water vapor which causes more warming than that due to CO2 alone.\\n\\nScientists have been arguing about how much the earth would warm for a given increase in CO2 (climate sensitivity). I\\'m not going to try to get into the details. In recent years the estimates for climate sensitivity have been increasing as more and more warming is measured. In particular, the oceans are warming dramatically as they absorb most of the heat produced by the increasing greenhouse effect.\\n\\nTry to understand the basics: the increasing greenhouse effect from gasses produced from the combustion of fossil fuels. And keep in mind that you may lack sufficient science education to understand the more nuanced scientific arguments. Please trust the scientific institutions of the world.\\n\\nWhat the hell dude. Are you really leaning skeptic? Can\\'t you see Greenland is melting? Can\\'t you see that the Arctic ice is disappearing, coral reefs are dying, glaciers are melting, Spring is happening earlier, etc., etc., etc.??? Is there any good explanation for all of his other than the heat trapped by greenhouse gasses. 
Is there???', \"Hey, checkout this article.\\n\\n [https://www.bloomberg.com/graphics/2015-whats-warming-the-world/](https://www.bloomberg.com/graphics/2015-whats-warming-the-world/)\\n\\nit addresses most of the counterclaims that denialists put out (volcanos, sun, deforestation, etc.) You'll see that most claims are horseshit, put out by paid shills of the petrol gas industry.\", 'Propaganda from the left? What could the left possibly want besides cleaner air? Preventing the melting of glaciers? Donโ€™t you think oil and coal companies would want to stay in business? So what kind of studies do you think theyโ€™re funding?', \"I don't think asking for likes/upvotes on reddit is very wise. Make people think your karma hungry. But good luck getting solar panels.\", '20 to go', \"Maybe you guys should learn outside in the sunlight instead of using electricity to run the lights, or make the ceiling transparent. Hows that for innovation combined with conservation? The solar panels only utilize 20% of light anyway, why not use 100% of it? Maybe you can plant few trees while you're at it.\", 'No it wonโ€™t', 'I hope you get the solar panels!', \"Would help if you didnt upload a video in 240p. Cant even read what the text is saying, music is awful quality, tictoc???? It's a college that you're paying for a d they wa t you to raise money for them????? Private institution.... okayyyyyyyy surrreeeeeeeee...\", 'Same here, in Poland. No snow at all. Two years ago was at least cold and I had a chance to sleep in the woods in -15C. Nowadays - barely ever any freezy temperatures. That is so sad... and disturbing.', \"Yeah, it's like 62 degrees out today\", \"I just watched the first minute of this video and was shocked by how stupid it was. Yes, the earth may have been 2 degrees warmer in the past, but, it's the pace or how fast the temperature is changing that is so dramatic and why global warming is so bad. The local plants and animals of local ecology can adapt that fast. When the world was warmer, plants and animals were given thousands of years to slowly adapt to warmer temperatures, it was slowly evolve or go extinct. With global warming, the temperature is changing so rapidly, that most plants and animals can't adapt quickly enough to the new norms. It's the rate of change that is one of the biggest dangers of global warming. not just that the temperature might be a couple or more degrees warmer. Whoever put out this video is an idiot and should be ignored.\", \"Peer reviewed scientific studies examining the level of consensus among climate scientists that Earth is warming and the primary cause is human activity:\\n\\n Verheggen 2014 - 91% consensus\\n Powell, 2013 - 97% consensus\\n John Cook et al., 2013 - 97% consensus\\n Farnsworth and Lichter, 2011 - 84% consensus\\n Anderegg et al, 2010 - 97% consensus\\n Doran, 2009 - 97% consensus\\n Bray and von Storch, 2008 - 93.8% consensus\\n STATS, 2007 - 95% consensus\\n Oreskes, 2004 - 100% consensus\\n\\n\\nThe Cook 2016 meta analysis of consensus studies finds the rate of consensus between 90 - 100% and that agreement with the consensus approaches 100% as expertise in climatology increases.\\n\\nSO YEAH. I'll take the opinion of the phd experts who dedicate their lives to understanding climate and have been studying this for generations over some Bullshit YouTube video. Thanks.\", 'Trump administration\\'s NASA website still says its real. No longer updated, but still flying proud representing the scientific consensus that it\\'s us, cher. 
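Purely as a summary of the consensus figures listed in the comment above, here is the same list in code with a mean and range; the numbers are copied from that comment, not re-derived from the underlying studies.

```python
# Consensus percentages as listed in the comment above (copied verbatim, not re-derived).
consensus = {
    "Verheggen 2014": 91, "Powell 2013": 97, "Cook et al. 2013": 97,
    "Farnsworth & Lichter 2011": 84, "Anderegg et al. 2010": 97,
    "Doran 2009": 97, "Bray & von Storch 2008": 93.8,
    "STATS 2007": 95, "Oreskes 2004": 100,
}

values = list(consensus.values())
print(f"{len(values)} studies, range {min(values)}-{max(values)}%, "
      f"mean {sum(values) / len(values):.1f}%")
```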
It\\'s us indeed.\\n\\n\"Oh say can you see...? \" It lifts my spirits TREMENDOUSLY to see the NASA site still up and waving climate truth proudly. Through the rockets red glare, bombs bursting in air, gave proof through the night that the truth of mankind\\'s role in global warming still stands unbowed, undefeated.\\n\\nGlobal warming is an earth cycle that our actions affect. That affect is most evident in the incredible rate of accelerating change we are experiencing: \"Models predict that Earth will warm between 2 and 6 degrees Celsius in the next century. When global warming has happened at various times in the past two million years, it has taken the planet about 5,000 years to warm 5 degrees. The predicted rate of warming for the next century is at least 20 times faster. This rate of change is extremely unusual.\" [https://earthobservatory.nasa.gov/features/GlobalWarming/page3.php](https://earthobservatory.nasa.gov/features/GlobalWarming/page3.php)', 'Earth is in a warming cycle right now but normally that would take dozens of years, were accelerating it with our C02 emissions (like really fast)', ' [https://grist.org/climate-energy/current-global-warming-is-just-part-of-a-natural-cycle/](https://grist.org/climate-energy/current-global-warming-is-just-part-of-a-natural-cycle/)', 'The glacial/interglacial cycle is well accepted by climate researchers, and not in dispute. But the recent warming is not part of that cycle.\\n\\n As our industrial age began, we were already in the relatively warm phase of the ice age cycle. The last glacial period ended about 11,000 years ago, the warming from that shift ended about 8,000 years ago, and basically all of human civilization has developed in a long, relatively stable interglacial period since then (known as the [Holocene](https://en.wikipedia.org/wiki/Holocene)).\\n\\nBut based on what is known of the causes of the glacial/interglacial cycle, we should not naturally be experiencing rapid warming now as part of that cycle. If anything, [we should be cooling slightly](http://www.climatedata.info/forcing/milankovitch-cycles/files/stacks-image-6a30b42-800x488.png) - although due to the current status of the [Milankovitch cycles](https://en.wikipedia.org/wiki/Milankovitch_cycles) (a primary driver of the glacial/interglacial changes), weโ€™re in a particularly stable interglacial period, and the next full glaciation would likely not be for the next 50,000 years ([Ref 1](http://science.sciencemag.org/content/297/5585/1287), [Ref 2](https://www.nature.com/articles/nature16494)).', \"It is 100% earth's cycle, we are in a warming period. However, we are making it go way too fast!\", 'The alternative is to use a bidet, which requires pumped water. Pumped water uses electricity, which contributes to global warming.', \"Or go on a keto diet, and your stools will be so hard that you don't need to wipe.\", 'Very very good point', 'Nah sorry I tried it, still need to use paper.', 'You do need to clean your ass even if you squat. Trust me, Iโ€™m Slav living in Japan.', \"Temperature is going to get hotter and hotter. If we do nothing, the projections are that first we hit 2 celsius, which is a crisis point for our planet. After that we are heading to 3, then 4, then 5 and 6 Celsius. Whether that takes 1 or a few hundred years, that is civilization ending for most of our planet. 
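The NASA quotation above ("about 5,000 years to warm 5 degrees" versus "between 2 and 6 degrees Celsius in the next century") implies the "at least 20 times faster" figure by straightforward division, shown below with those quoted numbers.

```python
# Rate comparison using the figures quoted from the NASA page above.
past_rate = 5 / 5000        # deg C per year: ~5 C over ~5,000 years
projected_low = 2 / 100     # deg C per year: 2 C over the next century
projected_high = 6 / 100    # deg C per year: 6 C over the next century

print(f"Past deglacial rate:  {past_rate:.4f} C/yr")
print(f"Projected rate:       {projected_low:.2f}-{projected_high:.2f} C/yr")
print(f"Speed-up factor:      {projected_low / past_rate:.0f}x to {projected_high / past_rate:.0f}x")
```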
\\n\\nSo yes, bringing the cost down of our solutions and massively ramping them up is cost effective because we wont be able to air condition or eat money in the literal hellscape we are creating. \\n\\nApart from it being overall hotter, insects,.plants, animals, sea life, forests, coral, and so on are going to get wiped out. The moon is nice to look at but it's the ultimate desert w no life. \\n\\nAnd there are cheaper way to get out of this mess. \\n\\nPeople.need.to organize to do so though. \\n\\n1) There is a mess in getting organized. \\n2) information that is necessary to make decisions is all over the place but needs to be categorized\\n3) plans need to be made, by governmenta, by business, and most importantly by the other brilliant minds of the 7.5 billion or so ppl on this world that each studies and owns a chunk of the problem. \\n4) those plans need to be implemented on a mass scale, with the ever continuing drive towards efficiency (using the best solutions more, without expecting 1 silver bullet). \\n5) keep refining things and recruiting more and more people in a transnational organization that work on these issues, some free some paid, because there is a ton of work to do and it needs doing now. \\n\\nFixing these and many other in between steps is what our group LiveCivix is working on. Because whether you are rich, middle class, or poor, it is each our collective responsibility to build this world into a better and continuously livable one. Because all the extra 0's in the world won't make life pleasant living in a bunker (if you can even afford one and then for how long until supplies run out) when things get really bad. \\n\\nFor solutions as an example, there is a book/org called drawdown that outlines about 100 effective things people could do to fix climate change completely heading towards the goal of drawdown (pulling more carbon out of the atmosphere than we put up.)\\n\\nAnd a guy named Brian von Herzen who's research project has the potential to drop the cost of carbon removal to the low 2 digits per ton or even maybe make a profit on it with his off shore seaweed platform designs. \\n\\nThere are likely thousands of these potential solutions around and no one hears about them. \\n\\nSo if you want to know more,.or commit yourself to a better world, reach out to me as we work on building a standardized system to get people working on projects that fix the problem legally. We can by doing the hard but worthwhile work and getting out of this mental rut of waiting for the inevitable end by doing nothing.\", 'Doing nothing is actually the ultimate solution. Polluting industriesโ€™ workaholism is the problem.', \"The hotter it gets, the more the positive feedbacks kick in, making it get hotter even faster.\\n\\nThis is a fucking Catastrophe, and asking what would be 'cheaper' is absurd!\", 'I wonder if the $50 trillion includes the cost of human suffering? Or if we can put a price tag on biodiversity or quality of life? We must stop global warming by transitioning off fossil fuel. The change will require a carbon tax which will fund transitioning our transportation system to electric cars, electric trains and powering it all with new safe small scale modular nuclear reactors, (SMRs). SMRs are not you grandfathers nuclear power, Chernobyl, Fukushima or a Three Mile Island waiting to happen, but safe and reliable. Think of the difference between a Ford Model T and Tesla Roadster. 
If we could put a man on the moon in less than ten years we can become carbon neutral in less than 10 years. With proper planning a smooth transition can occur.', 'Well this is certainly an anticlimactic robot uprising.', \"I don't doubt this is true, but I just wish these folks would have published first, then got the media attention after. I like to read the report for myself, and then promote/discuss from an informed position.\", 'A denialist will say: \"But, but, bot are my friends. They are the only ones that talk to me while I am browsing the Internet from my grandma\\'s basement\".', \"Maybe using the fossil fuels to produce only plastic, and this would decrease DRASTICALLY the CO2 production, like in 80% or so (sorry if i'm wrong).\", \"There's just so much evidence. It's troubling that this guy is teaching science. I'd start with this link: https://climate.nasa.gov/evidence/\", \"Provide data sets. Data doesn't lie, but people aren't perfect either. The more data sets you have to reference the stronger the argument you can make.\\n\\nIf you're teacher is in fact a science teacher hit them with the ol' Scientific Method route. You'll come off a lot better and show them you understand how science works.\", '\"Claim 4: The sun or cosmic rays are much more likely the real causes of global warming. After all, Mars is warming up, too.\\n\\nAstronomical phenomena are obvious natural factors to consider when trying to understand climate, particularly the brightness of the sun and details of Earth\\'s orbit because those seem to have been major drivers of the ice ages and other climate changes before the rise of industrial civilization. Climatologists, therefore, do take them into account in their models. But in defiance of the naysayers who want to chalk the recent warming up to natural cycles, there is insufficient evidence that enough extra solar energy is reaching our planet to account for the observed rise in global temperatures.\\n\\nThe IPCC has noted that between 1750 and 2005, the radiative forcing from the sun increased by 0.12 watt per square meterโ€”less than a tenth of the net forcings from human activities (1.6 W/ m2). The largest uncertainty in that comparison comes from the estimated effects of aerosols in the atmosphere, which can variously shade Earth or warm it. Even granting the maximum uncertainties to these estimates, however, the increase in human influence on climate exceeds that of any solar variation.\\n\\nMoreover, remember that the effect of CO2 and the other greenhouse gases is to amplify the sun\\'s warming. Contrarians looking to pin global warming on the sun can\\'t simply point to any trend in solar radiance: they also need to quantify its effect and explain why CO2 does not consequently become an even more powerful driver of climate change. (And is what weakens the greenhouse effect a necessary consequence of the rising solar influence or an ad hoc corollary added to give the desired result?)\\n\\nContrarians therefore gravitated toward work by Henrik Svensmark of the Technical University of Denmark, who argues that the sun\\'s influence on cosmic rays needs to be considered. Cosmic rays entering the atmosphere help to seed the formation of aerosols and clouds that reflect sunlight. In Svensmark\\'s theory, the high solar magnetic activity over the past 50 years has shielded Earth from cosmic rays and allowed exceptional heating, but now that the sun is more magnetically quiet again, global warming will reverse. 
Svensmark claims that, in his model, temperature changes correlate better with cosmic-ray levels and solar magnetic activity than with other greenhouse factors.\\n\\nSvensmark\\'s theory failed to persuade most climatologists, however, because of weaknesses in its evidence. In particular, there do not seem to be clear long-term trends in the cosmic-ray influxes or in the clouds that they are supposed to form, and his model does not explain (as greenhouse explanations do) some of the observed patterns in how the world is getting warmer (such as that more of the warming occurs at night). For now, at least, cosmic rays remain a less plausible culprit in climate change.\\n\\n\\n \\n\\nAnd the apparent warming seen on Mars? It is based on a very small base of measurements, so it may not represent a true trend. Too little is yet known about what governs the Martian climate to be sure, but a period when there was a darker surface might have increased the amount of absorbed sunlight and raised temperatures.\" \\n\\nhttps://www.scientificamerican.com/article/7-answers-to-climate-contrarian-nonsense/?fbclid=IwAR2vkrOk0d36Dvl9xPZEfmYqXosawIStSWuJN09x5MXMjrsN0C5SYTtOxJM', 'Solar activity canโ€™t explain the warming since 1950, because [solar inputs haven\\'t been increasing](https://imgur.com/vf3jTHY) during that time. Theyโ€™ve been generally steady or decreasing, while temperature has risen.\\n\\nSimilarly, [the recent trend in cosmic rays (which is indirectly related to solar activity) is the opposite of what it would need to be, to cause warming](https://imgur.com/llsqkSB).\\n\\nEven \"skeptic\" blogs like [wattsupwiththat](https://wattsupwiththat.com/2019/12/12/deep-solar-minimum-on-the-verge-of-an-historic-milestone/) have acknowledged these recent trends in solar activity and cosmic rays.', \"I'm late but I would start with the skeptical science website. \\n\\nhttps://skepticalscience.com/solar-activity-sunspots-global-warming.htm\", 'This two articles might help :\\n\\n[https://www.newyorker.com/magazine/2017/02/27/why-facts-dont-change-our-minds](https://www.newyorker.com/magazine/2017/02/27/why-facts-dont-change-our-minds)\\n\\n[https://jamesclear.com/why-facts-dont-change-minds](https://jamesclear.com/why-facts-dont-change-minds)\\n\\n&#x200B;\\n\\nYale climate communication has a wealth of resources on talking about climate. Most often, spitting out facts dont work. I am an energy scientist and public speaker. After all these years, my biggest lesson : in certain scenarios, I may not be the person who should lecture a denier on climate. It has to come from someone they trust and agree with in some other aspect of their life. I always try to collaborate with a person from the community and make it a discussion instead of a debate.', 'Your \"science teacher\" is no teacher at all. He shouldn\\'t be \"teaching\" science if he is unable to look at the very real evidence all around us. I say this as a scientist (Ph.D.), having taught at university. By the way I\\'m a boomer and know that climate change is real. Might he be an evangelical?? Some of them believe god will save them so they don\\'t need to worry about it.', 'What country do you live in? Can you report your teacher to your principal for being unqualified to teach science?', \"1- You can learn a lot of thing about global warming on the website of IPCC : [https://www.ipcc.ch/report/ar5/syr/](https://www.ipcc.ch/report/ar5/syr/) . I suggest you to read as much as you can of the synthesis report. 
It is 167 pages long, but if you put the effort to understand the main diagrams and read the text in orange box, you'll learn a good amount of facts.\\n\\n2- Ask your professor if he got facts that back up what he is saying. A real scientific argument should be proved by number, multiple scientific publications, etc...\\n\\nFor every argument he says, ask him for proof, for the name of the study, basically, ask him where he found his informations on the subject.\\n\\nIf he don't know what to answer when you ask this, it will just show that he's not a scientific and basically know nothing about what he is talking.\", 'He is not giving away the money, but transferring them to his private fund. Educate yourself on how billionaires benefit financially from such schemes.', 'Thatโ€™s nice.\\n\\nNow go pay your taxes too.', 'He has announced it. Do good PR. Remains to be seen if he spends it... while it still matters.', 'I dont even have 7% left of my check when i get paid', 'Thank God for the largesse of insane wealth. What a hero.', \"I have - $40.00 I'll take my $3.08 plz\", '[deleted]', \"The Earth will warm no matter where the concentration occurs. The CO2 is acting like a blanket that's trapping the sun's radiation. Simply put, the heat is being absorbed faster than it can convect through the atmosphere and radiate back away from earth. This is the reason Venus is hotter than Mercury, despite Mercury being closer to the sun.\", 'Not sure I understand the question, but here is one thing to keep in mind: greenhouse gasses work by absorbing the infrared emitted by the Earth.', 'Mental gymnastics much.', 'We have to outbreed them and/or educate their children properly', 'You could always retort that climate change is Godโ€™s Will and we are nearing the end of Days, then quote the scriptures to prove it is real, e.g. \\n\\nโ€œFor behold, the day is coming, burning like an oven, when all the arrogant and all evildoers will be stubble. The day that is coming shall set them ablaze, says the Lord of hosts, so that it will leave them neither root nor branch.โ€\\n\\nMalachi 4:1', \"Know that you're not alone in this. I'm in my fifties and I am a mother. I have children who share your love for the planet. I have to believe there are enough of us out there that care as we do. I have to believe that we as a species will work together to heal this beautiful planet. To believe all is lost is not something my mind and my heart can tolerate. So I have to believe in the good of people. There are so many of us who really do care.\", 'Lol religion.... backwards as you can get', 'Ask her if she wears a seatbelt. If she says, \"yes\", then turn the argument back on her:\\n\\n\"Well seatbelts are just the wisdom of man and the Bible calls that foolish. People have been in accidents since before there were cars and we need to trust in God\\'s wisdom and not the wisdom of car manufacturers. All that is doing is creating fear in your heart.\"', 'There is no such thing as god. The Bible literally tells us that slavery is okay and that the world is 6,000 years old', 'I had this same conversation with my grandmother where she claimed โ€œGod will protect us.โ€ Or something to that effect, and to that I said โ€œIsnโ€™t climate science and modern green technology that logical protection he is providing us, arenโ€™t those the tools necessary to save ourselves?โ€', \"Use their religion against them. In this case, I would ask her is she is familiar with with the parable of the good steward. 
If she is a Christian and strongly religious she should know it. Then ask, when Jesus returns will you multiply what was given you or waste God's investment in your stewardship of the Earth? I have yet to hear a counter argument.\\n\\nAnother thing you could do is use the same reasoning for something they care about. Social Security, Medicare, Medicaid, the Military, etc. The first argument works best in my experience.\\n\\nGood luck.\", \"Try out some of Katharine Hayhoe's Points on her.\\n\\nHere's a short video of hers:\\n\\nhttps://youtu.be/SpjL_otLq6Y\\n\\nBut she's into science communication, so there's a lot of her out there.\", 'I am not a religious person but from my limited understanding, much of religion revolves around prayer. You might need to pray and tell your mom of your prayer so that the blind may see and the deaf may hear and then ask her to pray with you.', 'You can counter by saying that it is mans wisdom which has caused this issue and is detrimental to gods plan. You can stand in the ground that god would want us to save as many innocent people as we can from the problems caused by mans wisdom.', 'What was it, 2 days ago Antarctica got a new highest temperature of 69ยฐF (20.56ยฐC) and previously was 67ยฐF (19.4ยฐC)?', 'Have you tried saying โ€œdid you see god say thatโ€ and the watch them do backflips trying to justify their stance', 'Well its up to us to make a change', 'Just say the old timey religious phrase \"God helps those who help themselves.\"', 'I will admit that I am an atheist and even I do not care too much.. here are my reasons why...\\n\\n&#x200B;\\n\\n1. The world is always changing\\n2. We have had many major extinction events prior, all due to climate change\\n3. Don\\'t think you\\'re so special that this time because humans may be exacerbating the speed of climate change that you\\'re any different than any of the previous reasons behind environmental change, mass catastrophe, or extinction events.\\n4. Most environmentalists and vegans only seem to care for \"cute\" animals? Selfish? Why do they worry about Koalas? Go start a cockroach farm. They seem to treat animals, including humans, on a certain scale of what is worth more than the other. Why should I care more about a shark than a raccoon? Is not a life a life either way, regardless of its circumstances? Go take in a homeless man if you\\'re so concerned about wellbeing rather than a dog. We are all equal in the sense of life, so I do not get why dog\\'s are placed higher than humans when they are equal.\\n5. If humans are making the world a worse place, then isn\\'t mother nature doing its job to get rid of us? The best way to reduce your carbon footprint is to stop having children, stop flying, stop driving cars, and-just die?\\n6. Maybe the next species that arise some 25-100 million years from now will be SUPER-WOKE to maintaining the status quo more than we are. Let humans die out and allow something better to come along? I don\\'t hear anyone crying over the loss of the previous 99% of species that have ever lived on this planet for the last 4 billion years and how we should try to revive Pre Cambrian species.\\n7. Slowing down our demise only continues to maintain humans as the primate species of control over this planet. Let mother nature do it\\'s thang and wipe us out, as she always will.\\n8. Maybe go to another planet and bring animals with you. Noah\\'s Arc anyone?\\n\\n&#x200B;\\n\\nYes. 
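One comment a little further up quotes the recent Antarctic temperature records in both units (69 F / 20.56 C and 67 F / 19.4 C); the conversion is the standard Fahrenheit-to-Celsius formula, sketched here as a quick check.

```python
# Standard F-to-C conversion, checking the Antarctic record figures quoted above.
def f_to_c(fahrenheit: float) -> float:
    return (fahrenheit - 32) * 5 / 9

for record_f in (69, 67):
    print(f"{record_f} F = {f_to_c(record_f):.2f} C")   # 20.56 C and 19.44 C (~19.4)
```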
We are obviously making climate change happen quicker, but if mother nature is the overall concern, maybe we should be wiped out or leave? That is where I end argument with environmentalists. You\\'re still an animal. Let the universe run its course. We\\'re just a minor hiccup in an endless universe. If you\\'re so progressive then let the change happen. I am more interested in seeing a planet earth without humans at this point.', \"I'm truly sorry your mother can't see it. That has to be painful.\", 'Shoulda said fuck God', ' [https://grist.org/climate-energy/current-global-warming-is-just-part-of-a-natural-cycle/](https://grist.org/climate-energy/current-global-warming-is-just-part-of-a-natural-cycle/)', \"The cycles are called milankovitch cycles and those are in a cooling phase. There are also solar cycles that can affect global temps, but those too are in a cooling phase. This warming is actually counter cyclical to the earth's natural cycles. Without greenhouse gas emissions the earth would likely be cooling very very slowly. Instead it is warming more rapidly than natural cycles could account for even if they were in warming phases.\", 'The xkcd global warming comic', \"Save your breath, if they don't believe by now the never will. They are probably Republicans so no use trying to change their opinion on anything at all...\", 'The are probably right. IMHO', \"Tell them it's not supposed to be happening as quickly as it should be. An ice age lastes over a few million years. Currently, we are exiting it, so we should be getting warmer. But based from the rising temperatures from the last 40 years, it shouldn't be happening this quickly.\\n\\nAnd we have achieved temperatures higher than the earth's highest natural temperature.\", \"I should thank my lucky stars every day my parents aren't brainwashed by fox type idiocy.\\n\\nI feel for you, it must be painful.\", 'Thats can be quite stressful but good luck buddy. you got plenty of time left wirh so guess at least you dont have to rush things.\\nhttps://skepticalscience.com\\nhttps://youtu.be/9M29ns1rUSE', 'Exactly how I feel', '\\nIf you or someone you know is contemplating suicide, please reach out. You can find help at a National Suicide Prevention Lifeline\\n\\nUSA: 18002738255\\nUS Crisis textline: 741741 text HOME\\n\\nUnited Kingdom: 116 123\\n\\nTrans Lifeline (877-565-8860)\\n\\nOthers: https://en.wikipedia.org/wiki/List_of_suicide_crisis_lines\\n\\nhttps://suicidepreventionlifeline.org', 'Existential dread is something we all share. I remember (maybe I was 5 or 6) being acutely aware of impending nuclear death. In fact, in my darkest moments, that is how I predict the tragic death of myself and my loved ones.\\n\\n There are no assurances in life, other than something is going to kill you. Climate change will impact our lives just as war and famine and disease always has; there is nothing you can do to control the circumstances of your inevitable passing.\\n\\nSo get on with adapting to your future. Focus on self sufficiency and sustainability. Learn how to build and how to grow. Get strong in body and mind. These are things that you can do now to lessen the impact that these external influences have. They will make you feel better about the future, by changing things you can have a direct influence over.\\n\\nGood luck brother.', \"Im in my late 30s in the US and feel very similar. 
i watch the fastest growing car segment are SUV's while people talk seemingly rationally about wanting cheaper gas prices and I just dont understand how they arent just panicking inside and we all arent trying to get off gas with every spare resource possible on top of all of the other areas really we need to focus.\", \"The problem with global warming is going to be that those who rule over us will probably leave us out to hang. Rather then shift tax dollars away from them and over to the rebuilding of society, in new locations. New Towns will be required, shifting to new arable land to farm. As well as an urgent need to back off the commercial foraging of wild life without total sustainability. Convenience foods designed for maximum nutrition and not salty, fatty comfort. Get it in everyone minds the fact of living within Limited Resources is a major priority. None of the positive aspects of transitioning to a new climate can be accomplished without the average human becoming proactive, educating themselves, and leaning in to press hard against the greedy and self centered minority of the wealthy. The squeaky wheel gets the grease. It will also require the awareness that human population must be reduced, probably to half the current population. Which can humanely be accomplished by acknowledging the urgent and eternal need to reduce births. Just halting and maintaining current population won't decrease the massive over use of limited resources that remain available to sustain a modern life style. We'll probably massive and grossly expensive tax use to build barriers around the large coastal cities. When in stead, that money needs to be spent on relocation. Relocation will be forced on us and better to spend the money now, then spend it and than spend even more after the rising water forces the issue.\", 'PBS, Nova, Polar Extremes, Season 47- Episode 1, is a great film to learn from.', \"I'm 15 and parents want me to go to college. Idk if it's even worth it if I'm gonna starve to death in the future.\", \"Ride it out, where are you going to go?\\n\\nDying is going to happen no matter if the world was perfect. Fourteen and a half billion years you were dead, when you leave, it could be billions more, for all you know.\\n\\nGo easy man, you get this time. Don't cheat yourself out of the infinitesimal time you know you have.\", 'The Netherlands, no snow this year (or for the past few years...). January was about 3.5c warmer than usual.\\n\\nThe snowy winters from my childhood are pretty much gone (although I do hate snow now....)', 'Yeah. The same in Central Europe (Czech Republic). It started to snow a little in February, otherwise Christmas on mud is normal in this times here.', 'Today Is 20 C is southern Europe', \"New England here... some of the same, but we're also getting feast-and-famine years. Our mild winters (like this one so far) feel like autumn. Our bad winters are unending with record levels of snow over a short period of time.\", \"I'm in Utah, USA. I feel like the snow comes 2 weeks later every year\", \"Not a similar experience at all but.. Yesterday, it snowed in Iraq. Yes, that's right. I said IRAQ. It snowed in a desert.\", \"Live in west-Norway. One year, I think ut was 2013, there was enough snow for is to make a huge snow pile, climb onto the roof of our house, and jump down into the snowpile! It was mye grandpa's idea. He told me he used to do it EVERY YEAR when he was younger! 
I thought it was a lot of fun and have always wanted to do it again and see whether mye little brother dares to do the same. But there never comes enough snow anymore to make a pile big enough so its safe to jump...\", 'Eastern Kentucky USA. Wore a T-shirt today. Why.', \"Same here in NYC. Usually we get a few massive blizzards in January. We've had snow maybe three times this winter and it was very light and never stuck more than a couple hours. I haven't worn gloves, a hat, a scarf, or thermal pants this winter. I wore shorts and a t shirt out this weekend and it wasn't a problem. Last winter and this winter feel more like the fall that slowly became spring. \\n\\nInterestingly enough however, the summers have not been that bad either the last couple years. It feels like the weather here is becoming more mild.\", 'Same in southern Germany. 20 years ago the snow got up to grown ups knees some winters. And thats in the valley big cities.\\n\\nThe last couple of years we had almost no snow and the little snow we had was way to late (started snowing November / December back then. Nowadays it starts in February / March and usually melts after one or two days.)', 'The real problem is that there is no quick fix to burning fossil fuels, in fact we humans are burning oil and gas faster than ever, at an accelerating rate and renewables arenโ€™t really making a dent.\\n\\nWe might as well resign ourselves to the fact that the atmospheric CO2 level has a long way to go before it peaks and learn to live with it.', \"I don't think you should minimize plastic pollution like that.\\n\\nThere is more and more speach about both global warming and plastic pollution, I don't think people in general are more concerned by plastic pollution than by global warming.\\n\\nI think that a good amount of people that cares about plastic pollution have the minimum of information and cares about global warming too.\\n\\n(and then there is the persons who doesn't give a single fuck about all of it)\\n\\nI don't think that the ocean cleanup that exist today is really efficient, but to me it's still a good thing because it raise awareness about a real problem, and will perhaps allow to develop better ocean-cleaning method in the future.\\n\\nCurrently I believe the best way to reduce plastic pollution is to cut a maximum of plastic consumption.\\n\\nTo conclude, I think everyone should try their best to cut the more they can their plastic consumption and their carbon footprint.\", \"The real problem is that with all of these problems, it's too late do anything to fix them.\", 'I think I like that you recognize a problem, and it seems to be something a lot of our fellow citizens are incapable of.', \"I'm expecting another broken record down there next year, or maybe even sooner than that.\", 'The meat industry produces a good chunk of green houses gases, plus it takes up a lot of land. However it is not your place to tell you friend how to live; though the meat industry has quite an impact on the environment, waste produced by corporations makes up around the production of 50% of greenhouse gases. 
Convince your friend to start voting, and maybe purchasing meat from ethical sources if he can afford to do so.', \"Here is an idea, if he doesn't share your views, you shouldn't keep attempting to convince him.\", 'instead of trying to convince them to sacrifice such a big part of their life, inform them on other ways they could contribute to the environment like recycling, stopping the use of single use plastics etc?', 'How about you donโ€™t force your views on to him and let him do what he wants', \"Compromise on chicken. We can't be asking people to fall into extreme life changes.\", 'Just start cooking him really good vegetarian meals.', 'Focus on better farming methods, emission standards, voting for the right candidates, renewable energy alternatives. Animal fat and protein is the most nutritious food on earth, and the reason we evolved into the creatures we are today. There are much bigger issues than the tiny fraction of GHG naturally caused by our most valuable food source.', 'Start with Impossible Burgers.', 'From r/vegan\\n\\n [https://www.reddit.com/r/vegan/wiki/beginnersguide#wiki\\\\_.2022\\\\_why\\\\_go\\\\_vegan.3F](https://www.reddit.com/r/vegan/wiki/beginnersguide#wiki_.2022_why_go_vegan.3F)', 'I mean... Is it really that important to you that everyone around you 100% agrees with you on everything? There really is no point in trying. If he is going to start eating less meat, it should be because HE wants to and not because you do.', 'Thereโ€™s bigger problems than your buddy eating meat. Youโ€™re literally on a device that is constructed of materials that are extremely harmful to the environment to mine and even worse when disposed of not to mention the energy using to voice this brainless opinion and the spent hydrocarbons your ear holes emit while you angrily explain to your friend that the meat industry is the only cause of global warming that he or she can have an impact on.', \"Sorry, this post was written in a hurry. O mean thar my friend, although he is a great guy, believes that it is in his right to eat meat every meal of the day. I believe that, sure, it is, but maybe eat a bit less. No, I'm not a vegan. No I am not forcing my diet on anybody. I am just trying to share my perspective on this with him. \\n\\nFurthermore he doesn't believe in Climate Change either (lmao) how do I convince him it is real?\", \"There are 2 ways I can see this. The way the cows are raised, or the factories the process the meat? \\n\\nSome people (not calling names) think that when cows die, they release methane that causes global warming. This natural methane is true, but it isn't enough.\\n\\nThe factories the process meant is much worse though. What's even worse is a clothing factory. Pretty much anything that is related to global warming is. You can't control your friend's diet like this.\", \"Although you can't fully change your friend's mind, you could always advise to him have one day out of the week where your friend doesn't eat meat, and make it a fun challenge. \\n\\nAlso humans have been eating meat for centuries...\", 'You should start eating meat.', \"Make them some amazing vegan meals and then give them the veganomicon cookbook and watch Dominion. \\nAlso point out that animal agriculture is often underreported for it's greenhouse gas emissions due to methane turning into ... Carbon dioxide. 
Finally wander over to any of the vegan subreddits and ask this question again.\", \"Disaster is inevitable.\\n\\nThe question now isn't how to stop it, but how to live with it for the next tens of thousands of years till our feeble efforts to slow it might take effect..\", \"It's coming on fast!\", 'The ski industry all over the world is in trouble.', \"The winter here in Lithuania has been an extraordinarily warm one. The temperature stays above 0ยฐC, while it used to drop to minus 20 at times. Must say we're truly f*cked.\", \"I can't be the only one who wants to tell these people to shut up. Where were you when we needed you? It's a done deal Dum Dum.\", '> recognizing the reality of climate change, but not whoโ€™s responsible or what could be done about itโ€”is reflected in todayโ€™s media coverage of climate. [...]\\n>\\n> **FAIR compiled every article mentioning โ€œclimate changeโ€ or โ€œglobal warmingโ€ that appeared in the New York Times, Washington Post, LA Times, USA Today and Wall Street Journal in August 2017, when only 277 articles ran, and August 2019, by which time the number had soared to 751. (These months were picked to ensure that results werenโ€™t skewed by time of yearโ€”climate coverage, like the storms and fires that often set it off, waxes and wanes seasonallyโ€”and also to precede the rush of coverage that accompanied Thunbergโ€™s visit.)** [...]\\n>\\n> ***while it turns out that the US media have indeed ramped up their coverage of the climate crisis, they continue to give short shrift to what are arguably the most important factors for determining our future: what specific human practices are responsible for the changing climate, why carbon emissions continue to rise, and what we can and should be doing about it*** [...]\\n>\\n> 2017: Fires and Floods, But Few Solutions [...]\\n>\\n> 2019: Costs of Decarbonizing, But Not of Inaction [...]\\n>\\n> The mediaโ€™s shift toward acknowledging the reality of climate change is welcome, if three decades too late, given that the IPCC has been sounding essentially the same alarm about a warming planet since 1988 [...] But **the public presentation of the climate crisis remains carefully constrained to focus on the horrors awaiting us, not on what** ***can be done*** **to ward off the worst, or who stands in the way of doing so. When climate coverage leaves that out, it amounts to mourning the Earth without trying to save it.**\\n\\n\\n-------------------------------------------------------------------------------------------------\\n\\n\\n**Cost of not acting on climate change -> https://www.theworldcounts.com/challenges/climate-change/global-warming/cost-of-climate-change**\\n>\\n> ***An estimate from the World Bank finds that climate inaction could reduce global GDP by at least 5 percent annually while the price of the necessary action is set to 1 percent of global GDP annually.***\\n>\\n> ***We already use 6.5% of global GDP subsidizing fossil fuels so a 1 % investment should be very possible.***', 'Im confused as to why the \"extremist left\" doesn\\'t commit any ecoterrorism against ExxonMobil!', 'Of course, climate change is, not a concerted effort since the eighties to disenfranchise small farmers and consolidate conglomerate farms.\\n\\nThey even spin the destruction of humanity to hide the crimes of the oligarchs.', 'You really cannot begin to think about solving global warming if you dont understand the greenhouse effect. I would reccomend looking at the wikipedia page. 
The best thing we can do to stop the planet from heating up too much is to stop polluting the atmosphere with greenhouse gasses like co2 and methane.', \"I hope it's a troll / sarcastic post.\", 'Youโ€™ve solved it!', 'We might need a cull, Or maybe this new virus might help mother nature out.', \"Give this man the noble Peace prize, the congressional metal of honor, and times Man of the year. It's the simple beauty of it. It's like poetry. HUMANITY OWES STAMPYWA A THANK YOU! You, sir will be remembered by countless generations.\\n\\nWHAT ARE WE WAITING FOR? LET'S ROLL UP OUR SLEEVES AND INSTITUTE THE STAMPYWAY!!!!!\", 'Fk up mate, go back to tent city', '> Over 55 scientists have signed an open letter rebuking Democratic presidential candidate and former Vice President Joe Bidenโ€™s claim that the climate plan rival contender Vermont Senator Bernie Sanders supports, the Green New Deal, isnโ€™t supported by anyone in the scientific field.\\n>\\n> Sanders has proposed spending $16.3 trillion through 2030 to radically reshape the U.S. economy, including $2.37 trillion to renewable energy and storage, over $2 trillion in grants for low- and middle-income families as well as small businesses to buy electric vehicles, and $964 billion in grants for those groups to electrify gas and propane heating systems. His plan also calls for $526 billion on a smart electric grid and hundreds of billions on replacing diesel trucks and buses and new mass transit and high-speed rail lines.\\n>\\n> Bidenโ€™s plan, while still more sweeping than any prior federal effort to address climate change, calls for $1.7 trillion in new spending and only arrived after immense pressure from environmentalists to detail a concrete approach. Biden has also called for ending fossil fuel subsidies across the G20 [...]\\n>\\n> Last week, Biden attacked Sandersโ€™ plan, telling reporters in New Hampshire that โ€œthereโ€™s not a single solitary scientist that thinks it can work,โ€ adding that he doesnโ€™t think zero emissions by 2030 wasnโ€™t possible (note that Sandersโ€™ plan actually calls for 71 percent cut in domestic emissions by that date). **57 scientists from universities and research institutes responded to Bidenโ€™s comments in an open letter in support of Sanders released Tuesday.**\\n>\\n> ***โ€œThe top scientific body on climate change, the United Nations Intergovernmental Panel on Climate Change (IPCC), tells us we must act immediately to bring the world together to stop the catastrophic impacts of climate change,โ€ the scientists wrote. โ€œThe Green New Deal you are proposing is not only possible, but it must be done if we want to save the planet for ourselves, our children, grandchildren, and future generations.โ€***\\n>\\n> ***โ€œNot only does your Green New Deal follow the IPCCโ€™s timeline for action, but the solutions you are proposing to solve our climate crisis are realistic, necessary, and backed by science,โ€ they added. โ€œWe must protect the air we breathe, the water we drink, and the planet we call home.โ€*** [...]', 'This is why I keep telling my parents that supporting Biden for his โ€œelectabilityโ€ makes zero sense. Heโ€™d be โ€œbetter than Trumpโ€, but so would a moldy old shoe. And neither of those options will begin to handle the issues that face Americans and our world.', 'I will never forget what my college science teacher told the whole class our freshman year. \"I know I shouldn\\'t be saying this, but dammnit your generation has the right to know this. 
I work with a lot of climate experts, I\\'ve done my own research h into climate change and if your generation dosent do something now, you will live in a world that none of your peers, mentors, government officals, ANYONE has ever lived in before.\" He lead on to say that \"if we dont do something by the year 2019 it will he too late.\" Here we are in 2020 essentially still doing fuck all about this. The boat has sailed people. We should still try SOMETHING, but it\\'s probably already too late. And yes he provided a lot of data points, logs, entries, etc. From his peers and the data was already very convincing back in 2016.', \"Ok, I'll try and dumb it down if I can.\\nSo Bernie is the first person in the federal state to do a move to fix climate change and reduce it by 71%, but scientists say it won't work and is now falsely restating what Bernie says. (Tell me if I got this correct)\\nPolitics are dumb. I know why we have some laws, but wtf, he is one of the first to try and make an effort and you turn it down!\", 'Im confused as to why the \"extremist left\" doesn\\'t commit any ecoterrorism against ExxonMobil!', 'Save the Bernie posts for r/politics thank you.', 'Ahh fuck', 'So many people are talking about it. None of those in power seem to care about their childrenโ€™s future though', \"We don't need Lifestyle/Consumerist change. This current neoliberal focus on Individualism is a hoax. We need to fight the oil corporations.\", \"I know it's been 5 days to this post, but look it's been 4 years to the video. Not much sense in sharing a piece on climate from that far back.\", 'Hahahaha \\n\\n# ***Weโ€™re fucked***', 'Oh, the irony of characterizing this as an American response, when the family in the video is French.\\n\\nIt\\'s so easy to point fingers and blame \"them\", but the reality is it\\'s \"us\". We, as a species populating planet Earth, need to recognize the threat. We can\\'t be thinking in terms of America this, France that, China didn\\'t, Australia did. We need to be solving this as a world community of humans, not as nationalists looking after our particular interests.', 'He left the kid behind', 'As soon as the rich stop buying ocean front homes, and building hotels on ocean front property, and the insurance companies and building departments refuse to build on ocean front....\\n\\nFaith without works is dead. Nobody really believes this as their actions tell me otherwise.', 'Oof', 'There is no such thing as global warming', 'What a stupid title for this video. America is doing a lot to mitigate climate change.', 'You are not wrong at all. If you follow Guy McPherson, he does talk about this, but hey, heโ€™s just a nut job, right? That is why we are so utterly fucked,', 'Didnโ€˜t check the numbers, but the ice reflects giant amounts of energy. 
Loosing it as a reflector is much worse than the buffer functionality', \"Your calculation looks approximately right if you consider, for example, that all the ice melt on a time of one year, and the next year, the same amount of energy which were used to melt the ice is used to heat up water.\\n\\nIn reality, this is of course not what is happening, this is much more complex than your approximation.\\n\\nAs I'm not a specialist of thermodynamics, I will not be able to correctly explain how this works in reality.\\n\\nAnyway, this phenomenon is basic and I'm very confident that every scientist working on global warming is well aware of it and that GIEC, for example, takes it in consideration for their prediction of temperature rise in the future.\", 'Well one could ask why the earth didnโ€™t turn into Venus when the last ice age ended. There are many feedback systems that play a role in global temperature, both positive and negative contributing. Just because ONE of the negative feedback loops is dampened or removed does not mean the climate will spiral out of control. It will simply reach a new equilibrium.', \"Why do they think that there's more of a threat of nuclear war now than there was during the Cuban missile crisis? Back when there was a Soviet Union, there were some scary times when it seemed like it could happen.\", 'Given that it has been set to about 2 minutes to midnight as long as I can remember, midnight isnโ€™t going to come in my lifetime so I am not going to worry about this silly clock that appears to be stuck.', 'Same story.\\n\\nClimate change has been noticeable in the past few years.\\n\\nI remember years back, when forecaster would say something along the lines of\"it will be quite a warm month, probably most warm in the past x years, the mid temperatures will be at โ€“8Cยฐ\". I don\\'t remember how forecasters talk, but something like this.\\n\\nNow it\\'s +4Cยฐ and it\\'s raining.', 'Coal and oil were first created in the carboniferous. CO2 was absorbed by the trees, and turned into fossil fuels. If we burn all the fossil fuels, we end up back with the 800ppm CO2 we had in the carboniferous. What did I get wrong?', 'I live in Ontario and itโ€™s the first time Iโ€™ve seen green grass in January. The trend is following last year but worse. A week of weather just below 0. Snow then a week of plus weather +1 or +2 and snow melts. \\nI really is evident with green grass in Ontario in January. Very scary.', 'Climate deniers: \"If global warming is real why is it cold?\"\\nActivists: \"The weather is not the climate you cretins!\\'\\nAlso activists: \"It\\'s warm so it must be climate change!\"\\n\\nNothing but hypocrites', 'Man, I want you to know that, we as humans, are coming to an end, we can\\'t see a bright outcome for the future. Some do, and those are the people that dont know how tragic global warming has come too. Or they outright deny the existence of it. I mean, the atmosphere that holds our oxygen in, is in grave danger of collapsing. We talk about wanting to plant more trees, and we are, and yes, its helping. But, if we dont have any atmosphere we dont have any oxygen. Now, I myself, am a Christian, I\\'m not like those \"Boomer\" Christian\\'s tho, that\\'s what I call all those., homophobic, racist, sexist Christian\\'s, etc, and personally, I believe that god is going to come back, so that\\'s why I\\'m, well be it, still worried about losing this planet. 
But, im not worried about losing my soul, now, I\\'m not saying to be a Christian or anything, but try to find something you can have faith in/be hopeful for. I would suggest studying about buddhism and Hinduism, actually, because those are very calming religions.', 'Yes, global warming is real, and some of it is man made. But climate change has happened before we started burning coal and oil.\\n\\nDid you know that before global warming, we had 100s of years of cold winters called the little ice age? Before that, the weather was even warmer than it is now. There used to be vineyards in northern Britain during Roman times.\\n\\nSo yes, we should protest about pollution, destruction of the environment and our stupid consumer society. \\n\\nMore importantly we should do stuff like cycling instead of driving, insulating our houses, and not buying stuff unless you intend to use it until it wears out.\\n\\nBut please stop talking about the end of the world.', \"I know exactly how you feel. I see it too. It's deeply upsetting. I don't have any real words of comfort and I'm not even terribly knowledgeable about climate change. I just know that things are different than they used to be. I suppose the thing that keeps me going is that I still believe that human beings can and will find some answer around this, though a lot of people will perish because of it. I believe that nature itself will find a balance again, should this be the end of the human race. It was a good run and a happy accident mostly. An interesting and unique accident. In the grand scheme of things, humanity and the planet have gone through countless changes over thousands, millions, and billions of years. This is one of those changes. We're witnessing history in the making, for whatever that's worth. Still though, I'm sad that winter won't be as cold or snowy as it used to be.\", 'As we look through an open window at the future, so many refused to even draw the curtains to look .', 'FadulLuix makes some good points. For example warming could cause release of methane hydrates from tundra would cause another big increase in temperature. Meltwater from greenland could divert the Atlantic current which keeps Europe warm. These would be major catastrophes, lots of animals and people would die. \\n\\nHowever, extinction is another level, means none of a species survive. So far only extinction only happened because humans hunting, farming, introducing rats and cats to isolated islands etc.\\n\\nI have seen no scientific study explaining how CO2 warming could lead to extinction.', 'Check it out, this may be what your looking for: \\nhttps://www.reekoscience.com/science-experiments/miscellaneous/how-to-create-terrarium-vivarium-self-sustainable-bottle-garden\\nD', \"Don't worry, extinction won't happen. \\n\\nWorst case we spend a load of money moving agriculture and wildlife north, fight wars over territory and immigration, have to build sea defences like the dutch.\\n\\nIt's bad, well worth protesting against and changing your lifestyle, but please don't lose hope\", 'r/collapse', '[removed]', 'Focus on what can be done, not what might happen. Focus your efforts to learn more and drive political and local change on this issue. Find a local group that spreads awareness. Etc.\\n\\nTalk about it.(most important)', \"Decide that you're going to be the one to figure out how to efficiently remove and sequester carbon dioxide from the atmosphere.\", 'I just picked up a good book called Truth to Power by Al Gore. 
Itโ€™s the sequel to An Inconvenient Truth. Truth to Power is a kind of handbook for what to talk to people about, how you can make change, and how you can talk to policymakers. Reading it gave me a sense of hope, that we can still turn this around. It wonโ€™t be easy but we all need to do our share. Your generation will help us change our ways. Sorry for my generations contribution to this mess.', 'If anything is to be done about the on-rushing climate disaster, it will be done because people face up to it, not worry about how to feel OK with it.', 'Totally feel this also, hope to see some inspiring responses in this thread because things feel bleak.', 'I feel worried about the changes of our planet also. Itโ€™s scary to think how life would be once we are beyond a tipping point. Heading for extinction.\\n\\nTo feel better about it, do what you can to help and just trust that everyone else is, or will, too. \\n\\nEasiest thing to do is learn more about it and then chat about what you know with people. Thatโ€™ll spread awareness (or improve your own) which is the first steps of making change.\\n\\nAfter I read up a bit, the most I felt I could do to oppose the fat cats of the world, as a small fish, was to support someone/thing that could.\\nThere are charities that fight the big corporations. I read the union of concerned scientists were a good one for that stuff. (Also read consistent donations to a specific charity is most efficient)\\nI suppose voting for the best (lesser of the evil) government is an easy one also.\\n\\nOther things to do are, eat less meat, eat more locally sourced foods. If you can afford it, get an electric car.\\n\\nThatโ€™s my 2 pence.', 'I couldnโ€™t tell you, I feel hopeless as hell all we can hope for is that once the droughts and famines and wars wipe out our foolish existence everyone who makes it through can rebuild and learn to co exist with the planet thatโ€™s trying to burn us off', 'Fight the oil companies. 
Fight them', 'Read this paper and do further research [link](https://www.thegwpf.org/content/uploads/2018/02/Groupthink.pdf)', \"Even if the planet is destroyed in the next few decades or so, it's unlikely humanity will die with it.\\n\\nSomething I think a lot of people either don't know or forget is that saving the planet isn't the only chance of us surviving, the most likely alternative is living on mars (which yes is a decently plausible idea) and it's definitely not ideal but at least we won't all be dead.\\nBut I doubt it will come to that, we've made decent strides toward helping the planet (not big enough strides but still) and more people are learning about it, it's being brought up more in politics, people are researching different things to help more, and we're coming closer to a time where the people who don't believe in it, or just don't have a reason to care, will no longer have power.\", 'There are other theories to answer the Fermi paradox that are more bright !\\n\\nThe possibility that the birth of life on a planet with good conditions remains extremely unlikely \\n\\nThe fact that alien spot is but donโ€™t consider us smart enough to make contact \\n\\nEtc ...', 'Stop being that pessimistic.\\n\\nNobody really knows what will happen in the future.\\n\\nOf course this is a grave subject and global warming + a lot of other things will be a challenge for the human race, but we are not at the point where we should think about what we could left behind us.', \"I don't think you quite understand the Fermi Paradox.\\n\\nThe Fermi Paradox is the fact that life *should* be incredibly abundant in the universe, but for some reason no one out there seems to be talking or making themselves known. We can't find anything.\\n\\nThere are a huge number of possible solutions to this paradox.The one you are thinking of is the Great Filter solution: the idea that there is some evolutionary step or technological leap that virtually all life cannot cross, and that keeps them from exploring the stars where we might detect traces of them. This is probably the most popular solution to this problem, but even those who believe it are very split on where the Great Filter lies - ahead of us, or before us. It could be either. Incidentally, if we ever find life on another planet in our solar system, the odds are very, very likely that the Great Filter is in front of us, not behind. That would be a bummer.\\n\\nBut as someone in this thread already stated, there are a number of other possible solutions. The Dark Forest Theory, the Apex Predator Theory, on and on and on. They are all worth looking into (here's the first in a cool series of videos explaining some of them: https://youtu.be/sNhhvQGsMEc).\\n\\nBut setting the Fermi Paradox aside - I mean, I understand your feelings. It's hard to deal with ignorance, but that has always been true. Continue to do what you know is right, that's all anyone can do.\\n\\nBesides hope. We need to hope! There are massive problems threatening humanity at an existential level, but we don't know the future. We don't know what we are ultimately capable of achieving when our back is in the corner (and boy, it will be soon). Look into carbon scrubbing and Co2 mineralization technology. It exists and it will get better with time. Vertical farms, lab grown meats, the theoretical science of weather manipulation. 
There is a fucking metric SHIT TON of things we can do and try before we throw in the towel and resign all hope for our planet and our species.\\n\\nBe one of the people that hope for a better future. We need you.\", 'Nobody is getting out of this life alive. All that matters is what you do while youโ€™re here. Your eyes are open. Live your best life and donโ€™t worry about what you canโ€™t control, just make better what you can.', 'Fact is, youโ€™re not going to likely change someoneโ€™s mind until an experience changes it for them. So the real question is, what are you physically doing to mitigate climate change? For example, I acknowledge I probably wonโ€™t change peopleโ€™s minds, but I can have a positive impact. I have studied environmental sciences and I currently work at a wastewater plant. It may not be huge in the grand scheme, but itโ€™s what I can currently do with my current limitations.', \"The Earth is not dying. Civilization is collapsing. It is hubris to think the Epoch we are causing will stack up to those life has survived.\\n\\nIf we don't what the far smaller amount of humans that will be around in 100 years to be hunter gathers with the culture and knowledge of civilization rapidly being erased by time... we need to get our shit together.\", 'Where did you find this? We need to get this on YT trending', \"I haven't watched the video, so I dont know if it mentions this, but not alot of people know that our atmosphere could collapse, within the next few years actually, and almost nobody is aware of that, no atmosphere = no oxygen, we would all die, if anybody wants evidence to back up this just reply to my comment and I'll provide it\", \"Increasing concentrations of greenhouse gasses are causing more of the sun's energy to be trapped in the climate system.\", 'Donโ€™t worry. Nuclear winter will cancel global warming out.', 'Did you just make an argument for why virtue signaling is ok? Why emotion should trump hard evidence? Thereโ€™s nothing wrong with having a gut feeling but to let that dictate action over rational thought is not something I think anyone will benefit from.', 'Holy moly!!!\\n\\nThis means the oceans have absorbed the energy equivalent to 3.6 billion Hiroshima atomic bombs (63,000,000,000,000 Joules) in 25 years, lead author Lijing Cheng, an associate professor in oceanography at the IAP, said in\\xa0a statement.', 'https://www.huffpost.com/entry/world-oceans-hottest-history_n_5e1d0bd9c5b6640ec3d9a1a4', 'As the article says I think heโ€™s a realist. I just hope the world wakes up before itโ€™s too late. I donโ€™t think youโ€™ll ever bring that many people together for a cause in time to save them selves', 'ECOCIDE', 'https://www.technologyreview.com/s/615035/australias-fires-have-pumped-out-more-emissions-than-100-nations-combined/', \"Weird how in Australian media we tend to have the courtesy to use the terminology local to the phenomenon in question, (brushfires, wildfires, as you like...) but nobody overseas bothers to use our term for our disasters. (They're called bushfires here in Australia, in case you were interested. 
We also have cyclones, not typhoons or hurricanes.)\\n\\n\\nEven more depressing is how many disasters we have here to make these issues of language stand out.\"]\n" ], [ "# Store comments in a DataFrame using a dictionary as our input\n# This sets the column name as the key of the dictionary, and the list of values as the values in the DataFrame\nsubreddit_comments_df = pd.DataFrame(data={'comment': subreddit_comments})\nsubreddit_comments_df", "_____no_output_____" ], [ "# This is an example of how we split up the comments into individual words.\n# This technique will be used again to get the scores of each individual word.\nfor comment in subreddit_comments_df['comment']: # loop over each word\n comment_words = comment.split() # split comments into individual words\n for word in comment_words: # loop over idndividual words in each comment\n word = word.strip('?:!.,;\"!@()#-') # remove extraneous characters\n word = word.replace(\"\\n\", \"\") # remove end of line\n print(word)\n \n break # end the loop after one comment", "Part\n2\nwould\nprobably\ngo\nsomething\nlike\nthis\nOk\nso\nhe's\nill\nbut\nthe\nimportant\nthing\nis\nthat\nit's\nnot\nmy\nfault\n" ] ], [ [ "### Now we will use the sentiment file called AFINN-en-165.txt. This file contains a sentiment score for 3382 words. More information can be found here: https://github.com/fnielsen/afinn With the sentiment file we will assign scores to words within the top comments that are found in the AFINN file", "_____no_output_____" ] ], [ [ "# We load the AFINN sentiment table into a Python dictionary\n\nsentimentfile = open(\"AFINN-en-165.txt\", \"r\") # open sentiment file\nscores = {} # an empty dictionary\nfor line in sentimentfile: # loop over each word / sentiment score\n word, score = line.split(\"\\t\") # file is tab-delimited\n scores[word] = int(score) # convert the scores to intergers\n \nsentimentfile.close()", "_____no_output_____" ], [ "# print out the first 10 entries of the dictionary\ncounter = 0\nfor key, value in scores.items():\n print(key, ':', value)\n counter += 1\n if counter >= 10:\n break", "abandon : -2\nabandoned : -2\nabandons : -2\nabducted : -2\nabduction : -2\nabductions : -2\nabhor : -3\nabhorred : -3\nabhorrent : -3\nabhors : -3\n" ], [ "# we create a dictionary for storing overall counts of sentiment values\nsentiments = {\"-5\": 0, \"-4\": 0, \"-3\": 0, \"-2\": 0, \"-1\": 0, \"0\": 0, \"1\": 0, \"2\": 0, \"3\": 0, \"4\": 0, \"5\": 0}\n\nfor word in subreddit_comments_df['comment']: # loop over each word\n comment_words = word.split() # split comments into individual words\n for word in comment_words: # loop over individual words in each comment\n word = word.strip('?:!.,;\"!@()#-') # remove extraneous characters\n word = word.replace(\"\\n\", \"\") # remove end of line\n if word in scores.keys(): # check if word is in sentiment dictionary\n score = scores[word] # check if word is in sentiment dictionary\n sentiments[str(score)] += 1 # add one to the sentiment score", "_____no_output_____" ], [ "# Print the scores\nfor sentiment_value in range(-5, 6):\n # this uses string formatting, more on this here: https://realpython.com/python-f-strings/\n print(f\"{sentiment_value} sentiment:\", sentiments[str(sentiment_value)])\n \n# this would be equivalent, but obviously much less compact and elegant\n# print(\"-5 sentiments \", sentiments[\"-5\"])\n# print(\"-4 sentiments \", sentiments[\"-4\"])\n# print(\"-3 sentiments \", sentiments[\"-3\"])\n# print(\"-2 sentiments \", sentiments[\"-2\"])\n# print(\"-1 
sentiments \", sentiments[\"-1\"])\n# print(\" 0 sentiments \", sentiments[\"0\"])\n# print(\" 1 sentiments \", sentiments[\"1\"])\n# print(\" 2 sentiments \", sentiments[\"2\"])\n# print(\" 3 sentiments \", sentiments[\"3\"])\n# print(\" 4 sentiments \", sentiments[\"4\"])\n# print(\" 5 sentiments \", sentiments[\"5\"])", "-5 sentiment: 1\n-4 sentiment: 36\n-3 sentiment: 180\n-2 sentiment: 374\n-1 sentiment: 219\n0 sentiment: 0\n1 sentiment: 367\n2 sentiment: 442\n3 sentiment: 122\n4 sentiment: 9\n5 sentiment: 0\n" ], [ "# Now let us put the sentiment scores into a dataframe.\ncomment_sentiment_df = pd.DataFrame(data={'Sentiment_Value': list(sentiments.keys()), 'Counts': list(sentiments.values())})\n# the 'value' column is a string; convert to integer (numeric type)\ncomment_sentiment_df['Sentiment_Value'] = comment_sentiment_df['Sentiment_Value'].astype('int')\n\n# We normalize the counts so we will be able to compare between two subreddits on the same plot easily\ncomment_sentiment_df['Normalized_Counts'] = comment_sentiment_df['Counts'] / comment_sentiment_df['Counts'].sum() # Normalize the Count\ncomment_sentiment_df", "_____no_output_____" ] ], [ [ "# Prompt\n## We will plot the data so it is easier to visualize. \n## In each of the three cells below, plot the Count, Normalized Count, and Normalized Score vs Sentiment Value. In each plot add the appropriate x-label, y-label, plot title, and color", "_____no_output_____" ] ], [ [ "# Count vs Sentiment Value Plot\nplt.bar(comment_sentiment_df['Sentiment_Value'], comment_sentiment_df['Counts'], color='green') # add the y-values and color\nplt.xlabel('Sentiment Value') # add x-label\nplt.ylabel('Sentiment Count') # add y-label\nplt.title('Reddit Global Warming Sentiment Analysis') # add title\nplt.show()", "_____no_output_____" ], [ "comment_sentiment_df['Normalized_Counts'] = comment_sentiment_df['Counts'] / comment_sentiment_df['Counts'].sum() # Normalize the Count\ncomment_sentiment_df", "_____no_output_____" ], [ "# Count vs Sentiment Value Plot\nplt.bar(comment_sentiment_df['Sentiment_Value'], comment_sentiment_df['Normalized_Counts'], color='gray') # add the y-values and color\nplt.xlabel('Sentiment Value') # add x-label\nplt.ylabel('Normalized Counts') # add y-label\nplt.title('Normalized Counts vs Sentiment Value Plot') # add title\nplt.show()", "_____no_output_____" ] ], [ [ "# Prompt\n### In the cell below, enter a subreddit you which to compare the sentiment of the post comments, decide how far back to pull posts, and how many posts to pull comments from.\n\nPick a subreddit that can be compared with your first subreddit in terms of sentiment. You may want to go back up to the first subreddit section and change some parameters. For example, do you want to find top posts, or hot posts? From what time period? How many posts? 
If you change these settings above (the `number_of_posts` and `time_period` variables) you should re-run the notebook from the beginning.", "_____no_output_____" ], [ "The following code is the same as we did for our first subreddit, just condensed into one code cell.", "_____no_output_____" ] ], [ [ "subreddit_2 = reddit.subreddit('Futurology').hot(limit=number_of_posts)\n\n\n# Create an empty list to store the data\nsubreddit_comments_2 = []\n\n# go through each post in our subreddit and put the comment body and id in our dictionary\nfor post in tqdm(subreddit_2, total=number_of_posts):\n submission = reddit.submission(id=post)\n submission.comments.replace_more(limit=0) # This line of code expands the comments if โ€œload more commentsโ€ and โ€œcontinue this threadโ€ links are encountered\n for top_level_comment in submission.comments: \n subreddit_comments_2.append(top_level_comment.body) # add the comment to our list of comments\n \n\n# Store comments in a DataFrame using a dictionary as our input\n# This sets the column name as the key of the dictionary, and the list of values as the values in the DataFrame\nsubreddit_comments_df_2 = pd.DataFrame(data={'comment': subreddit_comments_2})\n \n# we create a dictionary for storing overall counts of sentiment values\nsentiments_2 = {\"-5\": 0, \"-4\": 0, \"-3\": 0, \"-2\": 0, \"-1\": 0, \"0\": 0, \"1\": 0, \"2\": 0, \"3\": 0, \"4\": 0, \"5\": 0}\n\nfor comment in subreddit_comments_df_2['comment']: # loop over each comment\n comment_words = comment.split() # split comments into individual words\n for word in comment_words: # loop over individual words in each comment\n word = word.strip('?:!.,;\"!@()#-') # remove extraneous characters\n word = word.replace(\"\\n\", \"\") # remove end of line\n if word in scores.keys(): # check if word is in sentiment dictionary\n score = scores[word] # check if word is in sentiment dictionary\n sentiments_2[str(score)] += 1 # add one to the sentiment score\n \n# Now let us put the sentiment scores into a dataframe.\ncomment_sentiment_df_2 = pd.DataFrame(data={'Sentiment_Value': list(sentiments_2.keys()), 'Counts': list(sentiments_2.values())})\n# the 'value' column is a string; convert to integer (numeric type)\ncomment_sentiment_df_2['Sentiment_Value'] = comment_sentiment_df_2['Sentiment_Value'].astype('int')\n\n# We normalize the counts so we will be able to compare between two subreddits on the same plot easily\ncomment_sentiment_df_2['Normalized_Counts'] = comment_sentiment_df_2['Counts'] / comment_sentiment_df_2['Counts'].sum() # Normalize the Count\ncomment_sentiment_df_2", "_____no_output_____" ] ], [ [ "# Prompt\n## We will plot the data so it is easier to visualize. \n## In each of the three cells below, plot the Count, Normalized Count, and Normalized Score data vs Sentiment Value. 
In each plot add the appropriate x-label, y-label, plot title , and color", "_____no_output_____" ] ], [ [ "# Count vs Sentiment Value Plot\nplt.bar(comment_sentiment_df_2['Sentiment_Value'], comment_sentiment_df_2['Counts'], color='blue') # add the y-values and color\nplt.xlabel('Sentiment Value') # add x-label\nplt.ylabel('Sentiment Counts') # add y-label\nplt.title('Futurology Reddit Sentiment Value Analysis') # add title\nplt.show()", "_____no_output_____" ], [ "# Normalized Counts vs Sentiment Value Plot\nplt.bar(comment_sentiment_df_2['Sentiment_Value'], comment_sentiment_df_2['Normalized_Counts'], color='black') # add the y-values and color\nplt.xlabel('Sentiment Value') # add x-label\nplt.ylabel('Normalized Counts') # add y-label\nplt.title('Normalized Counts vs Sentiment Value Plot') # add title\nplt.show()", "_____no_output_____" ] ], [ [ "# Prompt\n## Now we will overlay the baseline comment sentiment and the subreddit comment sentiment to help compare.\n\n## In each of the three cells below, overlay the plots the Count, Normalized Count, and Normalized Score data vs Sentiment Value. In each plot add the appropriate x-label, y-label, plot title, and plot color", "_____no_output_____" ] ], [ [ "# Count vs Sentiment Value Plot\nplt.bar(comment_sentiment_df['Sentiment_Value'], comment_sentiment_df['Counts'], color='green', label='Global Warming') # add first subreddit data and color\n\n# add second subreddit with a slight offset of x-axis; alpha is opacity/transparency\nplt.bar(comment_sentiment_df_2['Sentiment_Value'] + 0.2, comment_sentiment_df_2['Counts'], color='brown', label='Confidence in Future', alpha=0.5) # add second subreddit and color\nplt.legend() # show the legend\n\nplt.xlabel('Sentiment Value') # add x-label\nplt.ylabel('Sentiment Count') # add y-label\nplt.title('Count vs Sentiment Value') # add title\nplt.tight_layout() # tight_layout() automatically adjusts margins to make it look nice\nplt.show() # show the plot", "_____no_output_____" ], [ "# Normalized Count vs Sentiment Value Plot\nplt.bar(comment_sentiment_df['Sentiment_Value'], comment_sentiment_df['Normalized_Counts'], color='gray', label='Global Warming') # add first subreddit data and color\nax = plt.gca() # gets current axes of the plot for adding another dataset to the plot\n\n# add second subreddit with a slight offset of x-axis\nplt.bar(comment_sentiment_df_2['Sentiment_Value'] + 0.2, comment_sentiment_df_2['Normalized_Counts'], color='blue', label='Confidence in Future', alpha=0.5) # add second subreddit and color\nplt.legend() # show the legend\n\nplt.xlabel('Sentiment Value') # add x-label\nplt.ylabel('Normalized Counts') # add y-label\nplt.title('Normalized Counts vs Sentiment Value') # add title\nplt.tight_layout() # tight_layout() automatically adjusts margins to make it look nice\nplt.show() # show the plot", "_____no_output_____" ] ], [ [ "# Stretch goal (bonus-ish)\n### Although this is not formally a bonus for points, it is a learning opportinity. You are not required to complete the following part of this notebook for the assignment.\n\nOur sentiment analysis technique above works, but has some shortcomings. The biggest shortcoming is that each word is treated individually. But what if we have a sentence with a negation? 
For example:\n\n'This is not a bad thing.'\n\nThis sentence should be positive overall, but AFINN only has the word 'bad' in the dictionary, and so the sentence gets an overall negative score of -3.\n\nThe most accurate sentiment analysis methods use neural networks to capture context as well as semantics. The drawback of NNs is they are computationally expensive to train and run.\n\nAn easier method is to use a slightly-improved sentiment analysis technique, such as TextBlob or VADER (https://github.com/cjhutto/vaderSentiment) in Python. Both libraries use a hand-coded algorithm with word scores like AFINN, but also with additions like negation rules (e.g. a word after 'not' has it's score reversed).\n\nOther sentiment analysis libraries in Python can be read about here: https://www.iflexion.com/blog/sentiment-analysis-python\n\n### The stretch goal\nThe stretch goal is to use other sentiment analysis libraries on the Reddit data we collected, and compare the various approaches (AFINN word-by-word, TextBlob, and VADER) using plots and statistics. For the AFINN word-by-word approach, you will need to either sum up the sentiment scores for each comment, or average them. You might also divide them by 5 to get the values between -1 and +1.\n\nHere is a brief example of getting scores from the 3 methods described above. We can see while the raw AFINN approach gives a score of -0.6 (if normalized), TextBlob shows 0.35 and VADER shows 0.43.", "_____no_output_____" ] ], [ [ "!conda install -c conda-forge textblob", "_____no_output_____" ], [ "!pip install textblob vaderSentiment", "_____no_output_____" ], [ "sentence = 'This is not a bad thing.'\n[(word, scores[word]) for word in sentence.split() if word in scores]", "_____no_output_____" ], [ "from textblob import TextBlob\ntb = TextBlob(sentence)\nprint(tb.polarity)\nprint(tb.sentiment_assessments)", "_____no_output_____" ], [ "from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nanalyzer = SentimentIntensityAnalyzer()\nanalyzer.polarity_scores(sentence)", "_____no_output_____" ] ] ]
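The stretch goal above describes the comparison but stops at single-sentence demos of TextBlob and VADER. Below is a minimal sketch of one way to score every scraped comment with all three approaches and compare them, assuming the `subreddit_comments_df` DataFrame and the AFINN `scores` dictionary built earlier in the notebook are still in scope; the helper name `afinn_comment_score` and the choice to average word scores and divide by 5 are illustrative, not part of the original notebook.

```python
import pandas as pd
import matplotlib.pyplot as plt
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

analyzer = SentimentIntensityAnalyzer()

def afinn_comment_score(comment):
    """Average the AFINN scores of the words in a comment, scaled to roughly [-1, 1]."""
    words = [w.strip('?:!.,;"!@()#-').replace("\n", "") for w in comment.split()]
    word_scores = [scores[w] for w in words if w in scores]  # `scores` is the AFINN dict loaded earlier
    if not word_scores:
        return 0.0
    return (sum(word_scores) / len(word_scores)) / 5  # AFINN words are scored from -5 to +5

comparison_df = pd.DataFrame({
    'afinn': subreddit_comments_df['comment'].apply(afinn_comment_score),
    'textblob': subreddit_comments_df['comment'].apply(lambda c: TextBlob(c).sentiment.polarity),
    'vader': subreddit_comments_df['comment'].apply(lambda c: analyzer.polarity_scores(c)['compound']),
})

# Summary statistics and pairwise correlations give a quick numeric comparison
print(comparison_df.describe())
print(comparison_df.corr())

# Overlaid histograms show how differently the three methods distribute comment scores
comparison_df.plot.hist(bins=20, alpha=0.5)
plt.xlabel('Sentiment score (-1 to +1)')
plt.title('Per-comment sentiment: AFINN vs TextBlob vs VADER')
plt.show()
```

If the pairwise correlations come out low, that is a sign the word-by-word AFINN counts are being pulled around by negation and context that TextBlob and VADER partially handle.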
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb0e75d33d48ff2c1adc94d76ac31a6e26f873e7
364,910
ipynb
Jupyter Notebook
notebooks/analysis1.ipynb
data301-2021-winter1/group05-project
57fc3fefe98b88d95e928c44cc22d4bfb01e23f0
[ "MIT" ]
null
null
null
notebooks/analysis1.ipynb
data301-2021-winter1/group05-project
57fc3fefe98b88d95e928c44cc22d4bfb01e23f0
[ "MIT" ]
null
null
null
notebooks/analysis1.ipynb
data301-2021-winter1/group05-project
57fc3fefe98b88d95e928c44cc22d4bfb01e23f0
[ "MIT" ]
null
null
null
126.091914
49,396
0.810181
[ [ [ "# Emily Harvey", "_____no_output_____" ], [ "## Research question/interests\n\nIt would be cool to see how much of the money made overall in Canada had to do with tourism from BC and the breakdown of which categories within tourism BC makes the most money in. As well as overall growth of tourism in Canada and BC from 2014-2017.", "_____no_output_____" ] ], [ [ "import pandas as pd \npd.read_csv('../data/raw/tourism.csv')", "_____no_output_____" ] ], [ [ "## Milestone 3:\n### Task 1: EDA", "_____no_output_____" ] ], [ [ "#Importing Libraries\nimport numpy as np\nimport seaborn as sns\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport missingno\nimport pandas as pd \n%matplotlib inline", "_____no_output_____" ], [ "df = pd.read_csv('../data/raw/tourism.csv')\n\n# Simple Preview and Stats of Data\n# Shows column names, non-null count, and Dtype\n# Shows us that columns 13 and 14 only have null numbers and need to be deleted.\n# Also shows that the most common Dtype in our data are objects\ndf.info()\ndf.head()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 8960 entries, 0 to 8959\nData columns (total 16 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 REF_DATE 8960 non-null int64 \n 1 GEO 8960 non-null object \n 2 DGUID 8960 non-null object \n 3 Indicators 8960 non-null object \n 4 Products 8960 non-null object \n 5 UOM 8960 non-null object \n 6 UOM_ID 8960 non-null int64 \n 7 SCALAR_FACTOR 8960 non-null object \n 8 SCALAR_ID 8960 non-null int64 \n 9 VECTOR 8960 non-null object \n 10 COORDINATE 8960 non-null object \n 11 VALUE 7521 non-null float64\n 12 STATUS 1439 non-null object \n 13 SYMBOL 0 non-null float64\n 14 TERMINATED 0 non-null float64\n 15 DECIMALS 8960 non-null int64 \ndtypes: float64(3), int64(4), object(9)\nmemory usage: 1.1+ MB\n" ], [ "# summary stats table\ndf.describe().T", "_____no_output_____" ], [ "# generate preview of entries with null values\nif df.isnull().any(axis=None):\n print(\"\\nPreview of data with null values:\\n\")\n print(df[df.isnull().any(axis=1)].head(5))\n missingno.matrix(df)\n plt.show()\n\n# Shows us that there are only null values in the SYMBOL & TERMINATED columns and many null values in the STATUS and VALUE columns\n# Both the status, symbol, & terminated columns are not being used in the analysis so can be deleted as well\n# We also need to get rid of the rows with the unknown values", "\nPreview of data with null values:\n\n REF_DATE GEO DGUID Indicators \\\n0 2014 Canada 2016A11124 Total domestic supply \n1 2014 Canada 2016A11124 Total domestic supply \n2 2014 Canada 2016A11124 Total domestic supply \n3 2014 Canada 2016A11124 Total domestic supply \n4 2014 Canada 2016A11124 Total domestic supply \n\n Products UOM UOM_ID SCALAR_FACTOR SCALAR_ID \\\n0 Total tourism expenditures Dollars 81 millions 6 \n1 Total tourism products Dollars 81 millions 6 \n2 Total transportation Dollars 81 millions 6 \n3 Passenger air transport Dollars 81 millions 6 \n4 Passenger rail transport Dollars 81 millions 6 \n\n VECTOR COORDINATE VALUE STATUS SYMBOL TERMINATED DECIMALS \n0 v1001801179 1.1.1 3654954.0 NaN NaN NaN 1 \n1 v1001801180 1.1.2 212829.4 NaN NaN NaN 1 \n2 v1001801181 1.1.3 98108.0 NaN NaN NaN 1 \n3 v1001801182 1.1.4 19961.5 NaN NaN NaN 1 \n4 v1001801183 1.1.5 309.6 NaN NaN NaN 1 \n" ], [ "#checking for duplicated entries\nif len(df[df.duplicated()]) > 0:\n print(\"No. 
of duplicated entries: \", len(df[df.duplicated()]))\n print(df[df.duplicated(keep=False)].sort_values(by=list(df.columns)).head())\nelse:\n print(\"No duplicated entries found\")", "No duplicated entries found\n" ], [ "#tells us how many unique values there are in our dataframe\ndf.nunique(axis=0)\n\n#further helps us decide which columns to get rid of as they are not useful", "_____no_output_____" ], [ "#looking at the unique values of the products\ndf.Products.unique()\n#gives us a list of the values in the products column", "_____no_output_____" ], [ "#looking at the unique values of the indicators\ndf.Indicators.unique()\n#gives us a list of the values in the indicators column", "_____no_output_____" ], [ "#looking at the unique values of the geo\ndf.GEO.unique()\n#gives us a list of the values in the geo column\n#helps us see which rows we want to get rid of as we only want to study BC and Canada", "_____no_output_____" ], [ "sns.set_theme(style=\"ticks\",\n font_scale=1\n )\nplt.rc(\"axes.spines\", top=False, right=False)", "_____no_output_____" ], [ "\ntotalDomesticSupply = [3654954, 3910989.7, 421110.5, 483396]\ntotalDemand = [84580.6, 95998, 15570.4, 19541.5]\ntotalExports = [32668.7, 42535.5, 8801.2, 11839]\ntotalImports = [60274.6, 67791.4, 10269.8, 11689.5]\nfig, ax = plt.subplots(figsize=(25, 25))\nnew_df = pd.DataFrame([['Total Domestic Supply', 'Canada 2014', 3654954], \n ['Total Domestic Supply', 'Canada 2017', 3910989.7],\n ['Total Domestic Supply', 'BC 2014', 421110.5],\n ['Total Domestic Supply', 'BC 2017', 483396],\n ['Total Demand', 'Canada 2014', 84580.6],\n ['Total Demand', 'Canada 2017', 95998],\n ['Total Demand', 'BC 2014', 15570.4],\n ['Total Demand', 'BC 2017', 19541.5],\n ['Total Exports', 'Canada 2014', 32668.7],\n ['Total Exports', 'Canada 2017', 42535.5],\n ['Total Exports', 'BC 2014', 8801.2],\n ['Total Exports', 'BC 2017', 11839],\n ['Total Imports', 'Canada 2014', 60274.6],\n ['Total Imports', 'Canada 2017', 67791.4],\n ['Total Imports','BC 2014',10269.8],\n ['Total Imports','BC 2017',11689.5]], columns=['Total Tourism Expenditures', 'Location and Year', 'Dollars (in millions)'])\nsns.barplot(data=new_df, x='Location and Year', y='Dollars (in millions)', hue='Total Tourism Expenditures')\n# this graph would be better by splitting it into seperate graphs in order to see the values better", "_____no_output_____" ], [ "TotalDomesticSupplyDf = pd.DataFrame([[ 'Canada 2014', 3654954], \n ['Canada 2017', 3910989.7],\n ['BC 2014', 421110.5],\n ['BC 2017', 483396]], columns=['Location and Year', 'Dollars (in millions)'])\nfig, ax = plt.subplots(figsize=(16, 8))\nsns.barplot(data=TotalDomesticSupplyDf, x='Location and Year', y='Dollars (in millions)')\nplt.title('Total Dometic Supply of BC and Canada in 2014 and 2017 in Millions')", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(18, 8))\nnew_df = pd.DataFrame([['Total Demand', 'Canada 2014', 84580.6],\n ['Total Demand', 'Canada 2017', 95998],\n ['Total Demand', 'BC 2014', 15570.4],\n ['Total Demand', 'BC 2017', 19541.5],\n ['Total Exports', 'Canada 2014', 32668.7],\n ['Total Exports', 'Canada 2017', 42535.5],\n ['Total Exports', 'BC 2014', 8801.2],\n ['Total Exports', 'BC 2017', 11839],\n ['Total Imports', 'Canada 2014', 60274.6],\n ['Total Imports', 'Canada 2017', 67791.4],\n ['Total Imports','BC 2014',10269.8],\n ['Total Imports','BC 2017',11689.5]], columns=['Indicator', 'Location and Year', 'Dollars (in millions)'])\nsns.barplot(data=new_df, x='Location and Year', y='Dollars (in millions)', 
hue='Indicator')\nplt.title('Total Tourism Expenditures per Indicator for 2014 and 2017')", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(18, 8))\nnew_df = pd.DataFrame([['Canada 2014','Total Demand', 84580.6],\n ['Canada 2017','Total Demand', 95998],\n ['BC 2014', 'Total Demand', 15570.4],\n ['BC 2017','Total Demand', 19541.5],\n ['Canada 2014','Total Exports', 32668.7],\n ['Canada 2017','Total Exports', 42535.5],\n ['BC 2014','Total Exports', 8801.2],\n ['BC 2017', 'Total Exports',11839],\n ['Canada 2014','Total Imports', 60274.6],\n ['Canada 2017','Total Imports', 67791.4],\n ['BC 2014','Total Imports', 10269.8],\n ['BC 2017','Total Imports', 11689.5]], columns=['Location and Year','Indicator', 'Dollars (in millions)'])\nsns.barplot(data=new_df, x='Indicator', y='Dollars (in millions)', hue='Location and Year')\nplt.title('Total Tourism Expenditures per Indicator for 2014 and 2017')\n# shows the difference between years better", "_____no_output_____" ] ], [ [ "### Task 2: Analysis Pipeline", "_____no_output_____" ], [ "#### Load in data", "_____no_output_____" ] ], [ [ "df = pd.read_csv('../data/raw/tourism.csv')\ndf", "_____no_output_____" ] ], [ [ "#### Clean Data", "_____no_output_____" ] ], [ [ "#remove unwanted columns\ndf_cleaned = df.drop(['DGUID','UOM_ID','SCALAR_ID','VECTOR', 'COORDINATE', 'STATUS','SYMBOL','TERMINATED', 'DECIMALS'],axis=1)", "_____no_output_____" ], [ "#remove unwanted rows\ndf_cleaned = df_cleaned[df_cleaned['GEO'].isin(['Canada','British Columbia'])]\ndf_cleaned = df_cleaned[df_cleaned['UOM'].isin(['Dollars'])]", "_____no_output_____" ], [ "#remove rows with null values\ndf_cleaned = df_cleaned.dropna(axis=0)\n#reset index\ndf_cleaned.reset_index()", "_____no_output_____" ] ], [ [ "#### Process/Wrangle Data", "_____no_output_____" ] ], [ [ "df_cleaned = df_cleaned.rename(columns = {'REF_DATE':'Year', 'GEO': 'Location', 'SCALAR_FACTOR': 'Scalar factor', 'VALUE':'Value' })\ndf_cleaned", "_____no_output_____" ] ], [ [ "### Task 3: Method Chain", "_____no_output_____" ], [ "#### Step 1", "_____no_output_____" ] ], [ [ "df = (\n pd.read_csv(\"../data/raw/tourism.csv\")\n .drop(columns=['DGUID','UOM_ID','SCALAR_ID','VECTOR', 'COORDINATE', 'STATUS','SYMBOL','TERMINATED', 'DECIMALS'],axis=1)\n .dropna()\n .query(\"GEO != ['Nunavut', 'Northwest Territories','Yukon','Newfoundland and Labrador','Prince Edward Island','Nova Scotia','New Brunswick','Quebec','Ontario','Manitoba','Saskatchewan', 'Alberta']\")\n .query(\"UOM != ['Percentage']\")\n .rename(columns={\"REF_DATE\":\"Year\", \"GEO\":\"Location\", \"SCALAR_FACTOR\":\"Scalar Factor\", \"VALUE\":\"Value\"})\n)", "_____no_output_____" ] ], [ [ "#### Step 2", "_____no_output_____" ] ], [ [ "def load_and_process(url_or_path_to_csv_file):\n # Method Chain 1\n df = (\n pd.read_csv(url_or_path_to_csv_file)\n .drop(columns=['DGUID','UOM_ID','SCALAR_ID','VECTOR', 'COORDINATE', 'STATUS','SYMBOL','TERMINATED', 'DECIMALS'],axis=1)\n .dropna()\n .query(\"GEO != ['Nunavut', 'Northwest Territories','Yukon','Newfoundland and Labrador','Prince Edward Island','Nova Scotia','New Brunswick','Quebec','Ontario','Manitoba','Saskatchewan', 'Alberta']\")\n .query(\"UOM != ['Percentage']\")\n .rename(columns={\"REF_DATE\":\"Year\", \"GEO\":\"Location\", \"SCALAR_FACTOR\":\"Scalar Factor\", \"VALUE\":\"Value\"})\n )\n return df", "_____no_output_____" ], [ "load_and_process('../data/raw/tourism.csv')", "_____no_output_____" ] ], [ [ "#### Step 3", "_____no_output_____" ] ], [ [ "import project_functions1\ndf = 
project_functions1.load_and_process(\"../data/raw/tourism.csv\")\ndf", "_____no_output_____" ], [ "df_cleaned.to_csv('emily_cleaned_data.csv')", "_____no_output_____" ] ], [ [ "### Task 4: Conduct your analysis", "_____no_output_____" ], [ "#### Research Question 1: Which categories within tourism BC makes the most money in", "_____no_output_____" ], [ "##### In the table below, we can see which categories have the highest total demand in BC. We can compare these numbers with the total demands in Canada to see what percentage of these catagories is from BC. ", "_____no_output_____" ] ], [ [ "dfBC = df[df['Location'].isin(['British Columbia'])]\ndfBC = dfBC[dfBC['Indicators'].isin(['Total demand'])]\ndfBC = dfBC[['Year','Products','Value']]\ndfBC = dfBC.sort_values(by=[\"Value\"], ascending=[False]).groupby(\"Value\")\ndfBC.head(10)\n### I don't know why head isn't working properly", "_____no_output_____" ], [ "dfCan = df[df['Location'].isin(['British Columbia'])]\ndfCan = dfCan[dfCan['Indicators'].isin(['Total demand'])]\ndfCan = dfCan[['Year','Products','Value']]\ndfCan = dfCan.sort_values(by=[\"Value\"], ascending=[False]).groupby(\"Value\")\ndfCan.head(10)\n### I still don't know why head isn't working properly", "_____no_output_____" ] ], [ [ "##### The top 3 catagories of higest demand in BC are Total Tourism Expenditures, Total Tourism Products and Total Transportion. These catagories remain top 3 in 2017 as well. We can see some growth from 2014 to 2017 in these catagories within BC as well as Canada. We can use this data in these catagories to find out what percentage of of each of these catagories comes from BC. When we do this we find that in 2014, BC contributed 15.55% of Total Tourism Expenditures, 15.80% of Total Tourism Products, and 16.44% of Total Transportation. In 2017, BC contributed 16.91% of Total Tourism Expenditures, 17.08% of Total Tourism Products, and 17.01% of Total Transportation. From these values, we can see that although Total Tourism Expenditures makes the most money for BC, it is not what BC contributes to the most in Canada, that would be Total Transportation. We can also see that from 2014-2017, Total Tourism Expenditures grew 1.36%, Total Tourism Products grew 1.28%, and Total Transportation grew 0.57%.", "_____no_output_____" ], [ "#### Research Question 2: Overall growth of tourism in Canada and BC from 2014-2017.", "_____no_output_____" ], [ "##### On the graphs below, we can see that there is growth in the total demand, the exports and the imports from 2014 to 2017 in both BC and Canada. 
", "_____no_output_____" ] ], [ [ "sns.barplot(data=new_df, x='Indicator', y='Dollars (in millions)', hue='Location and Year')\nplt.title('Total Tourism Expenditures per Indicator for 2014 and 2017')\nplt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)", "_____no_output_____" ], [ "new_Can = pd.DataFrame([['2014','Total Demand', 84580.6],\n ['2017','Total Demand', 95998],\n ['2014','Total Exports', 32668.7],\n ['2017','Total Exports', 42535.5],\n ['2014','Total Imports', 60274.6],\n ['2017','Total Imports', 67791.4],\n ], columns=['Year','Indicator', 'Dollars (in millions)'])\nsns.lineplot(x='Year', y='Dollars (in millions)', hue='Indicator', \n data=new_Can)\nplt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)\nplt.title('Tourism Growth Per Indicator in Canada From 2014-2018')", "_____no_output_____" ], [ "new_BC = pd.DataFrame([['2014', 'Total Demand', 15570.4],\n ['2017','Total Demand', 19541.5],\n ['2014','Total Exports', 8801.2],\n ['2017', 'Total Exports',11839],\n ['2014','Total Imports', 10269.8],\n ['2017','Total Imports', 11689.5]], columns=['Year','Indicator', 'Dollars (in millions)'])\nsns.lineplot(x='Year', y='Dollars (in millions)', hue='Indicator', \n data=new_BC)\nplt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)\nplt.title('Tourism Growth Per Indicator in BC From 2014-2018')", "_____no_output_____" ] ], [ [ "##### Looking at these graphs we can tell that growth did occur for all of these catagories. Using this data we can calculate the compound growth rate of each catagory. When I did this I focused on the coumpound growth rate of the total demand. The CGR for the total demand in Canada from 2014-2017 was 13.50% and the CGR for the total demand in BC was 25.50%. For exports and imports, in BC the CGR was 34.52% and 13.82%, respectively, and in Canada were 30.20% and 12.47%. ", "_____no_output_____" ], [ "#### Conclusion", "_____no_output_____" ], [ "##### The main things I found out were that:\n- the three biggest contributors to total demands in BC are Total Tourism Expenditures, Total Tourism Products and Total Transportion.\n- Although Total Tourism Expenditures were the biggest contributers in BC, Total Transportation contributed more to Canada.\n- All of these catagories grew form 2014-2017 and the one that grew the most was Total Tourism Expenditures.\n- The compound growth rate of the total demand from 2014 to 2017 was 25.50% in BC and 13.50% in Canada.\n- Exports had the largest compound growth in both Canada and BC.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cb0e790e517e8b404ec1452a466fb5c8bb96cd21
2,888
ipynb
Jupyter Notebook
docs/examples/driver_examples/Qcodes example with Minicircuits Switch boxes (USB-XSPDT).ipynb
jsaez8/Qcodes
74527ae6faf7ec914ae414c6a776307abf158675
[ "MIT" ]
2
2019-04-09T09:39:22.000Z
2019-10-24T19:07:19.000Z
docs/examples/driver_examples/Qcodes example with Minicircuits Switch boxes (USB-XSPDT).ipynb
jsaez8/Qcodes
74527ae6faf7ec914ae414c6a776307abf158675
[ "MIT" ]
null
null
null
docs/examples/driver_examples/Qcodes example with Minicircuits Switch boxes (USB-XSPDT).ipynb
jsaez8/Qcodes
74527ae6faf7ec914ae414c6a776307abf158675
[ "MIT" ]
1
2019-11-19T12:32:29.000Z
2019-11-19T12:32:29.000Z
20.195804
156
0.535665
[ [ [ "## Example for Minicircuits Switch Boxes controlled via USB", "_____no_output_____" ] ], [ [ "from qcodes.instrument_drivers.Minicircuits.USB_SPDT import USB_SPDT", "_____no_output_____" ] ], [ [ "change the serial number to the serial number on the sticker on the back of the device, or leave it blank if there is only one switch box connected\n\nThe driver_path should specify the url of the dll for controlling the instrument. You can find it here:\n\nhttps://ww2.minicircuits.com/softwaredownload/rfswitchcontroller.html\n\nDownload .NET dll and save somewhere. Unblock it (right click properties) and specify the path.", "_____no_output_____" ] ], [ [ "dev = USB_SPDT('test',\n serial_number='11703020018',\n driver_path= r'C:\\Users\\a-dovoge\\Qcodes\\qcodes\\instrument_drivers\\Minicircuits\\mcl_RF_Switch_Controller64')", "Connected to: Mini-Circuits RC-4SPDT-A18 (serial:11703020018, firmware:20) in 0.19s\n" ] ], [ [ "setting value to line one or two", "_____no_output_____" ] ], [ [ "dev.a(1)", "_____no_output_____" ] ], [ [ "reading value", "_____no_output_____" ] ], [ [ "dev.b()", "_____no_output_____" ] ], [ [ "setting all switches to line 2", "_____no_output_____" ] ], [ [ "dev.all(2)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb0e7fdcb4d6ab22cea32b29ffc702ad3282467b
55,237
ipynb
Jupyter Notebook
sloleks_accetuation2.ipynb
lkrsnik/accetuation
02724147f88aa034487c7922eb75e0fc321aa93f
[ "MIT" ]
null
null
null
sloleks_accetuation2.ipynb
lkrsnik/accetuation
02724147f88aa034487c7922eb75e0fc321aa93f
[ "MIT" ]
null
null
null
sloleks_accetuation2.ipynb
lkrsnik/accetuation
02724147f88aa034487c7922eb75e0fc321aa93f
[ "MIT" ]
null
null
null
189.817869
39,843
0.601626
[ [ [ "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport numpy as np\nfrom keras.models import load_model\nimport sys\nimport pickle\nimport time\n\nfrom prepare_data import *\n\nnp.random.seed(7)", "_____no_output_____" ], [ "data = Data('l', shuffle_all_inputs=False)\ncontent = data._read_content('data/SlovarIJS_BESEDE_utf8.lex')\ndictionary, max_word, max_num_vowels, vowels, accented_vowels = data._create_dict(content)\nfeature_dictionary = data._create_slovene_feature_dictionary()\nsyllable_dictionary = data._create_syllables_dictionary(content, vowels)\naccented_vowels = ['ล•', 'รก', 'รค', 'รฉ', 'รซ', 'ฤ›', 'รญ', 'รฎ', 'รณ', 'รด', 'รถ', 'รบ', 'รผ']\n\n", "_____no_output_____" ], [ "environment = {}\nenvironment['dictionary'] = dictionary\nenvironment['max_word'] = max_word\nenvironment['max_num_vowels'] = max_num_vowels\nenvironment['vowels'] = vowels\nenvironment['accented_vowels'] = accented_vowels\nenvironment['feature_dictionary'] = feature_dictionary\nenvironment['eng_feature_dictionary'] = feature_dictionary\nenvironment['syllable_dictionary'] = syllable_dictionary\noutput = open('environment.pkl', 'wb')\npickle.dump(environment, output)\noutput.close()", "_____no_output_____" ], [ "i = 0\nfor el in syllable_dictionary:\n if el == \"da\":\n print(i)\n i += 1", "407\n" ], [ "%run prepare_data.py\n\ndata = Data('l', shuffle_all_inputs=False)\nletter_location_model, syllable_location_model, syllabled_letters_location_model = data.load_location_models(\n 'cnn/word_accetuation/cnn_dictionary/v5_3/20_final_epoch.h5',\n 'cnn/word_accetuation/syllables/v3_3/20_final_epoch.h5',\n 'cnn/word_accetuation/syllabled_letters/v3_3/20_final_epoch.h5')\n\nletter_location_co_model, syllable_location_co_model, syllabled_letters_location_co_model = data.load_location_models(\n 'cnn/word_accetuation/cnn_dictionary/v5_2/20_final_epoch.h5',\n 'cnn/word_accetuation/syllables/v3_2/20_final_epoch.h5',\n 'cnn/word_accetuation/syllabled_letters/v3_2/20_final_epoch.h5')\n\nletter_type_model, syllable_type_model, syllabled_letter_type_model = data.load_type_models(\n 'cnn/accent_classification/letters/v3_1/20_final_epoch.h5',\n 'cnn/accent_classification/syllables/v2_1/20_final_epoch.h5',\n 'cnn/accent_classification/syllabled_letters/v2_1/20_final_epoch.h5')\n\nletter_type_co_model, syllable_type_co_model, syllabled_letter_type_co_model = data.load_type_models(\n 'cnn/accent_classification/letters/v3_0/20_final_epoch.h5',\n 'cnn/accent_classification/syllables/v2_0/20_final_epoch.h5',\n 'cnn/accent_classification/syllabled_letters/v2_0/20_final_epoch.h5')", "_____no_output_____" ], [ "test_input = [['uradni', '', 'Agpmpn', 'uradni'], ['podatki', '', 'Ncmpn', 'podatki'], ['policije', '', 'Ncfsg', 'policije'], ['kaลพejo', '', 'Vmpr3p', 'kaลพejo'], ['na', '', 'Sa', 'na'], ['precej', '', 'Rgp', 'precej'], ['napete', '', 'Appfpa', 'napete'], ['razmere', '', 'Ncfpa', 'razmere'], ['v', '', 'Sl', 'v'], ['piranskem', '', 'Agpmsl', 'piranskem'], ['zalivu', '', 'Ncmsl', 'zalivu'], ['je', '', 'Va-r3s-n', 'je'], ['danes', '', 'Rgp', 'danes'], ['poroฤala', '', 'Vmpp-sf', 'poroฤala'], ['oddaja', '', 'Ncfsn', 'oddaja'], ['do', '', 'Sg', 'do'], ['danes', '', 'Rgp', 'danes'], ['se', '', 'Px------y', 'se'], ['je', '', 'Va-r3s-n', 'je'], ['zgodilo', '', 'Vmep-sn', 'zgodilo']]", "_____no_output_____" ], [ "accented_vowels = ['ล•', 'รก', 'รค', 'รฉ', 'รซ', 'ฤ›', 'รญ', 'รฎ', 'รณ', 'รด', 'รถ', 'รบ', 'รผ']\nwords = [[\"Gorejevemu\", \"\", \"Psnsed\", \"Gorejevemu\"]]", "_____no_output_____" ], [ "pos = 
4282\nprint(location_accented_words)\nprint(accented_words)\nprint(words)", "['Gorejรฉvemu']\n['Gorejฤ›vemu']\n[['Gorejevemu', '', 'Psnsed', 'Gorejevemu']]\n" ], [ "data = Data('s', shuffle_all_inputs=False)\nnew_content = data._read_content('data/sloleks-sl_v1.2.tbl')", "_____no_output_____" ], [ "words = [[el[0], '', el[2], el[0]] for el in new_content][1146450:1146550]\n\n ", "_____no_output_____" ], [ "print(words.append['nadnaravno', '', 'Ppnsei'])", "[['adventistovo', '', 'Psnzeo', 'adventistovo'], ['adventistovo', '', 'Psnzet', 'adventistovo'], ['adventistu', '', 'Somed', 'adventistu'], ['adventistu', '', 'Somem', 'adventistu'], ['adventiven', '', 'Ppnmein', 'adventiven'], ['adventiven', '', 'Ppnmetn', 'adventiven'], ['adventivna', '', 'Ppnmdi', 'adventivna'], ['adventivna', '', 'Ppnmdt', 'adventivna'], ['adventivna', '', 'Ppnsmi', 'adventivna'], ['adventivna', '', 'Ppnsmt', 'adventivna'], ['adventivna', '', 'Ppnzei', 'adventivna'], ['adventivne', '', 'Ppnmmt', 'adventivne'], ['adventivne', '', 'Ppnzer', 'adventivne'], ['adventivne', '', 'Ppnzmi', 'adventivne'], ['adventivne', '', 'Ppnzmt', 'adventivne'], ['adventivnega', '', 'Ppnmer', 'adventivnega'], ['adventivnega', '', 'Ppnmet', 'adventivnega'], ['adventivnega', '', 'Ppnser', 'adventivnega'], ['adventivnem', '', 'Ppnmem', 'adventivnem'], ['adventivnem', '', 'Ppnsem', 'adventivnem'], ['adventivnemu', '', 'Ppnmed', 'adventivnemu'], ['adventivnemu', '', 'Ppnsed', 'adventivnemu'], ['adventivni', '', 'Ppnmeid', 'adventivni'], ['adventivni', '', 'Ppnmetd', 'adventivni'], ['adventivni', '', 'Ppnmmi', 'adventivni'], ['adventivni', '', 'Ppnsdi', 'adventivni'], ['adventivni', '', 'Ppnsdt', 'adventivni'], ['adventivni', '', 'Ppnzdi', 'adventivni'], ['adventivni', '', 'Ppnzdt', 'adventivni'], ['adventivni', '', 'Ppnzed', 'adventivni'], ['adventivni', '', 'Ppnzem', 'adventivni'], ['adventivnih', '', 'Ppnmdm', 'adventivnih'], ['adventivnih', '', 'Ppnmdr', 'adventivnih'], ['adventivnih', '', 'Ppnmmm', 'adventivnih'], ['adventivnih', '', 'Ppnmmr', 'adventivnih'], ['adventivnih', '', 'Ppnsdm', 'adventivnih'], ['adventivnih', '', 'Ppnsdr', 'adventivnih'], ['adventivnih', '', 'Ppnsmm', 'adventivnih'], ['adventivnih', '', 'Ppnsmr', 'adventivnih'], ['adventivnih', '', 'Ppnzdm', 'adventivnih'], ['adventivnih', '', 'Ppnzdr', 'adventivnih'], ['adventivnih', '', 'Ppnzmm', 'adventivnih'], ['adventivnih', '', 'Ppnzmr', 'adventivnih'], ['adventivnima', '', 'Ppnmdd', 'adventivnima'], ['adventivnima', '', 'Ppnmdo', 'adventivnima'], ['adventivnima', '', 'Ppnsdd', 'adventivnima'], ['adventivnima', '', 'Ppnsdo', 'adventivnima'], ['adventivnima', '', 'Ppnzdd', 'adventivnima'], ['adventivnima', '', 'Ppnzdo', 'adventivnima'], ['adventivnim', '', 'Ppnmeo', 'adventivnim'], ['adventivnim', '', 'Ppnmmd', 'adventivnim'], ['adventivnim', '', 'Ppnseo', 'adventivnim'], ['adventivnim', '', 'Ppnsmd', 'adventivnim'], ['adventivnim', '', 'Ppnzmd', 'adventivnim'], ['adventivnimi', '', 'Ppnmmo', 'adventivnimi'], ['adventivnimi', '', 'Ppnsmo', 'adventivnimi'], ['adventivnimi', '', 'Ppnzmo', 'adventivnimi'], ['adventivno', '', 'Ppnsei', 'adventivno'], ['adventivno', '', 'Ppnset', 'adventivno'], ['adventivno', '', 'Ppnzeo', 'adventivno'], ['adventivno', '', 'Ppnzet', 'adventivno'], ['adventna', '', 'Ppnmdi', 'adventna'], ['adventna', '', 'Ppnmdt', 'adventna'], ['adventna', '', 'Ppnsmi', 'adventna'], ['adventna', '', 'Ppnsmt', 'adventna'], ['adventna', '', 'Ppnzei', 'adventna'], ['adventne', '', 'Ppnmmt', 'adventne'], ['adventne', '', 'Ppnzer', 'adventne'], ['adventne', '', 
'Ppnzmi', 'adventne'], ['adventne', '', 'Ppnzmt', 'adventne'], ['adventnega', '', 'Ppnmer', 'adventnega'], ['adventnega', '', 'Ppnmet', 'adventnega'], ['adventnega', '', 'Ppnser', 'adventnega'], ['adventnem', '', 'Ppnmem', 'adventnem'], ['adventnem', '', 'Ppnsem', 'adventnem'], ['adventnemu', '', 'Ppnmed', 'adventnemu'], ['adventnemu', '', 'Ppnsed', 'adventnemu'], ['adventni', '', 'Ppnmeid', 'adventni'], ['adventni', '', 'Ppnmetd', 'adventni'], ['adventni', '', 'Ppnmmi', 'adventni'], ['adventni', '', 'Ppnsdi', 'adventni'], ['adventni', '', 'Ppnsdt', 'adventni'], ['adventni', '', 'Ppnzdi', 'adventni'], ['adventni', '', 'Ppnzdt', 'adventni'], ['adventni', '', 'Ppnzed', 'adventni'], ['adventni', '', 'Ppnzem', 'adventni'], ['adventnih', '', 'Ppnmdm', 'adventnih'], ['adventnih', '', 'Ppnmdr', 'adventnih'], ['adventnih', '', 'Ppnmmm', 'adventnih'], ['adventnih', '', 'Ppnmmr', 'adventnih'], ['adventnih', '', 'Ppnsdm', 'adventnih'], ['adventnih', '', 'Ppnsdr', 'adventnih'], ['adventnih', '', 'Ppnsmm', 'adventnih'], ['adventnih', '', 'Ppnsmr', 'adventnih'], ['adventnih', '', 'Ppnzdm', 'adventnih'], ['adventnih', '', 'Ppnzdr', 'adventnih'], ['adventnih', '', 'Ppnzmm', 'adventnih'], ['adventnih', '', 'Ppnzmr', 'adventnih'], ['adventnima', '', 'Ppnmdd', 'adventnima'], ['adventnima', '', 'Ppnmdo', 'adventnima'], ['adventnima', '', 'Ppnsdd', 'adventnima'], ['adventnima', '', 'Ppnsdo', 'adventnima'], ['adventnima', '', 'Ppnzdd', 'adventnima'], ['adventnima', '', 'Ppnzdo', 'adventnima'], ['adventnim', '', 'Ppnmeo', 'adventnim'], ['adventnim', '', 'Ppnmmd', 'adventnim'], ['adventnim', '', 'Ppnseo', 'adventnim'], ['adventnim', '', 'Ppnsmd', 'adventnim'], ['adventnim', '', 'Ppnzmd', 'adventnim'], ['adventnimi', '', 'Ppnmmo', 'adventnimi'], ['adventnimi', '', 'Ppnsmo', 'adventnimi'], ['adventnimi', '', 'Ppnzmo', 'adventnimi'], ['adventno', '', 'Ppnsei', 'adventno'], ['adventno', '', 'Ppnset', 'adventno'], ['adventno', '', 'Ppnzeo', 'adventno'], ['adventno', '', 'Ppnzet', 'adventno'], ['adventoma', '', 'Somdd', 'adventoma'], ['adventoma', '', 'Somdo', 'adventoma'], ['adventom', '', 'Someo', 'adventom'], ['adventom', '', 'Sommd', 'adventom'], ['adventov', '', 'Somdr', 'adventov'], ['adventov', '', 'Sommr', 'adventov'], ['adventu', '', 'Somed', 'adventu'], ['adventu', '', 'Somem', 'adventu'], ['adverba', '', 'Somdi', 'adverba'], ['adverba', '', 'Somdt', 'adverba'], ['adverba', '', 'Somer', 'adverba'], ['adverb', '', 'Somei', 'adverb'], ['adverb', '', 'Sometn', 'adverb'], ['adverbe', '', 'Sommt', 'adverbe'], ['adverbi', '', 'Sommi', 'adverbi'], ['adverbi', '', 'Sommo', 'adverbi'], ['adverbih', '', 'Somdm', 'adverbih'], ['adverbih', '', 'Sommm', 'adverbih'], ['adverboma', '', 'Somdd', 'adverboma'], ['adverboma', '', 'Somdo', 'adverboma'], ['adverbom', '', 'Someo', 'adverbom'], ['adverbom', '', 'Sommd', 'adverbom'], ['adverbov', '', 'Somdr', 'adverbov'], ['adverbov', '', 'Sommr', 'adverbov'], ['adverbu', '', 'Somed', 'adverbu'], ['adverbu', '', 'Somem', 'adverbu'], ['advokata', '', 'Somdi', 'advokata'], ['advokata', '', 'Somdt', 'advokata'], ['advokata', '', 'Somer', 'advokata'], ['advokata', '', 'Sometd', 'advokata'], ['advokat', '', 'Somei', 'advokat'], ['advokate', '', 'Sommt', 'advokate'], ['advokati', '', 'Sommi', 'advokati'], ['advokati', '', 'Sommo', 'advokati'], ['advokatih', '', 'Somdm', 'advokatih'], ['advokatih', '', 'Sommm', 'advokatih'], ['advokatka', '', 'Sozei', 'advokatka'], ['advokatk', '', 'Sozdr', 'advokatk'], ['advokatk', '', 'Sozmr', 'advokatk'], ['advokatkah', '', 'Sozdm', 'advokatkah'], 
['advokatkah', '', 'Sozmm', 'advokatkah'], ['advokatkama', '', 'Sozdd', 'advokatkama'], ['advokatkama', '', 'Sozdo', 'advokatkama'], ['advokatkam', '', 'Sozmd', 'advokatkam'], ['advokatkami', '', 'Sozmo', 'advokatkami'], ['advokatke', '', 'Sozer', 'advokatke'], ['advokatke', '', 'Sozmi', 'advokatke'], ['advokatke', '', 'Sozmt', 'advokatke'], ['advokatki', '', 'Sozdi', 'advokatki'], ['advokatki', '', 'Sozdt', 'advokatki'], ['advokatki', '', 'Sozed', 'advokatki'], ['advokatki', '', 'Sozem', 'advokatki'], ['advokatko', '', 'Sozeo', 'advokatko'], ['advokatko', '', 'Sozet', 'advokatko'], ['advokatoma', '', 'Somdd', 'advokatoma'], ['advokatoma', '', 'Somdo', 'advokatoma'], ['advokatom', '', 'Someo', 'advokatom'], ['advokatom', '', 'Sommd', 'advokatom'], ['advokatova', '', 'Psnmdi', 'advokatova'], ['advokatova', '', 'Psnmdt', 'advokatova'], ['advokatova', '', 'Psnsmi', 'advokatova'], ['advokatova', '', 'Psnsmt', 'advokatova'], ['advokatova', '', 'Psnzei', 'advokatova'], ['advokatov', '', 'Somdr', 'advokatov'], ['advokatov', '', 'Sommr', 'advokatov'], ['advokatov', '', 'Psnmein', 'advokatov'], ['advokatov', '', 'Psnmetn', 'advokatov'], ['advokatove', '', 'Psnmmt', 'advokatove'], ['advokatove', '', 'Psnzer', 'advokatove'], ['advokatove', '', 'Psnzmi', 'advokatove'], ['advokatove', '', 'Psnzmt', 'advokatove'], ['advokatovega', '', 'Psnmer', 'advokatovega'], ['advokatovega', '', 'Psnmet', 'advokatovega'], ['advokatovega', '', 'Psnser', 'advokatovega'], ['advokatovem', '', 'Psnmem', 'advokatovem'], ['advokatovem', '', 'Psnsem', 'advokatovem'], ['advokatovemu', '', 'Psnmed', 'advokatovemu'], ['advokatovemu', '', 'Psnsed', 'advokatovemu'], ['advokatovi', '', 'Psnmmi', 'advokatovi'], ['advokatovi', '', 'Psnsdi', 'advokatovi'], ['advokatovi', '', 'Psnsdt', 'advokatovi'], ['advokatovi', '', 'Psnzdi', 'advokatovi'], ['advokatovi', '', 'Psnzdt', 'advokatovi'], ['advokatovi', '', 'Psnzed', 'advokatovi'], ['advokatovi', '', 'Psnzem', 'advokatovi'], ['advokatovih', '', 'Psnmdm', 'advokatovih'], ['advokatovih', '', 'Psnmdr', 'advokatovih'], ['advokatovih', '', 'Psnmmm', 'advokatovih'], ['advokatovih', '', 'Psnmmr', 'advokatovih'], ['advokatovih', '', 'Psnsdm', 'advokatovih'], ['advokatovih', '', 'Psnsdr', 'advokatovih'], ['advokatovih', '', 'Psnsmm', 'advokatovih'], ['advokatovih', '', 'Psnsmr', 'advokatovih'], ['advokatovih', '', 'Psnzdm', 'advokatovih'], ['advokatovih', '', 'Psnzdr', 'advokatovih'], ['advokatovih', '', 'Psnzmm', 'advokatovih'], ['advokatovih', '', 'Psnzmr', 'advokatovih'], ['advokatovima', '', 'Psnmdd', 'advokatovima'], ['advokatovima', '', 'Psnmdo', 'advokatovima'], ['advokatovima', '', 'Psnsdd', 'advokatovima'], ['advokatovima', '', 'Psnsdo', 'advokatovima'], ['advokatovima', '', 'Psnzdd', 'advokatovima'], ['advokatovima', '', 'Psnzdo', 'advokatovima'], ['advokatovim', '', 'Psnmeo', 'advokatovim'], ['advokatovim', '', 'Psnmmd', 'advokatovim'], ['advokatovim', '', 'Psnseo', 'advokatovim'], ['advokatovim', '', 'Psnsmd', 'advokatovim'], ['advokatovim', '', 'Psnzmd', 'advokatovim'], ['advokatovimi', '', 'Psnmmo', 'advokatovimi'], ['advokatovimi', '', 'Psnsmo', 'advokatovimi'], ['advokatovimi', '', 'Psnzmo', 'advokatovimi'], ['advokatovo', '', 'Psnsei', 'advokatovo'], ['advokatovo', '', 'Psnset', 'advokatovo'], ['advokatovo', '', 'Psnzeo', 'advokatovo'], ['advokatovo', '', 'Psnzet', 'advokatovo'], ['advokatska', '', 'Ppnmdi', 'advokatska'], ['advokatska', '', 'Ppnmdt', 'advokatska'], ['advokatska', '', 'Ppnsmi', 'advokatska'], ['advokatska', '', 'Ppnsmt', 'advokatska'], ['advokatska', '', 
'Ppnzei', 'advokatska'], ['advokatske', '', 'Ppnmmt', 'advokatske'], ['advokatske', '', 'Ppnzer', 'advokatske'], ['advokatske', '', 'Ppnzmi', 'advokatske'], ['advokatske', '', 'Ppnzmt', 'advokatske'], ['advokatskega', '', 'Ppnmer', 'advokatskega'], ['advokatskega', '', 'Ppnmet', 'advokatskega'], ['advokatskega', '', 'Ppnser', 'advokatskega'], ['advokatskem', '', 'Ppnmem', 'advokatskem'], ['advokatskem', '', 'Ppnsem', 'advokatskem'], ['advokatskemu', '', 'Ppnmed', 'advokatskemu'], ['advokatskemu', '', 'Ppnsed', 'advokatskemu'], ['advokatski', '', 'Ppnmeid', 'advokatski'], ['advokatski', '', 'Ppnmetd', 'advokatski'], ['advokatski', '', 'Ppnmmi', 'advokatski'], ['advokatski', '', 'Ppnsdi', 'advokatski'], ['advokatski', '', 'Ppnsdt', 'advokatski'], ['advokatski', '', 'Ppnzdi', 'advokatski'], ['advokatski', '', 'Ppnzdt', 'advokatski'], ['advokatski', '', 'Ppnzed', 'advokatski'], ['advokatski', '', 'Ppnzem', 'advokatski'], ['advokatskih', '', 'Ppnmdm', 'advokatskih'], ['advokatskih', '', 'Ppnmdr', 'advokatskih'], ['advokatskih', '', 'Ppnmmm', 'advokatskih'], ['advokatskih', '', 'Ppnmmr', 'advokatskih'], ['advokatskih', '', 'Ppnsdm', 'advokatskih'], ['advokatskih', '', 'Ppnsdr', 'advokatskih'], ['advokatskih', '', 'Ppnsmm', 'advokatskih'], ['advokatskih', '', 'Ppnsmr', 'advokatskih'], ['advokatskih', '', 'Ppnzdm', 'advokatskih'], ['advokatskih', '', 'Ppnzdr', 'advokatskih'], ['advokatskih', '', 'Ppnzmm', 'advokatskih'], ['advokatskih', '', 'Ppnzmr', 'advokatskih'], ['advokatskima', '', 'Ppnmdd', 'advokatskima'], ['advokatskima', '', 'Ppnmdo', 'advokatskima'], ['advokatskima', '', 'Ppnsdd', 'advokatskima'], ['advokatskima', '', 'Ppnsdo', 'advokatskima'], ['advokatskima', '', 'Ppnzdd', 'advokatskima'], ['advokatskima', '', 'Ppnzdo', 'advokatskima'], ['advokatskim', '', 'Ppnmeo', 'advokatskim'], ['advokatskim', '', 'Ppnmmd', 'advokatskim'], ['advokatskim', '', 'Ppnseo', 'advokatskim'], ['advokatskim', '', 'Ppnsmd', 'advokatskim'], ['advokatskim', '', 'Ppnzmd', 'advokatskim'], ['advokatskimi', '', 'Ppnmmo', 'advokatskimi'], ['advokatskimi', '', 'Ppnsmo', 'advokatskimi'], ['advokatskimi', '', 'Ppnzmo', 'advokatskimi'], ['advokatsko', '', 'Ppnsei', 'advokatsko'], ['advokatsko', '', 'Ppnset', 'advokatsko'], ['advokatsko', '', 'Ppnzeo', 'advokatsko'], ['advokatsko', '', 'Ppnzet', 'advokatsko'], ['advokatsko', '', 'Rsn', 'advokatsko'], ['advokatu', '', 'Somed', 'advokatu'], ['advokatu', '', 'Somem', 'advokatu'], ['advokatura', '', 'Sozei', 'advokatura'], ['advokatur', '', 'Sozdr', 'advokatur'], ['advokatur', '', 'Sozmr', 'advokatur'], ['advokaturah', '', 'Sozdm', 'advokaturah'], ['advokaturah', '', 'Sozmm', 'advokaturah'], ['advokaturama', '', 'Sozdd', 'advokaturama'], ['advokaturama', '', 'Sozdo', 'advokaturama'], ['advokaturam', '', 'Sozmd', 'advokaturam'], ['advokaturami', '', 'Sozmo', 'advokaturami'], ['advokature', '', 'Sozer', 'advokature'], ['advokature', '', 'Sozmi', 'advokature'], ['advokature', '', 'Sozmt', 'advokature'], ['advokaturi', '', 'Sozdi', 'advokaturi'], ['advokaturi', '', 'Sozdt', 'advokaturi'], ['advokaturi', '', 'Sozed', 'advokaturi'], ['advokaturi', '', 'Sozem', 'advokaturi'], ['advokaturo', '', 'Sozeo', 'advokaturo'], ['advokaturo', '', 'Sozet', 'advokaturo'], ['Adลพiฤ‡eva', '', 'Psnmdi', 'Adลพiฤ‡eva'], ['Adลพiฤ‡eva', '', 'Psnmdt', 'Adลพiฤ‡eva'], ['Adลพiฤ‡eva', '', 'Psnsmi', 'Adลพiฤ‡eva'], ['Adลพiฤ‡eva', '', 'Psnsmt', 'Adลพiฤ‡eva'], ['Adลพiฤ‡eva', '', 'Psnzei', 'Adลพiฤ‡eva'], ['Adลพiฤ‡ev', '', 'Psnmein', 'Adลพiฤ‡ev'], ['Adลพiฤ‡ev', '', 'Psnmetn', 'Adลพiฤ‡ev'], ['Adลพiฤ‡eve', '', 
'Psnmmt', 'Adลพiฤ‡eve'], ['Adลพiฤ‡eve', '', 'Psnzer', 'Adลพiฤ‡eve'], ['Adลพiฤ‡eve', '', 'Psnzmi', 'Adลพiฤ‡eve'], ['Adลพiฤ‡eve', '', 'Psnzmt', 'Adลพiฤ‡eve'], ['Adลพiฤ‡evega', '', 'Psnmer', 'Adลพiฤ‡evega'], ['Adลพiฤ‡evega', '', 'Psnmet', 'Adลพiฤ‡evega'], ['Adลพiฤ‡evega', '', 'Psnser', 'Adลพiฤ‡evega'], ['Adลพiฤ‡evem', '', 'Psnmem', 'Adลพiฤ‡evem'], ['Adลพiฤ‡evem', '', 'Psnsem', 'Adลพiฤ‡evem'], ['Adลพiฤ‡evemu', '', 'Psnmed', 'Adลพiฤ‡evemu'], ['Adลพiฤ‡evemu', '', 'Psnsed', 'Adลพiฤ‡evemu'], ['Adลพiฤ‡evi', '', 'Psnmmi', 'Adลพiฤ‡evi'], ['Adลพiฤ‡evi', '', 'Psnsdi', 'Adลพiฤ‡evi'], ['Adลพiฤ‡evi', '', 'Psnsdt', 'Adลพiฤ‡evi'], ['Adลพiฤ‡evi', '', 'Psnzdi', 'Adลพiฤ‡evi'], ['Adลพiฤ‡evi', '', 'Psnzdt', 'Adลพiฤ‡evi'], ['Adลพiฤ‡evi', '', 'Psnzed', 'Adลพiฤ‡evi'], ['Adลพiฤ‡evi', '', 'Psnzem', 'Adลพiฤ‡evi'], ['Adลพiฤ‡evih', '', 'Psnmdm', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnmdr', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnmmm', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnmmr', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnsdm', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnsdr', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnsmm', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnsmr', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnzdm', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnzdr', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnzmm', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evih', '', 'Psnzmr', 'Adลพiฤ‡evih'], ['Adลพiฤ‡evima', '', 'Psnmdd', 'Adลพiฤ‡evima'], ['Adลพiฤ‡evima', '', 'Psnmdo', 'Adลพiฤ‡evima'], ['Adลพiฤ‡evima', '', 'Psnsdd', 'Adลพiฤ‡evima'], ['Adลพiฤ‡evima', '', 'Psnsdo', 'Adลพiฤ‡evima'], ['Adลพiฤ‡evima', '', 'Psnzdd', 'Adลพiฤ‡evima'], ['Adลพiฤ‡evima', '', 'Psnzdo', 'Adลพiฤ‡evima'], ['Adลพiฤ‡evim', '', 'Psnmeo', 'Adลพiฤ‡evim'], ['Adลพiฤ‡evim', '', 'Psnmmd', 'Adลพiฤ‡evim'], ['Adลพiฤ‡evim', '', 'Psnseo', 'Adลพiฤ‡evim'], ['Adลพiฤ‡evim', '', 'Psnsmd', 'Adลพiฤ‡evim'], ['Adลพiฤ‡evim', '', 'Psnzmd', 'Adลพiฤ‡evim'], ['Adลพiฤ‡evimi', '', 'Psnmmo', 'Adลพiฤ‡evimi'], ['Adลพiฤ‡evimi', '', 'Psnsmo', 'Adลพiฤ‡evimi'], ['Adลพiฤ‡evimi', '', 'Psnzmo', 'Adลพiฤ‡evimi'], ['Adลพiฤ‡evo', '', 'Psnsei', 'Adลพiฤ‡evo'], ['Adลพiฤ‡evo', '', 'Psnset', 'Adลพiฤ‡evo'], ['Adลพiฤ‡evo', '', 'Psnzeo', 'Adลพiฤ‡evo'], ['Adลพiฤ‡evo', '', 'Psnzet', 'Adลพiฤ‡evo'], ['Aera', '', 'Slmer', 'Aera'], ['aerira', '', 'Ggvste', 'aerira'], ['aeriraj', '', 'Ggvvde', 'aeriraj'], ['aerirajmo', '', 'Ggvvpm', 'aerirajmo'], ['aerirajo', '', 'Ggvstm', 'aerirajo'], ['aerirajta', '', 'Ggvvdd', 'aerirajta'], ['aerirajte', '', 'Ggvvdm', 'aerirajte'], ['aerirajva', '', 'Ggvvpd', 'aerirajva'], ['aerirala', '', 'Ggvd-dm', 'aerirala'], ['aerirala', '', 'Ggvd-ez', 'aerirala'], ['aerirala', '', 'Ggvd-ms', 'aerirala'], ['aeriral', '', 'Ggvd-em', 'aeriral'], ['aerirale', '', 'Ggvd-mz', 'aerirale'], ['aerirali', '', 'Ggvd-ds', 'aerirali'], ['aerirali', '', 'Ggvd-dz', 'aerirali'], ['aerirali', '', 'Ggvd-mm', 'aerirali'], ['aeriralo', '', 'Ggvd-es', 'aeriralo'], ['aeriram', '', 'Ggvspe', 'aeriram'], ['aeriramo', '', 'Ggvspm', 'aeriramo'], ['aeriranja', '', 'Soser', 'aeriranja'], ['aeriranja', '', 'Sosmi', 'aeriranja'], ['aeriranja', '', 'Sosmt', 'aeriranja'], ['aeriranj', '', 'Sosdr', 'aeriranj'], ['aeriranj', '', 'Sosmr', 'aeriranj'], ['aeriranje', '', 'Sosei', 'aeriranje'], ['aeriranje', '', 'Soset', 'aeriranje'], ['aeriranjema', '', 'Sosdd', 'aeriranjema'], ['aeriranjema', '', 'Sosdo', 'aeriranjema'], ['aeriranjem', '', 'Soseo', 'aeriranjem'], ['aeriranjem', '', 'Sosmd', 'aeriranjem'], ['aeriranji', '', 'Sosdi', 'aeriranji'], ['aeriranji', '', 'Sosdt', 'aeriranji'], ['aeriranji', '', 'Sosmo', 'aeriranji'], ['aeriranjih', '', 'Sosdm', 
'aeriranjih'], ['aeriranjih', '', 'Sosmm', 'aeriranjih'], ['aeriranju', '', 'Sosed', 'aeriranju'], ['aeriranju', '', 'Sosem', 'aeriranju'], ['aeriraลก', '', 'Ggvsde', 'aeriraลก'], ['aerirata', '', 'Ggvsdd', 'aerirata'], ['aerirata', '', 'Ggvstd', 'aerirata'], ['aerirat', '', 'Ggvm', 'aerirat'], ['aerirate', '', 'Ggvsdm', 'aerirate'], ['aerirati', '', 'Ggvn', 'aerirati'], ['aerirava', '', 'Ggvspd', 'aerirava'], ['Aero', '', 'Slmei', 'Aero'], ['Aero', '', 'Slmetn', 'Aero'], ['aeroben', '', 'Ppnmein', 'aeroben'], ['aeroben', '', 'Ppnmetn', 'aeroben'], ['aerobika', '', 'Sozei', 'aerobika'], ['aerobike', '', 'Sozer', 'aerobike'], ['aerobiki', '', 'Sozed', 'aerobiki'], ['aerobiki', '', 'Sozem', 'aerobiki'], ['aerobiko', '', 'Sozeo', 'aerobiko'], ['aerobiko', '', 'Sozet', 'aerobiko'], ['aerobna', '', 'Ppnmdi', 'aerobna'], ['aerobna', '', 'Ppnmdt', 'aerobna'], ['aerobna', '', 'Ppnsmi', 'aerobna'], ['aerobna', '', 'Ppnsmt', 'aerobna'], ['aerobna', '', 'Ppnzei', 'aerobna'], ['aerobne', '', 'Ppnmmt', 'aerobne'], ['aerobne', '', 'Ppnzer', 'aerobne'], ['aerobne', '', 'Ppnzmi', 'aerobne'], ['aerobne', '', 'Ppnzmt', 'aerobne'], ['aerobnega', '', 'Ppnmer', 'aerobnega'], ['aerobnega', '', 'Ppnmet', 'aerobnega'], ['aerobnega', '', 'Ppnser', 'aerobnega'], ['aerobnem', '', 'Ppnmem', 'aerobnem'], ['aerobnem', '', 'Ppnsem', 'aerobnem'], ['aerobnemu', '', 'Ppnmed', 'aerobnemu'], ['aerobnemu', '', 'Ppnsed', 'aerobnemu'], ['aerobni', '', 'Ppnmeid', 'aerobni'], ['aerobni', '', 'Ppnmetd', 'aerobni'], ['aerobni', '', 'Ppnmmi', 'aerobni'], ['aerobni', '', 'Ppnsdi', 'aerobni'], ['aerobni', '', 'Ppnsdt', 'aerobni'], ['aerobni', '', 'Ppnzdi', 'aerobni'], ['aerobni', '', 'Ppnzdt', 'aerobni'], ['aerobni', '', 'Ppnzed', 'aerobni'], ['aerobni', '', 'Ppnzem', 'aerobni'], ['aerobnih', '', 'Ppnmdm', 'aerobnih'], ['aerobnih', '', 'Ppnmdr', 'aerobnih'], ['aerobnih', '', 'Ppnmmm', 'aerobnih'], ['aerobnih', '', 'Ppnmmr', 'aerobnih'], ['aerobnih', '', 'Ppnsdm', 'aerobnih'], ['aerobnih', '', 'Ppnsdr', 'aerobnih'], ['aerobnih', '', 'Ppnsmm', 'aerobnih'], ['aerobnih', '', 'Ppnsmr', 'aerobnih'], ['aerobnih', '', 'Ppnzdm', 'aerobnih'], ['aerobnih', '', 'Ppnzdr', 'aerobnih'], ['aerobnih', '', 'Ppnzmm', 'aerobnih'], ['aerobnih', '', 'Ppnzmr', 'aerobnih'], ['aerobnima', '', 'Ppnmdd', 'aerobnima'], ['aerobnima', '', 'Ppnmdo', 'aerobnima'], ['aerobnima', '', 'Ppnsdd', 'aerobnima'], ['aerobnima', '', 'Ppnsdo', 'aerobnima'], ['aerobnima', '', 'Ppnzdd', 'aerobnima'], ['aerobnima', '', 'Ppnzdo', 'aerobnima'], ['aerobnim', '', 'Ppnmeo', 'aerobnim'], ['aerobnim', '', 'Ppnmmd', 'aerobnim'], ['aerobnim', '', 'Ppnseo', 'aerobnim'], ['aerobnim', '', 'Ppnsmd', 'aerobnim'], ['aerobnim', '', 'Ppnzmd', 'aerobnim'], ['aerobnimi', '', 'Ppnmmo', 'aerobnimi'], ['aerobnimi', '', 'Ppnsmo', 'aerobnimi'], ['aerobnimi', '', 'Ppnzmo', 'aerobnimi'], ['aerobno', '', 'Ppnsei', 'aerobno'], ['aerobno', '', 'Ppnset', 'aerobno'], ['aerobno', '', 'Ppnzeo', 'aerobno'], ['aerobno', '', 'Ppnzet', 'aerobno'], ['aerodinamiฤen', '', 'Ppnmein', 'aerodinamiฤen'], ['aerodinamiฤen', '', 'Ppnmetn', 'aerodinamiฤen'], ['aerodinamiฤna', '', 'Ppnmdi', 'aerodinamiฤna'], ['aerodinamiฤna', '', 'Ppnmdt', 'aerodinamiฤna'], ['aerodinamiฤna', '', 'Ppnsmi', 'aerodinamiฤna'], ['aerodinamiฤna', '', 'Ppnsmt', 'aerodinamiฤna'], ['aerodinamiฤna', '', 'Ppnzei', 'aerodinamiฤna'], ['aerodinamiฤne', '', 'Ppnmmt', 'aerodinamiฤne'], ['aerodinamiฤne', '', 'Ppnzer', 'aerodinamiฤne'], ['aerodinamiฤne', '', 'Ppnzmi', 'aerodinamiฤne'], ['aerodinamiฤne', '', 'Ppnzmt', 'aerodinamiฤne'], 
['aerodinamiฤnega', '', 'Ppnmer', 'aerodinamiฤnega'], ['aerodinamiฤnega', '', 'Ppnmet', 'aerodinamiฤnega'], ['aerodinamiฤnega', '', 'Ppnser', 'aerodinamiฤnega'], ['aerodinamiฤnem', '', 'Ppnmem', 'aerodinamiฤnem'], ['aerodinamiฤnem', '', 'Ppnsem', 'aerodinamiฤnem'], ['aerodinamiฤnemu', '', 'Ppnmed', 'aerodinamiฤnemu'], ['aerodinamiฤnemu', '', 'Ppnsed', 'aerodinamiฤnemu'], ['aerodinamiฤni', '', 'Ppnmeid', 'aerodinamiฤni'], ['aerodinamiฤni', '', 'Ppnmetd', 'aerodinamiฤni'], ['aerodinamiฤni', '', 'Ppnmmi', 'aerodinamiฤni'], ['aerodinamiฤni', '', 'Ppnsdi', 'aerodinamiฤni'], ['aerodinamiฤni', '', 'Ppnsdt', 'aerodinamiฤni'], ['aerodinamiฤni', '', 'Ppnzdi', 'aerodinamiฤni'], ['aerodinamiฤni', '', 'Ppnzdt', 'aerodinamiฤni'], ['aerodinamiฤni', '', 'Ppnzed', 'aerodinamiฤni'], ['aerodinamiฤni', '', 'Ppnzem', 'aerodinamiฤni'], ['aerodinamiฤnih', '', 'Ppnmdm', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnmdr', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnmmm', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnmmr', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnsdm', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnsdr', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnsmm', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnsmr', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnzdm', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnzdr', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnzmm', 'aerodinamiฤnih'], ['aerodinamiฤnih', '', 'Ppnzmr', 'aerodinamiฤnih'], ['aerodinamiฤnima', '', 'Ppnmdd', 'aerodinamiฤnima'], ['aerodinamiฤnima', '', 'Ppnmdo', 'aerodinamiฤnima'], ['aerodinamiฤnima', '', 'Ppnsdd', 'aerodinamiฤnima'], ['aerodinamiฤnima', '', 'Ppnsdo', 'aerodinamiฤnima'], ['aerodinamiฤnima', '', 'Ppnzdd', 'aerodinamiฤnima'], ['aerodinamiฤnima', '', 'Ppnzdo', 'aerodinamiฤnima'], ['aerodinamiฤnim', '', 'Ppnmeo', 'aerodinamiฤnim'], ['aerodinamiฤnim', '', 'Ppnmmd', 'aerodinamiฤnim'], ['aerodinamiฤnim', '', 'Ppnseo', 'aerodinamiฤnim'], ['aerodinamiฤnim', '', 'Ppnsmd', 'aerodinamiฤnim'], ['aerodinamiฤnim', '', 'Ppnzmd', 'aerodinamiฤnim'], ['aerodinamiฤnimi', '', 'Ppnmmo', 'aerodinamiฤnimi'], ['aerodinamiฤnimi', '', 'Ppnsmo', 'aerodinamiฤnimi'], ['aerodinamiฤnimi', '', 'Ppnzmo', 'aerodinamiฤnimi'], ['aerodinamiฤno', '', 'Ppnsei', 'aerodinamiฤno'], ['aerodinamiฤno', '', 'Ppnset', 'aerodinamiฤno'], ['aerodinamiฤno', '', 'Ppnzeo', 'aerodinamiฤno'], ['aerodinamiฤno', '', 'Ppnzet', 'aerodinamiฤno'], ['aerodinamiฤno', '', 'Rsn', 'aerodinamiฤno'], ['aerodinamika', '', 'Sozei', 'aerodinamika'], ['aerodinamike', '', 'Sozer', 'aerodinamike'], ['aerodinamiki', '', 'Sozed', 'aerodinamiki'], ['aerodinamiki', '', 'Sozem', 'aerodinamiki'], ['aerodinamiko', '', 'Sozeo', 'aerodinamiko'], ['aerodinamiko', '', 'Sozet', 'aerodinamiko'], ['aerodroma', '', 'Somdi', 'aerodroma'], ['aerodroma', '', 'Somdt', 'aerodroma'], ['aerodroma', '', 'Somer', 'aerodroma'], ['aerodrom', '', 'Somei', 'aerodrom'], ['aerodrom', '', 'Sometn', 'aerodrom'], ['aerodrome', '', 'Sommt', 'aerodrome'], ['aerodromi', '', 'Sommi', 'aerodromi'], ['aerodromi', '', 'Sommo', 'aerodromi'], ['aerodromih', '', 'Somdm', 'aerodromih'], ['aerodromih', '', 'Sommm', 'aerodromih'], ['aerodromoma', '', 'Somdd', 'aerodromoma'], ['aerodromoma', '', 'Somdo', 'aerodromoma'], ['aerodromom', '', 'Someo', 'aerodromom'], ['aerodromom', '', 'Sommd', 'aerodromom'], ['aerodromov', '', 'Somdr', 'aerodromov'], ['aerodromov', '', 'Sommr', 'aerodromov'], ['aerodromu', '', 'Somed', 'aerodromu'], ['aerodromu', '', 
'Somem', 'aerodromu'], ['aerofotografija', '', 'Sozei', 'aerofotografija'], ['aerofotografij', '', 'Sozdr', 'aerofotografij'], ['aerofotografij', '', 'Sozmr', 'aerofotografij'], ['aerofotografijah', '', 'Sozdm', 'aerofotografijah'], ['aerofotografijah', '', 'Sozmm', 'aerofotografijah'], ['aerofotografijama', '', 'Sozdd', 'aerofotografijama'], ['aerofotografijama', '', 'Sozdo', 'aerofotografijama'], ['aerofotografijam', '', 'Sozmd', 'aerofotografijam'], ['aerofotografijami', '', 'Sozmo', 'aerofotografijami'], ['aerofotografije', '', 'Sozer', 'aerofotografije'], ['aerofotografije', '', 'Sozmi', 'aerofotografije'], ['aerofotografije', '', 'Sozmt', 'aerofotografije'], ['aerofotografiji', '', 'Sozdi', 'aerofotografiji'], ['aerofotografiji', '', 'Sozdt', 'aerofotografiji'], ['aerofotografiji', '', 'Sozed', 'aerofotografiji'], ['aerofotografiji', '', 'Sozem', 'aerofotografiji'], ['aerofotografijo', '', 'Sozeo', 'aerofotografijo'], ['aerofotografijo', '', 'Sozet', 'aerofotografijo'], ['aerofotogrametrija', '', 'Sozei', 'aerofotogrametrija'], ['aerofotogrametrij', '', 'Sozdr', 'aerofotogrametrij'], ['aerofotogrametrij', '', 'Sozmr', 'aerofotogrametrij'], ['aerofotogrametrijah', '', 'Sozdm', 'aerofotogrametrijah'], ['aerofotogrametrijah', '', 'Sozmm', 'aerofotogrametrijah'], ['aerofotogrametrijama', '', 'Sozdd', 'aerofotogrametrijama'], ['aerofotogrametrijama', '', 'Sozdo', 'aerofotogrametrijama'], ['aerofotogrametrijam', '', 'Sozmd', 'aerofotogrametrijam'], ['aerofotogrametrijami', '', 'Sozmo', 'aerofotogrametrijami'], ['aerofotogrametrije', '', 'Sozer', 'aerofotogrametrije'], ['aerofotogrametrije', '', 'Sozmi', 'aerofotogrametrije'], ['aerofotogrametrije', '', 'Sozmt', 'aerofotogrametrije'], ['aerofotogrametriji', '', 'Sozdi', 'aerofotogrametriji'], ['aerofotogrametriji', '', 'Sozdt', 'aerofotogrametriji'], ['aerofotogrametriji', '', 'Sozed', 'aerofotogrametriji'], ['aerofotogrametriji', '', 'Sozem', 'aerofotogrametriji'], ['aerofotogrametrijo', '', 'Sozeo', 'aerofotogrametrijo'], ['aerofotogrametrijo', '', 'Sozet', 'aerofotogrametrijo'], ['aerokluba', '', 'Somdi', 'aerokluba'], ['aerokluba', '', 'Somdt', 'aerokluba'], ['aerokluba', '', 'Somer', 'aerokluba'], ['aeroklub', '', 'Somei', 'aeroklub'], ['aeroklub', '', 'Sometn', 'aeroklub'], ['aeroklube', '', 'Sommt', 'aeroklube'], ['aeroklubi', '', 'Sommi', 'aeroklubi'], ['aeroklubi', '', 'Sommo', 'aeroklubi'], ['aeroklubih', '', 'Somdm', 'aeroklubih'], ['aeroklubih', '', 'Sommm', 'aeroklubih'], ['aerokluboma', '', 'Somdd', 'aerokluboma'], ['aerokluboma', '', 'Somdo', 'aerokluboma'], ['aeroklubom', '', 'Someo', 'aeroklubom'], ['aeroklubom', '', 'Sommd', 'aeroklubom'], ['aeroklubov', '', 'Somdr', 'aeroklubov'], ['aeroklubov', '', 'Sommr', 'aeroklubov'], ['aeroklubu', '', 'Somed', 'aeroklubu'], ['aeroklubu', '', 'Somem', 'aeroklubu'], ['aerolita', '', 'Somdi', 'aerolita'], ['aerolita', '', 'Somdt', 'aerolita'], ['aerolita', '', 'Somer', 'aerolita'], ['aerolit', '', 'Somei', 'aerolit'], ['aerolit', '', 'Sometn', 'aerolit'], ['aerolite', '', 'Sommt', 'aerolite'], ['aeroliti', '', 'Sommi', 'aeroliti'], ['aeroliti', '', 'Sommo', 'aeroliti'], ['aerolitih', '', 'Somdm', 'aerolitih'], ['aerolitih', '', 'Sommm', 'aerolitih'], ['aerolitoma', '', 'Somdd', 'aerolitoma'], ['aerolitoma', '', 'Somdo', 'aerolitoma'], ['aerolitom', '', 'Someo', 'aerolitom'], ['aerolitom', '', 'Sommd', 'aerolitom'], ['aerolitov', '', 'Somdr', 'aerolitov'], ['aerolitov', '', 'Sommr', 'aerolitov'], ['aerolitu', '', 'Somed', 'aerolitu'], ['aerolitu', '', 'Somem', 'aerolitu'], ['Aerom', '', 
'Slmeo', 'Aerom'], ['aerometer', '', 'Somei', 'aerometer'], ['aerometer', '', 'Sometn', 'aerometer'], ['aerometra', '', 'Somdi', 'aerometra'], ['aerometra', '', 'Somdt', 'aerometra'], ['aerometra', '', 'Somer', 'aerometra'], ['aerometre', '', 'Sommt', 'aerometre'], ['aerometri', '', 'Sommi', 'aerometri'], ['aerometri', '', 'Sommo', 'aerometri'], ['aerometrih', '', 'Somdm', 'aerometrih'], ['aerometrih', '', 'Sommm', 'aerometrih'], ['aerometroma', '', 'Somdd', 'aerometroma'], ['aerometroma', '', 'Somdo', 'aerometroma'], ['aerometrom', '', 'Someo', 'aerometrom'], ['aerometrom', '', 'Sommd', 'aerometrom'], ['aerometrov', '', 'Somdr', 'aerometrov'], ['aerometrov', '', 'Sommr', 'aerometrov'], ['aerometru', '', 'Somed', 'aerometru'], ['aerometru', '', 'Somem', 'aerometru'], ['aeromitinga', '', 'Somdi', 'aeromitinga'], ['aeromitinga', '', 'Somdt', 'aeromitinga'], ['aeromitinga', '', 'Somer', 'aeromitinga'], ['aeromiting', '', 'Somei', 'aeromiting'], ['aeromiting', '', 'Sometn', 'aeromiting'], ['aeromitinge', '', 'Sommt', 'aeromitinge'], ['aeromitingi', '', 'Sommi', 'aeromitingi'], ['aeromitingi', '', 'Sommo', 'aeromitingi'], ['aeromitingih', '', 'Somdm', 'aeromitingih'], ['aeromitingih', '', 'Sommm', 'aeromitingih'], ['aeromitingoma', '', 'Somdd', 'aeromitingoma'], ['aeromitingoma', '', 'Somdo', 'aeromitingoma'], ['aeromitingom', '', 'Someo', 'aeromitingom'], ['aeromitingom', '', 'Sommd', 'aeromitingom'], ['aeromitingov', '', 'Somdr', 'aeromitingov'], ['aeromitingov', '', 'Sommr', 'aeromitingov'], ['aeromitingu', '', 'Somed', 'aeromitingu'], ['aeromitingu', '', 'Somem', 'aeromitingu'], ['aeronautika', '', 'Sozei', 'aeronautika'], ['aeronautik', '', 'Sozdr', 'aeronautik'], ['aeronautik', '', 'Sozmr', 'aeronautik'], ['aeronautikah', '', 'Sozdm', 'aeronautikah'], ['aeronautikah', '', 'Sozmm', 'aeronautikah'], ['aeronautikama', '', 'Sozdd', 'aeronautikama'], ['aeronautikama', '', 'Sozdo', 'aeronautikama'], ['aeronautikam', '', 'Sozmd', 'aeronautikam'], ['aeronautikami', '', 'Sozmo', 'aeronautikami'], ['aeronautike', '', 'Sozer', 'aeronautike'], ['aeronautike', '', 'Sozmi', 'aeronautike'], ['aeronautike', '', 'Sozmt', 'aeronautike'], ['aeronautiki', '', 'Sozdi', 'aeronautiki'], ['aeronautiki', '', 'Sozdt', 'aeronautiki'], ['aeronautiki', '', 'Sozed', 'aeronautiki'], ['aeronautiki', '', 'Sozem', 'aeronautiki'], ['aeronautiko', '', 'Sozeo', 'aeronautiko'], ['aeronautiko', '', 'Sozet', 'aeronautiko'], ['aeronavta', '', 'Somdi', 'aeronavta'], ['aeronavta', '', 'Somdt', 'aeronavta'], ['aeronavta', '', 'Somer', 'aeronavta'], ['aeronavta', '', 'Sometd', 'aeronavta'], ['aeronavt', '', 'Somei', 'aeronavt'], ['aeronavte', '', 'Sommt', 'aeronavte'], ['aeronavti', '', 'Sommi', 'aeronavti'], ['aeronavti', '', 'Sommo', 'aeronavti'], ['aeronavtiฤen', '', 'Ppnmein', 'aeronavtiฤen'], ['aeronavtiฤen', '', 'Ppnmetn', 'aeronavtiฤen'], ['aeronavtiฤna', '', 'Ppnmdi', 'aeronavtiฤna'], ['aeronavtiฤna', '', 'Ppnmdt', 'aeronavtiฤna'], ['aeronavtiฤna', '', 'Ppnsmi', 'aeronavtiฤna'], ['aeronavtiฤna', '', 'Ppnsmt', 'aeronavtiฤna'], ['aeronavtiฤna', '', 'Ppnzei', 'aeronavtiฤna'], ['aeronavtiฤne', '', 'Ppnmmt', 'aeronavtiฤne'], ['aeronavtiฤne', '', 'Ppnzer', 'aeronavtiฤne'], ['aeronavtiฤne', '', 'Ppnzmi', 'aeronavtiฤne'], ['aeronavtiฤne', '', 'Ppnzmt', 'aeronavtiฤne'], ['aeronavtiฤnega', '', 'Ppnmer', 'aeronavtiฤnega'], ['aeronavtiฤnega', '', 'Ppnmet', 'aeronavtiฤnega'], ['aeronavtiฤnega', '', 'Ppnser', 'aeronavtiฤnega'], ['aeronavtiฤnem', '', 'Ppnmem', 'aeronavtiฤnem'], ['aeronavtiฤnem', '', 'Ppnsem', 
'aeronavtiฤnem'], ['aeronavtiฤnemu', '', 'Ppnmed', 'aeronavtiฤnemu'], ['aeronavtiฤnemu', '', 'Ppnsed', 'aeronavtiฤnemu'], ['aeronavtiฤni', '', 'Ppnmeid', 'aeronavtiฤni'], ['aeronavtiฤni', '', 'Ppnmetd', 'aeronavtiฤni'], ['aeronavtiฤni', '', 'Ppnmmi', 'aeronavtiฤni'], ['aeronavtiฤni', '', 'Ppnsdi', 'aeronavtiฤni'], ['aeronavtiฤni', '', 'Ppnsdt', 'aeronavtiฤni'], ['aeronavtiฤni', '', 'Ppnzdi', 'aeronavtiฤni'], ['aeronavtiฤni', '', 'Ppnzdt', 'aeronavtiฤni'], ['aeronavtiฤni', '', 'Ppnzed', 'aeronavtiฤni'], ['aeronavtiฤni', '', 'Ppnzem', 'aeronavtiฤni'], ['aeronavtiฤnih', '', 'Ppnmdm', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnmdr', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnmmm', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnmmr', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnsdm', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnsdr', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnsmm', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnsmr', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnzdm', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnzdr', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnzmm', 'aeronavtiฤnih'], ['aeronavtiฤnih', '', 'Ppnzmr', 'aeronavtiฤnih'], ['aeronavtiฤnima', '', 'Ppnmdd', 'aeronavtiฤnima'], ['aeronavtiฤnima', '', 'Ppnmdo', 'aeronavtiฤnima'], ['aeronavtiฤnima', '', 'Ppnsdd', 'aeronavtiฤnima'], ['aeronavtiฤnima', '', 'Ppnsdo', 'aeronavtiฤnima'], ['aeronavtiฤnima', '', 'Ppnzdd', 'aeronavtiฤnima'], ['aeronavtiฤnima', '', 'Ppnzdo', 'aeronavtiฤnima'], ['aeronavtiฤnim', '', 'Ppnmeo', 'aeronavtiฤnim'], ['aeronavtiฤnim', '', 'Ppnmmd', 'aeronavtiฤnim'], ['aeronavtiฤnim', '', 'Ppnseo', 'aeronavtiฤnim'], ['aeronavtiฤnim', '', 'Ppnsmd', 'aeronavtiฤnim'], ['aeronavtiฤnim', '', 'Ppnzmd', 'aeronavtiฤnim'], ['aeronavtiฤnimi', '', 'Ppnmmo', 'aeronavtiฤnimi'], ['aeronavtiฤnimi', '', 'Ppnsmo', 'aeronavtiฤnimi'], ['aeronavtiฤnimi', '', 'Ppnzmo', 'aeronavtiฤnimi'], ['aeronavtiฤno', '', 'Ppnsei', 'aeronavtiฤno'], ['aeronavtiฤno', '', 'Ppnset', 'aeronavtiฤno'], ['aeronavtiฤno', '', 'Ppnzeo', 'aeronavtiฤno'], ['aeronavtiฤno', '', 'Ppnzet', 'aeronavtiฤno'], ['aeronavtih', '', 'Somdm', 'aeronavtih'], ['aeronavtih', '', 'Sommm', 'aeronavtih'], ['aeronavtika', '', 'Sozei', 'aeronavtika'], ['aeronavtik', '', 'Sozdr', 'aeronavtik'], ['aeronavtik', '', 'Sozmr', 'aeronavtik'], ['aeronavtikah', '', 'Sozdm', 'aeronavtikah'], ['aeronavtikah', '', 'Sozmm', 'aeronavtikah'], ['aeronavtikama', '', 'Sozdd', 'aeronavtikama'], ['aeronavtikama', '', 'Sozdo', 'aeronavtikama'], ['aeronavtikam', '', 'Sozmd', 'aeronavtikam'], ['aeronavtikami', '', 'Sozmo', 'aeronavtikami'], ['aeronavtike', '', 'Sozer', 'aeronavtike'], ['aeronavtike', '', 'Sozmi', 'aeronavtike'], ['aeronavtike', '', 'Sozmt', 'aeronavtike'], ['aeronavtiki', '', 'Sozdi', 'aeronavtiki'], ['aeronavtiki', '', 'Sozdt', 'aeronavtiki'], ['aeronavtiki', '', 'Sozed', 'aeronavtiki'], ['aeronavtiki', '', 'Sozem', 'aeronavtiki'], ['aeronavtiko', '', 'Sozeo', 'aeronavtiko'], ['aeronavtiko', '', 'Sozet', 'aeronavtiko'], ['aeronavtka', '', 'Sozei', 'aeronavtka'], ['aeronavtk', '', 'Sozdr', 'aeronavtk'], ['aeronavtk', '', 'Sozmr', 'aeronavtk'], ['aeronavtkah', '', 'Sozdm', 'aeronavtkah'], ['aeronavtkah', '', 'Sozmm', 'aeronavtkah'], ['aeronavtkama', '', 'Sozdd', 'aeronavtkama'], ['aeronavtkama', '', 'Sozdo', 'aeronavtkama'], ['aeronavtkam', '', 'Sozmd', 'aeronavtkam'], ['aeronavtkami', '', 'Sozmo', 'aeronavtkami'], ['aeronavtke', '', 'Sozer', 'aeronavtke'], ['aeronavtke', '', 'Sozmi', 'aeronavtke'], 
['aeronavtke', '', 'Sozmt', 'aeronavtke'], ['aeronavtki', '', 'Sozdi', 'aeronavtki'], ['aeronavtki', '', 'Sozdt', 'aeronavtki'], ['aeronavtki', '', 'Sozed', 'aeronavtki'], ['aeronavtki', '', 'Sozem', 'aeronavtki'], ['aeronavtko', '', 'Sozeo', 'aeronavtko'], ['aeronavtko', '', 'Sozet', 'aeronavtko'], ['aeronavtoma', '', 'Somdd', 'aeronavtoma'], ['aeronavtoma', '', 'Somdo', 'aeronavtoma'], ['aeronavtom', '', 'Someo', 'aeronavtom'], ['aeronavtom', '', 'Sommd', 'aeronavtom'], ['aeronavtov', '', 'Somdr', 'aeronavtov'], ['aeronavtov', '', 'Sommr', 'aeronavtov'], ['aeronavtu', '', 'Somed', 'aeronavtu'], ['aeronavtu', '', 'Somem', 'aeronavtu'], ['aeroplana', '', 'Somdi', 'aeroplana'], ['aeroplana', '', 'Somdt', 'aeroplana'], ['aeroplana', '', 'Somer', 'aeroplana'], ['aeroplan', '', 'Somei', 'aeroplan'], ['aeroplan', '', 'Sometn', 'aeroplan'], ['aeroplane', '', 'Sommt', 'aeroplane'], ['aeroplani', '', 'Sommi', 'aeroplani'], ['aeroplani', '', 'Sommo', 'aeroplani'], ['aeroplanih', '', 'Somdm', 'aeroplanih'], ['aeroplanih', '', 'Sommm', 'aeroplanih'], ['aeroplanoma', '', 'Somdd', 'aeroplanoma'], ['aeroplanoma', '', 'Somdo', 'aeroplanoma'], ['aeroplanom', '', 'Someo', 'aeroplanom'], ['aeroplanom', '', 'Sommd', 'aeroplanom'], ['aeroplanov', '', 'Somdr', 'aeroplanov'], ['aeroplanov', '', 'Sommr', 'aeroplanov'], ['aeroplanu', '', 'Somed', 'aeroplanu'], ['aeroplanu', '', 'Somem', 'aeroplanu'], ['aeroporta', '', 'Somdi', 'aeroporta'], ['aeroporta', '', 'Somdt', 'aeroporta'], ['aeroporta', '', 'Somer', 'aeroporta'], ['aeroport', '', 'Somei', 'aeroport'], ['aeroport', '', 'Sometn', 'aeroport'], ['aeroporte', '', 'Sommt', 'aeroporte'], ['aeroporti', '', 'Sommi', 'aeroporti'], ['aeroporti', '', 'Sommo', 'aeroporti'], ['aeroportih', '', 'Somdm', 'aeroportih'], ['aeroportih', '', 'Sommm', 'aeroportih'], ['aeroportoma', '', 'Somdd', 'aeroportoma'], ['aeroportoma', '', 'Somdo', 'aeroportoma'], ['aeroportom', '', 'Someo', 'aeroportom'], ['aeroportom', '', 'Sommd', 'aeroportom'], ['aeroportov', '', 'Somdr', 'aeroportov'], ['aeroportov', '', 'Sommr', 'aeroportov'], ['aeroportu', '', 'Somed', 'aeroportu'], ['aeroportu', '', 'Somem', 'aeroportu'], ['aeroposnetek', '', 'Somei', 'aeroposnetek'], ['aeroposnetek', '', 'Sometn', 'aeroposnetek'], ['aeroposnetka', '', 'Somdi', 'aeroposnetka'], ['aeroposnetka', '', 'Somdt', 'aeroposnetka'], ['aeroposnetka', '', 'Somer', 'aeroposnetka'], ['aeroposnetke', '', 'Sommt', 'aeroposnetke'], ['aeroposnetki', '', 'Sommi', 'aeroposnetki'], ['aeroposnetki', '', 'Sommo', 'aeroposnetki'], ['aeroposnetkih', '', 'Somdm', 'aeroposnetkih'], ['aeroposnetkih', '', 'Sommm', 'aeroposnetkih'], ['aeroposnetkoma', '', 'Somdd', 'aeroposnetkoma'], ['aeroposnetkoma', '', 'Somdo', 'aeroposnetkoma'], ['aeroposnetkom', '', 'Someo', 'aeroposnetkom'], ['aeroposnetkom', '', 'Sommd', 'aeroposnetkom'], ['aeroposnetkov', '', 'Somdr', 'aeroposnetkov'], ['aeroposnetkov', '', 'Sommr', 'aeroposnetkov'], ['aeroposnetku', '', 'Somed', 'aeroposnetku'], ['aeroposnetku', '', 'Somem', 'aeroposnetku'], ['aerosola', '', 'Somdi', 'aerosola'], ['aerosola', '', 'Somdt', 'aerosola'], ['aerosola', '', 'Somer', 'aerosola'], ['aerosol', '', 'Somei', 'aerosol'], ['aerosol', '', 'Sometn', 'aerosol'], ['aerosole', '', 'Sommt', 'aerosole'], ['aerosoli', '', 'Sommi', 'aerosoli'], ['aerosoli', '', 'Sommo', 'aerosoli'], ['aerosolih', '', 'Somdm', 'aerosolih'], ['aerosolih', '', 'Sommm', 'aerosolih'], ['aerosoloma', '', 'Somdd', 'aerosoloma'], ['aerosoloma', '', 'Somdo', 'aerosoloma'], ['aerosolom', '', 'Someo', 'aerosolom'], ['aerosolom', 
'', 'Sommd', 'aerosolom'], ['aerosolov', '', 'Somdr', 'aerosolov'], ['aerosolov', '', 'Sommr', 'aerosolov'], ['aerosolu', '', 'Somed', 'aerosolu'], ['aerosolu', '', 'Somem', 'aerosolu'], ['aerostata', '', 'Somdi', 'aerostata'], ['aerostata', '', 'Somdt', 'aerostata'], ['aerostata', '', 'Somer', 'aerostata'], ['aerostat', '', 'Somei', 'aerostat'], ['aerostat', '', 'Sometn', 'aerostat'], ['aerostate', '', 'Sommt', 'aerostate'], ['aerostati', '', 'Sommi', 'aerostati'], ['aerostati', '', 'Sommo', 'aerostati'], ['aerostatih', '', 'Somdm', 'aerostatih'], ['aerostatih', '', 'Sommm', 'aerostatih'], ['aerostatika', '', 'Sozei', 'aerostatika'], ['aerostatike', '', 'Sozer', 'aerostatike'], ['aerostatiki', '', 'Sozed', 'aerostatiki'], ['aerostatiki', '', 'Sozem', 'aerostatiki'], ['aerostatiko', '', 'Sozeo', 'aerostatiko'], ['aerostatiko', '', 'Sozet', 'aerostatiko'], ['aerostatoma', '', 'Somdd', 'aerostatoma'], ['aerostatoma', '', 'Somdo', 'aerostatoma'], ['aerostatom', '', 'Someo', 'aerostatom'], ['aerostatom', '', 'Sommd', 'aerostatom'], ['aerostatov', '', 'Somdr', 'aerostatov'], ['aerostatov', '', 'Sommr', 'aerostatov'], ['aerostatu', '', 'Somed', 'aerostatu'], ['aerostatu', '', 'Somem', 'aerostatu'], ['Aerova', '', 'Psnmdi', 'Aerova'], ['Aerova', '', 'Psnmdt', 'Aerova'], ['Aerova', '', 'Psnsmi', 'Aerova'], ['Aerova', '', 'Psnsmt', 'Aerova'], ['Aerova', '', 'Psnzei', 'Aerova'], ['Aerov', '', 'Psnmein', 'Aerov'], ['Aerov', '', 'Psnmetn', 'Aerov'], ['Aerove', '', 'Psnmmt', 'Aerove'], ['Aerove', '', 'Psnzer', 'Aerove'], ['Aerove', '', 'Psnzmi', 'Aerove'], ['Aerove', '', 'Psnzmt', 'Aerove'], ['Aerovega', '', 'Psnmer', 'Aerovega'], ['Aerovega', '', 'Psnmet', 'Aerovega']]\n" ], [ "#Words proccesed: 650250\n#Word indeks: 50023\n#Word number: 50023\n\n#done_lexical_entries = 33522\n\n#new_content = data._read_content('sloleks-sl_v1.2.tbl')\nrate = 100000\nstart_timer = time.time()\nwith open(\"data/new_sloleks/new_sloleks.tab\", \"a\") as myfile:\n for index in range(0, len(new_content), rate):\n if index+rate >= len(new_content):\n words = [[el[0], '', el[2], el[0]] for el in new_content][index:len(new_content)]\n else:\n words = [[el[0], '', el[2], el[0]] for el in new_content][index:index+rate]\n data = Data('l', shuffle_all_inputs=False)\n location_accented_words, accented_words = data.accentuate_word(words, letter_location_model, syllable_location_model, syllabled_letters_location_model,\n letter_location_co_model, syllable_location_co_model, syllabled_letters_location_co_model,\n letter_type_model, syllable_type_model, syllabled_letter_type_model,\n letter_type_co_model, syllable_type_co_model, syllabled_letter_type_co_model,\n dictionary, max_word, max_num_vowels, vowels, accented_vowels, feature_dictionary, syllable_dictionary)\n\n res = ''\n for i in range(index, index + len(words)):\n res += new_content[i][0] + '\\t' + new_content[i][1] + '\\t' + new_content[i][2] + '\\t' \\\n + new_content[i][3][:-1] + '\\t' + location_accented_words[i-index] + '\\t' + accented_words[i-index] + '\\n'\n\n print('Writing data from ' + str(index) + ' onward.')\n end_timer = time.time()\n print(\"Elapsed time: \" + \"{0:.2f}\".format((end_timer - start_timer)/60.0) + \" minutes\")\n myfile.write(res)", "Writing data from 0 onward.\nElapsed time: 46.20 minutes\nWriting data from 100000 onward.\nElapsed time: 89.81 minutes\nWriting data from 200000 onward.\nElapsed time: 134.45 minutes\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb0e8dda0eae5604192c929e097d8c0b71efa329
24,601
ipynb
Jupyter Notebook
.ipynb_checkpoints/push_tutorial-checkpoint.ipynb
darkfirzenmax/Line_chatbot_tutorial
bdd46379c3efaa3243be992a51480fcea18bf35b
[ "MIT" ]
26
2018-12-01T10:55:53.000Z
2021-11-28T23:54:01.000Z
.ipynb_checkpoints/push_tutorial-checkpoint.ipynb
darkfirzenmax/Line_chatbot_tutorial
bdd46379c3efaa3243be992a51480fcea18bf35b
[ "MIT" ]
1
2019-08-09T18:19:19.000Z
2019-08-09T18:27:25.000Z
.ipynb_checkpoints/push_tutorial-checkpoint.ipynb
darkfirzenmax/Line_chatbot_tutorial
bdd46379c3efaa3243be992a51480fcea18bf35b
[ "MIT" ]
14
2018-07-23T06:53:59.000Z
2021-05-25T04:00:22.000Z
26.538296
169
0.453965
[ [ [ "from linebot import LineBotApi\nfrom linebot.exceptions import LineBotApiError", "_____no_output_____" ] ], [ [ "# ๅฎ˜ๆ–นDEMO- Message Type :https://developers.line.me/en/docs/messaging-api/message-types/\n", "_____no_output_____" ], [ "# Doc : https://github.com/line/line-bot-sdk-python/blob/master/linebot/models/send_messages.py ", "_____no_output_____" ] ], [ [ "CHANNEL_ACCESS_TOKEN = \"YOUR CHANNEL TOKEN\"", "_____no_output_____" ], [ "# user ID - Get by reply message object.\nto = \"YOUR USER ID\"", "_____no_output_____" ], [ "from linebot.models import TextSendMessage", "_____no_output_____" ] ], [ [ "# TextSendMessage", "_____no_output_____" ] ], [ [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\n\ntry:\n line_bot_api.push_message(to, TextSendMessage(text='ๅฐ็ง‘ๅคง้›ป่…ฆ็ ”็ฟ’็คพ'))\nexcept LineBotApiError as e:\n # error handle\n raise e", "_____no_output_____" ] ], [ [ "# Output", "_____no_output_____" ], [ "![](img/SendText_Output.png)", "_____no_output_____" ] ], [ [ "from linebot.models import ImageSendMessage, VideoSendMessage, LocationSendMessage, StickerSendMessage", "_____no_output_____" ] ], [ [ "# ImageSendMessage", "_____no_output_____" ], [ "### ้€ฃ็ต้œ€่ฆไฝฟ็”จhttps", "_____no_output_____" ], [ "็‰ฉไปถไธญ็š„่ผธๅ…ฅ original_content_url ไปฅๅŠ preview_image_url้ƒฝ่ฆๅฏซๆ‰ไธๆœƒๅ ฑ้Œฏใ€‚<br>\n่ผธๅ…ฅ็š„็ถฒๅ€่ฆๆ˜ฏไธ€ๅ€‹ๅœ–็‰‡๏ผŒๆ‡‰่ฉฒ่ชชๅช่ƒฝๆ˜ฏไธ€ๅ€‹ๅœ–็‰‡๏ผŒไธ็„ถไธๆœƒๅ ฑ้Œฏไฝ†ๆ˜ฏๅ‚ณ้ŽๅŽปๆ˜ฏ็ฐ่‰ฒไธ่ƒฝ็”จ็š„ๅœ–<br>\nไป–ๅœ–็‰‡็š„ๅŠŸ่ƒฝ้ƒฝๆœ‰้ปžๅ•้กŒ๏ผŒๆˆ‘้ƒฝ่ฆไธŸๅˆฐimgurๅœ–ๅบŠๆ‰่ƒฝ้ˆ็ตใ€‚ใ€‚ใ€‚<br>\n็›ดๆŽฅไธŸ้€ฒgithub ็”จrawไนŸไธ่ƒฝ่ฎ€<br>", "_____no_output_____" ] ], [ [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\nimage_url = \"https://i.imgur.com/eTldj2E.png?1\"\ntry:\n line_bot_api.push_message(to, ImageSendMessage(original_content_url=image_url, preview_image_url=image_url))\nexcept LineBotApiError as e:\n # error handle\n raise e", "_____no_output_____" ] ], [ [ "# Output", "_____no_output_____" ], [ "![](img/SendImage_Output.png)", "_____no_output_____" ] ], [ [ "from linebot.models import LocationSendMessage", "_____no_output_____" ] ], [ [ "# LocationSendMessage", "_____no_output_____" ] ], [ [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\ntitle = \"ๅœ‹็ซ‹่‡บ็ฃ็ง‘ๆŠ€ๅคงๅญธ\"\naddress = \"106ๅฐๅŒ—ๅธ‚ๅคงๅฎ‰ๅ€ๅŸบ้š†่ทฏๅ››ๆฎต43่™Ÿ\"\nlatitude = 25.0136906\nlongitude = 121.5406792\ntry:\n line_bot_api.push_message(to, LocationSendMessage(title=title,\n address=address,\n latitude=latitude,\n longitude=longitude))\nexcept LineBotApiError as e:\n # error handle\n raise e", "_____no_output_____" ] ], [ [ "# Output", "_____no_output_____" ], [ "![](img/SendLocation.png)", "_____no_output_____" ] ], [ [ "from linebot.models import StickerSendMessage", "_____no_output_____" ] ], [ [ "# StickerSendMessage", "_____no_output_____" ], [ "็…งไธ‹้ข้€™ๆฎต่ฉฑ็š„ๆ„ๆ€ๆ˜ฏ่ชช๏ผŒๅช่ƒฝ็”จ้ ่จญ็š„๏ผ๏ผ๏ผ<br>\nMessage object which contains the sticker data sent from the source.<br>\nFor a list of basic LINE stickers and sticker IDs, see sticker list.<br>\n\nstick list:https://developers.line.me/media/messaging-api/messages/sticker_list.pdf", "_____no_output_____" ] ], [ [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\npackage_id = \"1\"\nsticker_id = \"1\"\n# package_id = \"1181660\"\n# sticker_id = \"7389429\"\ntry:\n line_bot_api.push_message(to, StickerSendMessage(package_id=package_id, sticker_id=sticker_id))\nexcept LineBotApiError as e:\n # error handle\n raise e\n", "_____no_output_____" ] ], [ [ "# Output", "_____no_output_____" ], [ 
"![](img/SendSticker.png)", "_____no_output_____" ], [ "# ImagemapSendMessage", "_____no_output_____" ] ], [ [ "from linebot.models import ImagemapSendMessage, BaseSize, URIImagemapAction, MessageImagemapAction, ImagemapArea", "_____no_output_____" ] ], [ [ "้€™้‚Š่งฃ่ชฌไธ€ไธ‹\n่ผธๅ…ฅไธ€ๅผตๅœ–็‰‡็š„็ถฒๅ€https๏ผŒ\nไป–ๆœƒ้กฏ็คบไธ€ๅผตๅœ–็‰‡๏ผŒ\nไฝ†ๆ˜ฏๅฏไปฅๅฐ้€™ๅผตๅœ–็‰‡็š„้ปž้ธ็ฏ„ๅœๅšไธ€ไบ›ๆ“ไฝœ\nไพ‹ๅฆ‚ๅฐๆŸๅ€ๅกŠ้ปžๆ“Šๆœƒ็™ผ็”Ÿไป€้บผไบ‹\n", "_____no_output_____" ], [ "่ˆ‰ไพ‹๏ผš่ผธๅ…ฅไธ€ๅผตๅœ–็‰‡๏ผˆๅฆ‚ไธ‹ๅœ–๏ผ‰by colors https://coolors.co/ffb8d1-e4b4c2-e7cee3-e0e1e9-ddfdfe", "_____no_output_____" ], [ "Imagemap ่ฎ“ๆˆ‘ๅ€‘ๅฏไปฅๅฐๅœ–็‰‡็š„ๅ€ๅกŠ๏ผˆ็ตฆๅฎšไธ€ๅ€‹็ฏ„ๅœ๏ผ‰ๅšๆ“ไฝœ๏ผŒ<br>\nไพ‹ๅฆ‚ๆˆ‘ๅ€‘่ฆไฝฟ็”จ<br>\nๆœ€ๅทฆ้‚Š็š„้ก่‰ฒ<br>\n้ปžๆ“ŠๅพŒ่ผธๅ‡บ่‰ฒ็ฅจ<br>\n<br>\nๆœ€ๅณ้‚Š็š„้ก่‰ฒ<br>\n้ปžๆ“ŠๅพŒ่ฝ‰่‡ณ็ถฒๅ€<br>\nไป–ๅœ–็‰‡็š„ๅŠŸ่ƒฝ้ƒฝๆœ‰้ปžๅ•้กŒ๏ผŒๆˆ‘้ƒฝ่ฆไธŸๅˆฐimgurๅœ–ๅบŠๆ‰่ƒฝ้ˆ็ตใ€‚ใ€‚ใ€‚<br>\n็›ดๆŽฅไธŸ้€ฒgithub ็”จrawไนŸไธ่ƒฝ่ฎ€<br>\n่€Œไธ”ไป–ไธๆœƒ่‡ชๅ‹•็ธฎๆ”พ๏ผŒๆœƒ่ฃๆމ", "_____no_output_____" ], [ "![](img/colors.png)", "_____no_output_____" ] ], [ [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\n# image_url = \"https://raw.githubusercontent.com/xiaosean/Line_tutorial/master/img/colors.png\"\nimage_url = \"https://i.imgur.com/mB9yDO0.png\"\n\ntext = \"#FFB8D1\"\nclick_link_1 = \"https://www.facebook.com/ntustcc\"\n\n\ntry:\n line_bot_api.push_message(to, ImagemapSendMessage(base_url=image_url,\n alt_text=\"ImageMap Example\",\n base_size=BaseSize(height=1040, width=1040),\n actions=[\n MessageImagemapAction(\n text=text,\n area=ImagemapArea(\n x=0, y=0, width=1040/5, height=1040\n )\n ),\n URIImagemapAction(\n link_uri=click_link_1,\n area=ImagemapArea(\n x=int(1040*0.8), y=0, width=int(1040/5), height=1040\n )\n )\n ]))\nexcept LineBotApiError as e:\n # error handle\n raise e\n", "_____no_output_____" ] ], [ [ "# Output", "_____no_output_____" ], [ "![](img/SendImagemap_Output_revise.png)", "_____no_output_____" ], [ "# TemplateSendMessage - ButtonsTemplate ๅชๅฏๅœจๆ™บๆ…งๆ‰‹ๆฉŸไธŠ้กฏ็คบ", "_____no_output_____" ], [ "doc:https://github.com/line/line-bot-sdk-python/blob/master/linebot/models/template.py", "_____no_output_____" ], [ "้€™้ƒจๅˆ†ๆˆ‘ๅปบ่ญฐ็œ‹้€™ๅ€‹ไบบๆ‰€ๅฏซ็š„ - ไป–็š„ๅœ–็‰‡ๅพˆ็”จๅฟƒ๏ผŒ็œŸๅฅฝ็œ‹๏ผ๏ผ\nhttps://ithelp.ithome.com.tw/articles/10195640?sc=iThomeR", "_____no_output_____" ] ], [ [ "from linebot.models import TemplateSendMessage, ButtonsTemplate, PostbackTemplateAction, MessageTemplateAction, URITemplateAction", "_____no_output_____" ], [ "button_template_message =ButtonsTemplate(\n thumbnail_image_url=\"https://i.imgur.com/eTldj2E.png?1\",\n title='Menu', \n text='Please select',\n ratio=\"1.51:1\",\n image_size=\"cover\",\n actions=[\n# PostbackTemplateAction ้ปžๆ“Š้ธ้ …ๅพŒ๏ผŒ\n# ้™คไบ†ๆ–‡ๅญ—ๆœƒ้กฏ็คบๅœจ่Šๅคฉๅฎคไธญ๏ผŒ\n# ้‚„ๅ›žๅ‚ณdataไธญ็š„่ณ‡ๆ–™๏ผŒๅฏ\n# ๆญค้กž้€้Ž Postback event ่™•็†ใ€‚\n PostbackTemplateAction(\n label='postback้‚„ๆœƒๅ›žๅ‚ณdataๅƒๆ•ธ', \n text='postback text',\n data='action=buy&itemid=1'\n ),\n MessageTemplateAction(\n label='messageๆœƒๅ›žๅ‚ณtextๆ–‡ๅญ—', text='message text'\n ),\n URITemplateAction(\n label='uriๅฏๅ›žๅ‚ณ็ถฒๅ€', uri='http://www.xiaosean.website/'\n )\n ]\n )\n ", "_____no_output_____" ], [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\n\ntry:\n# alt_text ๅ› templateๅช่ƒฝๅค ๅœจๆ‰‹ๆฉŸไธŠ้กฏ็คบ๏ผŒๅ› ๆญคๅœจPC็‰ˆๆœƒไฝฟ็”จalt_Textๆ›ฟไปฃ\n line_bot_api.push_message(to, TemplateSendMessage(alt_text=\"Template Example\", template=button_template_message))\nexcept LineBotApiError as e:\n # error handle\n 
raise e\n", "_____no_output_____" ] ], [ [ "# Output", "_____no_output_____" ], [ "![](img/SendTemplate_Output_revise.png)", "_____no_output_____" ], [ "# TemplateSendMessage - CarouselTemplate ๅชๅฏๅœจๆ™บๆ…งๆ‰‹ๆฉŸไธŠ้กฏ็คบ", "_____no_output_____" ], [ "ไป–ๅ’Œๅ‰ไธ€ๅ€‹็š„ๅทฎๅˆฅๆ˜ฏไป–ๅฏไปฅไธ€ๆฌกๅ‚ณ้€ๅคšๅ€‹Templateไธฆไธ”ๅฏไปฅ็”จๆ—‹่ฝ‰็š„ๆ–นๅผ่ฝ‰้ŽๅŽป 1...n", "_____no_output_____" ] ], [ [ "from linebot.models import TemplateSendMessage, CarouselTemplate, CarouselColumn, ButtonsTemplate, PostbackTemplateAction, MessageTemplateAction, URITemplateAction", "_____no_output_____" ], [ "image_url_1 = \"https://i.imgur.com/eTldj2E.png?1\"\nimage_url_2 = \"https://i.imgur.com/mB9yDO0.png\"\nclick_link_1 = \"https://www.facebook.com/ntustcc\"\nclick_link_2 = \"https://www.facebook.com/ntustcc\"\ncarousel_template = template=CarouselTemplate(\n columns=[\n CarouselColumn(\n thumbnail_image_url=image_url_1,\n title='template-1',\n text='text-1',\n actions=[\n PostbackTemplateAction(\n label='postback-1',\n text='postback text1',\n data='result=1'\n ),\n MessageTemplateAction(\n label='message-1',\n text='message text1'\n ),\n URITemplateAction(\n label='uri-1',\n uri=click_link_1\n )\n ]\n ),\n CarouselColumn(\n thumbnail_image_url=image_url_2,\n title='template-2',\n text='text-2',\n actions=[\n PostbackTemplateAction(\n label='postback-2',\n text='postback text2',\n data='result=2'\n ),\n MessageTemplateAction(\n label='message-2',\n text='message text2'\n ),\n URITemplateAction(\n label='link-2',\n uri=click_link_2\n )\n ]\n )]\n )\n ", "_____no_output_____" ], [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\n\ntry:\n# alt_text ๅ› templateๅช่ƒฝๅค ๅœจๆ‰‹ๆฉŸไธŠ้กฏ็คบ๏ผŒๅ› ๆญคๅœจPC็‰ˆๆœƒไฝฟ็”จalt_Textๆ›ฟไปฃ\n line_bot_api.push_message(to, TemplateSendMessage(alt_text=\"Carousel Template Example\", template=carousel_template))\nexcept LineBotApiError as e:\n # error handle\n raise e\n", "_____no_output_____" ] ], [ [ "# Output", "_____no_output_____" ], [ "![](img/Carousel-button.png)", "_____no_output_____" ], [ "# TemplateSendMessage - ImageCarouselTemplate ๅชๅฏๅœจๆ™บๆ…งๆ‰‹ๆฉŸไธŠ้กฏ็คบ", "_____no_output_____" ], [ "ไป–ๅ’Œๅ‰ไธ€ๅ€‹็š„ๅทฎๅˆฅๆ˜ฏๆ•ดๅ€‹็‰ˆ้ข้ƒฝๆ˜ฏๅœ–็‰‡ๅ’Œไธ€่กŒๆ–‡ๅญ—๏ผŒ่ผƒ็‚บ็ฐกๆฝ”๏ผŒ่ซ‹็œ‹็ตๆžœใ€‚", "_____no_output_____" ] ], [ [ "from linebot.models import TemplateSendMessage, ImageCarouselTemplate, ImageCarouselColumn, PostbackTemplateAction, MessageTemplateAction, URITemplateAction", "_____no_output_____" ], [ "image_url_1 = \"https://i.imgur.com/eTldj2E.png?1\"\nimage_url_2 = \"https://i.imgur.com/mB9yDO0.png\"\ncarousel_template = template=ImageCarouselTemplate(\n columns=[\n ImageCarouselColumn(\n image_url=image_url_1,\n action=MessageTemplateAction(\n label='message-1',\n text='message text1'\n )\n ),\n ImageCarouselColumn(\n image_url=image_url_2,\n action=PostbackTemplateAction(\n label='postback-2',\n text='postback text2',\n data='result=2'\n ),\n )]\n )\n ", "_____no_output_____" ], [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\n\ntry:\n# alt_text ๅ› templateๅช่ƒฝๅค ๅœจๆ‰‹ๆฉŸไธŠ้กฏ็คบ๏ผŒๅ› ๆญคๅœจPC็‰ˆๆœƒไฝฟ็”จalt_Textๆ›ฟไปฃ\n line_bot_api.push_message(to, TemplateSendMessage(alt_text=\"Image Carousel Template Example\", template=carousel_template))\nexcept LineBotApiError as e:\n # error handle\n raise e", "_____no_output_____" ] ], [ [ "# Output", "_____no_output_____" ], [ "![](img/Image-Carousel.png)", "_____no_output_____" ], [ "# TemplateActionๆœ‰ๅ€‹DatetimePickerTemplateAction", "_____no_output_____" ] ], [ [ "from linebot.models import 
TemplateSendMessage, ButtonsTemplate, DatetimePickerTemplateAction", "_____no_output_____" ], [ "button_template_message =ButtonsTemplate(\n thumbnail_image_url=\"https://i.imgur.com/eTldj2E.png?1\",\n title='Menu', \n text='Please select',\n actions=[\n DatetimePickerTemplateAction(\n label=\"datetime picker date\",\n # ็ญ‰ๅŒPostbackTemplateActionไธญ็š„data, in the postback.data property of the postback event\n data=\"action=sell&itemid=2&mode=date\",\n mode=\"date\",\n initial=\"2013-04-01\",\n min=\"2011-06-23\",\n max=\"2017-09-08\"\n ),\n DatetimePickerTemplateAction(\n label=\"datetime picker time\",\n data=\"action=sell&itemid=2&mode=time\",\n mode=\"time\",\n initial=\"10:00\",\n min=\"00:00\",\n max=\"23:59\"\n )\n# below part failed, I have reported issue\n# https://github.com/line/line-bot-sdk-python/issues/100\n# DatetimePickerTemplateAction(\n# label=\"datetime picker datetime\",\n# data=\"action=sell&itemid=2&mode=datetime\",\n# mode=\"datetime\",\n# initial=\"2018-04-01T10:00\",\n# min=\"2011-06-23T00:00\",\n# max=\"2019-09-08T23:59\"\n# )\n ]\n )\n ", "_____no_output_____" ], [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\n\ntry:\n# alt_text ๅ› templateๅช่ƒฝๅค ๅœจๆ‰‹ๆฉŸไธŠ้กฏ็คบ๏ผŒๅ› ๆญคๅœจPC็‰ˆๆœƒไฝฟ็”จalt_Textๆ›ฟไปฃ\n line_bot_api.push_message(to, TemplateSendMessage(alt_text=\"Template Example\", template=button_template_message))\nexcept LineBotApiError as e:\n # error handle\n raise e\n", "_____no_output_____" ] ], [ [ "# Output", "_____no_output_____" ], [ "![](img/Date-picker.png)", "_____no_output_____" ], [ "# FileMessage - ๆฒ’ๅฏฆไฝœ", "_____no_output_____" ], [ "DOC https://github.com/line/line-bot-sdk-python/blob/master/linebot/models/messages.py", "_____no_output_____" ] ], [ [ "from linebot.models import VideoSendMessage", "_____no_output_____" ] ], [ [ "# VideoSendMessage - ๆˆ‘ๆฒ’่ฉฆๆˆๅŠŸ Failed", "_____no_output_____" ], [ "ๆ–‡ไปถไธญ็š„inputๆœ‰่ชช\"ๅฝฑ็‰‡ๆ™‚้•ท่ฆ < 1miuntes\" <br>\n<br>\nๆˆ‘็Œœๆ‹‰ ๅช่ฆๆ‰พๅˆฐ็ถฒๅ€ๅพŒ็ถดๆ˜ฏ.mp4ๆ‡‰่ฉฒๅฐฑๅฏไปฅ<br>\nๅชๆ˜ฏๆˆ‘ๆ‰พไธๅˆฐ้€™็จฎๅฝฑ็‰‡<br>", "_____no_output_____" ] ], [ [ "line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)\nviedo_url = \"\"\nimage_url = \"\"\ntry:\n line_bot_api.push_message(to, VideoSendMessage(original_content_url=viedo_url, preview_image_url=image_url))\nexcept LineBotApiError as e:\n # error handle\n raise e", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb0eae4c38050eabfa80c54734c2845e79a9ea66
214,623
ipynb
Jupyter Notebook
notebooks/02_fpl_predict_random_forest.ipynb
aneesh2606/fpl-prediction
960438a26408b074dbf5c9546a1ad8f7bdbe1d91
[ "MIT" ]
39
2019-08-08T14:51:22.000Z
2022-03-20T15:24:34.000Z
notebooks/02_fpl_predict_random_forest.ipynb
aneesh2606/fpl-prediction
960438a26408b074dbf5c9546a1ad8f7bdbe1d91
[ "MIT" ]
2
2020-09-08T17:11:46.000Z
2021-02-17T15:12:01.000Z
notebooks/02_fpl_predict_random_forest.ipynb
aneesh2606/fpl-prediction
960438a26408b074dbf5c9546a1ad8f7bdbe1d91
[ "MIT" ]
14
2020-04-10T12:25:22.000Z
2021-12-30T03:22:34.000Z
130.074545
63,860
0.821659
[ [ [ "import pandas as pd\nimport numpy as np\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.inspection import plot_partial_dependence\nfrom dtreeviz.trees import *\nimport scipy as sp\nfrom scipy.cluster import hierarchy as hc\nimport sys\n\nsys.path.append('..')\nfrom fpl_predictor.util import *", "_____no_output_____" ], [ "# path to project directory\npath = Path('../')", "_____no_output_____" ], [ "# read in training dataset\ntrain_df = pd.read_csv(path/'fpl_predictor/data/train_v8.csv', \n index_col=0, \n dtype={'season':str,\n 'squad':str,\n 'comp':str})", "_____no_output_____" ] ], [ [ "## Random Forest\n\nRandom Forest is an ensemble tree-based predictive algorithm. In this case we will be using it for regression - we want to predict a continuous number, predicted points, for each player each game. It works by training many separate decision trees, each using a subset of the training data, and outputs the average prediction across all trees.\n\nApplying it to a time series problem, where metrics from recent time periods can be predicitve, requires us to add in window features (e.g. points scored last gameweek). These are created using the player_lag_features function from 00_fpl_features.", "_____no_output_____" ] ], [ [ "# add a bunch of player lag features\nlag_train_df, team_lag_vars = team_lag_features(train_df, ['total_points'], ['all', 1, 2, 3, 4, 5, 10])\nlag_train_df, player_lag_vars = player_lag_features(lag_train_df, ['total_points'], ['all', 1, 2, 3, 4, 5, 10])", "_____no_output_____" ] ], [ [ "Similar to the simple model, we'll set the validation period to be gameweeks 20-25 of the 2019/20 season - the model will be trained on all data prior to that period. This time however, we'll be using some additional features: the season, gameweek, player position, home/away, and both teams, as well as all the lagging features we created above.", "_____no_output_____" ] ], [ [ "# set validaton point/length and categorical/continuous variables\nvalid_season = '2021'\nvalid_gw = 20\nvalid_len = 6\ncat_vars = ['season', 'position', 'was_home', 'team', 'opponent_team']\ncont_vars = ['gw']#, 'minutes']\ndep_var = ['total_points']", "_____no_output_____" ] ], [ [ "Some of the features have an order (2019/20 season is after 2019 season) whereas others do not (position). We can set this in the data where appropriate using an ordered category (e.g. 
1617 < 1718 < 1819 < 1920 < 2021).", "_____no_output_____" ] ], [ [ "# we want to set gw and season as ordered categorical variables\n# need lists with ordered categories\nordered_gws = list(range(1,39))\nordered_seasons = ['1617', '1718', '1819', '1920', '2021']\n\n# set as categories with correct order \nlag_train_df['gw'] = lag_train_df['gw'].astype('category')\nlag_train_df['season'] = lag_train_df['season'].astype('category')\n\nlag_train_df['gw'].cat.set_categories(ordered_gws, ordered=True, inplace=True)\nlag_train_df['season'].cat.set_categories(ordered_seasons, ordered=True, inplace=True)", "_____no_output_____" ], [ "lag_train_df['season']", "_____no_output_____" ] ], [ [ "And now we can go ahead and create our training and validation sets using the function we defined in the last notebook.", "_____no_output_____" ] ], [ [ "# create dataset with adjusted post-validation lag numbers\ntrain_valid_df, train_idx, valid_idx = create_lag_train(lag_train_df, \n cat_vars, cont_vars, \n player_lag_vars, team_lag_vars, dep_var,\n valid_season, valid_gw, valid_len)", "_____no_output_____" ] ], [ [ "The way we calculate our lag features means that there will be null values in our dataset. This will cause an error when using random forest in scikit learn, so we will set them all to zero for now (although note that this may not be the best fill strategy). ", "_____no_output_____" ] ], [ [ "lag_train_df[~np.isfinite(lag_train_df['total_points_pg_last_1'])]", "_____no_output_____" ], [ "# imp = SimpleImputer(missing_values=np.nan, strategy='mean')\n# need to think about imputing NaN instead of setting to zero\n# imp.fit(X_train[team_lag_vars + player_lag_vars])\ntrain_valid_df[team_lag_vars + player_lag_vars] = train_valid_df[team_lag_vars + player_lag_vars].fillna(0)", "_____no_output_____" ] ], [ [ "The random forest regressor will only take numbers as inputs, so we need to transform our caterogical features into a format that the random forest regressor object will be able to use, numbers instead of strings in one or more columns.", "_____no_output_____" ] ], [ [ "# split out dependent variable\nX, y = train_valid_df[cat_vars + cont_vars + team_lag_vars + player_lag_vars].copy(), train_valid_df[dep_var].copy()", "_____no_output_____" ], [ "# since position is categorical, it should be a string\nX['position'] = X['position'].apply(str)\n\n# need to transform season\nenc = LabelEncoder()\nX['season'] = enc.fit_transform(X['season'])\nX_dict = X.to_dict(\"records\")\n\n# Create the DictVectorizer object: dv\ndv = DictVectorizer(sparse=False, separator='_')\n\n# Apply dv on df: df_encoded\nX_encoded = dv.fit_transform(X_dict)\n\nX_df = pd.DataFrame(X_encoded, columns=dv.feature_names_)", "_____no_output_____" ] ], [ [ "For example, season is now represented by a number (0 -> 2016/17, 1 -> 2017/18, etc.) in a single column, and position is represented by a 1 or 0 in multiple columns.", "_____no_output_____" ] ], [ [ "X_df[['season', 'position_1', 'position_2', 'position_3', 'position_4']]", "_____no_output_____" ], [ "X_df.columns", "_____no_output_____" ] ], [ [ "Let's now split out our training (everything prior to the validation gameweek) and validation (6 gameweeks from the validation gameweek, only rows with >0 minutes)", "_____no_output_____" ] ], [ [ "# split out training and validation sets\nX_train = X_df.loc[train_idx]\ny_train = y.loc[train_idx]\nX_test = X_df.loc[valid_idx]\n# we only want look at rows with >0 minutes (i.e. 
the player played)\n# test_mask = (X_test['minutes'] > 0)\n# X_test = X_test[test_mask]\n# y_test = y.loc[valid_idx][test_mask]\ny_test = y.loc[valid_idx]", "_____no_output_____" ], [ "# X_train = X_train.drop('minutes', axis=1)\n# X_test = X_test.drop('minutes', axis=1)", "_____no_output_____" ] ], [ [ "We can now create the RandomForestRegessor with set parameters, train using the training data, and look at the error on the validation set.", "_____no_output_____" ] ], [ [ "# def rf(xs, y, n_estimators=40, max_samples=50_000,\n# max_features=0.5, min_samples_leaf=5, **kwargs):\n# return RandomForestRegressor(n_jobs=-1, n_estimators=n_estimators,\n# max_samples=max_samples, max_features=max_features,\n# min_samples_leaf=min_samples_leaf, oob_score=True).fit(xs, y)", "_____no_output_____" ], [ "def rf(xs, y, max_depth=7, **kwargs):\n return RandomForestRegressor(n_jobs=-1, max_depth=max_depth, oob_score=True).fit(xs, y)", "_____no_output_____" ], [ "# fit training data\nm = rf(X_train, y_train.values.ravel())", "_____no_output_____" ], [ "# predict validation set and output metrics\npreds = m.predict(X_test)\nprint(\"RMSE: %f\" % (r_mse(preds, y_test.values.ravel())))\nprint(\"MAE: %f\" % mae(preds, y_test.values.ravel()))\n", "RMSE: 2.147401\nMAE: 1.217610\n" ] ], [ [ "Right away this looks like it's a significant improvement on the simple model, good to see. Let's go ahead and use the same approach with validation across the whole of the 2019/20 season.", "_____no_output_____" ] ], [ [ "def rf_season(df, valid_season='2021'):\n # empty list for scores\n scores = []\n valid_len = 6\n \n for valid_gw in range(1,40-valid_len):\n # create dataset with adjusted post-validation lag numbers\n train_valid_df, train_idx, valid_idx = create_lag_train(df, cat_vars, cont_vars, \n player_lag_vars, team_lag_vars, dep_var,\n valid_season, valid_gw, valid_len)\n \n train_valid_df[team_lag_vars + player_lag_vars] = train_valid_df[team_lag_vars + player_lag_vars].fillna(0)\n \n # split out dependent variable\n X, y = train_valid_df[cat_vars + cont_vars + team_lag_vars + player_lag_vars].copy(), train_valid_df[dep_var].copy()\n \n # since position is categorical, it should be a string\n X['position'] = X['position'].apply(str)\n\n # need to transform season\n enc = LabelEncoder()\n X['season'] = enc.fit_transform(X['season'])\n X_dict = X.to_dict(\"records\")\n\n # Create the DictVectorizer object: dv\n dv = DictVectorizer(sparse=False, separator='_')\n\n # Apply dv on df: df_encoded\n X_encoded = dv.fit_transform(X_dict)\n X_df = pd.DataFrame(X_encoded, columns=dv.feature_names_)\n \n # split out training and validation sets\n X_train = X_df.loc[train_idx]\n y_train = y.loc[train_idx]\n X_test = X_df.loc[valid_idx]\n # we only want look at rows with >0 minutes (i.e. 
the player played)\n# test_mask = (X_test['minutes'] > 0)\n# X_test = X_test[test_mask]\n# y_test = y.loc[valid_idx][test_mask]\n y_test = y.loc[valid_idx]\n \n m = rf(X_train, y_train.values.ravel()) \n preds, targs = m.predict(X_test), y_test.values.ravel()\n gw_mae = mae(preds, targs)\n print(\"GW%d MAE: %f\" % (valid_gw, gw_mae))\n \n scores.append(gw_mae)\n \n return scores", "_____no_output_____" ], [ "scores = rf_season(lag_train_df)", "GW1 MAE: 1.668954\nGW2 MAE: 1.549401\nGW3 MAE: 1.488440\nGW4 MAE: 1.464948\nGW5 MAE: 1.382913\nGW6 MAE: 1.352737\nGW7 MAE: 1.301638\nGW8 MAE: 1.319514\nGW9 MAE: 1.300561\nGW10 MAE: 1.260692\nGW11 MAE: 1.277469\nGW12 MAE: 1.303000\nGW13 MAE: 1.284237\nGW14 MAE: 1.277750\nGW15 MAE: 1.279188\nGW16 MAE: 1.257393\nGW17 MAE: 1.265857\nGW18 MAE: 1.285237\nGW19 MAE: 1.258736\nGW20 MAE: 1.227599\nGW21 MAE: 1.238979\nGW22 MAE: 1.246036\nGW23 MAE: 1.242905\nGW24 MAE: 1.197667\nGW25 MAE: 1.203571\nGW26 MAE: 1.173743\nGW27 MAE: 1.142987\nGW28 MAE: 1.128093\nGW29 MAE: 1.152473\nGW30 MAE: 1.149692\nGW31 MAE: 1.122338\nGW32 MAE: 1.136649\nGW33 MAE: 1.128147\n" ], [ "plt.plot(scores)\nplt.ylabel('GW MAE')\nplt.xlabel('GW')\nplt.text(15, 1.55, 'Season Avg MAE: %.2f' % np.mean(scores), bbox={'facecolor':'white', 'alpha':1, 'pad':5})\nplt.show()", "_____no_output_____" ] ], [ [ "Looking across the whole season we see about a 10% improvement versus the simple model. Also interesting is that the performance again improves as the season progresses - this makes sense, more data about each of teams and players (particularly new ones) means improved ability to predict the next 6 gameweeks.\n\nLet's add these validation scores to our comparison dataset.", "_____no_output_____" ] ], [ [ "model_validation_scores = pd.read_csv(path/'charts/model_validation_scores.csv', index_col=0)\nmodel_validation_scores['random_forest'] = scores\nmodel_validation_scores.to_csv(path/'charts/model_validation_scores.csv')", "_____no_output_____" ] ], [ [ "A feature of the random forest algorithm is that we can see how often features are being used in trees. This will give us an indication of how important each feature is i.e. is it predictive of todal points scored. Simple models are usually better, so this also gives us a way of seeing if there are any features that are not particularly useful, and can therefore be removed.", "_____no_output_____" ] ], [ [ "def rf_feat_importance(m, df):\n return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}\n ).sort_values('imp', ascending=False)", "_____no_output_____" ], [ "fi = rf_feat_importance(m, X_train)\nfi[:32]", "_____no_output_____" ], [ "def plot_fi(fi):\n return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False).invert_yaxis()\n\nplot_fi(fi[:30]);", "_____no_output_____" ] ], [ [ "At the moment this algorithm is given minutes played in the gameweek so it's unsurprising that this is by far the most important feature - the more minutes a player plays, the more opportunity to score points. But strictly speaking we don't actually have this information prior to a gameweek (in practice it is estimated using previous minutes and injury status), so we can ignore it for now.\n\nBelow that the top features are:\n1. minutes_last_1 - number of minutes in the last fixture for the player\n2. minutes_last_2 - number of minutes in the last two fixtures for the player\n3. total_points_pg_last_all - the player's average points per game in all of history (since start of 2016/17 season)\n4. 
total_points_team_pg_last_all_opponent - the opposition's average points per game in all of history\n5. minutes_last_3 - number of minutes in the last three fixtures for the player\n6. total_points_team_pg_last_all - the player's team's average points per game in all of history\n7. total_points_pg_last_10 - the player's average points per game in the last 10 fixtures\n8. total_points_pg_last_1 - the player's average points per game in the last fixture\n\nThis is interesting. It seems to be saying that the amount of minutes a player has played recently and their underlying ability to score points in all of history, along with their team's and opponent team's points scoring in all of history, is most important.\n\nRecent performance (i.e. 'form') is also important, but to a lesser extent.\n\nIt also shows that the lag features are far more useful than the categorical features such as team, opponent and position. Again not too surprising since information on these categories are already captured in the lag features.\n\nLet's test this... we can remove anything with a feature importance of less than 0.005 and see how the model performs on the original 2019/20 week 20 validation point (going from 94 features to just 32).", "_____no_output_____" ] ], [ [ "to_keep = fi[fi.imp>0.005].cols\nlen(to_keep)", "_____no_output_____" ], [ "len(X_train.columns)", "_____no_output_____" ], [ "X_train_imp = X_train[to_keep]\nX_test_imp = X_test[to_keep]", "_____no_output_____" ], [ "m = rf(X_train_imp, y_train.values.ravel())", "_____no_output_____" ], [ "mae(m.predict(X_test_imp), y_test.values.ravel())\n# mae(m.predict(X_train_imp), y_train.values.ravel())", "_____no_output_____" ] ], [ [ "Very similar albeit slightly higher error (less than 1% worse performance) than previously, and still a long way ahead of the simple model.\n\nContinuing our thinking about improving/simplifying the model features, we can also look to see if there are any similar features - quite often we will find that some features are so similar that some of them may be redundant.\n\nThe following function determines the similarity between columns in a dataset and visualises it using a dendrogram.", "_____no_output_____" ] ], [ [ "def cluster_columns(df, figsize=(10,6), font_size=12):\n corr = np.round(sp.stats.spearmanr(df).correlation, 4)\n corr_condensed = hc.distance.squareform(1-corr)\n z = hc.linkage(corr_condensed, method='average')\n fig = plt.figure(figsize=figsize)\n hc.dendrogram(z, labels=df.columns, orientation='left', leaf_font_size=font_size)\n plt.show()", "_____no_output_____" ], [ "cluster_columns(X_train_imp)", "_____no_output_____" ] ], [ [ "We can see that our lagging features are somewhat similar - absolutely expected since, for example, minutes_last_5 is equal to minutes_last_4 + minutes 5 games ago. They are still different enough to be of value separately, but it does make me wonder whether separating out each historic game in some way (up to a point) would be valuable.", "_____no_output_____" ], [ "A final useful tool we can use is partial dependency plots. These try to look at the impact of single features on the dependent variable (points scored). ", "_____no_output_____" ] ], [ [ "fig,ax = plt.subplots(figsize=(12, 3))\nplot_partial_dependence(m, X_test_imp, ['total_points_pg_last_all', \n 'total_points_team_pg_last_all_opponent',\n 'total_points_pg_last_1'],\n grid_resolution=20, ax=ax);", "_____no_output_____" ] ], [ [ "Again, these make sense. 
The higher a player's historic points per game (defined as 90 minutes) is, the higher we predict their score will be. Conversely, the higher their opposition's historic points per game, the harder they are as an opponent and the lower their predicted score will be.\n\nLooking at the player's most recent game, again the higher their score, the more it will push up our prediction (the impact of their 'form'), but the relationship is far weaker than the player's underlying per minute scoring stats.\n\nHere we just try to look at features in isolation, there will lots of interactions going on between features that improve performance. For example, a player may have a high 'total_points_pg_last_1' from the previous fixture but only played 5 minutes in total - in this case the algorithm is likely to have learned that a high 'total_points_pg_last_1' coupled with a low 'minutes_last_1' is not an indicator that the player will score higher in the next fixture.\n\nOk, now we can move onto the next algorithm - xgboost.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb0eb206bfb43d8971a4e7b4c08e3dcb9a2105a1
327,180
ipynb
Jupyter Notebook
PSAR Strategy.ipynb
Roshanmahes/Quant-Finance
a653c7b45ef107599af3d0a55b7ec90b4e304359
[ "MIT" ]
39
2020-09-04T17:57:41.000Z
2022-03-28T21:57:29.000Z
PSAR Strategy.ipynb
Roshanmahes/Quant-Finance
a653c7b45ef107599af3d0a55b7ec90b4e304359
[ "MIT" ]
null
null
null
PSAR Strategy.ipynb
Roshanmahes/Quant-Finance
a653c7b45ef107599af3d0a55b7ec90b4e304359
[ "MIT" ]
11
2020-09-08T23:15:11.000Z
2022-02-17T12:11:18.000Z
39.438284
22,947
0.505767
[ [ [ "# Heikin-Ashi PSAR Strategy\n_Roshan Mahes_", "_____no_output_____" ], [ "In this tutorial, we implement the so-called _Parabolic Stop and Reverse (PSAR)_ strategy. Given any stock, currency or commodity, this indicator tells us whether to buy or sell the stock at any given time. The momentum strategy is based on the open, high, low and close price for each time period. This can be represented with a traditional Japanese candlestick chart. Later on, we apply the PSAR strategy on so-called Heikin-Ashi ('average bar') data, which reduces some noise, making it easier to identify trends.\n\nThe following packages are required:", "_____no_output_____" ] ], [ [ "%pip install pandas\n%pip install yfinance\n%pip install plotly", "Requirement already satisfied: pandas in c:\\programdata\\anaconda3\\lib\\site-packages (1.0.5)\nRequirement already satisfied: python-dateutil>=2.6.1 in c:\\programdata\\anaconda3\\lib\\site-packages (from pandas) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in c:\\programdata\\anaconda3\\lib\\site-packages (from pandas) (2020.1)\nRequirement already satisfied: numpy>=1.13.3 in c:\\programdata\\anaconda3\\lib\\site-packages (from pandas) (1.18.5)\nRequirement already satisfied: six>=1.5 in c:\\programdata\\anaconda3\\lib\\site-packages (from python-dateutil>=2.6.1->pandas) (1.15.0)\nNote: you may need to restart the kernel to use updated packages.\n" ] ], [ [ "Now we can import the following modules:", "_____no_output_____" ] ], [ [ "import os\nimport pandas as pd\nimport yfinance as yf\nimport plotly.graph_objects as go", "_____no_output_____" ] ], [ [ "This strategy works on any stock. In this notebook, we take the stock of Apple, represented by the ticker symbol AAPL. Let's download the pricing data and plot a (Japanese) candlestick chart:", "_____no_output_____" ] ], [ [ "symbol = 'AAPL'\ndf = yf.download(symbol, start='2020-01-01')\ndf.index = df.index.strftime('%Y-%m-%d') # format index as dates only\n\ncandles = go.Candlestick(x=df.index, open=df.Open, high=df.High, low=df.Low, close=df.Close)\n\n# plot figure\nfig = go.Figure(candles)\nfig.layout.xaxis.type = 'category' # remove weekend days\nfig.layout.xaxis.dtick = 20 # show x-axis ticker once a month\nfig.layout.xaxis.rangeslider.visible = False\nfig.layout.title = f'Japanese Candlestick Chart ({symbol})'\nfig.layout.template = 'plotly_white'\nfig.show()", "[*********************100%***********************] 1 of 1 completed\n" ] ], [ [ "## The PSAR Indicator\n\nThe _Parabolic Stop and Reverse (PSAR) indicator,_ developed by J. Wells Wilder, is a momentum indicator used by traders to determine trend direction and potential reversals in price. It is a trend-following (lagging) indicator that uses a trailing stop and reverse method called SAR (Stop and Reverse), to identify suitable exit and entry points. The concept draws on the idea that 'time is the enemy', i.e., unless a security can continue to generate more profits over time, it should be liquidated.\n\nThe PSAR indicator appears on a chart as a series of dots, either above or below an asset's price, depending on the direction the price is moving. A dot is placed below the price when it is trending upward, and above the price when it is trending downward. There is a dot for every price bar, hence the indicator is always producing information.\n\nThe parabolic SAR is calculated almost independently for each trend in the price. When the price is in an uptrend, the SAR emerges below the price and converges upwards towards it. 
Similarly, on a downtrend, the SAR emerges above the price and converges downwards. At each step within a trend, the SAR is calculated one period in advance, i.e., tomorrow's SAR value is built using data available today. The general formula used for this is:\n\n\\begin{align*}\nSAR_t = SAR_{t-1} + \\alpha_t (EP_t - SAR_{t-1}),\n\\end{align*}\n\nwhere $SAR_t$ is the SAR value at time $t$.\n\nThe _extreme point_ $EP$ is a record kept during each trend that represents the highest value reached by the price during the current uptrend, or lowest value during a downtrend. During each period, if a new maximum (or minimum) is observed, the EP is updated with that value.\n\nThe $\\alpha$ value is the _acceleration factor._ Usually, this is initially set to a value of $0.02$. The factor is increased by $0.02$ each time a new EP is recorded. The rate will then quicken to a point where the SAR converges towards the price. To prevent it from getting too large, a maximum value for the acceleration factor is normally set to $0.20$. Generally, it is preferable in stocks to set the acceleration factor to $0.01$ so that it is not too sensitive to local decreases, whereas for commodity or currency trading the preferred value is $0.02$.\n\nThere are special cases that modify the SAR value:\n\n1. If the next period's SAR value is inside (or beyond) the current period or the previous period's price range, the SAR must be set to the closest price bound. For example, if in an upward trend, the new SAR value is calculated and if it results to be more than today's or yesterday's lowest price, it must be set equal to that lower boundary.\n2. If the next period's SAR value is inside (or beyond) the next period's price range, a new trend direction is then signaled. The SAR must then switch sides.\n3. Upon a trend switch, the first SAR value for this new trend is set to the last $EP$ recorded on the prior trend. Then, the $EP$ is reset accordingly to this period's maximum, and the acceleration factor is reset to its initial value of $0.01$ (stocks) or $0.02$ (commodities/currencies).\n\nAs we can see, it's quite a difficult strategy as the formulas are not that straightforward. 
We have implemented it in the following function:", "_____no_output_____" ] ], [ [ "def PSAR(df, alpha_start=0.01):\n \"\"\"\n Returns the dataframe with the given PSAR indicator for each time period.\n \"\"\"\n \n trend = 0\n alpha = alpha_start\n SAR = [df['Open'][0]] + [0] * (len(df) - 1)\n isUpTrend = lambda x: x > 0\n trendSwitch = lambda x: abs(x) == 1\n \n # initialisation\n if df['Close'][1] > df['Close'][0]:\n trend = 1\n SAR[1] = df['High'][0]\n EP = df['High'][1]\n else:\n trend = -1\n SAR[1] = df['Low'][0]\n EP = df['Low'][1]\n \n # recursion\n for t in range(2,len(df)):\n \n # general formula\n SAR_new = SAR[t-1] + alpha * (EP - SAR[t-1])\n \n # case 1 & 2\n if isUpTrend(trend):\n SAR[t] = min(SAR_new, df['Low'][t-1], df['Low'][t-2])\n \n if SAR[t] > df['Low'][t]:\n trend = -1\n else:\n trend += 1\n else:\n SAR[t] = max(SAR_new, df['High'][t-1], df['High'][t-2])\n \n if SAR[t] < df['High'][t]:\n trend = 1\n else:\n trend -= 1\n \n # case 3\n if trendSwitch(trend):\n SAR[t] = EP\n alpha = alpha_start\n \n if isUpTrend(trend):\n EP_new = df['High'][t]\n else:\n EP_new = df['Low'][t]\n else:\n if isUpTrend(trend):\n EP_new = max(df['High'][t], EP)\n else:\n EP_new = min(df['Low'][t], EP)\n \n if EP != EP_new:\n alpha = min(alpha + 0.02, 0.20)\n \n # update EP\n EP = EP_new\n \n # store values\n df['SAR'] = SAR\n df['Signal'] = (df['SAR'] < df['Close']).apply(int).diff() # records trend switches\n\n return df\n ", "_____no_output_____" ] ], [ [ "After applying the PSAR strategy on Apple's stock, we end up with the following trading decisions:", "_____no_output_____" ] ], [ [ "# apply PSAR\ndf = PSAR(df)\n\n# extract trend switches (buying/selling advice)\nbuy = df.loc[df['Signal'] == 1]\nsell = df.loc[df['Signal'] == -1]\n\n# candles & psar\ncandles = go.Candlestick(x=df.index, open=df.Open, high=df.High, low=df.Low, close=df.Close, name='candles')\npsar = go.Scatter(x=df.index, y=df['SAR'], mode='markers', name='PSAR', line={'width': 10, 'color': 'midnightblue'})\n\n# buy & sell symbols\nbuys = go.Scatter(x=buy.index, y=buy.Close, mode='markers', marker_size=15, marker_symbol=5,\n marker_color='green', name='Buy', marker_line_color='black', marker_line_width=1)\nsells = go.Scatter(x=sell.index, y=sell.Close, mode='markers', marker_size=15, marker_symbol=6,\n marker_color='red', name='Sell', marker_line_color='black', marker_line_width=1)\n\n# plot figure\nfig = go.Figure(data=[candles, psar, buys, sells])\nfig.layout.xaxis.type = 'category' # remove weekend days\nfig.layout.xaxis.dtick = 20 # show x-axis ticker once a month\nfig.layout.xaxis.rangeslider.visible = False\nfig.layout.title = f'PSAR indicator ({symbol})'\nfig.layout.template = 'plotly_white'\nfig.show()", "_____no_output_____" ] ], [ [ "We see that most of the times our indicator predicted a correct trend! Instead of using the open, high, low and close data, represented by this traditional candlestick chart, we can also apply the PSAR strategy on so-called _Heikin-Ashi charts_.\n\n## Heikin-Ashi Charts\n\n_Heikin-Ashi_ means 'average bar' in Japanese. Heikin-Ashi charts, developed by Munehisa Homma in the 1700s, display prices that, at a glance, look similar to a traditional Japanese chart. The Heikin-Ashi technique averages price data to create a Japanese candlestick chart that filters out market noise. Instead of using the open, high, low, and close like standard candlestick charts, the Heikin-Ashi technique uses a modified formula based on two-period averages. 
This gives the chart a smoother appearance, making it easier to spots trends and reversals, but also obscures gaps and some price data.\n\nThe formulas are as follows:\n\n\\begin{align*}\nH_{open,t} &= \\frac{H_{open,t-1} + H_{close,t-1}}{2}, \\\\\nH_{close,t} &= \\frac{C_{open,t} + C_{high,t} + C_{low,t} + C_{close,t}}{4}, \\\\\nH_{high,t} &= \\max\\{H_{open,t}, H_{close,t}, C_{high,t}\\}, \\\\\nH_{low,t} &= \\min\\{H_{open,t}, H_{close,t}, C_{low,t}\\},\n\\end{align*}\n\nwith initial condition $H_{open, 0} = C_{open,0}$. In here, $H_{open,t}$ is the opening value in the Heikin-Ashi chart at time $t \\in \\mathbb{N}_0$, and $C_{open,t}$ is the opening value of the stock, which is used in the traditional Japanese candlestick chart etc.\n\nIn the following function we transform a given dataframe of stock prices to a Heikin-Ashi one.", "_____no_output_____" ] ], [ [ "def heikin_ashi(df):\n \"\"\"\n Converts a dataframe according to the Heikin-Ashi.\n \"\"\"\n \n df_HA = pd.DataFrame(index=df.index, columns=['Open', 'High', 'Low', 'Close'])\n \n df_HA['Open'][0] = df['Open'][0]\n df_HA['Close'] = (df['Open'] + df['High'] + df['Low'] + df['Close']) / 4\n \n for t in range(1,len(df)):\n df_HA.iat[t,0] = (df_HA['Open'][t-1] + df_HA['Close'][t-1]) / 2 # change H_open without warnings\n \n df_HA['High'] = df_HA[['Open', 'Close']].join(df['High']).max(axis=1)\n df_HA['Low'] = df_HA[['Open', 'Close']].join(df['Low']).min(axis=1)\n \n return df_HA", "_____no_output_____" ] ], [ [ "Let's convert the Apple's (Japanese) candlestick chart to a Heikin-Ashi chart:", "_____no_output_____" ] ], [ [ "df_HA = heikin_ashi(df)\ncandle = go.Candlestick(x=df_HA.index, open=df_HA['Open'], high=df_HA['High'], low=df_HA['Low'], close=df_HA['Close'])\n\n# plot figure\nfig = go.Figure(candle)\nfig.layout.xaxis.type = 'category' # remove weekend days\nfig.layout.xaxis.dtick = 20 # show x-axis ticker once a month\nfig.layout.xaxis.rangeslider.visible = False\nfig.layout.title = f'Heikin-Ashi Chart ({symbol})'\nfig.layout.template = 'plotly_white'\nfig.show()", "_____no_output_____" ] ], [ [ "As we can see, the Heikin-Ashi technique can be used to identify a trend more easily. Because the Heikin-Ashi technique smooths price information over two periods, it makes trends, price patterns, and reversal points easier to spot. Candles on a traditional candlestick chart frequently change from up to down, which can make them difficult to interpret. Heikin-Ashi charts typically have more consecutive colored candles, helping traders to identify past price movements easily.\n\nThe Heikin-Ashi technique reduces false trading signals in sideways and choppy markets to help traders avoid placing trades during these times. 
For example, instead of getting two false reversal candles before a trend commences, a trader who uses the Heikin-Ashi technique is likely only to receive the valid signal.", "_____no_output_____" ], [ "## Heikin-Ashi PSAR indicator\n\nIt is straightforward to apply the PSAR strategy on our Heikin-Ashi data:", "_____no_output_____" ] ], [ [ "# apply PSAR\ndf = PSAR(df_HA)\n\n# extract trend switches (buying/selling advice)\nbuy = df.loc[df['Signal'] == 1]\nsell = df.loc[df['Signal'] == -1]\n\n# candles & psar\ncandles = go.Candlestick(x=df.index, open=df.Open, high=df.High, low=df.Low, close=df.Close, name='candles')\npsar = go.Scatter(x=df.index, y=df['SAR'], mode='markers', name='PSAR', line={'width': 10, 'color': 'midnightblue'})\n\n# buy & sell symbols\nbuys = go.Scatter(x=buy.index, y=buy.Close, mode='markers', marker_size=15, marker_symbol=5,\n marker_color='green', name='Buy', marker_line_color='black', marker_line_width=1)\nsells = go.Scatter(x=sell.index, y=sell.Close, mode='markers', marker_size=15, marker_symbol=6,\n marker_color='red', name='Sell', marker_line_color='black', marker_line_width=1)\n\n# plot figure\nfig = go.Figure(data=[candles, psar, buys, sells])\nfig.layout.xaxis.type = 'category' # remove weekend days\nfig.layout.xaxis.dtick = 20 # show x-axis ticker once a month\nfig.layout.xaxis.rangeslider.visible = False\nfig.layout.title = f'Heikin-Ashi PSAR indicator on Heikin-Ashi ({symbol})'\nfig.layout.template = 'plotly_white'\nfig.show()", "_____no_output_____" ] ], [ [ "In this case, there are small differences. In fact, only on one date the Heikin-Ashi SAR value is different from the traditional SAR value. This might change when clear trends are less visible, so feel free to try other stocks!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb0eb4993021e7cd4c50a1d9a8be1b1371ae98dd
22,422
ipynb
Jupyter Notebook
examples/notebooks/intro_pyLHD.ipynb
toledo60/pyLHD
40df7f2015e06e9e1190cce49c68f17068b86070
[ "MIT" ]
1
2021-11-20T17:33:43.000Z
2021-11-20T17:33:43.000Z
examples/notebooks/intro_pyLHD.ipynb
toledo60/pyLHD
40df7f2015e06e9e1190cce49c68f17068b86070
[ "MIT" ]
1
2021-11-20T18:43:01.000Z
2021-11-20T20:04:48.000Z
examples/notebooks/intro_pyLHD.ipynb
toledo60/pyLHD
40df7f2015e06e9e1190cce49c68f17068b86070
[ "MIT" ]
null
null
null
29.157347
523
0.396619
[ [ [ "## 1. Introduction to pyLHD\r\n\r\npyLHD is a python implementation of the R package [LHD](https://cran.r-project.org/web/packages/LHD/index.html) by Hongzhi Wang, Qian Xiao, Abhyuday Mandal. As of now, only the algebraic construction of Latin hypercube designs (LHD) are implemented in this package. For search algorithms to construct LHDs such as: Simulated annealing, particle swarm optimization, and genetic algorithms refer to the R package.\r\n\r\nIn section 2 algebraic construction methods for LHDs are discussed", "_____no_output_____" ], [ "To evalute the generated LHDs we consider the following criteria\r\n\r\n### Maximin distance Criterion\r\n\r\nLet $X$ denote an LHD matrix. Define the $L_q$-distance between two run $x_i$ and $x_j$ of $X$ as $d_q(x_i,x_j) = \\left( \\sum_{k=1}^m |x_{ik}-x_{jk}|^q \\right)^{1/q}$ where $q$ is an integer. Define the $L_q$-distance of design $X$ as $d_q(X) = \\min \\{ d_q(x_i,x_j), 1 \\leq i\\leq j \\leq n \\}$. If $q=1$, we are considering the Manhattan $(L_1)$ distance. If $q=2$, the Euclidean $(L_2)$ distance is considered. A design $X$ is called a maximim $L_q$-distance if it has the unique largest $d_q(X)$ value.\r\n\r\nMorris and Mitch (1995) and Jin et al. (2005) proposed the $\\phi_p$ criterion which is defined as\r\n$$\r\n\\phi_p = \\left( \\sum_{i=1}^{n-1} \\sum_{j=i+1}^n d_q (x_i,x_j)^{-p} \\right)^{1/p} \r\n$$\r\n\r\nThe $\\phi_p$ criterion is asymptotically equivalent to the Maximin distance criterion as $p \\rightarrow \\infty$. In practice $p=15$ often suffices.\r\n\r\n### Maximum Projection Criterion\r\n\r\nJoseph et al (2015) proposed the maximum projection LHDs that consider designs' space-filling properties in all possible dimensional spaces. Such designs minimize the maximum projection criterion, which is defined as \r\n\r\n$$\r\n\\underset{X}{\\min} \\psi(X) = \\left( \\frac{1}{{n \\choose 2}} \\sum_{i=1}^{n-1} \\sum_{j=i+1}^n \\frac{1}{ \\prod_{l=1}^k (x_{il}-x_{jl})^2} \\right)^{1/k}\r\n$$\r\n\r\n\r\nWe can wee that any two design points should be apart from each other in any projection to minimize the value of $\\psi(x)$\r\n\r\n### Orthogonality Criteria\r\n\r\nTwo major correlation-based criteria to measure designs' orthogonality is the average absolute correlation criterion and the maximum absolute correlation\r\n\r\n$$\r\nave(|q|) = \\frac{2 \\sum_{i=1}^{k-1} \\sum_{j=i+1}^k |q_{ij}|}{k(k-1)} \\quad \\text{and} \\quad \\max |q| = \\underset{i,j}{\\max} |q_{ij}|\r\n$$\r\n\r\nwhere $q_{ij}$ is the correlation between the $i$th and $j$th columns of the design matrix $X$. Orthogonal design have $ave(|q|)=0$ and $\\max|q|=0$, which may not exist for all design sizes. 
Designs with smaller $ave(|q|)$ or $\\max|q|$ are generally preferred in practice.\r\n\r\n", "_____no_output_____" ] ], [ [ "import pyLHD as pl", "_____no_output_____" ] ], [ [ "Lets start by generating a random LHD with 5 rows and 3 columns", "_____no_output_____" ] ], [ [ "X = pl.rLHD(nrows=5,ncols=3)\r\nX", "_____no_output_____" ] ], [ [ "We evaluate the above design with the different optimamlity criteria described earlier:\r\n\r\nThe maximin distance criterion (Manhattan)", "_____no_output_____" ] ], [ [ "pl.phi_p(X,p=15,q=1) # using default parameters", "_____no_output_____" ] ], [ [ "The maximin distance criterion (Euclidean)", "_____no_output_____" ] ], [ [ "pl.phi_p(X,p=10,q=2) # different p used than above", "_____no_output_____" ] ], [ [ "The average absolute correlation", "_____no_output_____" ] ], [ [ "pl.AvgAbsCor(X)", "_____no_output_____" ] ], [ [ "The maximum absolute correlation", "_____no_output_____" ] ], [ [ "pl.MaxAbsCor(X)", "_____no_output_____" ] ], [ [ "The maximum projection criterion", "_____no_output_____" ] ], [ [ "pl.MaxProCriterion(X)", "_____no_output_____" ] ], [ [ "We can apply Williams transformation on X defined as:\r\n$$\r\nW(x) = \\begin{cases} \r\n 2x & 0 \\leq x \\leq (p-1)/2 \\\\\r\n 2(p-x)-1 & (p+1)/2 \\leq x \\leq p-1 \r\n \\end{cases}\r\n$$", "_____no_output_____" ] ], [ [ "W_x = pl.williams_transform(X)\r\nW_x", "_____no_output_____" ] ], [ [ "Lets evaluate the new transformed design", "_____no_output_____" ] ], [ [ "pl.phi_p(W_x)", "_____no_output_____" ] ], [ [ "The $\\phi_p$ value of transformed $W_x$ is smaller than the original design $X$", "_____no_output_____" ], [ "## 2. Algebraic Construction Functions\r\n\r\nThe algebraic construction methods are demonstrated in the table below", "_____no_output_____" ], [ "| | Ye98 | Cioppa07 | Sun10 | Tang93 | Lin09 | Butler01 |\r\n|------------|---|---|---|---|---|----|\r\n| Run # $n$ | $2^m +1$ | $2^m +1$ | $r2^{m +1}$ or $r2^{m +1} +1$ | $n$ | $n^2$ | $n$ |\r\n| Factor # $k$ | $2m-2$ | $m + {m-1 \\choose 2}$ | $2^c$ | $m$ | $2fp$ | $k \\leq n-1$ |\r\n| Note | $m$ is a positive integer $m\\geq 2$ | $m$ is a positive integer $m\\geq 2$ | $r$ and $c$ are positive integers | $n$ and $m$ are from $OA(n,m,s,r)$ | $n^2,2f$ and $p$ are from $OA(n^2,2f,n,2)$ and $OLHD(n,p)$ | $n$ is an odd prime number |", "_____no_output_____" ], [ "For theoretical details on the construction methods, a good overview is **Section 4.2: Algebraic Constuctions for Orthogonal LHDs** from [Musings about Constructions of Efficient Latin Hypercube Designs with Flexible Run-sizes](https://arxiv.org/abs/2010.09154)\r\n\r\nWe start by implementing Ye 1998 construction, the resulting desig will have \r\n$2^m+1$ runs and $2m-2$ factors", "_____no_output_____" ] ], [ [ "Ye98 = pl.OLHD_Ye98(m=4)\r\nYe98", "_____no_output_____" ], [ "pl.MaxAbsCor(Ye98) # column-wise correlation are 0", "_____no_output_____" ] ], [ [ "Cioppa and Lucas 2007 construction, the resulting design will be a $2^m+1$ by $m+ {m-1 \\choose 2}$ orthogonal LHD. Note $m \\geq 2$", "_____no_output_____" ] ], [ [ "Cioppa07 = pl.OLHD_Cioppa07(m=3)\r\nCioppa07", "_____no_output_____" ], [ "pl.MaxAbsCor(Cioppa07) # column-wise correlation are 0", "_____no_output_____" ] ], [ [ "Sun et al. 2010 construction, the resulting design will be $r2^{c+1}$ by $2^c$ if type='even'. 
If type='odd'\r\nthe resulting design will be $r2^{c+1} + 1$ by $2^c$, where $r$ and $c$ are positive integers.", "_____no_output_____" ] ], [ [ "Sun10_odd = pl.OLHD_Sun10(C=2,r=2,type='odd')\r\nSun10_odd", "_____no_output_____" ], [ "Sun10_even = pl.OLHD_Sun10(C=2,r=2,type='even')\r\nSun10_even", "_____no_output_____" ] ], [ [ "Line et al. 2009 construction, the resulting design will be $n^2$ by $2fp$. This is obtained by using a\r\n$n$ by $p$ orthogonal LHD with a $n^2$ by $2f$ strength 2 and level $n$ orthogonal array.\r\n\r\nStart by generating an orthogonal LHD", "_____no_output_____" ] ], [ [ "OLHD_example = pl.OLHD_Cioppa07(m=2)", "_____no_output_____" ] ], [ [ "Next, create an orthogonal array with 25 rows, 6 columns, 5 levels, and strength 2 OA(25,6,5,2)", "_____no_output_____" ] ], [ [ "import numpy as np\r\n\r\nOA_example = np.array([[2,2,2,2,2,1],[2,1,5,4,3,5],\r\n [3,2,1,5,4,5],[1,5,4,3,2,5],\r\n [4,1,3,5,2,3],[1,2,3,4,5,2],\r\n [1,3,5,2,4,3],[1,1,1,1,1,1],\r\n [4,3,2,1,5,5],[5,5,5,5,5,1],\r\n [4,4,4,4,4,1],[3,1,4,2,5,4],\r\n [3,3,3,3,3,1],[3,5,2,4,1,3],\r\n [3,4,5,1,2,2],[5,4,3,2,1,5],\r\n [2,3,4,5,1,2],[2,5,3,1,4,4],\r\n [1,4,2,5,3,4],[4,2,5,3,1,4],\r\n [2,4,1,3,5,3],[5,3,1,4,2,4],\r\n [5,2,4,1,3,3],[5,1,2,3,4,2],\r\n [4,5,1,2,3,2] ])", "_____no_output_____" ] ], [ [ "Now using Lin at al. 2009 construction, we couple OLHD and OA to obtain", "_____no_output_____" ] ], [ [ "Lin09 = pl.OLHD_Lin09(OLHD=OLHD_example,OA=OA_example)\r\nLin09", "_____no_output_____" ] ], [ [ "We can convert an orthogonal array into a LHD using the function OA2LHD. Consider the \r\nearlier OA_example with 25 rows and 6 columns.", "_____no_output_____" ] ], [ [ "pl.OA2LHD(OA_example)", "_____no_output_____" ] ], [ [ "Lastly, we consider Butler 2001 construction by generating a $n$ by $k$ OLHD", "_____no_output_____" ] ], [ [ "Butler01 = pl.OLHD_Butler01(nrows=11,ncols=5)\r\nButler01", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb0ebb51d1827191b09c910cacc33eb1e04c3e36
98,594
ipynb
Jupyter Notebook
Code/UpdatingResourceData/UpdateHumanProteomeVersion.ipynb
NaegleLab/KinPred
636d6148bf7f46ab23e51313a688609bebe638e0
[ "MIT" ]
null
null
null
Code/UpdatingResourceData/UpdateHumanProteomeVersion.ipynb
NaegleLab/KinPred
636d6148bf7f46ab23e51313a688609bebe638e0
[ "MIT" ]
null
null
null
Code/UpdatingResourceData/UpdateHumanProteomeVersion.ipynb
NaegleLab/KinPred
636d6148bf7f46ab23e51313a688609bebe638e0
[ "MIT" ]
null
null
null
34.091978
281
0.398594
[ [ [ "# Update the Human Proteome Reference File and KinPred Final Data\n\nThis notebook shows the steps to upgrade from 2019-12-12 to 2020-2-26 reference humam proteome, and update the KinPred final data with the new reference human proteome.", "_____no_output_____" ] ], [ [ "# IMPORTS\nimport pandas as pd\nimport os\nimport sys\nsys.path.append('../PreprocessingPredictionData/')\nimport humanProteomesReference", "_____no_output_____" ], [ "##################\n# File Location #\n##################\n# local (../../)\nbase = '../../'\n\n#####################\n# Defining File Dir #\n#####################\n# Human Proteome fasta file dir\nHP_fasta = base + 'Data/Raw/HumanProteome/'\n# human proteome referece csv file dir\nHP_csv = base + 'Data/Map/'\n\n#########################################################\n# Defining the file names for the current (old) version #\n#########################################################\n# Old version Date\nold_version = '2019-12-11'\nold_HP_fasta = HP_fasta + 'humanProteome_' + old_version + '.fasta'\nold_HP_csv = HP_csv + 'humanProteome_' + old_version + '.csv'\n\n#########################################################\n# Defining the file names for the updated (new) version #\n#########################################################\n# New version Date: uniprot.org (Date of last sequence modification)\nnew_version = '2020-02-26'\nnew_HP_fasta = HP_fasta + 'humanProteome_' + new_version + '.fasta'\nnew_HP_csv = HP_csv + 'humanProteome_' + new_version + '.csv'\nseq_mod_fasta = HP_fasta + 'UpdatedSeq_' + new_version + '.fasta'", "_____no_output_____" ] ], [ [ "### Download the updated (new) version of Human Proteome fasta file", "_____no_output_____" ] ], [ [ "# Downloads the updated Human Proteomes (canonical) from Unipro.org\n# saves as fasta formate at the given dir/name + last sequence modification date.\n\nhumanProteomesReference.downloadHumanProteomes(new_HP_fasta)", "_____no_output_____" ], [ "# Convert the input fasta file into a dataframe\n# saves as csv formate at the given dir/name + last sequence modification date.\n\nhumanProteomesReference.fastaToCSV(new_HP_fasta, new_HP_csv)", "_____no_output_____" ] ], [ [ "### Compare the Current (old) version and the Updated (new) version\n\n- get a list of uniportIDs from the current (old) human proteome referece file that become obsolet/secondary in the updated (new) human proteome referece file\n- get a list of uniportIDs from the updated (new) human proteome referece file that have different sequence in the current (old) human proteome referece file\n - prediction with those uniprotIDs (substrate_acc) will be removed from the current prediction data files", "_____no_output_____" ] ], [ [ "df_old = pd.read_csv(old_HP_csv, usecols = ['UniprotID', 'sequence'], sep = '\\t')\ndf_new = pd.read_csv(new_HP_csv, usecols = ['UniprotID', 'sequence'], sep = '\\t')\n\ncommon_acc = df_old.merge(df_new, on=['UniprotID'])\ncommon_seq = df_old.merge(df_new, on=['UniprotID' , 'sequence'])\n\nold_id = df_old[(~df_old.UniprotID.isin(common_acc.UniprotID))][['UniprotID']]\nnew_seq = df_new[(~df_new.UniprotID.isin(common_seq.UniprotID))|(~df_new.sequence.isin(common_seq.sequence))][['UniprotID']]\n\nprint ('Outdated Protein UniprotIDs: \\n', old_id, '\\n')\nprint ('Protein UniprotIDs with Sequence Modifacation:\\n', new_seq)", "Outdated Protein UniprotIDs: \n UniprotID\n1103 Q9UGB4\n16384 Q5JT78 \n\nProtein UniprotIDs with Sequence Modifacation:\n UniprotID\n1873 Q96LP2\n8355 Q6DHV5\n10813 Q9UN66\n11600 P19013\n17838 
P0C617\n19253 Q8N4F4\n19605 O75123\n20283 A6QL64\n" ] ], [ [ "- download the fasta file of the Protein UniprotIDs with Sequence Modifacation", "_____no_output_____" ] ], [ [ "humanProteomesReference.downloadFasta (new_seq, seq_mod_fasta)", "_____no_output_____" ] ], [ [ "### Re-run predictions of the Protein UniprotIDs with Sequence Modifacation in each predictor\nPlease see the `Get Results` section in \n[FormattingPhosphoPICK.ipynb](https://github.com/NaegleLab/KinPred/blob/master/Code/PreprocessingPredictionData/FormattingPhosphoPICK.ipynb), [FormattingGPS.ipynb](https://github.com/NaegleLab/KinPred/blob/master/Code/PreprocessingPredictionData/FormattingGPS.ipynb), and \n[FormattingNetworKIN.ipynb](https://github.com/NaegleLab/KinPred/blob/master/Code/PreprocessingPredictionData/FormattingNetworKIN.ipynb)\nfor instruction on how to run prediction with each predictor", "_____no_output_____" ], [ "### Update the Prediction Data", "_____no_output_____" ] ], [ [ "# IMPORTS\nsys.path.append('../PreprocessingPredictionData/')\nimport gps_convert, phosphoPick_convert, networKin_convert", "_____no_output_____" ], [ "#####################\n# Defining File Dir #\n#####################\n\n# Resource Files\nSubstrateMap = base + 'Data/Map/globalSubstrateMap.csv' # add all unique substrate in HPRD to the global file \nKinaseMap = base + 'Data/Map/globalKinaseMap.csv' # add all unique kinase in HPRD to the global file\n\n# GPS\n# Current (old) prediction data file\ngps_old = base + 'Data/Formatted/GPS/GPS_formatted_' + old_version + '.csv'\n# updated (new) prediction data file\ngps_new = base + 'Data/Formatted/GPS/GPS_formatted_' + new_version + '.csv'\n# manually prepared GPS valid kinase table\ngps_kinase = base + 'Data/Raw/GPS/gps_valid_kinases.csv'\n# dir for the predictions of the updated sequences\ngps_update_dir = base + 'Data/Raw/GPS/updated/updated/'\n# temp dir for processing the predictions for the updated sequences\ngps_temp_dir_acc_update = base + 'Data/Temp/GPS/mappedAcc/updated/updated/'\ngps_temp_dir_site_update = base + 'Data/Temp/GPS/mappedSite/updated/updated/'\n\n# PhosphoPICK\n# Current (old) prediction data file\npick_old = base + 'Data/Formatted/PhosphoPICK/PhosphoPICK_formatted_' + old_version + '.csv'\n# updated (new) prediction data file\npick_new = base + 'Data/Formatted/PhosphoPICK/PhosphoPICK_formatted_' + new_version + '.csv'\n# dir for the predictions of the updated sequences\npick_update_dir = base + 'Data/Raw/PhosphoPICK/updated/updated/'\n# temp dir for processing the predictions for the updated sequences\npick_temp_dir_acc_update = base + 'Data/Temp/PhosphoPICK/mappedAcc/updated/updated/'\npick_temp_dir_site_update = base + 'Data/Temp/PhosphoPICK/mappedSite/updated/updated/'\n\n# NetworKIN\n# Current (old) prediction data file\nkin_old = base + 'Data/Formatted/NetworKIN/NetworKIN_formatted_' + old_version + '.csv'\n# updated (new) prediction data file\nkin_new = base + 'Data/Formatted/NetworKIN/NetworKIN_formatted_' + new_version + '.csv'\n# dir for the predictions of the updated sequences\nkin_update_dir = base + 'Data/Raw/NetworKIN/updated/updated/'\n# temp dir for processing the predictions for the updated sequences\nkin_temp_dir_acc_update = base + 'Data/Temp/NetworKIN/mappedAcc/updated/updated/'\nkin_temp_dir_site_update = base + 'Data/Temp/NetworKIN/mappedSite/updated/updated/'", "_____no_output_____" ] ], [ [ "**Remove outdated data** of the above outdated Protein UniprotIDs and Protein UniprotIDs with Sequence Modifacation from each prediction data of each 
predictor", "_____no_output_____" ] ], [ [ "# append old_id and new_seq together\nrm_id = pd.concat([old_id, new_seq]).reset_index(drop = True)\nrm_id", "_____no_output_____" ], [ "def rmOutdated (predictor_old, predictor_new, rm_id_df):\n for chunk in pd.read_csv(predictor_old, chunksize = 1000000):\n chunk = chunk[~chunk.substrate_acc.isin(rm_id_df.UniprotID)]\n\n if not os.path.isfile(predictor_new):\n chunk.to_csv(predictor_new, mode='a', index=False, sep=',')\n else:\n chunk.to_csv(predictor_new, mode='a', index=False, sep=',', header=False)\n", "_____no_output_____" ] ], [ [ "- **GPS**", "_____no_output_____" ] ], [ [ "rmOutdated(gps_old, gps_new, rm_id)", "_____no_output_____" ] ], [ [ "- **PhosphoPICK**", "_____no_output_____" ] ], [ [ "rmOutdated(pick_old, pick_new, rm_id)", "_____no_output_____" ] ], [ [ "- **NetworKIN**", "_____no_output_____" ] ], [ [ "rmOutdated(kin_old, kin_new, rm_id)", "_____no_output_____" ] ], [ [ "**Process the rerunned predictions** of each predictor", "_____no_output_____" ] ], [ [ "#get Gene Name(substrate) from the globalSubstrateMap.csv \ndf_unique_sub = pd.read_csv(SubstrateMap, usecols = ['Gene Name','UniprotID'])\n#get Kinase Name from the globalKinaseMap.csv \ndf_unique_kin = pd.read_csv(KinaseMap, usecols = ['Kinase Name','UniprotID'])", "_____no_output_____" ], [ "def addNameCol (perdictor_df):\n # add Gene Name (substrate) column\n perdictor_df = perdictor_df.merge(df_unique_sub, left_on=['substrate_acc'], right_on=['UniprotID'], how = 'left')\n # drop the duplicated uniprotID column for substrate\n perdictor_df = perdictor_df.drop(columns = 'UniprotID')\n\n # add Kinase Name column\n perdictor_df = perdictor_df.merge(df_unique_kin, left_on=['kinase_acc'], right_on=['UniprotID'], how = 'left')\n # drop the duplicated uniprotID column for kinases\n perdictor_df = perdictor_df.drop(columns = 'UniprotID')\n \n return perdictor_df", "_____no_output_____" ], [ "# removing unmatched kinase type and phosphostie type\n\ndef rm_unmatched_kinase_type(df):\n df_y = df[df['Kinase Name'].isin(y_kin['Kinase Name'])]\n df_y = df_y[df_y['site'].str.contains('Y')]\n df_st = df[df['Kinase Name'].isin(st_kin['Kinase Name'])]\n df_st = df_st[df_st['site'].str.contains('S|T')]\n df_dual = df[df['Kinase Name'].isin(dual_kin['Kinase Name'])]\n \n df_final = pd.concat([df_y, df_st, df_dual])\n df_final = df_final.reset_index()\n \n return df_final", "_____no_output_____" ] ], [ [ "- **GPS**", "_____no_output_____" ] ], [ [ "# convert substrate_acc and kinase_acc\nconvert_type = 'acc'\ngps_convert.gps_convert_directory(gps_update_dir, gps_kinase, gps_temp_dir_acc_update, convert_type)\n# map the site to the updated (new) human proteome reference\nconvert_type = 'site'\ngps_convert.gps_convert_directory(gps_temp_dir_acc_update, new_HP_csv, gps_temp_dir_site_update, convert_type)\n", "Formatting UpdatedSeq_2020-02-26 ...\nDone. Time\t2.777\nFormatting UpdatedSeq_2020-02-26.csv ...\nReading input file...\nGet unique substrate sites...\nMap unique substrate sites...\nDone. 
Time\t3.185\n" ], [ "# print the converted df for the updated seq predictions\ndf_gps_update = pd.read_csv(gps_temp_dir_site_update+'UpdatedSeq_' + new_version + '_mappedSite.csv')\ndf_gps_update", "_____no_output_____" ], [ "# remove the ones that are not kinse (from FormattingGPS.ipynb)\nnot_kinase = ['PDK2', 'PDK3', 'PDK4', 'MSN', 'GTF2F1', 'MPS1']\ndf_gps_update = df_gps_update[~df_gps_update['kinase'].isin(not_kinase)]\ndf_gps_update", "_____no_output_____" ], [ "# add Gene (substrate) and Kinase Name columns\ndf_gps_update = addNameCol (df_gps_update)\n\n#rename columns\ndf_gps_update = df_gps_update[['substrate_id','substrate_acc','Gene Name','mapped site','pep', 'score', 'Kinase Name']]\ndf_gps_update = df_gps_update.rename(columns={'mapped site' : 'site', 'Gene Name' : 'substrate_name'})\ndf_gps_update", "_____no_output_____" ] ], [ [ "- **PhosphoPICK**", "_____no_output_____" ] ], [ [ "# convert substrate_acc and kinase_acc\nconvert_type = 'acc'\nphosphoPick_convert.pick_convert_directory(pick_update_dir, 'na', pick_temp_dir_acc_update, convert_type)\n# map the site to the updated (new) human proteome reference\nconvert_type = 'site'\nphosphoPick_convert.pick_convert_directory(pick_temp_dir_acc_update, new_HP_csv, pick_temp_dir_site_update, convert_type)\n", "Formatting UpdatedSeq_2020-02-26.txt ...\ngetting unique sub\ngetting sub_acc\nmerge\ndone 0.7367167472839355\ngetting unique kin\ngetting kin_acc\nRPSK6A5 no hit in human\nMAP3KB no hit in human\nmerge\ndone 143.17519211769104\nDone. Time\t144.134\nFormatting UpdatedSeq_2020-02-26 ...\nReading input file...\nGet unique substrate sites...\nDone. Time\t0.422\n" ], [ "# print the converted df for the updated seq predictions\ndf_pick_update = pd.read_csv(pick_temp_dir_site_update+'UpdatedSeq_' + new_version + '_mappedSite.csv')\ndf_pick_update", "_____no_output_____" ], [ "# add the uniprotID of the protein kinases 'RPSK6A5'\n# remove predictions with kinase 'MAP3KB' : didn't find any record in human\nid_dict = {'RPSK6A5':'O75582'}\n\nfor key in id_dict:\n df_pick_update.loc[df_pick_update.kinase == key, [\"kinase_acc\"]] = id_dict[key]\n # remove MAP3KB : didn't find any record in human\ndf_pick_update = df_pick_update[df_pick_update['kinase_acc'] != '(no hit in human)']\ndf_pick_update", "_____no_output_____" ], [ "# add Gene (substrate) and Kinase Name columns\ndf_pick_update = addNameCol (df_pick_update)\n\n#rename columns\ndf_pick_update = df_pick_update[['substrate_id','substrate_acc','Gene Name','site','pep', 'combined-p-value', 'Kinase Name']]\ndf_pick_update = df_pick_update.rename(columns={'combined-p-value' : 'score', 'Gene Name' : 'substrate_name'})\ndf_pick_update", "_____no_output_____" ] ], [ [ "- **NetworKIN**", "_____no_output_____" ] ], [ [ "# convert substrate_acc and kinase_acc\nconvert_type = 'acc'\nnetworKin_convert.kin_convert_directory(kin_update_dir, 'na', kin_temp_dir_acc_update, convert_type)\n# map the site to the updated (new) human proteome reference\nconvert_type = 'site'\nnetworKin_convert.kin_convert_directory(kin_temp_dir_acc_update, new_HP_csv, kin_temp_dir_site_update, convert_type)\n", "Formatting UpdatedSeq_2020-02-26 ...\ngetting unique sub\ngetting sub_acc\nmerge\ngetting unique kin\ngetting kin_acc\nmerge\nDone. Time\t143.550\nFormatting UpdatedSeq_2020-02-26 ...\nDone. 
Time\t1.851\n" ], [ "# print the converted df for the updated seq predictions\ndf_kin_update = pd.read_csv(kin_temp_dir_site_update+'UpdatedSeq_' + new_version + '_mappedSite.csv')\ndf_kin_update", "_____no_output_____" ], [ "# remove the ones that are not kinse (from FormattingNetworKIN.ipynb)\nnot_kinase = ['PDK2','PDK3','PDK4','LCA5']\ndf_kin_update = df_kin_update[~df_kin_update['kinase_name'].isin(not_kinase)]\ndf_kin_update", "_____no_output_____" ], [ "# add Gene (substrate) and Kinase Name columns\ndf_kin_update = addNameCol(df_kin_update)\n\n#rename columns\ndf_kin_update = df_kin_update[['substrate_id','substrate_acc','Gene Name','site','pep', 'score', 'Kinase Name']]\ndf_kin_update = df_kin_update.rename(columns={'Gene Name' : 'substrate_name'})\ndf_kin_update", "_____no_output_____" ] ], [ [ "**Append the rerunned predictions** of each predictor to the prediction files and save as the updated prediction files with version date", "_____no_output_____" ] ], [ [ "def appendUpdates(predictor_new, update_df):\n update_df = rm_unmatched_kinase_type(update_df)\n update_df.to_csv(predictor_new, mode='a', index = False, header=False)", "_____no_output_____" ] ], [ [ "- **GPS**", "_____no_output_____" ] ], [ [ "appendUpdates(gps_new, df_gps_update)", "_____no_output_____" ] ], [ [ "- **PhosphoPICK**", "_____no_output_____" ] ], [ [ "appendUpdates(pick_new, df_pick_update)", "_____no_output_____" ] ], [ [ "- **NetworKIN**", "_____no_output_____" ] ], [ [ "appendUpdates(kin_new, df_kin_update)", "_____no_output_____" ] ], [ [ "### Cross Referencing with ProteomeScout Phosphorylation Data\n\nsee [CrossReferenceWithProteomeScout.ipynb](https://github.com/NaegleLab/KinPred/blob/master/Code/CrossReferenceWithProteomeScout/CrossReferenceWithProteomeScout.ipynb) for detail", "_____no_output_____" ] ], [ [ "# IMPORTS\nsys.path.append('../CrossReferenceWithProteomeScout/')\nimport XRefProteomeScout\nfrom datetime import date", "_____no_output_____" ], [ "# version date\npscout_version = date.today().strftime('%Y-%m-%d')\n\n# file location\nref_proteome = base+\"Data/Raw/HumanProteome/humanProteome_\"+ref_version+\".fasta\"\npscout_data = base+'Data/Raw/ProteomeScout_'+pscout_version+'/data.tsv'", "_____no_output_____" ], [ "# download current ProteomeScout Data\nXRefProteomeScout.getPScoutData()", "_____no_output_____" ], [ "# run cross referencing\nXRefProteomeScout.XRefProteomeScout(pscout_data, ref_proteome, new_version)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb0ec2f4fea3084eb156112324296b8340f2a3ae
61,713
ipynb
Jupyter Notebook
code/scripts/Old_Checks/logchecks.ipynb
ojhall94/malatium
156e44b9ab386eebf1d4aa05254e1f3a7b255d98
[ "MIT" ]
1
2021-04-25T07:45:20.000Z
2021-04-25T07:45:20.000Z
code/scripts/Old_Checks/logchecks.ipynb
ojhall94/malatium
156e44b9ab386eebf1d4aa05254e1f3a7b255d98
[ "MIT" ]
null
null
null
code/scripts/Old_Checks/logchecks.ipynb
ojhall94/malatium
156e44b9ab386eebf1d4aa05254e1f3a7b255d98
[ "MIT" ]
1
2022-02-19T09:38:35.000Z
2022-02-19T09:38:35.000Z
280.513636
16,936
0.91595
[ [ [ "import seaborn as sns\nimport pylab as plt\nimport numpy as np", "_____no_output_____" ], [ "A = np.random.randn(1000) + 4\nA = np.log10(A)\nsns.distplot(A)", "_____no_output_____" ], [ "a, b, c = np.percentile(A, [16, 50, 84])\nsns.distplot(A)\nplt.axvline(a, ls='--')\nplt.axvline(b)\nplt.axvline(c, ls='--')", "_____no_output_____" ] ], [ [ "Now in log", "_____no_output_____" ] ], [ [ "B = np.log10(A)\nsns.distplot(B)", "/home/oliver/.local/lib/python3.7/site-packages/ipykernel_launcher.py:1: RuntimeWarning: invalid value encountered in log10\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "j, h, k = np.percentile(B, [16, 50, 84])\nsns.distplot(B)\nplt.axvline(j, ls='--')\nplt.axvline(h)\nplt.axvline(k, ls='--')", "_____no_output_____" ], [ "print(f'The median of A is: {b:.2f}')\nprint(f'The median of B is: {h:.2f}, and 10^{h:.2f} is: {10**h:.2f}')", "The median of A is: 10.01\nThe median of B is: 1.00, and 10^1.00 is: 10.01\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb0ed013cdc2631372b2d89eaaf03d48c4dcbb5f
12,166
ipynb
Jupyter Notebook
Lesson01/Exercise 1.ipynb
FelixOnduru1/Data-Science-for-Marketing-Analytics
6192d876eb2f2328482524a5a659c0e9b487514b
[ "MIT" ]
null
null
null
Lesson01/Exercise 1.ipynb
FelixOnduru1/Data-Science-for-Marketing-Analytics
6192d876eb2f2328482524a5a659c0e9b487514b
[ "MIT" ]
null
null
null
Lesson01/Exercise 1.ipynb
FelixOnduru1/Data-Science-for-Marketing-Analytics
6192d876eb2f2328482524a5a659c0e9b487514b
[ "MIT" ]
null
null
null
74.638037
5,137
0.504767
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "user_info = pd.read_json(\"user_info.json\")", "_____no_output_____" ], [ "user_info.head()", "_____no_output_____" ], [ "user_info.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6 entries, 0 to 5\nData columns (total 22 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 _id 6 non-null object \n 1 index 6 non-null int64 \n 2 guid 6 non-null object \n 3 isActive 6 non-null bool \n 4 balance 6 non-null object \n 5 picture 6 non-null object \n 6 age 6 non-null int64 \n 7 eyeColor 6 non-null object \n 8 name 6 non-null object \n 9 gender 6 non-null object \n 10 company 6 non-null object \n 11 email 6 non-null object \n 12 phone 6 non-null object \n 13 address 6 non-null object \n 14 about 6 non-null object \n 15 registered 6 non-null object \n 16 latitude 6 non-null float64\n 17 longitude 6 non-null float64\n 18 tags 6 non-null object \n 19 friends 6 non-null object \n 20 greeting 6 non-null object \n 21 favoriteFruit 6 non-null object \ndtypes: bool(1), float64(2), int64(2), object(17)\nmemory usage: 1.1+ KB\n" ], [ "user_info.shape", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb0ef7fd222064edf4a6627443a2384b9e41abfa
1,021,572
ipynb
Jupyter Notebook
examples/forecast/ex03 - SEAIRQ.ipynb
hidekb/pyross
6f309fc8739d98ce54ad1f478ecf5aeda1ff7ab1
[ "MIT" ]
null
null
null
examples/forecast/ex03 - SEAIRQ.ipynb
hidekb/pyross
6f309fc8739d98ce54ad1f478ecf5aeda1ff7ab1
[ "MIT" ]
null
null
null
examples/forecast/ex03 - SEAIRQ.ipynb
hidekb/pyross
6f309fc8739d98ce54ad1f478ecf5aeda1ff7ab1
[ "MIT" ]
null
null
null
1,931.137996
210,908
0.959426
[ [ [ "%matplotlib inline\nimport pyross\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "# Introduction: Forecast for SEAIRQ model with stochastic parameters", "_____no_output_____" ], [ "In this notebook, we consider the SEAIRQ model. \n\nWe assume that the parameters \n\n* $\\beta$ (probability of infection on contact),\n* $\\gamma_{E}$ (rate of progression for exposed individual to class A), \n* $\\gamma_{AA}$ (rate of progression from class A to asymptomatic infective class), \n* $\\gamma_{AS}$ (rate of progression from class A to symptomatic infective class), \n* $\\gamma_{I_a}$ (rate of removal for asymptomatic infected individuals), and\n* $\\gamma_{I_s}$ (rate of removal for symptomatic infected individuals) \n* $ \\tau_S$ (quarantining rate for susceptibles)\n* $ \\tau_E$ (quarantining rate for exposed)\n* $ \\tau_A$ (quarantining rate for A)\n* $ \\tau_{I_a}$ (quarantining rate for asymptomatic infectives)\n* $ \\tau_{I_s}$ (quarantining rate for symptomatic infectives)\n\nare not known exactly, but rather are characterized by a 11D Gaussian distribution with known mean and covariance matrix. The Gaussian distribution function is trunacted, i.e. set to zero if any parameter is $< 0$.\n\n**We now illustrate how uncertainties in the parameters affect the predictions of the SEAIRQ model.**\n\nFor this we simulate the SEIR model $N_s = 500$ times; for each simulation the above parameters are sampled from a given 11D Gaussian distribution. The resulting 500 trajectories are shown together with their mean, standard deviation, median, and 5 as well as 95 percentiles.\n\nWe perform this analysis for the deterministic SEAIRQ model.", "_____no_output_____" ], [ "# Define model parameters and initialise pyross.forecast.SEAIRQ", "_____no_output_____" ] ], [ [ "M = 1 # the SEAIRQ model we consider has no age structure\nNi = 50000*np.ones(M) # so there is only one age group \nN = np.sum(Ni) # and the total population is the size of this age group\n\n\nE0 = np.array([0])\nA0 = np.array([1])\nIa0 = np.array([0]) # the SEAIRQ model we consider has only one kind of infective \nIs0 = np.array([20]) # we take these to be symptomatic \nQ0 = np.array([0])\nR0 = np.array([0]) # and assume there are no recovered individuals initially \nS0 = N-(Ia0+Is0+R0+E0) # The initial susceptibles are obtained from S + E + A + Ia + Is + R = N\n\n\n# there is no contact structure\ndef contactMatrix(t): \n return np.identity(M) \n\n# duration of simulation and output datapoints\nTf = 500; Nt=Tf+1\n\n\n# These parameters we consider exact\nfsa = 1 # the self-isolation parameter \ntE = 0.00 # rate E -> Q\ntA = 0.00 # rate A -> Q\ntIa = 0.00 # rate Ia -> Q\ntIs = 0.05 # rate Is -> Q\n\n\n# These are the parameters that we sample stochastically\n# means\nalpha = 0.0 # fraction of asymptomatic infectives \nbeta = 0.2 # infection rate\ngIa = 0.1 # removal rate of asymptomatic infectives \ngIs = 0.1 # removal rate of symptomatic infectives \ngE = 0.04 # removal rate of E\ngA = 0.2 # rate to go from A to Ia\n\n\n\n# order in covariance matrix:\n# beta, gE, gAA, gAS, gIa, gIs, tS, tE, tA, tIa, tIs\n#\ncov = np.zeros([6,6],dtype=float)\ncov[0,0] = 0*alpha**2 # cov(alpha, alpha) = Var(alpha)\ncov[1,1] = 0.1*beta**2 # cov(beta, beta) = Var(beta)\ncov[2,2] = 0.01*gIa**2 # cov(gIa,gIa) = Var(gIa)\ncov[3,3] = 0.01*gIs**2 # cov(gIs,gIs) = Var(gIs)\ncov[4,4] = 0.01*gA**2 # cov(gA, gA) = Var(gA)\ncov[5,5] = 0.01*gE**2 # cov(gE, gE) = Var(gE)\n\n# \ncov[1,5] = 0.01*beta*gE # cov(beta, gE)\ncov[5,1] 
= cov[1,5] # covariance matrix is symmetric\n#\ncov[2,3] = cov[2,2] # cov(gIa, gIs)\ncov[3,2] = cov[2,3] \n\n\n# Define parameters for simulations\nparameters = {'alpha':alpha, 'beta':beta,\n 'gE':gE,'gA':gA,\n 'gIa':gIa, 'gIs':gIs, 'gE':gE, 'fsa':fsa,\n 'tE':tE,'tA':tA,'tIa':tIa,'tIs':tIs,\n 'cov':cov\n }\n\n# Initialise pyross forecast module\nmodel_forecast = pyross.forecast.SEAIRQ(parameters, M, Ni)\n\n\n# Number of simulations over which we average, use 500\nNs = 10 ", "_____no_output_____" ], [ "# Define a function which we use below to plot simulation results\ndef plot_trajectories(result,\n percentile=-1,\n plot_index = 4, # which time series should be plotted? \n filename='None'): # set filename for saving figures\n # plot_index class\n # 0 susceptibles\n # 1 exposed\n # 2 asymptomatic and infectious\n # 3 asymptomatic infectives\n # 4 symptomatic infectives\n # 5 quarantined\n if plot_index == 0:\n title='Susceptibles'\n ylabel = r'$N_S$'\n elif plot_index == 1:\n title='Exposed'\n ylabel = r'$N_{E}$'\n elif plot_index == 2:\n title=r'Asymptomatic, infectious (A)'\n ylabel = r'$N_{A}$'\n elif plot_index == 3:\n title='Asymptomatic infectives'\n ylabel = r'$N_{I,a}$'\n elif plot_index == 4:\n title='Symptomatic infectives'\n ylabel = r'$N_{I,s}$'\n elif plot_index == 5:\n title='Quarantined'\n ylabel = r'$N_{Q}$'\n else:\n raise RuntimeError(\"plot_index should be 0, 1, 2, or 3.\")\n #\n fontsize=25\n #\n #\n trajectories = result['X']\n t_arr = result['t']\n traj_mean = result['X_mean']\n traj_std = result['X_std']\n #\n #\n # Plot trajectories\n #\n fig, ax = plt.subplots(1,1,figsize=(7,5))\n ax.set_title(title,\n y=1.05,\n fontsize=fontsize)\n for i,e in enumerate(trajectories):\n ax.plot(t_arr,e[plot_index],\n alpha=0.15,\n )\n ax.fill_between(t_arr,traj_mean[plot_index] - traj_std[plot_index],\n traj_mean[plot_index] + traj_std[plot_index],\n alpha=0.7,\n color='limegreen',\n label='Std deviation')\n ax.plot(t_arr,traj_mean[plot_index] - traj_std[plot_index],\n alpha=1,\n label='Std deviation',\n lw=1.5,\n ls='--',\n color='black')\n ax.plot(t_arr,traj_mean[plot_index] + traj_std[plot_index],\n alpha=1,\n #label='Std deviation',\n lw=1.5,\n ls='--',\n color='black')\n ax.plot(t_arr,traj_mean[plot_index],\n alpha=1,\n lw=2,\n color='black',\n label='Mean')\n ax.set_xlim(np.min(t_arr),np.max(t_arr))\n ax.set_ylabel(ylabel,fontsize=fontsize)\n ax.set_xlabel(r'$t$ [days]',fontsize=fontsize)\n ax.legend(loc='upper right',fontsize=18)\n plt.show()\n if filename != 'None':\n fig.savefig(filename + '_trajs.png', bbox_inches='tight',dpi=100)\n plt.close()\n #\n #\n #\n # Plot percentiles\n #\n if percentile > 0:\n percentiles_lower = np.percentile(trajectories[:,plot_index],percentile,axis=0)\n percentiles_upper = np.percentile(trajectories[:,plot_index],100-percentile,axis=0)\n percentiles_median = np.percentile(trajectories[:,plot_index],50,axis=0)\n print(\"In the following plot, red dashed lines denote {0} and {1} percentiles of the numerical data:\".format(percentile,\n 100-percentile))\n fig, ax = plt.subplots(1,1,figsize=(7,5))\n ax.set_title(title,\n y=1.05,\n fontsize=fontsize)\n for i,e in enumerate(trajectories):\n ax.plot(t_arr,e[plot_index],\n alpha=0.15,\n )\n ax.fill_between(t_arr,percentiles_lower,\n percentiles_upper,\n alpha=0.1,\n color='red',\n label='Percentiles')\n ax.plot(t_arr,percentiles_lower,\n alpha=1,\n lw=2,\n label='Percentiles',\n ls='--',\n color='red',\n )\n ax.plot(t_arr,percentiles_upper,\n alpha=1,\n lw=2,\n color='red',\n ls='--',\n )\n 
ax.plot(t_arr,percentiles_median,\n alpha=1,\n lw=2,\n color='red',\n label='Median')\n ax.plot(t_arr,traj_mean[plot_index],\n alpha=1,\n lw=2,\n color='black',\n label='Mean')\n ax.set_xlim(np.min(t_arr),np.max(t_arr))\n ax.set_ylabel(ylabel,fontsize=fontsize)\n ax.set_xlabel(r'$t$ [days]',fontsize=fontsize)\n ax.legend(loc='upper right',fontsize=18)\n plt.show()\n if filename != 'None':\n fig.savefig(filename + '_trajs2.png', bbox_inches='tight',dpi=100)\n plt.close()\n \n \n# Define a function which we use below to plot parameters used for simulations\ndef plot_sample_parameters(result,\n filename='None'): # set filename for saving figures\n #\n fontsize=25\n #\n # Scatterplot of used parameters\n #\n sample_parameters = result['sample_parameters'].T\n beta = result['beta']\n gE = result['gE']\n gIa = result['gIa']\n gIs = result['gIs']\n #\n title = r'Samples for stochastic $\\beta$, $\\gamma_{E}$'\n labelx = r'$\\beta $'\n labely = r'$\\gamma_{E}$'\n x_mean = beta\n y_mean = gE\n labelx_mean = r'$\\langle \\beta \\rangle$'\n labely_mean = r'$\\langle \\gamma_{E} \\rangle$'\n data_index_x = 1\n data_index_y = 4\n fig, ax = plt.subplots(1,1,figsize=(7,5))\n ax.set_title(title,y=1.05,fontsize=fontsize)\n ax.axvline(x_mean,color='limegreen',ls='--',lw=2,label=labelx_mean)\n ax.axhline(y_mean,color='dodgerblue',ls='--',lw=2,label=labely_mean)\n ax.scatter(sample_parameters[data_index_x], sample_parameters[data_index_y] ,\n label='sampled data',\n color='black',s=10) #, c = truth)\n ax.set_xlabel(labelx,fontsize=fontsize)\n ax.set_ylabel(labely,fontsize=fontsize)\n ax.set_xlim(0,1.05*np.max(sample_parameters[data_index_x]))\n ax.set_ylim(0,1.05*np.max(sample_parameters[data_index_y]))\n ax.legend(loc='best',fontsize=15)\n plt.show()\n if filename != 'None':\n fig.savefig(filename + '_samples1.png', bbox_inches='tight',dpi=100)\n plt.close()\n #", "_____no_output_____" ] ], [ [ "# Forecast based on deterministic model", "_____no_output_____" ] ], [ [ "result = model_forecast.simulate(S0, E0, A0, Ia0, Is0, Q0,\n contactMatrix, Tf, Nt,\n verbose=True,\n Ns=Ns) \n\nplot_trajectories(result,\n plot_index = 2,\n percentile=5,\n )\n\nplot_trajectories(result,\n# filename='forecast_deterministic',\n percentile=5,\n )\n\nplot_trajectories(result,\n plot_index = 5,\n percentile=5,\n )\n\nplot_sample_parameters(result)", "Finished. Time needed for evaluation: 00:00:05\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb0f057ae2d952816a6dc2b555a4c831a5bd7c44
20,184
ipynb
Jupyter Notebook
notebooks/PlacesCNN_v3.2.experiment_4.ipynb
ifranco14/tf_real_estate_images_classification
a6aea3cb2b72fb88db815b0e6fcc72858a8d5f01
[ "MIT" ]
null
null
null
notebooks/PlacesCNN_v3.2.experiment_4.ipynb
ifranco14/tf_real_estate_images_classification
a6aea3cb2b72fb88db815b0e6fcc72858a8d5f01
[ "MIT" ]
null
null
null
notebooks/PlacesCNN_v3.2.experiment_4.ipynb
ifranco14/tf_real_estate_images_classification
a6aea3cb2b72fb88db815b0e6fcc72858a8d5f01
[ "MIT" ]
null
null
null
40.8583
331
0.609988
[ [ [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\ntf.keras.backend.clear_session()", "_____no_output_____" ], [ "from src.models import places_ontop_model\nfrom src import custom_losses, custom_metrics, optimizers\nfrom src.data import data", "Using TensorFlow backend.\n" ], [ "batch_size = 128\nn_classes = 6\nepochs = 100\nimg_size = 224\nn_channels = 3", "_____no_output_____" ], [ "model = places_ontop_model.PlacesOntop_Model(batch_size, n_classes, epochs, img_size, n_channels, version=8)", "WARNING: Logging before flag parsing goes to stderr.\nW0106 08:41:04.756145 140109375911744 deprecation_wrapper.py:119] From /home/ifranco/Documents/facultad/tesis/tesis_env/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:4070: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n\n" ], [ "from src.data import data", "_____no_output_____" ], [ "paths = data.PATH()\ndataset_path = f'{paths.PROCESSED_DATA_PATH}/'\ndataset = 'vision_based_dataset'\ntest_dataset_path = f'{dataset_path}/{dataset}/'", "_____no_output_____" ], [ "train_generator, validation_generator, test_generator = model.get_image_data_generator(test_dataset_path, train=True, validation=True, test=True, class_mode_validation='categorical', class_mode_test='categorical')", "Found 114361 images belonging to 6 classes.\nFound 6305 images belonging to 6 classes.\nFound 6328 images belonging to 6 classes.\n" ], [ "weights = model.get_class_weights(train_generator.classes, model)\nmodel.compile(loss=custom_losses.weighted_categorical_crossentropy(weights), metrics=['categorical_accuracy'],)\n# model.model.compile(optimizer='adam', loss=custom_losses.weighted_categorical_crossentropy(weights), metrics=['categorical_accuracy'],)\n# instance_model.compile(optimizer='adam', loss=custom_losses.weighted_categorical_crossentropy(weights), metrics=['categorical_accuracy'],)", "_____no_output_____" ], [ "model.show_summary()", "##### PlacesOntop_Model #####\nModel: \"model_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 224, 224, 3) 0 \n_________________________________________________________________\nblock1_conv1 (Conv2D) (None, 224, 224, 64) 1792 \n_________________________________________________________________\nblock1_conv2 (Conv2D) (None, 224, 224, 64) 36928 \n_________________________________________________________________\nblock1_pool (MaxPooling2D) (None, 112, 112, 64) 0 \n_________________________________________________________________\nblock2_conv1 (Conv2D) (None, 112, 112, 128) 73856 \n_________________________________________________________________\nblock2_conv2 (Conv2D) (None, 112, 112, 128) 147584 \n_________________________________________________________________\nblock2_pool (MaxPooling2D) (None, 56, 56, 128) 0 \n_________________________________________________________________\nblock3_conv1 (Conv2D) (None, 56, 56, 256) 295168 \n_________________________________________________________________\nblock3_conv2 (Conv2D) (None, 56, 56, 256) 590080 \n_________________________________________________________________\nblock3_conv3 (Conv2D) (None, 56, 56, 256) 590080 \n_________________________________________________________________\nblock3_pool (MaxPooling2D) (None, 28, 28, 256) 0 
\n_________________________________________________________________\nblock4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 \n_________________________________________________________________\nblock4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 \n_________________________________________________________________\nblock4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 \n_________________________________________________________________\nblock4_pool (MaxPooling2D) (None, 14, 14, 512) 0 \n_________________________________________________________________\nblock5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_pool (MaxPooling2D) (None, 7, 7, 512) 0 \n_________________________________________________________________\nglobal_average_pooling2d_1 ( (None, 512) 0 \n_________________________________________________________________\npredictions (Dense) (None, 6) 3078 \n=================================================================\nTotal params: 14,717,766\nTrainable params: 3,078\nNon-trainable params: 14,714,688\n_________________________________________________________________\n\n" ], [ "model.fit_from_generator(path=f'{dataset_path}/{dataset}', \n train_generator=train_generator, validation_generator=validation_generator,\n test_generator=test_generator,\n evaluate_net=False, use_model_check_point=True, use_early_stop=True, weighted=True,\n show_activations=False, n_workers=2)", "W0106 08:41:38.894395 140109375911744 callbacks.py:875] `period` argument is deprecated. Please use `save_freq` to specify the frequency in number of samples seen.\nW0106 08:41:38.894936 140109375911744 deprecation_wrapper.py:119] From /home/ifranco/Documents/facultad/tesis/tf_real_estate_images_classification/src/models/base_model.py:354: The name tf.global_variables_initializer is deprecated. Please use tf.compat.v1.global_variables_initializer instead.\n\nW0106 08:41:38.896728 140109375911744 deprecation_wrapper.py:119] From /home/ifranco/Documents/facultad/tesis/tf_real_estate_images_classification/src/models/base_model.py:355: The name tf.local_variables_initializer is deprecated. Please use tf.compat.v1.local_variables_initializer instead.\n\nW0106 08:41:38.898437 140109375911744 deprecation_wrapper.py:119] From /home/ifranco/Documents/facultad/tesis/tf_real_estate_images_classification/src/models/base_model.py:356: The name tf.keras.backend.get_session is deprecated. 
Please use tf.compat.v1.keras.backend.get_session instead.\n\nW0106 08:41:39.088199 140109375911744 deprecation.py:323] From /home/ifranco/Documents/facultad/tesis/tesis_env/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\n" ], [ "model.model_path", "_____no_output_____" ], [ "model = model.load_model(model.model_path)", "_____no_output_____" ], [ "model.model_is_trained = True", "_____no_output_____" ], [ "model.save_model()", "weights [2.09728947 0.87794411 1.08704042 0.8524606 0.87239869 0.87343812]\n" ] ], [ [ "### Notas:\n#### - Probar configuraciones 6, 7, 8 y 9.\n#### - Comparar mejor resultado con notebook placescnn_v2.1\n#### - Probar configuraciones desfrizando bloques convolutivos de la red", "_____no_output_____" ] ], [ [ "model.fit_from_generator(path=f'{dataset_path}/{dataset}', \n train_generator=train_generator, validation_generator=validation_generator,\n test_generator=test_generator,\n evaluate_net=False, use_model_check_point=True, use_early_stop=True, weighted=True,\n show_activations=False,)", "W1215 16:28:29.501663 140671186286400 callbacks.py:875] `period` argument is deprecated. Please use `save_freq` to specify the frequency in number of samples seen.\nW1215 16:28:29.502433 140671186286400 deprecation_wrapper.py:119] From /home/ifranco/Documents/facultad/tesis/tf_real_estate_images_classification/src/models/base_model.py:352: The name tf.global_variables_initializer is deprecated. Please use tf.compat.v1.global_variables_initializer instead.\n\nW1215 16:28:29.507600 140671186286400 deprecation_wrapper.py:119] From /home/ifranco/Documents/facultad/tesis/tf_real_estate_images_classification/src/models/base_model.py:353: The name tf.local_variables_initializer is deprecated. Please use tf.compat.v1.local_variables_initializer instead.\n\nW1215 16:28:29.512035 140671186286400 deprecation_wrapper.py:119] From /home/ifranco/Documents/facultad/tesis/tf_real_estate_images_classification/src/models/base_model.py:354: The name tf.keras.backend.get_session is deprecated. Please use tf.compat.v1.keras.backend.get_session instead.\n\n" ], [ "model.model_is_trained = True", "_____no_output_____" ], [ "model.save_model()", "weights [2.09728947 0.87794411 1.08704042 0.8524606 0.87239869 0.87343812]\n" ], [ "model.predict_from_generator()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb0f0aac09da15028d2adeb18775b10c9dace202
42,198
ipynb
Jupyter Notebook
Presentations/2014-02-12_DMAC_webinar.ipynb
petercunning/notebook
5b26f2dc96bcb36434542b397de6ca5fa3b61a0a
[ "MIT" ]
32
2015-01-07T01:48:05.000Z
2022-03-02T07:07:42.000Z
Presentations/2014-02-12_DMAC_webinar.ipynb
petercunning/notebook
5b26f2dc96bcb36434542b397de6ca5fa3b61a0a
[ "MIT" ]
1
2015-04-13T21:00:18.000Z
2015-04-13T21:00:18.000Z
Presentations/2014-02-12_DMAC_webinar.ipynb
petercunning/notebook
5b26f2dc96bcb36434542b397de6ca5fa3b61a0a
[ "MIT" ]
30
2015-01-28T09:31:29.000Z
2022-03-07T03:08:28.000Z
230.590164
31,291
0.901228
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb0f0f1e13ebbf55c9be48fcbf3498a052b9dcf8
9,994
ipynb
Jupyter Notebook
04_webserver.ipynb
ArthurConner/midtail
b73dd85b4d131c3a578dd0dbabf89f7fbf9dd77b
[ "Apache-2.0" ]
null
null
null
04_webserver.ipynb
ArthurConner/midtail
b73dd85b4d131c3a578dd0dbabf89f7fbf9dd77b
[ "Apache-2.0" ]
null
null
null
04_webserver.ipynb
ArthurConner/midtail
b73dd85b4d131c3a578dd0dbabf89f7fbf9dd77b
[ "Apache-2.0" ]
null
null
null
25.757732
1,666
0.455773
[ [ [ "# default_exp webserver", "_____no_output_____" ] ], [ [ "# longtail\n\n> API details.", "_____no_output_____" ] ], [ [ "#export\nimport os\nfrom collections import namedtuple\nfrom longtail.engine import BlockTracker\nfrom longtail.commonlib import waste_some_time, lastWrapper, capture\nfrom longtail.transplier import Transpiler", "_____no_output_____" ], [ "#hide\ntr = Transpiler(\".\",\"test/src/basic.txt\",\"test/transpile/basic.py\")\ntr.parse()", "_____no_output_____" ], [ "!open .\n\n", "_____no_output_____" ], [ "\n", "_____no_output_____" ], [ "#hide\n!python test/transpile/basic.py", "hello\r\nworld\r\nhello\r\nworld\r\nhuman fish\r\nblock 2\r\n" ], [ "\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb0f17df1528ec7a63f3a668954abee96a09cc78
22,525
ipynb
Jupyter Notebook
influxdb_setup_and_debug.ipynb
Skatinger/influxdb-grafana-dockerstack
0cb404a9c76a528375aa89b0f650e1e34c3a2067
[ "MIT" ]
null
null
null
influxdb_setup_and_debug.ipynb
Skatinger/influxdb-grafana-dockerstack
0cb404a9c76a528375aa89b0f650e1e34c3a2067
[ "MIT" ]
null
null
null
influxdb_setup_and_debug.ipynb
Skatinger/influxdb-grafana-dockerstack
0cb404a9c76a528375aa89b0f650e1e34c3a2067
[ "MIT" ]
null
null
null
40.29517
1,125
0.507614
[ [ [ "from influxdb import InfluxDBClient", "_____no_output_____" ], [ "client = InfluxDBClient(host='localhost', port=8086)", "_____no_output_____" ], [ "print(client)", "<influxdb.client.InfluxDBClient object at 0x7fab1b3718d0>\n" ], [ "client.create_database('pyexample')", "_____no_output_____" ], [ "client.switch_database('pyexample')", "_____no_output_____" ], [ "json_body = [\n {\n \"measurement\": \"brushEvents\",\n \"tags\": {\n \"user\": \"Carol\",\n \"brushId\": \"6c89f539-71c6-490d-a28d-6c5d84c0ee2f\"\n },\n \"time\": \"2018-03-28T8:01:00Z\",\n \"fields\": {\n \"duration\": 127\n }\n },\n {\n \"measurement\": \"brushEvents\",\n \"tags\": {\n \"user\": \"Carol\",\n \"brushId\": \"6c89f539-71c6-490d-a28d-6c5d84c0ee2f\"\n },\n \"time\": \"2018-03-29T8:04:00Z\",\n \"fields\": {\n \"duration\": 132\n }\n },\n {\n \"measurement\": \"brushEvents\",\n \"tags\": {\n \"user\": \"Carol\",\n \"brushId\": \"6c89f539-71c6-490d-a28d-6c5d84c0ee2f\"\n },\n \"time\": \"2018-03-30T8:02:00Z\",\n \"fields\": {\n \"duration\": 129\n }\n }\n]", "_____no_output_____" ], [ "test = [{\"measurement\": \"test_temperature\",\n \"tags\": {\"room\": \"room1\", \"id\": \"1\"},\n \"time\": \"2020-05-12T8:11:00Z\",\n \"fields\": { \"temperature\": \"22\"}\n },\n {\"measurement\": \"test_temperature\",\n \"tags\": {\"room\": \"room1\", \"id\": \"2\"},\n \"time\": \"2020-05-12T8:13:00Z\",\n \"fields\": { \"temperature\": \"25\"}\n },\n {\"measurement\": \"test_temperature\",\n \"tags\": {\"room\": \"room1\", \"id\": \"3\"},\n \"time\": \"2020-05-12T8:14:00Z\",\n \"fields\": { \"temperature\": \"24\"}\n },\n {\"measurement\": \"test_temperature\",\n \"tags\": {\"room\": \"room1\", \"id\": \"4\"},\n \"time\": \"2020-05-12T8:15:00Z\",\n \"fields\": { \"temperature\": \"23\"}\n },\n {\"measurement\": \"test_temperature\",\n \"tags\": {\"room\": \"room1\", \"id\": \"5\"},\n \"time\": \"2020-05-12T8:17:00Z\",\n \"fields\": { \"temperature\": \"26\"}\n }]", "_____no_output_____" ], [ "client.write_points(test)", "_____no_output_____" ], [ "test = b'45'", "_____no_output_____" ], [ "test.decode(\"ascii\")", "_____no_output_____" ], [ "client.write_points([{\"measurement\": \"room_temperature\", \"time\": \"2020-05-12T8:09:00Z\",\n \"fields\": { \"temperature\": \"25\"}}])", "_____no_output_____" ], [ "results = client.query('SELECT \"temperature\" FROM \"pyexample\".\"autogen\".\"room_temperature\"')\nresults.raw", "_____no_output_____" ], [ "client.write_points([{'measurement': 'test_temperature', 'tags': {'room': 'room1', 'id': '6'}, 'time': '2020-08-05T11:44:25Z', 'fields': {'temperature': '53.6'}}]\n)", "_____no_output_____" ], [ "results = client.query('SELECT \"temperature\" FROM \"pyexample\".\"autogen\".\"test_temperature\"')\nresults.raw", "_____no_output_____" ], [ "client.query('DELETE FROM \"pyexample\".\"test_temperature\" WHERE time < now()')", "_____no_output_____" ], [ "client.write_points(json_body)", "_____no_output_____" ], [ "results = client.query('SELECT \"duration\" FROM \"pyexample\".\"autogen\".\"brushEvents\" GROUP BY \"user\"')", "_____no_output_____" ], [ "results.raw", "_____no_output_____" ], [ "points = results.get_points(tags={'user':'Carol'})", "_____no_output_____" ], [ "for point in points:\n print(\"Time: %s, Duration: %i\" % (point['time'], point['duration']))", "Time: 2018-03-28T08:01:00Z, Duration: 127\nTime: 2018-03-29T08:04:00Z, Duration: 132\nTime: 2018-03-30T08:02:00Z, Duration: 129\n" ], [ "results = client.query('SELECT * FROM \"pyexample\".\"temperature\"')", 
"_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb0f214be64903ed0e1715cbf8c7908b222f96e6
16,150
ipynb
Jupyter Notebook
Morocco model/Energy calculations.ipynb
KTH-dESA/FAO
74459217a9e8ad8107b1d3a96fd52eebd93daebd
[ "MIT" ]
3
2020-09-17T11:12:52.000Z
2021-03-31T09:24:02.000Z
Morocco model/Energy calculations.ipynb
KTH-dESA/FAO
74459217a9e8ad8107b1d3a96fd52eebd93daebd
[ "MIT" ]
101
2019-10-02T10:16:28.000Z
2021-06-05T06:42:55.000Z
Morocco model/Energy calculations.ipynb
KTH-dESA/FAO
74459217a9e8ad8107b1d3a96fd52eebd93daebd
[ "MIT" ]
2
2020-02-23T13:28:00.000Z
2021-03-31T10:02:46.000Z
36.292135
588
0.623839
[ [ [ "# NEXUS tool: case study for the Souss-Massa basin - energy demand calculations\nIn this notebook a case study for the Souss-Massa basin is covered using the `nexustool` package. The water requirements for agricultural irrigation and domestic use were previously calculated using the Water Evaluation and Planning System (WEAP) model. In this case study, the energy requirements for groundwater pumping, wastewater treatment, desalination of seawater and pumping energy for water conveyance are estimated.\n\nFirst import the package by running the following block:", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append(\"..\") #this is to add the avobe folder to the package directory\nimport os\nimport nexustool\nimport pandas as pd\nfrom dashboard.scripts.plotting import water_delivered_plot, unmet_demand_plot, water_supply_plot, wtd_plot, energy_demand_plot, crop_production", "_____no_output_____" ] ], [ [ "## 1. Read scenario data\nAfter importing all required packages, the input GIS data is loaded into the variable `df`. Change the `data_folder`, `scenario` and `climate` variables to reflect the name and relative location of your data file. This dataset should already have the water demand for irrigation results.", "_____no_output_____" ] ], [ [ "data_folder = os.path.join('data', 'processed results')\nscenario = 'Desalination'\nclimate = 'Climate Change'\ninput_folder = os.path.join(data_folder, scenario, climate)", "_____no_output_____" ] ], [ [ "## 2. Create nexus model \nTo create a model simply create an instance of the `nexustool.Model()` class and store it in a variable name. The `nexustool.Model()` class requires a dataframe as input data. Several other properties and parameter values can be defined by explicitly passing values to them. To see a full list of parameters and their explaination refer to the documentation of the package. We wil create a model using the `demand_data.gz` data:", "_____no_output_____" ] ], [ [ "#Define the path to read the scenario input data and reads it in\nfile_path = os.path.join(input_folder, 'demand_data.gz')\ndf = pd.read_csv(file_path)\n\n#Creates the nexus model with the input dataframe\nsouss_massa = nexustool.Model(df)", "_____no_output_____" ] ], [ [ "## 3. Define variable names\nThe names of the properties of the model can be changed at any time. This is important for the model to know how each property is called withing your input data. 
To check the current property names run the `.print_properties()` method, a list with the names of each property and its current value will be displayed.\n\nThen you can provide the right names for each property, calling them and assigning a value as:\n```python\nsouss_massa.elevation_diff = 'elevation_delta'\nsouss_massa.gw_depth = 'name_of_ground_water_depth'\n```\n\nIn this particular case we will need to change the following default values:", "_____no_output_____" ] ], [ [ "souss_massa.elevation_diff = 'elevation_diff' #for the case of GW, the elevation_diff is set to be the wtd\nsouss_massa.L = 'distance' #for the case of GW, the distance is set to be the wtd\nsouss_massa.D = 'Pipe_diameter'\n\n#Defines the name of the variable for Peak Water Demand and Seasonal Scheme Water demand (monthly)\nsouss_massa.pwd = 'pwd' # Peak Water Demand\nsouss_massa.sswd = 'sswd' # Seassonal Scheme Water Demand\nsouss_massa.df.rename(columns={'value': 'sswd'}, inplace=True) #Renames the name of the column value to sswd \nsouss_massa.pp_e = 'pp_e' # Peak Pumping Energy\nsouss_massa.pa_e = 'pa_e' # Pumping Average Energy", "_____no_output_____" ] ], [ [ "## 4. Define pipelines diameters and average pumping hours, pumping efficiency\nNow we need to define the specifications of the water network, giving pipeline / canal diameter values:", "_____no_output_____" ] ], [ [ "souss_massa.df['Pipe_diameter'] = 1.2\nsouss_massa.df.loc[souss_massa.df['type'].str.contains('GW'), 'Pipe_diameter'] = 1000\nsouss_massa.df.loc[souss_massa.df['type'].str.contains('DS'), 'Pipe_diameter'] = 1.2\nsouss_massa.df.loc[souss_massa.df['type'].str.contains('Pipeline'), 'Pipe_diameter'] = 1.2\n\nsouss_massa.pumping_hours_per_day = 10\nsouss_massa.pump_eff = 0.6", "_____no_output_____" ] ], [ [ "## 5. Peak Water Demand (PWD)\nThe $PWD$ is definfe as the daily peak cubic meters of water pumped per second withing the month. To accomplish that, the $SSWD$ (m<sup>3</sup>/month) is divided by 30 days per month, 3600 seconds per hour and the amount of average pumping hours in a day. This provides the $PWD$ in m<sup>3</sup>/s:\n\n$$\nPWD\\,(m^3/s) = \\frac{SSWD\\,(m^3/month)}{30\\,(day/month)\\cdot PumpHours\\,(h/day)\\cdot 3600\\, (s/h)}\n$$\n\nMoreover, the $PWD$ for agricultural irrigation is assumed as double the normal $PWD$. We make this calculations as per the following cell:", "_____no_output_____" ] ], [ [ "#Defines the PWD. It is defined as double the seasonal demand for agricultural sites\nsouss_massa.df[souss_massa.pwd] = souss_massa.df[souss_massa.sswd] / 30 / souss_massa.pumping_hours_per_day / 3600 #to convert to cubic meter per second [m3/s]\nsouss_massa.df.loc[souss_massa.df['type']=='Agriculture', souss_massa.pwd] *= 2", "_____no_output_____" ] ], [ [ "## 6. Calculate pumping energy requirements\nTo estimate the pumping energy requirements for conveyance, first we need to calculate the Total Dinamic Head (TDH). This, is a measure in meters that accounts for the elevation difference between two points and the pressure loss in distribution.\n\nFor that, the area $A$ `.pipe_area()`, the velocity $V$ `.flow_velocity()`, the Reynolds number $Re$ `.reynolds()` and the friction factor $f$ `.friction_factor()` need to be estimated. 
The `nexustool` provides simple functions that allows us make an easy estimation of these variables, which have the following formulas implemented in the background:\n\n$$\nA\\,(m^2) = \\pi\\cdot \\frac{D^2}{4}\n$$\n\n$$\nV\\,(m/s) = \\frac{SSWD\\,(m^3/month)}{PumpHours\\,(h/day)\\cdot 30\\,(day/month)\\cdot 3600\\,(s/h)\\cdot A\\,(m^2)}\n$$\n\n$$\nRe = \\frac{V\\,(m/s)\\cdot D\\,(m)}{v\\,(m^2/s)}\n$$\n\nWhere $v$ is the kinematic viscosity of water at around 1.004e-06 m<sup>2</sup>/s. And the frction factor is estimated according to the Swameeโ€“Jain equation:\n\n$$\nf = \\frac{0.25}{\\left[log_{10}\\left(\\frac{\\epsilon}{3.7D}+\\frac{5.74}{Re^{0.9}}\\right)\\right]^2}\n$$\n\nWhere $\\epsilon$ is the roughness of the material. ", "_____no_output_____" ] ], [ [ "souss_massa.pipe_area() \nsouss_massa.flow_velocity()\nsouss_massa.reynolds()\nsouss_massa.friction_factor()", "_____no_output_____" ] ], [ [ "Then, the TDH can be calculated by simply calling the `.get_tdh()` function.\n\n$$\nTDH\\,(m) = f\\cdot \\frac{L\\,(m)}{D\\,(m)}\\cdot \\frac{V(m/s)^2}{2\\cdot g\\,(m/s^2)}\n$$\n\nWhereas the conveyance pumping energy requirements by calling the `.get_pumping_energy()` method. The equation used to calculate the Electricity Demand ($E_D$) for pumping is as follows:\n\n$$\nE_D\\,(kW_h) = \\frac{SSWD\\,(m^3)\\cdot \\rho\\,(kg/m^3)\\cdot g\\,(m/s^2)\\cdot TDH\\,(m)}{PP_{eff}\\,(\\%)\\cdot 3600\\,(s/h)\\cdot 1000\\,(W/kW)}\n$$\n\nThe variable withing the Model for the $E_D$ is the `pa_e` or Pumping Average Electricity requirements.\n\nMoreover, the Power Demand for pumping ($PD$) is denoted by the variable `pp_e` and calculated by the following formula:\n\n$$\nPD\\,(kW) = \\frac{PWD\\,(m^3/s)\\cdot \\rho\\,(kg/m^3)\\cdot g\\,(m/s^2)\\cdot TDH\\,(m)}{PP_{eff}\\,(\\%)\\cdot 1000\\,(W/kW)}\n$$\n\nThe `.get_pumping_energy()` method calculates both the $E_D$ (`pa_e`) and $PD$ (`pp_e`).", "_____no_output_____" ] ], [ [ "souss_massa.get_tdh()\nsouss_massa.get_pumping_energy()\n\nsouss_massa.df.loc[souss_massa.df.pp_e<0, souss_massa.pp_e] = 0 # ensures no negative energy values are considered\nsouss_massa.df.loc[souss_massa.df.pa_e<0, souss_massa.pa_e] = 0 # ensures no negative power values are considered\n\n# We exclude energy for pumping calculations done for the Complexe Aoulouz Mokhtar Soussi, \n# as this pipeline is known to be driven by gravity only\nsouss_massa.df.loc[souss_massa.df['Supply point'].str.contains('Complexe Aoulouz Mokhtar Soussi'), 'pa_e'] = None", "_____no_output_____" ] ], [ [ "## 7. Calculating desalination energy requirements\nDesalination energy requirements are estimated by multipliying the monthly average desalinated water (`sswd`), by an energy intensity factor (`desal_energy_int`) based on the characteristics of the desalination plant.", "_____no_output_____" ] ], [ [ "#Define energy intensity for seawater desalination project\ndesal_energy_int = 3.31 # kWh/m3\n\n#Create a new nexus Model with the data relevant to the desalination plant only, filtering by the key work DS (Desalination)\nsm_desal = nexustool.Model(souss_massa.df.loc[souss_massa.df['type'].str.contains('DS')].copy())\n\n#Multiply the sswd by the energy intensity for treatment\nsm_desal.df[souss_massa.pa_e] = sm_desal.df[souss_massa.sswd] * desal_energy_int", "_____no_output_____" ] ], [ [ "## 8. Calculating wastewater treatment energy requirements\nWastewater treatment energy is dependent on the type of treatment required. 
Wastewater treatment can be subdivided into three stages: primary, secondary and tertiary. The treatment stages used are then dependent on the final quality requirements of the treated wastewater. Thus, for wastewater that will be treated and returned to the ecosystem, often primary to secondary treatment is enough. On the other hand, treated wastewater intended for agricultural irrigation or drinking purposes should go through secondary to tertiary treatment to ensure proper disinfection levels. \n\nDepending on the scenario run, we will then need to use an appropriate wastewater treatment energy intensity. In general, the higher the number of stages, the higher the energy requirements. In this model, we used an energy intensity of **0.1 kWh/m<sup>3</sup>** for treated wastewater that is not being reused, and **0.8 kWh/m<sup>3</sup>** for treated wastewater reused in agricultural irrigation.", "_____no_output_____" ] ], [ [ "#Here we load the WWTP inflow data\nfile_path = os.path.join(input_folder, 'wwtp_inflow.gz')\ndf_wwtp = pd.read_csv(file_path)\n\n#We define an energy intensity for wastewater treatment and compute the energy demand\nwwtp_energy_int = 0.1 # kWh/m3\ndf_wwtp['pa_e'] = df_wwtp.value * wwtp_energy_int", "_____no_output_____" ] ], [ [ "## 9. Saving the results\nFinally, we save the resulting dataframes as `.gz` files, which are compressed versions of `csv` files:", "_____no_output_____" ] ], [ [ "#Define and create the output folder\nresults_folder = os.path.join('dashboard', 'data', scenario, climate)\nos.makedirs(results_folder, exist_ok=True)\n\n#Save the results\nsouss_massa.df.to_csv(os.path.join(results_folder, 'results.gz'), index=False)\nsm_desal.df.to_csv(os.path.join(results_folder, 'desal_data.gz'), index=False)\ndf_wwtp.to_csv(os.path.join(results_folder, 'wwtp_data.gz'), index=False)", "_____no_output_____" ] ], [ [ "## 10. Visualizing some results \nUsing some functions imported from the visualization tool, we can plot some general results for the scenario:", "_____no_output_____" ], [ "### Water delivered (Mm<sup>3</sup>)", "_____no_output_____" ] ], [ [ "water_delivered_plot(souss_massa.df, 'Year', {})", "_____no_output_____" ] ], [ [ "### Energy demand (GWh)", "_____no_output_____" ] ], [ [ "energy_demand_plot(souss_massa.df, df_wwtp, sm_desal.df, 'Year', {})", "_____no_output_____" ] ], [ [ "### Unmet demand (%)", "_____no_output_____" ] ], [ [ "unmet_demand_plot(souss_massa.df, 'Year', {})", "_____no_output_____" ] ], [ [ "### Water supplied (Mm<sup>3</sup>/year)", "_____no_output_____" ] ], [ [ "water_supply_plot(souss_massa.df, 'Year', {})", "_____no_output_____" ] ], [ [ "### Groundwater depth (m)", "_____no_output_____" ] ], [ [ "wtd_plot(souss_massa.df, 'Date', {})", "_____no_output_____" ] ], [ [ "### Crop production (ton/year)", "_____no_output_____" ] ], [ [ "crop = pd.read_csv(os.path.join(input_folder, 'production.gz'))\ncrop_production(crop, 'crop', {})", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb0f2e19beef5f92126df197c6e2c70c52481df1
153,303
ipynb
Jupyter Notebook
1-Machine-Learning-Basic/machinelearning.ipynb
hyu-cvlab/deeplearning-practice
1598fbc79b135935d8f219557b8d28eed51cbca7
[ "MIT" ]
null
null
null
1-Machine-Learning-Basic/machinelearning.ipynb
hyu-cvlab/deeplearning-practice
1598fbc79b135935d8f219557b8d28eed51cbca7
[ "MIT" ]
null
null
null
1-Machine-Learning-Basic/machinelearning.ipynb
hyu-cvlab/deeplearning-practice
1598fbc79b135935d8f219557b8d28eed51cbca7
[ "MIT" ]
null
null
null
400.26893
36,996
0.93867
[ [ [ "# ์‹œ๊ฐ ์‹ฌํ™”\n\n- **Instructor**: Jongwoo Lim / Jiun Bae\n- **Email**: [[email protected]](mailto:[email protected]) / [[email protected]](mailto:[email protected])\n\n## Machine Learnig Basic\n\nIn this example we will take a quick look at how machine learning works. The goals of this example are as follows:\n\n- Understand **Machine Learning** and how they work.\n- Learn basically how to **write and use code**.\n\nAnd this example also is written in [IPython Notebook](https://ipython.org/notebook.html), an interactive computational environment, in which you can run code directly.", "_____no_output_____" ], [ "## Machine Learning Model\n\nYou can think of a basic machine learning model as a function that returns a predicted value for an input.\n\n![](../assets/machine-learning-model.png)\n\nTo make this model return the results we want, we can use the training data to update the model with the differences from the desired results.\n\n![](../assets/machine-learning-model-train.png)", "_____no_output_____" ], [ "## Perceptron: <small>Artifical Neuron</small>\n\nAn artificial neuron is a mathematical function based on a model of biological neurons, where each neuron takes inputs, weighs them separately, sums them up and passes this sum through a nonlinear function to produce output. A perceptron is a neural network unit (an artificial neuron) that does certain computations to detect features or business intelligence in the input data.\n\n![](../assets/perceptron.png)", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "data_size = 1000\ndimension = 2", "_____no_output_____" ] ], [ [ "### Random points\n\n1000 dots within the range of $x: 0..1, y: 0..1$.\n\nPoints with $y>x$ will be green (label=0) and points with no blue (label=1).", "_____no_output_____" ] ], [ [ "points = np.random.rand(data_size, dimension)\nlabels = np.zeros(data_size)\nlabels[points[:, 0] > points[:, 1]] = 1", "_____no_output_____" ], [ "line = np.arange(0, 1, .001)", "_____no_output_____" ], [ "plt.scatter(points[labels == 0][:, 0], points[labels == 0][:, 1], c='g')\nplt.scatter(points[labels == 1][:, 0], points[labels == 1][:, 1], c='b')\nplt.plot(line, line, '-r')", "_____no_output_____" ] ], [ [ "### Simple perceptron\n\n$w$ is weight, $b$ is bias\n\n$y = wx + b$", "_____no_output_____" ] ], [ [ "weight = np.random.rand(dimension)\nbias = np.random.rand(dimension)", "_____no_output_____" ], [ "def forward(weight, bias, X):\n return np.sum(np.multiply(X, weight) + bias)", "_____no_output_____" ], [ "prediction = forward(weight, bias, points[0])", "_____no_output_____" ], [ "print(f'We expected {labels[0]}, prediction is {0 if prediction > .5 else 1}')", "We expected 0.0, prediction is 0\n" ] ], [ [ "Calculate `error` and update `weight`, `bias`", "_____no_output_____" ] ], [ [ "error = prediction - labels[0]\nweight = weight - .1 * error * points[0]\nbias = bias - .1 * error", "_____no_output_____" ] ], [ [ "### Train & Test", "_____no_output_____" ] ], [ [ "train_size = int(data_size * .7)\ntrain_points = points[:train_size]\ntrain_labels = labels[:train_size]\ntest_points = points[train_size:]\ntest_labels = labels[train_size:]", "_____no_output_____" ], [ "for epoch in range(500):\n # train\n for x, y in zip(train_points, train_labels):\n # get prediction\n pred = forward(weight, bias, x)\n # calculate error\n error = pred - y\n \n # update model\n weight -= .01 * error * x \n bias -= .01 * error * x\n \n # test\n if not (epoch % 100):\n 
predictions = np.array([forward(weight, bias, x).item() > .5 for x, _ in zip(test_points, test_labels)])\n print(f'Acc: {(predictions == test_labels).sum() / len(test_labels):.4f}')\n \n plt.scatter(test_points[predictions == 0][:, 0], test_points[predictions == 0][:, 1], c='g')\n plt.scatter(test_points[predictions == 1][:, 0], test_points[predictions == 1][:, 1], c='b')\n plt.plot(line, line, '-r')\n plt.show()", "Acc: 0.9400\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb0f331f85520aaea7634853a6e79eff8ace597a
65,032
ipynb
Jupyter Notebook
FinRL_Raytune_with_Alpaca_Paper_Trading.ipynb
NicoleRichards1998/FinRL
416d20fc475c8c485fe1d26b3d059a90ba2345b8
[ "MIT" ]
null
null
null
FinRL_Raytune_with_Alpaca_Paper_Trading.ipynb
NicoleRichards1998/FinRL
416d20fc475c8c485fe1d26b3d059a90ba2345b8
[ "MIT" ]
null
null
null
FinRL_Raytune_with_Alpaca_Paper_Trading.ipynb
NicoleRichards1998/FinRL
416d20fc475c8c485fe1d26b3d059a90ba2345b8
[ "MIT" ]
null
null
null
47.642491
9,797
0.522497
[ [ [ "<a href=\"https://colab.research.google.com/github/NicoleRichards1998/FinRL/blob/master/FinRL_Raytune_with_Alpaca_Paper_Trading.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "print(\"Setting up colab environment\")\n!pip uninstall -y -q pyarrow\n!pip install -q -U ray[tune]\n!pip install -q ray[debug]\n\n# A hack to force the runtime to restart, needed to include the above dependencies.\nprint(\"Done installing! Restarting via forced crash (this is not an issue).\")\nimport os\nos._exit(0)", "Setting up colab environment\n\u001b[K |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 59.6 MB 1.2 MB/s \n\u001b[K |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 175 kB 47.8 MB/s \n\u001b[K |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 125 kB 37.7 MB/s \n\u001b[?25h\u001b[33mWARNING: ray 1.10.0 does not provide the extra 'debug'\u001b[0m\n" ], [ "## If you are running on Google Colab, please install TensorFlow 2.0 by uncommenting below..\ntry:\n # %tensorflow_version only exists in Colab.\n %tensorflow_version 2.x\nexcept Exception:\n pass", "_____no_output_____" ], [ "#Installing FinRL\n%%capture\n!pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git", "_____no_output_____" ], [ "%%capture\n!pip install \"ray[tune]\" optuna", "_____no_output_____" ], [ "%%capture\n!pip install int_date==0.1.8", "_____no_output_____" ], [ "!pip install tensorboardX", "Requirement already satisfied: tensorboardX in /usr/local/lib/python3.7/dist-packages (2.5)\nRequirement already satisfied: protobuf>=3.8.0 in /usr/local/lib/python3.7/dist-packages (from tensorboardX) (3.17.3)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from tensorboardX) (1.15.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from tensorboardX) (1.21.5)\n" ], [ "!pip install bayesian-optimization", "Collecting bayesian-optimization\n Downloading bayesian-optimization-1.2.0.tar.gz (14 kB)\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.7/dist-packages (from bayesian-optimization) (1.21.5)\nRequirement already satisfied: scipy>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from bayesian-optimization) (1.4.1)\nRequirement already satisfied: scikit-learn>=0.18.0 in /usr/local/lib/python3.7/dist-packages (from bayesian-optimization) (1.0.2)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.18.0->bayesian-optimization) (1.1.0)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.18.0->bayesian-optimization) (3.1.0)\nBuilding wheels for collected packages: bayesian-optimization\n Building wheel for bayesian-optimization (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for bayesian-optimization: filename=bayesian_optimization-1.2.0-py3-none-any.whl size=11685 sha256=fee4019ce2bb3c14ed6318906ad66f7abc7cf66a962d2cdd8534f50959e5cd7b\n Stored in directory: /root/.cache/pip/wheels/fd/9b/71/f127d694e02eb40bcf18c7ae9613b88a6be4470f57a8528c5b\nSuccessfully built bayesian-optimization\nInstalling collected packages: bayesian-optimization\nSuccessfully installed bayesian-optimization-1.2.0\n" ], [ "# Load the TensorBoard notebook extension\n%load_ext tensorboard", "_____no_output_____" ], [ "#Importing the libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport random\n\n# matplotlib.use('Agg')\nimport datetime\nimport optuna\n%matplotlib inline\nfrom finrl.apps import config\nfrom finrl.apps.config import DOW_30_TICKER\nfrom finrl.apps.config import TECHNICAL_INDICATORS_LIST\n\nfrom finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader\nfrom finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split\nfrom finrl.finrl_meta.env_stock_trading.env_stocktrading_np import StockTradingEnv\n#from intraday.env import SingleAgentEnv\nfrom finrl.finrl_meta.env_stock_trading.env_stock_papertrading import AlpacaPaperTrading\nfrom finrl.drl_agents.rllib.models import DRLAgent as DRLAgent_rllib\n#from stable_baselines3.common.vec_env import DummyVecEnv\nfrom finrl.finrl_meta.data_processor import DataProcessor\nfrom finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline\nimport ray\nfrom pprint import pprint\nfrom ray.rllib.agents.ppo import PPOTrainer\nfrom ray.rllib.agents.ddpg import DDPGTrainer\nfrom ray.rllib.agents.a3c import A2CTrainer\nfrom ray.rllib.agents.a3c import a2c\nfrom ray.rllib.agents.ddpg import ddpg, td3\nfrom ray.rllib.agents.ppo import ppo\nfrom ray.rllib.agents.sac import sac\nimport sys\nsys.path.append(\"../FinRL-Library\")\nimport os\nimport itertools\nfrom ray import tune\nfrom ray.tune.suggest import ConcurrencyLimiter\nfrom ray.tune.schedulers import AsyncHyperBandScheduler, PopulationBasedTraining\nfrom ray.tune.suggest.hebo import HEBOSearch\nfrom ray.tune.suggest.optuna import OptunaSearch\n\nfrom ray.tune.logger import (\n CSVLoggerCallback,\n JsonLoggerCallback,\n JsonLogger,\n CSVLogger,\n TBXLoggerCallback,\n TBXLogger,\n)\nfrom ray.tune.result import (\n EXPR_PARAM_FILE,\n EXPR_PARAM_PICKLE_FILE,\n EXPR_PROGRESS_FILE,\n EXPR_RESULT_FILE,\n)\n\nfrom ray.tune.registry import register_env\nfrom ray.tune import ExperimentAnalysis\nfrom ray.tune.suggest import Repeater\n\nimport time\nfrom typing import Dict, Optional, Any\n\nimport tensorflow as tf\nimport datetime, os\n\nfrom google.colab import files\n", "/usr/local/lib/python3.7/dist-packages/pyfolio/pos.py:27: UserWarning: Module \"zipline.assets\" not found; multipliers will not be applied to position notionals.\n 'Module \"zipline.assets\" not found; multipliers will not be applied'\n" ], [ "#!pip install 'scipy<1.7.0' 'pymoo<0.5.0' 'HEBO==0.1.0'", "_____no_output_____" ], [ "ticker_list = DOW_30_TICKER\naction_dim = len(DOW_30_TICKER)", "_____no_output_____" ], [ "import os\nif not os.path.exists(\"./\" + config.DATA_SAVE_DIR):\n os.makedirs(\"./\" + config.DATA_SAVE_DIR)\nif not os.path.exists(\"./\" + config.TRAINED_MODEL_DIR):\n os.makedirs(\"./\" + config.TRAINED_MODEL_DIR)\nif not os.path.exists(\"./\" + config.TENSORBOARD_LOG_DIR):\n os.makedirs(\"./\" + config.TENSORBOARD_LOG_DIR)\nif not os.path.exists(\"./\" + 
config.RESULTS_DIR):\n os.makedirs(\"./\" + config.RESULTS_DIR)", "_____no_output_____" ], [ "API_KEY = \"PKJSSCSTQJZ87LKDF5OP\"\nAPI_SECRET = \"nJOIVicXAh8HZy958ZYcGOWqO4behHpIGJEHaHDL\"\nAPCA_API_BASE_URL = 'https://paper-api.alpaca.markets'\ndata_url = 'wss://data.alpaca.markets'\nenv = StockTradingEnv", "_____no_output_____" ], [ "def sample_ddpg_params():\n \n return {\n \"buffer_size\": tune.choice([int(1e4), int(1e5), int(1e6)]),\n \"lr\": tune.loguniform(1e-5, 1),\n \"train_batch_size\": tune.choice([32, 64, 128, 256, 512])\n }\ndef sample_a2c_params():\n \n return{\n \"lambda\": tune.choice([0.1,0.3,0.5,0.7,0.9,1.0]),\n \"entropy_coeff\": tune.loguniform(0.00000001, 0.1),\n \"lr\": tune.loguniform(1e-5, 1) \n \n }\n\n# cite : https://medium.com/aureliantactics/ppo-hyperparameters-and-ranges-6fc2d29bccbe\ndef sample_ppo_params():\n return {\n \"entropy_coeff\": tune.loguniform(0.00001, 0.001),\n \"lr\": tune.loguniform(5e-6, 0.003),\n #\"sgd_minibatch_size\": tune.choice([ 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096 ]),\n \"lambda\": tune.loguniform(0.9, 1)\n } ", "_____no_output_____" ], [ "MODELS = {\"a2c\": a2c, \"ddpg\": ddpg, \"td3\": td3, \"sac\": sac, \"ppo\": ppo}", "_____no_output_____" ], [ "def get_train_env(start_date, end_date, ticker_list, data_source, time_interval, \n technical_indicator_list, env, model_name, if_vix = True,\n **kwargs):\n \n #fetch data\n DP = DataProcessor(data_source = data_source,\n API_KEY = API_KEY, \n API_SECRET = API_SECRET, \n APCA_API_BASE_URL = APCA_API_BASE_URL\n )\n data = DP.download_data(ticker_list, start_date, end_date, time_interval)\n data = DP.clean_data(data)\n data = DP.add_technical_indicator(data, technical_indicator_list)\n if if_vix:\n data = DP.add_vix(data)\n price_array, tech_array, turbulence_array = DP.df_to_array(data, if_vix)\n train_env_config = {'price_array':price_array,\n 'tech_array':tech_array,\n 'turbulence_array':turbulence_array,\n 'if_train':True}\n \n return train_env_config", "_____no_output_____" ], [ "#Function to calculate the sharpe ratio from the list of total_episode_reward\ndef calculate_sharpe(episode_reward:list):\n perf_data = pd.DataFrame(data=episode_reward,columns=['reward'])\n perf_data['daily_return'] = perf_data['reward'].pct_change(1)\n if perf_data['daily_return'].std() !=0:\n sharpe = (252**0.5)*perf_data['daily_return'].mean()/ \\\n perf_data['daily_return'].std()\n return sharpe\n else:\n return 0\n\ndef get_test_config(start_date, end_date, ticker_list, data_source, time_interval, \n technical_indicator_list, env, model_name, if_vix = True,\n **kwargs):\n \n DP = DataProcessor(data_source = data_source,\n API_KEY = API_KEY, \n API_SECRET = API_SECRET, \n APCA_API_BASE_URL = APCA_API_BASE_URL\n )\n data = DP.download_data(ticker_list, start_date, end_date, time_interval)\n data = DP.clean_data(data)\n data = DP.add_technical_indicator(data, technical_indicator_list)\n \n if if_vix:\n data = DP.add_vix(data)\n \n price_array, tech_array, turbulence_array = DP.df_to_array(data, if_vix)\n test_env_config = {'price_array':price_array,\n 'tech_array':tech_array,\n 'turbulence_array':turbulence_array,'if_train':False}\n return test_env_config\n\ndef val_or_test(test_env_config,agent_path,model_name,env):\n episode_total_reward = DRL_prediction(model_name,test_env_config,\n env = env,\n agent_path=agent_path)\n\n\n return calculate_sharpe(episode_total_reward),episode_total_reward", "_____no_output_____" ], [ "TRAIN_START_DATE = '2022-01-17'\nTRAIN_END_DATE = 
'2022-01-20'\n\nVAL_START_DATE = '2022-01-24'\nVAL_END_DATE = '2022-01-25'\n\nTEST_START_DATE = '2022-01-26'\nTEST_END_DATE = '2022-01-27'", "_____no_output_____" ], [ "technical_indicator_list = TECHNICAL_INDICATORS_LIST\n\nmodel_name = 'ppo'\nenv = StockTradingEnv\nticker_list = DOW_30_TICKER\ndata_source = 'alpaca'\ntime_interval = '1Min'", "_____no_output_____" ], [ "train_env_config = get_train_env(TRAIN_START_DATE, VAL_END_DATE, \n ticker_list, data_source, time_interval, \n technical_indicator_list, env, model_name)", "Alpaca successfully connected\nData before 2022-01-17T15:59:00-05:00 is successfully fetched\nData before 2022-01-18T15:59:00-05:00 is successfully fetched\nData before 2022-01-19T15:59:00-05:00 is successfully fetched\nData before 2022-01-20T15:59:00-05:00 is successfully fetched\nData before 2022-01-21T15:59:00-05:00 is successfully fetched\nData before 2022-01-22T15:59:00-05:00 is successfully fetched\nData before 2022-01-23T15:59:00-05:00 is successfully fetched\n" ], [ "from ray.tune.registry import register_env\n\nenv_name = 'StockTrading_train_env'\nregister_env(env_name, lambda config: env(train_env_config))", "_____no_output_____" ], [ "'''\npbt = PopulationBasedTraining(\n time_attr=\"training_iteration\",\n #metric=\"episode_reward_mean\",\n #mode=\"max\",\n perturbation_interval=10, # every 10 `time_attr` units\n # (training_iterations in this case)\n hyperparam_mutations={\n # Perturb factor1 by scaling it by 0.8 or 1.2. Resampling\n # resets it to a value sampled from the lambda function.\n \"factor_1\": lambda: random.uniform(0.0, 20.0),\n # Alternatively, use tune search space primitives.\n # The search space for factor_1 is equivalent to factor_2.\n \"factor_2\": tune.uniform(0.0, 20.0),\n # Perturb factor3 by changing it to an adjacent value, e.g.\n # 10 -> 1 or 10 -> 100. 
Resampling will choose at random.\n \"factor_3\": [1, 10, 100, 1000, 10000],\n # Using tune.choice is NOT equivalent to the above.\n # factor_4 is treated as a continuous hyperparameter.\n \"factor_4\": tune.choice([1, 10, 100, 1000, 10000]),\n })\n'''", "_____no_output_____" ], [ "MODEL_TRAINER = {'a2c':A2CTrainer,'ppo':PPOTrainer,'ddpg':DDPGTrainer}\n\nif model_name == \"ddpg\":\n sample_hyperparameters = sample_ddpg_params()\nelif model_name == \"ppo\":\n sample_hyperparameters = sample_ppo_params()\nelif model_name == \"a2c\":\n sample_hyperparameters = sample_a2c_params()\n \ndef run_optuna_tune():\n\n #bayesopt = BayesOptSearch(metric=\"episode_reward_mean\", mode=\"max\")\n #hebo = HEBOSearch()\n #re_search_alg = Repeater(search_alg, repeat=10)\n\n algo = OptunaSearch()\n algo = ConcurrencyLimiter(algo,max_concurrent=4)\n scheduler = AsyncHyperBandScheduler()\n #scheduler=pbt\n num_samples = 1\n training_iterations = 100\n\n analysis = tune.run(\n MODEL_TRAINER[model_name],\n metric=\"episode_reward_mean\", #The metric to optimize for tuning\n mode=\"max\", #Maximize the metric\n search_alg = algo,\n scheduler=scheduler, #To prune bad trials\n config = {**sample_hyperparameters,\n 'env':'StockTrading_train_env','num_workers':1,\n 'num_gpus':1,'framework':'tf2'},\n num_samples = num_samples, #Number of hyperparameters to test out\n stop = {'training_iteration':training_iterations},#Time attribute to validate the results\n verbose=1,\n local_dir=\"./tuned_models\",#Saving tensorboard plots\n # resources_per_trial={'gpu':1,'cpu':1},\n max_failures = 1,#Extra Trying for the failed trials\n raise_on_failed_trial=False,#Don't return error even if you have errored trials\n keep_checkpoints_num = num_samples, \n checkpoint_score_attr ='episode_reward_mean',#Only store keep_checkpoints_num trials based on this score\n checkpoint_freq=training_iterations,#Checpointing all the trials\n callbacks=[TBXLoggerCallback()]\n )\n print(\"Best hyperparameter: \", analysis.best_config)\n return analysis", "_____no_output_____" ], [ "analysis = run_optuna_tune()", "_____no_output_____" ], [ "%tensorboard --logdir ./tuned_models/PPOTrainer_2022-02-23_08-43-01", "_____no_output_____" ], [ "dfs = analysis.trial_dataframes\nax = None # This plots everything on the same plot\nfor d in dfs.values():\n ax = d.episode_reward_mean.plot(ax=ax, legend=False)\nax.set_xlabel(\"Epochs\")\nax.set_ylabel(\"Episode reward Mean\");", "_____no_output_____" ], [ "df = pd.DataFrame(dfs[0])\ndf.to_csv('traing_data')\n\nfiles.download('training_data')", "_____no_output_____" ], [ "ExpAnalysis = ExperimentAnalysis(\n experiment_checkpoint_path=\"~/tune_results/my_exp/state.json\")", "_____no_output_____" ], [ "best_logdir = analysis.get_best_logdir(metric='episode_reward_mean',mode='max')\nbest_logdir", "_____no_output_____" ], [ "best_checkpoint = analysis.best_checkpoint\nbest_checkpoint", "_____no_output_____" ], [ "best_config = analysis.best_config ", "_____no_output_____" ], [ "test_env_config = get_test_config(TEST_START_DATE, TEST_END_DATE, ticker_list, data_source, time_interval, \n technical_indicator_list, env, model_name)", "_____no_output_____" ], [ "def DRL_prediction(\n model_name,\n test_env_config,\n env,\n model_config,\n agent_path,\n env_name_test='StockTrading_test_env'\n ):\n \n env_instance = env(test_env_config)\n \n register_env(env_name_test, lambda config: env(test_env_config))\n model_config['env'] = env_name_test\n # ray.init() # Other Ray APIs will not work until `ray.init()` is called.\n if 
model_name == \"ppo\":\n trainer = MODELS[model_name].PPOTrainer(config=model_config)\n elif model_name == \"a2c\":\n trainer = MODELS[model_name].A2CTrainer(config=model_config)\n elif model_name == \"ddpg\":\n trainer = MODELS[model_name].DDPGTrainer(config=model_config)\n elif model_name == \"td3\":\n trainer = MODELS[model_name].TD3Trainer(config=model_config)\n elif model_name == \"sac\":\n trainer = MODELS[model_name].SACTrainer(config=model_config)\n\n try:\n trainer.restore(agent_path)\n print(\"Restoring from checkpoint path\", agent_path)\n except BaseException:\n raise ValueError(\"Fail to load agent!\")\n\n # test on the testing env\n state = env_instance.reset()\n episode_returns = list() # the cumulative_return / initial_account\n episode_total_assets = list()\n episode_total_assets.append(env_instance.initial_total_asset)\n done = False\n while not done:\n action = trainer.compute_single_action(state)\n state, reward, done, _ = env_instance.step(action)\n\n total_asset = (\n env_instance.amount\n + (env_instance.price_ary[env_instance.day] * env_instance.stocks).sum()\n )\n episode_total_assets.append(total_asset)\n episode_return = total_asset / env_instance.initial_total_asset\n episode_returns.append(episode_return)\n\n ray.shutdown()\n \n print(\"episode return: \" + str(episode_return))\n print(\"Test Finished!\")\n return episode_total_assets", "_____no_output_____" ], [ "episode_total_assets = DRL_prediction(\n model_name,\n test_env_config,\n env,\n best_config,\n best_checkpoint,\n env_name_test='StockTrading_test_env')", "_____no_output_____" ], [ "print('The test sharpe ratio is: ',calculate_sharpe(episode_total_assets))\ndf_account_test = pd.DataFrame(data=episode_total_assets,columns=['account_value'])\ndf_account_test.to_csv('account_memory')", "_____no_output_____" ], [ "files.download('account_memory')", "_____no_output_____" ] ], [ [ "# Paper Trading", "_____no_output_____" ] ], [ [ "ray.shutdown()", "_____no_output_____" ], [ "ray.init()", "_____no_output_____" ], [ "state_dim = 1 + 2 + 3 * action_dim + len(TECHNICAL_INDICATORS_LIST) * action_dim", "_____no_output_____" ], [ "import datetime\nimport threading\nfrom finrl.finrl_meta.data_processors.processor_alpaca import AlpacaProcessor\nimport alpaca_trade_api as tradeapi\nimport time\nimport pandas as pd\nimport numpy as np\nimport torch\nimport gym\n\nclass AlpacaPaperTrading():\n\n def __init__(self,ticker_list, time_interval, drl_lib, agent, cwd, net_dim, \n state_dim, action_dim, API_KEY, API_SECRET, \n APCA_API_BASE_URL, tech_indicator_list, turbulence_thresh=30, \n max_stock=1e2, latency = None):\n #load agent\n self.drl_lib = drl_lib\n if agent =='ppo':\n if drl_lib == 'elegantrl': \n from elegantrl.agent import AgentPPO\n from elegantrl.run import train_and_evaluate, init_agent\n from elegantrl.config import Arguments\n #load agent\n config = {'state_dim':state_dim,\n 'action_dim':action_dim,}\n args = Arguments(agent=AgentPPO, env=StockEnvEmpty(config))\n args.cwd = cwd\n args.net_dim = net_dim\n # load agent\n try:\n agent = init_agent(args, gpu_id = 0)\n self.act = agent.act\n self.device = agent.device\n except BaseException:\n raise ValueError(\"Fail to load agent!\")\n \n elif drl_lib == 'rllib':\n from ray.rllib.agents import ppo\n from ray.rllib.agents.ppo.ppo import PPOTrainer\n from ray.tune.registry import register_env\n\n train_env_config = {\n 'state_dim':state_dim,\n 'action_dim':action_dim,\n \"if_train\": False,}\n\n env = StockEnvEmpty(train_env_config)\n \n 
register_env(\"Stock_Env_Empty\", lambda config: env) \n\n print(\"environment is registered\")\n\n model_config = best_config\n model_config['env'] = \"Stock_Env_Empty\"\n model_config[\"log_level\"] = \"WARN\"\n model_config['env_config'] = train_env_config\n \n print(\"model config done\")\n\n trainer = PPOTrainer(env=\"Stock_Env_Empty\", config=model_config)\n\n print(\"the ppo trainer is initialised\")\n \n try:\n trainer.restore(cwd)\n self.agent = trainer\n print(\"Restoring from checkpoint path\", cwd)\n except:\n raise ValueError('Fail to load agent!')\n \n elif drl_lib == 'stable_baselines3':\n from stable_baselines3 import PPO\n \n try:\n #load agent\n self.model = PPO.load(cwd)\n print(\"Successfully load model\", cwd)\n except:\n raise ValueError('Fail to load agent!')\n \n else:\n raise ValueError('The DRL library input is NOT supported yet. Please check your input.')\n \n else:\n raise ValueError('Agent input is NOT supported yet.')\n \n \n \n #connect to Alpaca trading API\n try:\n self.alpaca = tradeapi.REST(API_KEY,API_SECRET,APCA_API_BASE_URL, 'v2')\n except:\n raise ValueError('Fail to connect Alpaca. Please check account info and internet connection.')\n \n #read trading time interval\n if time_interval == '1s':\n self.time_interval = 1\n elif time_interval == '5s':\n self.time_interval = 5\n elif time_interval == '1Min':\n self.time_interval = 60\n elif time_interval == '5Min':\n self.time_interval = 60 * 5\n elif time_interval == '15Min':\n self.time_interval = 60 * 15\n else:\n raise ValueError('Time interval input is NOT supported yet.')\n \n #read trading settings\n self.tech_indicator_list = tech_indicator_list\n self.turbulence_thresh = turbulence_thresh\n self.max_stock = max_stock \n \n #initialize account\n self.stocks = np.asarray([0] * len(ticker_list)) #stocks holding\n self.stocks_cd = np.zeros_like(self.stocks) \n self.cash = None #cash record \n self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index = ticker_list)\n self.asset_list = []\n self.price = np.asarray([0] * len(ticker_list))\n self.stockUniverse = ticker_list\n self.turbulence_bool = 0\n self.equities = []\n \n def test_latency(self, test_times = 10): \n total_time = 0\n for i in range(0, test_times):\n time0 = time.time()\n self.get_state()\n time1 = time.time()\n temp_time = time1 - time0\n total_time += temp_time\n latency = total_time/test_times\n print('latency for data processing: ', latency)\n return latency\n \n def run(self):\n orders = self.alpaca.list_orders(status=\"open\")\n for order in orders:\n self.alpaca.cancel_order(order.id)\n \n # Wait for market to open.\n print(\"Waiting for market to open...\")\n tAMO = threading.Thread(target=self.awaitMarketOpen)\n tAMO.start()\n tAMO.join()\n print(\"Market opened.\")\n while True:\n\n # Figure out when the market will close so we can prepare to sell beforehand.\n clock = self.alpaca.get_clock()\n closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()\n currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()\n self.timeToClose = closingTime - currTime\n \n if(self.timeToClose < (60)):\n # Close all positions when 1 minutes til market close.\n print(\"Market closing soon. Stop trading.\")\n break\n \n '''# Close all positions when 1 minutes til market close.\n print(\"Market closing soon. 
Closing positions.\")\n \n positions = self.alpaca.list_positions()\n for position in positions:\n if(position.side == 'long'):\n orderSide = 'sell'\n else:\n orderSide = 'buy'\n qty = abs(int(float(position.qty)))\n respSO = []\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))\n tSubmitOrder.start()\n tSubmitOrder.join()\n \n # Run script again after market close for next trading day.\n print(\"Sleeping until market close (15 minutes).\")\n time.sleep(60 * 15)'''\n \n else:\n trade = threading.Thread(target=self.trade)\n trade.start()\n trade.join()\n last_equity = float(self.alpaca.get_account().last_equity)\n cur_time = time.time()\n self.equities.append([cur_time,last_equity])\n time.sleep(self.time_interval)\n \n def awaitMarketOpen(self):\n isOpen = self.alpaca.get_clock().is_open\n while(not isOpen):\n clock = self.alpaca.get_clock()\n openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()\n currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()\n timeToOpen = int((openingTime - currTime) / 60)\n print(str(timeToOpen) + \" minutes til market open.\")\n time.sleep(60)\n isOpen = self.alpaca.get_clock().is_open\n \n def trade(self):\n state = self.get_state()\n \n if self.drl_lib == 'elegantrl':\n with torch.no_grad():\n s_tensor = torch.as_tensor((state,), device=self.device)\n a_tensor = self.act(s_tensor) \n action = a_tensor.detach().cpu().numpy()[0] \n \n action = (action * self.max_stock).astype(int)\n \n elif self.drl_lib == 'rllib':\n action = self.agent.compute_single_action(state)\n \n elif self.drl_lib == 'stable_baselines3':\n action = self.model.predict(state)[0]\n \n else:\n raise ValueError('The DRL library input is NOT supported yet. 
Please check your input.')\n \n self.stocks_cd += 1\n if self.turbulence_bool == 0:\n min_action = 10 # stock_cd\n for index in np.where(action < -min_action)[0]: # sell_index:\n sell_num_shares = min(self.stocks[index], -action[index])\n qty = abs(int(sell_num_shares))\n respSO = []\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'sell', respSO))\n tSubmitOrder.start()\n tSubmitOrder.join()\n self.cash = float(self.alpaca.get_account().cash)\n self.stocks_cd[index] = 0\n\n for index in np.where(action > min_action)[0]: # buy_index:\n if self.cash < 0:\n tmp_cash = 0\n else:\n tmp_cash = self.cash\n buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))\n qty = abs(int(buy_num_shares))\n respSO = []\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'buy', respSO))\n tSubmitOrder.start()\n tSubmitOrder.join()\n self.cash = float(self.alpaca.get_account().cash)\n self.stocks_cd[index] = 0\n \n else: # sell all when turbulence\n positions = self.alpaca.list_positions()\n for position in positions:\n if(position.side == 'long'):\n orderSide = 'sell'\n else:\n orderSide = 'buy'\n qty = abs(int(float(position.qty)))\n respSO = []\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))\n tSubmitOrder.start()\n tSubmitOrder.join()\n \n self.stocks_cd[:] = 0\n \n \n def get_state(self):\n alpaca = AlpacaProcessor(api=self.alpaca)\n price, tech, turbulence = alpaca.fetch_latest_data(ticker_list = self.stockUniverse, time_interval='1Min',\n tech_indicator_list=self.tech_indicator_list)\n turbulence_bool = 1 if turbulence >= self.turbulence_thresh else 0\n \n turbulence = (self.sigmoid_sign(turbulence, self.turbulence_thresh) * 2 ** -5).astype(np.float32)\n \n tech = tech * 2 ** -7\n positions = self.alpaca.list_positions()\n stocks = [0] * len(self.stockUniverse)\n for position in positions:\n ind = self.stockUniverse.index(position.symbol)\n stocks[ind] = ( abs(int(float(position.qty))))\n \n stocks = np.asarray(stocks, dtype = float)\n cash = float(self.alpaca.get_account().cash)\n self.cash = cash\n self.stocks = stocks\n self.turbulence_bool = turbulence_bool \n self.price = price\n \n \n \n amount = np.array(self.cash * (2 ** -12), dtype=np.float32)\n scale = np.array(2 ** -6, dtype=np.float32)\n state = np.hstack((amount,\n turbulence,\n self.turbulence_bool,\n price * scale,\n self.stocks * scale,\n self.stocks_cd,\n tech,\n )).astype(np.float32)\n print(len(self.stockUniverse))\n return state\n \n def submitOrder(self, qty, stock, side, resp):\n if(qty > 0):\n try:\n self.alpaca.submit_order(stock, qty, side, \"market\", \"day\")\n print(\"Market order of | \" + str(qty) + \" \" + stock + \" \" + side + \" | completed.\")\n resp.append(True)\n except:\n print(\"Order of | \" + str(qty) + \" \" + stock + \" \" + side + \" | did not go through.\")\n resp.append(False)\n else:\n print(\"Quantity is 0, order of | \" + str(qty) + \" \" + stock + \" \" + side + \" | not completed.\")\n resp.append(True)\n\n @staticmethod\n def sigmoid_sign(ary, thresh):\n def sigmoid(x):\n return 1 / (1 + np.exp(-x * np.e)) - 0.5\n\n return sigmoid(ary / thresh) * thresh\n \n", "_____no_output_____" ], [ "class StockEnvEmpty(gym.Env):\n #Empty Env used for loading rllib agent\n def __init__(self,config):\n state_dim = config['state_dim']\n action_dim = config['action_dim']\n #price_ary = config[\"price_array\"]\n self.env_num = 1\n self.max_step = 10000\n self.env_name 
= 'StockEnvEmpty'\n self.state_dim = state_dim \n self.action_dim = action_dim\n #self.price_ary = price_ary.astype(np.float32)\n self.if_discrete = False \n self.target_return = 9999\n self.observation_space = gym.spaces.Box(low=-3000, high=3000, shape=(state_dim,), dtype=np.float32)\n self.action_space = gym.spaces.Box(low=-1, high=1, shape=(action_dim,), dtype=np.float32)\n \n def reset(self):\n return\n\n def step(self, actions):\n return", "_____no_output_____" ], [ "paper_trading_erl = AlpacaPaperTrading(ticker_list = DOW_30_TICKER, \n time_interval = '1Min', \n drl_lib=\"rllib\", \n agent = 'ppo', \n cwd = best_checkpoint, \n net_dim = 512, \n state_dim = state_dim, \n action_dim= action_dim, \n API_KEY = API_KEY, \n API_SECRET = API_SECRET, \n APCA_API_BASE_URL = 'https://paper-api.alpaca.markets',\n tech_indicator_list = TECHNICAL_INDICATORS_LIST, \n turbulence_thresh=30, \n max_stock=1e2)\npaper_trading_erl.run()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb0f3dfc72e03f464d1994b48b903dcccc419ed1
15,034
ipynb
Jupyter Notebook
PAPERS/vSMC.ipynb
ChaosDonkey06/MODSIM_IBIO
e2ce70538aaba3b01f22cf037db21a80316a802f
[ "Apache-2.0" ]
null
null
null
PAPERS/vSMC.ipynb
ChaosDonkey06/MODSIM_IBIO
e2ce70538aaba3b01f22cf037db21a80316a802f
[ "Apache-2.0" ]
null
null
null
PAPERS/vSMC.ipynb
ChaosDonkey06/MODSIM_IBIO
e2ce70538aaba3b01f22cf037db21a80316a802f
[ "Apache-2.0" ]
null
null
null
32.261803
139
0.469004
[ [ [ "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport autograd.numpy as np\nfrom autograd import grad\nfrom autograd.extend import notrace_primitive\n\n@notrace_primitive\ndef resampling(w, rs):\n \"\"\"\n Stratified resampling with \"nograd_primitive\" to ensure autograd\n takes no derivatives through it.\n \"\"\"\n N = w.shape[0]\n bins = np.cumsum(w)\n ind = np.arange(N)\n u = (ind + rs.rand(N))/N\n\n return np.digitize(u, bins)", "_____no_output_____" ], [ "def vsmc_lower_bound(prop_params, model_params, y, smc_obj, rs, verbose=False, adapt_resamp=False):\n \"\"\"\n Estimate the VSMC lower bound. Amenable to (biased) reparameterization\n gradients\n .. math::\n ELBO(\\theta,\\lambda) =\n \\mathbb{E}_{\\phi}\\left[\\nabla_\\lambda \\log \\hat p(y_{1:T}) \\right]\n Requires an SMC object with 2 member functions:\n -- sim_prop(t, x_{t-1}, y, prop_params, model_params, rs)\n -- log_weights(t, x_t, x_{t-1}, y, prop_params, model_params)\n \"\"\"\n # Extract constants\n T = y.shape[0]\n Dx = smc_obj.Dx\n N = smc_obj.N\n\n # Initialize SMC\n X = np.zeros((N,Dx))\n Xp = np.zeros((N,Dx))\n logW = np.zeros(N)\n W = np.exp(logW)\n W /= np.sum(W)\n logZ = 0.\n ESS = 1./np.sum(W**2)/N\n\n for t in range(T):\n # Resampling\n if adapt_resamp:\n if ESS < 0.5:\n ancestors = resampling(W, rs)\n Xp = X[ancestors]\n logZ = logZ + max_logW + np.log(np.sum(W)) - np.log(N)\n logW = np.zeros(N)\n else:\n Xp = X\n else:\n if t > 0:\n ancestors = resampling(W, rs)\n Xp = X[ancestors]\n else:\n Xp = X\n\n # Propagation\n X = smc_obj.sim_prop(t, Xp, y, prop_params, model_params, rs)\n\n # Weighting\n if adapt_resamp:\n logW = logW + smc_obj.log_weights(t, X, Xp, y, prop_params, model_params)\n else:\n logW = smc_obj.log_weights(t, X, Xp, y, prop_params, model_params)\n max_logW = np.max(logW)\n W = np.exp(logW-max_logW)\n if adapt_resamp:\n if t == T-1:\n logZ = logZ + max_logW + np.log(np.sum(W)) - np.log(N)\n else:\n logZ = logZ + max_logW + np.log(np.sum(W)) - np.log(N)\n W /= np.sum(W)\n ESS = 1./np.sum(W**2)/N\n if verbose:\n print('ESS: '+str(ESS))\n return logZ", "_____no_output_____" ], [ "def sim_q(prop_params, model_params, y, smc_obj, rs, verbose=False):\n \"\"\"\n Simulates a single sample from the VSMC approximation.\n Requires an SMC object with 2 member functions:\n -- sim_prop(t, x_{t-1}, y, prop_params, model_params, rs)\n -- log_weights(t, x_t, x_{t-1}, y, prop_params, model_params)\n \"\"\"\n # Extract constants\n T = y.shape[0]\n Dx = smc_obj.Dx\n N = smc_obj.N\n\n # Initialize SMC\n X = np.zeros((N,T,Dx))\n logW = np.zeros(N)\n W = np.zeros((N,T))\n ESS = np.zeros(T)\n\n for t in range(T):\n # Resampling\n if t > 0:\n ancestors = resampling(W[:,t-1], rs)\n X[:,:t,:] = X[ancestors,:t,:]\n\n # Propagation\n X[:,t,:] = smc_obj.sim_prop(t, X[:,t-1,:], y, prop_params, model_params, rs)\n\n # Weighting\n logW = smc_obj.log_weights(t, X[:,t,:], X[:,t-1,:], y, prop_params, model_params)\n max_logW = np.max(logW)\n W[:,t] = np.exp(logW-max_logW)\n W[:,t] /= np.sum(W[:,t])\n ESS[t] = 1./np.sum(W[:,t]**2)\n\n # Sample from the empirical approximation\n bins = np.cumsum(W[:,-1])\n u = rs.rand()\n B = np.digitize(u,bins)\n\n if verbose:\n print('Mean ESS', np.mean(ESS)/N)\n print('Min ESS', np.min(ESS))\n\n return X[B,:,:]", "_____no_output_____" ], [ "import autograd.numpy.random as npr\n\ndef init_model_params(Dx, Dy, alpha, r, obs, rs = npr.RandomState(0)):\n mu0 = np.zeros(Dx)\n Sigma0 = np.eye(Dx)\n A = np.zeros((Dx,Dx))\n for i in range(Dx):\n for j in 
range(Dx):\n A[i,j] = alpha**(abs(i-j)+1)\n\n Q = np.eye(Dx)\n C = np.zeros((Dy,Dx))\n if obs == 'sparse':\n C[:Dy,:Dy] = np.eye(Dy)\n else:\n C = rs.normal(size=(Dy,Dx))\n R = r * np.eye(Dy)\n\n return (mu0, Sigma0, A, Q, C, R)", "_____no_output_____" ], [ "\ndef init_prop_params(T, Dx, scale = 0.5, rs = npr.RandomState(0)):\n return [(scale * rs.randn(Dx), # Bias\n 1. + scale * rs.randn(Dx), # Linear times A/mu0\n scale * rs.randn(Dx)) # Log-var\n for t in range(T)]\n\ndef generate_data(model_params, T = 5, rs = npr.RandomState(0)):\n mu0, Sigma0, A, Q, C, R = model_params\n Dx = mu0.shape[0]\n Dy = R.shape[0]\n\n x_true = np.zeros((T,Dx))\n y_true = np.zeros((T,Dy))\n\n for t in range(T):\n if t > 0:\n x_true[t,:] = rs.multivariate_normal(np.dot(A,x_true[t-1,:]),Q)\n else:\n x_true[0,:] = rs.multivariate_normal(mu0,Sigma0)\n y_true[t,:] = rs.multivariate_normal(np.dot(C,x_true[t,:]),R)\n\n return x_true, y_true", "_____no_output_____" ], [ "def log_marginal_likelihood(model_params, T, y_true):\n mu0, Sigma0, A, Q, C, R = model_params\n Dx = mu0.shape[0]\n Dy = R.shape[1]\n\n log_likelihood = 0.\n xfilt = np.zeros(Dx)\n Pfilt = np.zeros((Dx,Dx))\n xpred = mu0\n Ppred = Sigma0\n\n for t in range(T):\n if t > 0:\n # Predict\n xpred = np.dot(A,xfilt)\n Ppred = np.dot(A,np.dot(Pfilt,A.T)) + Q\n\n # Update\n yt = y_true[t,:] - np.dot(C,xpred)\n S = np.dot(C,np.dot(Ppred,C.T)) + R\n K = np.linalg.solve(S, np.dot(C,Ppred)).T\n xfilt = xpred + np.dot(K,yt)\n Pfilt = Ppred - np.dot(K,np.dot(C,Ppred))\n\n sign, logdet = np.linalg.slogdet(S)\n log_likelihood += -0.5*(np.sum(yt*np.linalg.solve(S,yt)) + logdet + Dy*np.log(2.*np.pi))\n\n return log_likelihood", "_____no_output_____" ], [ "class lgss_smc:\n \"\"\"\n Class for defining functions used in variational SMC.\n \"\"\"\n def __init__(self, T, Dx, Dy, N):\n self.T = T\n self.Dx = Dx\n self.Dy = Dy\n self.N = N\n\n def log_normal(self, x, mu, Sigma):\n dim = Sigma.shape[0]\n sign, logdet = np.linalg.slogdet(Sigma)\n log_norm = -0.5*dim*np.log(2.*np.pi) - 0.5*logdet\n Prec = np.linalg.inv(Sigma)\n return log_norm - 0.5*np.sum((x-mu)*np.dot(Prec,(x-mu).T).T,axis=1)\n\n def log_prop(self, t, Xc, Xp, y, prop_params, model_params):\n mu0, Sigma0, A, Q, C, R = model_params\n mut, lint, log_s2t = prop_params[t]\n s2t = np.exp(log_s2t)\n\n if t > 0:\n mu = mut + np.dot(A, Xp.T).T*lint\n else:\n mu = mut + lint*mu0\n\n return self.log_normal(Xc, mu, np.diag(s2t))\n\n def log_target(self, t, Xc, Xp, y, prop_params, model_params):\n mu0, Sigma0, A, Q, C, R = model_params\n if t > 0:\n logF = self.log_normal(Xc,np.dot(A,Xp.T).T, Q)\n else:\n logF = self.log_normal(Xc, mu0, Sigma0)\n logG = self.log_normal(np.dot(C,Xc.T).T, y[t], R)\n return logF + logG\n\n # These following 2 are the only ones needed by variational-smc.py\n def log_weights(self, t, Xc, Xp, y, prop_params, model_params):\n return self.log_target(t, Xc, Xp, y, prop_params, model_params) - \\\n self.log_prop(t, Xc, Xp, y, prop_params, model_params)\n\n def sim_prop(self, t, Xp, y, prop_params, model_params, rs = npr.RandomState(0)):\n mu0, Sigma0, A, Q, C, R = model_params\n mut, lint, log_s2t = prop_params[t]\n s2t = np.exp(log_s2t)\n\n if t > 0:\n mu = mut + np.dot(A, Xp.T).T*lint\n else:\n mu = mut + lint*mu0\n return mu + rs.randn(*Xp.shape)*np.sqrt(s2t)", "_____no_output_____" ], [ "# Model hyper-parameters\nT = 10\nDx = 5\nDy = 3\nalpha = 0.42\nr = .1\nobs = 'sparse'\n\n# Training parameters\nparam_scale = 0.5\nnum_epochs = 1000\nstep_size = 0.001\n\nN = 4\n\ndata_seed = 
npr.RandomState(0)\nmodel_params = init_model_params(Dx, Dy, alpha, r, obs, data_seed)\n\nprint(\"Generating data...\")\nx_true, y_true = generate_data(model_params, T, data_seed)\n\nlml = log_marginal_likelihood(model_params, T, y_true)\nprint(\"True log-marginal likelihood: \"+str(lml))\n\nseed = npr.RandomState(0)\n\n# Initialize proposal parameters\nprop_params = init_prop_params(T, Dx, param_scale, seed)\ncombined_init_params = (model_params, prop_params)\n\nlgss_smc_obj = lgss_smc(T, Dx, Dy, N)", "Generating data...\nTrue log-marginal likelihood: -46.89174359319463\n" ], [ "# Define training objective\ndef objective(combined_params, iter):\n model_params, prop_params = combined_params\n return -vsmc_lower_bound(prop_params, model_params, y_true, lgss_smc_obj, seed)\n\n# Get gradients of objective using autograd.\nobjective_grad = grad(objective)\n", "_____no_output_____" ], [ "\nfrom autograd.misc.optimizers import adam\n\n\ndef print_perf(combined_params, iter, grad):\n if iter % (num_epochs/10) == 0:\n model_params, prop_params = combined_params\n bound = -objective(combined_params, iter)\n message = \"{:15}|{:20}\".format(iter, bound)\n print(message)\n #with open(f_head+'_ELBO.csv', 'a') as f_handle:\n # np.savetxt(f_handle, [[iter,bound]], fmt='%i,%f')\n\n# SGD with adaptive step-size \"adam\"\noptimized_params = adam(objective_grad, combined_init_params, step_size=step_size,\n num_iters=num_epochs, callback=print_perf)\nopt_model_params, opt_prop_params = optimized_params\n", " 0| -136.52003692786687\n 100| -98.77446240248345\n 200| -57.6303750970804\n 300| -54.45751571342868\n 400| -65.08079520031357\n 500| -54.67798149962506\n 600| -43.741817571752065\n 700| -50.18870619980928\n 800| -55.92935048354515\n 900| -37.706093153539875\n" ], [ "opt_model_params[0].shape, opt_model_params[1].shape, opt_model_params[2].shape, opt_model_params[3].shape, opt_model_params[4].shape", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb0f45db54f6a9ce91e18d07e6b260932b4a93da
827,286
ipynb
Jupyter Notebook
code/ch13/ch13.ipynb
wei-Z/Python-Machine-Learning
47101e2d7d28665bdbb3145fb503e039f944046e
[ "MIT" ]
null
null
null
code/ch13/ch13.ipynb
wei-Z/Python-Machine-Learning
47101e2d7d28665bdbb3145fb503e039f944046e
[ "MIT" ]
null
null
null
code/ch13/ch13.ipynb
wei-Z/Python-Machine-Learning
47101e2d7d28665bdbb3145fb503e039f944046e
[ "MIT" ]
null
null
null
538.948534
662,412
0.93323
[ [ [ "[Sebastian Raschka](http://sebastianraschka.com), 2015\n\nhttps://github.com/rasbt/python-machine-learning-book", "_____no_output_____" ], [ "# Python Machine Learning - Code Examples", "_____no_output_____" ], [ "# Chapter 13 - Parallelizing Neural Network Training with Theano", "_____no_output_____" ], [ "Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).", "_____no_output_____" ] ], [ [ "%load_ext watermark\n%watermark -a 'Sebastian Raschka' -u -d -v -p numpy,matplotlib,theano,keras", "Sebastian Raschka \nLast updated: 08/27/2015 \n\nCPython 3.4.3\nIPython 4.0.0\n\nnumpy 1.9.2\nmatplotlib 1.4.3\ntheano 0.7.0\nkeras 0.1.2\n" ], [ "# to install watermark just uncomment the following line:\n#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py", "_____no_output_____" ] ], [ [ "### Overview", "_____no_output_____" ], [ "- [Building, compiling, and running expressions with Theano](#Building,-compiling,-and-running-expressions-with-Theano)\n - [What is Theano?](#What-is-Theano?)\n - [First steps with Theano](#First-steps-with-Theano)\n - [Configuring Theano](#Configuring-Theano)\n - [Working with array structures](#Working-with-array-structures)\n - [Wrapping things up โ€“ a linear regression example](#Wrapping-things-up:-A--linear-regression-example)\n- [Choosing activation functions for feedforward neural networks](#Choosing-activation-functions-for-feedforward-neural-networks)\n - [Logistic function recap](#Logistic-function-recap)\n - [Estimating probabilities in multi-class classification via the softmax function](#Estimating-probabilities-in-multi-class-classification-via-the-softmax-function)\n - [Broadening the output spectrum by using a hyperbolic tangent](#Broadening-the-output-spectrum-by-using-a-hyperbolic-tangent)\n- [Training neural networks efficiently using Keras](#Training-neural-networks-efficiently-using-Keras)\n- [Summary](#Summary)", "_____no_output_____" ], [ "<br>\n<br>", "_____no_output_____" ] ], [ [ "from IPython.display import Image", "_____no_output_____" ] ], [ [ "# Building, compiling, and running expressions with Theano", "_____no_output_____" ], [ "Depending on your system setup, it is typically sufficient to install Theano via\n\n pip install Theano\n \nFor more help with the installation, please see: http://deeplearning.net/software/theano/install.html", "_____no_output_____" ] ], [ [ "Image(filename='./images/13_01.png', width=500) ", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "## What is Theano?", "_____no_output_____" ], [ "...", "_____no_output_____" ], [ "## First steps with Theano", "_____no_output_____" ], [ "Introducing the TensorType variables. For a complete list, see http://deeplearning.net/software/theano/library/tensor/basic.html#all-fully-typed-constructors", "_____no_output_____" ] ], [ [ "import theano\nfrom theano import tensor as T", "_____no_output_____" ], [ "# initialize\nx1 = T.scalar()\nw1 = T.scalar()\nw0 = T.scalar()\nz1 = w1 * x1 + w0\n\n# compile\nnet_input = theano.function(inputs=[w1, x1, w0], outputs=z1)\n\n# execute\nnet_input(2.0, 1.0, 0.5)", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "## Configuring Theano", "_____no_output_____" ], [ "Configuring Theano. 
For more options, see\n- http://deeplearning.net/software/theano/library/config.html\n- http://deeplearning.net/software/theano/library/floatX.html", "_____no_output_____" ] ], [ [ "print(theano.config.floatX)", "float64\n" ], [ "theano.config.floatX = 'float32'", "_____no_output_____" ] ], [ [ "To change the float type globally, execute \n\n export THEANO_FLAGS=floatX=float32 \n \nin your bash shell. Or execute Python script as\n\n THEANO_FLAGS=floatX=float32 python your_script.py", "_____no_output_____" ], [ "Running Theano on GPU(s). For prerequisites, please see: http://deeplearning.net/software/theano/tutorial/using_gpu.html\n\nNote that `float32` is recommended for GPUs; `float64` on GPUs is currently still relatively slow.", "_____no_output_____" ] ], [ [ "print(theano.config.device)", "cpu\n" ] ], [ [ "You can run a Python script on CPU via:\n\n THEANO_FLAGS=device=cpu,floatX=float64 python your_script.py\n\nor GPU via\n\n THEANO_FLAGS=device=gpu,floatX=float32 python your_script.py", "_____no_output_____" ], [ "It may also be convenient to create a `.theanorc` file in your home directory to make those configurations permanent. For example, to always use `float32`, execute\n\n echo -e \"\\n[global]\\nfloatX=float32\\n\" >> ~/.theanorc\n \nOr, create a `.theanorc` file manually with the following contents\n\n [global]\n floatX = float32\n device = gpu\n", "_____no_output_____" ], [ "<br>\n<br>", "_____no_output_____" ], [ "## Working with array structures", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# initialize\n# if you are running Theano on 64 bit mode, \n# you need to use dmatrix instead of fmatrix\nx = T.fmatrix(name='x')\nx_sum = T.sum(x, axis=0)\n\n# compile\ncalc_sum = theano.function(inputs=[x], outputs=x_sum)\n\n# execute (Python list)\nary = [[1, 2, 3], [1, 2, 3]]\nprint('Column sum:', calc_sum(ary))\n\n# execute (NumPy array)\nary = np.array([[1, 2, 3], [1, 2, 3]], dtype=theano.config.floatX)\nprint('Column sum:', calc_sum(ary))", "Column sum: [ 2. 4. 6.]\nColumn sum: [ 2. 4. 6.]\n" ] ], [ [ "Updating shared arrays.\nMore info about memory management in Theano can be found here: http://deeplearning.net/software/theano/tutorial/aliasing.html", "_____no_output_____" ] ], [ [ "# initialize\nx = T.fmatrix(name='x')\nw = theano.shared(np.asarray([[0.0, 0.0, 0.0]], \n dtype=theano.config.floatX))\nz = x.dot(w.T)\nupdate = [[w, w + 1.0]]\n\n# compile\nnet_input = theano.function(inputs=[x], \n updates=update, \n outputs=z)\n\n# execute\ndata = np.array([[1, 2, 3]], dtype=theano.config.floatX)\nfor i in range(5):\n print('z%d:' % i, net_input(data))", "z0: [[ 0.]]\nz1: [[ 6.]]\nz2: [[ 12.]]\nz3: [[ 18.]]\nz4: [[ 24.]]\n" ] ], [ [ "We can use the `givens` variable to insert values into the graph before compiling it. Using this approach we can reduce the number of transfers from RAM (via CPUs) to GPUs to speed up learning with shared variables. If we use `inputs`, a datasets is transferred from the CPU to the GPU multiple times, for example, if we iterate over a dataset multiple times (epochs) during gradient descent. Via `givens`, we can keep the dataset on the GPU if it fits (e.g., a mini-batch). 
", "_____no_output_____" ] ], [ [ "# initialize\ndata = np.array([[1, 2, 3]], \n dtype=theano.config.floatX)\nx = T.fmatrix(name='x')\nw = theano.shared(np.asarray([[0.0, 0.0, 0.0]], \n dtype=theano.config.floatX))\nz = x.dot(w.T)\nupdate = [[w, w + 1.0]]\n\n# compile\nnet_input = theano.function(inputs=[], \n updates=update, \n givens={x: data},\n outputs=z)\n\n# execute\nfor i in range(5):\n print('z:', net_input())", "z: [[ 0.]]\nz: [[ 6.]]\nz: [[ 12.]]\nz: [[ 18.]]\nz: [[ 24.]]\n" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "## Wrapping things up: A linear regression example", "_____no_output_____" ], [ "Creating some training data.", "_____no_output_____" ] ], [ [ "import numpy as np\nX_train = np.asarray([[0.0], [1.0], [2.0], [3.0], [4.0],\n [5.0], [6.0], [7.0], [8.0], [9.0]], \n dtype=theano.config.floatX)\n\ny_train = np.asarray([1.0, 1.3, 3.1, 2.0, 5.0, \n 6.3, 6.6, 7.4, 8.0, 9.0], \n dtype=theano.config.floatX)", "_____no_output_____" ] ], [ [ "Implementing the training function.", "_____no_output_____" ] ], [ [ "import theano\nfrom theano import tensor as T\nimport numpy as np\n\ndef train_linreg(X_train, y_train, eta, epochs):\n\n costs = []\n # Initialize arrays\n eta0 = T.fscalar('eta0')\n y = T.fvector(name='y') \n X = T.fmatrix(name='X') \n w = theano.shared(np.zeros(\n shape=(X_train.shape[1] + 1),\n dtype=theano.config.floatX),\n name='w')\n \n # calculate cost\n net_input = T.dot(X, w[1:]) + w[0]\n errors = y - net_input\n cost = T.sum(T.pow(errors, 2)) \n\n # perform gradient update\n gradient = T.grad(cost, wrt=w)\n update = [(w, w - eta0 * gradient)]\n\n # compile model\n train = theano.function(inputs=[eta0],\n outputs=cost,\n updates=update,\n givens={X: X_train,\n y: y_train,}) \n \n for _ in range(epochs):\n costs.append(train(eta))\n \n return costs, w", "_____no_output_____" ] ], [ [ "Plotting the sum of squared errors cost vs epochs.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\ncosts, w = train_linreg(X_train, y_train, eta=0.001, epochs=10)\n \nplt.plot(range(1, len(costs)+1), costs)\n\nplt.tight_layout()\nplt.xlabel('Epoch')\nplt.ylabel('Cost')\nplt.tight_layout()\n# plt.savefig('./figures/cost_convergence.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "Making predictions.", "_____no_output_____" ] ], [ [ "def predict_linreg(X, w):\n Xt = T.matrix(name='X')\n net_input = T.dot(Xt, w[1:]) + w[0]\n predict = theano.function(inputs=[Xt], givens={w: w}, outputs=net_input)\n return predict(X)\n\nplt.scatter(X_train, y_train, marker='s', s=50)\nplt.plot(range(X_train.shape[0]), \n predict_linreg(X_train, w), \n color='gray', \n marker='o', \n markersize=4, \n linewidth=3)\n\nplt.xlabel('x')\nplt.ylabel('y')\n\nplt.tight_layout()\n# plt.savefig('./figures/linreg.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# Choosing activation functions for feedforward neural networks", "_____no_output_____" ], [ "...", "_____no_output_____" ], [ "## Logistic function recap", "_____no_output_____" ], [ "The logistic function, often just called \"sigmoid function\" is in fact a special case of a sigmoid function.\n\nNet input $z$:\n$$z = w_1x_{1} + \\dots + w_mx_{m} = \\sum_{j=1}^{m} x_{j}w_{j} \\\\ = \\mathbf{w}^T\\mathbf{x}$$\n\nLogistic activation function:\n\n$$\\phi_{logistic}(z) = \\frac{1}{1 + e^{-z}}$$\n\nOutput range: (0, 1)", "_____no_output_____" ] ], [ [ "# note that first element (X[0] = 1) to denote bias unit\n\nX = np.array([[1, 1.4, 1.5]])\nw = 
np.array([0.0, 0.2, 0.4])\n\ndef net_input(X, w):\n z = X.dot(w)\n return z\n\ndef logistic(z):\n return 1.0 / (1.0 + np.exp(-z))\n\ndef logistic_activation(X, w):\n z = net_input(X, w)\n return logistic(z)\n\nprint('P(y=1|x) = %.3f' % logistic_activation(X, w)[0])", "P(y=1|x) = 0.707\n" ] ], [ [ "Now, imagine a MLP perceptron with 3 hidden units + 1 bias unit in the hidden unit. The output layer consists of 3 output units.", "_____no_output_____" ] ], [ [ "# W : array, shape = [n_output_units, n_hidden_units+1]\n# Weight matrix for hidden layer -> output layer.\n# note that first column (A[:][0] = 1) are the bias units\nW = np.array([[1.1, 1.2, 1.3, 0.5],\n [0.1, 0.2, 0.4, 0.1],\n [0.2, 0.5, 2.1, 1.9]])\n\n# A : array, shape = [n_hidden+1, n_samples]\n# Activation of hidden layer.\n# note that first element (A[0][0] = 1) is for the bias units\n\nA = np.array([[1.0], \n [0.1], \n [0.3], \n [0.7]])\n\n# Z : array, shape = [n_output_units, n_samples]\n# Net input of output layer.\n\nZ = W.dot(A) \ny_probas = logistic(Z)\nprint('Probabilities:\\n', y_probas)", "Probabilities:\n [[ 0.87653295]\n [ 0.57688526]\n [ 0.90114393]]\n" ], [ "y_class = np.argmax(Z, axis=0)\nprint('predicted class label: %d' % y_class[0])", "predicted class label: 2\n" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "## Estimating probabilities in multi-class classification via the softmax function", "_____no_output_____" ], [ "The softmax function is a generalization of the logistic function and allows us to compute meaningful class-probalities in multi-class settings (multinomial logistic regression).\n\n$$P(y=j|z) =\\phi_{softmax}(z) = \\frac{e^{z_j}}{\\sum_{k=1}^K e^{z_k}}$$\n\nthe input to the function is the result of K distinct linear functions, and the predicted probability for the j'th class given a sample vector x is:\n\nOutput range: (0, 1)", "_____no_output_____" ] ], [ [ "def softmax(z): \n return np.exp(z) / np.sum(np.exp(z))\n\ndef softmax_activation(X, w):\n z = net_input(X, w)\n return softmax(z)", "_____no_output_____" ], [ "y_probas = softmax(Z)\nprint('Probabilities:\\n', y_probas)", "Probabilities:\n [[ 0.40386493]\n [ 0.07756222]\n [ 0.51857284]]\n" ], [ "y_probas.sum()", "_____no_output_____" ], [ "y_class = np.argmax(Z, axis=0)\ny_class", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "## Broadening the output spectrum using a hyperbolic tangent", "_____no_output_____" ], [ "Another special case of a sigmoid function, it can be interpreted as a rescaled version of the logistic function.\n\n$$\\phi_{tanh}(z) = \\frac{e^{z}-e^{-z}}{e^{z}+e^{-z}}$$\n\nOutput range: (-1, 1)", "_____no_output_____" ] ], [ [ "def tanh(z):\n e_p = np.exp(z) \n e_m = np.exp(-z)\n return (e_p - e_m) / (e_p + e_m) ", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\nz = np.arange(-5, 5, 0.005)\nlog_act = logistic(z)\ntanh_act = tanh(z)\n\n# alternatives:\n# from scipy.special import expit\n# log_act = expit(z)\n# tanh_act = np.tanh(z)\n\nplt.ylim([-1.5, 1.5])\nplt.xlabel('net input $z$')\nplt.ylabel('activation $\\phi(z)$')\nplt.axhline(1, color='black', linestyle='--')\nplt.axhline(0.5, color='black', linestyle='--')\nplt.axhline(0, color='black', linestyle='--')\nplt.axhline(-1, color='black', linestyle='--')\n\nplt.plot(z, tanh_act, \n linewidth=2, \n color='black', \n label='tanh')\nplt.plot(z, log_act, \n linewidth=2, \n color='lightgreen', \n label='logistic')\n\nplt.legend(loc='lower right')\nplt.tight_layout()\n# plt.savefig('./figures/activation.png', 
dpi=300)\nplt.show()", "_____no_output_____" ], [ "Image(filename='./images/13_05.png', width=700) ", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# Training neural networks efficiently using Keras", "_____no_output_____" ], [ "### Loading MNIST", "_____no_output_____" ], [ "1) Download the 4 MNIST datasets from http://yann.lecun.com/exdb/mnist/\n\n- train-images-idx3-ubyte.gz: training set images (9912422 bytes) \n- train-labels-idx1-ubyte.gz: training set labels (28881 bytes) \n- t10k-images-idx3-ubyte.gz: test set images (1648877 bytes) \n- t10k-labels-idx1-ubyte.gz: test set labels (4542 bytes)\n\n2) Unzip those files\n\n3 Copy the unzipped files to a directory `./mnist`", "_____no_output_____" ] ], [ [ "import os\nimport struct\nimport numpy as np\n \ndef load_mnist(path, kind='train'):\n \"\"\"Load MNIST data from `path`\"\"\"\n labels_path = os.path.join(path, \n '%s-labels-idx1-ubyte' \n % kind)\n images_path = os.path.join(path, \n '%s-images-idx3-ubyte' \n % kind)\n \n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', \n lbpath.read(8))\n labels = np.fromfile(lbpath, \n dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\", \n imgpath.read(16))\n images = np.fromfile(imgpath, \n dtype=np.uint8).reshape(len(labels), 784)\n \n return images, labels", "_____no_output_____" ], [ "X_train, y_train = load_mnist('mnist', kind='train')\nprint('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))", "Rows: 60000, columns: 784\n" ], [ "X_test, y_test = load_mnist('mnist', kind='t10k')\nprint('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))", "Rows: 10000, columns: 784\n" ] ], [ [ "### Multi-layer Perceptron in Keras", "_____no_output_____" ], [ "Once you have Theano installed, [Keras](https://github.com/fchollet/keras) can be installed via\n\n pip install Keras", "_____no_output_____" ], [ "In order to run the following code via GPU, you can execute the Python script that was placed in this directory via\n\n THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python mnist_keras_mlp.py", "_____no_output_____" ] ], [ [ "import theano \n\ntheano.config.floatX = 'float32'\nX_train = X_train.astype(theano.config.floatX)\nX_test = X_test.astype(theano.config.floatX)", "_____no_output_____" ] ], [ [ "One-hot encoding of the class variable:", "_____no_output_____" ] ], [ [ "from keras.utils import np_utils\n\nprint('First 3 labels: ', y_train[:3])\n\ny_train_ohe = np_utils.to_categorical(y_train) \nprint('\\nFirst 3 labels (one-hot):\\n', y_train_ohe[:3])", "First 3 labels: [5 0 4]\n\nFirst 3 labels (one-hot):\n [[ 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]\n [ 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 0. 0. 0. 1. 0. 0. 0. 0. 
0.]]\n" ], [ "from keras.models import Sequential\nfrom keras.layers.core import Dense\nfrom keras.optimizers import SGD\n\nnp.random.seed(1) \n\nmodel = Sequential()\nmodel.add(Dense(input_dim=X_train.shape[1], \n output_dim=50, \n init='uniform', \n activation='tanh'))\n\nmodel.add(Dense(input_dim=50, \n output_dim=50, \n init='uniform', \n activation='tanh'))\n\nmodel.add(Dense(input_dim=50, \n output_dim=y_train_ohe.shape[1], \n init='uniform', \n activation='softmax'))\n\nsgd = SGD(lr=0.001, decay=1e-7, momentum=.9)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd)\n\nmodel.fit(X_train, y_train_ohe, \n nb_epoch=50, \n batch_size=300, \n verbose=1, \n validation_split=0.1, \n show_accuracy=True)", "Train on 54000 samples, validate on 6000 samples\nEpoch 0\n54000/54000 [==============================] - 1s - loss: 2.2290 - acc: 0.3592 - val_loss: 2.1094 - val_acc: 0.5342\nEpoch 1\n54000/54000 [==============================] - 1s - loss: 1.8850 - acc: 0.5279 - val_loss: 1.6098 - val_acc: 0.5617\nEpoch 2\n54000/54000 [==============================] - 1s - loss: 1.3903 - acc: 0.5884 - val_loss: 1.1666 - val_acc: 0.6707\nEpoch 3\n54000/54000 [==============================] - 1s - loss: 1.0592 - acc: 0.6936 - val_loss: 0.8961 - val_acc: 0.7615\nEpoch 4\n54000/54000 [==============================] - 1s - loss: 0.8528 - acc: 0.7666 - val_loss: 0.7288 - val_acc: 0.8290\nEpoch 5\n54000/54000 [==============================] - 1s - loss: 0.7187 - acc: 0.8191 - val_loss: 0.6122 - val_acc: 0.8603\nEpoch 6\n54000/54000 [==============================] - 1s - loss: 0.6278 - acc: 0.8426 - val_loss: 0.5347 - val_acc: 0.8762\nEpoch 7\n54000/54000 [==============================] - 1s - loss: 0.5592 - acc: 0.8621 - val_loss: 0.4707 - val_acc: 0.8920\nEpoch 8\n54000/54000 [==============================] - 1s - loss: 0.4978 - acc: 0.8751 - val_loss: 0.4288 - val_acc: 0.9033\nEpoch 9\n54000/54000 [==============================] - 1s - loss: 0.4583 - acc: 0.8847 - val_loss: 0.3935 - val_acc: 0.9035\nEpoch 10\n54000/54000 [==============================] - 1s - loss: 0.4213 - acc: 0.8911 - val_loss: 0.3553 - val_acc: 0.9088\nEpoch 11\n54000/54000 [==============================] - 1s - loss: 0.3972 - acc: 0.8955 - val_loss: 0.3405 - val_acc: 0.9083\nEpoch 12\n54000/54000 [==============================] - 1s - loss: 0.3740 - acc: 0.9022 - val_loss: 0.3251 - val_acc: 0.9170\nEpoch 13\n54000/54000 [==============================] - 1s - loss: 0.3611 - acc: 0.9030 - val_loss: 0.3032 - val_acc: 0.9183\nEpoch 14\n54000/54000 [==============================] - 1s - loss: 0.3479 - acc: 0.9064 - val_loss: 0.2972 - val_acc: 0.9248\nEpoch 15\n54000/54000 [==============================] - 1s - loss: 0.3309 - acc: 0.9099 - val_loss: 0.2778 - val_acc: 0.9250\nEpoch 16\n54000/54000 [==============================] - 1s - loss: 0.3264 - acc: 0.9103 - val_loss: 0.2838 - val_acc: 0.9208\nEpoch 17\n54000/54000 [==============================] - 1s - loss: 0.3136 - acc: 0.9136 - val_loss: 0.2689 - val_acc: 0.9223\nEpoch 18\n54000/54000 [==============================] - 1s - loss: 0.3031 - acc: 0.9156 - val_loss: 0.2634 - val_acc: 0.9313\nEpoch 19\n54000/54000 [==============================] - 1s - loss: 0.2988 - acc: 0.9169 - val_loss: 0.2579 - val_acc: 0.9288\nEpoch 20\n54000/54000 [==============================] - 1s - loss: 0.2909 - acc: 0.9180 - val_loss: 0.2494 - val_acc: 0.9310\nEpoch 21\n54000/54000 [==============================] - 1s - loss: 0.2848 - acc: 0.9202 - val_loss: 0.2478 - val_acc: 
0.9307\nEpoch 22\n54000/54000 [==============================] - 1s - loss: 0.2804 - acc: 0.9194 - val_loss: 0.2423 - val_acc: 0.9343\nEpoch 23\n54000/54000 [==============================] - 1s - loss: 0.2728 - acc: 0.9235 - val_loss: 0.2387 - val_acc: 0.9327\nEpoch 24\n54000/54000 [==============================] - 1s - loss: 0.2673 - acc: 0.9241 - val_loss: 0.2265 - val_acc: 0.9385\nEpoch 25\n54000/54000 [==============================] - 1s - loss: 0.2611 - acc: 0.9253 - val_loss: 0.2270 - val_acc: 0.9347\nEpoch 26\n54000/54000 [==============================] - 1s - loss: 0.2676 - acc: 0.9225 - val_loss: 0.2210 - val_acc: 0.9367\nEpoch 27\n54000/54000 [==============================] - 1s - loss: 0.2528 - acc: 0.9261 - val_loss: 0.2241 - val_acc: 0.9373\nEpoch 28\n54000/54000 [==============================] - 1s - loss: 0.2511 - acc: 0.9264 - val_loss: 0.2170 - val_acc: 0.9403\nEpoch 29\n54000/54000 [==============================] - 1s - loss: 0.2433 - acc: 0.9293 - val_loss: 0.2165 - val_acc: 0.9412\nEpoch 30\n54000/54000 [==============================] - 1s - loss: 0.2465 - acc: 0.9279 - val_loss: 0.2135 - val_acc: 0.9367\nEpoch 31\n54000/54000 [==============================] - 1s - loss: 0.2383 - acc: 0.9306 - val_loss: 0.2138 - val_acc: 0.9427\nEpoch 32\n54000/54000 [==============================] - 1s - loss: 0.2349 - acc: 0.9310 - val_loss: 0.2066 - val_acc: 0.9423\nEpoch 33\n54000/54000 [==============================] - 1s - loss: 0.2301 - acc: 0.9334 - val_loss: 0.2054 - val_acc: 0.9440\nEpoch 34\n54000/54000 [==============================] - 1s - loss: 0.2371 - acc: 0.9317 - val_loss: 0.1991 - val_acc: 0.9480\nEpoch 35\n54000/54000 [==============================] - 1s - loss: 0.2256 - acc: 0.9352 - val_loss: 0.1982 - val_acc: 0.9450\nEpoch 36\n54000/54000 [==============================] - 1s - loss: 0.2313 - acc: 0.9323 - val_loss: 0.2092 - val_acc: 0.9403\nEpoch 37\n54000/54000 [==============================] - 1s - loss: 0.2230 - acc: 0.9341 - val_loss: 0.1993 - val_acc: 0.9445\nEpoch 38\n54000/54000 [==============================] - 1s - loss: 0.2261 - acc: 0.9336 - val_loss: 0.1891 - val_acc: 0.9463\nEpoch 39\n54000/54000 [==============================] - 1s - loss: 0.2166 - acc: 0.9369 - val_loss: 0.1943 - val_acc: 0.9452\nEpoch 40\n54000/54000 [==============================] - 1s - loss: 0.2128 - acc: 0.9370 - val_loss: 0.1952 - val_acc: 0.9435\nEpoch 41\n54000/54000 [==============================] - 1s - loss: 0.2200 - acc: 0.9351 - val_loss: 0.1918 - val_acc: 0.9468\nEpoch 42\n54000/54000 [==============================] - 2s - loss: 0.2107 - acc: 0.9383 - val_loss: 0.1831 - val_acc: 0.9483\nEpoch 43\n54000/54000 [==============================] - 1s - loss: 0.2020 - acc: 0.9411 - val_loss: 0.1906 - val_acc: 0.9443\nEpoch 44\n54000/54000 [==============================] - 1s - loss: 0.2082 - acc: 0.9388 - val_loss: 0.1838 - val_acc: 0.9457\nEpoch 45\n54000/54000 [==============================] - 1s - loss: 0.2048 - acc: 0.9402 - val_loss: 0.1817 - val_acc: 0.9488\nEpoch 46\n54000/54000 [==============================] - 1s - loss: 0.2012 - acc: 0.9417 - val_loss: 0.1876 - val_acc: 0.9480\nEpoch 47\n54000/54000 [==============================] - 1s - loss: 0.1996 - acc: 0.9423 - val_loss: 0.1792 - val_acc: 0.9502\nEpoch 48\n54000/54000 [==============================] - 1s - loss: 0.1921 - acc: 0.9430 - val_loss: 0.1791 - val_acc: 0.9505\nEpoch 49\n54000/54000 [==============================] - 1s - loss: 0.1907 - acc: 0.9432 - val_loss: 0.1749 - 
val_acc: 0.9482\n" ], [ "y_train_pred = model.predict_classes(X_train, verbose=0)\nprint('First 3 predictions: ', y_train_pred[:3])", "First 3 predictions: [5 0 4]\n" ], [ "train_acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]\nprint('Training accuracy: %.2f%%' % (train_acc * 100))", "Training accuracy: 94.51%\n" ], [ "y_test_pred = model.predict_classes(X_test, verbose=0)\ntest_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]\nprint('Test accuracy: %.2f%%' % (test_acc * 100))", "Test accuracy: 94.39%\n" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# Summary", "_____no_output_____" ], [ "...", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cb0f480f9a156a4b4f3462df0999ee8f839727ca
200,861
ipynb
Jupyter Notebook
notebooks/13/3/Confidence_Intervals.ipynb
choldgraf/textbook-jekyll
1259b1df346f2091db53ca09c46be7d320d823b2
[ "MIT" ]
null
null
null
notebooks/13/3/Confidence_Intervals.ipynb
choldgraf/textbook-jekyll
1259b1df346f2091db53ca09c46be7d320d823b2
[ "MIT" ]
null
null
null
notebooks/13/3/Confidence_Intervals.ipynb
choldgraf/textbook-jekyll
1259b1df346f2091db53ca09c46be7d320d823b2
[ "MIT" ]
null
null
null
203.506586
31,912
0.893633
[ [ [ "# HIDDEN\nfrom datascience import *\n%matplotlib inline\npath_data = '../../../data/'\nimport matplotlib.pyplot as plots\nplots.style.use('fivethirtyeight')\nimport numpy as np", "_____no_output_____" ] ], [ [ "### Confidence Intervals ###\nWe have developed a method for estimating a parameter by using random sampling and the bootstrap. Our method produces an interval of estimates, to account for chance variability in the random sample. By providing an interval of estimates instead of just one estimate, we give ourselves some wiggle room.\n\nIn the previous example we saw that our process of estimation produced a good interval about 95% of the time, a \"good\" interval being one that contains the parameter. We say that we are *95% confident* that the process results in a good interval. Our interval of estimates is called a *95% confidence interval* for the parameter, and 95% is called the *confidence level* of the interval.\n\nThe situation in the previous example was a bit unusual. Because we happened to know value of the parameter, we were able to check whether an interval was good or a dud, and this in turn helped us to see that our process of estimation captured the parameter about 95 out of every 100 times we used it.\n\nBut usually, data scientists don't know the value of the parameter. That is the reason they want to estimate it in the first place. In such situations, they provide an interval of estimates for the unknown parameter by using methods like the one we have developed. Because of statistical theory and demonstrations like the one we have seen, data scientists can be confident that their process of generating the interval results in a good interval a known percent of the time.", "_____no_output_____" ], [ "### Confidence Interval for a Population Median: Bootstrap Percentile Method ###\n\nWe will now use the bootstrap method to estimate an unknown population median. The data come from a sample of newborns in a large hospital system; we will treat it as if it were a simple random sample though the sampling was done in multiple stages. [Stat Labs](https://www.stat.berkeley.edu/~statlabs/) by Deborah Nolan and Terry Speed has details about a larger dataset from which this set is drawn. \n\nThe table `baby` contains the following variables for mother-baby pairs: the baby's birth weight in ounces, the number of gestational days, the mother's age in completed years, the mother's height in inches, pregnancy weight in pounds, and whether or not the mother smoked during pregnancy.", "_____no_output_____" ] ], [ [ "baby = Table.read_table(path_data + 'baby.csv')", "_____no_output_____" ], [ "baby", "_____no_output_____" ] ], [ [ "Birth weight is an important factor in the health of a newborn infant โ€“ smaller babies tend to need more medical care in their first days than larger newborns. It is therefore helpful to have an estimate of birth weight before the baby is born. One way to do this is to examine the relationship between birth weight and the number of gestational days. \n\nA simple measure of this relationship is the ratio of birth weight to the number of gestational days. The table `ratios` contains the first two columns of `baby`, as well as a column of the ratios. 
The first entry in that column was calculated as follows:\n\n$$\n\\frac{120~\\mbox{ounces}}{284~\\mbox{days}} ~\\approx ~ 0.4225~ \\mbox{ounces per day}\n$$", "_____no_output_____" ] ], [ [ "ratios = baby.select('Birth Weight', 'Gestational Days').with_column(\n 'Ratio BW/GD', baby.column('Birth Weight')/baby.column('Gestational Days')\n)", "_____no_output_____" ], [ "ratios", "_____no_output_____" ] ], [ [ "Here is a histogram of the ratios.", "_____no_output_____" ] ], [ [ "ratios.select('Ratio BW/GD').hist()", "/home/choldgraf/anaconda/envs/dev/lib/python3.6/site-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ] ], [ [ "At first glance the histogram looks quite symmetric, with the density at its maximum over the interval 4 ounces per day to 4.5 ounces per day. But a closer look reveals that some of the ratios were quite large by comparison. The maximum value of the ratios was just over 0.78 ounces per day, almost double the typical value.", "_____no_output_____" ] ], [ [ "ratios.sort('Ratio BW/GD', descending=True).take(0)", "_____no_output_____" ] ], [ [ "The median gives a sense of the typical ratio because it is unaffected by the very large or very small ratios. The median ratio in the sample is about 0.429 ounces per day.", "_____no_output_____" ] ], [ [ "np.median(ratios.column(2))", "_____no_output_____" ] ], [ [ "But what was the median in the population? We don't know, so we will estimate it. \n\nOur method will be exactly the same as in the previous section. We will bootstrap the sample 5,000 times resulting in 5,000 estimates of the median. Our 95% confidence interval will be the \"middle 95%\" of all of our estimates.\n\nRecall the function `bootstrap_median` defined in the previous section. We will call this function and construct a 95% confidence interval for the median ratio in the population. Remember that the table `ratios` contains the relevant data from our original sample.", "_____no_output_____" ] ], [ [ "def bootstrap_median(original_sample, label, replications):\n \n \"\"\"Returns an array of bootstrapped sample medians:\n original_sample: table containing the original sample\n label: label of column containing the variable\n replications: number of bootstrap samples\n \"\"\"\n \n just_one_column = original_sample.select(label)\n medians = make_array()\n for i in np.arange(replications):\n bootstrap_sample = just_one_column.sample()\n resampled_median = percentile(50, bootstrap_sample.column(0))\n medians = np.append(medians, resampled_median)\n \n return medians", "_____no_output_____" ], [ "# Generate the medians from 5000 bootstrap samples\nbstrap_medians = bootstrap_median(ratios, 'Ratio BW/GD', 5000)", "_____no_output_____" ], [ "# Get the endpoints of the 95% confidence interval\nleft = percentile(2.5, bstrap_medians)\nright = percentile(97.5, bstrap_medians)\n\nmake_array(left, right)", "_____no_output_____" ] ], [ [ "
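The three steps above (resample, collect the medians, take the middle 95%) can be bundled into a small helper for reuse; this is only a sketch, and the name `bootstrap_median_ci` is ours rather than part of the `datascience` module:\n\n```python\n# Illustrative wrapper around the steps carried out above\ndef bootstrap_median_ci(original_sample, label, replications):\n    medians = bootstrap_median(original_sample, label, replications)\n    return make_array(percentile(2.5, medians), percentile(97.5, medians))\n\n# e.g. bootstrap_median_ci(ratios, 'Ratio BW/GD', 5000)\n```\n\n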
We are estimating the the median \"birth weight to gestational days\" ratio in the population is somewhere in the interval 0.425 ounces per day to 0.433 ounces per day.\n\nThe estimate of 0.429 based on the original sample happens to be exactly half-way in between the two ends of the interval, though that need not be true in general.\n\nTo visualize our results, let us draw the empirical histogram of our bootstrapped medians and place the confidence interval on the horizontal axis.", "_____no_output_____" ] ], [ [ "resampled_medians = Table().with_column(\n 'Bootstrap Sample Median', bstrap_medians\n)\nresampled_medians.hist(bins=15)\nplots.plot(make_array(left, right), make_array(0, 0), color='yellow', lw=8);", "/home/choldgraf/anaconda/envs/dev/lib/python3.6/site-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ] ], [ [ "This histogram and interval resembles those we drew in the previous section, with one big difference โ€“ there is no red dot showing where the parameter is. We don't know where that dot should be, or whether it is even in the interval.\n\nWe just have an interval of estimates. It is a 95% confidence interval of estimates, because the process that generates it produces a good interval about 95% of the time. That certainly beats guessing at random!", "_____no_output_____" ], [ "Keep in mind that this interval is an approximate 95% confidence interval. There are many approximations involved in its computation. The approximation is not bad, but it is not exact.", "_____no_output_____" ], [ "### Confidence Interval for a Population Mean: Bootstrap Percentile Method ###\nWhat we have done for medians can be done for means as well. Suppose we want to estimate the average age of the mothers in the population. A natural estimate is the average age of the mothers in the sample. Here is the distribution of their ages, and their average age which was about 27.2 years.", "_____no_output_____" ] ], [ [ "baby.select('Maternal Age').hist()", "/home/choldgraf/anaconda/envs/dev/lib/python3.6/site-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ], [ "np.mean(baby.column('Maternal Age'))", "_____no_output_____" ] ], [ [ "What was the average age of the mothers in the population? We don't know the value of this parameter.\n\nLet's estimate the unknown parameter by the bootstrap method. To do this, we will edit the code for `bootstrap_median` to instead define the function `bootstrap_mean`. 
The code is the same except that the statistics are means instead of medians, and are collected in an array called `means` instead of `medians`", "_____no_output_____" ] ], [ [ "def bootstrap_mean(original_sample, label, replications):\n \n \"\"\"Returns an array of bootstrapped sample means:\n original_sample: table containing the original sample\n label: label of column containing the variable\n replications: number of bootstrap samples\n \"\"\"\n \n just_one_column = original_sample.select(label)\n means = make_array()\n for i in np.arange(replications):\n bootstrap_sample = just_one_column.sample()\n resampled_mean = np.mean(bootstrap_sample.column(0))\n means = np.append(means, resampled_mean)\n \n return means", "_____no_output_____" ], [ "# Generate the means from 5000 bootstrap samples\nbstrap_means = bootstrap_mean(baby, 'Maternal Age', 5000)\n\n# Get the endpoints of the 95% confidence interval\nleft = percentile(2.5, bstrap_means)\nright = percentile(97.5, bstrap_means)\n\nmake_array(left, right)", "_____no_output_____" ] ], [ [ "The 95% confidence interval goes from about 26.9 years to about 27.6 years. That is, we are estimating that the average age of the mothers in the population is somewhere in the interval 26.9 years to 27.6 years. \n\nNotice how close the two ends are to the average of about 27.2 years in the original sample. The sample size is very large โ€“ 1,174 mothers โ€“ and so the sample averages don't vary much. We will explore this observation further in the next chapter.\n\nThe empirical histogram of the 5,000 bootstrapped means is shown below, along with the 95% confidence interval for the population mean.", "_____no_output_____" ] ], [ [ "resampled_means = Table().with_column(\n 'Bootstrap Sample Mean', bstrap_means\n)\nresampled_means.hist(bins=15)\nplots.plot(make_array(left, right), make_array(0, 0), color='yellow', lw=8);", "/home/choldgraf/anaconda/envs/dev/lib/python3.6/site-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ] ], [ [ "Once again, the average of the original sample (27.23 years) is close to the center of the interval. That's not very surprising, because each bootstrapped sample is drawn from that same original sample. The averages of the bootstrapped samples are about symmetrically distributed on either side of the average of the sample from which they were drawn.", "_____no_output_____" ], [ "Notice also that the empirical histogram of the resampled means has roughly a symmetric bell shape, even though the histogram of the sampled ages was not symmetric at all:", "_____no_output_____" ] ], [ [ "baby.select('Maternal Age').hist()", "/home/choldgraf/anaconda/envs/dev/lib/python3.6/site-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ] ], [ [ "This is a consequence of the Central Limit Theorem of probability and statistics. In later sections, we will see what the theorem says.", "_____no_output_____" ], [ "### An 80% Confidence Interval ###\nYou can use the bootstrapped sample means to construct an interval of any level of confidence. For example, to construct an 80% confidence interval for the mean age in the population, you would take the \"middle 80%\" of the resampled means. 
So you would want 10% of the distribution in each of the two tails, and hence the endpoints would be the 10th and 90th percentiles of the resampled means.", "_____no_output_____" ] ], [ [ "left_80 = percentile(10, bstrap_means)\nright_80 = percentile(90, bstrap_means)\nmake_array(left_80, right_80)", "_____no_output_____" ], [ "resampled_means.hist(bins=15)\nplots.plot(make_array(left_80, right_80), make_array(0, 0), color='yellow', lw=8);", "/home/choldgraf/anaconda/envs/dev/lib/python3.6/site-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ] ], [ [ "This 80% confidence interval is much shorter than the 95% confidence interval. It only goes from about 27.0 years to about 27.4 years. While that's a tight set of estimates, you know that this process only produces a good interval about 80% of the time. \n\nThe earlier process produced a wider interval but we had more confidence in the process that generated it.\n\nTo get a narrow confidence interval at a high level of confidence, you'll have to start with a larger sample. We'll see why in the next chapter.", "_____no_output_____" ], [ "### Confidence Interval for a Population Proportion: Bootstrap Percentile Method ###\nIn the sample, 39% of the mothers smoked during pregnancy.", "_____no_output_____" ] ], [ [ "baby.where('Maternal Smoker', are.equal_to(True)).num_rows/baby.num_rows", "_____no_output_____" ] ], [ [ "For what follows, it is useful to observe that this proportion can also be calculated by an array operation:", "_____no_output_____" ] ], [ [ "smoking = baby.column('Maternal Smoker')\nnp.count_nonzero(smoking)/len(smoking)", "_____no_output_____" ] ], [ [ "What percent of mothers in the population smoked during pregnancy? This is an unknown parameter which we can estimate by a bootstrap confidence interval. The steps in the process are analogous to those we took to estimate the population mean and median.\n\nWe will start by defining a function `bootstrap_proportion` that returns an array of bootstrapped sample proportions. Once again, we will achieve this by editing our definition of `bootstrap_median`. The only change in computation is in replacing the median of the resample by the proportion of smokers in it. The code assumes that the column of data consists of Boolean values. The other changes are only to the names of arrays, to help us read and understand our code.", "_____no_output_____" ] ], [ [ "def bootstrap_proportion(original_sample, label, replications):\n \n \"\"\"Returns an array of bootstrapped sample proportions:\n original_sample: table containing the original sample\n label: label of column containing the Boolean variable\n replications: number of bootstrap samples\n \"\"\"\n \n just_one_column = original_sample.select(label)\n proportions = make_array()\n for i in np.arange(replications):\n bootstrap_sample = just_one_column.sample()\n resample_array = bootstrap_sample.column(0)\n resampled_proportion = np.count_nonzero(resample_array)/len(resample_array)\n proportions = np.append(proportions, resampled_proportion)\n \n return proportions", "_____no_output_____" ] ], [ [ "Let us use `bootstrap_proportion` to construct an approximate 95% confidence interval for the percent of smokers among the mothers in the population. 
The code is analogous to the corresponding code for the mean and median.", "_____no_output_____" ] ], [ [ "# Generate the proportions from 5000 bootstrap samples\nbstrap_props = bootstrap_proportion(baby, 'Maternal Smoker', 5000)\n\n# Get the endpoints of the 95% confidence interval\nleft = percentile(2.5, bstrap_props)\nright = percentile(97.5, bstrap_props)\n\nmake_array(left, right)", "_____no_output_____" ] ], [ [ "The confidence interval goes from about 36% to about 42%. The original sample percent of 39% is very close to the center of the interval, as you can see below.", "_____no_output_____" ] ], [ [ "resampled_proportions = Table().with_column(\n 'Bootstrap Sample Proportion', bstrap_props\n)\nresampled_proportions.hist(bins=15)\nplots.plot(make_array(left, right), make_array(0, 0), color='yellow', lw=8);", "/home/choldgraf/anaconda/envs/dev/lib/python3.6/site-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ] ], [ [ "### Care in Using the Bootstrap ###\nThe bootstrap is an elegant and powerful method. Before using it, it is important to keep some points in mind.\n\n- Start with a large random sample. If you don't, the method might not work. Its success is based on large random samples (and hence also resamples from the sample) resembling the population. The Law of Averages says that this is likely to be true provided the random sample is large.\n\n- To approximate the probability distribution of a statistic, it is a good idea to replicate the resampling procedure as many times as possible. A few thousand replications will result in decent approximations to the distribution of sample median, especially if the distribution of the population has one peak and is not very asymmetric. We used 5,000 replications in our examples but would recommend 10,000 in general.\n\n- The bootstrap percentile method works well for estimating the population median or mean based on a large random sample. However, it has limitations, as do all methods of estimation. For example, it is not expected to do well in the following situations.\n - The goal is to estimate the minimum or maximum value in the population, or a very low or very high percentile, or parameters that are greatly influenced by rare elements of the population.\n - The probability distribution of the statistic is not roughly bell shaped.\n - The original sample is very small, say less than 10 or 15.\n\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb0f5c2689a25de9c02b2c3db9c9541cc75e6a5c
189,811
ipynb
Jupyter Notebook
notebooks/Quick_and_dirty_compare.ipynb
caichac-dhi/fmskill
b67057de10a49b7a55f9cf2f204d314466ad1635
[ "MIT" ]
7
2021-03-19T15:49:07.000Z
2022-01-11T12:28:53.000Z
notebooks/Quick_and_dirty_compare.ipynb
caichac-dhi/fmskill
b67057de10a49b7a55f9cf2f204d314466ad1635
[ "MIT" ]
49
2021-03-19T12:40:02.000Z
2022-03-15T12:03:35.000Z
notebooks/Quick_and_dirty_compare.ipynb
caichac-dhi/fmskill
b67057de10a49b7a55f9cf2f204d314466ad1635
[ "MIT" ]
4
2021-08-31T14:25:55.000Z
2022-03-21T16:11:58.000Z
721.714829
117,202
0.949866
[ [ [ "## Quick and dirty compare\n\nSometimes all your need is a simple comparison of two time series. The `fmskill.compare()` method does just that.", "_____no_output_____" ] ], [ [ "from mikeio import Dfs0\r\nimport fmskill", "_____no_output_____" ] ], [ [ "### The model\r\nCan be either a dfs0 or a DataFrame. ", "_____no_output_____" ] ], [ [ "fn_mod = '../tests/testdata/SW/ts_storm_4.dfs0'\r\ndf_mod = Dfs0(fn_mod).read(items=0).to_dataframe() # select 1 item", "_____no_output_____" ] ], [ [ "### The observation\r\nCan be either a dfs0, a DataFrame or a PointObservation object. ", "_____no_output_____" ] ], [ [ "fn_obs = '../tests/testdata/SW/eur_Hm0.dfs0'", "_____no_output_____" ] ], [ [ "### compare()\r\n\r\nThe compare() method will return an object that can be used for scatter plots, skill assessment, time series plots etc.\r\n\r\nIf the observation or model has more than 1 item, the `obs_item` or `mod_item` can be given to specify the relevant item. ", "_____no_output_____" ] ], [ [ "c = fmskill.compare(fn_obs, df_mod) # both with 1 item only\r\n#c = fmskill.compare(fn_obs, fn_mod, mod_item=0) # need to specify item in model file", "_____no_output_____" ], [ "c.plot_timeseries();", "_____no_output_____" ], [ "c.skill()", "_____no_output_____" ], [ "c.scatter()", "_____no_output_____" ], [ "c.taylor(normalize_std=True)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb0f623e46b03a27166dcabbdf2fcbaf04df330d
15,954
ipynb
Jupyter Notebook
day5.ipynb
wojla/dw_matrix_car
b05882b70890cd22bbc461355c25022c866d5a15
[ "MIT" ]
null
null
null
day5.ipynb
wojla/dw_matrix_car
b05882b70890cd22bbc461355c25022c866d5a15
[ "MIT" ]
null
null
null
day5.ipynb
wojla/dw_matrix_car
b05882b70890cd22bbc461355c25022c866d5a15
[ "MIT" ]
null
null
null
15,954
15,954
0.698822
[ [ [ "!pip install --upgrade tables\n!pip install eli5\n!pip install xgboost\n!pip install hyperopt", "Requirement already up-to-date: tables in /usr/local/lib/python3.6/dist-packages (3.6.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.9.3 in /usr/local/lib/python3.6/dist-packages (from tables) (1.17.5)\nRequirement already satisfied, skipping upgrade: numexpr>=2.6.2 in /usr/local/lib/python3.6/dist-packages (from tables) (2.7.1)\nRequirement already satisfied: eli5 in /usr/local/lib/python3.6/dist-packages (0.10.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5) (1.12.0)\nRequirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (19.3.0)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from eli5) (1.4.1)\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (1.17.5)\nRequirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5) (0.10.1)\nRequirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.22.1)\nRequirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.8.6)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5) (2.11.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.18->eli5) (0.14.1)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5) (1.1.1)\nRequirement already satisfied: xgboost in /usr/local/lib/python3.6/dist-packages (0.90)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.4.1)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.17.5)\nRequirement already satisfied: hyperopt in /usr/local/lib/python3.6/dist-packages (0.1.2)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from hyperopt) (4.28.1)\nRequirement already satisfied: networkx in /usr/local/lib/python3.6/dist-packages (from hyperopt) (2.4)\nRequirement already satisfied: pymongo in /usr/local/lib/python3.6/dist-packages (from hyperopt) (3.10.1)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from hyperopt) (1.17.5)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from hyperopt) (1.12.0)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from hyperopt) (0.16.0)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from hyperopt) (1.4.1)\nRequirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx->hyperopt) (4.4.1)\n" ], [ "import pandas as pd\nimport numpy as np\n\nimport xgboost as xgb\n\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.model_selection import cross_val_score, KFold\n\nfrom hyperopt import hp, fmin, tpe, STATUS_OK\n\nimport eli5\n\nfrom eli5.sklearn import PermutationImportance\n", "_____no_output_____" ] ], [ [ "## Wczytywanie danych\n", "_____no_output_____" ] ], [ [ "cd '/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car'", "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car\n" ], [ "df = pd.read_hdf('data/car.h5')\ndf.shape", "_____no_output_____" ] ], [ [ "## Feature Engineering", 
"_____no_output_____" ] ], [ [ "SUFFIX_CAT = '__cat'\n\nfor feat in df.columns:\n if isinstance(df[feat][0], list): continue\n\n factorized_values = df[feat].factorize()[0]\n\n if SUFFIX_CAT in feat: \n df[feat] = factorized_values\n else: \n df[feat + SUFFIX_CAT] = factorized_values", "_____no_output_____" ], [ "# cat_feats = [x for x in df.columns if SUFFIX_CAT in x]\n# cat_feats = [x for x in cat_feats if 'price' not in x]\n# cat_feats", "_____no_output_____" ], [ "df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))\ndf['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else str(x).split(' ')[0])\ndf['param_pojemnoล›ฤ‡-skokowa'] = df['param_pojemnoล›ฤ‡-skokowa'].map(lambda x: -1 if str(x) == 'None' else str(x).split('cm')[0].replace(' ', '') )", "_____no_output_____" ], [ "def run_model(model, feats):\n\n X = df[feats].values\n y = df['price_value'].values\n scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')\n return np.mean(scores), np.std(scores)", "_____no_output_____" ], [ "feats = ['param_napฤ™d__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegรณw__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemnoล›ฤ‡-skokowa', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_ล‚opatki-zmiany-biegรณw__cat', 'feature_regulowane-zawieszenie__cat']\n\nxgb_params = {\n 'max_depth' : 5,\n 'n_estimators' : 50,\n 'learning_rate' : 0.1,\n 'seed': 0\n}\n\nmodel = xgb.XGBRegressor(**xgb_params)\nrun_model(model, feats)\n", "[06:43:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[06:43:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[06:43:45] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n" ] ], [ [ "## Hyperopt", "_____no_output_____" ] ], [ [ "def obj_func(params):\n print(\"Training with params: \")\n print(params)\n mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)\n return {'loss': np.abs(mean_mae), 'status': STATUS_OK}\n\n# space\nxgb_reg_params = {\n 'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),\n 'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)),\n 'subsample': hp.quniform('subsample', 0.5, 1, 0.05),\n 'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),\n 'objective': 'reg:squarederror',\n 'n_estimators' : 100,\n 'seed': 0,\n}\n\n## run\nbest = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=25)", "Training with params: \n{'colsample_bytree': 0.8, 'learning_rate': 0.25, 'max_depth': 12, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8500000000000001}\nTraining with params: \n{'colsample_bytree': 0.65, 'learning_rate': 0.3, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.55}\nTraining with params: \n{'colsample_bytree': 0.7000000000000001, 'learning_rate': 0.2, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8500000000000001}\nTraining with params: \n{'colsample_bytree': 
0.55, 'learning_rate': 0.1, 'max_depth': 11, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params: \n{'colsample_bytree': 0.65, 'learning_rate': 0.3, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9500000000000001}\nTraining with params: \n{'colsample_bytree': 0.6000000000000001, 'learning_rate': 0.25, 'max_depth': 13, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.6000000000000001}\nTraining with params: \n{'colsample_bytree': 0.75, 'learning_rate': 0.05, 'max_depth': 7, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9500000000000001}\nTraining with params: \n{'colsample_bytree': 0.9, 'learning_rate': 0.1, 'max_depth': 5, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.55}\nTraining with params: \n{'colsample_bytree': 0.9500000000000001, 'learning_rate': 0.05, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8}\nTraining with params: \n{'colsample_bytree': 0.6000000000000001, 'learning_rate': 0.2, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8}\nTraining with params: \n{'colsample_bytree': 0.9500000000000001, 'learning_rate': 0.3, 'max_depth': 11, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.55}\nTraining with params: \n{'colsample_bytree': 0.55, 'learning_rate': 0.1, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8500000000000001}\nTraining with params: \n{'colsample_bytree': 0.9500000000000001, 'learning_rate': 0.2, 'max_depth': 7, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params: \n{'colsample_bytree': 0.6000000000000001, 'learning_rate': 0.15000000000000002, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8}\nTraining with params: \n{'colsample_bytree': 0.65, 'learning_rate': 0.3, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.75}\nTraining with params: \n{'colsample_bytree': 0.9, 'learning_rate': 0.15000000000000002, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\nTraining with params: \n{'colsample_bytree': 0.9, 'learning_rate': 0.25, 'max_depth': 7, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.6000000000000001}\nTraining with params: \n{'colsample_bytree': 0.8, 'learning_rate': 0.15000000000000002, 'max_depth': 12, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params: \n{'colsample_bytree': 0.8, 'learning_rate': 0.1, 'max_depth': 12, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9500000000000001}\nTraining with params: \n{'colsample_bytree': 0.9500000000000001, 'learning_rate': 0.1, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.75}\nTraining with params: \n{'colsample_bytree': 0.8500000000000001, 'learning_rate': 0.15000000000000002, 'max_depth': 14, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 1.0}\nTraining with params: \n{'colsample_bytree': 0.8, 'learning_rate': 0.15000000000000002, 'max_depth': 14, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 1.0}\nTraining with 
params: \n{'colsample_bytree': 0.75, 'learning_rate': 0.1, 'max_depth': 14, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9500000000000001}\nTraining with params: \n{'colsample_bytree': 0.75, 'learning_rate': 0.1, 'max_depth': 15, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9500000000000001}\nTraining with params: \n{'colsample_bytree': 0.7000000000000001, 'learning_rate': 0.1, 'max_depth': 6, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\n100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 25/25 [22:26<00:00, 60.26s/it, best loss: 7427.057565689908]\n" ], [ "best", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb0f6dcb7335aead13d84f91b6c513c566f0bc1a
24,439
ipynb
Jupyter Notebook
49_Overview.ipynb
Dzeiberg/multiinstance
95b70e066610b1935cda9086d8fb8609809e7d15
[ "Apache-2.0" ]
null
null
null
49_Overview.ipynb
Dzeiberg/multiinstance
95b70e066610b1935cda9086d8fb8609809e7d15
[ "Apache-2.0" ]
null
null
null
49_Overview.ipynb
Dzeiberg/multiinstance
95b70e066610b1935cda9086d8fb8609809e7d15
[ "Apache-2.0" ]
null
null
null
58.889157
8,552
0.775973
[ [ [ "# Bag J\n\n$X^J_U : x^j_u \\sim f^j(x) = \\sum_{i=1}^{K}\\alpha^j \\pi_i^j \\Phi^+_i(x) + (1 - \\alpha^j) \\rho_i^j \\Phi^-_i(x)$\n\n$\\sum_{i=1}^{K}\\pi_i^j = 1$\n\n$\\sum_{i=1}^{K}\\rho_i^j = 1$ \n\n$X^J_L : x^j_l \\sim f^j_+(x) = \\sum_{i=1}^{K}\\pi_i^j \\Phi_i^+(x)$ ", "_____no_output_____" ] ], [ [ "from pgmpy.models import BayesianModel\nfrom pgmpy.inference import VariableElimination\nimport daft\nfrom daft import PGM", "_____no_output_____" ], [ "def convert_pgm_to_pgmpy(pgm):\n \"\"\"Takes a Daft PGM object and converts it to a pgmpy BayesianModel\"\"\"\n edges = [(edge.node1.name, edge.node2.name) for edge in pgm._edges]\n model = BayesianModel(edges)\n return model", "_____no_output_____" ], [ "pgm = PGM()\n\npgm.add_node(daft.Node(\"bag\",r\"B\",2,1,observed=True))\npgm.add_node(daft.Node(\"label\",r\"y\",3,1))\npgm.add_node(daft.Node(\"cluster\",r\"c\",3,2))\npgm.add_node(daft.Node(\"prior\",r\"a\",2,4))\npgm.add_node(daft.Node(\"data\",r\"x\",4,2,observed=True))\npgm.add_edge(\"bag\",\"label\",directed=True)\npgm.add_edge(\"bag\",\"cluster\")\npgm.add_edge(\"label\",\"cluster\")\npgm.add_edge(\"label\",\"data\")\npgm.add_edge(\"cluster\",\"data\")\npgm.add_edge(\"prior\",\"label\")", "_____no_output_____" ], [ "pgm.render()", "_____no_output_____" ], [ "model = convert_pgm_to_pgmpy(pgm)", "_____no_output_____" ] ], [ [ "$p(x) = \\sum_{b \\in Bags}p(b) \\sum_{y_i} p(y_i|b)\\sum_{i=1}^{K}p(c_i|b,y)p(x|c_i,y)$", "_____no_output_____" ] ], [ [ "model.get_independencies()", "_____no_output_____" ], [ "model.fit()", "_____no_output_____" ] ], [ [ "$p(x|b_i) = \\sum_{b\\in Bags}I[b_i = b][]$", "_____no_output_____" ] ], [ [ "pgm.add_node(daft.Node(\"bag\",r\"B\",2,1,observed=True))\npgm.add_node(daft.Node(\"label\",r\"y\",3,1))\npgm.add_node(daft.Node(\"cluster\",r\"c\",3,2))\npgm.add_node(daft.Node(\"data\",r\"x\",4,2,observed=True))\npgm.add_edge(\"bag\",\"label\")\npgm.add_edge(\"bag\",\"cluster\")\npgm.add_edge(\"label\",\"cluster\")\npgm.add_edge(\"label\",\"data\")\npgm.add_edge(\"cluster\",\"data\")", "_____no_output_____" ], [ "model.render()", "_____no_output_____" ] ], [ [ "$p(x) = \\sum_{b_i \\in {Bags}}I[b_i = b]$", "_____no_output_____" ] ], [ [ "def convert_pgm_to_pgmpy(pgm):\n \"\"\"Takes a Daft PGM object and converts it to a pgmpy BayesianModel\"\"\"\n edges = [(edge.node1.name, edge.node2.name) for edge in pgm._edges]\n model = BayesianModel(edges)\n return model", "_____no_output_____" ], [ "pgm.render()\nplt.show()", "_____no_output_____" ] ], [ [ "model.to_daft(node_pos={\"bag\":(1,1),\n \"label\":(1,2),\n \"cluster\":(2,2),\n \"data\":(2,3)}).render()", "_____no_output_____" ] ], [ [ "model.get_independencies()", "_____no_output_____" ], [ "model = BayesianModel([(\"bag\",'label'),\n (\"bag\",\"cluster\"),\n (\"label\",\"cluster\"),\n (\"label\",\"data\"),\n (\"cluster\",\"data\")])", "_____no_output_____" ], [ "pgm = PGM()", "_____no_output_____" ], [ "import daft\nfrom daft import PGM\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "import daft\nfrom daft import PGM\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "pgm._nodes", "_____no_output_____" ], [ "model.edges", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "raw" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb0f76a7250985f464cd5f93689a8b19a4232163
278,299
ipynb
Jupyter Notebook
docs/Data exploration with python.ipynb
trquinn/tangos
e72fb70dd62c6f24a30fa97b3c07f83eaf69cd99
[ "BSD-3-Clause" ]
15
2017-12-04T18:05:32.000Z
2021-12-20T22:11:20.000Z
docs/Data exploration with python.ipynb
trquinn/tangos
e72fb70dd62c6f24a30fa97b3c07f83eaf69cd99
[ "BSD-3-Clause" ]
99
2017-11-09T16:47:20.000Z
2022-03-07T10:15:12.000Z
docs/Data exploration with python.ipynb
trquinn/tangos
e72fb70dd62c6f24a30fa97b3c07f83eaf69cd99
[ "BSD-3-Clause" ]
14
2017-11-06T18:46:17.000Z
2021-12-13T10:49:53.000Z
334.49399
46,342
0.923395
[ [ [ "Interactive analysis with python\n--------------------------------\n\nBefore starting this tutorial, ensure that you have set up _tangos_ [as described here](https://pynbody.github.io/tangos/) and the data sources [as described here](https://pynbody.github.io/tangos/data_exploration.html).\n\nWe get started by importing the modules we'll need:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport tangos\nimport pylab as p", "_____no_output_____" ] ], [ [ "First let's inspect what simulations are available in our database:", "_____no_output_____" ] ], [ [ "tangos.all_simulations()", "_____no_output_____" ] ], [ [ "For any of these simulations, we can generate a list of available timesteps as follows:", "_____no_output_____" ] ], [ [ "tangos.get_simulation(\"tutorial_changa\").timesteps", "_____no_output_____" ] ], [ [ "For any timestep, we can access the halos using `.halos` and a specific halo using standard python 0-based indexing:", "_____no_output_____" ] ], [ [ "tangos.get_simulation(\"tutorial_changa\").timesteps[3].halos[3]", "_____no_output_____" ] ], [ [ "One can skip straight to getting a specific halo as follows:", "_____no_output_____" ] ], [ [ "tangos.get_halo(\"tutorial_changa/%384/halo_4\")", "_____no_output_____" ] ], [ [ "Note the use of the SQL wildcard % character which avoids us having to type out the entire path. Whatever way you access it, the resulting object allows you to query what properties have been calculated for that specific halo. We can then access those properties using the normal python square-bracket dictionary syntax.", "_____no_output_____" ] ], [ [ "halo = tangos.get_halo(\"tutorial_changa/%960/halo_1\")\nhalo.keys()", "_____no_output_____" ], [ "halo['Mvir']", "_____no_output_____" ], [ "p.imshow(halo['uvi_image'])", "_____no_output_____" ] ], [ [ "One can also get meta-information about the computed property. It would be nice to know\nthe physical size of the image we just plotted. We retrieve the underlying property object\nand ask it:", "_____no_output_____" ] ], [ [ "halo.get_description(\"uvi_image\").plot_extent()", "_____no_output_____" ] ], [ [ "This tells us that the image is 15 kpc across. The example properties that come with _tangos_\nuse _pynbody_'s units system to convert everything to physical kpc, solar masses and km/s. When\nyou implement your own properties, you can of course store them in whichever units you like.", "_____no_output_____" ], [ "Getting a time sequence of properties\n-------------------------------------\n\nOften we would like to see how a property varies over time. _Tangos_ provides convenient ways to extract this information, automatically finding \nmajor progenitors or descendants for a halo. Let's see this illustrated on the SubFind _mass_ property:", "_____no_output_____" ] ], [ [ "halo = tangos.get_halo(\"tutorial_gadget/snapshot_020/halo_10\")\n\n# Calculate on major progenitor branch:\nMvir, t = halo.calculate_for_progenitors(\"mass\",\"t()\")\n\n# Now perform plotting:\np.plot(t,1e10*Mvir)\np.xlabel(\"t/Gyr\")\np.ylabel(r\"$M/h^{-1} M_{\\odot}$\")\np.semilogy()", "_____no_output_____" ] ], [ [ "In the example above, `calculate_for_progenitors` retrieves properties on the major progenitor branch of the chosen halo. One can ask for as many properties as you like, each one being returned as a numpy array in order. In this particular example the first property is the mass (as reported by subfind) and the second is the time. 
In fact the second property isn't really stored - if you check `halo.keys()` you won't find `t` in there. It's a simple example of a _live property_ which means it's calculated on-the-fly from other data. The time is actually stored in the TimeStep rather than the Halo database entry, so the `t()` live property simply retrieves it from the appropriate location.\n\nLive properties are a powerful aspect of _tangos_. We'll see more of them momentarily.", "_____no_output_____" ], [ "Histogram properties\n--------------------\n\nWhile the approach above is the main way to get time series of data with _tangos_, sometimes one\nwants to be able to use finer time bins than the number of outputs available. For example, star\nformation rates or black hole accretion rates often vary on short timescales and the output files\nfrom simulations are sufficient to reconstruct these variations in between snapshots.\n\n_Tangos_ implements `TimeChunkedHistogram` for this purpose. As the name suggests, a _chunk_ of\nhistorical data is stored with each timestep. The full history is then reconstructed by combining\nthe chunks through the merger tree; this process is customizable. Let's start with the simplest\npossible request:", "_____no_output_____" ] ], [ [ "halo = tangos.get_halo(\"tutorial_changa_blackholes/%960/halo_1\")\nSFR = halo[\"SFR_histogram\"]\n\n# The above is sufficient to retrieve the histogram; however you probably also want to check\n# the size of the time bins. The easiest approach is to request a suitable time array to go with\n# the SF history:\nSFR_property_object = halo.get_objects(\"SFR_histogram\")[0]\nSFR_time_bins = SFR_property_object.x_values()\n\np.plot(SFR_time_bins, SFR)\np.xlabel(\"Time/Gyr\")\np.ylabel(\"SFR/$M_{\\odot}\\,yr^{-1}$\")", "_____no_output_____" ] ], [ [ "The advantage of storing the histogram in chunks is that one can reconstruct it\nin different ways. The default is to go along the major progenitor branch, but\none can also sum over all progenitors. The following code shows the fraction of\nstar formation in the major progenitor:", "_____no_output_____" ] ], [ [ "SFR_all = halo.calculate('reassemble(SFR_histogram, \"sum\")')\np.plot(SFR_time_bins, SFR/SFR_all)\np.xlabel(\"Time/Gyr\")\np.ylabel(\"Frac. SFR in major progenitor\")", "/Users/app/anaconda/envs/py36/lib/python3.6/site-packages/ipykernel_launcher.py:2: RuntimeWarning: invalid value encountered in true_divide\n \n" ] ], [ [ "_Technical note_: It's worth being aware that the merger information is, of course, quantized to the\noutput timesteps even though the SFR information is stored in small chunks. This is rarely an issue\nbut with coarse timesteps (such as those in the tutorial simulations), the quantization can cause\nnoticable artefacts โ€“ here, the jump to 100% in the major progenitor shortly before _t_ = 3 Gyr \ncorresponds to the time of the penultimate stored step, after which no mergers are recorded. \n\nFor more information, see the [time-histogram properties](https://pynbody.github.io/tangos/histogram_properties.html) page.", "_____no_output_____" ], [ "Let's see another example of a histogram property: the black hole accretion rate ", "_____no_output_____" ] ], [ [ "BH_accrate = halo.calculate('BH.BH_mdot_histogram')\np.plot(SFR_time_bins, BH_accrate)\np.xlabel(\"Time/Gyr\")\np.ylabel(\"BH accretion rate/$M_{\\odot}\\,yr^{-1}$\")", "/Users/app/Science/tangos/tangos/live_calculation/__init__.py:585: RuntimeWarning: More than one relation for target 'BH' has been found. 
Picking the first.\n warnings.warn(\"More than one relation for target %r has been found. Picking the first.\"%str(self.locator), RuntimeWarning)\n" ] ], [ [ "This works fine, but you may have noticed the warning that more than one black hole\nis in the halo of interest. There is more information about the way that links between\nobjects work in _tangos_, and disambiguating between them, in the \"using links\" section\nbelow.", "_____no_output_____" ], [ "Getting properties for multiple halos\n-------------------------------------\n\nQuite often one wants to collect properties from multiple halos simultaneously. Suppose we want to plot the mass against the vmax for all halos at\na specific snapshot:", "_____no_output_____" ] ], [ [ "timestep = tangos.get_timestep(\"tutorial_gadget/snapshot_019\")\nmass, vmax = timestep.calculate_all(\"mass\",\"VMax\")\n\np.plot(mass*1e10,vmax,'k.')\np.loglog()\np.xlabel(\"$M/h^{-1} M_{\\odot}$\")\np.ylabel(r\"$v_{max}/{\\rm km s^{-1}}$\")", "_____no_output_____" ] ], [ [ "Often when querying multiple halos we still want to know something about their history, and live calculations enable that. Suppose we want to know how much the mass has grown since the previous snapshot:", "_____no_output_____" ] ], [ [ "mass, fractional_delta_2 = timestep.calculate_all(\"mass\", \"(mass-earlier(2).mass)/mass\")\n\np.hlines(0.0,1e10,1e15, colors=\"gray\")\np.plot(mass*1e10, fractional_delta_2,\"r.\", alpha=0.2)\np.semilogx()\np.ylim(-0.1,0.9)\np.xlim(1e12,1e15)\np.xlabel(\"$M/h^{-1} M_{\\odot}$\")\np.ylabel(\"Fractional growth in mass\")", "_____no_output_____" ] ], [ [ "This is a much more ambitious use of the live calculation system. Consider the last property retrieved, which is `(mass-earlier(2).mass)/mass`. This combines algebraic operations with _redirection_: `earlier(2)` finds the major progenitor two steps prior to this one, after which `.mass` retrieves the mass at that earlier timestep. This is another example of a \"link\", as previously used to retrieve\nblack hole information above.\n", "_____no_output_____" ], [ "Using Links\n-----------\n\n_Tangos_ has a concept of \"links\" between objects including halos and black holes. For example,\nthe merger tree information that you have already used indirectly is stored as links. \n\nReturning to our example of black holes above, we used a link named `BH`; however this issued a\nwarning that the result was technically ambiguous. Let's see that warning again. For clarity,\nwe will use the link named `BH_central` this time around -- it's an alternative set of links\nwhich only includes black holes associated with the central galaxy (rather than any satellites).", "_____no_output_____" ] ], [ [ "halo = tangos.get_halo(\"tutorial_changa_blackholes/%960/halo_1\")\nBH_mass = halo.calculate('BH_central.BH_mass')", "/Users/app/Science/tangos/tangos/live_calculation/__init__.py:585: RuntimeWarning: More than one relation for target 'BH_central' has been found. Picking the first.\n warnings.warn(\"More than one relation for target %r has been found. Picking the first.\"%str(self.locator), RuntimeWarning)\n" ] ], [ [ "We still get the warning, so there's more than one black hole in the central galaxy.\n\nTo avoid such warnings, you can specify more about which link you are referring to. 
For example,\nwe can specifically ask for the black hole with the _largest mass_ and _smallest impact parameters_\nusing the following two queries:", "_____no_output_____" ] ], [ [ "BH_max_mass = halo.calculate('link(BH_central, BH_mass, \"max\")')\nBH_closest = halo.calculate('link(BH_central, BH_central_distance, \"min\")')", "_____no_output_____" ] ], [ [ "The `link` live-calculation function returns the halo with either the maximum or minimum value of an\nassociated property, here the `BH_mass` and `BH_central_distance` properties respectively. \nEither approach disambiguates the black holes we mean (in fact, they unsurprisingly lead to\nthe same disambiguation):", "_____no_output_____" ] ], [ [ "BH_max_mass == BH_closest", "_____no_output_____" ] ], [ [ "However one doesn't always have to name a link to make use of it. The mere existence of a link\nis sometimes enough. An example is the merger tree information already used. Another useful\nexample is when two simulations have the same initial conditions, as in the `tutorial_changa`\nand `tutorial_changa_blackholes` examples; these two simulations differ only in that the latter\nhas AGN feedback. We can identify halos between simulations using the following syntax:", "_____no_output_____" ] ], [ [ "SFR_in_other_sim = halo.calculate(\"match('tutorial_changa').SFR_histogram\")\np.plot(SFR_time_bins, halo['SFR_histogram'],color='r', label=\"With AGN feedback\")\np.plot(SFR_time_bins, SFR_in_other_sim, color='b',label=\"No AGN feedback\")\np.legend(loc=\"lower right\")\np.semilogy()\np.xlabel(\"t/Gyr\")\np.ylabel(\"SFR/$M_{\\odot}\\,yr^{-1}$\")", "_____no_output_____" ] ], [ [ "The `match` syntax simply tries to follow links until it finds a halo in the named\n_tangos_ context. One can use it to match halos across entire timesteps too; let's\ncompare the stellar masses of our objects:", "_____no_output_____" ] ], [ [ "timestep = tangos.get_timestep(\"tutorial_changa/%960\")\nMstar_no_AGN, Mstar_AGN = timestep.calculate_all(\"star_mass_profile[-1]\", \n \"match('tutorial_changa_blackholes').star_mass_profile[-1]\")\n# note that we use star_mass_profile[-1] to get the last entry of the star_mass_profile array,\n# as a means to get the total stellar mass from a profile\np.plot(Mstar_no_AGN, Mstar_AGN, 'k.')\np.plot([1e6,1e11],[1e6,1e11],'k-',alpha=0.3)\np.loglog()\np.xlabel(\"$M_{\\star}/M_{\\odot}$ without AGN\")\np.ylabel(\"$M_{\\star}/M_{\\odot}$ with AGN\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb0f856275923e23199a7dff7a7cb8df13cb2982
3,596
ipynb
Jupyter Notebook
docs/notebooks/previews.ipynb
1grasse/conflowgen
142330ab6427254109af3b86102a30a13144ba0c
[ "MIT" ]
null
null
null
docs/notebooks/previews.ipynb
1grasse/conflowgen
142330ab6427254109af3b86102a30a13144ba0c
[ "MIT" ]
null
null
null
docs/notebooks/previews.ipynb
1grasse/conflowgen
142330ab6427254109af3b86102a30a13144ba0c
[ "MIT" ]
null
null
null
29.47541
133
0.623192
[ [ [ "Previews\n--------\n\nPreviews are typically invoked before running the often time-consuming generation process triggered by\n:meth:`.ContainerFlowGenerationManager.generate`.\nA preview provides a first impression on what kind of data will be generated based on the input distributions and schedules.\nAt this stage, some simplifications are made to actually save some time.\nAmong others, no container instances are generated and operational constraints are neglected.\nThe steps are explained according to the database created by the\n`demo script for CTA <https://github.com/1kastner/conflowgen/blob/main/demo/demo_DEHAM_CTA.py/>`_ ,\nbut you can still use any other conflowgen database.", "_____no_output_____" ] ], [ [ "import datetime\nimport matplotlib.pyplot as plt\n\nfrom IPython.display import Markdown\n\nimport conflowgen\n\ndatabase_chooser = conflowgen.DatabaseChooser(\n sqlite_databases_directory=\"./data/\"\n)\ndatabase_chooser.load_existing_sqlite_database(\"demo_deham_cta.sqlite\")", "_____no_output_____" ] ], [ [ "### Showing Previews as Text\n\nFor running all previews, a convencience function exists.\nIt can simply print all information to the standard output.", "_____no_output_____" ] ], [ [ "conflowgen.run_all_previews(\n as_text=True,\n display_text_func=print,\n)", "_____no_output_____" ] ], [ [ "### Displaying Previews as Graphs\n\nPreviews can also be displayed as graphs.\nThe depicted information contains the same information but might be easier to grasp.\nFor emphasis, in the following the text version and graph version of the report are presented side-by-side.\nIn addition, we also use the Markdown capabilities of the convenience function.\nThis makes the presented previews blend into the remaining content.", "_____no_output_____" ] ], [ [ "conflowgen.run_all_previews(\n as_text=True,\n as_graph=True,\n display_text_func=lambda text: display(Markdown(text)),\n display_in_markup_language=\"markdown\",\n static_graphs=True\n)", "_____no_output_____" ] ] ]
[ "raw", "code", "markdown", "code", "markdown", "code" ]
[ [ "raw" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb0f8c66ecadfa5e02d06e70285073e4337c1805
35,820
ipynb
Jupyter Notebook
GeeksForGeeks/Sudo Placement 2019/bit-magic/bit-magic.ipynb
Dhaneshgupta1027/Python
12193d689cc49d3198ea6fee3f7f7d37b8e59175
[ "MIT" ]
37
2019-04-03T07:19:57.000Z
2022-01-09T06:18:41.000Z
GeeksForGeeks/Sudo Placement 2019/bit-magic/bit-magic.ipynb
Dhaneshgupta1027/Python
12193d689cc49d3198ea6fee3f7f7d37b8e59175
[ "MIT" ]
16
2020-08-11T08:09:42.000Z
2021-10-30T17:40:48.000Z
GeeksForGeeks/Sudo Placement 2019/bit-magic/bit-magic.ipynb
Dhaneshgupta1027/Python
12193d689cc49d3198ea6fee3f7f7d37b8e59175
[ "MIT" ]
130
2019-10-02T14:40:20.000Z
2022-01-26T17:38:26.000Z
25.714286
390
0.469961
[ [ [ "n=b", "_____no_output_____" ] ], [ [ "# Binary representation ---> Microsoft\n# Difficulty: School โ€‚ Marks: 0\n'''\nWrite a program to print Binary representation of a given number N.\n\nInput:\nThe first line of input contains an integer T, denoting the number of test cases. Each test case contains an integer N.\n\nOutput:\nFor each test case, print the binary representation of the number N in 14 bits.\n\nConstraints:\n1 โ‰ค T โ‰ค 100\n1 โ‰ค N โ‰ค 5000\n\nExample:\nInput:\n2\n2\n5\n\nOutput:\n00000000000010\n00000000000101\n'''\n\nfor _ in range(int(input())):\n n=int(input())\n x=bin(n).split('b')[1]\n print('0'*(14-len(x))+x)", "2\n2\n00000000000010\n4\n00000000000100\n" ], [ "# Alone in couple ---> Ola Cabs\n# Difficulty: School โ€‚ Marks: 0\n'''\nIn a party everyone is in couple except one. People who are in couple have same numbers. Find out the person who is not in couple.\n\nInput:\nThe first line contains an integer 'T' denoting the total number of test cases. In each test cases, the first line contains an integer 'N' denoting the size of array. The second line contains N space-separated integers A1, A2, ..., AN denoting the elements of the array. (N is always odd)\n\n\nOutput:\nIn each seperate line print number of the person not in couple.\n\n\nConstraints:\n1<=T<=30\n1<=N<=500\n1<=A[i]<=500\nN%2==1\n\n\nExample:\nInput:\n1\n5\n1 2 3 2 1\n\nOutput:\n3\n'''\n\nfor _ in range(int(input())):\n n=int(input())\n s=input()\n a=''\n for i in s:\n if s.count(i)%2==1 and i not in a:\n a=i\n print(i,end=' ')", "1\n3\n1 1 1\n1 " ], [ "# Count total set bits ---> Amazon,Adobe\n# Difficulty: Basic โ€‚ Marks: 1\n'''\nYou are given a number N. Find the total count of set bits for all numbers from 1 to N(both inclusive).\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. T testcases follow. The first line of each test case is N.\n\nOutput:\nFor each testcase, in a new line, print the total count of all bits.\n\nConstraints:\n1 โ‰ค T โ‰ค 100\n1 โ‰ค N โ‰ค 103\n\nExample:\nInput:\n2\n4\n17\nOutput:\n5\n35\n\nExplanation:\nTestcase1:\nAn easy way to look at it is to consider the number, n = 4:\n0 0 0 = 0\n0 0 1 = 1\n0 1 0 = 1\n0 1 1 = 2\n1 0 0 = 1\nTherefore , the total number of bits is 5.\n'''\nfor _ in range(int(input())):\n n=int(input())\n s=0\n for i in range(n+1):\n s+=bin(i).split('b')[1].count('1')\n print(s)", "2\n4\n5\n17\n35\n" ] ], [ [ "***IMP***", "_____no_output_____" ] ], [ [ "# ------------------------------------------IMP---------------------------------------\n\"https://practice.geeksforgeeks.org/problems/toggle-bits-given-range/0/?track=sp-bit-magic&batchId=152\"\n# Toggle bits given range \n# Difficulty: Basic โ€‚ Marks: 1\n\n'''\nGiven a non-negative number N and two values L and R. The problem is to toggle the bits in the range L to R in the binary representation of N, i.e, to toggle bits from the rightmost Lth bit to the rightmost Rth bit. A toggle operation flips a bit 0 to 1 and a bit 1 to 0.\n\nInput:\nFirst line of input contains a single integer T which denotes the number of test cases. Then T test cases follows. 
First line of each test case contains three space separated integers N, L and R.\n\nOutput:\nFor each test case , print the number obtained by toggling bits from the rightmost Lth bit to the rightmost Rth bit in binary representation of N.\n\nConstraints:\n1<=T<=100\n1<=N<=1000\n1<=L<=R\nL<=R<= Number of bits(N)\n\nExample:\nInput:\n2\n17 2 3\n50 2 5\nOutput:\n23\n44\n'''\n\nfor _ in range(int(input())):\n l=list(map(int,input().split()))\n c=0\n s1=''\n s=bin(l[0])[2:]\n n=len(s)\n for i in s:\n if c>=(n-l[2]) and c<=(n-l[1]):\n if i=='0':\n s1+='1'\n else:\n s1+='0'\n else:\n s1+=i\n c+=1\n print(int(s1,base=2))", "2\n17 2 3\n23\n50 2 5\n44\n" ], [ "\"https://practice.geeksforgeeks.org/problems/set-kth-bit/0/?track=sp-bit-magic&batchId=152\"\n# Set kth bit ---> Cisco, Qualcomm\n# Difficulty: Basic โ€‚ Marks: 1\n\n'''\nGiven a number N and a value K. From the right, set the Kth bit in the binary representation of N. The position of LSB(or last bit) is 0, second last bit is 1 and so on. Also, 0 <= K < X, where X is the number of bits in the binary representation of N.\n\nInput:\nFirst line of input contains a single integer T, which denotes the number of test cases. T test cases follows. First line of each testcase contains two space separated integers N and K.\n\nOutput:\nFor each test case, print the new number after setting the Kth bit of N.\n\nConstraints:\n1 <= T <= 100\n1 <= N <= 1000\n\nExample:\nInput:\n2\n10 2\n15 3\n\nOutput:\n14\n15\n\nExplanation:\nTestcase 1: Binary representation of the given number 10 is: 1 0 1 0, number of bits in the binary reprsentation is 4. Thus 2nd bit from right is 0. The number after changing this bit to 1 is: 14(1 1 1 0).\n'''\n\nfor _ in range(int(input())):\n l=list(map(int,input().split()))\n s=bin(l[0])[2:]\n s1=''\n c=0\n if (l[1]+1)>len(s):\n s1='0'*(l[1]+1-len(s))+s\n s=s1\n s1=''\n \n for i in s:\n if c==(len(s)-(l[1]+1)):\n s1+='1'\n else:\n s1+=i\n c+=1\n print(int(s1,2))", "1\n12 2\n12\n" ], [ "\"https://practice.geeksforgeeks.org/problems/bit-difference/0/?track=sp-bit-magic&batchId=152\"\n# Bit Difference ---> Amazon Qualcomm, Samsung\n# Difficulty: Basic โ€‚ Marks: 1\n'''\nYou are given two numbers A and B. Write a program to count number of bits needed to be flipped to convert A to B.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. T testcases follow. The first line of each test case is A and B separated by a space.\n\nOutput:\nFor each testcase, in a new line, print the number of bits needed to be flipped.\n\nConstraints:\n1 โ‰ค T โ‰ค 100\n1 โ‰ค A, B โ‰ค 103\n\nExample:\nInput:\n1\n10 20\nOutput:\n4\n\nExplanation:\nTestcase1:\nA = 01010\nB = 10100\nNumber of bits need to flipped = 4\n'''\nfor _ in range(int(input())):\n a,c=input().split()\n a=bin(int(a))[2:]\n c=bin(int(c))[2:]\n an=len(a)\n cn=len(c)\n if an!=cn:\n if (an-cn)>0:\n c='0'*(an-cn)+c\n else:\n a='0'*(cn-an)+a\n count=0\n for i,j in zip(a,c):\n if i !=j:\n count+=1\n print(count)", "1\n7 9\n3\n" ], [ "\"https://practice.geeksforgeeks.org/problems/swap-two-nibbles-in-a-byte/0/?track=sp-bit-magic&batchId=152\"\n# Swap two nibbles in a byte ---> Accolite, Cisco, Amazon, Qualcomm\n# Difficulty: Basic โ€‚ Marks: 1\n'''\nGiven a byte, swap the two nibbles in it. For example 100 is be represented as 01100100 in a byte (or 8 bits).\nThe two nibbles are (0110) and (0100). If we swap the two nibbles, we get 01000110 which is 70 in decimal.\n\nInput:\n\nThe first line contains 'T' denoting the number of testcases. 
Each testcase contains a single positive integer X.\n\n\nOutput:\n\nIn each separate line print the result after swapping the nibbles.\n\n\nConstraints:\n\n1 โ‰ค T โ‰ค 70\n1 โ‰ค X โ‰ค 255\n\n\nExample:\n\nInput:\n\n2\n100\n129\n\nOutput:\n\n70\n24\n'''\nfor _ in range(int(input())):\n a=bin(int(input()))[2:]\n if len(a)%4!=0:\n a='0'*(4-len(a)%4)+a\n c=[]\n for i in range(1,(len(a)//4)+1):\n c.append(a[4*(i-1):4*i])\n c=c[::-1]\n print(int(''.join(c),2))", "2\n100\n70\n129\n24\n" ] ], [ [ "### [Check whether K-th bit is set or not](https://practice.geeksforgeeks.org/problems/check-whether-k-th-bit-is-set-or-not/0/?track=sp-bit-magic&batchId=152)\n- Company Tag: Cisco\n- Difficulty: Basic \n- Marks: 1\n\n***Given a number N and a bit number K, check if Kth bit of N is set or not. A bit is called set if it is 1. Position of set bit '1' should be indexed starting with 0 from RSB side in binary representation of the number. Consider N = 4(100): 0th bit = 0, 1st bit = 0, 2nd bit = 1.***\n\n***Input:***\\\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow.\\ \nEach test case consists of two lines. The first line of each test case contain an integer N. \\\nThe second line of each test case contains an integer K.\\\n\\\n***Output:***\\\nCorresponding to each test case, print \"Yes\" (without quotes) if Kth bit is set else print \"No\" (without quotes) in a new line.\\\n\\\n***Constraints:***\\\n1 โ‰ค T โ‰ค 200\\\n1 โ‰ค N โ‰ค 109\\\n0 โ‰ค K โ‰ค floor(log2(N) + 1)\\\n\\\n***Example:***\\\n***Input:***\\\n3\\\n4\\\n0\\\n4\\\n2\\\n500\\\n3\\\n\\\n***Output:***\\\nNo\\\nYes\\\nNo\\\n\\\n***Explanation:***\\\n***Testcase 1:*** Binary representation of 4 is 100, in which 0th bit from LSB is not set. So, answer is No.\\", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n a=bin(int(input()))[2:]\n k=int(input())\n if a[(len(a)-1)-k]=='1':\n print('Yes')\n else:\n print('No')", "3\n4\n0\nNo\n4\n2\nYes\n500\n3\nNo\n" ] ], [ [ "### [Rightmost different bit](https://practice.geeksforgeeks.org/problems/rightmost-different-bit/0/?track=sp-bit-magic&batchId=152)\n- Difficulty: Basic\n- Marks: 1\n\n***Given two numbers M and N. The task is to find the position of rightmost different bit in binary representation of numbers.***\n\n***Input:***\\\nThe input line contains T, denoting the number of testcases. Each testcase follows. First line of each testcase contains two space separated integers M and N.\n\n***Output:***\\\nFor each testcase in new line, print the position of rightmost different bit in binary representation of numbers. If both M and N are same then print -1 in this case.\n\n***Constraints:***\\\n1 <= T <= 100\\\n1 <= M <= 103\\\n1 <= N <= 103\n\n***Example:***\\\n***Input:***\\\n2\\\n11 9\\\n52 4\n\n***Output:***\\\n2\\\n5\n\n***Explanation:***\\\n***Tescase 1:*** Binary representaion of the given numbers are: 1011 and 1001, 2nd bit from right is different.", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n a,c=input().split()\n a=bin(int(a))[2:]\n c=bin(int(c))[2:]\n an=len(a)\n cn=len(c)\n if an!=cn:\n if (an-cn)>0:\n c='0'*(an-cn)+c\n else:\n a='0'*(cn-an)+a\n k=len(a)\n for i in range(k):\n if a[k-1-i]!=c[k-1-i]:\n print(i+1)\n break\n else:\n print(-1)", "2\n11 9\n2\n52 4\n5\n" ] ], [ [ "### [Number is sparse or not](https://practice.geeksforgeeks.org/problems/number-is-sparse-or-not/0/?track=sp-bit-magic&batchId=152)\n- Difficulty: Basic\n- Marks: 1\n\n***Given a number N, check whether it is sparse or not. 
A number is said to be a sparse number if in the binary representation of the number no two or more consecutive bits are set.***\n\n***Input:***\\\nThe first line of input contains an integer T denoting the number of test cases. The first line of each test case is number 'N'.\n\n***Output:***\\\nPrint '1' if the number is sparse and '0' if the number is not sparse.\n\n***Constraints:***\\\n1 <= T <= 100\\\n1 <= N <= 103\n\n***Example:***\\\n***Input:***\\\n2\\\n2\\\n3\n\n***Output:***\\\n1\\\n0\n\n***Explanation:***\\\n***Testcase 1:*** Binary Representation of 2 is 10, which is not having consecutive set bits. So, it is sparse number.\\\n***Testcase 2:*** Binary Representation of 3 is 11, which is having consecutive set bits in it. So, it is not a sparse number.", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n a=bin(int(input()))[2:]\n if a.count('11')>0:\n print(0)\n else:\n print(1)", "1\n81\n0\n" ] ], [ [ "### [Gray Code](https://practice.geeksforgeeks.org/problems/gray-code/0/?track=sp-bit-magic&batchId=152)\n- Difficulty: Basic\n- Marks: 1\n\n***You are given a decimal number n. You need to find the gray code of the number n and convert it into decimal.\nTo see how it's done, refer here.***\n\n***Input:***\\\nThe first line contains an integer T, the number of test cases. For each test case, there is an integer n denoting the number\n\n***Output:***\\\nFor each test case, the output is gray code equivalent of n.\n\n***Constraints:***\\\n1 <= T <= 100\\\n0 <= n <= 108\n\n***Example:***\\\n***Input***\\\n2\\\n7\\\n10\n\n***Output***\\\n4\\\n15\n\n***Explanation:***\\\n***Testcase1:*** 7 is represented as 111 in binary form. The gray code of 111 is 100, in the binary form whose decimal equivalent is 4.\n\n***Testcase2:*** 10 is represented as 1010 in binary form. The gray code of 1010 is 1111, in the binary form whose decimal equivalent is 15.", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n a=bin(int(input()))[2:]\n c=a[0]\n for i in range(1,len(a)):\n k=(int(a[i])+int(a[i-1]))\n if k==0 or k==1:\n c+=str(k)\n else:\n c+='0'\n print(int(c,2))", "1\n4\n6\n" ] ], [ [ "### [Gray to Binary equivalent](https://practice.geeksforgeeks.org/problems/gray-to-binary-equivalent/0/?track=sp-bit-magic&batchId=152)\n- Difficulty: Basic\n- Marks: 1\n\n***Given N in Gray code equivalent. Find its binary equivalent.***\n\n***Input:***\\\nThe first line contains an integer T, number of test cases. For each test cases, there is an integer N denoting the number in gray equivalent.\n\n***Output:***\\\nFor each test case, in a new line, the output is the decimal equivalent number N of binary form.\n\n***Constraints:***\\\n1 <= T <= 100\\\n0 <= n <= 108\n\n***Example:***\\\n***Input***\\\n2\\\n4\\\n15\n\n***Output***\\\n7\\\n10\n\n***Explanation:***\\\n***Testcase1.*** 4 is represented as 100 and its binary equivalent is 111 whose decimal equivalent is 7.\\\n***Testcase2.*** 15 is represented as 1111 and its binary equivalent is 1010 i.e. 
10 in decimal.", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n a=bin(int(input()))[2:]\n c=a[0]\n for i in range(1,len(a)):\n k=(int(a[i])+int(c[i-1]))\n if k==0 or k==1:\n c+=str(k)\n else:\n c+='0'\n print(int(c,2))", "1\n4\n7\n" ] ], [ [ "### [Check if a Integer is power of 8 or not](https://practice.geeksforgeeks.org/problems/check-if-a-integer-is-power-of-8-or-not/0/?track=sp-bit-magic&batchId=152)\n- Difficulty: Easy\n- Marks: 2\n\n***Given a positive integer N, The task is to find if it is a power of eight or not.***\n\n***Input:***\\\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case contains an integer N.\n\n***Output:***\\\nIn new line print \"Yes\" if it is a power of 8, else print \"No\".\n\n***Constraints:***\\\n1<=T<=100\\\n1<=N<=1018\n\n***Example:***\\\n***Input:***\\\n2\\\n64\\\n75\n\n***Output:***\\\nYes\\\nNo", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n n=int(input())\n i=1\n while 8**i<=n:\n i+=1\n if 8**(i-1)==n:\n print('Yes')\n else:\n print('No')", "2\n64\nYes\n75\nNo\n" ] ], [ [ "### [Is Binary Number Multiple of 3](https://practice.geeksforgeeks.org/problems/is-binary-number-multiple-of-3/0/?track=sp-bit-magic&batchId=152)\n- Company Tags : Adobe, Amazon, Microsoft\n- Difficulty: Medium\n- Marks: 4\n\n***Given a binary number, write a program that prints 1 if given binary number is a multiple of 3. Else prints 0. The given number can be big upto 2^100. It is recommended to finish the task using one traversal of input binary string.***\n\n***Input:***\\\nThe first line contains T denoting the number of testcases. Then follows description of testcases. \nEach case contains a string containing 0's and 1's.\n\n***Output:***\\\nFor each test case, output a 1 if string is multiple of 3, else 0.\n\n***Constraints:***\\\n1<=T<=100\\\n1<=Length of Input String<=100\n\n***Example:***\\\n***Input:***\\\n2\\\n011\\\n100\n\n***Output:***\\\n1\\\n0", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n n=int(input(),2)\n if n%3==0:\n print(1)\n else:\n print(0)", "1\n011\n1\n" ] ], [ [ "### [Reverse Bits](https://practice.geeksforgeeks.org/problems/reverse-bits/0/?track=sp-bit-magic&batchId=152)\n- Company Tags : Amazon, Cisco, HCL, Nvidia, Qualcomm\n- Difficulty: Easy\n- Marks: 2\n\n***Given a 32 bit number x, reverse its binary form and print the answer in decimal.***\n\n***Input:***\\\nThe first line of input consists T denoting the number of test cases. T testcases follow. Each test case contains a single 32 bit integer\n\n***Output:***\\\nFor each test case, in a new line, print the reverse of integer.\n\n***Constraints:***\\\n1 <= T <= 100\\\n0 <= x <= 4294967295\n\n***Example:***\\\n***Input:***\\\n2\\\n1\\\n5\n\n***Output:***\\\n2147483648\\\n2684354560\n\n***Explanation:***\\\n***Testcase1:***\\\n00000000000000000000000000000001 =1\\\n10000000000000000000000000000000 =2147483648\n", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n a=bin(int(input()))[2:][::-1]\n a+='0'*(32-len(a))\n print(int(a,2))", "1\n1\n2147483648\n" ] ], [ [ "### [Swap all odd and even bits](https://practice.geeksforgeeks.org/problems/swap-all-odd-and-even-bits/0/?track=sp-bit-magic&batchId=152)\n- Difficulty: Easy\n- Marks: 2\n\n***Given an unsigned integer N. The task is to swap all odd bits with even bits. For example, if the given number is 23 (00010111), it should be converted to 43(00101011). 
Here, every even position bit is swapped with adjacent bit on right side(even position bits are highlighted in binary representation of 23), and every odd position bit is swapped with adjacent on left side.***\n\n***Input:***\\\nThe first line of input contains T, denoting the number of testcases. Each testcase contains single line.\n\n***Output:***\\\nFor each testcase in new line, print the converted number.\n\n***Constraints:***\\\n1 โ‰ค T โ‰ค 100\\\n1 โ‰ค N โ‰ค 100\n\n***Example:***\\\n***Input:***\\\n2\\\n23\\\n2\n\n***Output:***\\\n43\\\n1\n\n***Explanation:***\\\n***Testcase 1:*** BInary representation of the given number; 00010111 after swapping 00101011.", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n a=bin(int(input()))[2:]\n if len(a)%4!=0:\n a='0'*(4-len(a)%4)+a\n s=''\n for i,j in zip(a[1::2],a[::2]):\n s=s+i+j\n print(int(s,2))", "1\n23\n43\n" ], [ "def f(a,c):\n a=bin(a)[2:]\n c=bin(c)[2:]\n an=len(a)\n cn=len(c)\n if an!=cn:\n if (an-cn)>0:\n c='0'*(an-cn)+c\n else:\n a='0'*(cn-an)+a\n count=0\n for i,j in zip(a,c):\n if i !=j:\n count+=1\n return count\n\nfor _ in range(int(input())):\n count=0\n n=int(input())\n a=list(map(int,input().split()))\n for i in a:\n for j in a:\n count+=f(i,j)\n print(count)", "1\n8\n-1865008399 -524833575 -1162539990 -1003374718 -1677272376 -2046456104 -689632770 -688705725\n854\n" ], [ "if __name__ == '__main__':\n n = int(input())\n while n != 0:\n p = int(input())\n lis = [int(x) for x in input().split()]\n bits = 0\n for i in range(0, 32):\n k = 0\n for j in range(0, len(lis)):\n if lis[j] & (1 << i):\n k = k + 1\n bits += k * (len(lis) - k)\n print(2 * bits % 1000000007)\n n = n-1", "1\n8\n-1865008399 -524833575 -1162539990 -1003374718 -1677272376 -2046456104 -689632770 -688705725\n800\n" ] ], [ [ "### [Bleak Numbers](https://practice.geeksforgeeks.org/problems/bleak-numbers/0/?track=sp-bit-magic&batchId=152)\n- Company Tags : SAP Labs\n- Difficulty: Medium\n- Marks: 4\n\n***Given an integer, check whether it is Bleak or not.***\n\n***A number โ€˜nโ€™ is called Bleak if it cannot be represented as sum of a positive number x and set bit count in x, i.e., x + [countSetBits(x)](http://www.geeksforgeeks.org/count-set-bits-in-an-integer/) is not equal to n for any non-negative number x.***\n\n***Examples :***\n\n3 is not Bleak as it can be represented\nas 2 + countSetBits(2).\n\n4 is t Bleak as it cannot be represented \nas sum of a number x and countSetBits(x)\nfor any number x.\n\n***Input:***\\\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case consists of a single line. 
The first line of each test case contains a single integer N to be checked for Bleak.\n\n***Output:***\\\nPrint \"1\" or \"0\" (without quotes) depending on whether the number is Bleak or not.\n\n***Constraints:***\\\n1 <= T <= 1000\\\n1 <= N <= 10000\n\n***Example:***\\\n***Input:***\\\n3\\\n4\\\n167\\\n3\n\n***Output:***\\\n1\\\n0\\\n0", "_____no_output_____" ] ], [ [ "for _ in range(int(input())):\n n=int(input())\n for i in range(0,n+1,2):\n if (i+bin(i).count('1'))==n:\n print(0)\n break\n else:\n print(1)", "1\n887\n1\n" ], [ "a", "_____no_output_____" ], [ "a[1::2]", "_____no_output_____" ], [ "''+'2'", "_____no_output_____" ], [ "a=bin(-2)\na", "_____no_output_____" ], [ "int('1b10',2)", "_____no_output_____" ], [ "a=list(map(int,input().split()))\nxor=0\nfor i in range(len(a)):\n for j in range(i+1,len(a)):\n if a[i]^a[j]>xor:\n xor=a[i]^a[j]\nprint(xor)", "1 2 3 4 5 6 22 33 44 22 111\n121\n" ], [ "a[::2]", "_____no_output_____" ], [ "32-len(a)", "_____no_output_____" ], [ "a=bin(52)[2:]\na", "_____no_output_____" ], [ "k=0\na[(len(a)-1)-k]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb0f95e3b49a18aa56cdfe7d1ad1545c0941791a
523,487
ipynb
Jupyter Notebook
tsa/jose/TSA_COURSE_NOTEBOOKS/06-General-Forecasting-Models/00-Introduction-to-Forecasting-Revised.ipynb
juspreet51/ml_templates
60c9219f27a2ada97cde0b701c5be9321dda38c4
[ "MIT" ]
null
null
null
tsa/jose/TSA_COURSE_NOTEBOOKS/06-General-Forecasting-Models/00-Introduction-to-Forecasting-Revised.ipynb
juspreet51/ml_templates
60c9219f27a2ada97cde0b701c5be9321dda38c4
[ "MIT" ]
null
null
null
tsa/jose/TSA_COURSE_NOTEBOOKS/06-General-Forecasting-Models/00-Introduction-to-Forecasting-Revised.ipynb
juspreet51/ml_templates
60c9219f27a2ada97cde0b701c5be9321dda38c4
[ "MIT" ]
null
null
null
295.922555
53,944
0.917236
[ [ [ "___\n\n<a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a>\n___\n<center><em>Copyright Pierian Data</em></center>\n<center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center>", "_____no_output_____" ], [ "# Introduction to Forecasting\nIn the previous section we fit various smoothing models to existing data. The purpose behind this is to predict what happens next.<br>\nWhat's our best guess for next month's value? For the next six months?\n\nIn this section we'll look to extend our models into the future. First we'll divide known data into training and testing sets, and evaluate the performance of a trained model on known test data.\n\n* Goals\n * Compare a Holt-Winters forecasted model to known data\n * Understand <em>stationarity</em>, <em>differencing</em> and <em>lagging</em>\n * Introduce ARIMA and describe next steps", "_____no_output_____" ], [ "### <font color=blue>Simple Exponential Smoothing / Simple Moving Average</font>\nThis is the simplest to forecast. $\\hat{y}$ is equal to the most recent value in the dataset, and the forecast plot is simply a horizontal line extending from the most recent value.\n### <font color=blue>Double Exponential Smoothing / Holt's Method</font>\nThis model takes trend into account. Here the forecast plot is still a straight line extending from the most recent value, but it has slope.\n### <font color=blue>Triple Exponential Smoothing / Holt-Winters Method</font>\nThis model has (so far) the \"best\" looking forecast plot, as it takes seasonality into account. When we expect regular fluctuations in the future, this model attempts to map the seasonal behavior.", "_____no_output_____" ], [ "## Forecasting with the Holt-Winters Method\nFor this example we'll use the same airline_passengers dataset, and we'll split the data into 108 training records and 36 testing records. 
Then we'll evaluate the performance of the model.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n%matplotlib inline\n\ndf = pd.read_csv('../Data/airline_passengers.csv',index_col='Month',parse_dates=True)\ndf.index.freq = 'MS'\ndf.head()", "_____no_output_____" ], [ "df.tail()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 144 entries, 1949-01-01 to 1960-12-01\nFreq: MS\nData columns (total 1 columns):\nThousands of Passengers 144 non-null int64\ndtypes: int64(1)\nmemory usage: 2.2 KB\n" ] ], [ [ "## Train Test Split", "_____no_output_____" ] ], [ [ "train_data = df.iloc[:108] # Goes up to but not including 108\ntest_data = df.iloc[108:]", "_____no_output_____" ] ], [ [ "## Fitting the Model", "_____no_output_____" ] ], [ [ "from statsmodels.tsa.holtwinters import ExponentialSmoothing\n\nfitted_model = ExponentialSmoothing(train_data['Thousands of Passengers'],trend='mul',seasonal='mul',seasonal_periods=12).fit()", "_____no_output_____" ] ], [ [ "## Evaluating Model against Test Set", "_____no_output_____" ] ], [ [ "# YOU CAN SAFELY IGNORE WARNINGS HERE!\n# THIS WILL NOT AFFECT YOUR FORECAST, IT'S JUST SOMETHING STATSMODELS NEEDS TO UPDATE UPON NEXT RELEASE.\ntest_predictions = fitted_model.forecast(36).rename('HW Forecast')", "_____no_output_____" ], [ "test_predictions", "_____no_output_____" ], [ "train_data['Thousands of Passengers'].plot(legend=True,label='TRAIN')\ntest_data['Thousands of Passengers'].plot(legend=True,label='TEST',figsize=(12,8));", "_____no_output_____" ], [ "train_data['Thousands of Passengers'].plot(legend=True,label='TRAIN')\ntest_data['Thousands of Passengers'].plot(legend=True,label='TEST',figsize=(12,8))\ntest_predictions.plot(legend=True,label='PREDICTION');", "_____no_output_____" ], [ "train_data['Thousands of Passengers'].plot(legend=True,label='TRAIN')\ntest_data['Thousands of Passengers'].plot(legend=True,label='TEST',figsize=(12,8))\ntest_predictions.plot(legend=True,label='PREDICTION',xlim=['1958-01-01','1961-01-01']);", "_____no_output_____" ] ], [ [ "## Evaluation Metrics", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mean_squared_error,mean_absolute_error", "_____no_output_____" ], [ "mean_absolute_error(test_data,test_predictions)", "_____no_output_____" ], [ "mean_squared_error(test_data,test_predictions)", "_____no_output_____" ], [ "np.sqrt(mean_squared_error(test_data,test_predictions))", "_____no_output_____" ], [ "test_data.describe()", "_____no_output_____" ] ], [ [ "## Forecasting into Future", "_____no_output_____" ] ], [ [ "final_model = ExponentialSmoothing(df['Thousands of Passengers'],trend='mul',seasonal='mul',seasonal_periods=12).fit()", "_____no_output_____" ], [ "forecast_predictions = final_model.forecast(36)", "_____no_output_____" ], [ "df['Thousands of Passengers'].plot(figsize=(12,8))\nforecast_predictions.plot();", "_____no_output_____" ] ], [ [ "# Stationarity\nTime series data is said to be <em>stationary</em> if it does <em>not</em> exhibit trends or seasonality. 
That is, the mean, variance and covariance should be the same for any segment of the series, and are not functions of time.<br>\nThe file <tt>samples.csv</tt> contains made-up datasets that illustrate stationary and non-stationary data.\n\n<div class=\"alert alert-info\"><h3>For Further Reading:</h3>\n<strong>\n<a href='https://otexts.com/fpp2/stationarity.html'>Forecasting: Principles and Practice</a></strong>&nbsp;&nbsp;<font color=black>Stationarity and differencing</font></div>", "_____no_output_____" ] ], [ [ "df2 = pd.read_csv('../Data/samples.csv',index_col=0,parse_dates=True)\ndf2.head()", "_____no_output_____" ], [ "df2['a'].plot(ylim=[0,100],title=\"STATIONARY DATA\").autoscale(axis='x',tight=True);", "_____no_output_____" ], [ "df2['b'].plot(ylim=[0,100],title=\"NON-STATIONARY DATA\").autoscale(axis='x',tight=True);", "_____no_output_____" ], [ "df2['c'].plot(ylim=[0,10000],title=\"MORE NON-STATIONARY DATA\").autoscale(axis='x',tight=True);", "_____no_output_____" ] ], [ [ "In an upcoming section we'll learn how to test for stationarity.", "_____no_output_____" ], [ "# Differencing\n## First Order Differencing\nNon-stationary data can be made to look stationary through <em>differencing</em>. A simple method called <em>first order differencing</em> calculates the difference between consecutive observations.\n\n&nbsp;&nbsp;&nbsp;&nbsp;$y^{\\prime}_t = y_t - y_{t-1}$\n\nIn this way a linear trend is transformed into a horizontal set of values.\n", "_____no_output_____" ] ], [ [ "# Calculate the first difference of the non-stationary dataset \"b\"\ndf2['d1b'] = df2['b'] - df2['b'].shift(1)\n\ndf2[['b','d1b']].head()", "_____no_output_____" ] ], [ [ "Notice that differencing eliminates one or more rows of data from the beginning of the series.", "_____no_output_____" ] ], [ [ "df2['d1b'].plot(title=\"FIRST ORDER DIFFERENCE\").autoscale(axis='x',tight=True);", "_____no_output_____" ] ], [ [ "An easier way to perform differencing on a pandas Series or DataFrame is to use the built-in <tt>.diff()</tt> method:", "_____no_output_____" ] ], [ [ "df2['d1b'] = df2['b'].diff()\n\ndf2['d1b'].plot(title=\"FIRST ORDER DIFFERENCE\").autoscale(axis='x',tight=True);", "_____no_output_____" ] ], [ [ "### Forecasting on first order differenced data\nWhen forecasting with first order differences, the predicted values have to be added back in to the original values in order to obtain an appropriate forecast.\n\nLet's say that the next five forecasted values after applying some model to <tt>df['d1b']</tt> are <tt>[7,-2,5,-1,12]</tt>. We need to perform an <em>inverse transformation</em> to obtain values in the scale of the original time series.", "_____no_output_____" ] ], [ [ "# For our example we need to build a forecast series from scratch\n# First determine the most recent date in the training set, to know where the forecast set should start\ndf2[['b']].tail(3)", "_____no_output_____" ], [ "# Next set a DateTime index for the forecast set that extends 5 periods into the future\nidx = pd.date_range('1960-01-01', periods=5, freq='MS')\nz = pd.DataFrame([7,-2,5,-1,12],index=idx,columns=['Fcast'])\nz", "_____no_output_____" ] ], [ [ "The idea behind an inverse transformation is to start with the most recent value from the training set, and to add a cumulative sum of Fcast values to build the new forecast set. 
For this we'll use the pandas <tt>.cumsum()</tt> function which does the reverse of <tt>.diff()</tt>", "_____no_output_____" ] ], [ [ "z['forecast']=df2['b'].iloc[-1] + z['Fcast'].cumsum()\nz", "_____no_output_____" ], [ "df2['b'].plot(figsize=(12,5), title=\"FORECAST\").autoscale(axis='x',tight=True)\n\nz['forecast'].plot();", "_____no_output_____" ] ], [ [ "## Second order differencing\nSometimes the first difference is not enough to attain stationarity, particularly if the trend is not linear. We can difference the already differenced values again to obtain a second order set of values.\n\n&nbsp;&nbsp;&nbsp;&nbsp;$\\begin{split}y_{t}^{\\prime\\prime} &= y_{t}^{\\prime} - y_{t-1}^{\\prime} \\\\\n&= (y_t - y_{t-1}) - (y_{t-1} - y_{t-2}) \\\\\n&= y_t - 2y_{t-1} + y_{t-2}\\end{split}$", "_____no_output_____" ] ], [ [ "# First we'll look at the first order difference of dataset \"c\"\ndf2['d1c'] = df2['c'].diff()\n\ndf2['d1c'].plot(title=\"FIRST ORDER DIFFERENCE\").autoscale(axis='x',tight=True);", "_____no_output_____" ] ], [ [ "Now let's apply a second order difference to dataset \"c\".", "_____no_output_____" ] ], [ [ "# We can do this from the original time series in one step\ndf2['d2c'] = df2['c'].diff().diff()\n\ndf2[['c','d1c','d2c']].head()", "_____no_output_____" ], [ "df2['d2c'].plot(title=\"SECOND ORDER DIFFERENCE\").autoscale(axis='x',tight=True);", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-info\"><strong>NOTE: </strong>This is different from <font color=black><tt>df2['c'].diff(2)</tt></font>, which would provide a first order difference spaced 2 lags apart.<br>\nWe'll use this technique later to address seasonality.</div>", "_____no_output_____" ], [ "### Forecasting on second order differenced data\nAs before, the prediction values have to be added back in to obtain an appropriate forecast.\n\nTo invert the second order transformation and obtain forecasted values for $\\hat y_t$ we have to solve the second order equation for $y_t$:\n\n&nbsp;&nbsp;&nbsp;&nbsp;$\\begin{split}y_{t}^{\\prime\\prime} &= y_t - 2y_{t-1} + y_{t-2} \\\\\ny_t &= y_{t}^{\\prime\\prime} + 2y_{t-1} - y_{t-2}\\end{split}$\n\nLet's say that the next five forecasted values after applying some model to <tt>df['d2c']</tt> are <tt>[7,-2,5,-1,12]</tt>.", "_____no_output_____" ] ], [ [ "# For our example we need to build a forecast series from scratch\nidx = pd.date_range('1960-01-01', periods=5, freq='MS')\nz = pd.DataFrame([7,-2,5,-1,12],index=idx,columns=['Fcast'])\nz", "_____no_output_____" ] ], [ [ "One way to invert a 2nd order transformation is to follow the formula above:", "_____no_output_____" ] ], [ [ "forecast = []\n\n# Capture the two most recent values from the training set\nv2,v1 = df2['c'].iloc[-2:]\n\n# Apply the formula\nfor i in z['Fcast']:\n newval = i + 2*v1 - v2\n forecast.append(newval)\n v2,v1 = v1,newval\n\nz['forecast']=forecast\nz", "_____no_output_____" ] ], [ [ "Another, perhaps more straightforward method is to create a first difference set from the second, then build the forecast set from the first difference. 
We'll again use the pandas <tt>.cumsum()</tt> function which does the reverse of <tt>.diff()</tt>", "_____no_output_____" ] ], [ [ "# Add the most recent first difference from the training set to the Fcast cumulative sum\nz['firstdiff'] = (df2['c'].iloc[-1]-df2['c'].iloc[-2]) + z['Fcast'].cumsum()\n\n# Now build the forecast values from the first difference set\nz['forecast'] = df2['c'].iloc[-1] + z['firstdiff'].cumsum()\n\nz[['Fcast','firstdiff','forecast']]", "_____no_output_____" ], [ "df2['c'].plot(figsize=(12,5), title=\"FORECAST\").autoscale(axis='x',tight=True)\n\nz['forecast'].plot();", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-danger\"><strong>NOTE:</strong> statsmodels has a built-in differencing tool:<br>\n \n<tt><font color=black>&nbsp;&nbsp;&nbsp;&nbsp;from statsmodels.tsa.statespace.tools import diff<br><br>\n&nbsp;&nbsp;&nbsp;&nbsp;df2['d1'] = diff(df2['b'],k_diff=1)</font></tt><br><br>\n \nthat performs the same first order differencing operation shown above. We chose not to use it here because seasonal differencing is somewhat complicated. To difference based on 12 lags, the code would be<br><br>\n\n<tt><font color=black>&nbsp;&nbsp;&nbsp;&nbsp;df2['d12'] = diff(df2['b'],k_diff=0,k_seasonal_diff=1,seasonal_periods=12)\n</font></tt><br><br>\n\nwhereas with pandas it's simply<br><br>\n\n<tt><font color=black>&nbsp;&nbsp;&nbsp;&nbsp;df2['d12'] = df2['b'].diff(12)\n</font></tt>\n</div>", "_____no_output_____" ], [ "## Lagging\nAlso known as \"backshifting\", lagging notation reflects the value of $y$ at a prior point in time. This is a useful technique for performing <em>regressions</em> as we'll see in upcoming sections.\n\n\\begin{split}L{y_t} = y_{t-1} & \\text{ one lag shifts the data back one period}\\\\\nL^{2}{y_t} = y_{t-2} & \\text{ two lags shift the data back two periods} \\end{split}\n<br><br>\n<table>\n<tr><td>$y_t$</td><td>6</td><td>8</td><td>3</td><td>4</td><td>9</td><td>2</td><td>5</td></tr>\n<tr><td>$y_{t-1}$</td><td>8</td><td>3</td><td>4</td><td>9</td><td>2</td><td>5</td></tr>\n<tr><td>$y_{t-2}$</td><td>3</td><td>4</td><td>9</td><td>2</td><td>5</td></tr>\n</table>\n", "_____no_output_____" ], [ "# Introduction to ARIMA Models\nWe'll investigate a variety of different forecasting models in upcoming sections, but they all stem from ARIMA.\n\n<strong>ARIMA</strong>, or <em>Autoregressive Integrated Moving Average</em> is actually a combination of 3 models:\n* <strong>AR(p)</strong> Autoregression - a regression model that utilizes the dependent relationship between a current observation and observations over a previous period\n* <strong>I(d)</strong> Integration - uses differencing of observations (subtracting an observation from an observation at the previous time step) in order to make the time series stationary\n* <strong>MA(q)</strong> Moving Average - a model that uses the dependency between an observation and a residual error from a moving average model applied to lagged observations.\n\n<strong>Moving Averages</strong> we've already seen with EWMA and the Holt-Winters Method.<br>\n<strong>Integration</strong> will apply differencing to make a time series stationary, which ARIMA requires.<br>\n<strong>Autoregression</strong> is explained in detail in the next section. 
Here we're going to correlate a current time series with a lagged version of the same series.<br>\nOnce we understand the components, we'll investigate how to best choose the $p$, $d$ and $q$ values required by the model.", "_____no_output_____" ], [ "### Great, let's get started!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
cb0f9a43cb73df5e1f64d3091bd2ee7a7ad57b52
68,725
ipynb
Jupyter Notebook
Paul/Paul.ipynb
Fiona55/hiv_experiment
de79d5bac3499025e1a7e2d456810490c6ecb4e9
[ "MIT" ]
null
null
null
Paul/Paul.ipynb
Fiona55/hiv_experiment
de79d5bac3499025e1a7e2d456810490c6ecb4e9
[ "MIT" ]
null
null
null
Paul/Paul.ipynb
Fiona55/hiv_experiment
de79d5bac3499025e1a7e2d456810490c6ecb4e9
[ "MIT" ]
3
2022-03-14T15:27:46.000Z
2022-03-22T13:00:44.000Z
74.701087
12,334
0.704198
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom tqdm import tqdm", "_____no_output_____" ], [ "from hiv_patient import HIVPatient\nfrom buffer import Buffer", "_____no_output_____" ] ], [ [ "### Create dataset", "_____no_output_____" ] ], [ [ "patient = HIVPatient(clipping=False,logscale=False)", "_____no_output_____" ], [ "FQI_buffer = Buffer(50000)\n", "_____no_output_____" ], [ "for j in tqdm(range(30)):\n s = patient.reset(mode=\"unhealthy\")\n for i in range(200):\n a = np.random.choice(4)\n s_, r, d, _ = patient.step(a)\n FQI_buffer.append(s,a,r,s_,d)\n s = s_\n\n", "100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 30/30 [08:28<00:00, 16.96s/it]\n" ] ], [ [ "### FQI ", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor", "_____no_output_____" ], [ "from typing import Optional,List, Callable\nfrom sklearn.base import BaseEstimator\n\ndef estimator_factory(*args, **kwargs):\n return RandomForestRegressor(*args, **kwargs)\n\n\ndef update(memory : Buffer,\n gamma : float = 0.98,\n estimator : Optional[BaseEstimator] = None,\n estimator_factory: Callable = estimator_factory):\n\n states, actions, rewards, next_state, done = memory.get()\n actions = np.expand_dims(actions,axis=1)\n target = np.expand_dims(rewards,axis=1)\n #target = rewards\n\n if estimator is not None:\n q_values = np.zeros((len(rewards),4))\n for a in range(4):\n actions_ = a*np.ones((len(rewards),1))\n X = np.concatenate((next_state,actions_),axis=1)\n q_values[:,a] = estimator.predict(X)\n \n #print(f\"q_values --> {q_values}\")\n qmax = np.expand_dims(np.max(q_values,axis=1),axis=1)\n #print(f\"Shape de qmax --> {qmax.shape}, Shape de target --> {target.shape}, Shape de done --> {done.shape}\")\n target += gamma*qmax * (1 - np.expand_dims(done,axis=1))\n #print(f\"Shape de target --> {target.shape}\")\n\n if estimator is None:\n estimator = estimator_factory()\n\n\n #print(f\"States shape --> {states.shape}, actions shape --> {actions.shape},target{target.shape}\")\n data = np.concatenate((states,actions),axis=1)\n #print(f\"Data shape --> {data.shape}\")\n estimator.fit(data,target)\n\n\n return estimator\n\n\n\n\n", "_____no_output_____" ] ], [ [ "### Premier entraรฎnement !", "_____no_output_____" ] ], [ [ "estimator = None\nfor _ in tqdm(range(100)):\n\n estimator = update(FQI_buffer,estimator=estimator)", " 0%| | 0/100 [00:00<?, ?it/s]/tmp/ipykernel_1097/1251894869.py:38: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(data,target)\n 1%| | 1/100 [00:03<06:22, 3.86s/it]/tmp/ipykernel_1097/1251894869.py:38: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(data,target)\n 2%|โ– | 2/100 [00:08<06:44, 4.12s/it]/tmp/ipykernel_1097/1251894869.py:38: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(data,target)\n 3%|โ–Ž | 3/100 [00:13<07:14, 4.48s/it]/tmp/ipykernel_1097/1251894869.py:38: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(data,target)\n100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 100/100 [07:46<00:00, 4.67s/it]\n" ] ], [ [ "### Deuxiรจme entraรฎnement", "_____no_output_____" ] ], [ [ "for j in tqdm(range(30)):\n\n s = patient.reset(mode=\"unhealthy\")\n\n for step in range(200):\n \n if np.random.random()<0.15:\n action = np.random.choice(4)\n \n else:\n if estimator is not None:\n greedy = np.zeros((4,2))\n for a in range(4):\n #print(f\"States shape --> {np.expand_dims(s.T,axis=1).shape}, actions shape --> {np.expand_dims(np.array([a]),axis=1).shape}\")\n sta = np.expand_dims(s,axis=1)\n act = np.expand_dims(np.array([a]),axis=1)\n X = np.concatenate((sta,act),axis=0).T\n #print((f\"shape of X --> {X.shape}\"))\n q = estimator.predict(X)\n #print(f'Q -->{q}')\n greedy[a,0] , greedy[a,1] = a , q\n\n #print(greedy)\n action = greedy[np.argmax(greedy[:,0]),0]\n \n else:\n action = np.random.choice(4)\n\n\n s_, r, d, _ = patient.step(int(action))\n FQI_buffer.append(s,a,r,s_,d)\n s = s_\n\n", "100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 30/30 [20:02<00:00, 40.08s/it]\n" ], [ "while _ in tqdm(range(100)):\n estimator = update(FQI_buffer,estimator=estimator)", "100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 100/100 [00:00<00:00, 56096.08it/s]\n" ] ], [ [ "### Affichons les rรฉsultats", "_____no_output_____" ] ], [ [ "\ns = patient.reset(mode=\"unhealthy\")\nT1,T2,T1_,T2_,V,E = np.zeros(200), np.zeros(200), np.zeros(200), np.zeros(200), np.zeros(200), np.zeros(200)\nactions = []\nfor step in tqdm(range(200)):\n T1[step], T2[step], T1_[step], T2_[step], V[step], E[step] = s \n\n greedy = np.zeros((4,2))\n \n for a in range(4):\n #print(f\"States shape --> {np.expand_dims(s.T,axis=1).shape}, actions shape --> {np.expand_dims(np.array([a]),axis=1).shape}\")\n sta = np.expand_dims(s,axis=1)\n act = np.expand_dims(np.array([a]),axis=1)\n X = np.concatenate((sta,act),axis=0).T\n #print((f\"shape of X --> {X.shape}\"))\n q = estimator.predict(X)\n #print(f'Q -->{q}')\n greedy[a,0] , greedy[a,1] = a , q\n\n #print(greedy)\n action = greedy[np.argmax(greedy[:,0]),0]\n actions += [action]\n \n\n\n s_, r, d, _ = patient.step(int(action))\n FQI_buffer.append(s,a,r,s_,d)\n s = s_", "100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 200/200 [00:59<00:00, 3.37it/s]\n" ] ], [ [ "Post traitement", "_____no_output_____" ] ], [ [ "actions", "_____no_output_____" ], [ "plt.plot(np.log10(E))", "_____no_output_____" ], [ "plt.plot(np.log10(T1))", "_____no_output_____" ], [ "FQI_buffer.get()", "_____no_output_____" ], [ "np.array(FQI_buffer.actions).shape", "_____no_output_____" ], [ "_, _, rewards, _, _ = FQI_buffer.get()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
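One subtlety in the FQI notebook above: in the greedy rollout and evaluation cells, `np.argmax(greedy[:,0])` ranks the action ids stored in column 0 rather than the predicted Q-values stored in column 1, so the selected action is always the largest id; the buffer call in the rollout also stores the stale inner-loop variable `a` instead of `action`, and `while _ in tqdm(range(100))` reads as if a `for` loop was intended. A minimal greedy-selection helper is sketched below as a reference only, not as part of the original notebook. It assumes the regressor was fit on rows of the form `[state, action]`, exactly as in the `update` function, and that a state is a 1-D array of the six patient variables. Fitting with `target.ravel()` would also avoid the repeated `DataConversionWarning` seen in the training output.

```python
import numpy as np

def greedy_action(estimator, state, n_actions=4):
    """Return the action with the highest predicted Q-value for one state.

    Assumes `estimator` was fit on rows [state, action], as in `update` above.
    """
    states = np.tile(state, (n_actions, 1))           # (n_actions, state_dim)
    actions = np.arange(n_actions).reshape(-1, 1)     # (n_actions, 1)
    q_values = estimator.predict(np.concatenate((states, actions), axis=1))
    return int(np.argmax(q_values))                   # argmax over Q-values, not action ids

# Hypothetical use in the rollout:
#   action = greedy_action(estimator, s) if estimator is not None else np.random.choice(4)
#   FQI_buffer.append(s, action, r, s_, d)
```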
cb0f9ccb9ebce531cb7b2e3903fb1d9be3780268
241,745
ipynb
Jupyter Notebook
documents/atiam_ml/05b_probabilistic_machine_learning.ipynb
esling/esling.github.io
a50f29a2901713f2c69f6c331a2462c479e539c5
[ "MIT" ]
1
2020-10-28T22:49:38.000Z
2020-10-28T22:49:38.000Z
documents/atiam_ml/05b_probabilistic_machine_learning.ipynb
esling/esling.github.io
a50f29a2901713f2c69f6c331a2462c479e539c5
[ "MIT" ]
null
null
null
documents/atiam_ml/05b_probabilistic_machine_learning.ipynb
esling/esling.github.io
a50f29a2901713f2c69f6c331a2462c479e539c5
[ "MIT" ]
5
2016-10-19T16:01:59.000Z
2021-09-20T04:59:13.000Z
213.179012
92,856
0.899282
[ [ [ "# Generative models - variational auto-encoders\n\n### Author: Philippe Esling ([email protected])\n\nIn this course we will cover\n1. A [quick recap](#recap) on simple probability concepts (and in TensorFlow)\n2. A formal introduction to [Variational Auto-Encoders](#vae) (VAEs)\n3. An explanation of the [implementation](#implem) of VAEs\n4. Some [modifications and tips to improve the reconstruction](#improve) of VAEs **(exercise)**", "_____no_output_____" ], [ "<a id=\"recap\"> </a>\n\n## Quick recap on probability\n\nThe field of probability aims to model random or uncertain events. Hence, a random variable $X$ denotes a quantity that is uncertain, such as the result of an experiment (flipping a coin) or the measurement of an uncertain property (measuring the temperature). If we observe several occurrences of the variable $\\{\\mathbf{x}_{i}\\}_{i=1}$, it might take different values on each occasion, but some values may occur more often than others. This information is captured by the _probability distribution_ $p(\\mathbf{x})$ of the random variable.\n\nTo understand these concepts graphically, we will rely on the `Tensorflow Probability` package.", "_____no_output_____" ] ], [ [ "import tensorflow_probability as tfp\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "### Probability distributions\n\n#### Discrete distributions\n\nLet $\\mathbf{x}$ be a discrete random variable with range $R_{X}=\\{x_1,\\cdots,x_n\\}$ (finite or countably infinite). The function\n\\begin{equation}\n p_{X}(x_{i})=p(X=x_{i}), \\forall i\\in\\{1,\\cdots,n\\}\n\\end{equation}\nis called the probability mass function (PMF) of $X$.\n\nHence, the PMF defines the probabilities of all possible values for a random variable. The above notation allows to express that the PMF is defined for the random variable $X$, so that $p_{X}(1)$ gives the probability that $X=1$. For discrete random variables, the PMF is also called the \\textit{probability distribution}. The PMF is a probability measure, therefore it satisfies all the corresponding properties\n- $0 \\leq p_{X}(x_i) < 1, \\forall x_i$\n- $\\sum_{x_i\\in R_{X}} p_{X}(x_i) = 1$\n- $\\forall A \\subset R_{X}, p(X \\in A)=\\sum_{x_a \\in A}p_{X}(x_a)$", "_____no_output_____" ], [ "A very simple example of discrete distribution is the `Bernoulli` distribution. With this distribution, we can model a coin flip. If we throw the coin a very large number of times, we hope to see on average an equal amount of _heads_ and _tails_.", "_____no_output_____" ] ], [ [ "bernoulli = tfp.distributions.Bernoulli(probs=0.5)\nsamples = bernoulli.sample(10000)\nsns.distplot(samples)\nplt.title(\"Samples from a Bernoulli (coin toss)\")\nplt.show()", "_____no_output_____" ] ], [ [ "However, we can also _sample_ from the distribution to have individual values of a single throw. In that case, we obtain a series of separate events that _follow_ the distribution ", "_____no_output_____" ] ], [ [ "vals = ['heads', 'tails']\nsamples = bernoulli.sample(10)\nfor s in samples:\n print('Coin is tossed on ' + vals[s])", "Coin is tossed on heads\nCoin is tossed on heads\nCoin is tossed on tails\nCoin is tossed on heads\nCoin is tossed on heads\nCoin is tossed on tails\nCoin is tossed on tails\nCoin is tossed on heads\nCoin is tossed on tails\nCoin is tossed on tails\n" ] ], [ [ "#### Continuous distributions\n\nThe same ideas apply to _continuous_ random variables, which can model for instance the height of human beings. 
If we try to guess the height of someone that we do not know, there is a higher probability that this person will be around 1m70, instead of 20cm or 3m. For the rest of this course, we will use the shorthand notation $p(\\mathbf{x})$ for the distribution $p(\\mathbf{x}=x_{i})$, which expresses for a real-valued random variable $\\mathbf{x}$, evaluated at $x_{i}$, the probability that $\\mathbf{x}$ takes the value $x_i$.\n\nOne notorious example of such distributions is the Gaussian (or Normal) distribution, which is defined as \n\\begin{equation}\n p(x)=\\mathcal{N}(\\mu,\\sigma)=\\frac{1}{\\sqrt{2\\pi\\sigma^{2}}}e^{-\\frac{(x-\\mu)^{2}}{2\\sigma^{2}}}\n\\end{equation}\n\nSimilarly as before, we can observe the behavior of this distribution with the following code", "_____no_output_____" ] ], [ [ "normal = tfp.distributions.Normal(loc=0., scale=1.)\nsamples = normal.sample(10000)\nsns.distplot(samples)\nplt.title(\"Samples from a standard Normal\")\nplt.show()", "_____no_output_____" ] ], [ [ "### Comparing distributions (KL divergence)\n$\n\\newcommand{\\R}{\\mathbb{R}}\n\\newcommand{\\bb}[1]{\\mathbf{#1}}\n\\newcommand{\\bx}{\\bb{x}}\n\\newcommand{\\by}{\\bb{y}}\n\\newcommand{\\bz}{\\bb{z}}\n\\newcommand{\\KL}[2]{\\mathcal{D}_{\\text{KL}}\\left[#1 \\| #2\\right]}$\nOriginally defined in the field of information theory, the _Kullback-Leibler (KL) divergence_ (usually noted $\\KL{p(\\bx)}{q(\\bx)}$) is a dissimilarity measure between two probability distributions $p(\\bx)$ and $q(\\bx)$. In the view of information theory, it can be understood as the cost in number of bits necessary for coding samples from $p(\\bx)$ by using a code optimized for $q(\\bx)$ rather than the code optimized for $p(\\bx)$. In the view of probability theory, it represents the amount of information lost when we use $q(\\bx)$ to approximate the true distribution $p(\\bx)$. %that explicit the cost incurred if events were generated by $p(\\bx)$ but charged under $q(\\bx)$\n\n\nGiven two probability distributions $p(\\bx)$ and $q(\\bx)$, the Kullback-Leibler divergence of $q(\\bx)$ _from_ $p(\\bx)$ is defined to be\n\\begin{equation}\n \\KL{p(\\bx)}{q(\\bx)}=\\int_{\\R} p(\\bx) \\log \\frac{p(\\bx)}{q(\\bx)}d\\bx\n\\end{equation}\n\nNote that this dissimilarity measure is \\textit{asymmetric}, therefore, we have\n\\begin{equation}\n \\KL{p(\\bx)}{q(\\bx)}\\neq \\KL{q(\\bx)}{p(\\bx)}\n\\end{equation}\nThis asymmetry also describes an interesting behavior of the KL divergence, depending on the order to which it is evaluated. The KL divergence can either be a _mode-seeking_ or _mode-coverage} measure.", "_____no_output_____" ], [ "<a id=\"vae\"></a>\n## Variational auto-encoders\n\nAs we have seen in the previous AE course, VAEs are also a form generative models. However, they are defined from a more sound probabilistic perspective. to find the underlying probability distribution of the data $p(\\mathbf{x})$ based on a set of examples in $\\mathbf{x}\\in\\mathbb{R}^{d_{x}}$. To do so, we consider *latent variables* defined in a lower-dimensional space $\\mathbf{z}\\in\\mathbb{R}^{d_{z}}$ ($d_{z} \\ll d_{x}$) with the joint probability distribution $p(\\mathbf{x}, \\mathbf{z}) = p(\\mathbf{x} \\vert \\mathbf{z})p(\\mathbf{z})$. 
Unfortunately, for complex distributions this integral is too complex and cannot be found in closed form.", "_____no_output_____" ], [ "\n\n### Variational inference\n\nThe idea of *variational inference* (VI) allows to solve this problem through *optimization* by assuming a simpler approximate distribution $q_{\\phi}(\\mathbf{z}\\vert\\mathbf{x})\\in\\mathcal{Q}$ from a family $\\mathcal{Q}$ of approximate densities. Hence, the goal is to minimize the difference between this approximation and the real distribution. Therefore, this turns into the optimization problem of minimizing the Kullback-Leibler (KL) divergence between the parametric approximation and the original density\n\n$$\nq_{\\phi}^{*}(\\mathbf{z}\\vert \\mathbf{x})=\\text{argmin}_{q_{\\phi}(\\mathbf{z} \\vert \\mathbf{x})\\in\\mathcal{Q}} \\mathcal{D}_{KL} \\big[ q_{\\phi}\\left(\\mathbf{z} \\vert \\mathbf{x}\\right) \\parallel p\\left(\\mathbf{z} \\vert \\mathbf{x}\\right) \\big]\n\\tag{2}\n$$\n\nBy developing this KL divergence and re-arranging terms (the detailed development can be found in [3](#reference1)), we obtain\n\n$$\n\\log{p(\\mathbf{x})} - D_{KL} \\big[ q_{\\phi}(\\mathbf{z} \\vert \\mathbf{x}) \\parallel p(\\mathbf{z} \\vert \\mathbf{x}) \\big] =\n\\mathbb{E}_{\\mathbf{z}} \\big[ \\log{p(\\mathbf{x} \\vert \\mathbf{z})}\\big] - D_{KL} \\big[ q_{\\phi}(\\mathbf{z} \\vert \\mathbf{x}) \\parallel p(\\mathbf{z}) \\big]\n\\tag{3}\n$$\n\nThis formulation describes the quantity we want to maximize $\\log p(\\mathbf{x})$ minus the error we make by using an approximate $q$ instead of $p$. Therefore, we can optimize this alternative objective, called the *evidence lower bound* (ELBO)\n\n$$\n\\begin{equation}\n\\mathcal{L}_{\\theta, \\phi} = \\mathbb{E} \\big[ \\log{ p_\\theta (\\mathbf{x|z}) } \\big] - \\beta \\cdot D_{KL} \\big[ q_\\phi(\\mathbf{z|x}) \\parallel p_\\theta(\\mathbf{z}) \\big]\n\\end{equation}\n\\tag{4}\n$$\n\nWe can see that this equation involves $q_{\\phi}(\\mathbf{z} \\vert \\mathbf{x})$ which *encodes* the data $\\mathbf{x}$ into the latent representation $\\mathbf{z}$ and a *decoder* $p(\\mathbf{x} \\vert \\mathbf{z})$, which allows generating a data vector $\\mathbf{x}$ given a latent configuration $\\mathbf{z}$. Hence, this structure defines the *Variational Auto-Encoder* (VAE).\n\nThe VAE objective can be interpreted intuitively. The first term increases the likelihood of the data generated given a configuration of the latent, which amounts to minimize the *reconstruction error*. The second term represents the error made by using a simpler posterior distribution $q_{\\phi}(\\mathbf{z} \\vert \\mathbf{x})$ compared to the true prior $p_{\\theta}(\\mathbf{z})$. Therefore, this *regularizes* the choice of approximation $q$ so that it remains close to the true posterior distribution [3].", "_____no_output_____" ], [ "### Reparametrization trick\n\nNow, while this formulation has some very interesting properties, it involves sampling operations, where we need to draw the latent point $\\mathbf{z}$ from the distribution $q_{\\phi}(\\mathbf{z}\\vert\\mathbf{x})$. 
The simplest choice for this variational approximate posterior is a multivariate Gaussian with a diagonal covariance structure (which leads to independent Gaussians on every dimension, called the *mean-field* family) so that\n$$\n\\text{log}q_\\phi(\\mathbf{z}\\vert\\mathbf{x}) = \\text{log}\\mathcal{N}(\\mathbf{z};\\mathbf{\\mu}^{(i)},\\mathbf{\\sigma}^{(i)})\n\\tag{5}\n$$\nwhere the mean $\\mathbf{\\mu}^{(i)}$ and standard deviation $\\mathbf{\\sigma}^{(i)}$ of the approximate posterior are different for each input point and are produced by our encoder parametrized by its variational parameters $\\phi$. Now the KL divergence between this distribution and a simple prior $\\mathcal{N}(\\mathbf{0}, \\mathbf{I})$ can be very simply obtained with\n$$\nD_{KL} \\big[ q_\\phi(\\mathbf{z|x}) \\parallel \\mathcal{N}(\\mathbf{0}, \\mathbf{I}) \\big] = \\frac{1}{2}\\sum_{j=1}^{D}\\left(1+\\text{log}((\\sigma^{(i)}_j)^2)+(\\mu^{(i)}_j)^2+(\\sigma^{(i)}_j)^2\\right)\n\\tag{6}\n$$\n\nWhile this looks convenient, we will still have to perform gradient descent through a sampling operation, which is non-differentiable. To solve this issue, we can use the *reparametrization trick*, which takes the sampling operation outside of the gradient flow by considering $\\mathbf{z}^{(i)}=\\mathbf{\\mu}^{(i)}+\\mathbf{\\sigma}^{(i)}\\odot\\mathbf{\\epsilon}^{(l)}$ with $\\mathbf{\\epsilon}^{(l)}\\sim\\mathcal{N}(\\mathbf{0}, \\mathbf{I})$", "_____no_output_____" ], [ "<a id=\"implem\"> </a>\n\n## VAE implementation\n\nAs we have seen, VAEs can be simply implemented by decomposing the above series of operations into an `encoder` which represents the distribution $q_\\phi(\\mathbf{z}\\vert\\mathbf{x})$, from which we will sample some values $\\tilde{\\mathbf{z}}$ (using the reparametrization trick) and compute the Kullback-Leibler (KL) divergence. Then, we use these values as input to a `decoder` which represents the distribution $p_\\theta(\\mathbf{x}\\vert\\mathbf{z})$ so that we can produce a reconstruction $\\tilde{\\mathbf{x}}$ and compute the reconstruction error. \n\nTherefore, we can define the VAE based on our previous implementation of the AE that we recall here", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras import layers, losses\nfrom tensorflow.keras.datasets import fashion_mnist\nfrom tensorflow.keras.models import Model\n\nclass AE(Model):\n def __init__(self, encoder, decoder, encoding_dim):\n super(AE, self).__init__()\n self.encoding_dim = encoding_dim\n self.encoder = encoder\n self.decoder = decoder\n\n def call(self, x):\n encoded = self.encoder(x)\n decoded = self.decoder(encoded)\n return decoded", "_____no_output_____" ] ], [ [ "In order to move to a probabilistic version, we need to add the latent space sampling mechanism, and change the behavior of our `call` function. This process is implemented in the following `VAE` class.\n\nNote that we purposedly rely on an implementation of the `encode` function where the `encoder` first produces an intermediate representation of size `encoder_dims`. Then, this representation goes through two separate functions for encoding $\\mathbf{\\mu}$ and $\\mathbf{\\sigma}$. 
This provides a clearer implementation but also the added bonus that we can ensure that $\\mathbf{\\sigma} > 0$", "_____no_output_____" ] ], [ [ "class VAE(AE):\n \n def __init__(self, encoder, decoder, encoding_dims, latent_dims):\n super(VAE, self).__init__(encoder, decoder, encoding_dims)\n self.latent_dims = latent_dims\n self.mu = layers.Dense(self.latent_dims, activation='relu')\n self.sigma = layers.Dense(self.latent_dims, activation='softplus')\n \n def encode(self, x):\n x = self.encoder(x)\n mu = self.mu(x)\n sigma = self.sigma(x)\n return mu, sigma\n \n def decode(self, z):\n return self.decoder(z)\n\n def call(self, x):\n # Encode the inputs\n z_params = self.encode(x)\n # Obtain latent samples and latent loss\n z_tilde, kl_div = self.latent(x, z_params)\n # Decode the samples\n x_tilde = self.decode(z_tilde)\n return x_tilde, kl_div\n \n def latent(self, x, z_params):\n n_batch = x.shape[0]\n # Retrieve mean and var\n mu, sigma = z_params\n # Re-parametrize\n q = tfp.distributions.Normal(np.zeros(mu.shape[1]), np.ones(sigma.shape[1]))\n z = (sigma * tf.cast(q.sample(n_batch), 'float32')) + mu\n # Compute KL divergence\n kl_div = -0.5 * tf.reduce_sum(1 + sigma - tf.pow(mu, 2) - tf.exp(sigma))\n kl_div = kl_div / n_batch\n return z, kl_div", "_____no_output_____" ] ], [ [ "Now the interesting aspect of VAEs is that we can define any parametric function as `encoder` and `decoder`, as long as we can optimize them. Here, we will rely on simple feed-forward neural networks, but these can be largely more complex (with limitations that we will discuss later in the tutorial).", "_____no_output_____" ] ], [ [ "def construct_encoder_decoder(nin, n_latent = 16, n_hidden = 512, n_classes = 1):\n # Encoder network\n encoder = tf.keras.Sequential([\n layers.Flatten(),\n layers.Dense(n_hidden, activation='relu'),\n layers.Dense(n_hidden, activation='relu'),\n layers.Dense(n_hidden, activation='relu'),\n ])\n # Decoder network\n decoder = tf.keras.Sequential([\n layers.Dense(n_hidden, activation='relu'),\n layers.Dense(n_hidden, activation='relu'),\n layers.Dense(nin * n_classes, activation='sigmoid'),\n layers.Reshape((28, 28))\n ])\n return encoder, decoder", "_____no_output_____" ] ], [ [ "### Evaluating the error\n\nIn the definition of the `VAE` class, we directly included the computation of the $D_{KL}$ term to regularize our latent space. However, remember that the complete loss of equation (4) also contains a *reconstruction loss* which compares our reconstructed output to the original data. \n\nWhile there are several options to compare the error between two elements, there are usually two preferred choices among the generative literature depending on how we consider our problem\n1. If we consider each dimension (pixel) to be a binary unit (following a Bernoulli distribution), we can rely on the `binary cross entropy` between the two distributions\n2. If we turn our problem to a set of classifications, where each dimension can belong to a given set of *intensity classes*, then we can compute the `multinomial loss` between the two distributions\n\nIn the following, we define both error functions and regroup them in the `reconstruction_loss` call (depending on the `num_classes` considered). 
However, as the `multinomial loss` requires a large computational overhead, and for the sake of simplicity, we will train all our first models by relying on the `binary cross entropy`", "_____no_output_____" ] ], [ [ "optimizer = tf.keras.optimizers.Adam(1e-4)\n\ndef compute_loss(model, x):\n x_tilde, kl_div = model(x)\n cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_tilde, labels=x)\n logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2])\n return -tf.reduce_mean(logpx_z + kl_div)\n\[email protected]\ndef train_step(model, x, optimizer):\n \"\"\"Executes one training step and returns the loss.\"\"\"\n with tf.GradientTape() as tape:\n loss = compute_loss(model, x)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))", "_____no_output_____" ] ], [ [ "### Optimizing a VAE on a real dataset\n\nFor this tutorial, we are going to take a quick shot at a real-life problem by trying to train our VAEs on the `FashionMNIST` dataset. This dataset can be natively used in PyTorch by relying on the `torchvision.datasets` classes as follows", "_____no_output_____" ] ], [ [ "# Load (and eventually download) the dataset\n(x_train, _), (x_test, _) = fashion_mnist.load_data()\n# Normalize the dataset in the [0, 1] range]\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.", "_____no_output_____" ] ], [ [ "The `FashionMNIST` dataset is composed of simple 28x28 black and white images of different items of clothings (such as shoes, bags, pants and shirts). We put a simple function here to display one batch of the test set (note that we keep a fixed batch from the test set in order to evaluate the different variations that we will try in this tutorial).", "_____no_output_____" ] ], [ [ "def plot_batch(batch, nslices=8):\n # Create one big image for plot\n img = np.zeros(((batch.shape[1] + 1) * nslices, (batch.shape[2] + 1) * nslices))\n for b in range(batch.shape[0]):\n row = int(b / nslices); col = int(b % nslices)\n r_p = row * batch.shape[1] + row; c_p = col * batch.shape[2] + col\n img[r_p:(r_p+batch.shape[1]),c_p:(c_p+batch.shape[2])] = batch[b]\n im = plt.imshow(img, cmap='Greys', interpolation='nearest'),\n return im\n# Select a random set of fixed data\nfixed_batch = x_test[:64]\nprint(x_test.shape)\nplt.figure(figsize=(10, 10))\nplot_batch(fixed_batch);", "(10000, 28, 28)\n" ] ], [ [ "Now based on our proposed implementation, the optimization aspects are defined in a very usual way", "_____no_output_____" ] ], [ [ "# Using Bernoulli or Multinomial loss\nnum_classes = 1\n# Number of hidden and latent\nn_hidden = 512\nn_latent = 2\n# Compute input dimensionality\nnin = fixed_batch.shape[1] * fixed_batch.shape[2]\n# Construct encoder and decoder\nencoder, decoder = construct_encoder_decoder(nin, n_hidden = n_hidden, n_latent = n_latent, n_classes = num_classes)\n# Build the VAE model\nmodel = VAE(encoder, decoder, n_hidden, n_latent)", "_____no_output_____" ] ], [ [ "Now all that is left to do is train the model. We define here a `train_vae` function that we will reuse along the future implementations and variations of VAEs and flows. Note that this function is set to run for only a very few number of `epochs` and also most importantly, *only considers a subsample of the full dataset at each epoch*. 
This option is just here so that you can test the different models very quickly on any CPU or laptop.", "_____no_output_____" ] ], [ [ "def generate_and_save_images(model, epoch, test_sample):\n predictions, _ = model(test_sample)\n fig = plt.figure(figsize=(4, 4))\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i + 1)\n plt.imshow(predictions[i, :, :], cmap='gray')\n plt.axis('off')\n # tight_layout minimizes the overlap between 2 sub-plots\n plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n plt.show()\n\nepochs=50\ntest_sample = x_test[0:16, :, :]\nfor epoch in range(1, epochs + 1):\n for train_x in x_train:\n train_step(model, tf.expand_dims(train_x, axis=0), optimizer)\n loss = tf.keras.metrics.Mean()\n for test_x in x_test:\n loss(compute_loss(model, tf.expand_dims(test_x, axis=0)))\n elbo = -loss.result()\n print('Epoch: {}, Test set ELBO: {}'.format(epoch, elbo))\n generate_and_save_images(model, epoch, test_sample)", "Epoch: 1, Test set ELBO: nan\n" ] ], [ [ "### Evaluating generative models\n\nIn order to evaluate our upcoming generative models, we will rely on the computation of the Negative Log-Likelihood. This code for the following `evaluate_nll_bpd` is inspired by the [Sylvester flow repository](https://github.com/riannevdberg/sylvester-flows)", "_____no_output_____" ] ], [ [ "from scipy.special import logsumexp\n\ndef evaluate_nll_bpd(data_loader, model, batch = 500, R = 5):\n # Set of likelihood tests\n likelihood_test = []\n # Go through dataset\n for batch_idx, (x, _) in enumerate(data_loader):\n for j in range(x.shape[0]):\n a = []\n for r in range(0, R):\n cur_x = x[j].unsqueeze(0)\n # Repeat it as batch\n x = cur_x.expand(batch, *cur_x.size()[1:]).contiguous()\n x = x.view(batch, -1)\n x_tilde, kl_div = model(x)\n rec = reconstruction_loss(x_tilde, x, average=False)\n a_tmp = (rec + kl_div)\n a.append(- a_tmp.cpu().data.numpy())\n # calculate max\n a = np.asarray(a)\n a = np.reshape(a, (a.shape[0] * a.shape[1], 1))\n likelihood_x = logsumexp(a)\n likelihood_test.append(likelihood_x - np.log(len(a)))\n likelihood_test = np.array(likelihood_test)\n nll = - np.mean(likelihood_test)\n # Compute the bits per dim (but irrelevant for binary data)\n bpd = nll / (np.prod(nin) * np.log(2.))\n return nll, bpd", "_____no_output_____" ] ], [ [ "Now we can evaluate our VAE model more formally as follows.", "_____no_output_____" ] ], [ [ "# Plot final loss\nplt.figure()\nplt.plot(losses_kld[:, 0].numpy());\n# Evaluate log-likelihood and bits per dim\nnll, _ = evaluate_nll_bpd(test_loader, model)\nprint('Negative Log-Likelihood : ' + str(nll))", "_____no_output_____" ] ], [ [ "### Limitations of VAEs - (**exercise**)\n\nAlthough VAEs are extremely powerful tools, they still have some limitations. Here we list the three most important and known limitations (all of them are still debated and topics of active research). \n1. **Blurry reconstructions.** As can be witnessed directly in the results of the previous vanilla VAE implementation, the reconstructions appear to be blurry. The precise origin of this phenomenon is still debated, but the proposed explanation are\n 1. The use of the KL regularization\n 2. High variance regions of the latent space\n 3. The reconstruction criterion (expectation)\n 4. The use of simplistic latent distributions\n2. **Posterior collapse.** The previous *blurry reconstructions* issue can be mitigated by using a more powerful decoder. 
However, relying on a decoder with a large capacity causes the phenomenon of *posterior collapse* where the latent space becomes useless. A nice intuitive explanation can be found [here](https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/)\n3. **Simplistic Gaussian approximation**. In the derivation of the VAE objective, recall that the KL divergence term needs to be computed analytically. Therefore, this forces us to rely on quite simplistic families. However, the Gaussian family might be too simplistic to model real world data", "_____no_output_____" ], [ "In the present tutorial, we show how normalizing flows can be used to mostly solve the third limitation, while also adressing the two first problems. Indeed, we will see that normalizing flows also lead to sharper reconstructions and also act on preventing posterior collapse", "_____no_output_____" ], [ "<a id=\"improve\"></a>\n## Improving the quality of VAEs\n\nAs we discussed in the previous section, several known issues have been reported when using the vanilla VAE implementation. We listed some of the major issues as being\n1. **Blurry reconstructions.** \n2. **Posterior collapse.**\n3. **Simplistic Gaussian approximation**.\n\nHere, we discuss some recent developments that were proposed in the VAE literature and simple adjustments that can be made to (at least partly) alleviate these issues. However, note that some more advanced proposals such as PixelVAE [5](#reference1) and VQ-VAE [6](#reference1) can lead to wider increases in quality", "_____no_output_____" ], [ "### Reducing the bluriness of reconstructions\n\nIn this tutorial, we relied on extremely simple decoder functions, to show how we could easily define VAEs and normalizing flows together. However, the capacity of the decoder obviously directly influences the quality of the final reconstruction. Therefore, we could address this issue naively by using deep networks and of course convolutional layers as we are currently dealing with images.", "_____no_output_____" ], [ "First you need to construct a more complex encoder and decoder", "_____no_output_____" ] ], [ [ "def construct_encoder_decoder_complex(nin, n_latent = 16, n_hidden = 512, n_params = 0, n_classes = 1):\n # Encoder network\n encoder = ...\n # Decoder network\n decoder = ...\n return encoder, decoder", "_____no_output_____" ] ], [ [ "### Preventing posterior collapse with Wasserstein-VAE-MMD (InfoVAE)\n\nAs we discussed earlier, the reason behind posterior collapse mostly relates to the KL divergence criterion (a nice intuitive explanation can be found [here](https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/). This can be mitigated by relying on a different criterion, such as regularizing the latent distribution by using the *Maximum Mean Discrepancy* (MMD) instead of the KL divergence. 
This model was independently proposed as the *InfoVAE* and later also as the *Wasserstein-VAE*.\n\nHere we provide a simple implementation of the `InfoVAEMMD` class based on our previous implementations.", "_____no_output_____" ] ], [ [ "def compute_kernel(x, y):\n return ...\n\ndef compute_mmd(x, y):\n return ...\n\nclass InfoVAEMMD(VAE):\n \n def __init__(self, encoder, decoder):\n super(InfoVAEMMD, self).__init__(encoder, decoder)\n \n def latent(self, x, z_params):\n return ...", "_____no_output_____" ] ], [ [ "### Putting it all together\n\nHere we combine all these ideas (except for the MMD, which is not adequate as the flow definition already regularizes the latent space without the KL divergence) to perform a more advanced optimization of the dataset. Hence, we will rely on the complex encoder and decoder with gated convolutions, the multinomial loss and the normalizing flows in order to improve the overall quality of our reconstructions.", "_____no_output_____" ] ], [ [ "# Size of latent space\nn_latent = 16\n# Number of hidden units\nn_hidden = 256\n# Rely on Bernoulli or multinomial\nnum_classes = 128\n# Construct encoder and decoder\nencoder, decoder = ...\n# Create VAE or (InfoVAEMMD - WAE) model\nmodel_flow_p = ...\n# Create optimizer algorithm\noptimizer = ...\n# Add learning rate scheduler\nscheduler = ...\n# Launch our optimization\nlosses_flow_param = ...", "_____no_output_____" ] ], [ [ "*NB*: It seems that the multinomial version have a hard time converging. Although I only let this run for 200 epochs and only for a subsampling of 5000 examples, it might need more time, but this might also come from a mistake somewhere in my code ... If you spot something odd please let me know :)", "_____no_output_____" ], [ "### References\n\n<a id=\"reference1\"></a>\n[1] Rezende, Danilo Jimenez, and Shakir Mohamed. \"Variational inference with normalizing flows.\" _arXiv preprint arXiv:1505.05770_ (2015). [link](http://arxiv.org/pdf/1505.05770)\n\n[2] Kingma, Diederik P., Tim Salimans, and Max Welling. \"Improving Variational Inference with Inverse Autoregressive Flow.\" _arXiv preprint arXiv:1606.04934_ (2016). [link](https://arxiv.org/abs/1606.04934)\n\n[3] Kingma, D. P., & Welling, M. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114. (2013). [link](https://arxiv.org/pdf/1312.6114)\n\n[4] Rezende, D. J., Mohamed, S., & Wierstra, D. Stochastic backpropagation and approximate inference in deep generative models. arXiv preprint arXiv:1401.4082. (2014). [link](https://arxiv.org/pdf/1401.4082)\n\n[5] Gulrajani, I., Kumar, K., Ahmed, F., Taiga, A. A., Visin, F., Vazquez, D., & Courville, A. (2016). Pixelvae: A latent variable model for natural images. arXiv preprint arXiv:1611.05013. [link](https://arxiv.org/pdf/1611.05013)\n\n[6] Van den Oord, A., & Vinyals, O. (2017). Neural discrete representation learning. In NIPS 2017 (pp. 6306-6315). [link](http://papers.nips.cc/paper/7210-neural-discrete-representation-learning.pdf)\n\n### Inspirations and resources\n\nhttps://blog.evjang.com/2018/01/nf1.html \nhttps://github.com/ex4sperans/variational-inference-with-normalizing-flows \nhttps://akosiorek.github.io/ml/2018/04/03/norm_flows.html \nhttps://github.com/abdulfatir/normalizing-flows \nhttps://github.com/riannevdberg/sylvester-flows", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
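The InfoVAE / Wasserstein-VAE cell in the notebook above leaves `compute_kernel`, `compute_mmd` and the `latent` override as exercises (`...`). A common choice is a Gaussian (RBF) kernel with a bandwidth tied to the latent dimensionality; the sketch below is one such reference implementation under that assumption, and is not necessarily the notebook's intended solution.

```python
import tensorflow as tf

def compute_kernel(x, y):
    """Gaussian (RBF) kernel matrix between two batches of latent samples."""
    sq_dist = tf.reduce_sum(tf.square(tf.expand_dims(x, 1) - tf.expand_dims(y, 0)), axis=2)
    dim = tf.cast(tf.shape(x)[1], tf.float32)
    return tf.exp(-sq_dist / dim)                     # bandwidth scaled by latent size

def compute_mmd(x, y):
    """Estimate of MMD^2 between samples x ~ q(z|x) and y ~ p(z)."""
    return (tf.reduce_mean(compute_kernel(x, x))
            + tf.reduce_mean(compute_kernel(y, y))
            - 2.0 * tf.reduce_mean(compute_kernel(x, y)))

class InfoVAEMMD(VAE):
    """Sketch: replaces the KL term with an MMD penalty against prior samples."""

    def latent(self, x, z_params):
        mu, sigma = z_params                              # sigma is a std, as in VAE above
        z = mu + sigma * tf.random.normal(tf.shape(mu))   # reparametrized sample from q(z|x)
        z_prior = tf.random.normal(tf.shape(z))           # samples from the prior N(0, I)
        return z, compute_mmd(z, z_prior)
```

The constructor is inherited from `VAE`, and the second value returned by `latent` simply takes the place of `kl_div` in the training loss.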
cb0fa14c786c489e5cdd9cbe8ade607f14f456b9
14,507
ipynb
Jupyter Notebook
teaching/2021-2022/5-cp/Tutorial3.ipynb
siala/siala.github.io
f2b13a030af03a44f235f068f2e81d88ba9e425c
[ "CC-BY-3.0" ]
null
null
null
teaching/2021-2022/5-cp/Tutorial3.ipynb
siala/siala.github.io
f2b13a030af03a44f235f068f2e81d88ba9e425c
[ "CC-BY-3.0" ]
null
null
null
teaching/2021-2022/5-cp/Tutorial3.ipynb
siala/siala.github.io
f2b13a030af03a44f235f068f2e81d88ba9e425c
[ "CC-BY-3.0" ]
null
null
null
25.585538
401
0.581581
[ [ [ "# Tutorial 3 - Boosting Search via Symmetry Breaking, Implied Constraints, Randomisation, and Restarts \n(revisit the exact parameters so that restarts work) ", "_____no_output_____" ], [ "**Please do not read until you fully finish the first 2 tutorials**\n\nCongratulations! You are now a level-one constraint programmer: you know the basics of how to model a problem, how to display solutions, how to evaluate models, and how to choose a good branching strategy!! **I'm so proud of you!**\n\nIn this tutorial we slowly dive into advanced techniques. We also start to use arithmetic constraints and solve optimisation problems.", "_____no_output_____" ] ], [ [ "from config import setup\nsetup()", "_____no_output_____" ] ], [ [ "## Golomb ruler\n\nYour goal is to place $N$ marks on a ruler, such that no two pairs of marks are the same distance apart and the total length of the ruler (the position of the last mark) is minimized. \n\n<div class=\"row\" style=\"margin-top: 10px\">\n <img src=\"display/images/Golomb_Ruler-4.svg\" style=\"display: block; margin: auto; width: 400px;\" />\n <p style=\"margin: auto; margin-top: 10px; text-align: center;\">Golomb ruler of order 4 and length 6. This ruler is both optimal and perfect.</p>\n</div>\n\nThe Golomb ruler can be used in information theory to design error-correcting codes or in telecommunications to avoid interference during radio communications. You can read about it here https://en.wikipedia.org/wiki/Golomb_ruler#:~:targetText=In%20mathematics%2C%20a%20Golomb%20ruler,are%20the%20same%20distance%20apart.&targetText=It%20has%20been%20proven%20that,of%20the%20same%20order%20exists.", "_____no_output_____" ], [ "\n**In the rest of this tutorial (except the last part), please use the following parameter with the solve method:**\n\n```\nSearchType= 'DepthFirst'\n```\n\nAlso, in order to control the level of filtering (arc consistency, bound consistency, forward checking, etc.), CP Optimizer offers a parameter called $DefaultInferenceLevel$: http://ibmdecisionoptimization.github.io/docplex-doc/cp/docplex.cp.parameters.py.html?highlight=defaultinferencelevel#docplex.cp.parameters.CpoParameters.DefaultInferenceLevel\n\nIn the rest of this tutorial, you are required to test all three possibilities:\n\n\n```\nDefaultInferenceLevel=Low\nDefaultInferenceLevel=Medium\nDefaultInferenceLevel=Extended\n\n```\n\nAfter a while, if you find one that is particularly efficient (in runtime), you can use it for the rest of the tutorial. \n", "_____no_output_____" ], [ "Create a model for the decision version of this problem. That is, given $n$ marks and a ruler of size $m$, place the $n$ marks such that no two pairs of marks are the same distance apart. \n\nYou are free to use any constraint you want. However, you must declare and use the minimum number of constraints (**NOT A SINGLE UNNECESSARY CONSTRAINT**)\n\nNote that for $N$ marks, a ruler of length $2 ^ {N -1}$ can always be found (I let you figure out why). \n\nWrite a function decision_model(n,m) that builds and returns the corresponding model. ", "_____no_output_____" ], [ "Solve the problem for n=4, m=6. Then try different values of (n,m) (but don't waste too much time). 
", "_____no_output_____" ], [ "You can display the solution using: \n\n```\nfrom display import golomb as display_golomb\ndisplay_golomb([sol[m] for m in marks])\n```", "_____no_output_____" ], [ "Print and display all the solutions for (n,m) = (4,6) and (4,7)", "_____no_output_____" ], [ "Write a function basic_optimisation_model(n) that builds and returns the corresponding model for the\noptimisation problem. Note that the objective function can be seen as a variable. In order to specify the variable to optimise, we can simply use: \n\n```\nmodel.add(model.minimize(myvariable))\n```\n\nor \n\n```\nmodel.add(model.maximize(myvariable))\n```\n", "_____no_output_____" ], [ "Solve the optimisation problem for N=6..10 and display the solution", "_____no_output_____" ], [ "# Symmetry Breaking", "_____no_output_____" ], [ "In combinatorial optimisation, two (partial) solutions are called symmetric if we can find a transformation from one to the other. \nConsider our Golomb ruler problem. Given any solution to the marks variables, if the first mark is not at index $0$, we can always shift everything to the left to start from $0$ and still have a solution. \n\nConstraint programming is extremely flexible in handling symmetries since they can be declared as constraints. \n\nIn the case of the above symmetry, we can simply add \n```\nmodel.add (marks[0]==0)\n```\n\nThis problem has another symmetry; can you find it? To help you, display the solutions for n=4 and m=6 for the decision problem. You should find 2 solutions that are essentially the same. Can you find the symmetry? How can we model this symmetry as a constraint? ", "_____no_output_____" ], [ "Write a new function nosymmetry_optimisation_model(n) that builds a new model that avoids the two symmetries we found so far. ", "_____no_output_____" ], [ "Compare nosymmetry_optimisation_model and basic_optimisation_model for different values of $n$ (you decide the values of $n$). Plot the runtime and the search tree size", "_____no_output_____" ], [ "What's your impression about symmetries? ", "_____no_output_____" ], [ "## Implied Constraints", "_____no_output_____" ], [ "An implied constraint is one that can be deduced by looking at the original constraints of the problem. \n\nFor instance, if we have $a<b$ and $b<c$, one can infer that $a<c$. \n\nSuch constraints (also called redundant constraints) can help the solver prune the search tree further. \n\n\n\nIn our problem there is an implied constraint. Can you find it? Please check with one of the supervisors. \n\n", "_____no_output_____" ], [ "Write a new function nosymmetry2_optimisation_model(n) that adds the implied constraint to the nosymmetry_optimisation_model(n) and returns the new model ", "_____no_output_____" ], [ "Compare nosymmetry2_optimisation_model and nosymmetry_optimisation_model ", "_____no_output_____" ], [ "# Randomisation and Restarts", "_____no_output_____" ], [ "Declare two search strategies: one that uses a lexicographical order on both variables and values, \n and the other using an impact-based choice on the variables with a random value selection. ", "_____no_output_____" ], [ "Run the two strategies using the nosymmetry2_optimisation_model for different values of $n$", "_____no_output_____" ], [ "### The magic of restarts\n\n\nCombinatorial search usually exhibits a bad behaviour in its runtime distribution called the **heavy-tailed phenomenon**. 
\nThat is, at any node of the search tree, there is a non-negligible probability that the time needed to explore the current subtree follows a distribution with a heavier tail than \nan exponential distribution (you can read about it here https://aaai.org/Papers/AAAI/1998/AAAI98-061.pdf). \n\n\nA simple solution to deal with such a bad behaviour is to restart the search from time to time. \nCPOptimizer offers this choice by using the parameter: \n\n```\nSearchType= 'Restart'\n```\n\n", "_____no_output_____" ], [ "Using a restart search, evaluate the two strategies mentioned above using the nosymmetry2_optimisation_model for different values of $n$. What do you think? ", "_____no_output_____" ], [ "What is the maximum value of $n$ for which you can solve this problem? Use all your techniques! ", "_____no_output_____" ] ], [ [ "### WARNING: This block takes a lot of time to execute\n# There are a lot of configurations to try, for instance \n", "_____no_output_____" ] ], [ [ "What did you learn today? ", "_____no_output_____" ] ] ]
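As a starting point for the exercises above, here is a minimal sketch of how a decision model with the shift-symmetry break could be assembled with docplex.cp. It is only an illustration, not a reference solution: the import location of `all_diff`, the ordering constraints, and the solve parameters are assumptions consistent with the earlier tutorials, and the second symmetry and the implied constraint are deliberately left for you to find.

```
from docplex.cp.model import CpoModel
from docplex.cp.modeler import all_diff

def decision_model(n, m):
    # decision version: n marks on a ruler of length m, all pairwise distances distinct
    mdl = CpoModel(name='golomb_decision')
    marks = mdl.integer_var_list(n, 0, m, 'mark')
    # order the marks so that every pairwise difference below is positive
    for i in range(n - 1):
        mdl.add(marks[i] < marks[i + 1])
    distances = [marks[j] - marks[i] for i in range(n) for j in range(i + 1, n)]
    mdl.add(all_diff(distances))
    return mdl, marks

mdl, marks = decision_model(4, 6)
mdl.add(marks[0] == 0)   # shift-symmetry break discussed above
sol = mdl.solve(SearchType='DepthFirst', DefaultInferenceLevel='Medium', LogVerbosity='Quiet')
if sol:
    print([sol[mk] for mk in marks])
```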
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb0fa95a1a8e1229dcf8beaf2be8fc0ac0bce8bd
4,005
ipynb
Jupyter Notebook
src/other-subreddits/Reddit_UComment_Subs.ipynb
alu13/Antiwork_Analysis
8cc991dcd145bdc7f365a99b1f77c8de02ba49ce
[ "MIT" ]
null
null
null
src/other-subreddits/Reddit_UComment_Subs.ipynb
alu13/Antiwork_Analysis
8cc991dcd145bdc7f365a99b1f77c8de02ba49ce
[ "MIT" ]
null
null
null
src/other-subreddits/Reddit_UComment_Subs.ipynb
alu13/Antiwork_Analysis
8cc991dcd145bdc7f365a99b1f77c8de02ba49ce
[ "MIT" ]
null
null
null
28.404255
118
0.547566
[ [ [ "This notebook uses PRAW to pull the subreddits posted in of reddit users in antiwork", "_____no_output_____" ] ], [ [ "#Imports\nimport praw\nimport pandas as pd\nimport datetime as dt\nimport csv\nimport prawcore.exceptions ", "_____no_output_____" ] ], [ [ "We import the file of all usernames in antiwork, then loop through their posts and the subsequent subreddits.", "_____no_output_____" ] ], [ [ "df = pd.read_csv ('user_stats.csv')\n\n\nreddit = praw.Reddit(client_id='blb2ZuNkAGWKyPJr_ruqOw', \n client_secret='9IYM0Ba4kJQBKKP9H9ZTVkkDgtrSsQ', \n user_agent='6471_Project')\n#number of observations\nsize = df.shape[0]\nprint(\"total users within Anti-Work Subreddit \", size)\n#Users with over 100 total activity\nconsolidated_df = df[df[\"total_activity\"] >= 100]\ncon_size = consolidated_df.shape[0]\nprint(\"number of users with 100 or more total activity \", con_size)\ncon_users = consolidated_df[\"user_name\"]\n\n#Sanity Test\n# username = con_users[3]\n# user_posts = []\n# user = reddit.redditor(con_users[3])\n# submissions = user.comments.new(limit=None)\n# test_posts = [username]\n# try:\n# for link in submissions:\n# test_posts.append(link.id)\n# user_posts.append(test_posts)\n# except Forbidden:\n# print(\"failed\")\n\n\n#Dictionary mapping users to a list of their comments\n#loop through most active users\n\n#user_comments = []\nuser_comment_subreddits = []\n\ncount = 0\n#halted, starting where left off at 294\nfor username in con_users:\n user = reddit.redditor(username)\n submissions = user.comments.new(limit=None)\n #comment_ids = []\n comment_srs = []\n comment_srs.append(username)\n try:\n for link in submissions:\n #comment_ids.append(link)\n comment_srs.append(link.subreddit.display_name)\n #user_comments.append(comment_ids)\n user_comment_subreddits.append(comment_srs)\n \n except Exception:\n print(\"no submissions\")\n print(count)\n count += 1\n\n#Writing dictionary to a csv\nwith open('user_commment_subreddits.csv', 'w', newline = \"\") as f: # You will need 'wb' mode in Python 2.x\n w = csv.writer(f)\n w.writerows(user_subreddits)\nprint(\"done\")", "_____no_output_____" ], [ "with open('user_commment_subreddits.csv', 'w', newline = \"\") as f: # You will need 'wb' mode in Python 2.x\n w = csv.writer(f)\n w.writerows(user_comment_subreddits)\nprint(\"done\")", "_____no_output_____" ] ] ]
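Once the corrected export above has run (the second write cell, which writes `user_commment_subreddits.csv` from `user_comment_subreddits`; the file name spelling follows the code above), each row holds a username followed by the subreddits of that user's comments. A small, hypothetical follow-up sketch for reading that ragged CSV back and tallying subreddit frequencies might look like this; the aggregation step is an assumption about how the data could be used, not part of the original collection code.

```
import csv
from collections import Counter

# tally how often each subreddit appears across all users' comment histories
subreddit_counts = Counter()
with open('user_commment_subreddits.csv', newline='') as f:  # name as written above
    for row in csv.reader(f):
        if not row:
            continue
        username, subreddits = row[0], row[1:]  # first field is the username
        subreddit_counts.update(subreddits)

print(subreddit_counts.most_common(20))
```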
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb0fc1f1fc389a0ed3e8ce899b72a850a74d77f3
362,391
ipynb
Jupyter Notebook
Gbm-tuning-best-from-50-to-53.ipynb
sid24rane/Promotion-OR-Not
eb30ea6bb9ac00b133c2edc1dad303ff54506c09
[ "MIT" ]
1
2019-02-03T18:00:39.000Z
2019-02-03T18:00:39.000Z
Gbm-tuning-best-from-50-to-53.ipynb
sid24rane/Promotion-OR-Not
eb30ea6bb9ac00b133c2edc1dad303ff54506c09
[ "MIT" ]
null
null
null
Gbm-tuning-best-from-50-to-53.ipynb
sid24rane/Promotion-OR-Not
eb30ea6bb9ac00b133c2edc1dad303ff54506c09
[ "MIT" ]
null
null
null
306.07348
50,489
0.511221
[ [ [ "!wget https://datahack-prod.s3.amazonaws.com/train_file/train_LZdllcl.csv -O train.csv", "--2018-09-16 12:40:37-- https://datahack-prod.s3.amazonaws.com/train_file/train_LZdllcl.csv\nResolving datahack-prod.s3.amazonaws.com (datahack-prod.s3.amazonaws.com)... 52.219.64.12\nConnecting to datahack-prod.s3.amazonaws.com (datahack-prod.s3.amazonaws.com)|52.219.64.12|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 3759647 (3.6M) [application/vnd.ms-excel]\nSaving to: โ€˜train.csvโ€™\n\ntrain.csv 100%[=====================>] 3.58M 491KB/s in 23s \n\n2018-09-16 12:41:02 (159 KB/s) - โ€˜train.csvโ€™ saved [3759647/3759647]\n\n" ], [ "!wget https://datahack-prod.s3.amazonaws.com/test_file/test_2umaH9m.csv -O test.csv", "--2018-09-16 12:41:02-- https://datahack-prod.s3.amazonaws.com/test_file/test_2umaH9m.csv\nResolving datahack-prod.s3.amazonaws.com (datahack-prod.s3.amazonaws.com)... 52.219.62.40\nConnecting to datahack-prod.s3.amazonaws.com (datahack-prod.s3.amazonaws.com)|52.219.62.40|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 1565147 (1.5M) [application/vnd.ms-excel]\nSaving to: โ€˜test.csvโ€™\n\ntest.csv 100%[=====================>] 1.49M 152KB/s in 19s \n\n2018-09-16 12:41:22 (81.6 KB/s) - โ€˜test.csvโ€™ saved [1565147/1565147]\n\n" ], [ "!wget https://datahack-prod.s3.amazonaws.com/sample_submission/sample_submission_M0L0uXE.csv -O sample_submission.csv", "--2018-09-16 12:41:22-- https://datahack-prod.s3.amazonaws.com/sample_submission/sample_submission_M0L0uXE.csv\nResolving datahack-prod.s3.amazonaws.com (datahack-prod.s3.amazonaws.com)... 52.219.64.0\nConnecting to datahack-prod.s3.amazonaws.com (datahack-prod.s3.amazonaws.com)|52.219.64.0|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 208067 (203K) [application/vnd.ms-excel]\nSaving to: โ€˜sample_submission.csvโ€™\n\nsample_submission.c 100%[=====================>] 203.19K 43.9KB/s in 4.6s \n\n2018-09-16 12:41:28 (43.9 KB/s) - โ€˜sample_submission.csvโ€™ saved [208067/208067]\n\n" ], [ "# Import the required packages\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt \n%matplotlib inline \nimport seaborn as sns", "_____no_output_____" ], [ "# Read the train and test data\ntrain=pd.read_csv(\"train.csv\")\ntrain.drop('employee_id',inplace=True,axis = 1)\ntest=pd.read_csv(\"test.csv\")", "_____no_output_____" ], [ "# Check the variables in train data\ntrain.columns", "_____no_output_____" ], [ "# Print datatype of each variable\ntrain.dtypes", "_____no_output_____" ], [ "# Dimension of the train dataset\ntrain.shape", "_____no_output_____" ], [ "# Print the head of train dataset\ntrain.head()", "_____no_output_____" ], [ "# Unique values in each variable of train dataset\ntrain.nunique()", "_____no_output_____" ] ], [ [ "### Univariate Analysis", "_____no_output_____" ], [ "#### Target Variable", "_____no_output_____" ] ], [ [ "train['is_promoted'].value_counts(normalize=True)", "_____no_output_____" ], [ "# Around 91% trainee have promoted\n# Unbalanced dataset ", "_____no_output_____" ] ], [ [ "#### Categorical Independent Variables", "_____no_output_____" ] ], [ [ "plt.figure(1)\nplt.subplot(221)\ntrain['department'].value_counts(normalize=True).plot.bar(figsize=(20,10), title= 'Department')\n\nplt.subplot(222)\ntrain['awards_won?'].value_counts(normalize=True).plot.bar(title= 'Awards won')\n\nplt.subplot(223)\ntrain['education'].value_counts(normalize=True).plot.bar(title= 'Education')\n\nplt.subplot(224)\ntrain['gender'].value_counts(normalize=True).plot.bar(title= 'Gender')\n\nplt.show()", "_____no_output_____" ], [ "# Most of the trainee are enrolled for Y and T program_type.\n# More number of trainee enrolment for offline test than online test.\n# Most of the test are easy in terms of difficulty level.", "_____no_output_____" ], [ "train['KPIs_met >80%'].value_counts(normalize=True).plot.bar(title= 'KPI met greater than 80')\n", "_____no_output_____" ], [ "plt.figure(1)\nplt.subplot(221)\ntrain['region'].value_counts(normalize=True).plot.bar(figsize=(20,10), title= 'Region')\n\nplt.subplot(222)\ntrain['recruitment_channel'].value_counts(normalize=True).plot.bar(title='Recruitment Channels')\n\nplt.subplot(223)\ntrain['no_of_trainings'].value_counts(normalize=True).plot.bar(title= 'No of Trainings')\n\nplt.subplot(224)\ntrain['previous_year_rating'].value_counts(normalize=True).plot.bar(title= 'Previous year ratings')\n\nplt.show()", "_____no_output_____" ], [ "# More male trainee as compared to female trainee\n# Most of the trainee have diploma\n# Most of the trainee belongs to tier 3 city\n# 10% of the trainee are handicapped", "_____no_output_____" ] ], [ [ "#### Numerical Independent Variables", "_____no_output_____" ] ], [ [ "sns.distplot(train['age']);", "_____no_output_____" ], [ "# Most of the trainee are in the age range of 20-30 and 40-50", "_____no_output_____" ], [ "sns.distplot(train['length_of_service']);", "_____no_output_____" ], [ "sns.distplot(train['avg_training_score']);", "_____no_output_____" ] ], [ [ "### Bivariate Analysis", "_____no_output_____" ] ], [ [ "# Correlation between numerical variables\nmatrix = train.corr()\nf, ax = plt.subplots(figsize=(9, 6))\nsns.heatmap(matrix, vmax=.8, square=True, cmap=\"BuPu\");", "_____no_output_____" ], [ "# Not 
much correlation between the variables", "_____no_output_____" ], [ "# program_id vs is_pass\nplt.figure(figsize=(12,4))\nsns.barplot(train['department'], train['is_promoted'])", "_____no_output_____" ], [ "plt.figure(figsize=(20,8))\n# program_type vs is_pass\nsns.barplot(train['region'], train['is_promoted'])", "_____no_output_____" ], [ "# Trainee in X and Y program type have higher chances to pass the test", "_____no_output_____" ], [ "# test_type vs is_pass\nsns.barplot(train['recruitment_channel'], train['is_promoted'])", "_____no_output_____" ], [ "# Trainee attending online mode of test have higher chances to pass the test", "_____no_output_____" ], [ "# difficulty_level vs is_pass\nsns.barplot(train['no_of_trainings'], train['is_promoted'])", "_____no_output_____" ], [ "# If the difficulty level of the test is easy, chances to pass the test are higher", "_____no_output_____" ], [ "# Gender vs is_pass\nsns.barplot(train['previous_year_rating'], train['is_promoted'])", "_____no_output_____" ], [ "# Gender does not affect the chances to pass the test", "_____no_output_____" ], [ "# education vs is_pass\nplt.figure(figsize=(12,4))\nsns.barplot(train['education'], train['is_promoted'])", "_____no_output_____" ], [ "# Trainee with Masters education level have more chances to pass the test", "_____no_output_____" ], [ "plt.figure(figsize=(20,8))\n# is_handicapped vs is_pass\nsns.barplot(train['length_of_service'], train['is_promoted'])", "_____no_output_____" ], [ "# Handicapped trainee have less chances to pass the test", "_____no_output_____" ], [ "# city_tier vs is_pass\nsns.barplot(train['KPIs_met >80%'], train['is_promoted'])", "_____no_output_____" ], [ "# Trainee from city tier 1 have higher chances to pass the test", "_____no_output_____" ], [ "# trainee_engagement_rating vs is_pass\nsns.barplot(train['awards_won?'], train['is_promoted'])", "_____no_output_____" ], [ "# As the trainee engagement rating increases, chances to pass the test also increases", "_____no_output_____" ] ], [ [ "### Missing Values Treatment", "_____no_output_____" ] ], [ [ "# Check the number of missing values in each variable\ntrain.isnull().sum()", "_____no_output_____" ], [ "# age and trainee_engagement_rating variables have missing values in it.", "_____no_output_____" ], [ "test = pd.read_csv('test.csv')\ntest.drop('employee_id',inplace=True,axis = 1)\ntest.head()", "_____no_output_____" ], [ "test['education'].fillna('other',inplace=True)\ntest['previous_year_rating'].fillna(99,inplace=True)\n\ntrain['education'].fillna('other',inplace=True)\ntrain['previous_year_rating'].fillna(99,inplace=True)", "_____no_output_____" ] ], [ [ "### Logistic Regression", "_____no_output_____" ] ], [ [ "train.head()", "_____no_output_____" ], [ "# Save target variable in separate dataset\nX = train.drop('is_promoted',axis=1)\ny = train.is_promoted", "_____no_output_____" ], [ "test.head()", "_____no_output_____" ], [ "# Apply dummies to the dataset\nX=pd.get_dummies(X)\ntest=pd.get_dummies(test)", "_____no_output_____" ], [ "from sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn import cross_validation, metrics #Additional scklearn functions\nfrom sklearn.grid_search import GridSearchCV #Perforing grid search", "/opt/conda/lib/python3.6/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. 
Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n \"This module will be removed in 0.20.\", DeprecationWarning)\n/opt/conda/lib/python3.6/site-packages/sklearn/grid_search.py:42: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. This module will be removed in 0.20.\n DeprecationWarning)\n" ], [ "#same function as xgboost tuning one!\ndef modelfit(alg, dtrain, predictors, performCV=True, printFeatureImportance=True, cv_folds=5):\n #Fit the algorithm on the data\n alg.fit(dtrain[predictors],y)\n \n #Predict training set:\n dtrain_predictions = alg.predict(dtrain[predictors])\n dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]\n \n #Perform cross-validation:\n if performCV:\n cv_score = cross_validation.cross_val_score(alg, dtrain[predictors],y, cv=cv_folds, scoring='f1')\n \n #Print model report:\n print(\"\\nModel Report\")\n print(\"F1 Score :\",metrics.f1_score(y, dtrain_predictions))\n \n if performCV:\n print(\"CV Score : Mean - %.7g | Std - %.7g | Min - %.7g | Max - %.7g\" % (np.mean(cv_score),np.std(cv_score),np.min(cv_score),np.max(cv_score)))\n \n #Print Feature Importance:\n if printFeatureImportance:\n feat_imp = pd.Series(alg.feature_importances_, predictors).sort_values(ascending=False)\n feat_imp.plot(kind='bar', title='Feature Importances')\n plt.ylabel('Feature Importance Score')\n", "_____no_output_____" ], [ "#Choose all predictors except target & IDcols\npredictors = [x for x in X.columns]\ngbm0 = GradientBoostingClassifier(random_state=42,verbose = 1)\nmodelfit(gbm0,X, predictors)", " Iter Train Loss Remaining Time \n 1 0.5528 10.35s\n 2 0.5370 10.29s\n 3 0.5257 10.26s\n 4 0.5169 10.25s\n 5 0.5089 10.07s\n 6 0.5029 9.74s\n 7 0.4964 9.53s\n 8 0.4909 9.49s\n 9 0.4868 9.41s\n 10 0.4822 9.34s\n 20 0.4357 8.38s\n 30 0.4113 7.29s\n 40 0.3900 6.17s\n 50 0.3780 5.00s\n 60 0.3704 3.96s\n 70 0.3616 2.95s\n 80 0.3542 1.96s\n 90 0.3461 0.98s\n 100 0.3429 0.00s\n Iter Train Loss Remaining Time \n 1 0.5531 7.00s\n 2 0.5374 6.87s\n 3 0.5260 6.83s\n 4 0.5172 6.89s\n 5 0.5084 7.04s\n 6 0.5019 7.13s\n 7 0.4955 7.12s\n 8 0.4909 7.01s\n 9 0.4860 6.91s\n 10 0.4815 6.80s\n 20 0.4280 6.07s\n 30 0.4031 5.46s\n 40 0.3772 4.85s\n 50 0.3688 3.95s\n 60 0.3621 3.09s\n 70 0.3553 2.32s\n 80 0.3515 1.53s\n 90 0.3455 0.77s\n 100 0.3416 0.00s\n Iter Train Loss Remaining Time \n 1 0.5521 7.41s\n 2 0.5363 7.38s\n 3 0.5249 7.25s\n 4 0.5160 7.18s\n 5 0.5079 7.17s\n 6 0.5020 7.18s\n 7 0.4960 7.20s\n 8 0.4913 7.10s\n 9 0.4864 7.01s\n 10 0.4819 6.93s\n 20 0.4387 6.03s\n 30 0.4063 5.51s\n 40 0.3807 4.81s\n 50 0.3680 3.98s\n 60 0.3605 3.15s\n 70 0.3567 2.30s\n 80 0.3512 1.52s\n 90 0.3485 0.75s\n 100 0.3458 0.00s\n Iter Train Loss Remaining Time \n 1 0.5519 7.11s\n 2 0.5360 7.31s\n 3 0.5245 7.43s\n 4 0.5153 7.46s\n 5 0.5072 7.44s\n 6 0.5004 7.31s\n 7 0.4940 7.24s\n 8 0.4892 7.13s\n 9 0.4844 7.07s\n 10 0.4794 7.04s\n 20 0.4326 6.31s\n 30 0.3972 5.66s\n 40 0.3774 4.88s\n 50 0.3684 3.94s\n 60 0.3628 3.07s\n 70 0.3564 2.29s\n 80 0.3531 1.49s\n 90 0.3507 0.73s\n 100 0.3483 0.00s\n Iter Train Loss Remaining Time \n 1 0.5533 6.98s\n 2 0.5375 6.88s\n 3 0.5262 6.85s\n 4 0.5175 6.97s\n 5 0.5093 7.05s\n 6 0.5019 7.08s\n 7 0.4963 6.95s\n 8 0.4908 6.83s\n 9 0.4858 6.73s\n 10 0.4822 6.63s\n 20 0.4400 5.92s\n 30 0.4048 5.40s\n 40 0.3878 4.68s\n 50 0.3774 3.83s\n 60 0.3661 3.06s\n 70 0.3601 2.25s\n 80 0.3536 1.50s\n 90 0.3493 
0.74s\n 100 0.3451 0.00s\n Iter Train Loss Remaining Time \n 1 0.5534 7.41s\n 2 0.5380 7.66s\n 3 0.5268 7.63s\n 4 0.5182 7.47s\n 5 0.5102 7.29s\n 6 0.5028 7.13s\n 7 0.4973 6.97s\n 8 0.4919 6.85s\n 9 0.4869 6.74s\n 10 0.4830 6.52s\n 20 0.4328 5.96s\n 30 0.4001 5.48s\n 40 0.3791 4.85s\n 50 0.3684 3.97s\n 60 0.3602 3.11s\n 70 0.3560 2.30s\n 80 0.3496 1.54s\n 90 0.3453 0.77s\n 100 0.3425 0.00s\n\nModel Report\nF1 Score : 0.9262726703181654\nCV Score : Mean - 0.4528465 | Std - 0.01650186 | Min - 0.43 | Max - 0.4772727\n" ], [ "param_test1 = {'n_estimators':np.arange(180,400,20)}\ngsearch1 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1,verbose = 1, min_samples_split=500,min_samples_leaf=50,max_depth=5,max_features='sqrt',subsample=0.8,random_state=10), \nparam_grid = param_test1, scoring='f1',n_jobs=-1,iid=False, cv=3,verbose=1)\ngsearch1.fit(X,y)", "Fitting 3 folds for each of 11 candidates, totalling 33 fits\n Iter Train Loss OOB Improve Remaining Time \n Iter Train Loss OOB Improve Remaining Time \n 1 0.5554 0.0239 7.65s\n 1 0.5123 0.0644 6.52s\n 2 0.5516 0.0111 7.80s\n 2 0.5159 0.0054 6.66s\n 3 0.5305 0.0149 7.36s\n 3 0.5026 0.0075 6.41s\n 4 0.5190 0.0103 7.19s\n 4 0.4866 0.0168 6.81s\n 5 0.5012 0.0185 7.15s\n 5 0.4833 0.0060 6.39s\n 6 0.4775 0.0026 6.18s\n 6 0.5018 0.0074 7.33s\n 7 0.4916 0.0042 7.02s\n 7 0.4738 0.0067 6.27s\n 8 0.4812 0.0063 6.91s\n 8 0.4676 0.0031 6.39s\n 9 0.4772 0.0081 6.86s\n 9 0.4624 0.0051 6.55s\n 10 0.4683 0.0059 6.90s\n 10 0.4552 0.0056 6.64s\n 20 0.4491 0.0020 6.16s\n 20 0.4312 0.0006 6.10s\n 30 0.4290 -0.0002 5.63s\n 30 0.4026 0.0029 5.77s\n 40 0.3962 0.0001 5.17s\n 40 0.3864 0.0006 5.18s\n 50 0.3844 0.0015 4.53s\n 50 0.3673 -0.0000 4.58s\n 60 0.3682 -0.0001 4.29s\n 60 0.3521 0.0004 4.30s\n 70 0.3551 0.0017 3.98s\n 70 0.3459 -0.0000 3.98s\n 80 0.3490 0.0003 3.65s\n 80 0.3374 0.0005 3.64s\n 90 0.3377 -0.0001 3.28s\n 90 0.3318 0.0003 3.26s\n 100 0.3304 -0.0002 2.93s\n 100 0.3284 0.0004 2.91s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5300 0.0511 6.60s\n Iter Train Loss OOB Improve Remaining Time \n 2 0.5210 0.0080 7.45s\n 1 0.5554 0.0239 7.23s\n 3 0.5107 0.0063 7.32s\n 2 0.5516 0.0111 7.80s\n 4 0.4988 0.0136 7.43s\n 3 0.5305 0.0149 7.88s\n 5 0.4906 0.0084 7.19s\n 4 0.5190 0.0103 8.09s\n 6 0.4771 0.0034 7.31s\n 5 0.5012 0.0185 8.29s\n 7 0.4787 0.0073 7.23s\n 6 0.5018 0.0074 8.50s\n 8 0.4725 0.0043 7.33s\n 7 0.4916 0.0042 8.23s\n 9 0.4634 0.0053 7.34s\n 8 0.4812 0.0063 8.21s\n 10 0.4606 0.0035 7.39s\n 9 0.4772 0.0081 8.26s\n 10 0.4683 0.0059 8.26s\n 20 0.4385 0.0006 6.43s\n 20 0.4491 0.0020 7.36s\n 30 0.4186 0.0034 5.97s\n 30 0.4290 -0.0002 6.81s\n 40 0.3916 0.0071 5.46s\n 40 0.3962 0.0001 6.28s\n 50 0.3719 0.0017 5.09s\n 50 0.3844 0.0015 5.94s\n 60 0.3586 0.0005 4.69s\n 60 0.3682 -0.0001 5.53s\n 70 0.3489 -0.0003 4.29s\n 70 0.3551 0.0017 5.11s\n 80 0.3433 0.0003 3.89s\n 80 0.3490 0.0003 4.73s\n 90 0.3375 -0.0003 3.45s\n 90 0.3377 -0.0001 4.28s\n 100 0.3351 0.0005 3.08s\n 100 0.3304 -0.0002 3.89s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5123 0.0644 7.14s\n 2 0.5159 0.0054 7.79s\n 3 0.5026 0.0075 7.62s\n 4 0.4866 0.0168 8.30s\n 5 0.4833 0.0060 7.99s\n 6 0.4775 0.0026 7.69s\n 7 0.4738 0.0067 7.75s\n 8 0.4676 0.0031 7.77s\n 9 0.4624 0.0051 7.90s\n 10 0.4552 0.0056 7.89s\n 200 0.3088 -0.0001 0.00s\n 20 0.4312 0.0006 7.43s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5300 0.0511 6.79s\n 2 0.5210 0.0080 7.10s\n 3 0.5107 0.0063 6.96s\n 4 0.4988 0.0136 7.46s\n 5 0.4906 0.0084 7.22s\n 6 0.4771 0.0034 7.36s\n 7 0.4787 0.0073 
7.40s\n 8 0.4725 0.0043 7.59s\n 30 0.4026 0.0029 7.09s\n 9 0.4634 0.0053 7.75s\n 10 0.4606 0.0035 7.88s\n 40 0.3864 0.0006 6.42s\n 20 0.4385 0.0006 6.94s\n 50 0.3673 -0.0000 5.93s\n 30 0.4186 0.0034 6.58s\n 60 0.3521 0.0004 5.46s\n 40 0.3916 0.0071 6.07s\n 70 0.3459 -0.0000 5.05s\n 50 0.3719 0.0017 5.75s\n 80 0.3374 0.0005 4.63s\n 60 0.3586 0.0005 5.35s\n 90 0.3318 0.0003 4.20s\n 70 0.3489 -0.0003 4.99s\n 100 0.3284 0.0004 3.80s\n 80 0.3433 0.0003 4.60s\n 90 0.3375 -0.0003 4.18s\n 100 0.3351 0.0005 3.84s\n 200 0.3111 -0.0003 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5554 0.0239 8.48s\n 2 0.5516 0.0111 10.17s\n 3 0.5305 0.0149 9.79s\n 4 0.5190 0.0103 9.56s\n 5 0.5012 0.0185 9.35s\n 6 0.5018 0.0074 9.27s\n 7 0.4916 0.0042 8.77s\n 8 0.4812 0.0063 8.64s\n 9 0.4772 0.0081 8.55s\n" ], [ "gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_", "_____no_output_____" ], [ "#tuning max depth and min samples split\n\nparam_test2 = {'max_depth':np.arange(5,10,2),'min_samples_split':np.arange(500,1001,100)}\ngsearch2 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1,verbose = 1, n_estimators=600, max_features='sqrt', subsample=0.8, random_state=10), \nparam_grid = param_test2, scoring='f1',n_jobs=-1,iid=False, cv=3,verbose =1)\ngsearch2.fit(X,y)", "Fitting 3 folds for each of 18 candidates, totalling 54 fits\n Iter Train Loss OOB Improve Remaining Time \n Iter Train Loss OOB Improve Remaining Time \n 1 0.5663 0.0127 17.12s\n 1 0.5439 0.0335 16.26s\n 2 0.5478 0.0051 15.72s\n 2 0.5675 0.0072 19.18s\n 3 0.5341 0.0067 15.54s\n 3 0.5510 0.0096 18.90s\n 4 0.5226 0.0119 16.44s\n 4 0.5402 0.0095 18.49s\n 5 0.5192 0.0068 15.74s\n 5 0.5282 0.0129 18.07s\n 6 0.5118 0.0043 15.83s\n 6 0.5308 0.0062 18.25s\n 7 0.5089 0.0053 15.96s\n 7 0.5199 0.0053 17.40s\n 8 0.5020 0.0042 16.23s\n 8 0.5125 0.0034 16.94s\n 9 0.4978 0.0049 16.28s\n 9 0.5088 0.0076 16.99s\n 10 0.4912 0.0053 16.25s\n 10 0.5008 0.0068 17.12s\n 20 0.4713 0.0025 15.15s\n 20 0.4674 0.0017 15.01s\n 30 0.4496 0.0005 14.71s\n 30 0.4447 0.0015 14.75s\n 40 0.4334 0.0013 14.11s\n 40 0.4186 0.0008 14.37s\n 50 0.4187 0.0002 13.74s\n 50 0.4069 0.0001 14.05s\n 60 0.3919 0.0006 13.57s\n 60 0.3881 0.0006 13.81s\n 70 0.3797 0.0009 13.33s\n 70 0.3753 0.0001 13.59s\n 80 0.3689 -0.0000 13.21s\n 80 0.3704 0.0014 13.64s\n 90 0.3613 0.0000 13.49s\n 90 0.3612 0.0000 13.98s\n 100 0.3550 0.0009 13.90s\n 100 0.3544 0.0000 14.27s\n 200 0.3253 -0.0001 13.08s\n 200 0.3242 0.0000 13.31s\n 300 0.3059 -0.0001 10.13s\n 300 0.3052 -0.0001 10.31s\n 400 0.2913 -0.0000 6.88s\n 400 0.2991 -0.0002 6.97s\n 500 0.2882 -0.0001 3.48s\n 500 0.2903 -0.0000 3.52s\n 600 0.2838 -0.0001 0.00s\n 600 0.2806 -0.0001 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5507 0.0314 23.45s\n 2 0.5425 0.0069 23.70s\n 3 0.5345 0.0051 22.75s\n 4 0.5180 0.0213 23.08s\n 5 0.5127 0.0065 24.59s\n 6 0.4996 0.0029 25.17s\n 7 0.5006 0.0069 25.18s\n Iter Train Loss OOB Improve Remaining Time \n 8 0.4965 0.0036 25.19s\n 1 0.5663 0.0127 20.78s\n 9 0.4882 0.0062 25.64s\n 2 0.5679 0.0059 23.15s\n 3 0.5518 0.0097 23.38s\n 10 0.4830 0.0037 25.56s\n 4 0.5408 0.0097 22.78s\n 5 0.5287 0.0130 22.69s\n 6 0.5311 0.0062 23.22s\n 7 0.5203 0.0052 22.43s\n 8 0.5135 0.0034 22.25s\n 9 0.5098 0.0077 22.53s\n 10 0.5017 0.0068 22.89s\n 20 0.4544 0.0018 23.83s\n 20 0.4720 0.0025 21.20s\n 30 0.4370 0.0040 23.08s\n 30 0.4513 0.0005 20.69s\n 40 0.4166 0.0066 21.92s\n 40 0.4155 0.0058 20.19s\n 50 0.4007 0.0001 21.42s\n 50 0.4054 0.0001 19.98s\n 60 0.3836 0.0008 20.99s\n 60 0.3917 0.0007 
19.74s\n 70 0.3732 0.0001 20.70s\n 70 0.3834 0.0002 19.48s\n 80 0.3683 -0.0000 20.14s\n 80 0.3793 0.0014 19.10s\n 90 0.3591 -0.0001 19.70s\n 90 0.3664 0.0000 18.67s\n 100 0.3565 0.0010 19.32s\n 100 0.3588 -0.0000 18.33s\n 200 0.3174 -0.0002 15.39s\n 200 0.3255 0.0004 15.04s\n 300 0.3063 -0.0001 11.34s\n 300 0.3059 -0.0000 11.11s\n 400 0.2964 -0.0001 7.42s\n 400 0.3011 -0.0002 7.33s\n 500 0.2899 -0.0000 3.68s\n 500 0.2922 -0.0001 3.65s\n 600 0.2855 -0.0001 0.00s\n 600 0.2822 -0.0001 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5439 0.0335 22.04s\n 2 0.5478 0.0051 21.50s\n 3 0.5341 0.0067 25.15s\n 4 0.5227 0.0118 26.47s\n Iter Train Loss OOB Improve Remaining Time \n 5 0.5194 0.0068 25.61s\n 6 0.5120 0.0043 24.70s\n 1 0.5541 0.0289 21.12s\n 7 0.5092 0.0053 24.44s\n 2 0.5457 0.0071 21.83s\n 3 0.5375 0.0051 21.32s\n 8 0.5023 0.0042 24.62s\n 4 0.5199 0.0224 22.18s\n 9 0.4973 0.0055 24.92s\n 5 0.5142 0.0070 21.45s\n 10 0.4907 0.0053 24.90s\n 6 0.4997 0.0048 21.97s\n 7 0.5009 0.0068 22.05s\n 8 0.4967 0.0036 22.73s\n 9 0.4869 0.0069 23.55s\n 10 0.4795 0.0064 23.68s\n 20 0.4660 0.0016 22.69s\n 20 0.4573 0.0020 21.41s\n 30 0.4445 0.0013 21.52s\n 30 0.4384 0.0040 21.37s\n 40 0.4340 0.0012 20.86s\n 40 0.4247 0.0011 20.28s\n 50 0.4216 0.0000 20.05s\n 50 0.4139 0.0001 20.08s\n 60 0.3951 0.0011 19.76s\n 60 0.3931 0.0008 19.69s\n" ], [ "gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_", "_____no_output_____" ], [ "#Tuning min_samples_leaf after updating the latest hyperparameter values i.e max_depth and min_samples_split\n\nparam_test3 = {'min_samples_leaf':np.arange(50,100,10)}\n\ngsearch3 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=600,min_samples_split=600,max_depth=7,max_features='sqrt',verbose = 1, subsample=0.8, random_state=10), \nparam_grid = param_test3, scoring='f1',n_jobs=-1,iid=False, cv=3,verbose = 1)\n\ngsearch3.fit(X,y)\n", "Fitting 3 folds for each of 5 candidates, totalling 15 fits\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5608 0.0197 22.50s\n Iter Train Loss OOB Improve Remaining Time \n 2 0.5547 0.0132 23.49s\n 1 0.5560 0.0190 25.59s\n 3 0.5363 0.0112 23.72s\n 2 0.5578 0.0072 24.78s\n 4 0.5272 0.0084 22.81s\n 3 0.5438 0.0085 24.85s\n 5 0.5108 0.0178 22.51s\n 6 0.5116 0.0063 22.36s\n 4 0.5345 0.0088 25.57s\n 7 0.4978 0.0083 21.21s\n 5 0.5273 0.0104 24.67s\n 8 0.4914 0.0031 20.46s\n 6 0.5216 0.0045 24.37s\n 9 0.4875 0.0083 20.74s\n 7 0.5094 0.0125 24.80s\n 10 0.4806 0.0050 20.91s\n 8 0.4990 0.0077 24.92s\n 9 0.4929 0.0066 24.75s\n 10 0.4874 0.0039 24.35s\n 20 0.4479 0.0027 20.05s\n 20 0.4611 0.0017 22.07s\n 30 0.4311 0.0007 18.87s\n 30 0.4277 0.0026 21.59s\n 40 0.4056 0.0017 18.21s\n 40 0.4085 0.0005 22.27s\n 50 0.3819 0.0000 19.84s\n 50 0.3905 0.0023 23.07s\n 60 0.3642 0.0017 21.35s\n 60 0.3662 0.0021 23.64s\n 70 0.3543 0.0018 22.07s\n 70 0.3542 0.0016 23.84s\n 80 0.3472 0.0014 22.33s\n 80 0.3456 0.0002 24.05s\n 90 0.3379 0.0002 22.26s\n 90 0.3382 -0.0002 23.68s\n 100 0.3319 0.0000 22.08s\n 100 0.3342 0.0008 23.56s\n 200 0.3098 0.0002 19.20s\n 200 0.3104 -0.0002 19.49s\n 300 0.2910 -0.0003 14.44s\n 300 0.2916 -0.0002 14.58s\n 400 0.2845 -0.0001 9.63s\n 400 0.2768 -0.0002 9.69s\n 500 0.2749 -0.0002 4.82s\n 500 0.2724 -0.0002 4.85s\n 600 0.2642 -0.0003 0.00s\n 600 0.2660 -0.0002 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5502 0.0332 27.92s\n 2 0.5401 0.0080 30.75s\n 3 0.5290 0.0070 31.88s\n Iter Train Loss OOB Improve Remaining Time \n 4 0.5146 0.0194 34.92s\n 1 0.5615 0.0184 29.31s\n 5 0.5064 
0.0084 33.85s\n 2 0.5578 0.0094 32.19s\n 6 0.4929 0.0039 32.95s\n 3 0.5416 0.0095 31.38s\n 7 0.4912 0.0082 32.85s\n 4 0.5307 0.0098 29.80s\n 8 0.4846 0.0057 32.65s\n 5 0.5192 0.0125 29.83s\n 9 0.4752 0.0065 33.16s\n 6 0.5188 0.0080 30.75s\n 7 0.5050 0.0085 29.77s\n 10 0.4683 0.0057 34.14s\n 8 0.4982 0.0033 29.33s\n 9 0.4936 0.0082 30.02s\n 10 0.4860 0.0055 30.24s\n 20 0.4386 0.0014 31.33s\n 20 0.4536 0.0027 29.67s\n 30 0.4190 0.0029 31.03s\n 30 0.4356 0.0008 28.76s\n 40 0.4006 0.0038 29.85s\n 40 0.4016 0.0011 28.52s\n 50 0.3871 0.0006 29.09s\n 50 0.3871 0.0000 27.99s\n 60 0.3671 0.0003 28.33s\n 60 0.3680 0.0003 27.91s\n 70 0.3561 -0.0001 28.09s\n 70 0.3602 0.0009 27.60s\n 80 0.3483 0.0004 27.62s\n 80 0.3542 0.0009 26.96s\n 90 0.3414 -0.0000 27.08s\n 90 0.3404 0.0007 26.45s\n 100 0.3386 0.0017 26.81s\n 100 0.3332 0.0003 25.96s\n 200 0.3073 -0.0002 20.77s\n 200 0.3102 0.0001 20.76s\n 300 0.2952 -0.0001 15.28s\n 300 0.2918 -0.0002 15.25s\n 400 0.2847 -0.0002 10.02s\n 400 0.2855 -0.0003 10.04s\n 500 0.2770 -0.0001 4.99s\n 500 0.2748 -0.0002 5.05s\n 600 0.2712 -0.0002 0.00s\n 600 0.2650 -0.0003 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5564 0.0190 29.04s\n 2 0.5594 0.0059 28.15s\n 3 0.5450 0.0088 28.98s\n 4 0.5357 0.0089 31.46s\n 5 0.5300 0.0093 32.03s\n 6 0.5236 0.0050 33.55s\n Iter Train Loss OOB Improve Remaining Time \n 7 0.5109 0.0129 34.40s\n 1 0.5506 0.0323 32.73s\n 2 0.5434 0.0054 31.61s\n 8 0.5001 0.0083 35.26s\n 3 0.5314 0.0072 30.72s\n 9 0.4937 0.0069 35.25s\n 4 0.5143 0.0227 32.43s\n 10 0.4881 0.0039 34.92s\n 5 0.5055 0.0086 31.42s\n 6 0.4913 0.0054 31.42s\n 7 0.4895 0.0088 31.15s\n 8 0.4811 0.0064 31.45s\n 9 0.4722 0.0054 31.42s\n 10 0.4671 0.0045 31.96s\n 20 0.4642 0.0019 31.48s\n 20 0.4393 0.0009 29.75s\n 30 0.4294 0.0032 30.57s\n 30 0.4162 0.0034 29.15s\n 40 0.4108 0.0005 29.16s\n 40 0.4023 0.0007 28.10s\n 50 0.3902 0.0007 28.39s\n 50 0.3879 0.0002 27.63s\n 60 0.3656 0.0024 28.18s\n 60 0.3721 0.0026 27.58s\n" ], [ "gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_", "_____no_output_____" ], [ "param_test5 = {'subsample':[0.6,0.7,0.75,0.8,0.85,0.9]}\n\ngsearch5 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, verbose = 1 , n_estimators=600,max_depth=7,min_samples_split=600, min_samples_leaf=60, subsample=0.8, random_state=10,max_features=7),\nparam_grid = param_test5, scoring='f1',n_jobs=-1,iid=False, cv=3,verbose = 1)\n\ngsearch5.fit(X,y)\n\n", "Fitting 3 folds for each of 6 candidates, totalling 18 fits\n Iter Train Loss OOB Improve Remaining Time \n Iter Train Loss OOB Improve Remaining Time \n 1 0.5577 0.0179 20.63s\n 1 0.5532 0.0279 20.00s\n 2 0.5517 0.0182 22.12s\n 2 0.5497 0.0069 19.12s\n 3 0.5326 0.0080 23.04s\n 3 0.5334 0.0088 20.03s\n 4 0.5306 0.0070 24.55s\n 4 0.5266 0.0077 23.23s\n 5 0.5155 0.0101 26.55s\n 5 0.5191 0.0104 24.10s\n 6 0.5150 0.0072 27.87s\n 6 0.5170 0.0040 25.84s\n 7 0.5147 0.0077 26.68s\n 7 0.5017 0.0074 25.92s\n 8 0.5027 0.0034 26.03s\n 8 0.5104 0.0033 27.33s\n 9 0.5011 0.0068 26.47s\n 9 0.4946 0.0067 27.91s\n 10 0.4924 0.0052 26.76s\n 10 0.4864 0.0050 28.47s\n 20 0.4583 0.0010 27.49s\n 20 0.4618 0.0021 27.91s\n 30 0.4405 0.0002 27.14s\n 30 0.4335 0.0020 28.02s\n 40 0.4032 0.0030 27.54s\n 40 0.4174 0.0039 27.36s\n 50 0.3995 0.0031 26.53s\n 50 0.3932 0.0019 26.91s\n 60 0.3736 0.0023 26.50s\n 60 0.3818 0.0015 26.35s\n 70 0.3620 0.0016 25.91s\n 70 0.3710 0.0010 26.11s\n 80 0.3545 0.0007 25.58s\n 80 0.3585 0.0001 25.55s\n 90 0.3517 0.0001 24.82s\n 90 0.3485 -0.0000 24.97s\n 100 0.3420 -0.0002 
24.36s\n 100 0.3464 0.0004 24.29s\n 200 0.3163 -0.0002 19.13s\n 200 0.3140 -0.0001 19.26s\n 300 0.3032 -0.0001 14.22s\n 300 0.3001 -0.0001 14.38s\n 400 0.2880 -0.0002 9.44s\n 400 0.2925 -0.0002 9.52s\n 500 0.2842 -0.0002 4.70s\n 500 0.2857 -0.0001 4.73s\n 600 0.2802 -0.0002 0.00s\n 600 0.2808 -0.0002 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5557 0.0303 31.60s\n Iter Train Loss OOB Improve Remaining Time \n 2 0.5455 0.0053 33.11s\n 1 0.5601 0.0180 28.59s\n 3 0.5352 0.0067 33.52s\n 2 0.5592 0.0106 31.66s\n 4 0.5309 0.0083 34.92s\n 3 0.5396 0.0093 33.89s\n 5 0.5199 0.0192 35.11s\n 4 0.5329 0.0087 33.38s\n 6 0.5039 0.0070 35.81s\n 5 0.5192 0.0116 33.35s\n 7 0.4911 0.0133 36.04s\n 6 0.5213 0.0079 34.00s\n 8 0.4800 0.0072 36.16s\n 7 0.5107 0.0081 32.61s\n 8 0.5025 0.0033 32.01s\n 9 0.4715 0.0057 35.32s\n 10 0.4626 0.0039 34.63s\n 9 0.5002 0.0080 31.90s\n 10 0.4900 0.0065 31.84s\n 20 0.4383 0.0037 30.72s\n 20 0.4607 0.0029 30.01s\n 30 0.4221 0.0021 29.25s\n 30 0.4340 0.0002 28.94s\n 40 0.4096 0.0003 27.90s\n 40 0.4144 0.0011 28.28s\n 50 0.3872 0.0014 27.17s\n 50 0.3902 0.0002 28.22s\n 60 0.3778 0.0006 26.94s\n 60 0.3698 0.0022 27.93s\n 70 0.3674 0.0006 26.22s\n 70 0.3621 0.0017 27.67s\n 80 0.3560 0.0010 25.50s\n 80 0.3544 0.0005 27.16s\n 90 0.3504 0.0002 25.05s\n 90 0.3451 -0.0001 26.43s\n 100 0.3443 0.0011 24.52s\n 100 0.3401 0.0001 25.79s\n 200 0.3202 0.0000 19.57s\n 200 0.3153 -0.0002 20.44s\n 300 0.3085 -0.0002 14.57s\n 300 0.2969 -0.0001 15.16s\n 400 0.2942 -0.0001 9.62s\n 400 0.2914 -0.0001 9.97s\n 500 0.2884 -0.0002 4.78s\n 500 0.2831 -0.0001 4.96s\n 600 0.2820 -0.0002 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5619 0.0188 30.95s\n 2 0.5627 0.0057 30.62s\n 3 0.5428 0.0086 30.77s\n 4 0.5360 0.0091 32.19s\n 5 0.5295 0.0098 31.95s\n 6 0.5193 0.0071 33.41s\n 7 0.5145 0.0074 33.25s\n 8 0.5091 0.0057 33.26s\n 9 0.4993 0.0059 32.96s\n 600 0.2737 -0.0003 0.00s\n 10 0.4906 0.0063 32.81s\n 20 0.4554 0.0026 30.73s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5537 0.0292 31.95s\n 2 0.5476 0.0052 34.67s\n 3 0.5351 0.0088 34.09s\n 4 0.5206 0.0125 35.49s\n 5 0.5104 0.0165 34.50s\n 6 0.4963 0.0070 34.90s\n 7 0.4929 0.0072 34.56s\n 30 0.4351 0.0012 31.46s\n 8 0.4823 0.0070 35.67s\n 9 0.4729 0.0067 35.53s\n 10 0.4655 0.0037 35.28s\n 40 0.4243 0.0012 30.36s\n 20 0.4371 0.0015 31.93s\n 50 0.4012 0.0005 29.61s\n 30 0.4239 0.0021 30.42s\n 60 0.3714 0.0026 29.05s\n 40 0.4125 0.0004 29.26s\n 70 0.3612 0.0010 28.20s\n 50 0.3966 0.0004 28.57s\n" ], [ "gsearch5.grid_scores_, gsearch5.best_params_, gsearch5.best_score_", "_____no_output_____" ], [ "gbm_tuned_1 = GradientBoostingClassifier(learning_rate=0.1, n_estimators=600,max_depth=7, min_samples_split=600,min_samples_leaf=60, subsample=0.8, random_state=10, max_features=7,verbose=1 )\nmodelfit(gbm_tuned_1,X,predictors)", " Iter Train Loss OOB Improve Remaining Time \n 1 0.5618 0.0216 23.75s\n 2 0.5507 0.0112 27.46s\n 3 0.5395 0.0108 29.15s\n 4 0.5328 0.0097 29.22s\n 5 0.5077 0.0155 30.48s\n 6 0.5121 0.0032 31.05s\n 7 0.5029 0.0070 31.64s\n 8 0.4984 0.0043 31.67s\n 9 0.4862 0.0102 31.91s\n 10 0.4850 0.0024 31.95s\n 20 0.4464 0.0035 31.45s\n 30 0.4240 0.0011 30.33s\n 40 0.4080 0.0007 29.58s\n 50 0.3892 0.0004 29.36s\n 60 0.3731 0.0017 29.31s\n 70 0.3600 0.0001 28.92s\n 80 0.3492 0.0002 28.49s\n 90 0.3479 0.0013 28.24s\n 100 0.3414 0.0002 27.72s\n 200 0.3107 -0.0001 21.76s\n 300 0.2956 -0.0001 16.03s\n 400 0.2917 -0.0001 10.67s\n 500 0.2788 -0.0002 5.30s\n 600 0.2776 -0.0002 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 
1 0.5577 0.0244 22.04s\n 2 0.5478 0.0074 23.30s\n 3 0.5418 0.0063 23.22s\n 4 0.5270 0.0128 23.55s\n 5 0.5111 0.0201 23.91s\n 6 0.4999 0.0084 24.02s\n 7 0.4967 0.0035 23.82s\n 8 0.4938 0.0041 23.87s\n 9 0.4861 0.0029 23.75s\n 10 0.4888 0.0040 24.12s\n 20 0.4596 0.0022 23.04s\n 30 0.4376 0.0003 22.73s\n 40 0.4224 0.0010 22.68s\n 50 0.3956 0.0026 22.58s\n 60 0.3778 0.0033 22.40s\n 70 0.3694 0.0011 22.35s\n 80 0.3585 -0.0000 21.99s\n 90 0.3479 0.0000 21.55s\n 100 0.3411 0.0005 20.93s\n 200 0.3097 0.0001 16.72s\n 300 0.2964 -0.0002 12.51s\n 400 0.2874 -0.0001 8.27s\n 500 0.2790 -0.0002 4.12s\n 600 0.2732 -0.0001 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5561 0.0247 22.62s\n 2 0.5459 0.0121 23.63s\n 3 0.5360 0.0095 23.61s\n 4 0.5230 0.0073 23.26s\n 5 0.5146 0.0124 22.76s\n 6 0.5016 0.0099 24.52s\n 7 0.5032 0.0020 24.84s\n 8 0.4991 0.0034 24.63s\n 9 0.4912 0.0028 24.44s\n 10 0.4921 0.0063 24.51s\n 20 0.4589 0.0017 23.06s\n 30 0.4379 0.0002 22.74s\n 40 0.4123 0.0008 22.96s\n 50 0.3873 0.0018 22.76s\n 60 0.3692 0.0020 22.75s\n 70 0.3641 0.0008 22.56s\n 80 0.3548 0.0005 22.00s\n 90 0.3449 0.0003 21.53s\n 100 0.3387 0.0002 21.07s\n 200 0.3081 0.0000 16.97s\n 300 0.2954 -0.0002 12.51s\n 400 0.2861 -0.0002 8.24s\n 500 0.2807 -0.0001 4.09s\n 600 0.2743 -0.0002 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5563 0.0238 26.41s\n 2 0.5494 0.0119 26.73s\n 3 0.5346 0.0089 25.74s\n 4 0.5265 0.0081 24.17s\n 5 0.5126 0.0141 23.37s\n 6 0.5066 0.0098 23.92s\n 7 0.5001 0.0022 23.79s\n 8 0.4989 0.0034 23.43s\n 9 0.4924 0.0032 23.19s\n 10 0.4881 0.0060 23.15s\n 20 0.4559 0.0016 23.83s\n 30 0.4383 0.0002 23.07s\n 40 0.4135 0.0006 22.98s\n 50 0.3950 -0.0000 23.15s\n 60 0.3774 0.0040 22.85s\n 70 0.3707 0.0005 22.37s\n 80 0.3539 0.0006 22.04s\n 90 0.3496 0.0010 21.81s\n 100 0.3403 0.0008 21.41s\n 200 0.3109 0.0002 16.92s\n 300 0.2970 -0.0001 12.52s\n 400 0.2880 -0.0001 8.27s\n 500 0.2818 -0.0000 4.10s\n 600 0.2747 -0.0001 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5628 0.0217 21.03s\n 2 0.5503 0.0072 19.25s\n 3 0.5338 0.0144 20.61s\n 4 0.5333 0.0052 21.62s\n 5 0.5140 0.0174 21.61s\n 6 0.5026 0.0096 22.30s\n 7 0.4914 0.0118 22.30s\n 8 0.4840 0.0056 22.19s\n 9 0.4849 0.0020 21.74s\n 10 0.4794 0.0029 21.16s\n 20 0.4417 0.0013 22.88s\n 30 0.4184 0.0049 22.46s\n 40 0.4088 0.0001 22.24s\n 50 0.3807 0.0044 22.22s\n 60 0.3713 0.0017 21.80s\n 70 0.3635 -0.0000 21.34s\n 80 0.3493 -0.0000 21.37s\n 90 0.3452 0.0003 20.95s\n 100 0.3397 -0.0002 20.70s\n 200 0.3111 -0.0000 16.77s\n 300 0.3009 -0.0002 12.29s\n 400 0.2863 -0.0001 8.11s\n 500 0.2823 -0.0001 4.03s\n 600 0.2741 -0.0002 0.00s\n Iter Train Loss OOB Improve Remaining Time \n 1 0.5571 0.0277 26.45s\n 2 0.5460 0.0077 22.46s\n 3 0.5283 0.0140 22.39s\n 4 0.5266 0.0069 22.85s\n 5 0.5053 0.0186 22.35s\n 6 0.4988 0.0073 22.59s\n 7 0.4886 0.0083 22.52s\n 8 0.4833 0.0026 22.44s\n 9 0.4867 0.0015 22.03s\n 10 0.4773 0.0043 21.78s\n" ], [ "pred = gbm_tuned_1.predict(test)", "_____no_output_____" ], [ "# Read the submission file\nsubmission=pd.read_csv(\"sample_submission.csv\")", "_____no_output_____" ], [ "submission.head()", "_____no_output_____" ], [ "# Fill the is_pass variable with the predictions\nsubmission['is_promoted']=pred", "_____no_output_____" ], [ "submission['is_promoted'] = submission['is_promoted'].astype(np.int64)", "_____no_output_____" ], [ "submission.head()", "_____no_output_____" ], [ "submission['is_promoted'].value_counts()", "_____no_output_____" ], [ "# Converting the submission file to csv 
format\nsubmission.to_csv('logistic_submission.csv', index=False)", "_____no_output_____" ] ], [ [ "score on leaderboard - 0.71145", "_____no_output_____" ] ] ]
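The deprecation warnings in the outputs above come from `sklearn.cross_validation` and `sklearn.grid_search`, both removed in scikit-learn 0.20. A sketch of roughly equivalent calls against `sklearn.model_selection` is shown below; it reuses the `X`, `y` and parameter grid already defined in this notebook, and note that `grid_scores_` is replaced by the `cv_results_` dictionary (the `iid` argument is simply dropped).

```
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import cross_val_score, GridSearchCV

gbm = GradientBoostingClassifier(random_state=42)

# replaces cross_validation.cross_val_score inside modelfit()
cv_score = cross_val_score(gbm, X, y, cv=5, scoring='f1')
print('CV F1: mean %.4f | std %.4f' % (np.mean(cv_score), np.std(cv_score)))

# replaces sklearn.grid_search.GridSearchCV
param_test1 = {'n_estimators': np.arange(180, 400, 20)}
gsearch = GridSearchCV(
    estimator=GradientBoostingClassifier(learning_rate=0.1, min_samples_split=500,
                                         min_samples_leaf=50, max_depth=5,
                                         max_features='sqrt', subsample=0.8,
                                         random_state=10),
    param_grid=param_test1, scoring='f1', n_jobs=-1, cv=3, verbose=1)
gsearch.fit(X, y)
print(gsearch.best_params_, gsearch.best_score_)
print(gsearch.cv_results_['mean_test_score'])  # grid_scores_ no longer exists
```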
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb0fc41ad3c82d7c0cbf5fa31efea21ce81daf0f
755,737
ipynb
Jupyter Notebook
Time_Series_Forecasting_with_EMD_and_Fully_Convolutional_Neural_Networks_on_the_IRX_data_set.ipynb
3catz/DeepLearning-NLP
2f9289a7118f79cd6462bd536b161d1e2243f16a
[ "MIT" ]
1
2021-05-25T10:27:51.000Z
2021-05-25T10:27:51.000Z
Time_Series_Forecasting_with_EMD_and_Fully_Convolutional_Neural_Networks_on_the_IRX_data_set.ipynb
3catz/DeepLearning-NLP
2f9289a7118f79cd6462bd536b161d1e2243f16a
[ "MIT" ]
null
null
null
Time_Series_Forecasting_with_EMD_and_Fully_Convolutional_Neural_Networks_on_the_IRX_data_set.ipynb
3catz/DeepLearning-NLP
2f9289a7118f79cd6462bd536b161d1e2243f16a
[ "MIT" ]
1
2019-04-07T06:42:52.000Z
2019-04-07T06:42:52.000Z
565.671407
82,988
0.904573
[ [ [ "[View in Colaboratory](https://colab.research.google.com/github/3catz/DeepLearning-NLP/blob/master/Time_Series_Forecasting_with_EMD_and_Fully_Convolutional_Neural_Networks_on_the_IRX_data_set.ipynb)", "_____no_output_____" ], [ "# TIME SERIES FORECASTING -- using Empirical Mode Decomposition with Fully Convolutional Networks for One-step ahead forecasting on the IRX time series. \n", "_____no_output_____" ], [ "# Summary:# \nA noisy time series is additively decomposed into Intrinsic Mode Functions--oscillating, orthogonal basis functions, using the Empirical Mode Decomposition method pionered by Norden Huang. The IMF components are then used as features for a deep convolutional neural network, which can \"learn\" the decomposition--divide and conquer--and thereby improve forecasting performance and offer not only forecasting for the series but also the IMF components going into the future. This allows us to focus on forecasting physically significant or interesting IMFs. Note: This is additive, not multiplicative decomposition, which means that you consider the time series to be the sum of various components, rather than the product of various component functions. What it is--or rather, which is the better model--is something you have to explore. It helps to have domain knowledge, though more advanced forms of spectral analysis can also be used to glean insights in this regard. \n\nIn this notebook, I demonstrate that using the IMFs a features alongside the original time series can do very well in out-of-sample forecasting, in this case, forecasting 1 step ahead. We used a lookback window of 10 lags from the signal as well as the IMFs to help us predict 1-step ahead in the future. Using the R2 coefficient of determination, we can see that the model can account for over 98% of the variation when applied to an out-of-sample forecast. ", "_____no_output_____" ], [ "# Data#\n\n**IRX opening prices**\nIRX is the stock ticker for the [13 Week Treasury Bill](https://finance.yahoo.com/quote/%5EIRX/history/).\nI downloaded the data from [Comp-engine.org, a self-organizing database of time series](https://www.comp-engine.org/#!visualize/25c6285e-3872-11e8-8680-0242ac120002) fully accessible to the public.\n\n", "_____no_output_____" ], [ "# Architecture and Process#\n\n\n\n1. 4 Conv layers, all from the original input, each with 128 hidden units, filter size of 3, dilation rates exponential powers of 2. \n2. Concatenate these 4 layers with the original input--no adding or multiplying, just concatenate on axis = -1.\n3. Deconv with hidden units equal to number of IMF-components, in this case 11. \n4. Add the predicted IMF-components together to reconstruct the signal, which is your yhat prediction for a step ahead.\n5. Compare with ground truth to see how you did. 
\n\n", "_____no_output_____" ] ], [ [ "!pip install pyhht\n!pip install PeakUtils\nfrom sklearn.preprocessing import MinMaxScaler, RobustScaler \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\n\n#import pandas_datareader.data as web\nfrom pandas import Series\nfrom pandas import DataFrame\nfrom pandas import concat\nimport matplotlib.pyplot as plt\nimport os \nfrom scipy.integrate import odeint\n#keras\nfrom keras.models import *\nfrom keras.layers import * \nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras import backend as K\nfrom keras.engine.topology import Layer\nimport peakutils\n#!pip install pyramid-arima\n#from pyramid.arima import auto_arima", "_____no_output_____" ] ], [ [ "# Utilities: series to supervised", "_____no_output_____" ] ], [ [ "def series_to_supervised(data, n_in, n_out, dropnan=True):\n\n\tn_vars = 1 if type(data) is list else data.shape[1]\n\tdf = DataFrame(data)\n\tcols, names = list(), list()\n\t# input sequence (t-n, ... t-1)\n\tfor i in range(n_in, 0, -1):\n\t\tcols.append(df.shift(i))\n\t\tnames += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n\t# forecast sequence (t, t+1, ... t+n)\n\tfor i in range(0, n_out):\n\t\tcols.append(df.shift(-i))\n\t\tif i == 0:\n\t\t\tnames += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n\t\telse:\n\t\t\tnames += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n\t# put it all together\n\tagg = concat(cols, axis=1)\n\tagg.columns = names\n\t# drop rows with NaN values\n\tif dropnan:\n\t\tagg.dropna(inplace=True)\n\treturn agg", "_____no_output_____" ] ], [ [ "# Loading Data ", "_____no_output_____" ] ], [ [ "from google.colab import files \nfiles.upload()\n\n\n", "_____no_output_____" ], [ "import numpy as np \ndata = np.fromfile(\"yourfilehere.dat\", sep = \"\\n\")\nprint(data)\nlen(data)\n", "_____no_output_____" ], [ "import numpy as np\ndata = np.genfromtxt(\"FLYahooop_IRX.csv\", delimiter = \",\"); data = np.asarray(data); data.shape", "_____no_output_____" ], [ "#Plot of Time Series\nfrom scipy.interpolate import interp1d \nplt.figure(figsize=(20,6))\nplt.plot(data)\nplt.tight_layout()\nplt.xlim([0,len(data)])\nplt.show()", "_____no_output_____" ], [ "#Scale the Data to -1,1\nscaler = MinMaxScaler(feature_range = (-1,1)) \nscaled_data = scaler.fit_transform(data.reshape(-1,1))\nscaled_data.shape\nscaled_data = np.squeeze(scaled_data)\nscaled_data.shape", "_____no_output_____" ], [ "scaled_data = np.transpose(scaled_data)", "_____no_output_____" ], [ "# before you do the EMD, cut out the out of sample part so that the EMDs are not constructed with those future values and information contained within them\n\nin_sample = scaled_data[:-1000]; out_sample = scaled_data[-1000:]", "_____no_output_____" ], [ "print(in_sample.shape)", "(9000,)\n" ] ], [ [ "#Empirical Mode Decomposition\n\nFrom Yang et. al (2015), a summary: \n**Empirical mode decomposition (EMD)** technique to decompose the nonstationary signal into a series of intrinsic mode functions (IMFs) [9โ€“11]. This ability makes HHT competitive in processing various composite signals [12โ€“14]. With HHT, complex signals can be decomposed into multiple single-frequency signals that can further be processed by intrinsic mode function of EMD. *After the nonstationary signals have been decomposed into IMFs through EMD, these signals can easily be obtained by Hilbert transform of each mode function*. By doing so, researchers can obtain the instantaneous frequency and amplitude of each IMF. 
With the Hilbert spectrum and Hilbert marginal spectrum of IMFs, people can accurately get the joint distribution of energy with frequency and time and further predict whether IoT equipment is normal or not. Compared with FFT and VT, HHT is a strong adaptive time frequency analysis method.", "_____no_output_____" ] ], [ [ "from pyhht.emd import EMD \nfrom pyhht.visualization import plot_imfs\n\ndecomposer1 = EMD(in_sample, maxiter = 10000)\nimfs1 = decomposer1.decompose()\nprint(\"There are a total of %s IMFs\" % len(imfs1))", "There are a total of 11 IMFs\n" ], [ "#Plot the IMFs, from highest frequency to lowest. The last one should be a monotonic trend function. It is known as the residue,\n#the irreducible trend left after the detrending of the EMD process. \nfor i in range(len(imfs1)):\n fig, ax = plt.subplots(figsize=(25,2))\n fig = plt.plot(imfs1[i])\n plt.show()", "_____no_output_____" ], [ "import numpy as np\nimport pylab as plt\nfrom scipy.signal import hilbert\n\n#from PyEMD import EMD\n\n\ndef instant_phase(imfs):\n \"\"\"Extract analytical signal through Hilbert Transform.\"\"\"\n analytic_signal = hilbert(imfs) # Apply Hilbert transform to each row\n phase = np.unwrap(np.angle(analytic_signal)) # Compute angle between img and real\n return phase\n\nt = np.linspace(0,len(scaled_data),len(scaled_data))\ndt = 1 \n# Extract instantaneous phases and frequencies using Hilbert transform \ninstant_phases = instant_phase(imfs1)\ninstant_freqs = np.diff(instant_phases)/(2*np.pi*dt)\n\n# Create a figure consisting of 3 panels which from the top are the input signal, IMFs and instantaneous frequencies\nfig, axes = plt.subplots(3, figsize=(20,18))\n\n# The top panel shows the input signal\nax = axes[0]\nax.plot(t, scaled_data)\nax.set_ylabel(\"Amplitude [arb. u.]\")\nax.set_title(\"Input signal Channel 1\")\n\n# The middle panel shows all IMFs\nax = axes[1]\nfor num, imf in enumerate(imfs1):\n ax.plot(t, imf, label='IMF %s' %(num+1))\n\n# Label the figure\nax.legend()\nax.set_ylabel(\"Amplitude [arb. u.]\")\nax.set_title(\"IMFs\")\n\n# The bottom panel shows all instantaneous frequencies\nax = axes[2]\nfor num, instant_freq in enumerate(instant_freqs):\n ax.plot(t[:-1], instant_freq, label='IMF %s'%(num+1))\n\n# Label the figure\nax.legend()\nax.set_xlabel(\"Time [s]\")\nax.set_ylabel(\"Inst. Freq. 
[Hz]\")\nax.set_title(\"Huang-Hilbert Transform\")\n\nplt.tight_layout()\nplt.savefig('hht_example', dpi=120)\nplt.show()\n", "_____no_output_____" ] ], [ [ "# Creating Datasets\n", "_____no_output_____" ], [ "*Raw Data, using a certian number of lags; most of my experimentation has beeen with either 10 or 20.", "_____no_output_____" ] ], [ [ "in_sample = in_sample.reshape(-1,1); print(in_sample.shape)", "(9000, 1)\n" ], [ "lookback = 10\ndata_f = series_to_supervised(in_sample, n_in = lookback, n_out = 1, dropnan = True)", "_____no_output_____" ], [ "\nprint(data_f.shape)\ndata_f = np.asarray(data_f)\nXr = data_f[:,:-1]\nY = data_f[:,-1]\nprint(Xr.shape, Y.shape)", "(8990, 11)\n(8990, 10) (8990,)\n" ] ], [ [ "\n", "_____no_output_____" ], [ "# Use the IMFs--which are time series of equal length as the original signal, as features for convolutional/recurrent network.", "_____no_output_____" ] ], [ [ "imfs1.shape\nimfs1 = np.transpose(imfs1, (1,0)); imfs1.shape", "_____no_output_____" ], [ "imf_df = series_to_supervised(imfs1, n_in = lookback, n_out = 1, dropnan = True)\nimf_df = np.expand_dims(imf_df, axis = 1)\nprint(imf_df.shape)", "(8990, 1, 121)\n" ], [ "imf_df = np.reshape(imf_df, (imf_df.shape[0], (lookback +1), imfs1.shape[-1]))\n\nprint(imf_df.shape)\n", "(8990, 11, 11)\n" ], [ "targets = imf_df[:,-1,:]\nprint(targets.shape)", "(8990, 11)\n" ], [ "print(Xr.shape) ", "(8990, 10)\n" ], [ "#so reshape everything properly\ninput_data = np.reshape(Xr, (targets.shape[0],1,lookback))\ntargets = np.reshape(targets,(targets.shape[0],1,targets.shape[1]))\nprint(input_data.shape, targets.shape)", "(8990, 1, 10) (8990, 1, 11)\n" ], [ "#test Y values--completely out of sample. The calculation of the IMFs\n#was not influenced by these values. No information contamination from future to past.\n\nout_df = series_to_supervised(out_sample.reshape(-1,1), n_in = lookback, n_out = 1, dropnan = True)\nprint(out_df.shape); out_df = np.asarray(out_df)\ntestY = out_df[:,-1]\ntestX = out_df[:,:-1]\n\ntestX = np.expand_dims(testX, axis = 1)\nprint(testX.shape,testY.shape)", "(990, 11)\n(990, 1, 10) (990,)\n" ] ], [ [ "# Partial autocorrelation \n\nIf you were doing SARIMA analysis, you would want to know if this series is autoregressive and to what extent. this helps when calculating a good lag for prediction, that is, how many past values you need to accurately predict a future value.", "_____no_output_____" ] ], [ [ "from statsmodels.graphics.tsaplots import plot_acf\nfrom statsmodels.graphics.tsaplots import plot_pacf\n\nfig, axes = plt.subplots(2, figsize=(20,6))\nfig1 = plot_acf(scaled_data,lags = 60, ax = axes[0])\nfig2 = plot_pacf(scaled_data, lags = 100, ax = axes[1])\nplt.show()", "/usr/local/lib/python3.6/dist-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. 
Please use the pandas.tseries module instead.\n from pandas.core import datetools\n" ] ], [ [ "# Network Architecture and Model fitting ", "_____no_output_____" ] ], [ [ "from keras.layers.advanced_activations import *\nfrom keras.regularizers import l1, l2 \nfrom sklearn.metrics import r2_score\nimport keras.backend as K\nfrom keras.layers import ConvLSTM2D\nfrom keras.layers import LeakyReLU\nnp.random.seed(2018) #inputs are (1, 20) and outputs are #(1 time step,17 features)\n\ndef convs(x, n, f, rate, bn = False):\n x = Conv1D(n, f, padding = \"causal\", dilation_rate = rate, activation=\"tanh\")(x)\n if bn == False:\n x = x \n else:\n x = BatchNormalization()(x)\n return x \n \n \ninputs = Input(shape = (1, lookback))\n\nx = convs(x = inputs, n = 128, f = 3, rate = 2, bn = False)\ny = convs(x = inputs, n = 128, f = 3, rate = 4, bn = False)\nu = convs(x = inputs, n = 128, f = 3, rate = 8, bn = False)\nv = convs(x = inputs, n = 128, f = 3, rate = 16, bn = False)\n\nz = concatenate([inputs, x, y, u, v], axis = -1)\nz = Activation(\"tanh\")(z)\nz = Dropout(0.3)(z)\n\npredictions = Conv1D(11, 3, padding = \"causal\", dilation_rate = 1)(z)\n \n\n\nmodel = Model(inputs = inputs, outputs = predictions)\nopt = adam(lr = 1e-3, clipnorm = 1.)\nreduce_lr = ReduceLROnPlateau(monitor='loss', factor = 0.9, patience = 3, min_lr = 1e-5, verbose = 1)\ncheckpointer = ModelCheckpoint(filepath = \"timeseries_weights.hdf5\", verbose = 1, save_best_only = True)\nearly = EarlyStopping(monitor = 'loss', min_delta = 1e-4, patience = 10, verbose = 1)\n\nmodel.compile(optimizer=opt, loss='mse', metrics = [])\nmodel.summary()\n\nhistory = model.fit(input_data, targets, \n epochs = 20, \n batch_size = 128, \n verbose = 1, \n #validation_data = (validX, validY),\n callbacks = [reduce_lr, early],\n shuffle = False) \n", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_30 (InputLayer) (None, 1, 10) 0 \n__________________________________________________________________________________________________\nconv1d_134 (Conv1D) (None, 1, 128) 3968 input_30[0][0] \n__________________________________________________________________________________________________\nconv1d_135 (Conv1D) (None, 1, 128) 3968 input_30[0][0] \n__________________________________________________________________________________________________\nconv1d_136 (Conv1D) (None, 1, 128) 3968 input_30[0][0] \n__________________________________________________________________________________________________\nconv1d_137 (Conv1D) (None, 1, 128) 3968 input_30[0][0] \n__________________________________________________________________________________________________\nconcatenate_28 (Concatenate) (None, 1, 522) 0 input_30[0][0] \n conv1d_134[0][0] \n conv1d_135[0][0] \n conv1d_136[0][0] \n conv1d_137[0][0] \n__________________________________________________________________________________________________\nactivation_19 (Activation) (None, 1, 522) 0 concatenate_28[0][0] \n__________________________________________________________________________________________________\ndropout_26 (Dropout) (None, 1, 522) 0 activation_19[0][0] \n__________________________________________________________________________________________________\nconv1d_138 (Conv1D) (None, 1, 11) 17237 dropout_26[0][0] 
\n==================================================================================================\nTotal params: 33,109\nTrainable params: 33,109\nNon-trainable params: 0\n__________________________________________________________________________________________________\nEpoch 1/20\n8990/8990 [==============================] - 3s 344us/step - loss: 0.0228\nEpoch 2/20\n8990/8990 [==============================] - 1s 99us/step - loss: 0.0160\nEpoch 3/20\n8990/8990 [==============================] - 1s 98us/step - loss: 0.0146\nEpoch 4/20\n8990/8990 [==============================] - 1s 97us/step - loss: 0.0152\nEpoch 5/20\n8990/8990 [==============================] - 1s 99us/step - loss: 0.0148\nEpoch 6/20\n8990/8990 [==============================] - 1s 99us/step - loss: 0.0140\nEpoch 7/20\n8990/8990 [==============================] - 1s 98us/step - loss: 0.0134\nEpoch 8/20\n8990/8990 [==============================] - 1s 99us/step - loss: 0.0131\nEpoch 9/20\n8990/8990 [==============================] - 1s 103us/step - loss: 0.0128\nEpoch 10/20\n8990/8990 [==============================] - 1s 108us/step - loss: 0.0126\nEpoch 11/20\n8990/8990 [==============================] - 1s 106us/step - loss: 0.0124\nEpoch 12/20\n8990/8990 [==============================] - 1s 106us/step - loss: 0.0123\nEpoch 13/20\n8990/8990 [==============================] - 1s 104us/step - loss: 0.0120\nEpoch 14/20\n8990/8990 [==============================] - 1s 104us/step - loss: 0.0117\nEpoch 15/20\n8990/8990 [==============================] - 1s 102us/step - loss: 0.0115\nEpoch 16/20\n8990/8990 [==============================] - 1s 98us/step - loss: 0.0118\nEpoch 17/20\n8990/8990 [==============================] - 1s 98us/step - loss: 0.0120\nEpoch 18/20\n8990/8990 [==============================] - 1s 96us/step - loss: 0.0120\n\nEpoch 00018: ReduceLROnPlateau reducing learning rate to 0.0009000000427477062.\nEpoch 19/20\n8990/8990 [==============================] - 1s 98us/step - loss: 0.0121\nEpoch 20/20\n8990/8990 [==============================] - 1s 96us/step - loss: 0.0121\n" ] ], [ [ "", "_____no_output_____" ] ], [ [ "preds = model.predict(testX, batch_size = 1)\nsummed = np.sum(preds, axis = -1); print(summed.shape)\ntest_preds = summed[:,0]\nplt.plot(test_preds)", "(990, 1)\n" ] ], [ [ "# R2 analysis#\nIn statistics, the coefficient of determination, denoted R2 or r2 and pronounced \"R squared\", is the proportion of the variance in the dependent variable that is predictable from the independent variable(s).\n\nIt is a statistic used in the context of statistical models whose main purpose is either the prediction of future outcomes or the testing of hypotheses, on the basis of other related information. It provides a measure of how well observed outcomes are replicated by the model, based on the proportion of total variation of outcomes explained by the model.[1][2][3]", "_____no_output_____" ] ], [ [ "print(\"Final R2 Score is: {}\".format(r2_score(testY, test_preds)))\n\nfig = plt.figure(figsize = (20,6))\nfig = plt.plot(test_preds, label = \"PREDICTIONS\")\nfig = plt.plot(testY, label = \"TRUE DATA\")\nplt.xlim([0,990])\nplt.legend()\nplt.show()\n\nplt.clf()\nplt.cla()\nplt.close()", "Final R2 Score is: 0.9888552530780896\n" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb0fc76d01f545df64db2562d721977cffceb318
3,687
ipynb
Jupyter Notebook
notebooks/47_bad_pair_generator.ipynb
rootsdev/nama
1210a24e8ee7689619e800653bd11341d667462d
[ "MIT" ]
null
null
null
notebooks/47_bad_pair_generator.ipynb
rootsdev/nama
1210a24e8ee7689619e800653bd11341d667462d
[ "MIT" ]
8
2021-10-16T19:24:20.000Z
2021-11-25T02:28:32.000Z
notebooks/47_bad_pair_generator.ipynb
rootsdev/nama
1210a24e8ee7689619e800653bd11341d667462d
[ "MIT" ]
null
null
null
24.417219
114
0.560618
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "from collections import namedtuple\n\nimport wandb\n\nfrom src.data.utils import load_datasets\nfrom src.models.triplet_loss import get_near_negatives", "_____no_output_____" ], [ "given_surname = \"surname\"\nConfig = namedtuple(\"Config\", \"train_path near_negatives_path\")\nconfig = Config(\n train_path=f\"s3://familysearch-names/processed/tree-hr-{given_surname}-train-unfiltered.csv.gz\",\n near_negatives_path=f\"s3://nama-data/data/processed/tree-hr-{given_surname}-near-negatives.csv.gz\",\n)", "_____no_output_____" ], [ "wandb.init(\n project=\"nama\",\n entity=\"nama\",\n name=\"47_bad_pair_generator\",\n group=given_surname,\n notes=\"\",\n config=config._asdict()\n)", "_____no_output_____" ], [ "[train] = load_datasets([config.train_path])\n\ninput_names_train, weighted_actual_names_train, candidate_names_train = train", "_____no_output_____" ], [ "k = 50\nlower_threshold = 0.8 # 0.6\nupper_threshold = 0.99 # 0.74\nsample_size = 100 # 0\nnear_negatives_train = get_near_negatives(\n input_names_train, weighted_actual_names_train, candidate_names_train, \n k=k, lower_threshold=lower_threshold, upper_threshold=upper_threshold,\n total=sample_size\n)", "_____no_output_____" ], [ "print(\"input_names_train\", len(input_names_train))\n# print(\"filtered_wans_train\", sum(len(wan) for wan in filtered_wans_train))\nprint(\"candidate_names_train\", len(candidate_names_train))\nprint(\"near_negatives_train\", sum(len(negs) for _, negs in near_negatives_train.items()))", "_____no_output_____" ], [ "for ix in range(0, 100):\n print(input_names_train[ix])\n wans = sorted(weighted_actual_names_train[ix], key=lambda x: -x[2])\n print(\" positives\", wans)\n print(\" negatives\", near_negatives_train[input_names_train[ix]])", "_____no_output_____" ], [ "wandb.finish()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb0fce79ad49c44885c6abab2cc2b4752ed1b67f
159,820
ipynb
Jupyter Notebook
_notebooks/2022-05-24-thunder-speech-pronunciation-trainer.ipynb
mizoru/blog
1af9e66afca0d57fce39d82c75f0e7a308de96f8
[ "Apache-2.0" ]
null
null
null
_notebooks/2022-05-24-thunder-speech-pronunciation-trainer.ipynb
mizoru/blog
1af9e66afca0d57fce39d82c75f0e7a308de96f8
[ "Apache-2.0" ]
null
null
null
_notebooks/2022-05-24-thunder-speech-pronunciation-trainer.ipynb
mizoru/blog
1af9e66afca0d57fce39d82c75f0e7a308de96f8
[ "Apache-2.0" ]
null
null
null
122.843966
118,123
0.762915
[ [ [ "<a href=\"https://colab.research.google.com/github/mizoru/blog/blob/master/2022-05-24-thunder-speech-pronunciation-trainer.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Finetuning a pretrained QuartzNet on TIMIT\nusing [thunder-speech](https://github.com/scart97/thunder-speech)", "_____no_output_____" ], [ "I talk more about this project [here on Twitter](https://twitter.com/Irmuzy/status/1529087355377836032)", "_____no_output_____" ] ], [ [ "# hide\n!pip install thunder-speech wandb", "_____no_output_____" ] ], [ [ "Cloning the repository that contains `.csv`'s with processed labels and filepaths, courtesy of my coursemates. ", "_____no_output_____" ] ], [ [ "!git clone https://github.com/mizoru/pronunciation-trainer.git", "_____no_output_____" ] ], [ [ "### Getting the data and the imports ready", "_____no_output_____" ] ], [ [ "from kaggle import api\napi.dataset_download_files('mfekadu/darpa-timit-acousticphonetic-continuous-speech')\nimport zipfile\narchive = zipfile.ZipFile('darpa-timit-acousticphonetic-continuous-speech.zip')\narchive.extractall()", "_____no_output_____" ] ], [ [ "This dataset is going to be used as noise", "_____no_output_____" ] ], [ [ "api.dataset_download_files('chrisfilo/urbansound8k')\nimport zipfile\narchive = zipfile.ZipFile('urbansound8k.zip')\narchive.extractall('data')", "_____no_output_____" ], [ "import thunder\nfrom thunder.callbacks import FinetuneEncoderDecoder\nfrom thunder.finetune import FinetuneCTCModule\nfrom thunder.data.dataset import BaseSpeechDataset\nfrom thunder.data.datamodule import BaseDataModule\nfrom thunder.blocks import conv1d_decoder\nfrom thunder.quartznet.compatibility import load_quartznet_checkpoint\nfrom typing import Any, List, Sequence, Tuple, Union\nimport torch\nfrom torch import Tensor, nn\nfrom thunder.registry import load_pretrained\nfrom thunder.quartznet.compatibility import QuartznetCheckpoint\nfrom pathlib import Path\nimport pandas as pd\nimport librosa\nimport numpy as np\nimport torchaudio\nimport pytorch_lightning as pl\nfrom math import ceil\nfrom IPython.display import Audio", "_____no_output_____" ], [ "labels = pd.read_csv('pronunciation-trainer/dataDS.csv')", "_____no_output_____" ], [ "noise_files = pd.read_csv('data/UrbanSound8K.csv')\nnoise_files = list('data/fold1/' + noise_files[noise_files.fold==1].slice_file_name)", "_____no_output_____" ] ], [ [ "### Setting up Dataset and DataModule for training\nThe commented out code is the transforms I tried.", "_____no_output_____" ] ], [ [ "class TimitDataset(BaseSpeechDataset):\n def __init__(\n self, items: Sequence, force_mono: bool = True, sample_rate: int = 16000,\n time_stretch = None, volume = None, pitch = None, noise_files = None\n # 0.2, 0.2, 2\n ):\n super().__init__(items, force_mono, sample_rate)\n self.librosa_transforms = bool(time_stretch)\n self.time_stretch = time_stretch\n self.volume = volume\n self.pitch = pitch\n self.noise_files = noise_files\n\n def open_audio(self, item) -> Tuple[Tensor, int]:\n audio,sr = self.loader.open_audio(item.Path)\n\n# adding noise\n if self.noise_files:\n idx = int(torch.randint(0, len(self.noise_files), (1,)))\n noise = self.loader(self.noise_files[idx]) \n# this bit of code I got from a course, it gets the loudness ratio right\n noize_level = torch.rand(1) * 40 # from 0 to 40\n noize_energy = torch.norm(noise)\n audio_energy = torch.norm(audio)\n alpha = (audio_energy / noize_energy) * 
torch.pow(10, -noize_level / 20)\n# repeat the noise as many times as wee need\n if noise.shape[1] < audio.shape[1]:\n noise = torch.cat([noise] * ceil(audio.shape[1] / noise.shape[1]), 1)\n noise = noise[:,:audio.shape[1]]\n\n audio = audio + alpha * noise\n audio.clamp_(-1, 1)\n\n # THIS TRANSFORM TAKES FOREVER\n if self.pitch: # AND PROBABLY DOESN'T WORK \n audio = torchaudio.functional.pitch_shift(audio, sr, self.pitch * torch.randn(1)) \n \n if self.volume: # this transform led to CUDA out of memory\n audio = torchaudio.transforms.Vol(torch.abs(1+self.volume*torch.randn(1)))(audio) \n \n # this works, but I didn't get better results with it, might need tuning\n if self.librosa_transforms: audio = audio.numpy().squeeze()\n if self.time_stretch:\n audio = librosa.effects.time_stretch(audio, np.abs(1 + self.time_stretch * np.random.randn()))\n if self.librosa_transforms: audio = torch.Tensor(audio).unsqueeze(0)\n\n return audio, sr \n\n def open_text(self, item) -> str:\n return item.Transcription\n \n def get_item(self, index: int) -> Any:\n return self.items.iloc[index]", "_____no_output_____" ], [ "Audio(TimitDataset(labels, noise_files=noise_files)[159][0], rate=16000)", "_____no_output_____" ], [ "class TimtiDataModule(BaseDataModule):\n def __init__(\n self,\n batch_size: int = 32,\n num_workers: int = 2,\n time_stretch = 0.2, volume = 0.2, pitch = 2, noise_files=None\n ):\n super().__init__(batch_size, num_workers)\n self.time_stretch = time_stretch\n self.volume = volume\n self.pitch = pitch\n self.noise_files = noise_files\n\n def get_dataset(self, split):\n if split != \"train\":\n return TimitDataset(labels[labels[\"is_valid\"]], time_stretch = False, volume = False, pitch = False)\n else:\n return TimitDataset(labels[labels[\"is_valid\"] == False],\n time_stretch = self.time_stretch, volume = self.volume, pitch = self.pitch,\n noise_files = self.noise_files)", "_____no_output_____" ], [ "dm = TimtiDataModule(batch_size=32, noise_files=noise_files)", "_____no_output_____" ] ], [ [ "Getting the tokens from the data", "_____no_output_____" ] ], [ [ "whole = '.'.join([t for t in labels.Transcription])\ntokens = list(set(whole.split('.')))\nlen(tokens)", "_____no_output_____" ], [ "def dot_tokenizer(s:str):\n return s.split('.')", "_____no_output_____" ] ], [ [ "### Adapting pretrained weights", "_____no_output_____" ] ], [ [ "model = FinetuneCTCModule(QuartznetCheckpoint.QuartzNet15x5Base_En,\n decoder_class = conv1d_decoder, tokens = tokens,\n text_kwargs={'custom_tokenizer_function':dot_tokenizer})", "_____no_output_____" ] ], [ [ "These next five cells import the weights of the decoder from a trained models and adapt them into the new decoder.\n\n`correspondences` is a dictionary that assigns every token in the new decoder the corresponding token in the trained decoder to take the model parameters from.", "_____no_output_____" ] ], [ [ "correspnodences = {'s': 's', 'n': 'n', 'dส’': 'j', 'dฬš': 'd', 'w': 'w', 'b': 'b', 'g': 'g', 'm': 'm',\n 'lฬฉ': 'l', 'f': 'f', 'l': 'l', 'j': 'y', 'k': 'k', 'eษช': 'a', 'pฬš': 'p', 'mฬฉ': 'm',\n 'r': 'r', 't': 't', 'h': 'h', 'aสŠ': 'o', 'nฬฉ': 'n', 'i': 'e', 'bฬš': 'b', 'p': 'p',\n 'kฬš': 'k', 'd': 'd', 'u': 'o', 'tฬš': 't', 'z': 'z', 'aษช': 'i', 'v': 'v', 'tสƒ': 'c',\n 'oสŠ': 'o', '<blank>' : '<blank>', 'ษ' : 'e', 'ส‰' : 'o', 'รฐ' : 't', 'ฮธ' : 't', 'ษš' : 'e',\n 'ษฆ' : 'h', 'ล‹' : 'n', 'ส”' : 't', 'ส’' : 's', 'สŠ' : 'o', 'ษพ' : 't', 'ษช' : 'i', 'ษ™ฬฅ' : 'u',\n 'ษ‘' : 'a', 'ษ™' : 'e', 'ษ›' : 'e', 'ษ”ษช' : 'o', 'ษกฬš' : 'g', 'ษ”' : 'o', 'ษจฬž' 
: 'i', 'ล‹ฬฉ' : 'n',\n 'สŒ' : 'u', 'ษพฬƒ' : 'n', 'สƒ' : 's', 'รฆ' : 'a'}", "_____no_output_____" ], [ "def adapt_into_new_decoder(decoder, old_vocab, new_vocab, correspnodences = None):\n if correspnodences == None:\n correspnodences = {k:k[0] for k in new_vocab.keys() if k and k[0] in old_vocab.keys()}\n with torch.no_grad():\n new_decoder = conv1d_decoder(1024, len(new_vocab))\n weight = decoder.weight\n bias = decoder.bias\n for new_token,old_token in correspnodences.items():\n new_decoder.weight[new_vocab[new_token]] = weight[old_vocab[old_token]]\n new_decoder.bias[new_vocab[new_token]] = bias[old_vocab[old_token]]\n return new_decoder", "_____no_output_____" ], [ "checkpoint_model = load_quartznet_checkpoint(QuartznetCheckpoint.QuartzNet15x5Base_En)", "_____no_output_____" ] ], [ [ "These `vocab` dictionaries give the function `adapt_into_new_decoder` the indices in the weight matrix of the decoder for the corresponding tokens.", "_____no_output_____" ] ], [ [ "old_vocab = checkpoint_model.text_transform.vocab.itos\nold_vocab = {k:v for (v, k) in enumerate(old_vocab)}\nnew_vocab = {k:v for (v, k) in enumerate(model.text_transform.vocab.itos)}", "_____no_output_____" ], [ "model.decoder = adapt_into_new_decoder(checkpoint_model.decoder, old_vocab, new_vocab, correspnodences)\ndel checkpoint_model", "_____no_output_____" ] ], [ [ "### Training", "_____no_output_____" ] ], [ [ "import wandb\nfrom pytorch_lightning.loggers import WandbLogger", "_____no_output_____" ], [ "wandb_logger = WandbLogger(project='pronunciation-trainer', name='transform-thunder')", "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mmizoru\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" ] ], [ [ "Setting a higher `encoder_initial_lr_div` led to less overfitting.", "_____no_output_____" ] ], [ [ "trainer = pl.Trainer(\n gpus=-1, # Use all gpus\n max_epochs=30,\n callbacks=[FinetuneEncoderDecoder(unfreeze_encoder_at_epoch=15, encoder_initial_lr_div=100)],\n logger = wandb_logger\n)", "GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nHPU available: False, using: 0 HPUs\n" ], [ "trainer.fit(model = model, datamodule=dm)", "_____no_output_____" ], [ "trainer.validate(model = model, datamodule=dm)", "_____no_output_____" ] ], [ [ "let's save our model for inference", "_____no_output_____" ] ], [ [ "model.to_torchscript(\"QuartzNet_thunderspeech.pt\")", "_____no_output_____" ], [ "wandb.save('QuartzNet_thunderspeech.pt', policy='now')", "_____no_output_____" ], [ "wandb.finish()", "_____no_output_____" ] ], [ [ "### Getting predictions for the app", "_____no_output_____" ] ], [ [ "loader = AudioFileLoader(sample_rate=16000)", "_____no_output_____" ], [ "natives = pd.read_csv('pronunciation-trainer/natives.csv')", "_____no_output_____" ] ], [ [ "I came up with a small list of words that learners might struggle with differentiating.", "_____no_output_____" ] ], [ [ "subset = [\"thin\", \"thing\", \"think\", \"fit\", \"feet\", \"bald\", \"bold\", \"food\", \"foot\",\n \"death\", \"deaf\", \"worm\", \"warm\"]", "_____no_output_____" ], [ "subset_df = natives[natives.replica.isin(subset)]", "_____no_output_____" ] ], [ [ "This dataset contains audio for single words.", "_____no_output_____" ] ], [ [ "!wget https://lingualibre.org/datasets/Q22-eng-English.zip", "--2022-05-23 20:27:30-- https://lingualibre.org/datasets/Q22-eng-English.zip\nResolving lingualibre.org (lingualibre.org)... 
152.228.161.167, 2001:41d0:304:100::4790\nConnecting to lingualibre.org (lingualibre.org)|152.228.161.167|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 321221694 (306M) [application/zip]\nSaving to: โ€˜Q22-eng-English.zipโ€™\n\nQ22-eng-English.zip 100%[===================>] 306.34M 11.6MB/s in 27s \n\n2022-05-23 20:27:57 (11.4 MB/s) - โ€˜Q22-eng-English.zipโ€™ saved [321221694/321221694]\n\n" ], [ "import zipfile\narchive = zipfile.ZipFile('Q22-eng-English.zip')\narchive.extractall()", "_____no_output_____" ] ], [ [ "I get the raw prediction tensors and then convert them into the format I need.", "_____no_output_____" ] ], [ [ "model.eval()\npredicts = []\nfor i in range(len(subset_df)):\n path = str(Path('Q22-eng-English') / '/'.join(subset_df.path.iloc[i].split('/')[2:]))\n # print(path)\n try:\n audio = loader(path)\n predicts.append(model(audio, torch.tensor(audio.shape[0] * [audio.shape[-1]], device=audio.device)))\n except Exception:\n predicts.append(None)\n # print(predicts[-1])", "_____no_output_____" ], [ "vocab = model.text_transform.vocab.itos\nvocab[-1] = ''", "_____no_output_____" ], [ "for i in range(len(predicts)):\n if predicts[i] != None:\n ids = predicts[i][0].argmax(1)[0]\n s = []\n # print(ids)\n if vocab[ids[0]]: s.append(vocab[ids[0]])\n for l in range(1,len(ids)):\n if ids[l-1] != ids[l]:\n new = vocab[ids[l]]\n if new: s.append(new)\n predicts[i] = '.'.join(s)", "_____no_output_____" ], [ "predicts", "_____no_output_____" ], [ "subset_df[\"transcription\"] = predicts", "_____no_output_____" ], [ "subset_df.to_csv(\"native_words_subset.csv\", index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb0fe008e7f8b63eb3334b0ede365e280f18d9cb
101,109
ipynb
Jupyter Notebook
ex07/Guilherme_Pereira/Aula_7_Guilherme_Pereira.ipynb
fabiormazza/IA025_2022S1
6f422faa6492d1030c458274ae11190b5592ac33
[ "MIT" ]
2
2022-03-20T21:16:14.000Z
2022-03-20T22:20:26.000Z
ex07/Guilherme_Pereira/Aula_7_Guilherme_Pereira.ipynb
fabiormazza/IA025_2022S1
6f422faa6492d1030c458274ae11190b5592ac33
[ "MIT" ]
null
null
null
ex07/Guilherme_Pereira/Aula_7_Guilherme_Pereira.ipynb
fabiormazza/IA025_2022S1
6f422faa6492d1030c458274ae11190b5592ac33
[ "MIT" ]
9
2022-03-16T15:39:36.000Z
2022-03-27T14:04:34.000Z
37.240884
267
0.497186
[ [ [ "<a href=\"https://colab.research.google.com/github/unicamp-dl/IA025_2022S1/blob/main/ex07/Guilherme_Pereira/Aula_7_Guilherme_Pereira.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "nome = 'Guilherme Pereira'\nprint(f'Meu nome รฉ {nome}')", "Meu nome รฉ Guilherme Pereira\n" ] ], [ [ "# Exercรญcio: Modelo de Linguagem (Bengio 2003) - MLP + Embeddings", "_____no_output_____" ], [ "Neste exercรญcio iremos treinar uma rede neural simples para prever a proxima palavra de um texto, data as palavras anteriores como entrada. Esta tarefa รฉ chamada de \"Modelagem da Lรญngua\".\n\nEste dataset jรก possui um tamanho razoรกvel e รฉ bem provรกvel que vocรช vai precisar rodar seus experimentos com GPU.\n\nAlguns conselhos รบteis:\n- **ATENร‡รƒO:** o dataset รฉ bem grande. Nรฃo dรช comando de imprimรญ-lo.\n- Durante a depuraรงรฃo, faรงa seu dataset ficar bem pequeno, para que a depuraรงรฃo seja mais rรกpida e nรฃo precise de GPU. Somente ligue a GPU quando o seu laรงo de treinamento jรก estรก funcionando\n- Nรฃo deixe para fazer esse exercรญcio na vรฉspera. Ele รฉ trabalhoso.", "_____no_output_____" ] ], [ [ "# iremos utilizar a biblioteca dos transformers para ter acesso ao tokenizador do BERT.\n!pip install transformers", "Collecting transformers\n Downloading transformers-4.19.2-py3-none-any.whl (4.2 MB)\n\u001b[K |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 4.2 MB 5.2 MB/s \n\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers) (4.64.0)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers) (4.11.3)\nCollecting tokenizers!=0.11.3,<0.13,>=0.11.1\n Downloading tokenizers-0.12.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (6.6 MB)\n\u001b[K |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 6.6 MB 43.8 MB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (1.21.6)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from transformers) (21.3)\nCollecting huggingface-hub<1.0,>=0.1.0\n Downloading huggingface_hub-0.6.0-py3-none-any.whl (84 kB)\n\u001b[K |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 84 kB 2.5 MB/s \n\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.7.0)\nCollecting pyyaml>=5.1\n Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)\n\u001b[K |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 596 kB 19.3 MB/s \n\u001b[?25hRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (2019.12.20)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0,>=0.1.0->transformers) (4.2.0)\nRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->transformers) (3.0.9)\nRequirement already satisfied: zipp>=0.5 in 
/usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers) (3.8.0)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (1.24.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2021.10.8)\nInstalling collected packages: pyyaml, tokenizers, huggingface-hub, transformers\n Attempting uninstall: pyyaml\n Found existing installation: PyYAML 3.13\n Uninstalling PyYAML-3.13:\n Successfully uninstalled PyYAML-3.13\nSuccessfully installed huggingface-hub-0.6.0 pyyaml-6.0 tokenizers-0.12.1 transformers-4.19.2\n" ] ], [ [ "## Importaรงรฃo dos pacotes", "_____no_output_____" ] ], [ [ "import collections\nimport itertools\nimport functools\nimport math\nimport random\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm_notebook\n", "_____no_output_____" ], [ "# Check which GPU we are using\n!nvidia-smi", "Wed May 18 13:43:15 2022 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 37C P8 9W / 70W | 0MiB / 15109MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ], [ "if torch.cuda.is_available(): \n dev = \"cuda:0\"\nelse: \n dev = \"cpu\"\ndevice = torch.device(dev)\nprint('Using {}'.format(device))", "Using cuda:0\n" ] ], [ [ "## Implementaรงรฃo do MyDataset", "_____no_output_____" ] ], [ [ "from typing import List\n\n\ndef tokenize(text: str, tokenizer):\n return tokenizer(text, return_tensors=None, add_special_tokens=False).input_ids\n\n\nclass MyDataset():\n def __init__(self, texts: List[str], tokenizer, context_size: int):\n # Escreva seu cรณdigo aqui\n\n self.tokens, self.target = [], []\n\n for text in texts:\n ids = tokenize(text, tokenizer)\n \n for i in range(len(ids)-context_size):\n\n self.tokens.append(ids[i:i + context_size]) \n self.target.append(ids[i + context_size])\n\n self.tokens = torch.tensor(self.tokens)\n self.target = torch.tensor(self.target)\n\n def __len__(self):\n # Escreva seu cรณdigo aqui\n return len(self.target)\n\n def __getitem__(self, idx):\n # Escreva seu cรณdigo aqui\n return self.tokens[idx], self.target[idx]", "_____no_output_____" ] ], [ [ "## Teste se sua implementaรงรฃo do MyDataset estรก correta", "_____no_output_____" ] ], [ [ "from transformers import BertTokenizer\n\ntokenizer = 
BertTokenizer.from_pretrained(\"neuralmind/bert-base-portuguese-cased\")\n\ndummy_texts = ['Eu gosto de correr', 'Ela gosta muito de comer pizza']\n\ndummy_dataset = MyDataset(texts=dummy_texts, tokenizer=tokenizer, context_size=3)\ndummy_loader = DataLoader(dummy_dataset, batch_size=6, shuffle=False)\nassert len(dummy_dataset) == 5\nprint('passou no assert de tamanho do dataset')\n\nfirst_batch_input, first_batch_target = next(iter(dummy_loader))\n\ncorrect_first_batch_input = torch.LongTensor(\n [[ 3396, 10303, 125],\n [ 1660, 5971, 785],\n [ 5971, 785, 125],\n [ 785, 125, 1847],\n [ 125, 1847, 13779]])\n\ncorrect_first_batch_target = torch.LongTensor([13239, 125, 1847, 13779, 15616])\n\nassert torch.equal(first_batch_input, correct_first_batch_input)\nprint('Passou no assert de input')\nassert torch.equal(first_batch_target, correct_first_batch_target)\nprint('Passou no assert de target')", "_____no_output_____" ] ], [ [ "# Carregamento do dataset ", "_____no_output_____" ], [ "Iremos usar uma pequena amostra do dataset [BrWaC](https://www.inf.ufrgs.br/pln/wiki/index.php?title=BrWaC) para treinar e avaliar nosso modelo de linguagem.", "_____no_output_____" ] ], [ [ "!wget -nc https://storage.googleapis.com/unicamp-dl/ia025a_2022s1/aula7/sample_brwac.txt", "--2022-05-18 13:44:13-- https://storage.googleapis.com/unicamp-dl/ia025a_2022s1/aula7/sample_brwac.txt\nResolving storage.googleapis.com (storage.googleapis.com)... 74.125.139.128, 108.177.12.128, 74.125.26.128, ...\nConnecting to storage.googleapis.com (storage.googleapis.com)|74.125.139.128|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 123983611 (118M) [text/plain]\nSaving to: โ€˜sample_brwac.txtโ€™\n\nsample_brwac.txt 100%[===================>] 118.24M 82.5MB/s in 1.4s \n\n2022-05-18 13:44:15 (82.5 MB/s) - โ€˜sample_brwac.txtโ€™ saved [123983611/123983611]\n\n" ], [ "# Load datasets\ncontext_size = 9\n\nvalid_examples = 100\ntest_examples = 100\ntexts = open('sample_brwac.txt').readlines()\n\n# print('Truncating for debugging purposes.')\n# texts = texts[:500] \n\ntraining_texts = texts[:-(valid_examples + test_examples)]\nvalid_texts = texts[-(valid_examples + test_examples):-test_examples]\ntest_texts = texts[-test_examples:]\n\ntraining_dataset = MyDataset(texts=training_texts, tokenizer=tokenizer, context_size=context_size)\nvalid_dataset = MyDataset(texts=valid_texts, tokenizer=tokenizer, context_size=context_size)\ntest_dataset = MyDataset(texts=test_texts, tokenizer=tokenizer, context_size=context_size)", "_____no_output_____" ], [ "print(f'training examples: {len(training_dataset)}')\nprint(f'valid examples: {len(valid_dataset)}')\nprint(f'test examples: {len(test_dataset)}')", "training examples: 27675945\nvalid examples: 82070\ntest examples: 166726\n" ], [ "class LanguageModel(torch.nn.Module):\n\n def __init__(self, vocab_size, context_size, embedding_dim, hidden_size):\n \"\"\"\n Implements the Neural Language Model proposed by Bengio et al.\"\n\n Args:\n vocab_size (int): Size of the input vocabulary.\n context_size (int): Size of the sequence to consider as context for prediction.\n embedding_dim (int): Dimension of the embedding layer for each word in the context.\n hidden_size (int): Size of the hidden layer.\n \"\"\"\n # Escreva seu cรณdigo aqui.\n\n super(LanguageModel, self).__init__()\n\n self.context_size = context_size\n self.embeddings_dim = embedding_dim\n\n self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.hidden_layer1 = 
nn.Linear(self.context_size*self.embeddings_dim, hidden_size*4)\n self.hidden_layer2 = nn.Linear(hidden_size*4, hidden_size*2)\n self.hidden_layer3 = nn.Linear(hidden_size*2, hidden_size)\n self.output_layer = nn.Linear(hidden_size, vocab_size, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, inputs):\n \"\"\"\n Args:\n inputs is a LongTensor of shape (batch_size, context_size)\n \"\"\"\n # Escreva seu cรณdigo aqui.\n\n out = self.embeddings(inputs).view(-1, self.context_size*self.embeddings_dim)\n out = self.relu(self.hidden_layer1(out))\n out = self.relu(self.hidden_layer2(out))\n out = self.relu(self.hidden_layer3(out))\n\n return self.output_layer(out)", "_____no_output_____" ] ], [ [ "## Teste o modelo com um exemplo", "_____no_output_____" ] ], [ [ "model = LanguageModel(\n vocab_size=tokenizer.vocab_size,\n context_size=context_size,\n embedding_dim=64,\n hidden_size=128,\n).to(device)\n\nsample_train, _ = next(iter(DataLoader(training_dataset)))\nsample_train_gpu = sample_train.to(device)\nmodel(sample_train_gpu).shape", "_____no_output_____" ], [ "num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\nprint(f'Number of model parameters: {num_params}')", "Number of model parameters: 6180096\n" ] ], [ [ "## Assert da Perplexidade\n", "_____no_output_____" ] ], [ [ "random.seed(123)\nnp.random.seed(123)\ntorch.manual_seed(123)\n\n\ndef perplexity(logits, target):\n \"\"\"\n Computes the perplexity.\n\n Args:\n logits: a FloatTensor of shape (batch_size, vocab_size)\n target: a LongTensor of shape (batch_size,)\n\n Returns:\n A float corresponding to the perplexity.\n \"\"\"\n # Escreva seu cรณdigo aqui.\n return torch.exp(nn.functional.cross_entropy(logits,target))\n\n\nn_examples = 1000\n\nsample_train, target_token_ids = next(iter(DataLoader(training_dataset, batch_size=n_examples)))\nsample_train_gpu = sample_train.to(device)\ntarget_token_ids = target_token_ids.to(device)\nlogits = model(sample_train_gpu)\n\nmy_perplexity = perplexity(logits=logits, target=target_token_ids)\n\nprint(f'my perplexity: {int(my_perplexity)}')\nprint(f'correct initial perplexity: {tokenizer.vocab_size}')\n\nassert math.isclose(my_perplexity, tokenizer.vocab_size, abs_tol=2000)\nprint('Passou o no assert da perplexidade')", "my perplexity: 30139\ncorrect initial perplexity: 29794\nPassou o no assert da perplexidade\n" ] ], [ [ "## Laรงo de Treinamento e Validaรงรฃo", "_____no_output_____" ] ], [ [ "max_examples = 200_000_000\neval_every_steps = 5000\nlr = 3.5e-5\nbatch_size = 1024\n\n\nmodel = LanguageModel(\n vocab_size=tokenizer.vocab_size,\n context_size=context_size,\n embedding_dim=128,\n hidden_size=256,\n).to(device)\n\ntrain_loader = DataLoader(training_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\nvalidation_loader = DataLoader(valid_dataset, batch_size=batch_size)\n\noptimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n\ndef train_step(input, target):\n model.train()\n model.zero_grad()\n\n logits = model(input.to(device))\n loss = nn.functional.cross_entropy(logits, target.to(device))\n loss.backward()\n optimizer.step()\n\n return loss.item()\n\n\ndef validation_step(input, target):\n model.eval()\n logits = model(input)\n loss = nn.functional.cross_entropy(logits, target)\n return loss.item()\n\n\ntrain_losses = []\nn_examples = 0\nstep = 0\nver = 0\nwhile n_examples < max_examples:\n for input, target in train_loader:\n loss = train_step(input.to(device), target.to(device)) \n train_losses.append(loss)\n \n if step % eval_every_steps == 
0:\n train_ppl = np.exp(np.average(train_losses))\n\n with torch.no_grad():\n valid_ppl = np.exp(np.average([\n validation_step(input.to(device), target.to(device))\n for input, target in validation_loader]))\n\n print(f'{step} steps; {n_examples} examples so far; train ppl: {train_ppl:.2f}, valid ppl: {valid_ppl:.2f}')\n train_losses = []\n\n n_examples += len(input) # Increment of batch size\n step += 1\n if n_examples >= max_examples:\n break", "0 steps; 0 examples so far; train ppl: 29804.25, valid ppl: 29721.71\n5000 steps; 5120000 examples so far; train ppl: 1493.07, valid ppl: 1092.24\n10000 steps; 10240000 examples so far; train ppl: 931.18, valid ppl: 815.84\n15000 steps; 15360000 examples so far; train ppl: 741.66, valid ppl: 680.47\n20000 steps; 20480000 examples so far; train ppl: 634.24, valid ppl: 590.17\n25000 steps; 25600000 examples so far; train ppl: 557.52, valid ppl: 522.47\n30000 steps; 30720000 examples so far; train ppl: 494.38, valid ppl: 471.37\n35000 steps; 35840000 examples so far; train ppl: 448.05, valid ppl: 429.20\n40000 steps; 40960000 examples so far; train ppl: 412.57, valid ppl: 397.26\n45000 steps; 46080000 examples so far; train ppl: 383.29, valid ppl: 370.07\n50000 steps; 51200000 examples so far; train ppl: 359.91, valid ppl: 347.94\n55000 steps; 56320000 examples so far; train ppl: 338.07, valid ppl: 329.40\n60000 steps; 61440000 examples so far; train ppl: 316.18, valid ppl: 314.70\n65000 steps; 66560000 examples so far; train ppl: 302.64, valid ppl: 301.85\n70000 steps; 71680000 examples so far; train ppl: 290.70, valid ppl: 290.05\n75000 steps; 76800000 examples so far; train ppl: 280.43, valid ppl: 280.18\n80000 steps; 81920000 examples so far; train ppl: 271.02, valid ppl: 270.13\n85000 steps; 87040000 examples so far; train ppl: 257.56, valid ppl: 262.13\n90000 steps; 92160000 examples so far; train ppl: 249.44, valid ppl: 255.08\n95000 steps; 97280000 examples so far; train ppl: 243.72, valid ppl: 248.32\n100000 steps; 102400000 examples so far; train ppl: 238.55, valid ppl: 242.19\n105000 steps; 107520000 examples so far; train ppl: 232.52, valid ppl: 237.09\n110000 steps; 112640000 examples so far; train ppl: 225.39, valid ppl: 232.94\n115000 steps; 117760000 examples so far; train ppl: 217.61, valid ppl: 227.38\n120000 steps; 122880000 examples so far; train ppl: 214.82, valid ppl: 224.05\n125000 steps; 128000000 examples so far; train ppl: 212.19, valid ppl: 219.67\n130000 steps; 133120000 examples so far; train ppl: 208.46, valid ppl: 215.56\n135000 steps; 138240000 examples so far; train ppl: 205.03, valid ppl: 211.95\n140000 steps; 143360000 examples so far; train ppl: 196.63, valid ppl: 209.30\n145000 steps; 148480000 examples so far; train ppl: 194.68, valid ppl: 206.74\n150000 steps; 153600000 examples so far; train ppl: 193.82, valid ppl: 203.30\n155000 steps; 158720000 examples so far; train ppl: 191.21, valid ppl: 201.21\n160000 steps; 163840000 examples so far; train ppl: 189.07, valid ppl: 197.90\n165000 steps; 168960000 examples so far; train ppl: 184.42, valid ppl: 196.50\n170000 steps; 174080000 examples so far; train ppl: 180.87, valid ppl: 194.34\n175000 steps; 179200000 examples so far; train ppl: 179.85, valid ppl: 191.99\n180000 steps; 184320000 examples so far; train ppl: 178.22, valid ppl: 189.92\n185000 steps; 189440000 examples so far; train ppl: 176.95, valid ppl: 188.07\n190000 steps; 194560000 examples so far; train ppl: 175.07, valid ppl: 186.08\n195000 steps; 199680000 examples so far; train ppl: 169.51, valid 
ppl: 185.38\n" ] ], [ [ "## Avaliaรงรฃo final no dataset de teste\n\n\nBonus: o modelo com menor perplexidade no dataset de testes ganharรก 0.5 ponto na nota final.", "_____no_output_____" ] ], [ [ "test_loader = DataLoader(test_dataset, batch_size=64)\n\nwith torch.no_grad():\n test_ppl = np.exp(np.average([\n validation_step(input.to(device), target.to(device))\n for input, target in test_loader\n ]))\n\nprint(f'test perplexity: {test_ppl}')", "test perplexity: 171.63801942647711\n" ] ], [ [ "## Teste seu modelo com uma sentenรงa\n\nEscolha uma sentenรงa gerada pelo modelo que ache interessante.", "_____no_output_____" ] ], [ [ "prompt = 'Eu estou sozinho, sinto muita falta da minha namorada'\nmax_output_tokens = 10\n\nfor _ in range(max_output_tokens):\n input_ids = tokenize(text=prompt, tokenizer=tokenizer)\n input_ids_truncated = input_ids[-context_size:] # Usamos apenas os รบltimos <context_size> tokens como entrada para o modelo.\n logits = model(torch.LongTensor([input_ids_truncated]).to(device))\n # Ao usarmos o argmax, a saรญda do modelo em cada passo รฉ token de maior probabilidade.\n # Isso se chama decodificaรงรฃo gulosa (greedy decoding).\n predicted_id = torch.argmax(logits).item()\n input_ids += [predicted_id] # Concatenamos a entrada com o token escolhido nesse passo.\n prompt = tokenizer.decode(input_ids)\n print(prompt)", "Eu estou sozinho, sinto muita falta da minha namorada,\nEu estou sozinho, sinto muita falta da minha namorada, e\nEu estou sozinho, sinto muita falta da minha namorada, e nรฃo\nEu estou sozinho, sinto muita falta da minha namorada, e nรฃo sei\nEu estou sozinho, sinto muita falta da minha namorada, e nรฃo sei se\nEu estou sozinho, sinto muita falta da minha namorada, e nรฃo sei se o\nEu estou sozinho, sinto muita falta da minha namorada, e nรฃo sei se o que\nEu estou sozinho, sinto muita falta da minha namorada, e nรฃo sei se o que รฉ\nEu estou sozinho, sinto muita falta da minha namorada, e nรฃo sei se o que รฉ o\nEu estou sozinho, sinto muita falta da minha namorada, e nรฃo sei se o que รฉ o que\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb0fe1d660457e3a8ba6b3e1ecf18be38891aeb5
7,731
ipynb
Jupyter Notebook
train-r2rNet.ipynb
YCL92/deepSelfie
6ca0c0872b8f1ec1f5e784e4630094e2d035fcf6
[ "MIT" ]
8
2020-07-24T04:58:47.000Z
2020-12-21T09:14:20.000Z
train-r2rNet.ipynb
YCL92/deepSelfie
6ca0c0872b8f1ec1f5e784e4630094e2d035fcf6
[ "MIT" ]
null
null
null
train-r2rNet.ipynb
YCL92/deepSelfie
6ca0c0872b8f1ec1f5e784e4630094e2d035fcf6
[ "MIT" ]
1
2021-01-23T08:35:03.000Z
2021-01-23T08:35:03.000Z
26.84375
82
0.494632
[ [ [ "# Network Training", "_____no_output_____" ], [ "## Includes", "_____no_output_____" ] ], [ [ "# mass includes\nimport os, sys, warnings\nimport ipdb\nimport torch as t\nimport torchnet as tnt\nfrom tqdm.notebook import tqdm\n\n# add paths for all sub-folders\npaths = [root for root, dirs, files in os.walk('.')]\nfor item in paths:\n sys.path.append(item)\n\nfrom ipynb.fs.full.config import r2rNetConf\nfrom ipynb.fs.full.monitor import Visualizer\nfrom ipynb.fs.full.network import r2rNet\nfrom ipynb.fs.full.dataLoader import r2rSet\nfrom ipynb.fs.full.util import *", "_____no_output_____" ] ], [ [ "## Initialization", "_____no_output_____" ] ], [ [ "# for debugging only\n%pdb off\nwarnings.filterwarnings('ignore')\n\n# choose GPU if available\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\ndevice = t.device('cuda' if t.cuda.is_available() else 'cpu')\n\n# define model\nopt = r2rNetConf()\nmodel = r2rNet().to(device)\n\n# load pre-trained model if necessary\nif opt.save_root:\n last_epoch = model.load(opt.save_root)\n last_epoch += opt.save_epoch\nelse:\n last_epoch = 0\n\n# dataloader for training\ntrain_dataset = r2rSet(opt, mode='train')\ntrain_loader = t.utils.data.DataLoader(train_dataset,\n batch_size=opt.batch_size,\n shuffle=True,\n num_workers=opt.num_workers,\n pin_memory=True)\n\n# dataloader for validation\nval_dataset = r2rSet(opt, mode='val')\nval_loader = t.utils.data.DataLoader(val_dataset)\n\n# optimizer\nlast_lr = opt.lr * opt.lr_decay**(last_epoch // opt.upd_freq)\noptimizer = t.optim.Adam(model.parameters(), lr=last_lr)\nscheduler = t.optim.lr_scheduler.StepLR(optimizer,\n step_size=opt.upd_freq,\n gamma=opt.lr_decay)\n\n# visualizer\nvis = Visualizer(env='r2rNet', port=8686)\nloss_meter = tnt.meter.AverageValueMeter()", "_____no_output_____" ] ], [ [ "## Validation", "_____no_output_____" ] ], [ [ "def validate():\n # set to evaluation mode\n model.eval()\n\n psnr = 0.0\n for (raw_patch, srgb_patch, cam_wb) in val_loader:\n with t.no_grad():\n # copy to device\n raw_patch = raw_patch.to(device)\n srgb_patch = srgb_patch.to(device)\n rggb_patch = toRGGB(srgb_patch)\n cam_wb = cam_wb.to(device)\n\n # inference\n pred_patch = model(rggb_patch, cam_wb)\n pred_patch = t.clamp(pred_patch, 0.0, 1.0)\n\n # compute psnr\n mse = t.mean((pred_patch - raw_patch)**2)\n psnr += 10 * t.log10(1 / mse)\n psnr /= len(val_loader)\n\n # set to training mode\n model.train(mode=True)\n\n return psnr", "_____no_output_____" ] ], [ [ "## Training entry", "_____no_output_____" ] ], [ [ "for epoch in tqdm(range(last_epoch, opt.max_epoch),\n desc='epoch',\n total=opt.max_epoch - last_epoch):\n # reset meter and update learning rate\n loss_meter.reset()\n scheduler.step()\n\n for (raw_patch, srgb_patch, cam_wb) in train_loader:\n # reset gradient\n optimizer.zero_grad()\n\n # copy to device\n raw_patch = raw_patch.to(device)\n srgb_patch = srgb_patch.to(device)\n rggb_patch = toRGGB(srgb_patch)\n cam_wb = cam_wb.to(device)\n\n # inference\n pred_patch = model(rggb_patch, cam_wb)\n\n # compute loss\n loss = t.mean(t.abs(pred_patch - raw_patch))\n\n # backpropagation\n loss.backward()\n optimizer.step()\n\n # add to loss meter for logging\n loss_meter.add(loss.item())\n\n # show training status\n vis.plot('loss', loss_meter.value()[0])\n gt_img = raw2Img(raw_patch[0, :, :, :],\n wb=opt.d65_wb,\n cam_matrix=opt.cam_matrix)\n pred_img = raw2Img(pred_patch[0, :, :, :],\n wb=opt.d65_wb,\n cam_matrix=opt.cam_matrix)\n vis.img('gt/pred/mask', 
t.cat([gt_img, pred_img], dim=2).cpu() * 255)\n\n # save model and do validation\n if (epoch + 1) > opt.save_epoch or (epoch + 1) % 50 == 0:\n model.save()\n psnr = validate()\n vis.log('epoch: %d, psnr: %.2f' % (epoch, psnr))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb0ff4d981cd6ab8734c461c23df2ed5f3b228be
6,193
ipynb
Jupyter Notebook
graphs_trees/tree_bfs/bfs_challenge.ipynb
hanbf/interactive-coding-challenges
1676ac16c987e35eeb4be6ab57a3c10ed9b71b8b
[ "Apache-2.0" ]
null
null
null
graphs_trees/tree_bfs/bfs_challenge.ipynb
hanbf/interactive-coding-challenges
1676ac16c987e35eeb4be6ab57a3c10ed9b71b8b
[ "Apache-2.0" ]
null
null
null
graphs_trees/tree_bfs/bfs_challenge.ipynb
hanbf/interactive-coding-challenges
1676ac16c987e35eeb4be6ab57a3c10ed9b71b8b
[ "Apache-2.0" ]
null
null
null
25.485597
282
0.477313
[ [ [ "This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).", "_____no_output_____" ], [ "# Challenge Notebook", "_____no_output_____" ], [ "## Problem: Implement breadth-first traversal on a binary tree.\n\n* [Constraints](#Constraints)\n* [Test Cases](#Test-Cases)\n* [Algorithm](#Algorithm)\n* [Code](#Code)\n* [Unit Test](#Unit-Test)", "_____no_output_____" ], [ "## Constraints\n\n* Can we assume we already have a Node class with an insert method?\n * Yes\n* Can we assume this fits in memory?\n * Yes\n* What should we do with each node when we process it?\n * Call an input method `visit_func` on the node", "_____no_output_____" ], [ "## Test Cases\n\n### Breadth-First Traversal\n* 5, 2, 8, 1, 3 -> 5, 2, 8, 1, 3", "_____no_output_____" ], [ "## Algorithm\n\nRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/tree_bfs/bfs_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.", "_____no_output_____" ], [ "## Code", "_____no_output_____" ] ], [ [ "# %load ../bst/bst.py\nclass Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n def __repr__(self):\n return str(self.data)\n\n\nclass Bst(object):\n\n def __init__(self, root=None):\n self.root = root\n\n def insert(self, data):\n if data is None:\n raise TypeError('data cannot be None')\n if self.root is None:\n self.root = Node(data)\n return self.root\n else:\n return self._insert(self.root, data)\n\n def _insert(self, node, data):\n if node is None:\n return Node(data)\n if data <= node.data:\n if node.left is None:\n node.left = self._insert(node.left, data)\n node.left.parent = node\n return node.left\n else:\n return self._insert(node.left, data)\n else:\n if node.right is None:\n node.right = self._insert(node.right, data)\n node.right.parent = node\n return node.right\n else:\n return self._insert(node.right, data)", "_____no_output_____" ], [ "class BstBfs(Bst):\n\n def bfs(self, visit_func):\n queue = [self.root]\n index = 0\n \n while index < len(queue):\n node = queue[index]\n index += 1\n visit_func(node)\n if node.left is not None:\n queue.append(node.left)\n \n if node.right is not None:\n queue.append(node.right)\n \n ", "_____no_output_____" ] ], [ [ "## Unit Test", "_____no_output_____" ] ], [ [ "%run ../utils/results.py", "_____no_output_____" ], [ "# %load test_bfs.py\nfrom nose.tools import assert_equal\n\n\nclass TestBfs(object):\n\n def __init__(self):\n self.results = Results()\n\n def test_bfs(self):\n bst = BstBfs(Node(5))\n bst.insert(2)\n bst.insert(8)\n bst.insert(1)\n bst.insert(3)\n bst.bfs(self.results.add_result)\n assert_equal(str(self.results), '[5, 2, 8, 1, 3]')\n\n print('Success: test_bfs')\n\n\ndef main():\n test = TestBfs()\n test.test_bfs()\n\n\nif __name__ == '__main__':\n main()", "Success: test_bfs\n" ] ], [ [ "## Solution Notebook\n\nReview the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/tree_bfs/bfs_solution.ipynb) for a discussion on algorithms and code solutions.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb0ff5362cfa922d1595e1754188050af3b73d80
13,302
ipynb
Jupyter Notebook
vote_analysis/precincts-join-census.ipynb
lrayle/sf-voting
7c878f4b85716ae2507194093fa459278290e8f4
[ "BSD-3-Clause" ]
null
null
null
vote_analysis/precincts-join-census.ipynb
lrayle/sf-voting
7c878f4b85716ae2507194093fa459278290e8f4
[ "BSD-3-Clause" ]
null
null
null
vote_analysis/precincts-join-census.ipynb
lrayle/sf-voting
7c878f4b85716ae2507194093fa459278290e8f4
[ "BSD-3-Clause" ]
null
null
null
32.602941
233
0.58713
[ [ [ "# Spatial merge census and precinct data\n\nThis notebook will join precincts with census data. \n\n Spatial unit of analysis is the precinct. \n The aim is to join census data to each precinct. The problem is the precinct and block group boundaries don't match up. \n \n So, calculate census values for each precinct this way:\n\nFor each precinct, variable value is a weighted average of the values of the bg's with which that precinct overlaps. \n\n x_A = p_A1 \\* x_1 + p_A2 \\* x_2\n \n where\n \n x_A = variable x for precinct A, block group 1\n \n p_A1 = proportion of precinct A's area that is in block group 1\n ", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nfrom geopandas import GeoDataFrame, read_file\nfrom geopandas.tools import overlay\nimport pandas as pd\nimport spatial_processing_functions as spf\n#import importlib\n#importlib.reload(spf)\n", "_____no_output_____" ] ], [ [ "SF voting precincts. Boundaries are updated every ten years, and become active two years after the census. \nWe have 1992, 2002, and 2012. \nyears = ['1990','2000','2010','2009','2014']\n\n1990 census data -> 1992 precinct + 1990 bg (missing)\n2000, 2009 census data -> 2002 precincts + 2000 bgs\n2010, 2014 census data -> 2012 precincts + 2010 bgs", "_____no_output_____" ], [ "## Step 1: load precinct and census geography shapefiles", "_____no_output_____" ], [ " We'll need the following combinations of censusXprecinct:\n \n- ce2000pre1992, ce2000pre2002, ce2007pre2002, ce2012pre2012 <- for census data\n \n- 'bg2000pre1992', 'bg2000pre2002', 'bg2010pre2012' <- for block groups (since ce2007 data uses 2000 bg boundaries)", "_____no_output_____" ] ], [ [ "\n\nbgXprec = dict.fromkeys(['bg2000pre1992', 'bg2000pre2002', 'bg2010pre2012'])\nfor yr_key in bgXprec.keys():\n bgs = spf.load_bg_shp(yr_key[2:6])\n precincts = spf.load_prec_shp(yr_key[9:13])\n precincts = spf.reproject_prec(precincts)\n bgXprec[yr_key] = spf.merge_precinct_bg(precincts,bgs,yr_key)\n \n\n#yr_key ='bg2010pre2012'\n#bgs = load_bg_shp(yr_key[2:6])\n#precincts = load_prec_shp(yr_key[9:13])\n#bgXprec[yr_key] = merge_precinct_bg(precincts,bgs,yr_key)", "omitted 1 row(s) with missing geometry\nYear 1992: total 712 precincts\nworking on intersection for year bg2000pre1992" ], [ "bgXprec.keys()", "_____no_output_____" ] ], [ [ "## Merge with census data", "_____no_output_____" ] ], [ [ "# We'll need the following combinations of censusXprecinct\n#ce2000pre1992, ce2000pre2002, ce2007pre2002, ce2012pre2012 <- for census data\n#'bg2000pre1992', 'bg2000pre2002', 'bg2010pre2012' <- for block groups\n\n\n# dictionary for matching correct year. \n# (although we don't actually need 1990 data. )\ncensus2bg_year = {'1990':'1990', '2000':'2000','2010':'2010','2007':'2000','2012':'2010'}\n\nce2bgXpre={'ce2000pre1992':'bg2000pre1992','ce2000pre2002':'bg2000pre2002','ce2007pre2002':'bg2000pre2002','ce2012pre2012':'bg2010pre2012'}", "_____no_output_____" ], [ "\n# load census data, for each year. Then merge with the appropriate bg/precinct file. \n\ncensus_data_by_precinct = dict.fromkeys(['ce2000pre1992', 'ce2000pre2002', 'ce2007pre2002', 'ce2012pre2012'])\nfor yr_key in census_data_by_precinct.keys():\n print('\\n',yr_key)\n census_yr = yr_key[2:6]\n census_df = spf.load_census_data(census_yr)\n \n #lookup correct bgXprec dataframe to use.\n bg_key = ce2bgXpre[yr_key]\n \n # now merge. 
\n print('{} precincts before'.format(len(bgXprec[bg_key].precname.unique())))\n df_merged = pd.merge(bgXprec[bg_key], census_df, on = 'geoid')\n print('{} precincts after'.format(len(df_merged.precname.unique())))\n \n vars_to_use = spf.get_vars_to_use()\n cols_to_keep = vars_to_use + ['precname','area_m','intersect_area','geoid']\n df_merged = df_merged[cols_to_keep]\n df_merged_calc = spf.calc_variables(df_merged, vars_to_use) # leave off geo columns, obviously\n \n # aggregate back to precinct level. \n df_new = spf.agg_vars_by_prec(df_merged_calc)\n \n # clean up by dropping unweighted and other unneeded columns\n df_new.drop(vars_to_use, axis=1, inplace=True)\n df_new.drop(['intersect_area','prop_area'], axis=1, inplace=True)\n df_new = spf.rename_wgt_cols(df_new, vars_to_use)\n \n # store data frame in a dictionary\n census_data_by_precinct[yr_key] = df_new\n # also save as csv. \n spf.save_census_data(df_new,yr_key)\n\n \n", "\n ce2000pre2002\n586 precincts before\n586 precincts after\nSum >1.1 or <.97:\n precname\n2009 0.968559\nName: prop_area, dtype: float64\nsaved as census_by_precinct_ce2000pre2002.csv\n\n ce2007pre2002\n586 precincts before\n586 precincts after\nSum >1.1 or <.97:\n precname\n2009 0.968559\nName: prop_area, dtype: float64\nsaved as census_by_precinct_ce2007pre2002.csv\n\n ce2012pre2012\n604 precincts before\n604 precincts after\nSum >1.1 or <.97:\n Series([], Name: prop_area, dtype: float64)\nsaved as census_by_precinct_ce2012pre2012.csv\n\n ce2000pre1992\n712 precincts before\n712 precincts after\nSum >1.1 or <.97:\n precname\n2002 0.935645\n2005 0.914213\n2014 0.936640\n2059 0.953853\n2816 0.960311\nName: prop_area, dtype: float64\nsaved as census_by_precinct_ce2000pre1992.csv\n" ] ], [ [ "Let's check out the precincts that don't total 1.0.. something may be wrong. \n\nPrecinct 2009(2002) is on the SF border and has 931 registered voters. \nThe 5 precincts(1992) with weird results are all on the southern SF border. \n\nThese that are on the border probably don't add up to 1.0 because the boundaries are slightly different from the census shapefiles. \nI think it's close enough that it's not a problem. \n\nTODO: fix these two precincts.\n\n\nPrecincts(2012) 7025 and 7035 are the Hunter's Point Shipyard area. I wonder if this is messed up because boundaries changed? \nSomething's clearly wrong with 7035 because there are 327 registered voters and a tot population of only ~34. \n7025 has 441 registered voters and tot pop of ~1323.\nFor these, it might be more of a problem because they're really far off. \nProbably have to omit them until I can come back and figure out what to do. ", "_____no_output_____" ] ], [ [ "# look for other missing data. # can't find any other missing data here. \n\nfor yr_key in census_data_by_precinct.keys():\n print(len(census_data_by_precinct[yr_key][pd.isnull(census_data_by_precinct[yr_key]['med_inc_wgt'])]))", "0\n0\n0\n0\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb10178a1451fe47f34e99b81c2a9fe1f401a8e9
5,325
ipynb
Jupyter Notebook
code/notebooks/Prototypes/Face_Detection/UI.ipynb
jasag/Phytoliths-recognition-system
1d34e9d9a0c4e5fe27716c90c7cacb1938fb2930
[ "BSD-3-Clause" ]
3
2017-05-06T07:27:18.000Z
2018-01-22T22:27:54.000Z
code/notebooks/Prototypes/Face_Detection/UI.ipynb
jasag/Phytoliths-recognition-system
1d34e9d9a0c4e5fe27716c90c7cacb1938fb2930
[ "BSD-3-Clause" ]
52
2017-04-28T14:55:21.000Z
2017-07-05T21:42:45.000Z
code/notebooks/Prototypes/Face_Detection/UI.ipynb
jasag/Phytoliths-recognition-system
1d34e9d9a0c4e5fe27716c90c7cacb1938fb2930
[ "BSD-3-Clause" ]
null
null
null
31.508876
93
0.590235
[ [ [ "from __future__ import print_function\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\n\n%matplotlib inline\n#Importamos nuestros mรณdulos y clases necesarias\nimport Image_Classifier as img_clf\nimport Labeled_Image as li\nimport classifiers as clfs\n\nfrom skimage import io\nfrom skimage.color import rgb2gray\nfrom skimage.transform import rescale\n\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "from IPython.display import display\nimport fileupload\nimport os\nimport PIL.Image\nimport io as io2\nimport numpy as np\n\n# Inicializamos la clase que se encarga de clasificar imagenes \nclf = img_clf.Image_Classifier(clfs.classifiers.get('svm'))\nlbl_img = li.Labeled_Image(clf)\n\n''' Funciรณn que se encarga de aplicar las operaciones \nnecesarias para convertir los datos obtenidos del FileUpload\nen una imagen'''\ndef imageConverter(change):\n ch = change['owner']\n image = io2.BytesIO(ch.data)\n image = PIL.Image.open(image)\n image = np.array(image)\n return rgb2gray(image)\n\n'''Funciรณn mediante la que indicamos el clasificador\ncon el que clasificaremos la imagen'''\ndef set_classifier_wrapper(classifier_index):\n clf.set_classifier(clfs.classifiers[classifier_index][0],\n is_probs_classifier = clfs.classifiers[classifier_index][1])\n \n'''Funciรณn que nos permite mostrar la imagen'''\ndef plotter_wrapper():\n lbl_img.boxes_generator_with_nms()\n lbl_img.plotter()\n\n''' Funciรณn mediante la que escogemos la imagen'''\ndef _upload(lbl_img):\n\n _upload_widget = fileupload.FileUploadWidget()\n\n def _cb(change):\n image = imageConverter(change)\n lbl_img.set_image(image)\n #lbl_img.predict()\n \n _upload_widget.observe(_cb, names='data')\n display(_upload_widget)\n \n'''Funciรณn que nos permite mostrar la imagen'''\ndef rescale_image_selector(lbl_img, rescale_coef):\n if lbl_img.get_original_image() is not None:\n lbl_img.image_rescale(rescale_coef)\n\ndef patch_size_selector(Ni, Nj):\n clf.set_patch_size((Ni,Nj))\n\nclf_button = widgets.Button(description=\"Clasificar\")\n\ndef on_button_clicked(b):\n # Etiquetamos imagen\n lbl_img.predict()\n # Y la mostramos\n plotter_wrapper()\n \n#clf_button.on_click(on_button_clicked)#, clf)\n\ndef step_size_selector(istep, jstep):\n clf.set_istep(istep)\n clf.set_jstep(jstep)\n\ndef probabilities_selector(probs):\n lbl_img.set_probs(probs)\n lbl_img.predict()\n plotter_wrapper()\n\ndef alfa_selector(alfa):\n lbl_img.set_alfa(alfa)", "_____no_output_____" ], [ "# Mostramos el widget que permita elegir el clasificador\ninteract(set_classifier_wrapper, classifier_index = list(clfs.classifiers.keys()));\n\n# Mostramos el widget que permita elegir la imagen a clasificar\n_upload(lbl_img)\n\n# Permitimos escoger el rescalado de la imagen, por defecto 1\ninteract(rescale_image_selector, rescale_coef=(0.3,1,0.001), lbl_img=fixed(lbl_img))\n\n# Permitimos escoger el tamaรฑo de alto y ancho para\n# las subdivisiones de la ventana\n#interact(patch_size_selector, Ni=(0,100), Nj=(0,100))\n\n# Permitimos escoger el tamaรฑo del salto\n# en las subdivisiones de la imagen\ninteract(step_size_selector, istep=(0,100), jstep=(0,100))\n\ninteract(alfa_selector, alfa=(0,1,0.001))\n\n# Por ultimo, mostramos la imagen y permitimos que muestre las ventanas \n# en funciรณn de las probabilidades\ninteract_manual(probabilities_selector, probs=(0.5,1,0.001))\n\n# LLamar al clasificador\n#display(clf_button)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cb10189f798aed303c5500b851df27e98ea8364c
123,251
ipynb
Jupyter Notebook
courses/cluster_analysis/cluster_analysis2.ipynb
albertoarenas/datacamp
58ce3d15f8377ba5e2eac8d0c995c4e36608d00e
[ "MIT" ]
null
null
null
courses/cluster_analysis/cluster_analysis2.ipynb
albertoarenas/datacamp
58ce3d15f8377ba5e2eac8d0c995c4e36608d00e
[ "MIT" ]
null
null
null
courses/cluster_analysis/cluster_analysis2.ipynb
albertoarenas/datacamp
58ce3d15f8377ba5e2eac8d0c995c4e36608d00e
[ "MIT" ]
null
null
null
305.833747
23,095
0.691459
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport missingno as msno\n\npd.set_option('display.max_rows', 40)\npd.set_option('display.max_columns', 20) \npd.set_option('display.width', 200)\n\ndef explore_df(df):\n print(df.shape)\n print(df.head())\n print(df.info())", "_____no_output_____" ] ], [ [ "# Extract RGB values from image\nThere are broadly three steps to find the dominant colors in an image:\n\nExtract RGB values into three lists.\nPerform k-means clustering on scaled RGB values.\nDisplay the colors of cluster centers.\nTo extract RGB values, we use the imread() function of the image class of matplotlib. Empty lists, r, g and b have been initialized.\n\nFor the purpose of finding dominant colors, we will be using the following image.", "_____no_output_____" ] ], [ [ "import matplotlib.image as img\n\nr = []\ng = []\nb = []\n\n# Read batman image\nbatman_image = img.imread('datasets/batman.jpg')\nprint(batman_image.shape)\n\nfor row in batman_image:\n for temp_r, temp_g, temp_b in row:\n r.append(temp_r)\n g.append(temp_g)\n b.append(temp_b)\n", "(169, 269, 3)\n" ], [ "batman_df = pd.DataFrame({'red':r, 'green':b, 'blue':b})\ndisplay(batman_df.head())", "_____no_output_____" ], [ "from scipy.cluster.vq import whiten\n\ncolumns = ['red', 'green', 'blue']\nfor column in columns:\n batman_df['scaled_'+ column] = whiten(batman_df[column])\n\ndisplay(batman_df.head())\n", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler\n\n# Create scaler\nscaler = StandardScaler(with_mean=False, with_std=True)\n\nbatman_df[['scaled_red', 'scaled_green', 'scaled_blue']] = scaler.fit_transform(batman_df[['red','green','blue']])\ndisplay(batman_df.shape)\ndisplay(batman_df.head())", "_____no_output_____" ] ], [ [ "# How many dominant colors?\nWe have loaded the following image using the imread() function of the image class of matplotlib.\n\n\nThe RGB values are stored in a data frame, batman_df. The RGB values have been standardized used the whiten() function, stored in columns, scaled_red, scaled_blue and scaled_green.\n\nConstruct an elbow plot with the data frame. 
How many dominant colors are present?", "_____no_output_____" ] ], [ [ "from scipy.cluster.vq import kmeans, vq\n\ndistortions = []\nnum_clusters = range(1, 7)\n\n# Create a list of distortions from the kmeans function\nfor i in num_clusters:\n cluster_centers, distortion = kmeans(batman_df[['scaled_red', 'scaled_green', 'scaled_blue']], i)\n distortions.append(distortion)\n\n# Create a data frame with two lists, num_clusters and distortions\nelbow_plot = pd.DataFrame({'num_clusters':num_clusters, 'distortions':distortions})\n\n# Create a line plot of num_clusters and distortions\nsns.lineplot(x='num_clusters', y='distortions', data = elbow_plot)\nplt.xticks(num_clusters)\nplt.show()", "_____no_output_____" ], [ "from sklearn.cluster import KMeans\n\nnum_clusters = range(1, 7)\ninertias = []\n\nfor k in num_clusters:\n # Create a KMeans with k clusters\n model = KMeans(n_clusters=k)\n\n # Fit model to samples\n model.fit(batman_df[['scaled_red', 'scaled_green', 'scaled_blue']])\n\n # Append iterntia to the list of inertias\n inertias.append(model.inertia_)\n\n# Create a data frame with two lists - num_clusters, distortions\nelbow_plot = pd.DataFrame({'num_clusters': num_clusters, 'inertias': inertias})\n\n# Creat a line plot of num_clusters and distortions\nsns.lineplot(x='num_clusters', y='inertias', data = elbow_plot)\nplt.show()", "_____no_output_____" ] ], [ [ "### RESULT: Notice that there are three distinct colors present in the image, which is supported by the elbow plot.", "_____no_output_____" ], [ "# Display dominant colors\nWe have loaded the following image using the imread() function of the image class of matplotlib.\n\nTo display the dominant colors, convert the colors of the cluster centers to their raw values and then \n\nconverted them to the range of 0-1, using the following formula: \n\n```converted_pixel = standardized_pixel * pixel_std / 255```\n\nThe RGB values are stored in a data frame, batman_df. The scaled RGB values are stored in columns, scaled_red, scaled_blue and scaled_green. The cluster centers are stored in the variable cluster_centers, which were generated using the kmeans() function with three clusters.", "_____no_output_____" ] ], [ [ "colors = []\ncluster_centers, distortion = kmeans(batman_df[['scaled_red', 'scaled_green', 'scaled_blue']], 3)\n\n# Get standard deviations of each color\nr_std, g_std, b_std = batman_df[['red', 'green', 'blue']].std()\ndisplay(r_std, g_std, g_std)\n\n\nfor cluster_center in cluster_centers:\n scaled_r, scaled_g, scaled_b = cluster_center\n # Convert each standardized value to scaled value\n colors.append((\n scaled_r * r_std / 255,\n scaled_g * g_std / 255,\n scaled_b * b_std / 255\n ))\n\nplt.imshow([colors])\nplt.show()", "_____no_output_____" ], [ "model = KMeans(n_clusters=3)\nmodel.fit(batman_df[['scaled_red', 'scaled_green', 'scaled_blue']])\ncentroids = model.cluster_centers_\ndisplay(centroids)\n\ncentroids_unscaled = scaler.inverse_transform(centroids)\ndisplay(centroids_unscaled)\n\ncentroids_unscaled /= 255\ndisplay(centroids_unscaled)\n\nplt.imshow([centroids_unscaled])\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
cb101952a38ac9916008b0666ad3bc196c8ba417
76,404
ipynb
Jupyter Notebook
Statistical_Machine_Learning/workshop/.ipynb_checkpoints/worksheet03_solutions-checkpoint.ipynb
infinityglow/Unimelb-CS-Subjects
07bdb49fd4c50035b7f2e80ca218ac2b620098e4
[ "MIT" ]
1
2022-02-14T16:31:07.000Z
2022-02-14T16:31:07.000Z
Statistical_Machine_Learning/workshop/.ipynb_checkpoints/worksheet03_solutions-checkpoint.ipynb
hidara2000/Unimelb-CS-Subjects
07bdb49fd4c50035b7f2e80ca218ac2b620098e4
[ "MIT" ]
null
null
null
Statistical_Machine_Learning/workshop/.ipynb_checkpoints/worksheet03_solutions-checkpoint.ipynb
hidara2000/Unimelb-CS-Subjects
07bdb49fd4c50035b7f2e80ca218ac2b620098e4
[ "MIT" ]
1
2021-06-14T11:59:13.000Z
2021-06-14T11:59:13.000Z
137.664865
22,664
0.865347
[ [ [ "# COMP90051 Workshop 3\n## Logistic regression\n***\nIn this workshop we'll be implementing L2-regularised logistic regression using `scipy` and `numpy`. \nOur key objectives are:\n\n* to become familiar with the optimisation problem that sits behind L2-regularised logistic regression;\n* to apply polynomial basis expansion and recognise when it's useful; and\n* to experiment with the effect of L2 regularisation.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "### 1. Binary classification data\nLet's begin by generating some binary classification data.\nTo make it easy for us to visualise the results, we'll stick to a two-dimensional feature space.", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_circles\nX, Y = make_circles(n_samples=300, noise=0.1, factor=0.7, random_state=90051)\nplt.plot(X[Y==0,0], X[Y==0,1], 'o', label = \"y=0\")\nplt.plot(X[Y==1,0], X[Y==1,1], 's', label = \"y=1\")\nplt.legend()\nplt.xlabel(\"$x_0$\")\nplt.ylabel(\"$x_1$\")\nplt.show()", "_____no_output_____" ] ], [ [ "**Question:** What's interesting about this data? Do you think logistic regression will perform well?\n\n**Answer:** *This question is answered in section 3.*", "_____no_output_____" ], [ "In preparation for fitting and evaluating a logistic regression model, we randomly partition the data into train/test sets. We use the `train_test_split` function from `sklearn`.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=90051)\nprint(\"Training set has {} instances. Test set has {} instances.\".format(X_train.shape[0], X_test.shape[0]))", "Training set has 201 instances. Test set has 99 instances.\n" ] ], [ [ "### 2. Logistic regression objective function", "_____no_output_____" ], [ "Recall from lectures, that logistic regression models the distribution of the binary class $y$ *conditional* on the feature vector $\\mathbf{x}$ as\n$$\ny | \\mathbf{x} \\sim \\mathrm{Bernoulli}[\\sigma(\\mathbf{w}^T \\mathbf{x} + b)]\n$$\nwhere $\\mathbf{w}$ is the weight vector, $b$ is the bias term and $\\sigma(z) = 1/(1 + e^{-z})$ is the logistic function.\nTo simplify the notation, we'll collect the model parameters $\\mathbf{w}$ and $b$ in a single vector $\\mathbf{v} = [b, \\mathbf{w}]$.\n\nFitting this model amounts to choosing $\\mathbf{v}$ that minimises the sum of cross-entropies over the instances ($i = 1,\\ldots,n$) in the training set\n$$\nf_\\mathrm{cross-ent}(\\mathbf{v}; \\mathbf{X}, \\mathbf{Y}) = - \\sum_{i = 1}^{n} \\left\\{ y_i \\log \\sigma(\\mathbf{w}^T \\mathbf{x}_i + b) + (1 - y_i) \\log (1 - \\sigma(\\mathbf{w}^T \\mathbf{x}_i + b)) \\right\\}\n$$\n\nOften a regularisation term of the form $f_\\mathrm{reg}(\\mathbf{w}; \\lambda) = \\frac{1}{2} \\lambda \\mathbf{w}^T \\mathbf{w}$ is added to the objective to penalize large weights (this can help to prevent overfitting). Note that $\\lambda \\geq 0$ controls the strength of the regularisation term.\n\nPutting this together, our goal is to minimise the following objective function with respect to $\\mathbf{w}$ and $b$:\n$$\nf(\\mathbf{v}; \\mathbf{X}, \\mathbf{Y}, \\lambda) = f_\\mathrm{reg}(\\mathbf{w}; \\lambda) + f_\\mathrm{cross-ent}(\\mathbf{v}; \\mathbf{X}, \\mathbf{Y})\n$$\n\n**Question:** Why aren't we regularising the entire parameter vector $\\mathbf{v}$? 
Notice that only $\\mathbf{w}$ is included in $f_\\mathrm{reg}$โ€”in other words $b$ is excluded from regularisation. \n\n**Answer:** *If we were to replace $\\mathbf{w}$ with $\\mathbf{v}$ in the regularisation term, we'd be penalising large $b$. This is not a good idea, because a large bias may be required for some data setsโ€”and restricting the bias doesn't help with generalisation.*\n\nWe're going to find a solution to this minimisation problem using the BFGS algorithm (named after the inventors Broyden, Fletcher, Goldfarb and Shanno). BFGS is a \"hill-climbing\" algorithm like gradient descent, however it additionally makes use of second-order derivative information (by approximating the Hessian). It converges in fewer iterations than gradient descent (it's convergence rate is *superlinear* whereas gradient descent is only *linear*).\n\nWe'll use an implementation of BFGS provided in `scipy` called `fmin_bfgs`. The algorithm requires two functions as input: (i) a function that evaluates the objective $f(\\mathbf{v}; \\ldots)$ and (ii) a function that evalutes the gradient $\\nabla_{\\mathbf{v}} f(\\mathbf{v}; \\ldots)$.\n\nLet's start by writing a function to compute $f(\\mathbf{v}; \\ldots)$.", "_____no_output_____" ] ], [ [ "from scipy.special import expit # this is the logistic function\n\n# v: parameter vector\n# X: feature matrix\n# Y: class labels\n# Lambda: regularisation constant\ndef obj_fn(v, X, Y, Lambda):\n prob_1 = expit(np.dot(X,v[1::]) + v[0])\n reg_term = 0.5 * Lambda * np.dot(v[1::],v[1::]) # fill in\n cross_entropy_term = - np.dot(Y, np.log(prob_1)) - np.dot(1. - Y, np.log(1. - prob_1))\n return reg_term + cross_entropy_term # fill in", "_____no_output_____" ] ], [ [ "Now for the gradient, we use the following result (if you're familiar with vector calculus, you may wish to derive this yourself):\n$$\n\\nabla_{\\mathbf{v}} f(\\mathbf{v}; \\ldots) = \\left[\\frac{\\partial f(\\mathbf{w}, b;\\ldots)}{\\partial b}, \\nabla_{\\mathbf{w}} f(\\mathbf{w}, b; \\ldots) \\right] = \\left[\\sum_{i = 1}^{n} \\sigma(\\mathbf{w}^T \\mathbf{x}_i + b) - y_i, \\lambda \\mathbf{w} + \\sum_{i = 1}^{n} (\\sigma(\\mathbf{w}^T \\mathbf{x}_i + b) - y_i)\\mathbf{x}_i\\right]\n$$\n\nThe function below implements $\\nabla_{\\mathbf{v}} f(\\mathbf{v}; \\ldots)$.", "_____no_output_____" ] ], [ [ "# v: parameter vector\n# X: feature matrix\n# Y: class labels\n# Lambda: regularisation constant\ndef grad_obj_fn(v, X, Y, Lambda):\n prob_1 = expit(np.dot(X, v[1::]) + v[0])\n grad_b = np.sum(prob_1 - Y)\n grad_w = Lambda * v[1::] + np.dot(prob_1 - Y, X)\n return np.insert(grad_w, 0, grad_b)", "_____no_output_____" ] ], [ [ "### 3. 
Solving the minimization problem using BFGS", "_____no_output_____" ], [ "Now that we've implemented functions to compute the objective and the gradient, we can plug them into `fmin_bfgs`.\nSpecifically, we define a function `my_logistic_regression` which calls `fmin_bfgs` and returns the optimal weight vector.", "_____no_output_____" ] ], [ [ "from scipy.optimize import fmin_bfgs\n\n# X: feature matrix\n# Y: class labels\n# Lambda: regularisation constant\n# v_initial: initial guess for parameter vector\ndef my_logistic_regression(X, Y, Lambda, v_initial, disp=True):\n # Function for displaying progress\n def display(v):\n print('v is', v, 'objective is', obj_fn(v, X, Y, Lambda))\n \n return fmin_bfgs(f=obj_fn, fprime=grad_obj_fn, \n x0=v_initial, args=(X, Y, Lambda), disp=disp, \n callback=display)", "_____no_output_____" ] ], [ [ "Let's try it out!", "_____no_output_____" ] ], [ [ "Lambda = 1\nv_initial = np.zeros(X_train.shape[1] + 1) # fill in a vector of zeros of appropriate length\nv_opt = my_logistic_regression(X_train, Y_train, Lambda, v_initial)\n\n# Function to plot the data points and decision boundary\ndef plot_results(X, Y, v, trans_func = None):\n # Scatter plot in feature space\n plt.plot(X[Y==0,0], X[Y==0,1], 'o', label = \"y=0\")\n plt.plot(X[Y==1,0], X[Y==1,1], 's', label = \"y=1\")\n \n # Compute axis limits\n x0_lower = X[:,0].min() - 0.1\n x0_upper = X[:,0].max() + 0.1\n x1_lower = X[:,1].min() - 0.1\n x1_upper = X[:,1].max() + 0.1\n \n # Generate grid over feature space\n x0, x1 = np.mgrid[x0_lower:x0_upper:.01, x1_lower:x1_upper:.01]\n grid = np.c_[x0.ravel(), x1.ravel()]\n if (trans_func is not None):\n grid = trans_func(grid) # apply transformation to features\n arg = (np.dot(grid, v[1::]) + v[0]).reshape(x0.shape)\n \n # Plot decision boundary (where w^T x + b == 0)\n plt.contour(x0, x1, arg, levels=[0], cmap=\"Greys\", vmin=-0.2, vmax=0.2)\n plt.legend()\n plt.show()\n \nplot_results(X, Y, v_opt)", "v is [-0.12895873 -0.08185978 0.02005271] objective is 138.80620162265674\nv is [-0.10559965 -0.16360542 0.05028661] objective is 138.71483494120764\nv is [-0.10594786 -0.16265682 0.0537028 ] objective is 138.71470747506936\nv is [-0.10744929 -0.16393098 0.05382675] objective is 138.71452420357136\nv is [-0.10855209 -0.16486892 0.05391878] objective is 138.71448412102714\nOptimization terminated successfully.\n Current function value: 138.714484\n Iterations: 5\n Function evaluations: 9\n Gradient evaluations: 9\n" ] ], [ [ "**Question:** Is the solution what you expected? Is it a good fit for the data?\n\n**Answer:** *It's not a good fit because logistic regression is a linear classifier, and the data is not linearly seperable.*\n\n**Question:** What's the accuracy of this model? Fill in the code below assuming the following decision function\n$$\n\\hat{y} = \\begin{cases}\n 1, &\\mathrm{if} \\ p(y = 1|\\mathbf{x}) \\geq \\tfrac{1}{2}, \\\\\n 0, &\\mathrm{otherwise}.\n\\end{cases}\n$$", "_____no_output_____" ] ], [ [ "from sklearn.metrics import accuracy_score\nY_test_pred = ((np.dot(X_test, v_opt[1::]) + v_opt[0]) >= 0)*1 # fill in\naccuracy_score(Y_test, Y_test_pred)", "_____no_output_____" ] ], [ [ "### 4. Adding polynomial features", "_____no_output_____" ], [ "We've seen that ordinary logistic regression does poorly on this data set, because the data is not linearly separable in the $x_0,x_1$ feature space.\n\nWe can get around this problem using basis expansion. In this case, we'll augment the feature space by adding polynomial features of degree 2. 
In other words, we replace the original feature matrix $\\mathbf{X}$ by a transformed feature matrix $\\mathbf{\\Phi}$ which contains additional columns corresponding to $x_0^2$, $x_0 x_1$ and $x_1^2$. This is done using the function `add_quadratic_features` defined below.\n\n**Note:** There's a built-in function in `sklearn` for adding polynomial features located at `sklearn.preprocessing.PolynomialFeatures`.", "_____no_output_____" ] ], [ [ "# X: original feature matrix\ndef add_quadratic_features(X):\n return np.c_[X, X[:,0]**2, X[:,0]*X[:,1], X[:,1]**2]\n\nPhi_train = add_quadratic_features(X_train)\nPhi_test = add_quadratic_features(X_test)", "_____no_output_____" ] ], [ [ "Let's apply our custom logistic regression function again on the augmented feature space.", "_____no_output_____" ] ], [ [ "Lambda = 1\nv_initial = np.zeros(Phi_train.shape[1] + 1) # fill in a vector of zeros of appropriate length\nv_opt = my_logistic_regression(Phi_train, Y_train, Lambda, v_initial)\nplot_results(X, Y, v_opt, trans_func=add_quadratic_features)", "v is [-0.25330176 -0.16078962 0.0393877 -0.69061891 -0.06191788 -0.66913767] objective is 133.7254597353608\nv is [ 0.14454987 -0.26356343 0.09103964 -1.12285189 -0.11645183 -1.10119619] objective is 123.21507652891698\nv is [ 0.93565182 0.58998862 -0.16877892 -2.03937966 -0.1485882 -2.11948612] objective is 111.06992373381527\nv is [ 2.00981184 0.20654673 -0.22783537 -3.18173066 0.0422559 -3.18999173] objective is 92.98273768800516\nv is [ 2.26572069 0.28225825 0.32509747 -3.39539396 -0.10521964 -3.55184492] objective is 90.3795977794516\nv is [ 2.68728421 0.13115228 0.20274263 -3.68503291 -0.75049241 -4.09603433] objective is 87.07364434715637\nv is [ 3.5263152 -0.03542892 0.1458146 -5.14685314 -0.2240546 -4.34861015] objective is 86.16404106182307\nv is [ 3.6108584 -0.1112141 0.10341003 -4.89905057 0.05122634 -4.80353784] objective is 85.31670295561514\nv is [ 3.30171225 -0.0320724 0.13822552 -4.41769093 -0.51691109 -4.63915242] objective is 84.7873170737362\nv is [ 3.37997084 -0.05177555 0.12586252 -4.54820823 -0.36757039 -4.66696619] objective is 84.70560757639106\nv is [ 3.37636311 -0.05150433 0.12495499 -4.5418868 -0.36860325 -4.66144529] objective is 84.70546489137999\nv is [ 3.37591202 -0.05140376 0.12484208 -4.54067978 -0.36864392 -4.66010235] objective is 84.70545611957576\nv is [ 3.37599404 -0.05137606 0.12487315 -4.54072288 -0.36863733 -4.66013615] objective is 84.70545603432774\nv is [ 3.3760122 -0.05136884 0.1248793 -4.54073972 -0.36863645 -4.66015408] objective is 84.7054560325126\nOptimization terminated successfully.\n Current function value: 84.705456\n Iterations: 14\n Function evaluations: 16\n Gradient evaluations: 16\n" ] ], [ [ "This time we should get a better result for the accuracy on the test set.", "_____no_output_____" ] ], [ [ "from sklearn.metrics import accuracy_score\nY_test_pred = ((np.dot(Phi_test, v_opt[1::]) + v_opt[0]) >= 0)*1 # fill in\naccuracy_score(Y_test, Y_test_pred)", "_____no_output_____" ] ], [ [ "### 5. Effect of regularisation", "_____no_output_____" ], [ "So far, we've fixed the regularisation constant so that $\\lambda = 1$. (Note it's possible to choose an \"optimal\" value for $\\lambda$ by applying cross-validation.)\n\n**Question:** What do you think will happen if we switch the regularisation off? 
Try setting $\\lambda$ to a small value (say $10^{-3}$) and check whether the accuracy of the model is affected.\n\n**Answer:** *Generally speaking, we risk overfitting if the regularisation constant is too small (or switched off entirely). You should observe that the accuracy on the test set reduces slightly with $\\lambda = 10^{-3}$ vs. $\\lambda = 1$.*", "_____no_output_____" ], [ "### 6. Logistic regression using sklearn", "_____no_output_____" ], [ "Now that you have some insight into the optimisation problem behind logistic regression, you should feel confident in using the built-in implementation in `sklearn` (or other packages).\nNote that the `sklearn` implementation handles floating point underflow/overflow more carefully than we have done, and uses faster numerical optimisation algorithms.", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\nclf = LogisticRegression(C=1)\nclf.fit(Phi_train, Y_train)", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\nY_test_pred = clf.predict(Phi_test)\naccuracy_score(Y_test, Y_test_pred)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
cb101b633516b0b6be8893cb689ea63ec0cc1545
18,114
ipynb
Jupyter Notebook
nbs/dl2/02a_why_sqrt5.ipynb
pyjaime/fastai-dl-course-2019
b5aacbbd99701f6c6c5d6c3978414f5ee6cd3c9a
[ "Apache-2.0" ]
null
null
null
nbs/dl2/02a_why_sqrt5.ipynb
pyjaime/fastai-dl-course-2019
b5aacbbd99701f6c6c5d6c3978414f5ee6cd3c9a
[ "Apache-2.0" ]
null
null
null
nbs/dl2/02a_why_sqrt5.ipynb
pyjaime/fastai-dl-course-2019
b5aacbbd99701f6c6c5d6c3978414f5ee6cd3c9a
[ "Apache-2.0" ]
null
null
null
20.239106
193
0.497184
[ [ [ "%load_ext autoreload\n%autoreload 2\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Does nn.Conv2d init work well?", "_____no_output_____" ], [ "[Jump_to lesson 9 video](https://course.fast.ai/videos/?lesson=9&t=21)", "_____no_output_____" ] ], [ [ "#export\nfrom exp.nb_02 import *\n\ndef get_data():\n path = datasets.download_data(MNIST_URL, ext='.gz')\n with gzip.open(path, 'rb') as f:\n ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')\n return map(tensor, (x_train,y_train,x_valid,y_valid))\n\ndef normalize(x, m, s): return (x-m)/s", "_____no_output_____" ], [ "torch.nn.modules.conv._ConvNd.reset_parameters??", "_____no_output_____" ] ], [ [ "```\ndef reset_parameters(self):\n init.kaiming_uniform_(self.weight, a=math.sqrt(5)) # why 5???\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n```", "_____no_output_____" ], [ "Let's try to test that with MNIST, observing how initialization affects performance...?", "_____no_output_____" ] ], [ [ "# Load data\nx_train,y_train,x_valid,y_valid = get_data()\ntrain_mean,train_std = x_train.mean(),x_train.std()\nx_train = normalize(x_train, train_mean, train_std)\nx_valid = normalize(x_valid, train_mean, train_std)", "_____no_output_____" ], [ "# Reshape\nx_train = x_train.view(-1,1,28,28)\nx_valid = x_valid.view(-1,1,28,28)\nx_train.shape,x_valid.shape", "_____no_output_____" ], [ "n,*_ = x_train.shape\nc = y_train.max()+1\nnh = 32\nn,c", "_____no_output_____" ], [ "l1 = nn.Conv2d(in_channels=1, out_channels=nh, kernel_size=5)", "_____no_output_____" ] ], [ [ "The parameters are: channels in (1 because is b&w), channels out (number of \"hidden layer neurons\") and kernel size.\n\nWe take 100 samples from the original input:", "_____no_output_____" ] ], [ [ "x = x_valid[:100]", "_____no_output_____" ], [ "x.shape", "_____no_output_____" ] ], [ [ "Write a function for a repetitive task like obtaining the stats ;)", "_____no_output_____" ] ], [ [ "def stats(x): \n return x.mean(),x.std()", "_____no_output_____" ] ], [ [ "We can access to `l1.weight` and to `l1.bias`", "_____no_output_____" ] ], [ [ "l1.weight.shape, l1.bias.shape", "_____no_output_____" ] ], [ [ "The weights tensor dimensions: 32 output filters, 1 input filter (because we only have 1 channel), and 5x5 because of the filter/kernel size.\n\nThe initialization of Conv2d gives us these stats:", "_____no_output_____" ] ], [ [ "stats(l1.weight),stats(l1.bias)", "_____no_output_____" ] ], [ [ "Pass our input through the conv layer...", "_____no_output_____" ] ], [ [ "t = l1(x)", "_____no_output_____" ], [ "stats(t)", "_____no_output_____" ] ], [ [ "The std dev is far from the expected 1 value. 
This looks like a problem.\n\nComparing to the regular kaiming normal (remember it's designed to be used followed by a ReLU layer, or more generally by a Leaky ReLU layer, where `a` is the slope of the negative part):", "_____no_output_____" ] ], [ [ "init.kaiming_normal_(l1.weight, a=1.)\nstats(l1(x))", "_____no_output_____" ] ], [ [ "Let's test our conv layer followed by a Leaky ReLU layer...", "_____no_output_____" ] ], [ [ "import torch.nn.functional as F", "_____no_output_____" ], [ "def f1(x,a=0): \n return F.leaky_relu(l1(x),a)", "_____no_output_____" ] ], [ [ "With a=0 (ReLU) the mean raises to 0.5 aprox, but std dev remains near to 1...", "_____no_output_____" ] ], [ [ "init.kaiming_normal_(l1.weight, a=0)\nstats(f1(x))", "_____no_output_____" ] ], [ [ "Compared to the default with a=5:", "_____no_output_____" ] ], [ [ "l1 = nn.Conv2d(1, nh, 5)\nstats(f1(x))", "_____no_output_____" ] ], [ [ "Let's write our own kaiming normalization function :)", "_____no_output_____" ] ], [ [ "l1.weight.shape", "_____no_output_____" ], [ "# receptive field size (number of kernel elements)\nrec_fs = l1.weight[0,0].numel()\nrec_fs", "_____no_output_____" ], [ "nf,ni,*_ = l1.weight.shape\nnf,ni", "_____no_output_____" ], [ "# Effective fan in and fan out of the convolutional layer\nfan_in = ni*rec_fs\nfan_out = nf*rec_fs\nfan_in,fan_out", "_____no_output_____" ] ], [ [ "The gain for the normalization, having `a` into account:", "_____no_output_____" ] ], [ [ "def gain(a): \n return math.sqrt(2.0 / (1 + a**2))", "_____no_output_____" ], [ "gain(1),gain(0),gain(0.01),gain(0.1),gain(math.sqrt(5.))", "_____no_output_____" ] ], [ [ "The last value is the one corresponding to the PyTorch's `a` init value.\n\nRemember that PyTorch uses kaiming_uniform instead of kaiming_normal, and the std dev of an uniform distribution is not 1:", "_____no_output_____" ] ], [ [ "torch.zeros(10000).uniform_(-1,1).std()", "_____no_output_____" ], [ "1/math.sqrt(3.)", "_____no_output_____" ] ], [ [ "Our complete kaiming function, with the compensated gain:", "_____no_output_____" ] ], [ [ "def kaiming2(x,a, use_fan_out=False):\n nf,ni,*_ = x.shape\n rec_fs = x[0,0].shape.numel()\n fan = nf*rec_fs if use_fan_out else ni*rec_fs\n std = gain(a) / math.sqrt(fan)\n bound = math.sqrt(3.) 
* std\n x.data.uniform_(-bound,bound)", "_____no_output_____" ], [ "kaiming2(l1.weight, a=0);\nstats(f1(x))", "_____no_output_____" ], [ "kaiming2(l1.weight, a=math.sqrt(5.))\nstats(f1(x))", "_____no_output_____" ], [ "class Flatten(nn.Module):\n def forward(self,x): return x.view(-1)", "_____no_output_____" ], [ "m = nn.Sequential(\n nn.Conv2d(1,8, 5,stride=2,padding=2), nn.ReLU(),\n nn.Conv2d(8,16,3,stride=2,padding=1), nn.ReLU(),\n nn.Conv2d(16,32,3,stride=2,padding=1), nn.ReLU(),\n nn.Conv2d(32,1,3,stride=2,padding=1),\n nn.AdaptiveAvgPool2d(1),\n Flatten(),\n)", "_____no_output_____" ], [ "y = y_valid[:100].float()", "_____no_output_____" ], [ "t = m(x)\nstats(t)", "_____no_output_____" ] ], [ [ "That std dev is very low, and we have a problem, because the variance is really small in the last layer.", "_____no_output_____" ] ], [ [ "l = mse(t,y)\nl.backward()", "_____no_output_____" ], [ "stats(m[0].weight.grad)", "_____no_output_____" ] ], [ [ "Let's return to `kaiming_uniform`, and apply that to the previous network:", "_____no_output_____" ] ], [ [ "init.kaiming_uniform_??", "_____no_output_____" ], [ "for l in m:\n if isinstance(l,nn.Conv2d):\n init.kaiming_uniform_(l.weight)\n l.bias.data.zero_()", "_____no_output_____" ], [ "t = m(x)\nstats(t)", "_____no_output_____" ], [ "l = mse(t,y)\nl.backward()\nstats(m[0].weight.grad)", "_____no_output_____" ] ], [ [ "It's better than the default Conv2d.\n\nPyTorch team said it was a bug (not multiplying by math.sqrt(3.)) but it empirically worked really well :S", "_____no_output_____" ], [ "## Export", "_____no_output_____" ] ], [ [ "!python notebook2script.py 02a_why_sqrt5.ipynb", "Converted 02a_why_sqrt5.ipynb to exp\\nb_02a.py\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb101c2ad2144fa4f37abbc88f5afb1e5d3472f0
511,515
ipynb
Jupyter Notebook
5 instant gratification/cognitive-conditioning.ipynb
MLVPRASAD/KaggleProjects
379e062cf58d83ff57a456552bb956df68381fdd
[ "MIT" ]
2
2020-01-25T08:31:14.000Z
2022-03-23T18:24:03.000Z
5 instant gratification/cognitive-conditioning.ipynb
MLVPRASAD/KaggleProjects
379e062cf58d83ff57a456552bb956df68381fdd
[ "MIT" ]
null
null
null
5 instant gratification/cognitive-conditioning.ipynb
MLVPRASAD/KaggleProjects
379e062cf58d83ff57a456552bb956df68381fdd
[ "MIT" ]
null
null
null
1,716.493289
503,088
0.95598
[ [ [ "import numpy as np\nimport pandas as pd\nfrom sklearn import *\nimport warnings; warnings.filterwarnings(\"ignore\")\n\ntrain = pd.read_csv('../input/train.csv')\ntest = pd.read_csv('../input/test.csv')\nsub = pd.read_csv('../input/sample_submission.csv')\ntrain.shape, test.shape, sub.shape", "_____no_output_____" ] ], [ [ "Wordplay in Column Names\n==============================", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport networkx as nx\nG=nx.Graph()\n\ncol = [c for c in train.columns if c not in ['id', 'target']]\n\nG.add_node('Start')\nfor i in range(4):\n G.add_node('Column Section '+ str(i))\n G.add_edge('Start','Column Section '+ str(i))\n for c in train[col].columns:\n if c.split('-')[i] not in G.nodes():\n G.add_node(c.split('-')[i])\n G.add_edge('Column Section '+ str(i), c.split('-')[i])\n if c not in G.nodes():\n G.add_node(c)\n G.add_edge(c.split('-')[i],c)\nplt.figure(1,figsize=(12,12))\nnx.draw_networkx(G, node_size=1,font_size=6)\nplt.axis('off'); plt.show()", "_____no_output_____" ] ], [ [ "How unique are the column values\n==========", "_____no_output_____" ] ], [ [ "df = []\nfor c in train.columns:\n if c not in ['target', 'id', 'wheezy-copper-turtle-magic']:\n l1 = test[c].unique()\n l2 = train[c].unique()\n df.append([c, len(l1), len(l2), len(l1)- 131073, len(l2) - 262144])\ndf = pd.DataFrame(df, columns=['col', 'test_unique', 'train_unique', 'test_diff', 'train_diff'])\nfor c in ['test_unique', 'train_unique', 'test_diff', 'train_diff']:\n print(df[c].min(), df[c].max())\n \n#col = list(df[((df['test_diff']<-1900) & (df['train_diff']<-7500))]['col'].values)\ndf.head()", "128997 129262\n253919 255057\n-2076 -1811\n-8225 -7087\n" ] ], [ [ "Getting wheezy\n=====", "_____no_output_____" ] ], [ [ "col = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]\n\ndf_all = pd.concat((train,test), axis=0, ignore_index=True).reset_index(drop=True)\ndf_all['wheezy-copper-turtle-magic'] = df_all['wheezy-copper-turtle-magic'].astype('category')\n\ntrain = df_all[:train.shape[0]].reset_index(drop=True)\ntest = df_all[train.shape[0]:].reset_index(drop=True)\ndel df_all\ntrain.shape, test.shape", "_____no_output_____" ] ], [ [ "Lets Race\n======", "_____no_output_____" ] ], [ [ "test_ = []\n\nkn = neighbors.KNeighborsClassifier(n_neighbors=17, p=2.9)\nsv = svm.NuSVC(kernel='poly', degree=4, random_state=4, probability=True, coef0=0.08)\n\nfor s in sorted(train['wheezy-copper-turtle-magic'].unique()):\n train2 = train[train['wheezy-copper-turtle-magic']==s].reset_index(drop=True).copy()\n test2 = test[test['wheezy-copper-turtle-magic']==s].reset_index(drop=True).copy()\n kn.fit(train2[col], train2['target'])\n sv.fit(train2[col], train2['target'])\n test2['target'] = (kn.predict_proba(test2[col])[:,1] * 0.2) + (sv.predict_proba(test2[col])[:,1] * 0.8)\n test_.append(test2)\n\ntest_ = pd.concat(test_).reset_index(drop=True)\ntest_[['id','target']].to_csv(\"submission.csv\", index=False)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb101e46a9b09fe9cda5a171161f789f2d848c57
36,967
ipynb
Jupyter Notebook
book/chapters/trigger.ipynb
gordonwatts/xaod_usage
ee00797115009deaa19e9cd17bef6eb069ff822c
[ "MIT" ]
1
2022-02-28T07:43:01.000Z
2022-02-28T07:43:01.000Z
book/chapters/trigger.ipynb
gordonwatts/xaod_usage
ee00797115009deaa19e9cd17bef6eb069ff822c
[ "MIT" ]
19
2021-10-21T01:42:07.000Z
2022-03-31T12:15:16.000Z
book/chapters/trigger.ipynb
gordonwatts/xaod_usage
ee00797115009deaa19e9cd17bef6eb069ff822c
[ "MIT" ]
null
null
null
127.034364
14,942
0.871696
[ [ [ "# Accessing the Trigger\n\nIn ATLAS all access to event trigger decision is via the Trigger Decision Tool (TDT). There is quite a bit of information attached to the trigger, and its layout is quite complex - for that reason one should use the TDT to access the data. It is not really possible for a human to navigate the data structures quickly!", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom config import ds_zee as ds\nfrom func_adl_servicex_xaodr21 import tdt_chain_fired, tmt_match_object", "_____no_output_____" ] ], [ [ "## Looking for events that fired a chain\n\nLets look at $Z \\rightarrow ee$ Monte Carlo for a single electron trigger in the event.", "_____no_output_____" ] ], [ [ "n_electrons = (ds.Select(lambda e:\n {\n \"n_ele\": e.Electrons().Where(lambda e: abs(e.eta()) < 2.5).Count(),\n \"fired\": tdt_chain_fired(\"HLT_e60_lhmedium_nod0\"),\n })\n .AsAwkwardArray()\n .value()\n )", "_____no_output_____" ], [ "plt.hist(n_electrons.n_ele, bins=4, range=(0, 4), label='All Events')\nplt.hist(n_electrons.n_ele[n_electrons.fired], bins=4, range=(0, 4), label='Fired Events')\nplt.xlabel('Number of Electrons')\nplt.ylabel('Number of Events')\nplt.title('Electron Trigger and Number of Electrons in the Event')\n_ = plt.legend()", "_____no_output_____" ] ], [ [ "## Trigger Matching\n\nNext, let's find the electrons that matched that trigger that fired above. We'll do this by looking only at events where the trigger has fired, and then asking each electron if it matches withing a $\\Delta R$.", "_____no_output_____" ] ], [ [ "matched_electrons = (\n ds.Where(lambda e: tdt_chain_fired(\"HLT_e60_lhmedium_nod0\"))\n .SelectMany(lambda e: e.Electrons())\n .Select(\n lambda e: {\n \"pt\": e.pt() / 1001.0,\n \"eta\": e.eta(),\n \"is_trig\": tmt_match_object(\"HLT_e60_lhmedium_nod0\", e, 0.7),\n }\n )\n .AsAwkwardArray()\n .value()\n)", "_____no_output_____" ] ], [ [ "To know the `tnt_match_object` arguments, you'll need to look up its definition below on the atlas twiki.", "_____no_output_____" ] ], [ [ "plt.hist(matched_electrons.pt, bins=100, range=(0, 100), label='All Electrons')\ntrigger_electrons = matched_electrons[matched_electrons.is_trig]\nplt.hist(trigger_electrons.pt, bins=100, range=(0, 100), label='Trigger Electrons')\nplt.xlabel('Electron $p_T$ [GeV]')\nplt.ylabel('Number of Electrons')\n_ = plt.legend()", "_____no_output_____" ] ], [ [ "## Further Information\n\n* Tutorial on [trigger for analysis](https://indico.cern.ch/event/860971/contributions/3626403/attachments/1973400/3283452/200122_TriggerTutorial.pdf).\n* Trigger Group's [Trigger Analysis Tool](https://twiki.cern.ch/twiki/bin/view/Atlas/TriggerAnalysisTools) twiki page (with a [page devoted to the TDT](https://twiki.cern.ch/twiki/bin/view/Atlas/TrigDecisionTool)).\n* [Lowest un-prescaled triggers](https://twiki.cern.ch/twiki/bin/view/Atlas/LowestUnprescaled) per data-taking period twiki.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb10273e7d8cbf1c9915a4422be9b24d2401a1dc
19,324
ipynb
Jupyter Notebook
Foundations_of_Private_Computation/Secure_multiparty_computation/Additive Secret Sharing.ipynb
gonzalo-munillag/Private_AI_OpenMined
c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca
[ "MIT" ]
5
2021-01-06T16:49:22.000Z
2021-02-19T05:34:27.000Z
Foundations_of_Private_Computation/Secure_multiparty_computation/Additive Secret Sharing.ipynb
gonzalo-munillag/Private_AI_OpenMined
c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca
[ "MIT" ]
null
null
null
Foundations_of_Private_Computation/Secure_multiparty_computation/Additive Secret Sharing.ipynb
gonzalo-munillag/Private_AI_OpenMined
c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca
[ "MIT" ]
null
null
null
25.662683
526
0.547143
[ [ [ "# Additive Secret Sharing\n\nAuthor: \n- Carlos Salgado - [email](mailto:[email protected]) - [linkedin](https://www.linkedin.com/in/eng-socd/) - [github](https://github.com/socd06)\n\n## Additive Secret Sharing\nAdditive Secret Sharing is a mechanism to share data among parties and to perform computation on it. \n\n![Secret Sharing](img/secret-sharing.jpg)\n\n## Sharing\nA secret `s` is uniformly split into `n` shares, one per shareholder (also known as worker, node, user or party) using some randomness `r`, also known as some **very high random prime** number `Q`. \n\n$ F_s (s, r, n) = ( s_1, s_2, ..., s_n ) $", "_____no_output_____" ], [ "## Reconstruction\n`s` can be reconstructed (decrypted) by adding up **all the shares** and taking the [*modulo*](https://en.wikipedia.org/wiki/Modulo_operation) of the random prime number `Q`, used to encrypt the shares originally. ", "_____no_output_____" ], [ "$ s = ( \\: \\sum \\limits _{i=1} ^n s_i \\: ) \\; mod \\; Q $", "_____no_output_____" ], [ "## 32-bit Integer Secrets\nA secret is the data or message that a party wants to secure. In additive secret sharing, secrets (and therefore, shares) must be members of a fixed [finite field](https://en.wikipedia.org/wiki/Finite_field). Particularly, the literature mentions shares should be members of the $ {\\mathbb{Z}_{2^{32}}} $ [ring](https://en.wikipedia.org/wiki/Ring_(mathematics)), which is the [ring of integers](https://en.wikipedia.org/wiki/Ring_of_integers) that fit within [32-bits](https://en.wikipedia.org/wiki/32-bit_computing). ", "_____no_output_____" ], [ "![Number Types](img/num-types-solid.jpg)\n\nRings are [sets](https://en.wikipedia.org/wiki/Set_(mathematics)) with two operations, addition and multiplication, which allow the rationale of secret sharing and reconstruction to work. \n\nPlainly, secrets and secret shares **must** be integers within -2,147,483,647 to +2,147,483,647", "_____no_output_____" ], [ "## Governance\nAdditive secret sharing provides shared governance. The threshold `t` to reconstruct `s` is equal to `n`, which means **no party can recover the data** alone because all the shares are required to decrypt the secret *(t = n)*. This scheme allows us to do computation on the shares while each shareholder is only aware of their **own** share.", "_____no_output_____" ], [ "### [Quiz] Find the secret `s`\n\nIn practice, we use a **very high prime number** Q to add a **big deal of uniform randomness** to our shares. Here we will use a very small Q, so you can try to solve the quiz without programming yet. \n\nLet $ s_1 = 10 \\; and \\; s_2 = 74 \\; and \\; Q = 59 $\n\nWhat is the original secret `s`? Fill the ____ space below with your answer.\nTry **not** to use a calculator or programming.", "_____no_output_____" ] ], [ [ "# Run this cell to import the quizzes\nfrom quiz import q0, q1, q2", "_____no_output_____" ], [ "# run to check your answer\nq0.check(___)", "_____no_output_____" ], [ "# Uncomment the line below to see a hint\n# q0.hint", "_____no_output_____" ], [ "# Uncomment the line below to see the solution\n# q0.solution", "_____no_output_____" ] ], [ [ "### [Quiz] Find the final share s<sub>2</sub>\n\nUsing a small `Q` to facilitate calculation (it needs to be a **very high prime number** in production), let \n\n$ s = 7, n = 2 $ with $ Q = 59 $ and $ s_1 = 9 $\nplugged in on the secret reconstruction equation, find the final share s<sub>2</sub>.\n\nFill the ____ space below with your answer. 
Feel free to implement the equation in a new cell or use whatever tool you'd like (e.g. a calculator), it's your call. ", "_____no_output_____" ] ], [ [ "# Fill the ____ space below with your answer\nfinal_share = \n\n# run to check your answer\nq1.check(final_share)", "_____no_output_____" ], [ "# Uncomment the line below to see a hint\n# q1.hint", "_____no_output_____" ], [ "# Uncomment the line below to see the solution\n# q1.solution", "_____no_output_____" ] ], [ [ "## In Practice\nJust as an educational example, we can generate a list of prime numbers using [sympy](https://www.sympy.org/en/index.html)", "_____no_output_____" ] ], [ [ "# Verify we have all the tools we need to run the notebook\n!pip install -r requirements.txt", "_____no_output_____" ], [ "import sympy\n\n# An arbitrary constant, feel free to play with it\nCONST = 999\n\nBIT_DEPTH = 31\n\n# Range start\nstart = 2**BIT_DEPTH-CONST\n\n# Maximum in Z2**32 ring\nend = 2**BIT_DEPTH \n\nprime_lst = list(sympy.primerange(start,end+1))\n\nprint(\"Prime numbers in range: \" , prime_lst)", "_____no_output_____" ] ], [ [ "And **randomly** choose one every time using [NumPy](https://numpy.org/devdocs/contents.html)'s [randint](https://numpy.org/doc/stable/reference/random/generated/numpy.random.randint.html)", "_____no_output_____" ] ], [ [ "from numpy.random import randint\n\nQ = prime_lst[randint(len(prime_lst))]\nQ", "_____no_output_____" ] ], [ [ "As an additional note, the [Secrets module](https://docs.python.org/3/library/secrets.html), introduced in Python 3.6, provides randomness as secure as your operating system.", "_____no_output_____" ] ], [ [ "import secrets \n \nQ = secrets.choice(prime_lst)\nQ", "_____no_output_____" ] ], [ [ "## The Final Share and 2-party Additive Secret Sharing\n\nKnowing that $ s_n = Q - (\\; \\sum \\limits _{i=1} ^{n-1} s_i \\; mod \\; Q \\; ) + s $\n\nHow do we implement 2-party ($ n=2 $) additive secret sharing using Python? \n\nKeep reading and fing out!", "_____no_output_____" ] ], [ [ "def dual_share(s, r):\n '''\n s = secret\n r = randomness\n '''\n share_lst = list()\n share_lst.append(randint(0,r))\n \n final_share = r - (share_lst[0] % r) + s\n \n share_lst.append(final_share)\n \n return share_lst", "_____no_output_____" ], [ "# Let's generate a couple of shares\nsecret = 5 \n\ndual_shares = dual_share(secret, Q)\ndual_shares", "_____no_output_____" ] ], [ [ "Now go back to the previous cell and **run it again**. Notice anything?\n\n...\n\n...\n\n... \n\nSee it yet? 
The shares are never the same because they are **randomly generated**.\n\nNow let's implement the reconstruction (or decryption) function.", "_____no_output_____" ] ], [ [ "def decrypt(shares, r):\n '''\n shares = iterable made of additive secret shares\n r = randomness\n '''\n return sum(shares) % r", "_____no_output_____" ], [ "# And let's decrypt our secret for the first time\ndecrypt(dual_shares, Q)", "_____no_output_____" ] ], [ [ "## Exercise: Implement n-party additive secret sharing \nFill the function below with your code.", "_____no_output_____" ] ], [ [ "def n_share(s, r, n):\n '''\n s = secret\n r = randomness\n n = number of nodes, workers or participants \n \n returns a tuple of n-shares\n '''\n # replace with your code\n pass ", "_____no_output_____" ], [ "five_shares = n_share(s=686,r=Q,n=5)\nfive_shares\n\n# run this cell to check your solution\nq2.check(decrypt(five_shares, Q))", "_____no_output_____" ], [ "# Uncomment the line below to see a hint\n# q2.hint", "_____no_output_____" ], [ "# Uncomment the line below to see the solution\n# q2.solution", "_____no_output_____" ] ], [ [ "## Addition\nGiven two shared values $a$ and $b$, a party $P_i$ can compute the added shares as:\n$ c_i = ( a_i + b_i ) \\; mod \\; Q$\n\nIn Python, we can implement this type of addition like this:", "_____no_output_____" ] ], [ [ "def addition(a, b, r):\n '''\n a = iterable of the same length of b\n b = iterable of the same length of a\n r = randomness AKA randomly generated very high prime number \n '''\n c = list()\n \n for i in range(len(a)):\n c.append((a[i] + b[i]) % r)\n \n return tuple(c)", "_____no_output_____" ] ], [ [ "Considering Alice and Bob are our parties, with secrets $s_a$ and $s_b$ to be shared (2-way) and wanting to compute addition.\n\nLet $s_a = 5 $ and $s_b = 11 $\n\nAlice's shares would be something like:", "_____no_output_____" ] ], [ [ "# Alice's secret\nsa = 5 \n\nalice_shares = dual_share(sa, Q)\nalice_shares", "_____no_output_____" ] ], [ [ "While Bob's shares would be", "_____no_output_____" ] ], [ [ "# Bob's secret\nsb = 11\n\nbob_shares = dual_share(sb, Q)\nbob_shares", "_____no_output_____" ], [ "secret_sum = addition(alice_shares, bob_shares, Q)\nsecret_sum", "_____no_output_____" ] ], [ [ "Doesn't make a lot of sense, does it?\n\nSecret shares must only reveal information about their secrets when they are all combined. Otherwise all data must be hidden, which defines the **privacy** property. \n\nThese are still secret shares so there is one more step to get the sum of the original secrets. ", "_____no_output_____" ] ], [ [ "decrypt(secret_sum, Q)", "_____no_output_____" ] ], [ [ "Et Voilร !", "_____no_output_____" ], [ "## Public (scalar) Multiplication\nGiven a list of shared values $a$ and a **scalar** $b$, a party $P_i$ can compute the multiplied shares as:\n$ c_i = a_i \\times b \\; mod \\; Q$\n\nIn Python, we can implement this type of multiplication like this:", "_____no_output_____" ] ], [ [ "def public_mul(a, b, r):\n '''\n a = iterable of the same length of b\n b = scalar to multiply a by\n r = randomness AKA randomly generated very high prime number \n '''\n c = list()\n \n for i in range(len(a)):\n c.append((a[i] * b) % r)\n \n return tuple(c)", "_____no_output_____" ] ], [ [ "Let's say another party wants to multiply Alice's shares by the **scalar** value of 3. 
", "_____no_output_____" ] ], [ [ "alice_times3 = public_mul(alice_shares, 3, Q)", "_____no_output_____" ] ], [ [ "Then we can decrypt (with Alice's permission) to double check we did multiply what we intended.", "_____no_output_____" ] ], [ [ "decrypt(alice_times3,Q)", "_____no_output_____" ] ], [ [ "And this is `True` because Alice's secret $sa = 5$, remember?", "_____no_output_____" ] ], [ [ "decrypt(alice_times3,Q) == sa * 3", "_____no_output_____" ] ], [ [ "## PyTorch + PySyft implementation\nNow that you know how additive secret sharing works under the hood, let's see how we can leverage PyTorch and PySyft to do it for us.", "_____no_output_____" ] ], [ [ "import torch\nimport syft as sy\nhook = sy.TorchHook(torch)", "_____no_output_____" ] ], [ [ "Let's say Alice, Bob and Charlie are all enrolled on the **Foundations of Privacy** course and we, as instructors, want to know on average, how far in the course they are. We don't want to breach their privacy so each percentage of completion will be their own secret (a, b and c). \n\nFor educational purposes, we will define our parties (nodes, workers, etc) using `VirtualWorker` PySyft objects.", "_____no_output_____" ] ], [ [ "alice = sy.VirtualWorker(hook, id=\"alice\")\nbob = sy.VirtualWorker(hook, id=\"bob\")\ncharlie = sy.VirtualWorker(hook, id=\"charlie\")", "_____no_output_____" ] ], [ [ "We also need a \"secure worker\", also known as the `Crypto Provider` to provide us with random prime numbers.", "_____no_output_____" ] ], [ [ "secure_worker = sy.VirtualWorker(hook, \"secure_worker\")", "_____no_output_____" ] ], [ [ "We define our secrets using `torch.tensor` PyTorch tensor objects and we `Additive Share` them with our fellow workers.", "_____no_output_____" ] ], [ [ "# Let a, b and c be our students' completion percentage\na = torch.tensor([35])\nb = torch.tensor([77])\nc = torch.tensor([10])", "_____no_output_____" ], [ "# And we additive share with our parties\na = a.share(alice, bob, charlie, crypto_provider=secure_worker)\nb = b.share(alice, bob, charlie, crypto_provider=secure_worker)\nc = c.share(alice, bob, charlie, crypto_provider=secure_worker)", "_____no_output_____" ], [ "# And we compute the mean of our tensor\nmean = torch.mean(torch.stack(list([a,b,c])))\nmean", "_____no_output_____" ] ], [ [ "Also, see that the object type is **[AdditiveSharingTensor]**.\nFor this example, we can decrypt our computation result using the get() method", "_____no_output_____" ] ], [ [ "decrypted_mean = mean.get()\ndecrypted_mean", "_____no_output_____" ] ], [ [ "And get the scalar using the item() method (Only works for 1-dimensional tensors).", "_____no_output_____" ] ], [ [ "scalar_mean = decrypted_mean.item()\nscalar_mean", "_____no_output_____" ] ], [ [ "Now, the average completion should actually be 40 and $ \\frac{1}{3} $ (or 40.6666666666... ) but this is something we will learn about in the next lessons.", "_____no_output_____" ], [ "Letโ€™s now tackle private multiplication!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb103a9f38c6143a794cd4d39201ec51003eed83
10,589
ipynb
Jupyter Notebook
basic_torch.ipynb
RRakshit/AIPND
72275d8eb2233c79011c84cb7436b3175815ff9b
[ "MIT" ]
null
null
null
basic_torch.ipynb
RRakshit/AIPND
72275d8eb2233c79011c84cb7436b3175815ff9b
[ "MIT" ]
null
null
null
basic_torch.ipynb
RRakshit/AIPND
72275d8eb2233c79011c84cb7436b3175815ff9b
[ "MIT" ]
null
null
null
23.849099
224
0.377089
[ [ [ "<a href=\"https://colab.research.google.com/github/RRakshit/AIPND/blob/master/basic_torch.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport torch", "_____no_output_____" ], [ "x=torch.rand(3,2)\nx", "_____no_output_____" ], [ "y=torch.ones(x.size())\ny", "_____no_output_____" ], [ "z =x + y\nz", "_____no_output_____" ], [ "z[0]", "_____no_output_____" ], [ "z[:,:1]", "_____no_output_____" ], [ "z.add(1)", "_____no_output_____" ], [ "z.size()", "_____no_output_____" ], [ "z.resize_(2,3)", "_____no_output_____" ], [ "a=np.random.rand(4,3)\na", "_____no_output_____" ], [ "b=torch.from_numpy(a)\nb", "_____no_output_____" ], [ "b.numpy()", "_____no_output_____" ], [ "b.mul(2)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb1073fdc66fdbc4981028f8f517361cd6e31cf9
534,602
ipynb
Jupyter Notebook
notebooks/0.31-compare-sequence-models-bf/.ipynb_checkpoints/hmmlearn_FOMM-checkpoint.ipynb
xingjeffrey/avgn_paper
412e95dabc7b7b13a434b85cc54a21c06efe4e2b
[ "MIT" ]
null
null
null
notebooks/0.31-compare-sequence-models-bf/.ipynb_checkpoints/hmmlearn_FOMM-checkpoint.ipynb
xingjeffrey/avgn_paper
412e95dabc7b7b13a434b85cc54a21c06efe4e2b
[ "MIT" ]
null
null
null
notebooks/0.31-compare-sequence-models-bf/.ipynb_checkpoints/hmmlearn_FOMM-checkpoint.ipynb
xingjeffrey/avgn_paper
412e95dabc7b7b13a434b85cc54a21c06efe4e2b
[ "MIT" ]
null
null
null
209.319499
118,748
0.876809
[ [ [ "### HMM Software\n- [hmmlearn](https://hmmlearn.readthedocs.io/en/latest/tutorial.html)\n- [pomegranite]()\n- [r calculate AIC/BIC of model](https://rdrr.io/cran/HMMpa/man/AIC_HMM.html)\n- [comparison between pomegranite and hmmlearn (with notebook)](https://kyso.io/share/pomegranate-vs-hmmlearn#files)\n- [discussion of AIC/BIC from hmmlearn](https://waterprogramming.wordpress.com/2018/07/03/fitting-hidden-markov-models-part-ii-sample-python-script/)", "_____no_output_____" ] ], [ [ "%env CUDA_DEVICE_ORDER=PCI_BUS_ID\n%env CUDA_VISIBLE_DEVICES=2", "env: CUDA_DEVICE_ORDER=PCI_BUS_ID\nenv: CUDA_VISIBLE_DEVICES=2\n" ], [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom tqdm.autonotebook import tqdm\nfrom joblib import Parallel, delayed\nimport umap\nimport pandas as pd", "/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n" ], [ "from avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir\nfrom avgn.signalprocessing.create_spectrogram_dataset import flatten_spectrograms\nfrom avgn.visualization.spectrogram import draw_spec_set\nfrom avgn.visualization.quickplots import draw_projection_plots", "_____no_output_____" ], [ "from avgn.visualization.projections import (\n scatter_projections,\n draw_projection_transitions,\n)", "_____no_output_____" ] ], [ [ "### Collect data", "_____no_output_____" ] ], [ [ "DATASET_ID = 'bengalese_finch_sober'", "_____no_output_____" ], [ "DATA_DIR", "_____no_output_____" ], [ "syllable_df = pd.concat([pd.read_pickle(i) for i in list((DATA_DIR / 'indv_dfs' / DATASET_ID).glob('*.pickle'))])", "_____no_output_____" ], [ "syllable_df[:3]", "_____no_output_____" ], [ "label = \"hdbscan_labels\"", "_____no_output_____" ] ], [ [ "#### Compare hdbscan to hand", "_____no_output_____" ] ], [ [ "from avgn.visualization.projections import scatter_spec, scatter_projections\nfrom avgn.utils.general import save_fig\nfrom avgn.utils.paths import FIGURE_DIR, ensure_dir", "_____no_output_____" ], [ "indvs = syllable_df.indv.unique()", "_____no_output_____" ], [ "label_dict = {lab:i for i, lab in enumerate(np.unique(syllable_df['labels'].values))}\nsyllable_df['labels_num'] = [label_dict[i] for i in syllable_df.labels.values]", "_____no_output_____" ], [ "for indv in tqdm(indvs):\n print(indv)\n indv_df = syllable_df[syllable_df.indv == indv]\n z = np.vstack(indv_df.umap.values)\n \n fig, axs = plt.subplots(ncols=2, figsize=(10,5))\n ax = axs[0]\n ax.scatter(z[:,0], z[:,1], c = indv_df['labels_num'].values, s = 0.5, cmap = plt.cm.tab20, alpha = 0.25)\n ax.axis('off')\n ax.set_title('Hand Labels')\n ax = axs[1]\n ax.scatter(z[:,0], z[:,1], c = indv_df['hdbscan_labels'].values, s = 0.5, cmap = plt.cm.tab20, alpha = 0.25)\n ax.set_title('HDBSCAN Labels')\n ax.axis('off')\n plt.show()", "_____no_output_____" ] ], [ [ "### train HMM on data", "_____no_output_____" ] ], [ [ "indv = 'gy6or6'\nindv_df = syllable_df[syllable_df.indv == indv]\nindv_df = indv_df.sort_values(by=['syllables_sequence_id', 'syllables_sequence_pos'])\nindv_df = indv_df.reset_index()\nprint(len(indv_df))\nindv_df[:3]", "56375\n" ], [ "element_prob = {i: np.sum(indv_df.labels_num.values== i) for i in 
np.unique(indv_df.labels_num.values)}\nelement_prob", "_____no_output_____" ], [ "for key, val in element_prob.items():\n if val < 100:\n indv_df = indv_df.drop(indv_df[indv_df.labels_num == key].index)\nlen(indv_df)", "_____no_output_____" ], [ "hand_seqs = [\n list(indv_df[indv_df.syllables_sequence_id == seqid][\"labels_num\"].values)\n for seqid in indv_df.syllables_sequence_id.unique()\n]\nprint(hand_seqs[:3])", "[[11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 11, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13], [11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 11, 11, 11, 11, 3, 4, 5, 6, 7, 7, 8], [11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 9, 11, 11, 11, 3, 4, 5, 6, 7, 7, 8, 11, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4]]\n" ], [ "seq_lens = [len(i) for i in hand_seqs]", "_____no_output_____" ], [ "from hmmlearn import hmm", "_____no_output_____" ], [ "def AIC(log_likelihood, k):\n \"\"\" AIC given log_likelihood and # parameters (k)\n \"\"\"\n aic = 2 * k - 2 * log_likelihood\n return aic\n\ndef BIC(log_likelihood, n, k):\n \"\"\" BIC given log_likelihood, number of observations (n) and # parameters (k)\n \"\"\"\n bic = np.log(n)*k - 2 * log_likelihood\n return bic", "_____no_output_____" ], [ "training_mask = np.random.choice(np.arange(len(hand_seqs)), size = int(len(hand_seqs)/2), replace=False)\ntesting_mask = np.array([i for i in np.arange(len(hand_seqs)) if i not in training_mask])\ntraining_mask[:4], testing_mask[:4]", "_____no_output_____" ], [ "seqs_train = np.array(hand_seqs)[training_mask]\nseqs_test = np.array(hand_seqs)[testing_mask]\nlen(hand_seqs), len(seqs_train), len(seqs_test)", "_____no_output_____" ], [ "print(np.unique(np.concatenate(seqs_train).reshape(-1, 1)))\nprint(np.unique(np.concatenate(seqs_test).reshape(-1, 1)))", "[ 1 3 4 5 6 7 8 9 10 11 12 13]\n[ 1 3 4 5 6 7 8 9 10 11 12 13]\n" ], [ "from joblib import Parallel, delayed", "_____no_output_____" ], [ "def fit_hmm(seqs_train, seqs_test, seqs_test, n_components):\n \n # model\n model = hmm.MultinomialHMM(n_components=n_components).fit(\n np.concatenate(seqs_train).reshape(-1, 1), [len(i) for i in seqs_train]\n )\n \n # params\n num_params = (\n np.product(model.transmat_.shape)\n + np.product(model.emissionprob_.shape)\n + np.product(model.startprob_.shape)\n )\n \n # data\n n_data = np.sum(seq_lens)\n \n # probability of data given model\n log_probability = model.score(\n np.concatenate(seqs_test).reshape(-1, 1), [len(i) for i in seqs_test]\n )\n \n # AIC and BIC\n aic = AIC(log_probability, num_params)\n bic = BIC(log_probability, n_data, num_params)\n \n return (model,\n n_components,\n num_params,\n log_probability,\n aic,\n bic)", "_____no_output_____" ], [ "n_repeats = 5\nresults = Parallel(n_jobs=-1, verbose=15)(\n delayed(fit_hmm)(seqs_train, seqs_test, seqs_test, n_components)\n for n_components in tqdm(np.repeat(np.arange(10, 50, 1), n_repeats))\n)\nresults_df = pd.DataFrame(results, \n columns=[\"model\", \"n_components\", \"n_params\", \"log_prob\", \"AIC\", \"BIC\"]\n)\nresults_df[:3]", "_____no_output_____" ], [ "fig, axs = plt.subplots(ncols =3, figsize=(20,5))\nax = axs[0]\nax.scatter(results_df.n_components, results_df.log_prob)\n#ax.plot(results_df.n_components, 
results_df.log_prob)\n\nax = axs[1]\nax.scatter(results_df.n_components, results_df.AIC)\n#ax.plot(results_df.n_components, results_df.AIC)\n\nax = axs[2]\nax.scatter(results_df.n_components, results_df.BIC)\n#ax.plot(results_df.n_components, results_df.BIC)", "_____no_output_____" ], [ "print(seqs_train[0])", "[11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 9, 10, 12, 13, 11, 3, 4, 5, 6, 7, 7, 8, 9]\n" ], [ "train_second_order = [['-'.join([str(state), str(seq[si+1])]) for si, state in enumerate(seq[:-1])] for seq in seqs_train]\ntest_second_order = [['-'.join([str(state), str(seq[si+1])]) for si, state in enumerate(seq[:-1])] for seq in seqs_test]\n\nprint(train_second_order[0])", "['11-11', '11-11', '11-11', '11-11', '11-11', '11-11', '11-11', '11-11', '11-11', '11-11', '11-11', '11-3', '3-4', '4-5', '5-6', '6-7', '7-7', '7-8', '8-9', '9-10', '10-12', '12-13', '13-11', '11-3', '3-4', '4-5', '5-6', '6-7', '7-7', '7-8', '8-9', '9-10', '10-12', '12-13', '13-11', '11-3', '3-4', '4-5', '5-6', '6-7', '7-7', '7-8', '8-9', '9-10', '10-12', '12-13', '13-11', '11-3', '3-4', '4-5', '5-6', '6-7', '7-7', '7-8', '8-9', '9-10', '10-12', '12-13', '13-11', '11-3', '3-4', '4-5', '5-6', '6-7', '7-7', '7-8', '8-9']\n" ], [ "train_third_order = [['-'.join([str(state), str(seq[si+1]), str(seq[si+2])]) for si, state in enumerate(seq[:-2])] for seq in seqs_train]\nprint(train_third_order[0])", "['11-11-11', '11-11-11', '11-11-11', '11-11-11', '11-11-11', '11-11-11', '11-11-11', '11-11-11', '11-11-11', '11-11-11', '11-11-3', '11-3-4', '3-4-5', '4-5-6', '5-6-7', '6-7-7', '7-7-8', '7-8-9', '8-9-10', '9-10-12', '10-12-13', '12-13-11', '13-11-3', '11-3-4', '3-4-5', '4-5-6', '5-6-7', '6-7-7', '7-7-8', '7-8-9', '8-9-10', '9-10-12', '10-12-13', '12-13-11', '13-11-3', '11-3-4', '3-4-5', '4-5-6', '5-6-7', '6-7-7', '7-7-8', '7-8-9', '8-9-10', '9-10-12', '10-12-13', '12-13-11', '13-11-3', '11-3-4', '3-4-5', '4-5-6', '5-6-7', '6-7-7', '7-7-8', '7-8-9', '8-9-10', '9-10-12', '10-12-13', '12-13-11', '13-11-3', '11-3-4', '3-4-5', '4-5-6', '5-6-7', '6-7-7', '7-7-8', '7-8-9']\n" ], [ "second_order_dict = {s:si for si, s in enumerate(np.unique(np.concatenate(train_second_order)))}\ntrain_second_order_num = [[second_order_dict[i] for i in seq] for seq in train_second_order]\n#test_second_order_num = [[second_order_dict[i] for i in seq] for seq in test_second_order]", "_____no_output_____" ], [ "n_repeats = 1\nresults = Parallel(n_jobs=-1, verbose=15)(\n delayed(fit_hmm)(train_second_order_num, train_second_order_num, n_components)\n for n_components in tqdm(np.repeat(np.arange(10, 40, 5), n_repeats))\n)\nresults_df_second = pd.DataFrame(results, \n columns=[\"model\", \"n_components\", \"n_params\", \"log_prob\", \"AIC\", \"BIC\"]\n)\nresults_df[:3]", "_____no_output_____" ], [ "fig, axs = plt.subplots(ncols =3, figsize=(20,5))\nax = axs[0]\nax.scatter(results_df.n_components, results_df.log_prob)\n\nax.scatter(results_df_second.n_components, results_df_second.log_prob)\n#ax.plot(results_df.n_components, results_df.log_prob)\n\nax = axs[1]\nax.scatter(results_df_second.n_components, results_df_second.AIC)\n#ax.plot(results_df.n_components, results_df.AIC)\n\nax = axs[2]\nax.scatter(results_df_second.n_components, results_df_second.BIC)\n#ax.plot(results_df.n_components, results_df.BIC)", "_____no_output_____" ] ], [ [ "#### TODO\n- create a HMM where latent states are just normal states 
(e.g. Markov model)\n - compute likelihood of markov model\n- create a HMM where latent states are HDBSCAN states\n - compute likelihood of model\n- create a second order markov model \n - compute likelihood of markov model\n- create a HMM where latent states are learned using Baum Welch - chose highest AIC\n - compute likelihood of model", "_____no_output_____" ] ], [ [ "from hmmlearn.utils import iter_from_X_lengths", "_____no_output_____" ], [ "results_df.iloc[0]", "_____no_output_____" ], [ "model = results_df.iloc[0].model", "_____no_output_____" ], [ "lengths = [len(i) for i in seqs_test]\nX = np.concatenate(seqs_test).reshape(-1, 1)\nlen(X), len(lengths)", "_____no_output_____" ], [ "curr_logprob = 0\nfor i,j in iter_from_X_lengths(X, lengths):\n framelogprob = model._compute_log_likelihood(X[i:j])\n logprob, fwdlattice = model._do_forward_pass(framelogprob)\n curr_logprob += logprob\n bwdlattice = model._do_backward_pass(framelogprob)\n posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice)", "_____no_output_____" ], [ "np.shape(framelogprob)", "_____no_output_____" ], [ "fwdlattice[-1]", "_____no_output_____" ], [ "fwdlattice", "_____no_output_____" ], [ "bwdlattice", "_____no_output_____" ], [ "np.shape(bwdlattice)", "_____no_output_____" ], [ "curr_logprob", "_____no_output_____" ], [ "results_df.iloc[0].model.__dict__", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb1074675541201dd6750932226299d9b8728bdf
542,792
ipynb
Jupyter Notebook
05_Ensemble_Learning.ipynb
bianan/TensorFlow-Tutorials
d4912807267515926d74f066dabaa33fdcfa9519
[ "MIT" ]
null
null
null
05_Ensemble_Learning.ipynb
bianan/TensorFlow-Tutorials
d4912807267515926d74f066dabaa33fdcfa9519
[ "MIT" ]
null
null
null
05_Ensemble_Learning.ipynb
bianan/TensorFlow-Tutorials
d4912807267515926d74f066dabaa33fdcfa9519
[ "MIT" ]
1
2018-11-03T02:41:58.000Z
2018-11-03T02:41:58.000Z
192.206799
301,242
0.888353
[ [ [ "# TensorFlow Tutorial #05\n# Ensemble Learning\n\nby [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)\n/ [GitHub](https://github.com/Hvass-Labs/TensorFlow-Tutorials) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmEu1ZniY0XpHSzl5uihcXZ)", "_____no_output_____" ], [ "## Introduction\n\nThis tutorial shows how to use a so-called ensemble of convolutional neural networks. Instead of using a single neural network, we use several neural networks and average their outputs.\n\nThis is used on the MNIST data-set for recognizing hand-written digits. The ensemble improves the classification accuracy slightly on the test-set, but the difference is so small that it is possibly random. Furthermore, the ensemble mis-classifies some images that are correctly classified by some of the individual networks.\n\nThis tutorial builds on the previous tutorials, so you should have a basic understanding of TensorFlow and the add-on package Pretty Tensor. A lot of the source-code and text here is similar to the previous tutorials and may be read quickly if you have recently read the previous tutorials.", "_____no_output_____" ], [ "## Flowchart", "_____no_output_____" ], [ "The following chart shows roughly how the data flows in a single Convolutional Neural Network that is implemented below. The network has two convolutional layers and two fully-connected layers, with the last layer being used for the final classification of the input images. See Tutorial #02 for a more detailed description of this network and convolution in general.\n\nThis tutorial implements an ensemble of 5 such neural networks, where the network structure is the same but the weights and other variables are different for each network.", "_____no_output_____" ] ], [ [ "from IPython.display import Image\nImage('images/02_network_flowchart.png')", "_____no_output_____" ] ], [ [ "## Imports", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport time\nfrom datetime import timedelta\nimport math\nimport os\n\n# Use PrettyTensor to simplify Neural Network construction.\nimport prettytensor as pt", "_____no_output_____" ] ], [ [ "This was developed using Python 3.5.2 (Anaconda) and TensorFlow version:", "_____no_output_____" ] ], [ [ "tf.__version__", "_____no_output_____" ] ], [ [ "PrettyTensor version:", "_____no_output_____" ] ], [ [ "pt.__version__", "_____no_output_____" ] ], [ [ "## Load Data", "_____no_output_____" ], [ "The MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path.", "_____no_output_____" ] ], [ [ "from tensorflow.examples.tutorials.mnist import input_data\ndata = input_data.read_data_sets('data/MNIST/', one_hot=True)", "Extracting data/MNIST/train-images-idx3-ubyte.gz\nExtracting data/MNIST/train-labels-idx1-ubyte.gz\nExtracting data/MNIST/t10k-images-idx3-ubyte.gz\nExtracting data/MNIST/t10k-labels-idx1-ubyte.gz\n" ] ], [ [ "The MNIST data-set has now been loaded and consists of 70,000 images and associated labels (i.e. classifications of the images). 
The data-set is split into 3 mutually exclusive sub-sets, but we will make random training-sets further below.", "_____no_output_____" ] ], [ [ "print(\"Size of:\")\nprint(\"- Training-set:\\t\\t{}\".format(len(data.train.labels)))\nprint(\"- Test-set:\\t\\t{}\".format(len(data.test.labels)))\nprint(\"- Validation-set:\\t{}\".format(len(data.validation.labels)))", "Size of:\n- Training-set:\t\t55000\n- Test-set:\t\t10000\n- Validation-set:\t5000\n" ] ], [ [ "### Class numbers\n\nThe class-labels are One-Hot encoded, which means that each label is a vector with 10 elements, all of which are zero except for one element. The index of this one element is the class-number, that is, the digit shown in the associated image. We also need the class-numbers as integers for the test- and validation-sets, so we calculate them now.", "_____no_output_____" ] ], [ [ "data.test.cls = np.argmax(data.test.labels, axis=1)\ndata.validation.cls = np.argmax(data.validation.labels, axis=1)", "_____no_output_____" ] ], [ [ "### Helper-function for creating random training-sets\n\nWe will train 5 neural networks on different training-sets that are selected at random. First we combine the original training- and validation-sets into one big set. This is done for both the images and the labels.", "_____no_output_____" ] ], [ [ "combined_images = np.concatenate([data.train.images, data.validation.images], axis=0)\ncombined_labels = np.concatenate([data.train.labels, data.validation.labels], axis=0)", "_____no_output_____" ] ], [ [ "Check that the shape of the combined arrays is correct.", "_____no_output_____" ] ], [ [ "print(combined_images.shape)\nprint(combined_labels.shape)", "(60000, 784)\n(60000, 10)\n" ] ], [ [ "Size of the combined data-set.", "_____no_output_____" ] ], [ [ "combined_size = len(combined_images)\ncombined_size", "_____no_output_____" ] ], [ [ "Define the size of the training-set used for each neural network. You can try and change this.", "_____no_output_____" ] ], [ [ "train_size = int(0.8 * combined_size)\ntrain_size", "_____no_output_____" ] ], [ [ "We do not use a validation-set during training, but this would be the size.", "_____no_output_____" ] ], [ [ "validation_size = combined_size - train_size\nvalidation_size", "_____no_output_____" ] ], [ [ "Helper-function for splitting the combined data-set into a random training- and validation-set.", "_____no_output_____" ] ], [ [ "def random_training_set():\n # Create a randomized index into the full / combined training-set.\n idx = np.random.permutation(combined_size)\n\n # Split the random index into training- and validation-sets.\n idx_train = idx[0:train_size]\n idx_validation = idx[train_size:]\n\n # Select the images and labels for the new training-set.\n x_train = combined_images[idx_train, :]\n y_train = combined_labels[idx_train, :]\n\n # Select the images and labels for the new validation-set.\n x_validation = combined_images[idx_validation, :]\n y_validation = combined_labels[idx_validation, :]\n\n # Return the new training- and validation-sets.\n return x_train, y_train, x_validation, y_validation", "_____no_output_____" ] ], [ [ "## Data Dimensions", "_____no_output_____" ], [ "The data dimensions are used in several places in the source-code below. 
They are defined once so we can use these variables instead of numbers throughout the source-code below.", "_____no_output_____" ] ], [ [ "# We know that MNIST images are 28 pixels in each dimension.\nimg_size = 28\n\n# Images are stored in one-dimensional arrays of this length.\nimg_size_flat = img_size * img_size\n\n# Tuple with height and width of images used to reshape arrays.\nimg_shape = (img_size, img_size)\n\n# Number of colour channels for the images: 1 channel for gray-scale.\nnum_channels = 1\n\n# Number of classes, one class for each of 10 digits.\nnum_classes = 10", "_____no_output_____" ] ], [ [ "### Helper-function for plotting images", "_____no_output_____" ], [ "Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image.", "_____no_output_____" ] ], [ [ "def plot_images(images, # Images to plot, 2-d array.\n cls_true, # True class-no for images.\n ensemble_cls_pred=None, # Ensemble predicted class-no.\n best_cls_pred=None): # Best-net predicted class-no.\n\n assert len(images) == len(cls_true)\n \n # Create figure with 3x3 sub-plots.\n fig, axes = plt.subplots(3, 3)\n\n # Adjust vertical spacing if we need to print ensemble and best-net.\n if ensemble_cls_pred is None:\n hspace = 0.3\n else:\n hspace = 1.0\n fig.subplots_adjust(hspace=hspace, wspace=0.3)\n\n # For each of the sub-plots.\n for i, ax in enumerate(axes.flat):\n\n # There may not be enough images for all sub-plots.\n if i < len(images):\n # Plot image.\n ax.imshow(images[i].reshape(img_shape), cmap='binary')\n\n # Show true and predicted classes.\n if ensemble_cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true[i])\n else:\n msg = \"True: {0}\\nEnsemble: {1}\\nBest Net: {2}\"\n xlabel = msg.format(cls_true[i],\n ensemble_cls_pred[i],\n best_cls_pred[i])\n\n # Show the classes as the label on the x-axis.\n ax.set_xlabel(xlabel)\n \n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n \n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()", "_____no_output_____" ] ], [ [ "### Plot a few images to see if data is correct", "_____no_output_____" ] ], [ [ "# Get the first images from the test-set.\nimages = data.test.images[0:9]\n\n# Get the true classes for those images.\ncls_true = data.test.cls[0:9]\n\n# Plot the images and labels using our helper-function above.\nplot_images(images=images, cls_true=cls_true)", "_____no_output_____" ] ], [ [ "## TensorFlow Graph\n\nThe entire purpose of TensorFlow is to have a so-called computational graph that can be executed much more efficiently than if the same calculations were to be performed directly in Python. TensorFlow can be more efficient than NumPy because TensorFlow knows the entire computation graph that must be executed, while NumPy only knows the computation of a single mathematical operation at a time.\n\nTensorFlow can also automatically calculate the gradients that are needed to optimize the variables of the graph so as to make the model perform better. 
This is because the graph is a combination of simple mathematical expressions so the gradient of the entire graph can be calculated using the chain-rule for derivatives.\n\nTensorFlow can also take advantage of multi-core CPUs as well as GPUs - and Google has even built special chips just for TensorFlow which are called TPUs (Tensor Processing Units) and are even faster than GPUs.\n\nA TensorFlow graph consists of the following parts which will be detailed below:\n\n* Placeholder variables used for inputting data to the graph.\n* Variables that are going to be optimized so as to make the convolutional network perform better.\n* The mathematical formulas for the neural network.\n* A loss measure that can be used to guide the optimization of the variables.\n* An optimization method which updates the variables.\n\nIn addition, the TensorFlow graph may also contain various debugging statements e.g. for logging data to be displayed using TensorBoard, which is not covered in this tutorial.", "_____no_output_____" ], [ "### Placeholder variables", "_____no_output_____" ], [ "Placeholder variables serve as the input to the TensorFlow computational graph that we may change each time we execute the graph. We call this feeding the placeholder variables and it is demonstrated further below.\n\nFirst we define the placeholder variable for the input images. This allows us to change the images that are input to the TensorFlow graph. This is a so-called tensor, which just means that it is a multi-dimensional array. The data-type is set to `float32` and the shape is set to `[None, img_size_flat]`, where `None` means that the tensor may hold an arbitrary number of images with each image being a vector of length `img_size_flat`.", "_____no_output_____" ] ], [ [ "x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')", "_____no_output_____" ] ], [ [ "The convolutional layers expect `x` to be encoded as a 4-dim tensor so we have to reshape it so its shape is instead `[num_images, img_height, img_width, num_channels]`. Note that `img_height == img_width == img_size` and `num_images` can be inferred automatically by using -1 for the size of the first dimension. So the reshape operation is:", "_____no_output_____" ] ], [ [ "x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])", "_____no_output_____" ] ], [ [ "Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable `x`. The shape of this placeholder variable is `[None, num_classes]` which means it may hold an arbitrary number of labels and each label is a vector of length `num_classes` which is 10 in this case.", "_____no_output_____" ] ], [ [ "y_true = tf.placeholder(tf.float32, shape=[None, 10], name='y_true')", "_____no_output_____" ] ], [ [ "We could also have a placeholder variable for the class-number, but we will instead calculate it using argmax. Note that this is a TensorFlow operator so nothing is calculated at this point.", "_____no_output_____" ] ], [ [ "y_true_cls = tf.argmax(y_true, dimension=1)", "_____no_output_____" ] ], [ [ "### Neural Network", "_____no_output_____" ], [ "This section implements the Convolutional Neural Network using Pretty Tensor, which is much simpler than a direct implementation in TensorFlow, see Tutorial #03.\n\nThe basic idea is to wrap the input tensor `x_image` in a Pretty Tensor object which has helper-functions for adding new computational layers so as to create an entire neural network. 
Pretty Tensor takes care of the variable allocation, etc.", "_____no_output_____" ] ], [ [ "x_pretty = pt.wrap(x_image)", "_____no_output_____" ] ], [ [ "Now that we have wrapped the input image in a Pretty Tensor object, we can add the convolutional and fully-connected layers in just a few lines of source-code.\n\nNote that `pt.defaults_scope(activation_fn=tf.nn.relu)` makes `activation_fn=tf.nn.relu` an argument for each of the layers constructed inside the `with`-block, so that Rectified Linear Units (ReLU) are used for each of these layers. The `defaults_scope` makes it easy to change arguments for all of the layers.", "_____no_output_____" ] ], [ [ "with pt.defaults_scope(activation_fn=tf.nn.relu):\n y_pred, loss = x_pretty.\\\n conv2d(kernel=5, depth=16, name='layer_conv1').\\\n max_pool(kernel=2, stride=2).\\\n conv2d(kernel=5, depth=36, name='layer_conv2').\\\n max_pool(kernel=2, stride=2).\\\n flatten().\\\n fully_connected(size=128, name='layer_fc1').\\\n softmax_classifier(num_classes=num_classes, labels=y_true)", "_____no_output_____" ] ], [ [ "### Optimization Method", "_____no_output_____" ], [ "Pretty Tensor gave us the predicted class-label (`y_pred`) as well as a loss-measure that must be minimized, so as to improve the ability of the neural network to classify the input images.\n\nIt is unclear from the documentation for Pretty Tensor whether the loss-measure is cross-entropy or something else. But we now use the `AdamOptimizer` to minimize the loss.\n\nNote that optimization is not performed at this point. In fact, nothing is calculated at all, we just add the optimizer-object to the TensorFlow graph for later execution.", "_____no_output_____" ] ], [ [ "optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)", "_____no_output_____" ] ], [ [ "### Performance Measures\n\nWe need a few more performance measures to display the progress to the user.\n\nFirst we calculate the predicted class number from the output of the neural network `y_pred`, which is a vector with 10 elements. The class number is the index of the largest element.", "_____no_output_____" ] ], [ [ "y_pred_cls = tf.argmax(y_pred, dimension=1)", "_____no_output_____" ] ], [ [ "Then we create a vector of booleans telling us whether the predicted class equals the true class of each image.", "_____no_output_____" ] ], [ [ "correct_prediction = tf.equal(y_pred_cls, y_true_cls)", "_____no_output_____" ] ], [ [ "The classification accuracy is calculated by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then taking the average of these numbers.", "_____no_output_____" ] ], [ [ "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))", "_____no_output_____" ] ], [ [ "### Saver\n\nIn order to save the variables of the neural network, we now create a Saver-object which is used for storing and retrieving all the variables of the TensorFlow graph. 
Nothing is actually saved at this point, which will be done further below.\n\nNote that if you have more than 100 neural networks in the ensemble then you must increase `max_to_keep` accordingly.", "_____no_output_____" ] ], [ [ "saver = tf.train.Saver(max_to_keep=100)", "_____no_output_____" ] ], [ [ "This is the directory used for saving and retrieving the data.", "_____no_output_____" ] ], [ [ "save_dir = 'checkpoints/'", "_____no_output_____" ] ], [ [ "Create the directory if it does not exist.", "_____no_output_____" ] ], [ [ "if not os.path.exists(save_dir):\n os.makedirs(save_dir)", "_____no_output_____" ] ], [ [ "This function returns the save-path for the data-file with the given network number.", "_____no_output_____" ] ], [ [ "def get_save_path(net_number):\n return save_dir + 'network' + str(net_number)", "_____no_output_____" ] ], [ [ "## TensorFlow Run", "_____no_output_____" ], [ "### Create TensorFlow session\n\nOnce the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph.", "_____no_output_____" ] ], [ [ "session = tf.Session()", "_____no_output_____" ] ], [ [ "### Initialize variables\n\nThe variables for `weights` and `biases` must be initialized before we start optimizing them. We make a simple wrapper-function for this, because we will call it several times below.", "_____no_output_____" ] ], [ [ "def init_variables():\n session.run(tf.initialize_all_variables())", "_____no_output_____" ] ], [ [ "### Helper-function to create a random training batch.", "_____no_output_____" ], [ "There are thousands of images in the training-set. It takes a long time to calculate the gradient of the model using all these images. We therefore only use a small batch of images in each iteration of the optimizer.\n\nIf your computer crashes or becomes very slow because you run out of RAM, then you may try and lower this number, but you may then need to perform more optimization iterations.", "_____no_output_____" ] ], [ [ "train_batch_size = 64", "_____no_output_____" ] ], [ [ "Function for selecting a random training-batch of the given size.", "_____no_output_____" ] ], [ [ "def random_batch(x_train, y_train):\n # Total number of images in the training-set.\n num_images = len(x_train)\n\n # Create a random index into the training-set.\n idx = np.random.choice(num_images,\n size=train_batch_size,\n replace=False)\n\n # Use the random index to select random images and labels.\n x_batch = x_train[idx, :] # Images.\n y_batch = y_train[idx, :] # Labels.\n\n # Return the batch.\n return x_batch, y_batch", "_____no_output_____" ] ], [ [ "### Helper-function to perform optimization iterations", "_____no_output_____" ], [ "Function for performing a number of optimization iterations so as to gradually improve the variables of the network layers. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. 
The progress is printed every 100 iterations.", "_____no_output_____" ] ], [ [ "def optimize(num_iterations, x_train, y_train):\n # Start-time used for printing time-usage below.\n start_time = time.time()\n\n for i in range(num_iterations):\n\n # Get a batch of training examples.\n # x_batch now holds a batch of images and\n # y_true_batch are the true labels for those images.\n x_batch, y_true_batch = random_batch(x_train, y_train)\n\n # Put the batch into a dict with the proper names\n # for placeholder variables in the TensorFlow graph.\n feed_dict_train = {x: x_batch,\n y_true: y_true_batch}\n\n # Run the optimizer using this batch of training data.\n # TensorFlow assigns the variables in feed_dict_train\n # to the placeholder variables and then runs the optimizer.\n session.run(optimizer, feed_dict=feed_dict_train)\n\n # Print status every 100 iterations and after last iteration.\n if i % 100 == 0:\n\n # Calculate the accuracy on the training-batch.\n acc = session.run(accuracy, feed_dict=feed_dict_train)\n \n # Status-message for printing.\n msg = \"Optimization Iteration: {0:>6}, Training Batch Accuracy: {1:>6.1%}\"\n\n # Print it.\n print(msg.format(i + 1, acc))\n\n # Ending time.\n end_time = time.time()\n\n # Difference between start and end-times.\n time_dif = end_time - start_time\n\n # Print the time-usage.\n print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif)))))", "_____no_output_____" ] ], [ [ "### Create ensemble of neural networks", "_____no_output_____" ], [ "Number of neural networks in the ensemble.", "_____no_output_____" ] ], [ [ "num_networks = 5", "_____no_output_____" ] ], [ [ "Number of optimization iterations for each neural network.", "_____no_output_____" ] ], [ [ "num_iterations = 10000", "_____no_output_____" ] ], [ [ "Create the ensemble of neural networks. All networks use the same TensorFlow graph that was defined above. For each neural network the TensorFlow weights and variables are initialized to random values and then optimized. The variables are then saved to disk so they can be reloaded later.\n\nYou may want to skip this computation if you just want to re-run the Notebook with different analysis of the results.", "_____no_output_____" ] ], [ [ "if True:\n # For each of the neural networks.\n for i in range(num_networks):\n print(\"Neural network: {0}\".format(i))\n\n # Create a random training-set. 
Ignore the validation-set.\n x_train, y_train, _, _ = random_training_set()\n\n # Initialize the variables of the TensorFlow graph.\n session.run(tf.global_variables_initializer())\n\n # Optimize the variables using this training-set.\n optimize(num_iterations=num_iterations,\n x_train=x_train,\n y_train=y_train)\n\n # Save the optimized variables to disk.\n saver.save(sess=session, save_path=get_save_path(i))\n\n # Print newline.\n print()", "Neural network: 0\nOptimization Iteration: 1, Training Batch Accuracy: 6.2%\nOptimization Iteration: 101, Training Batch Accuracy: 87.5%\nOptimization Iteration: 201, Training Batch Accuracy: 92.2%\nOptimization Iteration: 301, Training Batch Accuracy: 92.2%\nOptimization Iteration: 401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 501, Training Batch Accuracy: 95.3%\nOptimization Iteration: 601, Training Batch Accuracy: 95.3%\nOptimization Iteration: 701, Training Batch Accuracy: 96.9%\nOptimization Iteration: 801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1001, Training Batch Accuracy: 95.3%\nOptimization Iteration: 1101, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1201, Training Batch Accuracy: 95.3%\nOptimization Iteration: 1301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 1401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 1601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1701, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 1901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2001, Training Batch Accuracy: 95.3%\nOptimization Iteration: 2101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2201, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2301, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2501, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3001, Training Batch Accuracy: 95.3%\nOptimization Iteration: 3101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3201, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3401, Training Batch Accuracy: 96.9%\nOptimization Iteration: 3501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4201, Training Batch Accuracy: 96.9%\nOptimization Iteration: 4301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4501, Training Batch Accuracy: 96.9%\nOptimization Iteration: 4601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4701, Training Batch Accuracy: 96.9%\nOptimization Iteration: 4801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 4901, Training Batch 
Accuracy: 100.0%\nOptimization Iteration: 5001, Training Batch Accuracy: 95.3%\nOptimization Iteration: 5101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5201, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7201, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 7901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 8901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9901, Training Batch Accuracy: 100.0%\nTime usage: 0:00:40\n\nNeural network: 1\nOptimization Iteration: 1, Training Batch Accuracy: 7.8%\nOptimization Iteration: 101, Training Batch Accuracy: 85.9%\nOptimization Iteration: 201, Training Batch Accuracy: 95.3%\nOptimization Iteration: 301, Training Batch Accuracy: 90.6%\nOptimization Iteration: 401, Training Batch Accuracy: 92.2%\nOptimization Iteration: 501, Training Batch Accuracy: 
95.3%\nOptimization Iteration: 601, Training Batch Accuracy: 95.3%\nOptimization Iteration: 701, Training Batch Accuracy: 93.8%\nOptimization Iteration: 801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 901, Training Batch Accuracy: 95.3%\nOptimization Iteration: 1001, Training Batch Accuracy: 95.3%\nOptimization Iteration: 1101, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1201, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 1501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 1601, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1701, Training Batch Accuracy: 95.3%\nOptimization Iteration: 1801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2401, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2701, Training Batch Accuracy: 95.3%\nOptimization Iteration: 2801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2901, Training Batch Accuracy: 96.9%\nOptimization Iteration: 3001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3301, Training Batch Accuracy: 96.9%\nOptimization Iteration: 3401, Training Batch Accuracy: 96.9%\nOptimization Iteration: 3501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 3901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 4901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5701, Training Batch Accuracy: 96.9%\nOptimization Iteration: 5801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5901, Training Batch Accuracy: 96.9%\nOptimization Iteration: 6001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6201, Training Batch Accuracy: 
100.0%\nOptimization Iteration: 6301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7201, Training Batch Accuracy: 96.9%\nOptimization Iteration: 7301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9901, Training Batch Accuracy: 98.4%\nTime usage: 0:00:40\n\nNeural network: 2\nOptimization Iteration: 1, Training Batch Accuracy: 3.1%\nOptimization Iteration: 101, Training Batch Accuracy: 84.4%\nOptimization Iteration: 201, Training Batch Accuracy: 87.5%\nOptimization Iteration: 301, Training Batch Accuracy: 87.5%\nOptimization Iteration: 401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 501, Training Batch Accuracy: 93.8%\nOptimization Iteration: 601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 701, Training Batch Accuracy: 93.8%\nOptimization Iteration: 801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 1001, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1101, Training Batch Accuracy: 93.8%\nOptimization Iteration: 1201, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1301, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1401, Training Batch Accuracy: 95.3%\nOptimization Iteration: 1501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1601, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1701, Training Batch Accuracy: 95.3%\nOptimization Iteration: 1801, Training Batch Accuracy: 98.4%\nOptimization 
Iteration: 1901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2201, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2301, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2401, Training Batch Accuracy: 95.3%\nOptimization Iteration: 2501, Training Batch Accuracy: 92.2%\nOptimization Iteration: 2601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2801, Training Batch Accuracy: 95.3%\nOptimization Iteration: 2901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3101, Training Batch Accuracy: 93.8%\nOptimization Iteration: 3201, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3301, Training Batch Accuracy: 96.9%\nOptimization Iteration: 3401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3501, Training Batch Accuracy: 95.3%\nOptimization Iteration: 3601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3901, Training Batch Accuracy: 95.3%\nOptimization Iteration: 4001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5101, Training Batch Accuracy: 96.9%\nOptimization Iteration: 5201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7401, Training Batch Accuracy: 96.9%\nOptimization Iteration: 7501, Training Batch Accuracy: 
100.0%\nOptimization Iteration: 7601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8101, Training Batch Accuracy: 96.9%\nOptimization Iteration: 8201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9201, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9701, Training Batch Accuracy: 95.3%\nOptimization Iteration: 9801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 9901, Training Batch Accuracy: 100.0%\nTime usage: 0:00:39\n\nNeural network: 3\nOptimization Iteration: 1, Training Batch Accuracy: 9.4%\nOptimization Iteration: 101, Training Batch Accuracy: 89.1%\nOptimization Iteration: 201, Training Batch Accuracy: 89.1%\nOptimization Iteration: 301, Training Batch Accuracy: 90.6%\nOptimization Iteration: 401, Training Batch Accuracy: 93.8%\nOptimization Iteration: 501, Training Batch Accuracy: 93.8%\nOptimization Iteration: 601, Training Batch Accuracy: 90.6%\nOptimization Iteration: 701, Training Batch Accuracy: 96.9%\nOptimization Iteration: 801, Training Batch Accuracy: 93.8%\nOptimization Iteration: 901, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 1201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 1301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1401, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1501, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1701, Training Batch Accuracy: 92.2%\nOptimization Iteration: 1801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2001, Training Batch Accuracy: 93.8%\nOptimization Iteration: 2101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2701, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2901, Training Batch Accuracy: 95.3%\nOptimization Iteration: 3001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 
3201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3801, Training Batch Accuracy: 95.3%\nOptimization Iteration: 3901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4301, Training Batch Accuracy: 95.3%\nOptimization Iteration: 4401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4701, Training Batch Accuracy: 95.3%\nOptimization Iteration: 4801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6201, Training Batch Accuracy: 95.3%\nOptimization Iteration: 6301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 7901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8801, Training Batch Accuracy: 
100.0%\nOptimization Iteration: 8901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9501, Training Batch Accuracy: 96.9%\nOptimization Iteration: 9601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9901, Training Batch Accuracy: 100.0%\nTime usage: 0:00:39\n\nNeural network: 4\nOptimization Iteration: 1, Training Batch Accuracy: 9.4%\nOptimization Iteration: 101, Training Batch Accuracy: 82.8%\nOptimization Iteration: 201, Training Batch Accuracy: 89.1%\nOptimization Iteration: 301, Training Batch Accuracy: 89.1%\nOptimization Iteration: 401, Training Batch Accuracy: 96.9%\nOptimization Iteration: 501, Training Batch Accuracy: 96.9%\nOptimization Iteration: 601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 701, Training Batch Accuracy: 96.9%\nOptimization Iteration: 801, Training Batch Accuracy: 93.8%\nOptimization Iteration: 901, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1101, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1201, Training Batch Accuracy: 93.8%\nOptimization Iteration: 1301, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1501, Training Batch Accuracy: 95.3%\nOptimization Iteration: 1601, Training Batch Accuracy: 96.9%\nOptimization Iteration: 1701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 1801, Training Batch Accuracy: 93.8%\nOptimization Iteration: 1901, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 2101, Training Batch Accuracy: 95.3%\nOptimization Iteration: 2201, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2301, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2401, Training Batch Accuracy: 93.8%\nOptimization Iteration: 2501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2601, Training Batch Accuracy: 96.9%\nOptimization Iteration: 2701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 2901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3001, Training Batch Accuracy: 96.9%\nOptimization Iteration: 3101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3201, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 3401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3501, Training Batch Accuracy: 96.9%\nOptimization Iteration: 3601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3801, Training Batch Accuracy: 98.4%\nOptimization Iteration: 3901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4501, 
Training Batch Accuracy: 98.4%\nOptimization Iteration: 4601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 4701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 4901, Training Batch Accuracy: 96.9%\nOptimization Iteration: 5001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5401, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5601, Training Batch Accuracy: 98.4%\nOptimization Iteration: 5701, Training Batch Accuracy: 95.3%\nOptimization Iteration: 5801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 5901, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6001, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6201, Training Batch Accuracy: 96.9%\nOptimization Iteration: 6301, Training Batch Accuracy: 98.4%\nOptimization Iteration: 6401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 6801, Training Batch Accuracy: 96.9%\nOptimization Iteration: 6901, Training Batch Accuracy: 96.9%\nOptimization Iteration: 7001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7201, Training Batch Accuracy: 96.9%\nOptimization Iteration: 7301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 7901, Training Batch Accuracy: 93.8%\nOptimization Iteration: 8001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8101, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8501, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8701, Training Batch Accuracy: 98.4%\nOptimization Iteration: 8801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 8901, Training Batch Accuracy: 98.4%\nOptimization Iteration: 9001, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9101, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9201, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9301, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9401, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9501, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9601, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9701, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9801, Training Batch Accuracy: 100.0%\nOptimization Iteration: 9901, Training Batch Accuracy: 98.4%\nTime usage: 0:00:39\n\n" ] ], [ [ "### Helper-functions for calculating and predicting classifications\n\nThis function calculates the 
predicted labels of images, that is, for each image it calculates a vector of length 10 indicating which of the 10 classes the image is.\n\nThe calculation is done in batches because it might use too much RAM otherwise. If your computer crashes then you can try and lower the batch-size.", "_____no_output_____" ] ], [ [ "# Split the data-set in batches of this size to limit RAM usage.\nbatch_size = 256\n\ndef predict_labels(images):\n # Number of images.\n num_images = len(images)\n\n # Allocate an array for the predicted labels which\n # will be calculated in batches and filled into this array.\n pred_labels = np.zeros(shape=(num_images, num_classes),\n dtype=np.float)\n\n # Now calculate the predicted labels for the batches.\n # We will just iterate through all the batches.\n # There might be a more clever and Pythonic way of doing this.\n\n # The starting index for the next batch is denoted i.\n i = 0\n\n while i < num_images:\n # The ending index for the next batch is denoted j.\n j = min(i + batch_size, num_images)\n\n # Create a feed-dict with the images between index i and j.\n feed_dict = {x: images[i:j, :]}\n\n # Calculate the predicted labels using TensorFlow.\n pred_labels[i:j] = session.run(y_pred, feed_dict=feed_dict)\n\n # Set the start-index for the next batch to the\n # end-index of the current batch.\n i = j\n\n return pred_labels", "_____no_output_____" ] ], [ [ "Calculate a boolean array whether the predicted classes for the images are correct.", "_____no_output_____" ] ], [ [ "def correct_prediction(images, labels, cls_true):\n # Calculate the predicted labels.\n pred_labels = predict_labels(images=images)\n\n # Calculate the predicted class-number for each image.\n cls_pred = np.argmax(pred_labels, axis=1)\n\n # Create a boolean array whether each image is correctly classified.\n correct = (cls_true == cls_pred)\n\n return correct", "_____no_output_____" ] ], [ [ "Calculate a boolean array whether the images in the test-set are classified correctly.", "_____no_output_____" ] ], [ [ "def test_correct():\n return correct_prediction(images = data.test.images,\n labels = data.test.labels,\n cls_true = data.test.cls)", "_____no_output_____" ] ], [ [ "Calculate a boolean array whether the images in the validation-set are classified correctly.", "_____no_output_____" ] ], [ [ "def validation_correct():\n return correct_prediction(images = data.validation.images,\n labels = data.validation.labels,\n cls_true = data.validation.cls)", "_____no_output_____" ] ], [ [ "### Helper-functions for calculating the classification accuracy\n\nThis function calculates the classification accuracy given a boolean array whether each image was correctly classified. E.g. 
`classification_accuracy([True, True, False, False, False]) = 2/5 = 0.4`", "_____no_output_____" ] ], [ [ "def classification_accuracy(correct):\n # When averaging a boolean array, False means 0 and True means 1.\n # So we are calculating: number of True / len(correct) which is\n # the same as the classification accuracy.\n return correct.mean()", "_____no_output_____" ] ], [ [ "Calculate the classification accuracy on the test-set.", "_____no_output_____" ] ], [ [ "def test_accuracy():\n # Get the array of booleans whether the classifications are correct\n # for the test-set.\n correct = test_correct()\n \n # Calculate the classification accuracy and return it.\n return classification_accuracy(correct)", "_____no_output_____" ] ], [ [ "Calculate the classification accuracy on the original validation-set.", "_____no_output_____" ] ], [ [ "def validation_accuracy():\n # Get the array of booleans whether the classifications are correct\n # for the validation-set.\n correct = validation_correct()\n \n # Calculate the classification accuracy and return it.\n return classification_accuracy(correct)", "_____no_output_____" ] ], [ [ "## Results and analysis", "_____no_output_____" ], [ "Function for calculating the predicted labels for all the neural networks in the ensemble. The labels are combined further below.", "_____no_output_____" ] ], [ [ "def ensemble_predictions():\n # Empty list of predicted labels for each of the neural networks.\n pred_labels = []\n\n # Classification accuracy on the test-set for each network.\n test_accuracies = []\n\n # Classification accuracy on the validation-set for each network.\n val_accuracies = []\n\n # For each neural network in the ensemble.\n for i in range(num_networks):\n # Reload the variables into the TensorFlow graph.\n saver.restore(sess=session, save_path=get_save_path(i))\n\n # Calculate the classification accuracy on the test-set.\n test_acc = test_accuracy()\n\n # Append the classification accuracy to the list.\n test_accuracies.append(test_acc)\n\n # Calculate the classification accuracy on the validation-set.\n val_acc = validation_accuracy()\n\n # Append the classification accuracy to the list.\n val_accuracies.append(val_acc)\n\n # Print status message.\n msg = \"Network: {0}, Accuracy on Validation-Set: {1:.4f}, Test-Set: {2:.4f}\"\n print(msg.format(i, val_acc, test_acc))\n\n # Calculate the predicted labels for the images in the test-set.\n # This is already calculated in test_accuracy() above but\n # it is re-calculated here to keep the code a bit simpler.\n pred = predict_labels(images=data.test.images)\n\n # Append the predicted labels to the list.\n pred_labels.append(pred)\n \n return np.array(pred_labels), \\\n np.array(test_accuracies), \\\n np.array(val_accuracies)", "_____no_output_____" ], [ "pred_labels, test_accuracies, val_accuracies = ensemble_predictions()", "Network: 0, Accuracy on Validation-Set: 0.9948, Test-Set: 0.9893\nNetwork: 1, Accuracy on Validation-Set: 0.9936, Test-Set: 0.9880\nNetwork: 2, Accuracy on Validation-Set: 0.9958, Test-Set: 0.9893\nNetwork: 3, Accuracy on Validation-Set: 0.9938, Test-Set: 0.9889\nNetwork: 4, Accuracy on Validation-Set: 0.9938, Test-Set: 0.9892\n" ] ], [ [ "Summarize the classification accuracies on the test-set for the neural networks in the ensemble.", "_____no_output_____" ] ], [ [ "print(\"Mean test-set accuracy: {0:.4f}\".format(np.mean(test_accuracies)))\nprint(\"Min test-set accuracy: {0:.4f}\".format(np.min(test_accuracies)))\nprint(\"Max test-set accuracy: 
{0:.4f}\".format(np.max(test_accuracies)))", "Mean test-set accuracy: 0.9889\nMin test-set accuracy: 0.9880\nMax test-set accuracy: 0.9893\n" ] ], [ [ "The predicted labels of the ensemble is a 3-dim array, the first dim is the network-number, the second dim is the image-number, the third dim is the classification vector.", "_____no_output_____" ] ], [ [ "pred_labels.shape", "_____no_output_____" ] ], [ [ "### Ensemble predictions", "_____no_output_____" ], [ "There are different ways to calculate the predicted labels for the ensemble. One way is to calculate the predicted class-number for each neural network, and then select the class-number with most votes. But this requires a large number of neural networks relative to the number of classes.\n\nThe method used here is instead to take the average of the predicted labels for all the networks in the ensemble. This is simple to calculate and does not require a large number of networks in the ensemble.", "_____no_output_____" ] ], [ [ "ensemble_pred_labels = np.mean(pred_labels, axis=0)\nensemble_pred_labels.shape", "_____no_output_____" ] ], [ [ "The ensemble's predicted class number is then the index of the highest number in the label, which is calculated using argmax as usual.", "_____no_output_____" ] ], [ [ "ensemble_cls_pred = np.argmax(ensemble_pred_labels, axis=1)\nensemble_cls_pred.shape", "_____no_output_____" ] ], [ [ "Boolean array whether each of the images in the test-set was correctly classified by the ensemble of neural networks.", "_____no_output_____" ] ], [ [ "ensemble_correct = (ensemble_cls_pred == data.test.cls)", "_____no_output_____" ] ], [ [ "Negate the boolean array so we can use it to lookup incorrectly classified images.", "_____no_output_____" ] ], [ [ "ensemble_incorrect = np.logical_not(ensemble_correct)", "_____no_output_____" ] ], [ [ "### Best neural network\n\nNow we find the single neural network that performed best on the test-set.\n\nFirst list the classification accuracies on the test-set for all the neural networks in the ensemble.", "_____no_output_____" ] ], [ [ "test_accuracies", "_____no_output_____" ] ], [ [ "The index of the neural network with the highest classification accuracy.", "_____no_output_____" ] ], [ [ "best_net = np.argmax(test_accuracies)\nbest_net", "_____no_output_____" ] ], [ [ "The best neural network's classification accuracy on the test-set.", "_____no_output_____" ] ], [ [ "test_accuracies[best_net]", "_____no_output_____" ] ], [ [ "Predicted labels of the best neural network.", "_____no_output_____" ] ], [ [ "best_net_pred_labels = pred_labels[best_net, :, :]", "_____no_output_____" ] ], [ [ "The predicted class-number.", "_____no_output_____" ] ], [ [ "best_net_cls_pred = np.argmax(best_net_pred_labels, axis=1)", "_____no_output_____" ] ], [ [ "Boolean array whether the best neural network classified each image in the test-set correctly.", "_____no_output_____" ] ], [ [ "best_net_correct = (best_net_cls_pred == data.test.cls)", "_____no_output_____" ] ], [ [ "Boolean array whether each image is incorrectly classified.", "_____no_output_____" ] ], [ [ "best_net_incorrect = np.logical_not(best_net_correct)", "_____no_output_____" ] ], [ [ "### Comparison of ensemble vs. 
the best single network", "_____no_output_____" ], [ "The number of images in the test-set that were correctly classified by the ensemble.", "_____no_output_____" ] ], [ [ "np.sum(ensemble_correct)", "_____no_output_____" ] ], [ [ "The number of images in the test-set that were correctly classified by the best neural network.", "_____no_output_____" ] ], [ [ "np.sum(best_net_correct)", "_____no_output_____" ] ], [ [ "Boolean array whether each image in the test-set was correctly classified by the ensemble and incorrectly classified by the best neural network.", "_____no_output_____" ] ], [ [ "ensemble_better = np.logical_and(best_net_incorrect,\n ensemble_correct)", "_____no_output_____" ] ], [ [ "Number of images in the test-set where the ensemble was better than the best single network:", "_____no_output_____" ] ], [ [ "ensemble_better.sum()", "_____no_output_____" ] ], [ [ "Boolean array whether each image in the test-set was correctly classified by the best single network and incorrectly classified by the ensemble.", "_____no_output_____" ] ], [ [ "best_net_better = np.logical_and(best_net_correct,\n ensemble_incorrect)", "_____no_output_____" ] ], [ [ "Number of images in the test-set where the best single network was better than the ensemble.", "_____no_output_____" ] ], [ [ "best_net_better.sum()", "_____no_output_____" ] ], [ [ "### Helper-functions for plotting and printing comparisons", "_____no_output_____" ], [ "Function for plotting images from the test-set and their true and predicted class-numbers.", "_____no_output_____" ] ], [ [ "def plot_images_comparison(idx):\n plot_images(images=data.test.images[idx, :],\n cls_true=data.test.cls[idx],\n ensemble_cls_pred=ensemble_cls_pred[idx],\n best_cls_pred=best_net_cls_pred[idx])", "_____no_output_____" ] ], [ [ "Function for printing the predicted labels.", "_____no_output_____" ] ], [ [ "def print_labels(labels, idx, num=1):\n # Select the relevant labels based on idx.\n labels = labels[idx, :]\n\n # Select the first num labels.\n labels = labels[0:num, :]\n \n # Round numbers to 2 decimal points so they are easier to read.\n labels_rounded = np.round(labels, 2)\n\n # Print the rounded labels.\n print(labels_rounded)", "_____no_output_____" ] ], [ [ "Function for printing the predicted labels for the ensemble of neural networks.", "_____no_output_____" ] ], [ [ "def print_labels_ensemble(idx, **kwargs):\n print_labels(labels=ensemble_pred_labels, idx=idx, **kwargs)", "_____no_output_____" ] ], [ [ "Function for printing the predicted labels for the best single network.", "_____no_output_____" ] ], [ [ "def print_labels_best_net(idx, **kwargs):\n print_labels(labels=best_net_pred_labels, idx=idx, **kwargs)", "_____no_output_____" ] ], [ [ "Function for printing the predicted labels of all the neural networks in the ensemble. This only prints the labels for the first image.", "_____no_output_____" ] ], [ [ "def print_labels_all_nets(idx):\n for i in range(num_networks):\n print_labels(labels=pred_labels[i, :, :], idx=idx, num=1)", "_____no_output_____" ] ], [ [ "## Examples: Ensemble is better than the best network\n\nPlot examples of images that were correctly classified by the ensemble and incorrectly classified by the best single network.", "_____no_output_____" ] ], [ [ "plot_images_comparison(idx=ensemble_better)", "_____no_output_____" ] ], [ [ "The ensemble's predicted labels for the first of these images (top left image):", "_____no_output_____" ] ], [ [ "print_labels_ensemble(idx=ensemble_better, num=1)", "[[ 0. 0. 0. 
0.76 0. 0. 0. 0. 0.23 0. ]]\n" ] ], [ [ "The best network's predicted labels for the first of these images:", "_____no_output_____" ] ], [ [ "print_labels_best_net(idx=ensemble_better, num=1)", "[[ 0. 0. 0. 0.21 0. 0. 0. 0. 0.79 0. ]]\n" ] ], [ [ "The predicted labels of all the networks in the ensemble, for the first of these images:", "_____no_output_____" ] ], [ [ "print_labels_all_nets(idx=ensemble_better)", "[[ 0. 0. 0. 0.21 0. 0. 0. 0. 0.79 0. ]]\n[[ 0. 0. 0. 0.96 0. 0.01 0. 0. 0.03 0. ]]\n[[ 0. 0. 0. 0.99 0. 0. 0. 0. 0.01 0. ]]\n[[ 0. 0. 0. 0.88 0. 0. 0. 0. 0.12 0. ]]\n[[ 0. 0. 0. 0.76 0. 0.01 0. 0. 0.22 0. ]]\n" ] ], [ [ "## Examples: Best network is better than ensemble\n\nNow plot examples of images that were incorrectly classified by the ensemble but correctly classified by the best single network.", "_____no_output_____" ] ], [ [ "plot_images_comparison(idx=best_net_better)", "_____no_output_____" ] ], [ [ "The ensemble's predicted labels for the first of these images (top left image):", "_____no_output_____" ] ], [ [ "print_labels_ensemble(idx=best_net_better, num=1)", "[[ 0.5 0. 0. 0. 0. 0.05 0.45 0. 0. 0. ]]\n" ] ], [ [ "The best single network's predicted labels for the first of these images:", "_____no_output_____" ] ], [ [ "print_labels_best_net(idx=best_net_better, num=1)", "[[ 0.3 0. 0. 0. 0. 0.15 0.56 0. 0. 0. ]]\n" ] ], [ [ "The predicted labels of all the networks in the ensemble, for the first of these images:", "_____no_output_____" ] ], [ [ "print_labels_all_nets(idx=best_net_better)", "[[ 0.3 0. 0. 0. 0. 0.15 0.56 0. 0. 0. ]]\n[[ 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]\n[[ 0.19 0. 0. 0. 0. 0. 0.81 0. 0. 0. ]]\n[[ 0.15 0. 0. 0. 0. 0.12 0.72 0. 0. 0. ]]\n[[ 0.85 0. 0. 0. 0. 0. 0.14 0. 0. 0. ]]\n" ] ], [ [ "## Close TensorFlow Session", "_____no_output_____" ], [ "We are now done using TensorFlow, so we close the session to release its resources.", "_____no_output_____" ] ], [ [ "# This has been commented out in case you want to modify and experiment\n# with the Notebook without having to restart it.\n# session.close()", "_____no_output_____" ] ], [ [ "## Conclusion\n\nThis tutorial created an ensemble of 5 convolutional neural networks for classifying hand-written digits in the MNIST data-set. The ensemble worked by averaging the predicted class-labels of the 5 individual neural networks. This resulted in slightly improved classification accuracy on the test-set, with the ensemble having an accuracy of 99.1% compared to 98.9% for the best individual network.\n\nHowever, the ensemble did not always perform better than the individual neural networks, which sometimes classified images correctly while the ensemble misclassified those images. This suggests that the effect of using an ensemble of neural networks is somewhat random and may not provide a reliable way of improving the performance over a single neural network.\n\nThe form of ensemble learning used here is called [bagging](https://en.wikipedia.org/wiki/Bootstrap_aggregating) (or Bootstrap Aggregating), which is mainly useful for avoiding overfitting and may not be necessary for this particular neural network and data-set. So it is still possible that ensemble learning may work in other settings.\n\n### Technical Note\n\nThis implementation of ensemble learning used the TensorFlow `Saver()`-object to save and reload the variables of the neural network. 
But this functionality was really designed for another purpose and becomes very awkward to use for ensemble learning with different types of neural networks, or if you want to load multiple neural networks at the same time. There's an add-on package for TensorFlow called [sk-flow](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/learn/python/learn) which makes this much easier, but it is still in the early stages of development as of August 2016.", "_____no_output_____" ], [ "## Exercises\n\nThese are a few suggestions for exercises that may help improve your skills with TensorFlow. It is important to get hands-on experience with TensorFlow in order to learn how to use it properly.\n\nYou may want to backup this Notebook before making any changes.\n\n* Change different aspects of this program to see how it affects the performance:\n * Use more neural networks in the ensemble.\n * Change the size of the training-sets.\n * Change the number of optimization iterations, try both more and less.\n* Explain to a friend how the program works.\n* Do you think Ensemble Learning is worth more research effort, or should you rather focus on improving a single neural network?", "_____no_output_____" ], [ "## License (MIT)\n\nCopyright (c) 2016 by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
cb108fe1dd654d05dad791a3564b10bb2184bbd8
39,693
ipynb
Jupyter Notebook
signals/sp-m1-4-discrete-fourier-transform.ipynb
Ahmed-H-N/uoe_speech_processing_course
f87720f3267dc61079c24dcd96768c4b58ec7898
[ "MIT" ]
null
null
null
signals/sp-m1-4-discrete-fourier-transform.ipynb
Ahmed-H-N/uoe_speech_processing_course
f87720f3267dc61079c24dcd96768c4b58ec7898
[ "MIT" ]
null
null
null
signals/sp-m1-4-discrete-fourier-transform.ipynb
Ahmed-H-N/uoe_speech_processing_course
f87720f3267dc61079c24dcd96768c4b58ec7898
[ "MIT" ]
null
null
null
35.921267
586
0.587056
[ [ [ "#### _Speech Processing Labs 2020: Signals: Module 1_", "_____no_output_____" ] ], [ [ "## Run this first! \n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport cmath\nfrom math import floor\nfrom matplotlib.animation import FuncAnimation\nfrom IPython.display import HTML\nplt.style.use('ggplot')\n\n## some helper functions:\nfrom dspMisc import *", "_____no_output_____" ] ], [ [ "# 4 The Discrete Fourier Transform\n\n\n### Learning Outcomes\n* Understand the DFT equation: what input does it take and what outputs does it produce.\n\n### Need to know\n* Topic Videos: Series Expansion, Fourier Analysis, Frequency Domain\n* Vector operations: dot product\n* [Digital Signals: Sampling sinusoids](./sp-m1-3-sampling-sinusoids.ipynb)\n", "_____no_output_____" ], [ "<div class=\"alert alert-warning\">\n <em> <strong>Optional-ish:</strong> This notebook goes through the DFT equation in quite a lot of detail - more than is strictly necessary for this course. It's perfectly fine to just focus on the visualizations or to skip it for now. \n \nThat said, you might like to run the code anyway and look at the visualization of the different phasors (i.e. basis functions) that correspond to the different DFT outputs in the DFT work through section called [Generate Phasors](#genphasors), and the corresponding magnitude and phase response graphs for that example</em>\n</div>\n\nDo you remember learning a bunch of math (trig, algebra, calculus) in high-school and thinking you'll never have to use this? While you might not use it directly, a lot of the technology you use everyday depends on it. Pretty much all current speech technology depends on the Fourier Transform in some way! It has been (and continues to be) used to solve problems in many different fields from from image processing to quantum mechanics\n\nIn the previous notebook, you looked at how you can add 'pure' sinusoids together to form complicated waveforms. The Fourier Transform gives us a way of doing the opposite: taking sequence of measurements over time (like a speech wave) and decomposing it into sinusoids of different frequencies, magnitudes, and phases. \n\nSince we are interested in digital signals (i.e. measuresment over discrete time steps), we'll need to use the **Discrete Fourier Transform**. This makes it possible to actually do these calculations on a computer, but it does also lead to some unexpected side-effects. \n\n<div class=\"alert alert-warning\">\n<strong>Equation alert</strong>: If you're viewing this on github, please note that the equation rendering is not always perfect. You should view the notebooks through a jupyter notebook server for an accurate view.\n</div>", "_____no_output_____" ], [ "## 4.1 The DFT equation \n\nYou can think of the DFT as a machine that takes $N$ amplitude samples over time and returns the frequency spectrum of the input signal (just like you when you view a spectral slice in praat).\n\nYou can treat this as a black box. But, now that we've gone through complex numbers, phasors, etc we can look at how the machine works and, more importantly, what it's limitations are.\n\n* Input: $x[n]$, for $n=1..N$ (i.e. 
a time series of $N$ samples)\n* Output: N complex numbers $\\mapsto$ magnitude and phases of specific phasors:\n\n$$ \n\\begin{align}\nDFT[k] &= \\sum_{n=0}^{N-1} x[n] e^{-j \\frac{2\\pi n}{N} k} \\\\\nDFT[k] &= \\sum_{n=0}^{N-1} x[n]\\big[\\cos(\\frac{2\\pi n}{N} k) - j \\sin(\\frac{2\\pi n}{N} k) \\big] \n\\end{align}\n$$\n\nYou'll usually see this written as $X[k]$ in signal processing textbooks, but we'll just say $DFT[k]$ to be a bit more transparent (and reduce the probability of typos!)\n\nNotice the $-j$ in the phasor! This means as $n$ increases the 'clockhand' of the phasor is rotating clockwise! \n", "_____no_output_____" ], [ "## 4.2 The DFT Step-by-Step\n\nBut what does the DFT do for us? Very broadly, the DFT takes a series of $N$ values in time as input (e.g. waveform amplitudes over time for our sound waves) and outputs the **correlations** between the input and $N$ pure cosine waves with specific frequencies. That is, it tells you how you would weight (and shift) these cosine waves so that you can add them all up and recover the original input. \n\n_Let's break it down!_ \n\n### The DFT for $k=0$\nLet's first look at the DFT for $k=0$: \n\n$$ \n\\begin{align}\nDFT[0] = \\sum_{n=0}^{N-1} x[n] e^{-j \\frac{2\\pi n}{N} 0} \n\\end{align}\n$$\n\nThis is usually referred to as the **bias** term. It tells us whether the input function is shifted up or down the amplitude axis (in a time v amplitude plot of the waveform). \n\n### Exercise: \n\n* Q: Why doesn't DFT[0] tell us anything about the frequency of the input? ", "_____no_output_____" ], [ "### The DFT for $k=1$: \n\nNow we can start to look at the correlation between the input sequence and some phasors of different frequencies. For $k=1$, we have: \n\n$$ DFT[1] = \\sum_{n=0}^{N-1} x[n] e^{-j \\frac{2 \\pi n}{N}}$$\n\n\n\nEach of the $e^{j 2\\pi n/N}$ terms in the sum is a step of $2\\pi n/N$ radians around the circle. That is, our $k=1$ phasor $e^{-j \\frac{2 \\pi}{N}n}$ makes completes 1 full circle every $N$ time steps. \n", "_____no_output_____" ], [ "Let's plot the the $k=1$ phasor for $N=16$: ", "_____no_output_____" ] ], [ [ "## plot complex numbers in the DFT[1] equation\nN=16\n\n## make an array of N steps: 0,...,N\nnsteps = np.array(range(N))\ntsteps = 2*np.pi*nsteps/N\nTmin = np.min(tsteps)\nTmax = np.max(tsteps)\n\n## Corresponding complex numbers z[n] = e^{j 2 pi n/N}\nzn_k1 = np.exp(-1j*2*np.pi*nsteps/N) \n", "_____no_output_____" ], [ "## Plot the phasor corresponding to DFT[k=1] \nX_k1, Y_k1, fig, phasor, iproj, rproj = plot_phasor_static(zn_k1, tsteps, plot_real=True, plot_imag=True)", "_____no_output_____" ], [ "## Animate the phasor! \nphasor1_vid = get_phasor_animation(X_k1, Y_k1, tsteps, phasor, iproj, rproj, fig)\nphasor1_vid", "_____no_output_____" ] ], [ [ "What this animation shows is that for $k=1$, the $e^{-j\\varphi}$ terms in the DFT represent $N$ evenly space samples around the unit circle aka one period of a sinusoid. So, The term $x[n] e^{-j2\\pi n/N}$ represents multiplying the $n$th step around the unit circle with the $n$th input value.\n\n\n", "_____no_output_____" ], [ "### Exercise (optional)\n\nWe saw from Euler's formula previously that projecting the phasor (left fig) onto the imaginary (vertical) axis, would give us a sine wave, i.e. $\\sin(t)$. This isn't quite what we see above. Instead the sinusoid on the right, looks like the mirror image of $\\sin(t)$, i.e. $-\\sin(t)$. Why is this? 
", "_____no_output_____" ], [ "### Notes", "_____no_output_____" ], [ "So, we can interpret the DFT as taking the dot product between the input and a cosine (and sine) wave of specific frequency. That is, it tells us if the input sequence repeats itself with a specific frequency. \n\n\n", "_____no_output_____" ], [ "### The dot product with the input $x[n]$\nIt's useful at this point to break the DFT equation into two parts using Euler's rule. This esssentially gives us two dot products (i.e. elementwise multiplication and summation). This allows us to calculate the real and imaginary components of the DFT output separately as a dot product with a cosine (the real part) and a dot product with a sine (the imaginary part): \n\n\n$$ \n\\begin{align}\nDFT[1] &= \\sum_{n=0}^{N-1} x[n] \\big(\\cos(2\\pi n/N) -j\\sin(2\\pi n/N)\\big)\\\\\n&= \\sum_{n=0}^{N-1} x[n] \\cos(2\\pi n/N) - j \\sum_{n=0}^{N-1} x[n] \\sin(2\\pi n/N)\n\\end{align}\n$$\n\n\n\n\n\n", "_____no_output_____" ], [ "### Example: \nLet's look at what's happening in these sum terms using a simple sampled square wave with the same period as the $k=1$ phasor for input. The following visualizes the second sum (i.e. the dot product with $\\sin(2\\pi n/N)$).", "_____no_output_____" ] ], [ [ "\n## Make the background and plot the k=1 phasor\nX_k1, Y_k1, fig, phasor, iproj, rproj = plot_phasor_static(zn_k1, tsteps, plot_real=True, plot_imag=True)\n\n## Now, let's say our input x looks like this square wave: \nxn = np.array([-1]*8 + [1]*8)\n\n## Exercise: Try some other inputs later\n#xn = np.array([-1, -1,-1,-1, 1, 1,1,1]*2)\n#xn = np.array([-1]*4 + [1]*8 + [-1]*4)\n#xn = np.array([1] + [-1, -1,-1, 1, 1,1,1]*2 + [-1])\n#xn = zn_imag_k1\n#xn = np.concatenate([Ys[np.arange(0, 16, 2)], Ys[np.arange(0, 16, 2)]])\n#xn = np.array([1] + [-1, -1,-1, 1, 1,1,1]*2 + [-1])\n\n\nprint(\"x[n]:\", xn)\n\n## Plot the input x[n]:\n\n# with respect to the imaginary component (top right)\niproj.scatter(tsteps, xn, color='C1')\n\n# and the real component (bottom left)\nrproj.scatter(xn, tsteps, color='C1')\n\n## Do the elementwise multiplication of input (xn) and the k=1 phasor (zn_k1)\nxn_times_zn = xn * zn_k1\n\n## Get the real and imaginary parts\nreal_xn_zn = np.real(xn_times_zn)\nimag_xn_zn = np.imag(xn_times_zn)\n\n## Plot the imaginary parts = x[n]*sin(theta) (top right) in yellow\niproj.plot(tsteps, imag_xn_zn, color='C4')\niproj.scatter(tsteps, imag_xn_zn, color='C4', s=100, alpha=0.5)\n\n## Plot the real parts = x[n]*cos(theta) (bottom left) in yello\nrproj.plot(real_xn_zn, tsteps, color='C4')\nrproj.scatter(real_xn_zn, tsteps, color='C4', s=100, alpha=0.5)", "_____no_output_____" ] ], [ [ "In the plot above, you can see that: \n* The grey dots show our samples from the $k=1$ phasor (top left), and projected imaginary component, i.e. the sine in the DFT sum (top right), and the projected real component, i.e. the cosine (bottom left) \n* The blue dots show our input sequence (top right and bottom left)\n* the yellow dots show the element wise multiplication of the phasor values and the input, projected on the imaginary and real axes. \n\nLet's just look at the imaginary (sine) part of the $x[n] \\cdot z_1[n]$ multiplication (i.e., `xn_times_zn`): \nWhen we multiply the values in both sequences together, we can see that (1) the values in the input and phasor don't exactly match, but (2) they are always the same sign. That is, the input and the sine component of the $k=1$ phasor are correlated to some extent. 
In this case, this means that the multiplication terms (in yellow) in this case are all positive. So, adding them all up will give us a positive term for the imaginary component of the DFT. \n\nWe can also see that, even though we basically chose this example to match the sine component, we do also get a positive correlation with the real (cos) component. \n\nThe following cell shows that you get the same result from doing the dot products with the real (cos) and imaginary (sin) parts of the phasor separately, or doing the dot product with the phasor and then projecting the real and imaginary parts. \n", "_____no_output_____" ] ], [ [ "## The dot product: sum all the elementwise multiplications\ndft_1_real = np.sum(real_xn_zn)\ndft_1_imag = np.sum(imag_xn_zn)\n\n\nprint(\"* projection and then two separate products: DFT[k] = %f + i%f\" %(dft_1_real, dft_1_imag))\n\n## check these are the same!\ndft_1 = np.sum(xn*zn_k1)\nprint(\"* one dot product and then projection: DFT[k] = %f + i%f\" %(dft_1.real, dft_1.imag))", "_____no_output_____" ] ], [ [ "### Exercise: Change the input\nWhat happens when we change the input `xn` so that:\n\n* it has a different period\n* exactly matches the sine component of the k=1 phasor\n* is out of phase with the sine component of the k=1 phasor\n* has a different magnitude\n* something else ...\n\nThere are some commented out options that you can try in the cell above", "_____no_output_____" ], [ "### Notes", "_____no_output_____" ], [ "### Example: Phase shifted input\n\nLet's see what happens when our input matches the imaginary component of $k=1$ phasor but has it's phase shifted a bit. Remember, we can shift the start point of our phasor by multiply the whole phasor sequence a complex number. \n\n$$ \\sin(\\theta + \\phi) = Imag(e^{j\\phi}e^{j\\theta}) = Imag(e^{j\\theta+\\phi})$$", "_____no_output_____" ] ], [ [ "## Let's generate our DFT[1] phasor for an input of N=16 points:\nN=16\n\n## make an array of N steps: 0,...,N\nnsteps = np.array(range(N))\ntsteps = 2*np.pi*nsteps/N\n\n## The k=1 phasor: \nzn_k1 = np.exp(-1j*tsteps) ", "_____no_output_____" ] ], [ [ "Now let's create an input that's the same as the sine component of the DFT[1] phasor, but shifted by $\\pi/3$ radians. ", "_____no_output_____" ] ], [ [ "## Plot the DFT[1] phasor\nX_k1, Y_k1, fig, phasor, iproj, rproj = plot_phasor_static(zn_k1, tsteps, plot_real=False, plot_imag=True)\n\n## Now, for the input let's use a phase shifted version of the sine component of our phasor, zn (defined above)\n# Remember that multiplying a complex number by e^{j theta} rotates it by theta degrees \n# (anticlockwise if theta is positive) \n# So to shift the whole sequence we just multiply everything in the sequence by the same complex number\n\n## For this example, let's shift our -sin(2 pi n/N) by pi/3\nzn_shift = np.exp(1j*np.pi/3) * zn_k1\n\n## And take as input just the imaginary component of this shifted sine wave\nxn = np.imag(zn_shift)\n\n## Plot the phase shifted sine wave it in blue! \niproj.scatter(2*np.pi*nsteps/N, xn, color='C1')\niproj.plot(2*np.pi*nsteps/N, xn, color='C1')\n", "_____no_output_____" ] ], [ [ "In the plot above, you should see the input (in blue) is the same as the sine component of the $k=1$ phasor but *phase shifted*. By multiplying everything by $e^{j\\pi/3}$ _delays_ the our $-sin(\\theta)$ function by $\\pi/3$ seconds (in this example we're assuming that it takes $\\theta$ seconds to travel $\\theta$ degrees around the phasor). 
\n\nNow let's see how this effects the DFT output: ", "_____no_output_____" ] ], [ [ " ## Elementwise multiplication of the input and the k=1 phasor\nxn_times_zn = xn * zn_k1\n\n## Add it all up to get the dot product\ndft_1 = np.sum(xn_times_zn)\n\nprint(\"DFT[1] = %f + i%f\" %(dft_1.real, dft_1.imag))\nprint(\"in polar coordinates: magnitude=%f, phase=%f\" %cmath.polar(dft_1))\n\n## Plot the sequence of multiplications (yellow)\niproj.plot(2*np.pi*nsteps/N, np.imag(xn_times_zn), color='C4')\niproj.scatter(2*np.pi*nsteps/N, np.imag(xn_times_zn), color='C4')\nfig", "_____no_output_____" ] ], [ [ "The result of the DFT is 6.93 + j4, which in polar coordinates has a magnitude=8 and phase angle=0.52 \n\n* Non-zero magnitude means that the input has a frequency component that matches the frequency of the $k=1$ phasor\n* Non-zero phase means that the input is like the $k=1$ phasor in frequency, but starting from a different starting angle. ", "_____no_output_____" ], [ "### How does the detected phase angle relate to the phase we used to shift the input? \n\nThe magnitude of the DFT output is relatively straightforward to interpret. The bigger the magnitude, the bigger the amplitude of this frequency component in the input. \n\nWhen we convert the $DFT[1]$ output to polar coordinates we can, as before, interpret $DFT[1]$ as a scaling (magnitude) and rotation (phase) factor. We can think of the phase as signalling a bigger amplitude of this frequency in the the input. We can think of the phase angle as rotating the starting position of the DFT[1] phasor 'clock hand' by that angle. When we convert this to time vs amplitude, this essential means starting from a future point in the sinusoid for positive phase (or a past point for negative phase) relative to the direction the phasor is rotating. \n\n### DFT phase is relative to cosine! \n\nAn important thing to note is that the phase output of the DFT is relative to the **cosine** function with the same frequency as the phasor. This is why the phase value we calculated isn't actually the same as the angle we used to shift the input sequence ($\\pi/3=1.047$ radians), since that input sequence was actually based on a sine function. \n\n\n", "_____no_output_____" ], [ "### Exercise (optional) \n\n\n\nShow that our input above $-\\sin(t-\\pi/3)$ is the same as a cosine wave with the phase shift derived above $\\pi/6$.\nYou'll need to use these trig identities: \n$$ \\cos(\\alpha - \\pi/2) = \\sin(\\alpha) $$\n$$\\sin(t+\\pi) = -\\sin(t)$$\n\n**Note** We definitely won't be asking you do these sorts of calculations for assessment, but going through it will help consolidate your intutions about sinusoids and the relationship between cosine and sine waves.\n\n\n", "_____no_output_____" ], [ "### Notes\n", "_____no_output_____" ], [ "Even if you don't do the exercise above, we can see that shifting a cosine function by the DFT output phase gives us the same sinusoid as our input. \n\nThat is: \n$$ \\cos(t + \\pi/6) = -\\sin(t-\\pi/3)$$\n\njust by plotting them:\n", "_____no_output_____" ] ], [ [ "## Plot the input from above and the equivalent cos and sin function based on DFT phase output. 
\n_, _, fig, phasor, iproj, rproj = plot_phasor_static(zn_k1, tsteps, plot_real=False, plot_imag=True)\n\n\n#fig, phasor, iproj, rproj = create_phasor_iproj_bkg(Tmin, Tmax, ymax=1.5)\n#phasor.scatter(zn_real_k1, zn_imag_k1)\n\n## Our input (C1=blue)\ntsteps = 2*np.pi*nsteps/N\niproj.scatter(tsteps, xn, color='C1', s=300)\niproj.plot(tsteps, xn, color='C1')\n\n## cos(t + pi/6) (C0=red)\niproj.scatter(tsteps, np.cos(tsteps+np.pi/6), color='C0', s=200)\niproj.plot(tsteps, np.cos(tsteps+np.pi/6), color='C0')\n\n## -sin(t-pi/3) (C5=green)\niproj.scatter(tsteps, -np.sin(tsteps-np.pi/3), color='C5', s=80)\niproj.plot(tsteps, -np.sin(tsteps-np.pi/3), color='C5')", "_____no_output_____" ] ], [ [ "In the plot above, you should see:\n* the DFT[1] phasor in grey, i.e. $-\\sin(t)$\n* Our phase shifted input in blue\n* The phase shift determined by DFT[1] applied as a cosine wave: $\\cos(t + \\pi/6)$ in red\n* Our phase shifted input generated directly using the `np.sin` function: $-\\sin(t-\\pi/3)$ in green\n\nYou should see is that that the last three functions are all the same! We can always write a sine wave as a cosine wave with a phase shift. ", "_____no_output_____" ], [ "### The DFT for k = 2 and beyond\n\nWe can think of DFT[2] as representing the contribution of a phasor that spins around the unit circle\ntwice as fast as the $k=1$ DFT phasor:\n\n* For $k=2$, Each $e^{i \\frac{2\\pi n}{N}k}$ is a step of $\\frac{2\\pi}{N}\\times 2$ radians around the unit circle\n * i.e. we only sample every second point compared to the $k=1$ case.\n \n* This means it only takes half the time to make a full cycle. So, this phasor represents a sinusoid with twice the frequency of the one for $k=1$\n", "_____no_output_____" ], [ "### Exercise:\nPlot sinusoids for different values of `k` using the code below\n* What happens when $k=N/2$?\n* What happens when $k > N/2$? \n* What if you increase $N$?\n* How many actual frequencies can the DFT actually tell us about? \n* How does this relate to the aliasing problem we saw in the previously? ", "_____no_output_____" ] ], [ [ "## Plot the phasor and sinusoid for different values of k\n\n## Number of samples\nN=16\n\n## DFT output we're looking at: a sinusoid with k times the freq of the k=1 phasor\nk=15\n\n## indices of points in sample\nnsteps = np.array(range(N))\n\n## times of samples\ntsteps = 2*np.pi*nsteps/N\n\n## N sample values of the kth DFT phasor\nzn_k = np.exp(k*-1j*tsteps) \n\nX_k, Y_k, fig, phasor, iproj, rproj = plot_phasor_static(zn_k, tsteps, plot_real=False, plot_imag=True)\n", "_____no_output_____" ], [ "kphasor_anim= get_phasor_animation(X_k, Y_k, tsteps, phasor, iproj, rproj, fig)\nkphasor_anim", "_____no_output_____" ] ], [ [ "### Notes", "_____no_output_____" ], [ "## 4.3 The full DFT\n\nNow that we've seen what happens when we calculate the individual DFT components, lets do the whole thing!\n\nWe need\n\n\n1. **Input:** a sequence of $N$ samples\n1. generate $N$ phasors, with $N$ sampled points\n1. generate $N$ dot products between the input and the phasors\n1. 
**Output:** $N$ complex numbers representing magnitude and phase \n\nThe magnitudes and phases give us the decomposition of the input into pure cosine waves\n\n", "_____no_output_____" ], [ "### Set the input $x[n]$\n\nLet's use the same input as above $x[n] = -\\sin(2\\pi n/N -\\pi/3)$", "_____no_output_____" ] ], [ [ "## 1) Input: -sin(t-pi/3) as above\nN=16\nnsteps = np.array(range(N))\ntheta_step = 2*np.pi/N\ntheta_steps = theta_step * nsteps\n\n## set the phase shift\nphase_in = np.pi/3\n\n## set the input as -sin with phase shift\nx = -np.sin(theta_steps-phase_in) \n\n## Plot the input x\nfig, tdom = plt.subplots(figsize=(16, 4))\ntdom.scatter(tsteps, x, color='magenta')\ntdom.plot(tsteps, x, color='magenta')\n", "_____no_output_____" ] ], [ [ "You should see the input (16 points), `x`, plotted in magenta ", "_____no_output_____" ], [ "### Generate phasors\n<a name=\"genphasors\"></a>\nLet's use some functions to generate all the DFT phasors in one go: ", "_____no_output_____" ] ], [ [ "## 2) Generate the phasors\n# Given a sequence length N, return dictionary representing N DFT outputs\n# the kth element of the return dictionary contains the complex numbers sampled around the unit circle after \n# N steps, their real and imaginary parts, and the angles of those complex numbers (magnitude is always 1)\ndef get_dft_phasors(N, centered=False):\n \n ## N input steps, N phasors\n nsteps = np.array(range(N))\n \n ## DFT works for negatively indexed samples, i.e. x[-n], but we don't need this right now! \n if centered:\n nsteps = nsteps - floor(N/2)\n print(nsteps)\n \n # We know the smallest step around the unit circle we can take is 2pi/N\n theta_step = 2*np.pi/N\n # And we're going to take N steps\n theta_steps = theta_step * nsteps\n \n ## Generate N phasors\n phasors = {}\n for k in range(N):\n ## samples around the unit circle\n zs = np.exp(k*-1j*theta_steps) \n real = np.real(zs)\n imag = np.imag(zs) \n \n ## Since we're here, let's return all these things for convenience\n # zs: the phasor samples, real: the real component, imag: the imaginary component\n # theta_steps: the angles for each phasor sample, theta_step: the smallest angle step size\n \n phasors[k] = {'zs':zs, 'real':real, 'imag':imag, 'thetas':theta_steps, 'theta_step':theta_step}\n \n return phasors\n\n", "_____no_output_____" ], [ "## get the list of phasors\nphasors = get_dft_phasors(N)", "_____no_output_____" ], [ "## plot the different phasors and sine (imaginary) components\n## You should be able to see the frequency relationship between each DFT component\nfor k in range(N):\n X_k, Y_k, fig, phasor, iproj, rproj = plot_phasor_static(phasors[k]['zs'], tsteps, plot_real=False, plot_imag=True, color='C'+str(k))\n\n", "_____no_output_____" ] ], [ [ "You should see in the plots above that phasors $k > N/2$ repeat the frequencies we see for $k < N/2$. For example, \nthe $k=1$ phasor is equivalent in frequency to the $k=15$ phasor! ", "_____no_output_____" ], [ "### Calculate dot products (i.e. 
input and phasor correlations) ", "_____no_output_____" ] ], [ [ "## 3) Get the dot products for each phasor and the input\n\ndef get_dft_outputs(x, phasors):\n DFT = []\n \n ## Go through our list of N phasors\n for k, phasor in phasors.items():\n \n ## Do the dot product between the input and each phasor sequence\n DFT.append(np.sum(x * phasor['zs']))\n\n return DFT\n\n", "_____no_output_____" ], [ "## do the DFT\ndft = get_dft_outputs(x, phasors)", "_____no_output_____" ] ], [ [ "### Get the output magnitudes and phases\n\nNow we convert the results of the dot products we just calculated into polar form to get magnitudes and phases. We can then plot them! ", "_____no_output_____" ] ], [ [ "## convert to polar coordinates to get magnitude and phase\ndft_polar = [cmath.polar(z) for z in dft]\nmags = [m for m, _ in dft_polar]\nphases = [ph if mag > 0.00001 else 0 for mag,ph in dft_polar]\n\n## we need to put a condition into the phase calculation because phase is calculated by \n## a ratio of the imaginary component and the real component. Both of these values might be very, very small. \n## This makes it susceptible to floating point errors. \n## Then, the ratio of two very small things can actually end up to be quite (spuriously) big! \n", "_____no_output_____" ], [ "## Plot the magnitudes\n#print(mags)\nfig, freqdom = plt.subplots(figsize=(16, 4))\nfreqdom.set(xlim=(-1, N), ylim=(-1, 10))\nfreqdom.scatter(range(N), mags)", "_____no_output_____" ], [ "## plot the phase angle for each DFT component\n#print(phases)\nfig, fdom = plt.subplots(figsize=(16, 4))\nfdom.set(xlim=(-1, N), ylim=(-2, 2))\nfdom.scatter(range(N), phases)", "_____no_output_____" ] ], [ [ "### How do we interpret the magnitude and phase graphs? \n\nThe top plot shows the magnitude response of the DFT with respect to our input. The bottom plot shows the phase response. \n\nThe magnitude plot shows positive magnitudes for the 1st and the N-1th DFT components, and zero for all the rest. This means that the input is a sinusoid with the same frequency as the 1st DFT component. \n\nThe positive phase for the first component indicates a phase shift. We see the opposite phase shift for the 15th component because the phasor for that has the same frequency but rotates the opposite way. \n\nSo, this is what we expect since we generated the input as exactly the sine projection of the k=1 phasor, just shifted by $\\pi/3$ radians! \n\n", "_____no_output_____" ], [ "### Plotting DFT outputs on the complex plane\nSo far, we've looked at the real and imaginary components of the DFT outputs separately. But we can also think about their properties on the complex plane. \n\nThe following plots the DFT outputs for the example above on the complex plane. \n", "_____no_output_____" ] ], [ [ "## Plot the actual DFT outputs as complex numbers on the complex plane\n## For the example above, we can see that most of the outputs are at (0,0), i.e. 
no contribution\n## but we get positive magnitudes for the 1st and 15th component, with the same \n## magnitude but 'mirrored' phase angle: \n\nymax=(N/2) + 2\nfig, ax = plt.subplots(figsize=(5, 5))\nax.set(xlim=(-10, 10), ylim=(-10, 10))\nax.plot([-ymax,ymax], [0,0], color='grey')\nax.plot([0,0],[-ymax,ymax], color='grey')\ncircle1 = plt.Circle((0, 0), 1, color='grey',fill=False)\n \nax.add_artist(circle1)\n\nfor k in range(N):\n zs = phasors[k]['zs']\n xz = x * zs\n dftk = sum(xz)\n #print(dftk, dftk.real, cmath.polar(dftk))\n ax.plot(dftk.real, dftk.imag, 'o')\n \n if dftk.real > 0.0001: \n print(\"k=%d, %f + j%f\" % (k, dftk.real, dftk.imag))\n\n", "_____no_output_____" ] ], [ [ "You can see from this plot that all but two of the DFT outputs have zero magnitude. The two that have non-zero magnitude are the 1st and 15th components. You can see that they have the same magnitude, but mirror each other in phase (they are complex conjugates). This symmetry is something you see a lot of in digital signal processing. We won't go into it in this class, but it is one thing we can take advantage of to save some computations! ", "_____no_output_____" ], [ "### Exercise (optional) \n\nYou can use the code below to visualize the dot products of the input with the cos (real) and sin (imaginary) parts of each DFT component by varying $k$: \n\n* What's the relationship between the DFT phasors for $k=1$ and $k=N-1$ in terms of the dot product between the real (left plot) and imaginary (right plot) components?\n* What happens for components that don't have the same frequency as the input? e.g. $k=2$", "_____no_output_____" ] ], [ [ "## 1) Input: -sin(t-pi/3) as above\nnsteps = np.array(range(N))\ntheta_step = 2*np.pi/N\ntheta_steps = theta_step * nsteps\n\nTmin = np.min(theta_steps)\nTmax = np.max(theta_steps)\n\nphase_in = np.pi/3\nx = -np.sin(theta_steps-phase_in) \n\nfig, tdom = plt.subplots(figsize=(16, 4))\ntdom.scatter(tsteps, x, color='magenta')\ntdom.plot(tsteps, x, color='magenta')\n", "_____no_output_____" ], [ "## let's break it down into cos and sin components\nk=15\n\n## zcos is our sampling of the real part of the kth phasor\nzcos = phasors[k]['real']\nzsin = phasors[k]['imag']\n\n\n## the elementwise multiplication between the input x and the real part of the phasor, zcos\nxz_real = x * zcos\nxz_imag = x * zsin\n\n## This initializes our plots, makes them the appropriate size etc, but leaves the plot empty\n_, _, fig, phasor, iproj, rproj = plot_phasor_static(phasors[k]['zs'], tsteps, plot_phasor=False, plot_real=True, plot_imag=True, color='grey')\n\n\n## plot the zero line: if the sequence we get after the elementwise multiplication is \n## symmetric around zero, we know that the dot product will be zero since we're adding all the \n## values together\n#inusoid.plot([Tmin-1,Tmax+1], [0,0], color='grey')\n\n\n## Plot the input in magenta\nrproj.plot(tsteps, x, color='magenta')\nrproj.scatter(tsteps, x, color='magenta')\n\n\n## Plot the elementwise multiplication in orange\nrproj.plot(tsteps, xz_real, color='orange')\nrproj.scatter(tsteps, xz_real, color='orange')\n\n## Plot the input in magenta\niproj.plot(tsteps, x, color='magenta')\niproj.scatter(tsteps, x, color='magenta')\n\n## Plot the elementwise multiplication in orange\niproj.plot(tsteps, xz_imag, color='orange')\niproj.scatter(tsteps, xz_imag, color='orange')\n\n## Add it all up to get the dot product\ndftk_real, dftk_imag = sum(xz_real), sum(xz_imag)\nprint(\"dft[%d] = %f + j%f \" % (k, dftk_real, dftk_imag))", 
"_____no_output_____" ] ], [ [ "### Notes", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb109ef468b25f4a9efe56871d8126ba1d3128d6
60,182
ipynb
Jupyter Notebook
HURDAT_JRDISCHG-Connie1955.ipynb
williampc8985/VT-JamesRiver
6bacd10f4fd6158db74973ddc1abd89b650efc9f
[ "MIT" ]
null
null
null
HURDAT_JRDISCHG-Connie1955.ipynb
williampc8985/VT-JamesRiver
6bacd10f4fd6158db74973ddc1abd89b650efc9f
[ "MIT" ]
null
null
null
HURDAT_JRDISCHG-Connie1955.ipynb
williampc8985/VT-JamesRiver
6bacd10f4fd6158db74973ddc1abd89b650efc9f
[ "MIT" ]
null
null
null
132.559471
47,996
0.87046
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "#This is the Richmond USGS Data gage\nriver_richmnd = pd.read_csv('JR_Richmond02037500.csv')", "/Users/williampc/opt/anaconda3/envs/geop/lib/python3.9/site-packages/IPython/core/interactiveshell.py:3165: DtypeWarning: Columns (7) have mixed types.Specify dtype option on import or set low_memory=False.\n has_raised = await self.run_ast_nodes(code_ast.body, cell_name,\n" ], [ "river_richmnd.dropna();", "_____no_output_____" ], [ "#Hurricane data for the basin - Names of Relevant Storms - This will be used for getting the storms from the larger set\nJR_stormnames = pd.read_csv('gis_match.csv')\n", "_____no_output_____" ], [ "# Bring in the Big HURDAT data, from 1950 forward (satellites and data quality, etc.)\nHURDAT = pd.read_csv('hurdatcleanva_1950_present.csv')\n", "_____no_output_____" ], [ "VA_JR_stormmatch = JR_stormnames.merge(HURDAT)\n", "_____no_output_____" ], [ "# Now the common storms for the James Basin have been created. We now have time and storms together for the basin\n#checking some things about the data", "_____no_output_____" ], [ "# How many unique storms within the basin since 1950? 62 here and 53 in the Data on the Coast.NOAA.gov's website. \n#I think we are close enough here, digging may show some other storms, but I think we have at least captured the ones \n#from NOAA\nlen(VA_JR_stormmatch['Storm Number'].unique());", "_____no_output_____" ], [ "#double ck the lat and long parameters\nprint(VA_JR_stormmatch['Lat'].min(),\nVA_JR_stormmatch['Lon'].min(),\nVA_JR_stormmatch['Lat'].max(),\nVA_JR_stormmatch['Lon'].max())", "36.1 -83.7 39.9 -75.1\n" ], [ "#Make a csv of this data\nVA_JR_stormmatch.to_csv('storms_in_basin.csv', sep=',',encoding = 'utf-8')", "_____no_output_____" ], [ "#names of storms \nlen(VA_JR_stormmatch['Storm Number'].unique())\nVA_JR_stormmatch['Storm Number'].unique()\nnumbers = VA_JR_stormmatch['Storm Number']", "_____no_output_____" ], [ "#grab a storm from this list and lok at the times\n#Bill = pd.DataFrame(VA_JR_stormmatch['Storm Number'=='AL032003'])\n\nstorm = VA_JR_stormmatch[(VA_JR_stormmatch[\"Storm Number\"] == 'AL021955')]\nstorm\n#so this is the data for a storm named Bill that had a pth through the basin * BILL WAS A BACKDOOR Storm\n\n", "_____no_output_____" ], [ "# plotting for the USGS river Gage data \nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom climata.usgs import DailyValueIO\nfrom datetime import datetime\nfrom pandas.plotting import register_matplotlib_converters\nimport numpy as np\n\nregister_matplotlib_converters()\nplt.style.use('ggplot')\nplt.rcParams['figure.figsize'] = (20.0, 10.0)\n# set parameters\nnyears = 1\nndays = 365 * nyears\nstation_id = \"02037500\"\nparam_id = \"00060\"\n\ndatelist = pd.date_range(end=datetime.today(), periods=ndays).tolist()\n#take an annual average for the river\nannual_data = DailyValueIO(\n start_date=\"1955-01-01\",\n end_date=\"1956-01-01\",\n station=station_id,\n parameter=param_id,)\nfor series in annual_data:\n flow = [r[1] for r in series.data]\n si_flow_annual = np.asarray(flow) * 0.0283168\n flow_mean = np.mean(si_flow_annual)\n\n#now for the storm \ndischg = DailyValueIO(\n start_date=\"1955-08-10\",\n end_date=\"1955-08-24\",\n station=station_id,\n parameter=param_id,)\n#create lists of date-flow values\nfor series in dischg:\n flow = [r[1] for r in series.data]\n si_flow = np.asarray(flow) * 0.0283168\n dates = [r[0] for r in series.data]\nplt.plot(dates, si_flow)\nplt.axhline(y=flow_mean, color='r', 
linestyle='-')\nplt.xlabel('Date')\nplt.ylabel('Discharge (m^3/s)')\nplt.title(\"HU Connie - 1955 (Atlantic)\")\nplt.xticks(rotation='vertical')\nplt.show()", "_____no_output_____" ], [ "max(si_flow)", "_____no_output_____" ], [ "percent_incr= (abs(max(si_flow)-flow_mean)/abs(flow_mean))*100\npercent_incr", "_____no_output_____" ], [ "#take an annual average for the river\nannual_data = DailyValueIO(\n start_date=\"1955-03-01\",\n end_date=\"1955-10-01\",\n station=station_id,\n parameter=param_id,)\nfor series in annual_data:\n flow = [r[1] for r in series.data]\n si_flow_annual = np.asarray(flow) * 0.0283168\n flow_mean_season = np.mean(si_flow_annual)\nprint(abs(flow_mean-flow_mean_season))", "44.594745406511635\n" ], [ "print(flow_mean)\nprint(flow_mean_season)", "168.23010879999998\n212.82485420651162\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb10ca581a39f9f74a55688045b34957d417bb04
123,283
ipynb
Jupyter Notebook
Variational Inference for Hierarchical Poisson Factorization/HPF_notebook.ipynb
leonardoruggieri/Computational-Statistics-PhD
07c95f65b1d288796c2e59a7ea2003c1e4d5019a
[ "MIT" ]
null
null
null
Variational Inference for Hierarchical Poisson Factorization/HPF_notebook.ipynb
leonardoruggieri/Computational-Statistics-PhD
07c95f65b1d288796c2e59a7ea2003c1e4d5019a
[ "MIT" ]
null
null
null
Variational Inference for Hierarchical Poisson Factorization/HPF_notebook.ipynb
leonardoruggieri/Computational-Statistics-PhD
07c95f65b1d288796c2e59a7ea2003c1e4d5019a
[ "MIT" ]
null
null
null
268.006522
62,738
0.90392
[ [ [ "# Scalable Recommendation with Poisson Factorization - Computation Statistics Project 1\n#### The following notebook is an example of the use of the implementation of the mean-field variational algorithm for approximate posterior inference for the Hierarchical Poisson Factorization, by Gopalan et al. (2013).", "_____no_output_____" ], [ "The variational inference algorithm for HPF has been implemented and stored in the module **hpf_vi**.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.special\nimport scipy.stats\nimport sklearn.metrics\nfrom hpf_vi import hpf_vi\nfrom sklearn.metrics import mean_squared_error", "_____no_output_____" ] ], [ [ "The dataset is a sparse matrix of 0 and 1, which represents the interaction between users and items.\n\nThe matrix contains clusters of similar users in terms of items consumed. There are more than 1700 users and approximately 17000 items.", "_____no_output_____" ] ], [ [ "ratings = pd.read_pickle(\"ratings_df.pkl\")\nratings = np.array(ratings)", "_____no_output_____" ] ], [ [ "As one can read in the documentation of **hpf_vi**, the algorithm stops either when the difference between two subsequent log-likelihood is smaller than a tolerance value (which can be changed) or after a user-defined number of iterations.\n\nIn the following code, a function that splits training and testing data is defined.\n\nAs suggested by the authors of the paper, the test set consists of randomly selected ratings, which are set to zero during the training. In the original paper, the algorithm convergence is ultimately evaluated on the evaluation set.", "_____no_output_____" ] ], [ [ "def train_val_split(data, valid_dim=0.2):\n '''\n Creating two additional objects, i.e. training and validation set, which can be used in the fitting process\n\n Parameters:\n data = np.array\n valid_dim = float\n '''\n if valid_dim >= 1:\n raise ValueError(\"valid_dim must be lower than 1\")\n\n train = data.copy()\n valid = np.zeros(data.shape)\n\n for u in np.unique(data.nonzero()[0]):\n ind = data[u].nonzero()[0] \n\n if len(ind) > 0: \n valid_ind = np.random.choice(ind, round(len(ind)*valid_dim), replace=False)\n for i in valid_ind:\n valid[u,i], train[u,i] = data[u,i], 0\n return train, valid\n", "_____no_output_____" ], [ "train, valid = train_val_split(ratings)", "_____no_output_____" ] ], [ [ "We can now fit the model using the variational inference algorithm. 
Note that the specification of a validation set is optional.\n", "_____no_output_____" ] ], [ [ "model = hpf_vi() # instantiating the model\nmodel.fit(train = train, valid = valid, tol = 1, iterations = 100)", "/content/hpf_vi.py:180: RuntimeWarning: divide by zero encountered in log\n self.logprod = np.log(self.prod)\n" ] ], [ [ "As one can see in the next graph, the log-likelihood, as expected, tends to a higher value, until one of the two stopping criteria is met.\nRecall that the log-likelihood is the following:\n$$\np(y) = \\prod_{u,i}\\frac{(\\theta_u^T\\beta_i)^{y_{u,i}}\\cdot e ^{-\\theta_u^T\\beta_i}}{y_{u,i}!} = \\prod_{u,i:\\;y_{u,i} > 0} \\frac{(\\theta_u^T\\beta_i)^{y_{u,i}}}{y_{u,i}!} \\cdot \\prod_{u,i} e ^{-\\theta_u^T\\beta_i}\n$$", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(9,5))\n\nax.plot(model.ll[1:model.its]) # trimming the loglik computed on the random initialization\nax.set_xlabel(\"Epochs\")\nax.set_ylabel(\"- Loglikelihood\")", "_____no_output_____" ] ], [ [ "The **fit** method will generate a sequence of Mean Squared Errors for both the training and, if provided, the validation set. This second parameter can be used, as suggested by the authors, as a stopping criterion for the algorithm. Nevertheless, we opt for the in-sample log-likelihood.\n\nIn the following, a graph of the MSE for the validation and the training set is plotted as function of the number of iterations.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(9, 5))\n\nax.plot(model.mse_train[1:model.its],label=\"Training set MSE\")\nax.plot(model.mse_valid[1:model.its], label=\"Validation set MSE\")\nax.set_xlabel(\"Epochs\")\nax.set_ylabel(\"MSE\")\nax.legend();", "_____no_output_____" ] ], [ [ "Recommendations can now be made thanks to the estimated parameter. The method **recommend** suggests, for each user, the items that obtained the highest score.", "_____no_output_____" ], [ "## Plotting some results ##\nIn the following, we can qualitatively assess the in-sample performance of the model by comparing the actual vs predicted observations for 3 users of our dataset.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(14, 10), nrows=3)\n\nfor u,i in enumerate([1,100,1000]):\n ax[u].plot(model.predicted[i]/model.predicted[i].max(), label = \"Prediction\")\n ax[u].plot(ratings[i], label = \"Actual observations\")\n ax[u].set_xlabel(\"Item\")\n ax[u].set_ylabel(\"Interaction\")\n ax[u].set_yticklabels([])\n ax[u].legend();", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb10da8e16b0d812ea8d5159594aec5af2150d40
8,896
ipynb
Jupyter Notebook
Julia/Notebooks/1. Introduction-to-Julia-main/0 - Getting started.ipynb
okara83/Becoming-a-Data-Scientist
f09a15f7f239b96b77a2f080c403b2f3e95c9650
[ "MIT" ]
null
null
null
Julia/Notebooks/1. Introduction-to-Julia-main/0 - Getting started.ipynb
okara83/Becoming-a-Data-Scientist
f09a15f7f239b96b77a2f080c403b2f3e95c9650
[ "MIT" ]
null
null
null
Julia/Notebooks/1. Introduction-to-Julia-main/0 - Getting started.ipynb
okara83/Becoming-a-Data-Scientist
f09a15f7f239b96b77a2f080c403b2f3e95c9650
[ "MIT" ]
2
2022-02-09T15:41:33.000Z
2022-02-11T07:47:40.000Z
17.375
124
0.455373
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb10db646c16bed07c9a15616e5ae88eb4c9c961
69,461
ipynb
Jupyter Notebook
Machine Learning/Models/4-tomato-diesease-prediction.ipynb
TusharVerma015/Ds-Algo-HacktoberFest-1
5ddf3564ab8254a056c07ab1a54dde66458d21c8
[ "MIT" ]
12
2020-10-04T06:48:29.000Z
2021-02-16T17:54:04.000Z
Machine Learning/Models/4-tomato-diesease-prediction.ipynb
TusharVerma015/Ds-Algo-HacktoberFest-1
5ddf3564ab8254a056c07ab1a54dde66458d21c8
[ "MIT" ]
14
2020-10-04T09:09:52.000Z
2021-10-16T19:59:23.000Z
Machine Learning/Models/4-tomato-diesease-prediction.ipynb
TusharVerma015/Ds-Algo-HacktoberFest-1
5ddf3564ab8254a056c07ab1a54dde66458d21c8
[ "MIT" ]
55
2020-10-04T03:09:25.000Z
2021-10-16T09:00:12.000Z
81.146028
22,712
0.812902
[ [ [ "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n file = os.path.join(dirname, filename)\n\n# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session", "_____no_output_____" ] ], [ [ "Importing necessary libraries.", "_____no_output_____" ] ], [ [ "from tensorflow.keras.layers import Input, Dense, Flatten\nfrom keras import Model\nfrom keras.applications.vgg16 import VGG16\nfrom keras.preprocessing import image\nfrom keras.models import Sequential", "_____no_output_____" ] ], [ [ "As we are using VGG16 architecture, it expects the size of 224 by 224(Although, you can set your own size). We will set image size.", "_____no_output_____" ] ], [ [ "image_size = [224, 224]", "_____no_output_____" ], [ "vgg = VGG16(input_shape = image_size + [3], weights = 'imagenet', include_top = False)", "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5\n58892288/58889256 [==============================] - 1s 0us/step\n" ] ], [ [ "The first argument is the shape of input image plus **3**(as image is colured[RBG], for black_and_white add **1**).\nThe second one is the weights eqaul to imagenet. And,\nas we know it gives 1000 outputs. Third one excludes the top layer.", "_____no_output_____" ] ], [ [ "for layer in vgg.layers:\n layer.trainable = False", "_____no_output_____" ] ], [ [ "Some of the layers of VGG16 are already trained. To train them again is not a good practice. 
Thereby making it False", "_____no_output_____" ] ], [ [ "from glob import glob\nfolders = glob('/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/train/*')", "_____no_output_____" ], [ "folders\n", "_____no_output_____" ] ], [ [ "Flattening the output layer", "_____no_output_____" ] ], [ [ "x = Flatten()(vgg.output)", "_____no_output_____" ], [ "prediction = Dense(len(folders), activation = 'softmax')(x)", "_____no_output_____" ], [ "model = Model(inputs = vgg.input, outputs = prediction)", "_____no_output_____" ], [ "model.summary()", "Model: \"functional_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 224, 224, 3)] 0 \n_________________________________________________________________\nblock1_conv1 (Conv2D) (None, 224, 224, 64) 1792 \n_________________________________________________________________\nblock1_conv2 (Conv2D) (None, 224, 224, 64) 36928 \n_________________________________________________________________\nblock1_pool (MaxPooling2D) (None, 112, 112, 64) 0 \n_________________________________________________________________\nblock2_conv1 (Conv2D) (None, 112, 112, 128) 73856 \n_________________________________________________________________\nblock2_conv2 (Conv2D) (None, 112, 112, 128) 147584 \n_________________________________________________________________\nblock2_pool (MaxPooling2D) (None, 56, 56, 128) 0 \n_________________________________________________________________\nblock3_conv1 (Conv2D) (None, 56, 56, 256) 295168 \n_________________________________________________________________\nblock3_conv2 (Conv2D) (None, 56, 56, 256) 590080 \n_________________________________________________________________\nblock3_conv3 (Conv2D) (None, 56, 56, 256) 590080 \n_________________________________________________________________\nblock3_pool (MaxPooling2D) (None, 28, 28, 256) 0 \n_________________________________________________________________\nblock4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 \n_________________________________________________________________\nblock4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 \n_________________________________________________________________\nblock4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 \n_________________________________________________________________\nblock4_pool (MaxPooling2D) (None, 14, 14, 512) 0 \n_________________________________________________________________\nblock5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_pool (MaxPooling2D) (None, 7, 7, 512) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 25088) 0 \n_________________________________________________________________\ndense (Dense) (None, 10) 250890 \n=================================================================\nTotal params: 14,965,578\nTrainable params: 250,890\nNon-trainable params: 14,714,688\n_________________________________________________________________\n" ] ], [ [ "Compiling the model", "_____no_output_____" ] ], [ [ "model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])", "_____no_output_____" ] ], [ [ "Generating 
more images", "_____no_output_____" ] ], [ [ "from keras.preprocessing.image import ImageDataGenerator", "_____no_output_____" ], [ "train_data_gen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)", "_____no_output_____" ], [ "test_data_gen = ImageDataGenerator(rescale = 1./255)", "_____no_output_____" ], [ "train_set = train_data_gen.flow_from_directory('/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/train/', target_size = (224,224), batch_size = 32, class_mode = 'categorical')", "Found 18345 images belonging to 10 classes.\n" ], [ "test_set = test_data_gen.flow_from_directory('/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/valid/', target_size = (224,224), batch_size = 32, class_mode = 'categorical')", "Found 4585 images belonging to 10 classes.\n" ] ], [ [ "Fitting the model", "_____no_output_____" ] ], [ [ "mod = model.fit_generator(\n train_set,\n validation_data=test_set,\n epochs=10,\n steps_per_epoch=len(train_set),\n validation_steps=len(test_set)\n)", "Epoch 1/10\n574/574 [==============================] - 348s 607ms/step - loss: 0.7429 - accuracy: 0.7550 - val_loss: 0.5166 - val_accuracy: 0.8349\nEpoch 2/10\n574/574 [==============================] - 278s 485ms/step - loss: 0.4366 - accuracy: 0.8546 - val_loss: 0.4244 - val_accuracy: 0.8637\nEpoch 3/10\n574/574 [==============================] - 278s 484ms/step - loss: 0.3265 - accuracy: 0.8908 - val_loss: 0.5408 - val_accuracy: 0.8262\nEpoch 4/10\n574/574 [==============================] - 279s 487ms/step - loss: 0.2932 - accuracy: 0.9013 - val_loss: 0.2751 - val_accuracy: 0.9115\nEpoch 5/10\n574/574 [==============================] - 279s 486ms/step - loss: 0.2678 - accuracy: 0.9112 - val_loss: 0.4205 - val_accuracy: 0.8713\nEpoch 6/10\n574/574 [==============================] - 277s 483ms/step - loss: 0.2279 - accuracy: 0.9236 - val_loss: 0.2437 - val_accuracy: 0.9250\nEpoch 7/10\n574/574 [==============================] - 286s 498ms/step - loss: 0.2140 - accuracy: 0.9295 - val_loss: 0.4696 - val_accuracy: 0.8585\nEpoch 8/10\n574/574 [==============================] - 307s 535ms/step - loss: 0.2170 - accuracy: 0.9280 - val_loss: 0.3607 - val_accuracy: 0.8940\nEpoch 9/10\n574/574 [==============================] - 280s 487ms/step - loss: 0.2155 - accuracy: 0.9309 - val_loss: 0.3837 - val_accuracy: 0.9008\nEpoch 10/10\n574/574 [==============================] - 380s 661ms/step - loss: 0.2019 - accuracy: 0.9375 - val_loss: 0.3807 - val_accuracy: 0.8971\n" ], [ "import matplotlib.pyplot as plt\nplt.plot(mod.history['loss'], label='train loss')\nplt.plot(mod.history['val_loss'], label='val loss')\nplt.legend()\nplt.show()\n\n", "_____no_output_____" ], [ "plt.plot(mod.history['accuracy'], label='train accuracy')\nplt.plot(mod.history['val_accuracy'], label='val_accuracy')\nplt.legend()\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb10ddf6771c5c895d1c152d40f45db0d9b678e1
265,589
ipynb
Jupyter Notebook
handouts/handout_08.ipynb
bdlafleur/uq-course
3abf6fc9eed329896a17ccdb28ae16824b386e24
[ "MIT" ]
null
null
null
handouts/handout_08.ipynb
bdlafleur/uq-course
3abf6fc9eed329896a17ccdb28ae16824b386e24
[ "MIT" ]
null
null
null
handouts/handout_08.ipynb
bdlafleur/uq-course
3abf6fc9eed329896a17ccdb28ae16824b386e24
[ "MIT" ]
null
null
null
296.085842
51,442
0.907666
[ [ [ "# Lecture 8: Fitting Generalized Linear Models (Part II)", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context('talk')\nsns.set_style('white')\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Objectives\n\n+ Maximum Posterior Estimate\n+ Bayesian Linear Regression\n+ Evidence Approximation\n+ Automatic Relevance Determination", "_____no_output_____" ], [ "## Readings\n\nBefore coming to class, please read the following:\n\n+ [Ch. 3 of Bishop, 2006](http://www.amazon.com/Pattern-Recognition-Learning-Information-Statistics/dp/0387310738)\n\n+ [Ohio State University, Bayesian Linear Regression](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=3&ved=0ahUKEwikxsiPuJPKAhVE32MKHRoMCtsQFggyMAI&url=http%3A%2F%2Fweb.cse.ohio-state.edu%2F~kulis%2Fteaching%2F788_sp12%2Fscribe_notes%2Flecture5.pdf&usg=AFQjCNFvxuyBfFkRN8bdJAvd_dlZdsShEw&sig2=UqakvfANehNUUK1J9rXIiQ)\n\nYou can also check out this 10 minutes short Youtube video on Bayesian Linear Regression - \n+ [Mathematicalmonk, Bayesian Linear Regression](https://www.youtube.com/watch?v=dtkGq9tdYcI)", "_____no_output_____" ], [ "Plese see the 7th handout before you start on this one.\nWe just repeate some of the code that we developed there.\nIn particular, we load the essential modules and we redefine the basis function classes.", "_____no_output_____" ] ], [ [ "# Linear Basis\nclass LinearBasis(object):\n \"\"\"\n Represents a 1D linear basis.\n \"\"\"\n def __init__(self):\n self.num_basis = 2 # The number of basis functions\n def __call__(self, x):\n \"\"\"\n ``x`` should be a 1D array.\n \"\"\"\n return [1., x[0]]\n \n# We need a generic function that computes the design matrix\ndef compute_design_matrix(X, phi):\n \"\"\"\n Arguments:\n \n X - The observed inputs (1D array)\n phi - The basis functions.\n \"\"\"\n num_observations = X.shape[0]\n num_basis = phi.num_basis\n Phi = np.ndarray((num_observations, num_basis))\n for i in xrange(num_observations):\n Phi[i, :] = phi(X[i, :])\n return Phi\n\n\n# Here is a class for the polynomials:\nclass PolynomialBasis(object):\n \"\"\"\n A set of linear basis functions.\n \n Arguments:\n degree - The degree of the polynomial.\n \"\"\"\n def __init__(self, degree):\n self.degree = degree\n self.num_basis = degree + 1\n def __call__(self, x):\n return np.array([x[0] ** i for i in range(self.degree + 1)])\n \n \nclass FourierBasis(object):\n \"\"\"\n A set of linear basis functions.\n \n Arguments:\n num_terms - The number of Fourier terms.\n L - The period of the function.\n \"\"\"\n def __init__(self, num_terms, L):\n self.num_terms = num_terms\n self.L = L\n self.num_basis = 2 * num_terms\n def __call__(self, x):\n res = np.ndarray((self.num_basis,))\n for i in xrange(num_terms):\n res[2 * i] = np.cos(2 * i * np.pi / self.L * x[0])\n res[2 * i + 1] = np.sin(2 * (i+1) * np.pi / self.L * x[0])\n return res\n \n\nclass RadialBasisFunctions(object):\n \"\"\"\n A set of linear basis functions.\n \n Arguments:\n X - The centers of the radial basis functions.\n ell - The assumed lengthscale.\n \"\"\"\n def __init__(self, X, ell):\n self.X = X\n self.ell = ell\n self.num_basis = X.shape[0]\n def __call__(self, x):\n return np.exp(-.5 * (x - self.X) ** 2 / self.ell ** 2).flatten()\n \n\n\nclass StepFunctionBasis(object):\n \"\"\"\n A set of step functions.\n \n Arguments:\n X - The centers of the step functions.\n \"\"\"\n def __init__(self, X):\n self.X = X\n self.num_basis = X.shape[0]\n def __call__(self, x):\n 
res = np.ones((self.num_basis, ))\n res[x < self.X.flatten()] = 0.\n return res", "_____no_output_____" ] ], [ [ "We will use the motorcycle data set of lecture 7.", "_____no_output_____" ] ], [ [ "data = np.loadtxt('motor.dat')\nX = data[:, 0][:, None]\nY = data[:, 1]", "_____no_output_____" ] ], [ [ "## Probabilistic Regression - Version 2\n+ We wish to model the data using some **fixed** basis/features:\n$$\ny(\\mathbf{x};\\mathbf{w}) = \\sum_{j=1}^{m} w_{j}\\phi_{j}(\\mathbf{x}) = \\mathbf{w^{T}\\boldsymbol{\\phi}(\\mathbf{x})\n}\n$$\n\n+ We *model the measurement process* using a **likelihood** function:\n$$\n\\mathbf{y}_{1:n} | \\mathbf{x}_{1:n}, \\mathbf{w} \\sim p(\\mathbf{y}_{1:n}|\\mathbf{x}_{1:n}, \\mathbf{w}).\n$$\n\n+ We *model the uncertainty in the model parameters* using a **prior**:\n$$\n\\mathbf{w} \\sim p(\\mathbf{w}).\n$$", "_____no_output_____" ], [ "### Gaussian Prior on the Weights\n+ Consider the following **prior** on $\\mathbf{w}$:\n$$\np(\\mathbf{w}|\\alpha) = \\mathcal{N}\\left(\\mathbf{w}|\\mathbf{0},\\alpha^{-1}\\mathbf{I}\\right) = \n\\left(\\frac{\\alpha}{2\\pi}\\right)^{\\frac{m}{2}}\\exp\\left\\{-\\frac{\\alpha}{2}\\lVert\\mathbf{w}\\rVert^2\\right\\}.\n$$\n+ We say:\n\n> Before we see the data, we beleive that $\\mathbf{w}$ must be around zero with a precision of $\\alpha$.", "_____no_output_____" ], [ "### The Posterior of the Weights\n+ Combining the likelihood and the prior, we get using Bayes rule:\n$$\np(\\mathbf{w}|\\mathbf{x}_{1:n},\\mathbf{y}_{1:n}, \\sigma,\\alpha) = \n\\frac{p(\\mathbf{y}_{1:n}|\\mathbf{x}_{1:n}, \\mathbf{w}, \\sigma)p(\\mathbf{w}|\\alpha)}\n{\\int p(\\mathbf{y}_{1:n}|\\mathbf{x}_{1:n}, \\mathbf{w}', \\sigma)p(\\mathbf{w}'|\\alpha)d\\mathbf{w}'}.\n$$\n+ We say\n> The posterior summarizes our state of knowledge about $\\mathbf{w}$ after we see the data,\nif we know $\\alpha$ and $\\sigma$.", "_____no_output_____" ], [ "### Maximum Posterior Estimate\n+ We can find a point estimate of $\\mathbf{w}$ by solving:\n$$\n\\mathbf{w}_{\\mbox{MPE}} = \\arg\\max_{\\mathbf{w}} p(\\mathbf{y}_{1:n}|\\mathbf{x}_{1:n}, \\mathbf{w}, \\sigma)p(\\mathbf{w}|\\alpha).\n$$\n+ For Gaussian likelihood and weights:\n$$\n\\log p(\\mathbf{w}|\\mathbf{x}_{1:n},\\mathbf{y}_{1:n}, \\sigma,\\alpha) = \n- \\frac{1}{2\\sigma^2}\\lVert\\mathbf{\\Phi}\\mathbf{w}-\\mathbf{y}_{1:n}\\rVert^2 -\\frac{\\alpha}{2}\\lVert\\mathbf{w}\\rVert^2.\n$$\n+ With maximum:\n$$\n\\mathbf{w}_{\\mbox{MPE}} = \\left(\\sigma^{-2}\\mathbf{\\Phi}^T\\mathbf{\\Phi}+\\alpha\\mathbf{I}\\right)^{-1}\\mathbf{\\Phi}^T\\mathbf{y}_{1:n}.\n$$\n+ But, no analytic formula for $\\sigma$...", "_____no_output_____" ], [ "### The Stable Way to Compute the MAP Estimate\n+ Construct the positive-definite matrix:\n$$\n\\mathbf{A} = \\left(\\sigma^{-2}\\mathbf{\\Phi}^T\\mathbf{\\Phi}+\\alpha\\mathbf{I}\\right)\n$$\n+ Compute the [Cholesky decomposition](https://en.wikipedia.org/wiki/Cholesky_decomposition) of $\\mathbf{A}$:\n$$\n\\mathbf{A} = \\mathbf{L}\\mathbf{L}^T,\n$$\nwhere $\\mathbf{L}$ is lower triangular.\n+ Then, solve the system:\n$$\n\\mathbf{L}\\mathbf{L}^T\\mathbf{w} = \\mathbf{\\Phi}^T\\mathbf{y}_{1:n},\n$$\ndoing a forward and a backward substitution.\n+ [scipy.linalg.cho_factor](http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.cho_factor.html#scipy.linalg.cho_factor) and [scipy.linalg.cho_solve](http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.cho_solve.html) can be used for this.", "_____no_output_____" ], [ "### Radial Basis Functions", 
"_____no_output_____" ] ], [ [ "import scipy.linalg\nell = 2.\nalpha = 5\nsigma = 20.28\nXc = np.linspace(0, 60, 20)\nphi = RadialBasisFunctions(Xc, ell)\nPhi = compute_design_matrix(X, phi)\nA = np.dot(Phi.T, Phi) / sigma ** 2. + alpha * np.eye(Phi.shape[1])\nL = scipy.linalg.cho_factor(A)\nw_MPE = scipy.linalg.cho_solve(L, np.dot(Phi.T, Y))\nprint 'w_MPE:'\nprint w_MPE", "w_MPE:\n[ -0.34092858 -1.68904678 -3.19193641 -5.32186481 -18.32277585\n -100.03625967 -216.35332988 -241.24155683 -165.12418937 -26.36065749\n 51.58925071 52.3753626 17.89682441 9.91868847 9.65346397\n -3.58400667 -3.22118958 -2.04465552 0.77592555 0.97107365]\n" ], [ "# Let's predict on these points:\nX_p = np.linspace(0, 60, 100)[:, None]\nPhi_p = compute_design_matrix(X_p, phi)\nY_p = np.dot(Phi_p, w_MPE)\nY_l = Y_p - 2. * sigma # Lower predictive bound\nY_u = Y_p + 2. * sigma # Upper predictive bound\nfig, ax = plt.subplots()\nax.plot(X, Y, 'x', markeredgewidth=2, label='Observations')\nax.plot(X_p, Y_p, label='MPE Prediction (Radial Basis Functions, alpha=%1.2f)' % alpha)\nax.fill_between(X_p.flatten(), Y_l, Y_u, color=sns.color_palette()[1], alpha=0.25)\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\nplt.legend(loc='best');", "_____no_output_____" ] ], [ [ "### Hands-on\n\n+ Experiment with different alphas.\n+ When are we underfitting?\n+ When are we overfitting?\n+ Which one (if any) gives you the best fit?", "_____no_output_____" ], [ "### Issues with Maximum Posterior Estimate\n+ How many basis functions should I use?\n+ Which basis functions should I use?\n+ How do I pick the parameters of the basis functions, e.g., the lengthscale $\\ell$ of the RBFs, $\\alpha$, etc.?", "_____no_output_____" ], [ "## Probabilistic Regression - Version 3 - Bayesian Linear Regression\n+ For Gaussian likelihood and weights, the posterior is Gaussian:\n$$\np(\\mathbf{w}|\\mathbf{x}_{1:n},\\mathbf{y}_{1:n}, \\sigma, \\alpha) = \\mathcal{N}\\left(\\mathbf{w}|\\mathbf{m}, \\mathbf{S}\\right),\n$$\nwhere\n$$\n\\mathbf{S} = \\left(\\sigma^{-2}\\mathbf{\\Phi}^T\\mathbf{\\Phi}+\\alpha\\mathbf{I}\\right)^{-1},\n$$\nand\n$$\n\\mathbf{m} = \\sigma^{-2}\\mathbf{S}\\Phi^T\\mathbf{y}_{1:n}.\n$$\n+ In general: [Markov Chain Monte Carlo](https://en.wikipedia.org/wiki/Markov_chain_Monte_Carlo).", "_____no_output_____" ], [ "### Posterior Predictive Distribution\n+ Using probability theory, we ask: What do we know about $y$ at a new $\\mathbf{x}$ after seeing the data.\n+ We have using the sum rule:\n$$\np(y|\\mathbf{x}, \\mathbf{x}_{1:n}, \\mathbf{y}_{1:n}, \\sigma, \\alpha) = \n\\int p(y | \\mathbf{x}, \\mathbf{w}, \\sigma) p(\\mathbf{w}|\\mathbf{x}_{1:n}, \\mathbf{y}_{1:n},\\sigma,\\alpha)d\\mathbf{w}.\n$$\n+ For Gaussian likelihood and prior:\n$$\np(y|\\mathbf{x}, \\mathbf{x}_{1:n}, \\mathbf{y}_{1:n}, \\sigma, \\alpha) = \\mathcal{N}\\left(y|m(\\mathbf{x}), s^2(\\mathbf{x})\\right),\n$$\nwhere\n$$\nm(\\mathbf{x}) = \\mathbf{m}^T\\boldsymbol{\\phi}(\\mathbf{x})\\;\\mbox{and}\\;s(\\mathbf{x}) = \\boldsymbol{\\phi}(\\mathbf{x})^T\\mathbf{S}\\boldsymbol{\\phi}(\\mathbf{x}) + \\sigma^2.\n$$", "_____no_output_____" ], [ "### Predictive Uncertainty\n+ The **predictive uncertainty** is:\n$$\ns^2(\\mathbf{x}) = \\boldsymbol{\\phi}(\\mathbf{x})^T\\mathbf{S}\\boldsymbol{\\phi}(\\mathbf{x}) + \\sigma^2.\n$$\n+ $\\sigma^2$ corresponds to the measurement noise.\n+ $\\boldsymbol{\\phi}(\\mathbf{x})^T\\mathbf{S}\\boldsymbol{\\phi}(\\mathbf{x})$ is the epistemic uncertainty induced by limited data.", "_____no_output_____" ] ], [ [ "import scipy.linalg\nell = 
2.\nalpha = 0.001\nsigma = 20.28\nXc = np.linspace(0, 60, 20)\nphi = RadialBasisFunctions(Xc, ell)\nPhi = compute_design_matrix(X, phi)\nA = np.dot(Phi.T, Phi) / sigma ** 2. + alpha * np.eye(Phi.shape[1])\nL = scipy.linalg.cho_factor(A)\nm = scipy.linalg.cho_solve(L, np.dot(Phi.T, Y) / sigma ** 2) # The posterior mean of w\nS = scipy.linalg.cho_solve(L, np.eye(Phi.shape[1])) # The posterior covariance of w\nPhi_p = compute_design_matrix(X_p, phi)\nY_p = np.dot(Phi_p, m) # The mean prediction\nV_p_ep = np.einsum('ij,jk,ik->i', Phi_p, S, Phi_p) # The epistemic uncertainty\nS_p_ep = np.sqrt(V_p_ep)\nV_p = V_p_ep + sigma ** 2 # Full uncertainty\nS_p = np.sqrt(V_p)\nY_l_ep = Y_p - 2. * S_p_ep # Lower epistemic predictive bound\nY_u_ep = Y_p + 2. * S_p_ep # Upper epistemic predictive bound\nY_l = Y_p - 2. * S_p # Lower predictive bound\nY_u = Y_p + 2. * S_p # Upper predictive bound", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.plot(X, Y, 'x', markeredgewidth=2, label='Observations')\nax.plot(X_p, Y_p, label='Bayesian Prediction (Radial Basis Functions, alpha=%1.2f)' % alpha)\nax.fill_between(X_p.flatten(), Y_l_ep, Y_u_ep, color=sns.color_palette()[2], alpha=0.25)\nax.fill_between(X_p.flatten(), Y_l, Y_l_ep, color=sns.color_palette()[1], alpha=0.25)\nax.fill_between(X_p.flatten(), Y_u_ep, Y_u, color=sns.color_palette()[1], alpha=0.25)\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\nplt.legend(loc='best');", "_____no_output_____" ] ], [ [ "### Sampling Posterior Models\n+ We can actually sample models (functions) from the posterior. Here is how:\n + Sample a $\\mathbf{w}$ from $p(\\mathbf{w}|\\mathbf{x}_{1:n},\\mathbf{y}_{1:n}, \\sigma, \\alpha)$.\n + Look at the sampled model:\n $$\n y(\\mathbf{x};\\mathbf{w}) = \\mathbf{w}^T\\boldsymbol{\\phi}(\\mathbf{x}).\n $$", "_____no_output_____" ] ], [ [ "# We have m, S, X_p, and Phi_p from before\nfig, ax = plt.subplots()\nax.plot(X, Y, 'x', markeredgewidth=2, label='Observations')\nfor i in xrange(10):\n w = np.random.multivariate_normal(m, S)\n Y_p_s = np.dot(Phi_p, w)\n ax.plot(X_p, Y_p_s, color=sns.color_palette()[2], linewidth=0.5);\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')", "_____no_output_____" ] ], [ [ "### Issues with Bayesian Linear Regression\n+ How many basis functions should I use?\n+ Which basis functions should I use?\n+ How do I pick the parameters of the basis functions, e.g., the lengthscale $\\ell$ of the RBFs, $\\alpha$, etc.?+", "_____no_output_____" ], [ "### Hands-on\n\n+ Experiment with different alphas, ells, and sigmas.\n+ When are we underfitting?\n+ When are we overfitting?\n+ Which one (if any) gives you the best fit?\n+ In the figure, right above: Increase the number of posterior $\\mathbf{w}$ samples to get a sense of the epistemic uncertainty induced by the limited data.", "_____no_output_____" ], [ "## Probabilistic Regression - Version 4 - Hierarchical Priors\n+ So, how do we find all the parameters like $\\sigma$, $\\alpha$, $\\ell$, etc?\n+ These are all called **hyper-parameters** of the model.\n+ Call all of them\n$$\n\\boldsymbol{\\theta} = \\{\\sigma, \\alpha, \\ell,\\dots\\}.\n$$", "_____no_output_____" ], [ "### Hierarchical Priors\n+ Model:\n$$\ny(\\mathbf{x};\\mathbf{w}) = \\sum_{j=1}^{m} w_{j}\\phi_{j}(\\mathbf{x}) = \\mathbf{w^{T}\\boldsymbol{\\phi}(\\mathbf{x})}\n$$\n+ Likelihood:\n$$\n\\mathbf{y}_{1:n} | \\mathbf{x}_{1:n}, \\mathbf{w}, \\boldsymbol{\\theta} \\sim p(\\mathbf{y}_{1:n}|\\mathbf{x}_{1:n}, \\mathbf{w}, \\boldsymbol{\\theta}).\n$$\n+ Weight prior:\n$$\n\\mathbf{w} | 
\\boldsymbol{\\theta} \\sim p(\\mathbf{w}| \\boldsymbol{\\theta}).\n$$\n+ Hyper-prior:\n$$\n\\boldsymbol{\\theta} \\sim p(\\boldsymbol{\\theta}).\n$$", "_____no_output_____" ], [ "### Fully Bayesian Solution\n+ Just write down the posterior of everything:\n$$\np(\\mathbf{w}, \\boldsymbol{\\theta}|\\mathbf{x}_{1:n}, \\mathbf{y}_{1:n}) \\propto p(\\mathbf{y}_{1:n}|\\mathbf{x}_{1:n}|\\mathbf{w},\\boldsymbol{\\theta})p(\\mathbf{w}|\\boldsymbol{\\theta})p(\\boldsymbol{\\theta}).\n$$\n+ and, somehow, sample from it...", "_____no_output_____" ], [ "### The Evidence Approximation\n+ Look at the marginal posterior of $\\boldsymbol{\\theta}$:\n$$\np(\\boldsymbol{\\theta}|\\mathbf{x}_{1:n}, \\mathbf{y}_{1:n}) \\propto \n\\int p(\\mathbf{y}_{1:n}|\\mathbf{x}_{1:n}|\\mathbf{w},\\boldsymbol{\\theta})p(\\mathbf{w}|\\boldsymbol{\\theta})p(\\boldsymbol{\\theta})d\\mathbf{w}.\n$$\n\n+ Assume that the hyper-prior is relatively flat:\n$$\np(\\boldsymbol{\\theta}) \\propto 1,\n$$\n\n+ Use a MAP estimate for $\\boldsymbol{\\theta}$:\n$$\n\\boldsymbol{\\theta}_{\\mbox{EV}} = \\arg\\max_{\\boldsymbol{\\theta}}\\int p(\\mathbf{y}_{1:n}|\\mathbf{x}_{1:n}|\\mathbf{w},\\boldsymbol{\\theta})p(\\mathbf{w}|\\boldsymbol{\\theta})d\\mathbf{w}.\n$$\n\n+ Analytical for Gaussian likelihood and prior.", "_____no_output_____" ], [ "### Implementation Evidence Approximation\n+ There is a fast algorithm for the evidence approximation for Bayesian linear regression.\n+ It would take about an hour to go over it. See Ch. 3 of (Bishop, 2006).\n+ We will use the implementation found in [scikit-learn](http://scikit-learn.org).\n+ If you don't have it:\n```\nconda install scikit-learn\n```", "_____no_output_____" ], [ "### Radial Basis Functions", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import BayesianRidge\nell = 2.\nXc = np.linspace(0, 60, 50)\nphi = RadialBasisFunctions(Xc, ell)\nPhi = compute_design_matrix(X, phi)\nregressor = BayesianRidge()\nregressor.fit(Phi, Y)\n# They are using different names:\nsigma = np.sqrt(1. / regressor.alpha_)\nprint 'best sigma:', sigma\nalpha = regressor.lambda_\nprint 'best alpha:', alpha\nA = np.dot(Phi.T, Phi) / sigma ** 2. + alpha * np.eye(Phi.shape[1])\nL = scipy.linalg.cho_factor(A)\nm = scipy.linalg.cho_solve(L, np.dot(Phi.T, Y) / sigma ** 2) # The posterior mean of w\nS = scipy.linalg.cho_solve(L, np.eye(Phi.shape[1])) # The posterior covariance of w\nPhi_p = compute_design_matrix(X_p, phi)\nY_p = np.dot(Phi_p, m) # The mean prediction\nV_p_ep = np.einsum('ij,jk,ik->i', Phi_p, S, Phi_p) # The epistemic uncertainty\nS_p_ep = np.sqrt(V_p_ep)\nV_p = V_p_ep + sigma ** 2 # Full uncertainty\nS_p = np.sqrt(V_p)\nY_l_ep = Y_p - 2. * S_p_ep # Lower epistemic predictive bound\nY_u_ep = Y_p + 2. * S_p_ep # Upper epistemic predictive bound\nY_l = Y_p - 2. * S_p # Lower predictive bound\nY_u = Y_p + 2. 
* S_p # Upper predictive bound", "best sigma: 22.2067820906\nbest alpha: 0.00249087490343\n" ], [ "fig, ax = plt.subplots()\nax.plot(X, Y, 'x', markeredgewidth=2, label='Observations')\nax.plot(X_p, Y_p, label='Bayesian Prediction (Radial Basis Functions, alpha=%1.2f)' % alpha)\nax.fill_between(X_p.flatten(), Y_l_ep, Y_u_ep, color=sns.color_palette()[2], alpha=0.25)\nax.fill_between(X_p.flatten(), Y_l, Y_l_ep, color=sns.color_palette()[1], alpha=0.25)\nax.fill_between(X_p.flatten(), Y_u_ep, Y_u, color=sns.color_palette()[1], alpha=0.25)\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\nplt.legend(loc='best');", "_____no_output_____" ] ], [ [ "### Issues with Bayesian Linear Regression\n+ How many basis functions should I use?\n+ Which basis functions should I use?", "_____no_output_____" ], [ "### Hands-on\n\n+ Try the evidence approximation with the Fourier basis.\n+ Try the evidence approximation with the Step function basis.", "_____no_output_____" ], [ "## Probabilistic Regression - Version 5 - Automatic Relevance Determination\n+ Use a different precision $\\alpha_i$ for each weight:\n$$\np(w_j | \\alpha_j) \\propto \\exp\\left\\{-\\alpha_jw_j^2\\right\\},\n$$\n+ so that:\n$$\np(\\mathbf{w}|\\boldsymbol{\\alpha}) = \\propto \\prod_{j=1}^mp(w_j|\\alpha_j).\n$$\n+ Then maximize the **evidence** with respect to all the $\\alpha_j$'s.\n+ **Sparsity**: When $\\alpha_j\\rightarrow\\infty$, $w_j=0$ identically!", "_____no_output_____" ], [ "### Radial Basis Functions", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import ARDRegression\nell = 2.\nXc = np.linspace(0, 60, 50)\nphi = RadialBasisFunctions(Xc, ell)\nPhi = compute_design_matrix(X, phi)\nregressor = ARDRegression()\nregressor.fit(Phi, Y)\n# They are using different names:\nsigma = np.sqrt(1. / regressor.alpha_)\nprint 'best sigma:', sigma\nalpha = regressor.lambda_\nprint 'best alpha:', alpha\nA = np.dot(Phi.T, Phi) / sigma ** 2. + alpha * np.eye(Phi.shape[1])\nL = scipy.linalg.cho_factor(A)\nm = scipy.linalg.cho_solve(L, np.dot(Phi.T, Y) / sigma ** 2) # The posterior mean of w\nS = scipy.linalg.cho_solve(L, np.eye(Phi.shape[1])) # The posterior covariance of w\nPhi_p = compute_design_matrix(X_p, phi)\nY_p = np.dot(Phi_p, m) # The mean prediction\nV_p_ep = np.einsum('ij,jk,ik->i', Phi_p, S, Phi_p) # The epistemic uncertainty\nS_p_ep = np.sqrt(V_p_ep)\nV_p = V_p_ep + sigma ** 2 # Full uncertainty\nS_p = np.sqrt(V_p)\nY_l_ep = Y_p - 2. * S_p_ep # Lower epistemic predictive bound\nY_u_ep = Y_p + 2. * S_p_ep # Upper epistemic predictive bound\nY_l = Y_p - 2. * S_p # Lower predictive bound\nY_u = Y_p + 2. 
* S_p # Upper predictive bound", "best sigma: 21.1802707506\nbest alpha: [ 2.41622694e+01 4.60393723e+01 6.32601940e+01 6.60248705e+01\n 6.07577537e+01 6.30419345e+01 6.87983839e+01 7.19885330e+01\n 7.47478678e+01 7.18452626e+01 6.67773828e+01 7.04574032e+01\n 7.24203099e+01 6.04550341e+01 3.55032689e+01 1.10968067e-03\n 1.73367476e-04 1.99589349e+01 2.04616910e+01 1.05969545e-04\n 1.86715292e-02 3.14937763e+00 3.87600506e+01 6.61116485e+01\n 6.07405331e+01 3.38580108e+01 1.66337169e-03 9.09294119e-04\n 2.65807248e+01 3.21594802e+01 2.33882525e+01 3.57257599e+01\n 5.45115686e+01 5.24922174e+01 2.87934486e+01 9.11413331e-03\n 3.73413978e+01 5.18181885e+01 5.04733546e+01 3.90117514e+01\n 3.50099821e+01 4.39235876e+01 4.94959003e+01 5.09197345e+01\n 5.16755294e+01 5.22603627e+01 4.72226269e+01 3.69312958e+01\n 2.53199918e+01 1.38991944e+01]\n" ], [ "fig, ax = plt.subplots()\nax.plot(X, Y, 'x', markeredgewidth=2, label='Observations')\nax.plot(X_p, Y_p, label='Bayesian Prediction (Radial Basis Functions, ARD)')\nax.fill_between(X_p.flatten(), Y_l_ep, Y_u_ep, color=sns.color_palette()[2], alpha=0.25)\nax.fill_between(X_p.flatten(), Y_l, Y_l_ep, color=sns.color_palette()[1], alpha=0.25)\nax.fill_between(X_p.flatten(), Y_u_ep, Y_u, color=sns.color_palette()[1], alpha=0.25)\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\nplt.legend(loc='best');", "_____no_output_____" ] ], [ [ "### Issues with Automatic Relevance Determination\n+ What about the input-dependent (heteroscedastic) noise? (ADVANCED).<div class=\"cite2c-biblio\"></div>", "_____no_output_____" ], [ "### Hands-on\n\n+ Try ARD with the Fourier basis.\n+ Try ARD with the Step function basis.\n+ Try ARD with a basis that consists both of Fourier and RBFs. Which one's survive?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
cb10ebbba367db10d6154c07052e894ad2c66bb8
447,108
ipynb
Jupyter Notebook
docs/notebooks/06_example_3states.ipynb
guruvamsi-policharla/noisy-krotov
c5397d9dbde68d06f17e88620d6a6b2c74664841
[ "BSD-3-Clause" ]
49
2018-11-07T06:43:33.000Z
2022-03-18T20:53:06.000Z
docs/notebooks/06_example_3states.ipynb
guruvamsi-policharla/noisy-krotov
c5397d9dbde68d06f17e88620d6a6b2c74664841
[ "BSD-3-Clause" ]
94
2018-11-06T20:15:04.000Z
2022-01-06T09:06:15.000Z
docs/notebooks/06_example_3states.ipynb
qucontrol/krotov
9f9a22336c433dc3a37637ce8cc8324df4290b46
[ "BSD-3-Clause" ]
20
2018-11-06T20:03:11.000Z
2022-03-12T05:29:21.000Z
320.277937
259,596
0.927622
[ [ [ "# Optimization of a Dissipative Quantum Gate", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\n%load_ext watermark\nimport sys\nimport os\nimport qutip\nimport numpy as np\nimport scipy\nimport matplotlib\nimport matplotlib.pylab as plt\nimport krotov\nimport copy\nfrom functools import partial\nfrom itertools import product\n%watermark -v --iversions", "Python implementation: CPython\nPython version : 3.7.6\nIPython version : 7.19.0\n\nmatplotlib: 3.3.3\nscipy : 1.3.1\nnumpy : 1.17.2\nkrotov : 1.2.1\nqutip : 4.5.0\nsys : 3.7.6 (default, Jan 6 2020, 00:53:52) \n[Clang 11.0.0 (clang-1100.0.33.16)]\n\n" ] ], [ [ "$\\newcommand{tr}[0]{\\operatorname{tr}}\n\\newcommand{diag}[0]{\\operatorname{diag}}\n\\newcommand{abs}[0]{\\operatorname{abs}}\n\\newcommand{pop}[0]{\\operatorname{pop}}\n\\newcommand{aux}[0]{\\text{aux}}\n\\newcommand{int}[0]{\\text{int}}\n\\newcommand{opt}[0]{\\text{opt}}\n\\newcommand{tgt}[0]{\\text{tgt}}\n\\newcommand{init}[0]{\\text{init}}\n\\newcommand{lab}[0]{\\text{lab}}\n\\newcommand{rwa}[0]{\\text{rwa}}\n\\newcommand{bra}[1]{\\langle#1\\vert}\n\\newcommand{ket}[1]{\\vert#1\\rangle}\n\\newcommand{Bra}[1]{\\left\\langle#1\\right\\vert}\n\\newcommand{Ket}[1]{\\left\\vert#1\\right\\rangle}\n\\newcommand{Braket}[2]{\\left\\langle #1\\vphantom{#2}\\mid{#2}\\vphantom{#1}\\right\\rangle}\n\\newcommand{ketbra}[2]{\\vert#1\\rangle\\!\\langle#2\\vert}\n\\newcommand{op}[1]{\\hat{#1}}\n\\newcommand{Op}[1]{\\hat{#1}}\n\\newcommand{dd}[0]{\\,\\text{d}}\n\\newcommand{Liouville}[0]{\\mathcal{L}}\n\\newcommand{DynMap}[0]{\\mathcal{E}}\n\\newcommand{identity}[0]{\\mathbf{1}}\n\\newcommand{Norm}[1]{\\lVert#1\\rVert}\n\\newcommand{Abs}[1]{\\left\\vert#1\\right\\vert}\n\\newcommand{avg}[1]{\\langle#1\\rangle}\n\\newcommand{Avg}[1]{\\left\\langle#1\\right\\rangle}\n\\newcommand{AbsSq}[1]{\\left\\vert#1\\right\\vert^2}\n\\newcommand{Re}[0]{\\operatorname{Re}}\n\\newcommand{Im}[0]{\\operatorname{Im}}$\n\nThis example illustrates the optimization for a quantum gate in an open quantum system, where the dynamics is governed by the Liouville-von Neumann equation. A naive extension of a gate optimization to Liouville space would seem to imply that it is necessary to optimize over the full basis of Liouville space (16 matrices, for a two-qubit gate). However, [Goerz et al., New J. Phys. 16, 055012 (2014)][1] showed that is not necessary, but that a set of 3 density matrices is sufficient to track the optimization.\n\nThis example reproduces the \"Example II\" from that paper, considering the optimization towards a $\\sqrt{\\text{iSWAP}}$ two-qubit gate on a system of two transmons with a shared transmission line resonator.\n\n[1]: https://michaelgoerz.net/research/Goerz_NJP2014.pdf", "_____no_output_____" ], [ "**Note**: This notebook uses some parallelization features (`parallel_map`/`multiprocessing`). Unfortunately, on Windows (and macOS with Python >= 3.8), `multiprocessing` does not work correctly for functions defined in a Jupyter notebook (due to the [spawn method](https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods) being used on Windows, instead of Unix-`fork`, see also https://stackoverflow.com/questions/45719956). We can use the third-party [loky](https://loky.readthedocs.io/) library to fix this, but this significantly increases the overhead of multi-process parallelization. 
The use of parallelization here is for illustration only and makes no guarantee of actually improving the runtime of the optimization.", "_____no_output_____" ] ], [ [ "if sys.platform != 'linux':\n krotov.parallelization.set_parallelization(use_loky=True)\nfrom krotov.parallelization import parallel_map", "_____no_output_____" ] ], [ [ "## The two-transmon system", "_____no_output_____" ], [ "We consider the Hamiltonian from Eq (17) in the paper, in the rotating wave approximation, together with spontaneous decay and dephasing of each qubit. Alltogether, we define the Liouvillian as follows:", "_____no_output_____" ] ], [ [ "def two_qubit_transmon_liouvillian(\n ฯ‰1, ฯ‰2, ฯ‰d, ฮด1, ฮด2, J, q1T1, q2T1, q1T2, q2T2, T, Omega, n_qubit\n):\n from qutip import tensor, identity, destroy\n\n b1 = tensor(identity(n_qubit), destroy(n_qubit))\n b2 = tensor(destroy(n_qubit), identity(n_qubit))\n\n H0 = (\n (ฯ‰1 - ฯ‰d - ฮด1 / 2) * b1.dag() * b1\n + (ฮด1 / 2) * b1.dag() * b1 * b1.dag() * b1\n + (ฯ‰2 - ฯ‰d - ฮด2 / 2) * b2.dag() * b2\n + (ฮด2 / 2) * b2.dag() * b2 * b2.dag() * b2\n + J * (b1.dag() * b2 + b1 * b2.dag())\n )\n\n H1_re = 0.5 * (b1 + b1.dag() + b2 + b2.dag()) # 0.5 is due to RWA\n H1_im = 0.5j * (b1.dag() - b1 + b2.dag() - b2)\n\n H = [H0, [H1_re, Omega], [H1_im, ZeroPulse]]\n\n A1 = np.sqrt(1 / q1T1) * b1 # decay of qubit 1\n A2 = np.sqrt(1 / q2T1) * b2 # decay of qubit 2\n A3 = np.sqrt(1 / q1T2) * b1.dag() * b1 # dephasing of qubit 1\n A4 = np.sqrt(1 / q2T2) * b2.dag() * b2 # dephasing of qubit 2\n\n L = krotov.objectives.liouvillian(H, c_ops=[A1, A2, A3, A4])\n return L", "_____no_output_____" ] ], [ [ "We will use internal units GHz and ns. Values in GHz contain an implicit factor 2ฯ€, and MHz and ฮผs are converted to GHz and ns, respectively:", "_____no_output_____" ] ], [ [ "GHz = 2 * np.pi\nMHz = 1e-3 * GHz\nns = 1\nฮผs = 1000 * ns", "_____no_output_____" ] ], [ [ "This implicit factor $2 \\pi$ is because frequencies ($\\nu$) convert to energies as $E = h \\nu$, but our propagation routines assume a unit $\\hbar = 1$ for energies. Thus, the factor $h / \\hbar = 2 \\pi$.", "_____no_output_____" ], [ "We will use the same parameters as those given in Table 2 of the paper:", "_____no_output_____" ] ], [ [ "ฯ‰1 = 4.3796 * GHz # qubit frequency 1\nฯ‰2 = 4.6137 * GHz # qubit frequency 2\nฯ‰d = 4.4985 * GHz # drive frequency\nฮด1 = -239.3 * MHz # anharmonicity 1\nฮด2 = -242.8 * MHz # anharmonicity 2\nJ = -2.3 * MHz # effective qubit-qubit coupling\nq1T1 = 38.0 * ฮผs # decay time for qubit 1\nq2T1 = 32.0 * ฮผs # decay time for qubit 2\nq1T2 = 29.5 * ฮผs # dephasing time for qubit 1\nq2T2 = 16.0 * ฮผs # dephasing time for qubit 2\nT = 400 * ns # gate duration", "_____no_output_____" ], [ "tlist = np.linspace(0, T, 2000)", "_____no_output_____" ] ], [ [ "While in the original paper, each transmon was cut off at 6 levels, here we truncate at 5 levels. This makes the propagation faster, while potentially introducing a slightly larger truncation error.", "_____no_output_____" ] ], [ [ "n_qubit = 5 # number of transmon levels to consider", "_____no_output_____" ] ], [ [ "In the Liouvillian, note the control being split up into a separate real and imaginary part. 
As a guess control we use a real-valued constant pulse with an amplitude of 35 MHz, acting over 400 ns, with a switch-on and switch-off in the first 20 ns (see plot below)", "_____no_output_____" ] ], [ [ "def Omega(t, args):\n E0 = 35.0 * MHz\n return E0 * krotov.shapes.flattop(t, 0, T, t_rise=(20 * ns), func='sinsq')", "_____no_output_____" ] ], [ [ "The imaginary part start out as zero:", "_____no_output_____" ] ], [ [ "def ZeroPulse(t, args):\n return 0.0", "_____no_output_____" ] ], [ [ "We can now instantiate the Liouvillian:", "_____no_output_____" ] ], [ [ "L = two_qubit_transmon_liouvillian(\n ฯ‰1, ฯ‰2, ฯ‰d, ฮด1, ฮด2, J, q1T1, q2T1, q1T2, q2T2, T, Omega, n_qubit\n)", "_____no_output_____" ] ], [ [ "The guess pulse looks as follows:", "_____no_output_____" ] ], [ [ "def plot_pulse(pulse, tlist, xlimit=None):\n fig, ax = plt.subplots()\n if callable(pulse):\n pulse = np.array([pulse(t, None) for t in tlist])\n ax.plot(tlist, pulse/MHz)\n ax.set_xlabel('time (ns)')\n ax.set_ylabel('pulse amplitude (MHz)')\n if xlimit is not None:\n ax.set_xlim(xlimit)\n plt.show(fig)", "_____no_output_____" ], [ "plot_pulse(L[1][1], tlist)", "_____no_output_____" ] ], [ [ "## Optimization objectives", "_____no_output_____" ], [ "Our target gate is $\\Op{O} = \\sqrt{\\text{iSWAP}}$:", "_____no_output_____" ] ], [ [ "SQRTISWAP = qutip.Qobj(np.array(\n [[1, 0, 0, 0],\n [0, 1 / np.sqrt(2), 1j / np.sqrt(2), 0],\n [0, 1j / np.sqrt(2), 1 / np.sqrt(2), 0],\n [0, 0, 0, 1]]),\n dims=[[2, 2], [2, 2]]\n)", "_____no_output_____" ] ], [ [ "The key idea explored in the paper is that a set of three density matrices is sufficient to track the optimization\n\n$$\n\\begin{align}\n\\Op{\\rho}_1\n &= \\sum_{i=1}^{d} \\frac{2 (d-i+1)}{d (d+1)} \\ketbra{i}{i} \\\\\n\\Op{\\rho}_2\n &= \\sum_{i,j=1}^{d} \\frac{1}{d} \\ketbra{i}{j} \\\\\n\\Op{\\rho}_3\n &= \\sum_{i=1}^{d} \\frac{1}{d} \\ketbra{i}{i}\n\\end{align}\n$$", "_____no_output_____" ], [ "In our case, $d=4$ for a two qubit-gate, and the $\\ket{i}$, $\\ket{j}$ are the canonical basis states $\\ket{00}$, $\\ket{01}$, $\\ket{10}$, $\\ket{11}$", "_____no_output_____" ] ], [ [ "ket00 = qutip.ket((0, 0), dim=(n_qubit, n_qubit))\nket01 = qutip.ket((0, 1), dim=(n_qubit, n_qubit))\nket10 = qutip.ket((1, 0), dim=(n_qubit, n_qubit))\nket11 = qutip.ket((1, 1), dim=(n_qubit, n_qubit))\nbasis = [ket00, ket01, ket10, ket11]", "_____no_output_____" ] ], [ [ "The three density matrices play different roles in the optimization, and, as shown in the paper, convergence may improve significantly by weighing the states relatively to each other. For this example, we place a strong emphasis on the optimization $\\Op{\\rho}_1 \\rightarrow \\Op{O}^\\dagger \\Op{\\rho}_1 \\Op{O}$, by a factor of 20. This reflects that the hardest part of the optimization is identifying the basis in which the gate is diagonal. We will be using the real-part functional ($J_{T,\\text{re}}$) to evaluate the success of $\\Op{\\rho}_i \\rightarrow \\Op{O}\\Op{\\rho}_i\\Op{O}^\\dagger$. Because $\\Op{\\rho}_1$ and $\\Op{\\rho}_3$ are mixed states, the Hilbert-Schmidt overlap will take values smaller than one in the optimal case. 
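As a quick sanity check (our own arithmetic, not a number quoted from the paper): for $d=4$ the purities are $\\tr \\Op{\\rho}_1^2 = 0.4^2 + 0.3^2 + 0.2^2 + 0.1^2 = 0.3$, $\\tr \\Op{\\rho}_2^2 = 1$ (it is the projector onto $\\tfrac{1}{\\sqrt{d}} \\sum_i \\ket{i}$), and $\\tr \\Op{\\rho}_3^2 = 1/d = 0.25$, which are exactly the values used in the cell below.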
To compensate, we divide the weights by the purity of the respective states.", "_____no_output_____" ] ], [ [ "weights = np.array([20, 1, 1], dtype=np.float64)\nweights *= len(weights) / np.sum(weights) # manual normalization\nweights /= np.array([0.3, 1.0, 0.25]) # purities", "_____no_output_____" ] ], [ [ "The `krotov.gate_objectives` routine can initialize the density matrices $\\Op{\\rho}_1$, $\\Op{\\rho}_2$, $\\Op{\\rho}_3$ automatically, via the parameter `liouville_states_set`. Alternatively, we could also use the `'full'` basis of 16 matrices or the extended set of $d+1 = 5$ pure-state density matrices.", "_____no_output_____" ] ], [ [ "objectives = krotov.gate_objectives(\n basis,\n SQRTISWAP,\n L,\n liouville_states_set='3states',\n weights=weights,\n normalize_weights=False,\n)\nobjectives", "_____no_output_____" ] ], [ [ "The use of `normalize_weights=False` is because we have included the purities in the weights, as discussed above.", "_____no_output_____" ], [ "## Dynamics under the Guess Pulse", "_____no_output_____" ], [ "For numerical efficiency, both for the analysis of the guess/optimized controls, we will use a stateful density matrix propagator:", "_____no_output_____" ], [ "A true physical measure for the success of the optimization is the \"average gate fidelity\". Evaluating the fidelity requires to simulate the dynamics of the full basis of Liouville space:", "_____no_output_____" ] ], [ [ "full_liouville_basis = [psi * phi.dag() for (psi, phi) in product(basis, basis)]", "_____no_output_____" ] ], [ [ "We propagate these under the guess control:", "_____no_output_____" ] ], [ [ "def propagate_guess(initial_state):\n return objectives[0].mesolve(\n tlist,\n rho0=initial_state,\n ).states[-1]", "_____no_output_____" ], [ "full_states_T = parallel_map(\n propagate_guess, values=full_liouville_basis,\n)", "_____no_output_____" ], [ "print(\"F_avg = %.3f\" % krotov.functionals.F_avg(full_states_T, basis, SQRTISWAP))", "F_avg = 0.344\n" ] ], [ [ "Note that we use $F_{T,\\text{re}}$, not $F_{\\text{avg}}$ to steer the optimization, as the Krotov boundary condition $\\frac{\\partial F_{\\text{avg}}}{\\partial \\rho^\\dagger}$ would be non-trivial.", "_____no_output_____" ], [ "Before doing the optimization, we can look the population dynamics under the guess pulse. 
For this purpose we propagate the pure-state density matrices corresponding to the canonical logical basis in Hilbert space, and obtain the expectation values for the projection onto these same states:", "_____no_output_____" ] ], [ [ "rho00, rho01, rho10, rho11 = [qutip.ket2dm(psi) for psi in basis]", "_____no_output_____" ], [ "def propagate_guess_for_expvals(initial_state):\n return objectives[0].propagate(\n tlist,\n propagator=krotov.propagators.DensityMatrixODEPropagator(),\n rho0=initial_state,\n e_ops=[rho00, rho01, rho10, rho11]\n )", "_____no_output_____" ], [ "def plot_population_dynamics(dyn00, dyn01, dyn10, dyn11):\n fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(16, 8))\n axs = np.ndarray.flatten(axs)\n labels = ['00', '01', '10', '11']\n dyns = [dyn00, dyn01, dyn10, dyn11]\n for (ax, dyn, title) in zip(axs, dyns, labels):\n for (i, label) in enumerate(labels):\n ax.plot(dyn.times, dyn.expect[i], label=label)\n ax.legend()\n ax.set_title(title)\n plt.show(fig)", "_____no_output_____" ], [ "plot_population_dynamics(\n *parallel_map(\n propagate_guess_for_expvals,\n values=[rho00, rho01, rho10, rho11],\n )\n)", "_____no_output_____" ] ], [ [ "## Optimization", "_____no_output_____" ], [ "We now define the optimization parameters for the controls, the Krotov step size $\\lambda_a$ and the update-shape that will ensure that the pulse switch-on and switch-off stays intact.", "_____no_output_____" ] ], [ [ "pulse_options = {\n L[i][1]: dict(\n lambda_a=1.0,\n update_shape=partial(\n krotov.shapes.flattop, t_start=0, t_stop=T, t_rise=(20 * ns))\n )\n for i in [1, 2]\n}", "_____no_output_____" ] ], [ [ "Then we run the optimization for 2000 iterations", "_____no_output_____" ] ], [ [ "opt_result = krotov.optimize_pulses(\n objectives,\n pulse_options,\n tlist,\n propagator=krotov.propagators.DensityMatrixODEPropagator(reentrant=True),\n chi_constructor=krotov.functionals.chis_re,\n info_hook=krotov.info_hooks.print_table(J_T=krotov.functionals.J_T_re),\n iter_stop=3,\n)", "iter. 
J_T โˆ‘โˆซgโ‚(t)dt J ฮ”J_T ฮ”J secs\n0 1.22e-01 0.00e+00 1.22e-01 n/a n/a 17\n1 7.49e-02 2.26e-02 9.75e-02 -4.67e-02 -2.41e-02 48\n2 7.41e-02 3.98e-04 7.45e-02 -8.12e-04 -4.14e-04 35\n3 7.33e-02 3.70e-04 7.37e-02 -7.55e-04 -3.85e-04 36\n" ] ], [ [ "(this takes a while)...", "_____no_output_____" ] ], [ [ "dumpfile = \"./3states_opt_result.dump\"\nif os.path.isfile(dumpfile):\n opt_result = krotov.result.Result.load(dumpfile, objectives)\nelse:\n opt_result = krotov.optimize_pulses(\n objectives,\n pulse_options,\n tlist,\n propagator=krotov.propagators.DensityMatrixODEPropagator(reentrant=True),\n chi_constructor=krotov.functionals.chis_re,\n info_hook=krotov.info_hooks.print_table(J_T=krotov.functionals.J_T_re),\n iter_stop=5,\n continue_from=opt_result\n )\n opt_result.dump(dumpfile)", "_____no_output_____" ], [ "opt_result", "_____no_output_____" ] ], [ [ "## Optimization result", "_____no_output_____" ] ], [ [ "optimized_control = opt_result.optimized_controls[0] + 1j * opt_result.optimized_controls[1]", "_____no_output_____" ], [ "plot_pulse(np.abs(optimized_control), tlist)", "_____no_output_____" ], [ "def propagate_opt(initial_state):\n return opt_result.optimized_objectives[0].propagate(\n tlist,\n propagator=krotov.propagators.DensityMatrixODEPropagator(),\n rho0=initial_state,\n ).states[-1]", "_____no_output_____" ], [ "opt_full_states_T = parallel_map(\n propagate_opt, values=full_liouville_basis,\n)", "_____no_output_____" ], [ "print(\"F_avg = %.3f\" % krotov.functionals.F_avg(opt_full_states_T, basis, SQRTISWAP))", "F_avg = 0.977\n" ], [ "def propagate_opt_for_expvals(initial_state):\n return opt_result.optimized_objectives[0].propagate(\n tlist,\n propagator=krotov.propagators.DensityMatrixODEPropagator(),\n rho0=initial_state,\n e_ops=[rho00, rho01, rho10, rho11]\n )", "_____no_output_____" ] ], [ [ "Plotting the population dynamics, we see the expected behavior for the $\\sqrt{\\text{iSWAP}}$ gate.", "_____no_output_____" ] ], [ [ "plot_population_dynamics(\n *parallel_map(\n propagate_opt_for_expvals,\n values=[rho00, rho01, rho10, rho11],\n )\n)", "_____no_output_____" ], [ "def plot_convergence(result):\n fig, ax = plt.subplots()\n ax.semilogy(result.iters, result.info_vals)\n ax.set_xlabel('OCT iteration')\n ax.set_ylabel(r'optimization error $J_{T, re}$')\n plt.show(fig)", "_____no_output_____" ], [ "plot_convergence(opt_result)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb10f1f9f0ecd1e0aad973fa082018a3bfc90f4b
11,160
ipynb
Jupyter Notebook
examples/4_scikit_learn_compatibility.ipynb
jzjosuerivera71/pysindy-1
6ed02140c3e255c8bb69b19a7d9452930bd3253d
[ "MIT" ]
1
2021-03-02T20:31:40.000Z
2021-03-02T20:31:40.000Z
examples/4_scikit_learn_compatibility.ipynb
jzjosuerivera71/pysindy-1
6ed02140c3e255c8bb69b19a7d9452930bd3253d
[ "MIT" ]
null
null
null
examples/4_scikit_learn_compatibility.ipynb
jzjosuerivera71/pysindy-1
6ed02140c3e255c8bb69b19a7d9452930bd3253d
[ "MIT" ]
null
null
null
29.291339
542
0.550806
[ [ [ "# Working with Scikit-learn\nThis notebook shows how PySINDy objects interface with some useful tools from [Scikit-learn](https://scikit-learn.org/stable/).", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom scipy.integrate import odeint\n\nimport pysindy as ps", "_____no_output_____" ] ], [ [ "Let's generate some training data from the [Lorenz system](https://en.wikipedia.org/wiki/Lorenz_system) with which to experiment.", "_____no_output_____" ] ], [ [ "def lorenz(z, t):\n return [\n 10 * (z[1] - z[0]),\n z[0] * (28 - z[2]) - z[1],\n z[0] * z[1] - (8 / 3) * z[2]\n ]\n\n# Generate training data\ndt = .002\n\nt_train = np.arange(0, 10, dt)\nx0_train = [-8, 8, 27]\nx_train = odeint(lorenz, x0_train, t_train)\n\n# Evolve the Lorenz equations in time using a different initial condition\nt_test = np.arange(0, 15, dt)\nx0_test = np.array([8, 7, 15])\nx_test = odeint(lorenz, x0_test, t_test) ", "_____no_output_____" ] ], [ [ "## Cross-validation\nPySINDy supports Scikit-learn-type cross-validation with a few caveats.\n\n1. We must use **uniform timesteps** using the `t_default` parameter. This is because the `fit` and `score` methods of `SINDy` differ from those used in Scikit-learn in the sense that they both have an optional `t` parameter. Setting `t_default` is a workaround.\n2. We have to be careful about the way we split up testing and training data during cross-validation. Because the `SINDy` object needs to differentiate the data, we need the training and test data to consist of sequential intervals of time. If we randomly sample the data, then the computed derivatives will be horribly inaccurate. Luckily, Scikit-learn has a `TimeSeriesSplit` object for such situations. If we really want to randomly sample the data during cross-validation, there is a way to do so. However, it's more complicated.\n\nNote that we need to prepend `optimizer__`, `feature_library__`, or `differentiation_method__` to the parameter names.", "_____no_output_____" ], [ "### Cross-validation with TimeSeriesSplit", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import TimeSeriesSplit\n\nmodel = ps.SINDy(t_default=dt)\n\nparam_grid = {\n \"optimizer__threshold\": [0.001, 0.01, 0.1],\n \"optimizer__alpha\": [0.01, 0.05, 0.1],\n \"feature_library\": [ps.PolynomialLibrary(), ps.FourierLibrary()],\n \"differentiation_method__order\": [1, 2]\n}\n\nsearch = GridSearchCV(\n model,\n param_grid,\n cv=TimeSeriesSplit(n_splits=5)\n)\nsearch.fit(x_train)\n\nprint(\"Best parameters:\", search.best_params_)\nsearch.best_estimator_.print()", "Best parameters: {'differentiation_method__order': 2, 'feature_library': PolynomialLibrary(), 'optimizer__alpha': 0.01, 'optimizer__threshold': 0.01}\nx0' = -9.999 x0 + 9.999 x1\nx1' = 27.992 x0 + -0.999 x1 + -1.000 x0 x2\nx2' = -2.666 x2 + 1.000 x0 x1\n" ] ], [ [ "### Cross-validation without TimeSeriesSplit\nIf we want to use another cross-validation splitter, we'll need to (a) define a wrapper class which uses the argument \"y\" instead of \"x_dot\" and (b) precompute the derivatives. 
Note that (b) means that we will not be able to perform cross-validation on the parameters of the differentiation method.", "_____no_output_____" ] ], [ [ "from sklearn.metrics import r2_score\n\nclass SINDyCV(ps.SINDy):\n def __init__(\n self,\n optimizer=None,\n feature_library=None,\n differentiation_method=None,\n feature_names=None,\n t_default=1,\n discrete_time=False,\n n_jobs=1\n ):\n super(SINDyCV, self).__init__(\n optimizer=optimizer,\n feature_library=feature_library,\n differentiation_method=differentiation_method,\n feature_names=feature_names,\n t_default=t_default,\n discrete_time=discrete_time,\n n_jobs=n_jobs\n )\n\n def fit(self, x, y, **kwargs):\n return super(SINDyCV, self).fit(x, x_dot=y, **kwargs)\n \n def score(\n self,\n x,\n y,\n t=None,\n u=None,\n multiple_trajectories=False,\n metric=r2_score,\n **metric_kws\n ):\n return super(SINDyCV, self).score(\n x,\n x_dot=y,\n t=t,\n u=u,\n multiple_trajectories=multiple_trajectories,\n metric=metric,\n **metric_kws\n )", "_____no_output_____" ], [ "from sklearn.model_selection import ShuffleSplit\n\nmodel = SINDyCV()\nx_dot = model.differentiate(x_train, t=t_train)\n\nparam_grid = {\n \"optimizer__threshold\": [0.002, 0.01, 0.1],\n \"optimizer__alpha\": [0.01, 0.05, 0.1],\n \"feature_library__degree\": [1, 2, 3],\n}\n\nsearch = GridSearchCV(\n model,\n param_grid,\n cv=ShuffleSplit(n_splits=3, test_size=0.25)\n)\nsearch.fit(x_train, y=x_dot)\nprint(\"Best parameters:\", search.best_params_)\nsearch.best_estimator_.print()", "Best parameters: {'feature_library__degree': 2, 'optimizer__alpha': 0.01, 'optimizer__threshold': 0.002}\nx0' = -9.999 x0 + 9.999 x1\nx1' = 27.992 x0 + -0.999 x1 + -1.000 x0 x2\nx2' = -2.666 x2 + 1.000 x0 x1\n" ] ], [ [ "## Sparse optimizers\nAny of Scikit-learn's [linear models ](https://scikit-learn.org/stable/modules/linear_model.html) can be used for the `optimizer` parameter of a `SINDy` object, though we only recommend using those designed for sparse regression.\n\nIn the examples below we set `fit_intercept` to `False` since the default feature library (polynomials of degree up to two) already includes constant functions.", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import ElasticNet\n\nmodel = ps.SINDy(optimizer=ElasticNet(l1_ratio=0.9, fit_intercept=False), t_default=dt)\nmodel.fit(x_train)\nmodel.print()", "x0' = -10.005 x0 + 10.003 x1\nx1' = 27.990 x0 + -0.997 x1 + -1.000 x0 x2\nx2' = -2.665 x2 + 0.001 x0^2 + 0.999 x0 x1\n" ], [ "from sklearn.linear_model import OrthogonalMatchingPursuit\n\nmodel = ps.SINDy(\n optimizer=OrthogonalMatchingPursuit(n_nonzero_coefs=8, fit_intercept=False),\n t_default=dt\n)\nmodel.fit(x_train)\nmodel.print()", "x0' = -10.005 x0 + 10.003 x1\nx1' = 27.990 x0 + -0.997 x1 + -1.000 x0 x2\nx2' = -2.665 x2 + 0.001 x0^2 + 0.999 x0 x1\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb11004903ad833145b7c3e99a2c5494f9340ed9
18,249
ipynb
Jupyter Notebook
code/apriori/apriori.ipynb
nrkapri/machine-learning
69704ac5c8dfc4b87c838da30dba0267b68c4289
[ "MIT" ]
1
2021-02-20T19:47:51.000Z
2021-02-20T19:47:51.000Z
code/apriori/apriori.ipynb
nrkapri/machine-learning
69704ac5c8dfc4b87c838da30dba0267b68c4289
[ "MIT" ]
null
null
null
code/apriori/apriori.ipynb
nrkapri/machine-learning
69704ac5c8dfc4b87c838da30dba0267b68c4289
[ "MIT" ]
null
null
null
30.930508
290
0.482547
[ [ [ "# Apriori", "_____no_output_____" ], [ "## Importing the libraries", "_____no_output_____" ] ], [ [ "!pip install apyori", "Requirement already satisfied: apyori in /usr/local/lib/python3.6/dist-packages (1.1.2)\n" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Data Preprocessing", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None)\ntransactions = []\nfor i in range(0, 7501):\n transactions.append([str(dataset.values[i,j]) for j in range(0, 20)])", "_____no_output_____" ] ], [ [ "## Training the Apriori model on the dataset", "_____no_output_____" ] ], [ [ "from apyori import apriori\nrules = apriori(transactions = transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2, max_length = 2)", "_____no_output_____" ] ], [ [ "## Visualising the results", "_____no_output_____" ], [ "### Displaying the first results coming directly from the output of the apriori function", "_____no_output_____" ] ], [ [ "results = list(rules)", "_____no_output_____" ], [ "results", "_____no_output_____" ] ], [ [ "### Putting the results well organised into a Pandas DataFrame", "_____no_output_____" ] ], [ [ "def inspect(results):\n lhs = [tuple(result[2][0][0])[0] for result in results]\n rhs = [tuple(result[2][0][1])[0] for result in results]\n supports = [result[1] for result in results]\n confidences = [result[2][0][2] for result in results]\n lifts = [result[2][0][3] for result in results]\n return list(zip(lhs, rhs, supports, confidences, lifts))\nresultsinDataFrame = pd.DataFrame(inspect(results), columns = ['Left Hand Side', 'Right Hand Side', 'Support', 'Confidence', 'Lift'])", "_____no_output_____" ] ], [ [ "### Displaying the results non sorted", "_____no_output_____" ] ], [ [ "resultsinDataFrame", "_____no_output_____" ] ], [ [ "### Displaying the results sorted by descending lifts", "_____no_output_____" ] ], [ [ "resultsinDataFrame.nlargest(n = 10, columns = 'Lift')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb1112c12ce949ace0dac753dc5c3244fe55b553
32,790
ipynb
Jupyter Notebook
spaCy_tuTorial.ipynb
DerwenAI/conda_sux
6adfd4ed87771ab0f37ee471cf0699d89b846dc8
[ "MIT" ]
22
2019-10-21T13:56:40.000Z
2021-11-26T16:18:15.000Z
spaCy_tuTorial.ipynb
DerwenAI/conda_sux
6adfd4ed87771ab0f37ee471cf0699d89b846dc8
[ "MIT" ]
1
2020-03-09T23:50:06.000Z
2021-05-28T13:13:51.000Z
spaCy_tuTorial.ipynb
DerwenAI/conda_sux
6adfd4ed87771ab0f37ee471cf0699d89b846dc8
[ "MIT" ]
14
2019-10-22T16:41:37.000Z
2022-03-14T07:44:47.000Z
35.372168
574
0.621287
[ [ [ "# An Introduction to Natural Language in Python using spaCy", "_____no_output_____" ], [ "## Introduction\n\nThis tutorial provides a brief introduction to working with natural language (sometimes called \"text analytics\") in Pytho, using [spaCy](https://spacy.io/) and related libraries.\nData science teams in industry must work with lots of text, one of the top four categories of data used in machine learning.\nUsually that's human-generated text, although not always.\n\nThink about it: how does the \"operating system\" for business work? Typically, there are contracts (sales contracts, work agreements, partnerships), there are invoices, there are insurance policies, there are regulations and other laws, and so on.\nAll of those are represented as text.\n\nYou may run across a few acronyms: _natural language processing_ (NLP), _natural language understanding_ (NLU), _natural language generation_ (NLG) โ€” which are roughly speaking \"read text\", \"understand meaning\", \"write text\" respectively.\nIncreasingly these tasks overlap and it becomes difficult to categorize any given feature.\n\nThe _spaCy_ framework โ€” along with a wide and growing range of plug-ins and other integrations โ€” provides features for a wide range of natural language tasks.\nIt's become one of the most widely used natural language libraries in Python for industry use cases, and has quite a large community โ€” and with that, much support for commercialization of research advances as this area continues to evolve rapidly.", "_____no_output_____" ], [ "## Getting Started\n\nCheck out the excellent _spaCy_ [installation notes](https://spacy.io/usage) for a \"configurator\" which generates installation commands based on which platforms and natural languages you need to support.\n\nSome people tend to use `pip` while others use `conda`, and there are instructions for both. For example, to get started with _spaCy_ working with text in English and installed via `conda` on a Linux system:\n```\nconda install -c conda-forge spacy\npython -m spacy download en_core_web_sm\n```\n\nBTW, the second line above is a download for language resources (models, etc.) and the `_sm` at the end of the download's name indicates a \"small\" model. There's also \"medium\" and \"large\", albeit those are quite large. Some of the more advanced features depend on the latter, although we won't quite be diving to the bottom of that ocean in this (brief) tutorial.\n\nNow let's load _spaCy_ and run some code:", "_____no_output_____" ] ], [ [ "import spacy\n\nnlp = spacy.load(\"en_core_web_sm\")", "_____no_output_____" ] ], [ [ "That `nlp` variable is now your gateway to all things _spaCy_ and loaded with the `en_core_web_sm` small model for English.\nNext, let's run a small \"document\" through the natural language parser:", "_____no_output_____" ] ], [ [ "text = \"The rain in Spain falls mainly on the plain.\"\ndoc = nlp(text)\n\nfor token in doc:\n print(token.text, token.lemma_, token.pos_, token.is_stop)", "_____no_output_____" ] ], [ [ "First we created a [doc](https://spacy.io/api/doc) from the text, which is a container for a document and all of its annotations. Then we iterated through the document to see what _spaCy_ had parsed.\n\nGood, but it's a lot of info and a bit difficult to read. 
Let's reformat the _spaCy_ parse of that sentence as a [pandas](https://pandas.pydata.org/) dataframe:", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ncols = (\"text\", \"lemma\", \"POS\", \"explain\", \"stopword\")\nrows = []\n\nfor t in doc:\n row = [t.text, t.lemma_, t.pos_, spacy.explain(t.pos_), t.is_stop]\n rows.append(row)\n\ndf = pd.DataFrame(rows, columns=cols)\n \ndf", "_____no_output_____" ] ], [ [ "Much more readable!\nIn this simple case, the entire document is merely one short sentence.\nFor each word in that sentence _spaCy_ has created a [token](https://spacy.io/api/token), and we accessed fields in each token to show:\n\n - raw text\n - [lemma](https://en.wikipedia.org/wiki/Lemma_(morphology)) โ€“ a root form of the word\n - [part of speech](https://en.wikipedia.org/wiki/Part_of_speech)\n - a flag for whether the word is a _stopword_ โ€“ i.e., a common word that may be filtered out", "_____no_output_____" ], [ "Next let's use the [displaCy](https://ines.io/blog/developing-displacy) library to visualize the parse tree for that sentence:", "_____no_output_____" ] ], [ [ "from spacy import displacy\n\ndisplacy.render(doc, style=\"dep\", jupyter=True)", "_____no_output_____" ] ], [ [ "Does that bring back memories of grade school? Frankly, for those of us coming from more of a computational linguistics background, that diagram sparks joy.\n\nBut let's backup for a moment. How do you handle multiple sentences?\n\nThere are features for _sentence boundary detection_ (SBD) โ€“ also known as _sentence segmentation_ โ€“ based on the builtin/default [sentencizer](https://spacy.io/api/sentencizer):", "_____no_output_____" ] ], [ [ "text = \"We were all out at the zoo one day, I was doing some acting, walking on the railing of the gorilla exhibit. I fell in. Everyone screamed and Tommy jumped in after me, forgetting that he had blueberries in his front pocket. The gorillas just went wild.\"\n\ndoc = nlp(text)\n\nfor sent in doc.sents:\n print(\">\", sent)", "_____no_output_____" ] ], [ [ "When _spaCy_ creates a document, it uses a principle of _non-destructive tokenization_ meaning that the tokens, sentences, etc., are simply indexes into a long array. In other words, they don't carve the text stream into little pieces. So each sentence is a [span](https://spacy.io/api/span) with a _start_ and an _end_ index into the document array:", "_____no_output_____" ] ], [ [ "for sent in doc.sents:\n print(\">\", sent.start, sent.end)", "_____no_output_____" ] ], [ [ "We can index into the document array to pull out the tokens for one sentence:", "_____no_output_____" ] ], [ [ "doc[48:54]", "_____no_output_____" ] ], [ [ "Or simply index into a specific token, such as the verb `went` in the last sentence:", "_____no_output_____" ] ], [ [ "token = doc[51]\nprint(token.text, token.lemma_, token.pos_)", "_____no_output_____" ] ], [ [ "At this point we can parse a document, segment that document into sentences, then look at annotations about the tokens in each sentence. 
That's a good start.", "_____no_output_____" ], [ "## Acquiring Text\n\nNow that we can parse texts, where do we get texts?\nOne quick source is to leverage the interwebs.\nOf course when we download web pages we'll get HTML, and then need to extract text from them.\n[Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) is a popular package for that.\n\nFirst, a little housekeeping:", "_____no_output_____" ] ], [ [ "import sys\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ] ], [ [ "### Character Encoding", "_____no_output_____" ], [ "The following shows examples of how to use [codecs](https://docs.python.org/3/library/codecs.html) and [normalize unicode](https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize). NB: the example text comes from the article \"[Metal umlat](https://en.wikipedia.org/wiki/Metal_umlaut)\".", "_____no_output_____" ] ], [ [ "x = \"Rinรดรงรฉrรดse screams ๏ฌ‚ow not unlike an encyclopรฆdia, \\\n'TECHNICIร„NS ร–F SPร…CE SHIP Eร…RTH THIS IS Yร–รœR Cร„PTร…IN SPEร„KING Yร–รœR ร˜ร…PTร…IN IS DEAฬ‹D' to Spฤฑnฬˆal Tap.\"\n\ntype(x)", "_____no_output_____" ] ], [ [ "The variable `x` is a *string* in Python:", "_____no_output_____" ] ], [ [ "repr(x)", "_____no_output_____" ] ], [ [ "Its translation into [ASCII](http://www.asciitable.com/) is unusable by parsers:", "_____no_output_____" ] ], [ [ "ascii(x)", "_____no_output_____" ] ], [ [ "Encoding as [UTF-8](http://unicode.org/faq/utf_bom.html) doesn't help much:", "_____no_output_____" ] ], [ [ "x.encode('utf8')", "_____no_output_____" ] ], [ [ "Ignoring difficult characters is perhaps an even worse strategy:", "_____no_output_____" ] ], [ [ "x.encode('ascii', 'ignore')", "_____no_output_____" ] ], [ [ "However, one can *normalize* text, then encodeโ€ฆ", "_____no_output_____" ] ], [ [ "import unicodedata\n\nunicodedata.normalize('NFKD', x).encode('ascii','ignore')", "_____no_output_____" ] ], [ [ "Even before this normalization and encoding, you may need to convert some characters explicitly **before** parsing. 
For example:", "_____no_output_____" ] ], [ [ "x = \"The sky โ€œaboveโ€ the port โ€ฆ was the color of โ€˜cable televisionโ€™ โ€“ tuned to the Weather Channelยฎ\"\n\nascii(x)", "_____no_output_____" ] ], [ [ "Consider the results for that line:", "_____no_output_____" ] ], [ [ "unicodedata.normalize('NFKD', x).encode('ascii', 'ignore')", "_____no_output_____" ] ], [ [ "...which still drops characters that may be important for parsing a sentence.\n\nSo a more advanced approach could be:", "_____no_output_____" ] ], [ [ "x = x.replace('โ€œ', '\"').replace('โ€', '\"')\nx = x.replace(\"โ€˜\", \"'\").replace(\"โ€™\", \"'\")\nx = x.replace('โ€ฆ', '...').replace('โ€“', '-')\n\nx = unicodedata.normalize('NFKD', x).encode('ascii', 'ignore').decode('utf-8')\nprint(x)", "_____no_output_____" ] ], [ [ "### Parsing HTML", "_____no_output_____" ], [ "In the following function `get_text()` we'll parse the HTML to find all of the `<p/>` tags, then extract the text for those:", "_____no_output_____" ] ], [ [ "from bs4 import BeautifulSoup\nimport requests\nimport traceback\n\ndef get_text (url):\n buf = []\n \n try:\n soup = BeautifulSoup(requests.get(url).text, \"html.parser\")\n \n for p in soup.find_all(\"p\"):\n buf.append(p.get_text())\n\n return \"\\n\".join(buf)\n except:\n print(traceback.format_exc())\n sys.exit(-1)", "_____no_output_____" ] ], [ [ "Now let's grab some text from online sources.\nWe can compare open source licenses hosted on the [Open Source Initiative](https://opensource.org/licenses/) site:", "_____no_output_____" ] ], [ [ "lic = {}\nlic[\"mit\"] = nlp(get_text(\"https://opensource.org/licenses/MIT\"))\nlic[\"asl\"] = nlp(get_text(\"https://opensource.org/licenses/Apache-2.0\"))\nlic[\"bsd\"] = nlp(get_text(\"https://opensource.org/licenses/BSD-3-Clause\"))\n\nfor sent in lic[\"bsd\"].sents:\n print(\">\", sent)", "_____no_output_____" ] ], [ [ "One common use case for natural language work is to compare texts. For example, with those open source licenses we can download their text, parse, then compare [similarity](https://spacy.io/api/doc#similarity) metrics among them:", "_____no_output_____" ] ], [ [ "pairs = [\n [\"mit\", \"asl\"],\n [\"asl\", \"bsd\"],\n [\"bsd\", \"mit\"]\n]\n\nfor a, b in pairs:\n print(a, b, lic[a].similarity(lic[b]))", "_____no_output_____" ] ], [ [ "That is interesting, since the [BSD](https://opensource.org/licenses/BSD-3-Clause) and [MIT](https://opensource.org/licenses/MIT) licenses appear to be the most similar documents.\nIn fact they are closely related.\n\nAdmittedly, there was some extra text included in each document due to the OSI disclaimer in the footer โ€“ but this provides a reasonable approximation for comparing the licenses.", "_____no_output_____" ], [ "## Natural Language Understanding\n\nNow let's dive into some of the _spaCy_ features for NLU.\nGiven that we have a parse of a document, from a purely grammatical standpoint we can pull the [noun chunks](https://spacy.io/usage/linguistic-features#noun-chunks), i.e., each of the noun phrases:", "_____no_output_____" ] ], [ [ "text = \"Steve Jobs and Steve Wozniak incorporated Apple Computer on January 3, 1977, in Cupertino, California.\"\ndoc = nlp(text)\n\nfor chunk in doc.noun_chunks:\n print(chunk.text)", "_____no_output_____" ] ], [ [ "Not bad. 
The noun phrases in a sentence generally provide more information content โ€“ as a simple filter used to reduce a long document into a more \"distilled\" representation.\n\nWe can take this approach further and identify [named entities](https://spacy.io/usage/linguistic-features#named-entities) within the text, i.e., the proper nouns:", "_____no_output_____" ] ], [ [ "for ent in doc.ents:\n print(ent.text, ent.label_)", "_____no_output_____" ] ], [ [ "The _displaCy_ library provides an excellent way to visualize named entities:", "_____no_output_____" ] ], [ [ "displacy.render(doc, style=\"ent\", jupyter=True)", "_____no_output_____" ] ], [ [ "If you're working with [knowledge graph](https://www.akbc.ws/) applications and other [linked data](http://linkeddata.org/), your challenge is to construct links between the named entities in a document and other related information for the entities โ€“ which is called [entity linking](http://nlpprogress.com/english/entity_linking.html).\nIdentifying the named entities in a document is the first step in this particular kind of AI work.\nFor example, given the text above, one might link the `Steve Wozniak` named entity to a [lookup in DBpedia](http://dbpedia.org/page/Steve_Wozniak).", "_____no_output_____" ], [ "In more general terms, one can also link _lemmas_ to resources that describe their meanings.\nFor example, in an early section we parsed the sentence `The gorillas just went wild` and were able to show that the lemma for the word `went` is the verb `go`. At this point we can use a venerable project called [WordNet](https://wordnet.princeton.edu/) which provides a lexical database for English โ€“ in other words, it's a computable thesaurus.\n\nThere's a _spaCy_ integration for WordNet called\n[spacy-wordnet](https://github.com/recognai/spacy-wordnet) by [Daniel Vila Suero](https://twitter.com/dvilasuero), an expert in natural language and knowledge graph work.\n\nThen we'll load the WordNet data via NLTK (these things happen):", "_____no_output_____" ] ], [ [ "import nltk\n\nnltk.download(\"wordnet\")", "_____no_output_____" ] ], [ [ "Note that _spaCy_ runs as a \"pipeline\" and allows means for customizing parts of the pipeline in use.\nThat's excellent for supporting really interesting workflow integrations in data science work.\nHere we'll add the `WordnetAnnotator` from the _spacy-wordnet_ project:", "_____no_output_____" ] ], [ [ "!pip install spacy-wordnet", "_____no_output_____" ], [ "from spacy_wordnet.wordnet_annotator import WordnetAnnotator\n\nprint(\"before\", nlp.pipe_names)\n\nif \"WordnetAnnotator\" not in nlp.pipe_names:\n nlp.add_pipe(WordnetAnnotator(nlp.lang), after=\"tagger\")\n \nprint(\"after\", nlp.pipe_names)", "_____no_output_____" ] ], [ [ "Within the English language, some words are infamous for having many possible meanings. 
For example, click through the results online in a [WordNet](http://wordnetweb.princeton.edu/perl/webwn?s=star&sub=Search+WordNet&o2=&o0=1&o8=1&o1=1&o7=&o5=&o9=&o6=&o3=&o4=&h=) search to find the meanings related to the word `withdraw`.\n\nNow let's use _spaCy_ to perform that lookup automatically:", "_____no_output_____" ] ], [ [ "token = nlp(\"withdraw\")[0]\ntoken._.wordnet.synsets()", "_____no_output_____" ], [ "token._.wordnet.lemmas()", "_____no_output_____" ], [ "token._.wordnet.wordnet_domains()", "_____no_output_____" ] ], [ [ "Again, if you're working with knowledge graphs, those \"word sense\" links from WordNet could be used along with graph algorithms to help identify the meanings for a particular word. That can also be used to develop summaries for larger sections of text through a technique called _summarization_. It's beyond the scope of this tutorial, but an interesting application currently for natural language in industry.", "_____no_output_____" ], [ "Going in the other direction, if you know _a priori_ that a document was about a particular domain or set of topics, then you can constrain the meanings returned from _WordNet_. In the following example, we want to consider NLU results that are within Finance and Banking:", "_____no_output_____" ] ], [ [ "domains = [\"finance\", \"banking\"]\nsentence = nlp(\"I want to withdraw 5,000 euros.\")\n\nenriched_sent = []\n\nfor token in sentence:\n # get synsets within the desired domains\n synsets = token._.wordnet.wordnet_synsets_for_domain(domains)\n \n if synsets:\n lemmas_for_synset = []\n \n for s in synsets:\n # get synset variants and add to the enriched sentence\n lemmas_for_synset.extend(s.lemma_names())\n enriched_sent.append(\"({})\".format(\"|\".join(set(lemmas_for_synset))))\n else:\n enriched_sent.append(token.text)\n\nprint(\" \".join(enriched_sent))", "_____no_output_____" ] ], [ [ "That example may look simple but, if you play with the `domains` list, you'll find that the results have a kind of combinatorial explosion when run without reasonable constraints.\nImagine having a knowledge graph with millions of elements: you'd want to constrain searches where possible to avoid having every query take days/weeks/months/years to compute.", "_____no_output_____" ], [ "Sometimes the problems encountered when trying to understand a text โ€“ or better yet when trying to understand a _corpus_ (a dataset with many related texts) โ€“ become so complex that you need to visualize it first.\nHere's an interactive visualization for understanding texts: [scattertext](https://spacy.io/universe/project/scattertext), a product of the genius of [Jason Kessler](https://twitter.com/jasonkessler).\nTo install:\n\n```\nconda install -c conda-forge scattertext\n```\n\nLet's analyze text data from the party conventions during the 2012 US Presidential elections. 
It may take a minute or two to run, but the results from all that number crunching is worth the wait.", "_____no_output_____" ] ], [ [ "!pip install scattertext", "_____no_output_____" ], [ "import scattertext as st\n\nif \"merge_entities\" not in nlp.pipe_names:\n nlp.add_pipe(nlp.create_pipe(\"merge_entities\"))\n\nif \"merge_noun_chunks\" not in nlp.pipe_names:\n nlp.add_pipe(nlp.create_pipe(\"merge_noun_chunks\"))\n\nconvention_df = st.SampleCorpora.ConventionData2012.get_data() \ncorpus = st.CorpusFromPandas(convention_df,\n category_col=\"party\",\n text_col=\"text\",\n nlp=nlp).build()", "_____no_output_____" ] ], [ [ "Once you have the `corpus` ready, generate an interactive visualization in HTML:", "_____no_output_____" ] ], [ [ "html = st.produce_scattertext_explorer(\n corpus,\n category=\"democrat\",\n category_name=\"Democratic\",\n not_category_name=\"Republican\",\n width_in_pixels=1000,\n metadata=convention_df[\"speaker\"]\n)", "_____no_output_____" ] ], [ [ "Now we'll render the HTML โ€“ give it a minute or two to load, it's worth the wait...", "_____no_output_____" ] ], [ [ "from IPython.display import IFrame\nfrom IPython.core.display import display, HTML\nimport sys\n\nIN_COLAB = \"google.colab\" in sys.modules\nprint(IN_COLAB)", "_____no_output_____" ] ], [ [ "**NB: use the following cell on Google Colab:**", "_____no_output_____" ] ], [ [ "if IN_COLAB:\n display(HTML(\"<style>.container { width:98% !important; }</style>\"))\n display(HTML(html))", "_____no_output_____" ] ], [ [ "**NB: use the following cell instead on Jupyter in general:**", "_____no_output_____" ] ], [ [ "file_name = \"foo.html\"\n\nwith open(file_name, \"wb\") as f:\n f.write(html.encode(\"utf-8\"))\n\nIFrame(src=file_name, width = 1200, height=700)", "_____no_output_____" ] ], [ [ "Imagine if you had text from the past three years of customer support for a particular product in your organization. Suppose your team needed to understand how customers have been talking about the product? This _scattertext_ library might come in quite handy! 
You could cluster (k=2) on _NPS scores_ (a customer evaluation metric) then replace the Democrat/Republican dimension with the top two components from the clustering.", "_____no_output_____" ], [ "## Summary\n\nFive years ago, if youโ€™d asked about open source in Python for natural language, a default answer from many people working in data science would've been [NLTK](https://www.nltk.org/).\nThat project includes just about everything but the kitchen sink and has components which are relatively academic.\nAnother popular natural language project is [CoreNLP](https://stanfordnlp.github.io/CoreNLP/) from Stanford.\nAlso quite academic, albeit powerful, though _CoreNLP_ can be challenging to integrate with other software for production use.\n\nThen a few years ago everything in this natural language corner of the world began to change.\nThe two principal authors for _spaCy_ -- [Matthew Honnibal](https://twitter.com/honnibal) and [Ines Montani](https://twitter.com/_inesmontani) -- launched the project in 2015 and industry adoption was rapid.\nThey focused on an _opinionated_ approach (do what's needed, do it well, no more, no less) which provided simple, rapid integration into data science workflows in Python, as well as faster execution and better accuracy than the alternatives.\nBased on those priorities, _spaCy_ become sort of the opposite of _NLTK_.\nSince 2015, _spaCy_ has consistently focused on being an open source project (i.e., depending on its community for directions, integrations, etc.) and being commercial-grade software (not academic research).\nThat said, _spaCy_ has been quick to incorporate the SOTA advances in machine learning, effectively becoming a conduit for moving research into industry.\n\nIt's important to note that machine learning for natural language got a big boost during the mid-2000's as Google began to win international language translation competitions.\nAnother big change occurred during 2017-2018 when, following the many successes of _deep learning_, those approaches began to out-perform previous machine learning models.\nFor example, see the [ELMo](https://arxiv.org/abs/1802.05365) work on _language embedding_ by Allen AI, followed by [BERT](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) from Google, and more recently [ERNIE](https://medium.com/syncedreview/baidus-ernie-tops-google-s-bert-in-chinese-nlp-tasks-d6a42b49223d) by Baidu -- in other words, the search engine giants of the world have gifted the rest of us with a Sesame Street repertoire of open source embedded language models based on deep learning, which is now _state of the art_ (SOTA).\nSpeaking of which, to keep track of SOTA for natural language, keep an eye on [NLP-Progress](http://nlpprogress.com/) and [Papers with Code](https://paperswithcode.com/sota).\n\nThe use cases for natural language have shifted dramatically over the past two years, after deep learning techniques arose to the fore.\nCirca 2014, a natural language tutorial in Python might have shown _word count_ or _keyword search_ or _sentiment detection_ where the target use cases were relatively underwhelming.\nCirca 2019 we're talking about analyzing thousands of documents for vendor contracts in an industrial supply chain optimization ... 
or hundreds of millions of documents for policy holders of an insurance company, or gazillions of documents regarding financial disclosures.\nMore contemporary natural language work tends to be in NLU, often to support construction of _knowledge graphs,_ and increasingly in NLG where large numbers of similar documents can be summarized at human scale.\n\nThe [spaCy Universe](https://spacy.io/universe) is a great place to check for deep-dives into particular use cases, and to see how this field is evolving. Some selections from this \"universe\" include:\n\n - [Blackstone](https://spacy.io/universe/project/blackstone) โ€“ parsing unstructured legal texts\n - [Kindred](https://spacy.io/universe/project/kindred) โ€“ extracting entities from biomedical texts (e.g., Pharma)\n - [mordecai](https://spacy.io/universe/project/mordecai) โ€“ parsing geographic information\n - [Prodigy](https://spacy.io/universe/project/prodigy) โ€“ human-in-the-loop annotation to label datasets\n - [spacy-raspberry](https://spacy.io/universe/project/spacy-raspberry) โ€“ Raspberry PI image for running _spaCy_ and deep learning on edge devices\n - [Rasa NLU](https://spacy.io/universe/project/rasa) โ€“ Rasa integration for voice apps\n\nAlso, a couple super new items to mention:\n\n - [spacy-pytorch-transformers](https://explosion.ai/blog/spacy-pytorch-transformers) to fine tune (i.e., use _transfer learning_ with) the Sesame Street characters and friends: BERT, GPT-2, XLNet, etc.\n - [spaCy IRL 2019](https://irl.spacy.io/2019/) conference โ€“ check out videos from the talks!\n\nThere's so much more that can be done with _spaCy_ โ€“ hopefully this tutorial provides an introduction. We wish you all the best in your natural language work.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb111983322c204206b32e26467e3c36bf31c99e
4,227
ipynb
Jupyter Notebook
onnx-ecosystem/inference_demos/resnet50_modelzoo_onnxruntime_inference.ipynb
pridkett/onnx-docker
6829940f3b0051b82037f671d8dbda7be001858a
[ "MIT" ]
1
2021-06-25T17:54:18.000Z
2021-06-25T17:54:18.000Z
onnx-ecosystem/inference_demos/resnet50_modelzoo_onnxruntime_inference.ipynb
pridkett/onnx-docker
6829940f3b0051b82037f671d8dbda7be001858a
[ "MIT" ]
null
null
null
onnx-ecosystem/inference_demos/resnet50_modelzoo_onnxruntime_inference.ipynb
pridkett/onnx-docker
6829940f3b0051b82037f671d8dbda7be001858a
[ "MIT" ]
1
2021-04-18T16:30:15.000Z
2021-04-18T16:30:15.000Z
26.584906
185
0.566123
[ [ [ "## Inference for ResNet 50 using ONNX Runtime\n\nThis example demonstrates how to load an image classification model from the [ONNX model zoo](https://github.com/onnx/models) and confirm its accuracy based on included test data.", "_____no_output_____" ] ], [ [ "import numpy as np # we're going to use numpy to process input and output data\nimport onnxruntime # to inference ONNX models, we use the ONNX Runtime\nimport onnx\nfrom onnx import numpy_helper\nimport urllib.request", "_____no_output_____" ], [ "onnx_model_url = \"https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v2/resnet50v2.tar.gz\"\n\n# retrieve our model from the ONNX model zoo\nurllib.request.urlretrieve(onnx_model_url, filename=\"resnet50v2.tar.gz\")\n\n!tar xvzf resnet50v2.tar.gz", "_____no_output_____" ] ], [ [ "### Load sample inputs and outputs", "_____no_output_____" ] ], [ [ "test_data_dir = 'resnet50v2/test_data_set'\ntest_data_num = 3", "_____no_output_____" ], [ "import glob\nimport os\n\n# Load inputs\ninputs = []\nfor i in range(test_data_num):\n input_file = os.path.join(test_data_dir + '_{}'.format(i), 'input_0.pb')\n tensor = onnx.TensorProto()\n with open(input_file, 'rb') as f:\n tensor.ParseFromString(f.read())\n inputs.append(numpy_helper.to_array(tensor))\n\nprint('Loaded {} inputs successfully.'.format(test_data_num))\n \n# Load reference outputs\n\nref_outputs = []\nfor i in range(test_data_num):\n output_file = os.path.join(test_data_dir + '_{}'.format(i), 'output_0.pb')\n tensor = onnx.TensorProto()\n with open(output_file, 'rb') as f:\n tensor.ParseFromString(f.read()) \n ref_outputs.append(numpy_helper.to_array(tensor))\n \nprint('Loaded {} reference outputs successfully.'.format(test_data_num))", "_____no_output_____" ] ], [ [ "### Inference using ONNX Runtime", "_____no_output_____" ] ], [ [ "# Run the model on the backend\nsession = onnxruntime.InferenceSession('resnet50v2/resnet50v2.onnx', None)\n\n# get the name of the first input of the model\ninput_name = session.get_inputs()[0].name \n\nprint('Input Name:', input_name)", "_____no_output_____" ], [ "%%time\noutputs = [session.run([], {input_name: inputs[i]})[0] for i in range(test_data_num)]", "_____no_output_____" ], [ "print('Predicted {} results.'.format(len(outputs)))\n\n# Compare the results with reference outputs up to 4 decimal places\nfor ref_o, o in zip(ref_outputs, outputs):\n np.testing.assert_almost_equal(ref_o, o, 4)\n \nprint('ONNX Runtime outputs are similar to reference outputs!')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb1137f5d1d3ffbbdfd8005150d068c199250bb0
2,550
ipynb
Jupyter Notebook
Chapter10_Clustering/KMeansExercise/KMeans_solution.ipynb
tomex74/UdemyML
d9600d95783bae553e4142231aeb284a38a69f4e
[ "MIT" ]
8
2020-11-01T13:22:02.000Z
2022-03-18T09:28:12.000Z
Chapter10_Clustering/KMeansExercise/KMeans_solution.ipynb
tomex74/UdemyML
d9600d95783bae553e4142231aeb284a38a69f4e
[ "MIT" ]
null
null
null
Chapter10_Clustering/KMeansExercise/KMeans_solution.ipynb
tomex74/UdemyML
d9600d95783bae553e4142231aeb284a38a69f4e
[ "MIT" ]
9
2020-09-09T08:20:30.000Z
2022-01-08T09:59:59.000Z
22.566372
237
0.542353
[ [ [ "import numpy as np\nnp.random.seed(42)\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_iris\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score", "_____no_output_____" ] ], [ [ "Aufgabe 1: Lade das Dataset mit allen Features rein.", "_____no_output_____" ] ], [ [ "dataset = load_iris()\nx = dataset.data\ny = dataset.target", "_____no_output_____" ] ], [ [ "Aufgabe 2: Wende das k-Means mit einer beliebigen Anzahl an Cluster an.", "_____no_output_____" ] ], [ [ "scores = []\ns_scores = []\n\nfor n_clusters in [2, 3, 4, 5, 6]:\n max_iter = 10_000\n\n kmeans = KMeans(\n n_clusters=n_clusters,\n max_iter=max_iter,\n n_jobs=-1\n )\n kmeans.fit(x)\n y_pred = kmeans.predict(x)\n s_scores.append(silhouette_score(x, y_pred))\n scores.append(kmeans.score(x))", "_____no_output_____" ] ], [ [ "Aufgabe 3: Berechne den Score fรผr verschiedene Setups.", "_____no_output_____" ] ], [ [ "print(f\"Scores: {scores}\")\nprint(f\"silhouette_score: {s_scores}\")", "Scores: [-152.347951760358, -78.85144142614602, -57.25600931571815, -46.44618205128204, -39.05497786747788]\nsilhouette_score: [0.681046169211746, 0.5528190123564091, 0.49745518901737446, 0.4887488870931048, 0.3664804028900824]\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb113810dce9aee07517766a92cb7c65b9fc8a85
39,339
ipynb
Jupyter Notebook
scipy-2016-sklearn/notebooks/18 In Depth - Support Vector Machines.ipynb
sunny2309/scipy_conf_notebooks
30a85d5137db95e01461ad21519bc1bdf294044b
[ "MIT" ]
2
2021-01-09T15:57:26.000Z
2021-11-29T01:44:21.000Z
scipy-2016-sklearn/notebooks/18 In Depth - Support Vector Machines.ipynb
sunny2309/scipy_conf_notebooks
30a85d5137db95e01461ad21519bc1bdf294044b
[ "MIT" ]
5
2019-11-15T02:00:26.000Z
2021-01-06T04:26:40.000Z
scipy-2016-sklearn/notebooks/18 In Depth - Support Vector Machines.ipynb
sunny2309/scipy_conf_notebooks
30a85d5137db95e01461ad21519bc1bdf294044b
[ "MIT" ]
null
null
null
151.303846
17,564
0.883169
[ [ [ "%load_ext watermark\n%watermark -d -u -a 'Andreas Mueller, Kyle Kastner, Sebastian Raschka' -v -p numpy,scipy,matplotlib,scikit-learn", "Andreas Mueller, Kyle Kastner, Sebastian Raschka \nlast updated: 2017-09-10 \n\nCPython 2.7.13\nIPython 5.3.0\n\nnumpy 1.12.1\nscipy 0.19.0\nmatplotlib 2.0.2\nscikit-learn 0.18.1\n" ], [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "# SciPy 2016 Scikit-learn Tutorial", "_____no_output_____" ], [ "# In Depth - Support Vector Machines", "_____no_output_____" ], [ "SVM stands for \"support vector machines\". They are efficient and easy to use estimators.\nThey come in two kinds: SVCs, Support Vector Classifiers, for classification problems, and SVRs, Support Vector Regressors, for regression problems.", "_____no_output_____" ], [ "## Linear SVMs", "_____no_output_____" ], [ "The SVM module contains LinearSVC, which we already discussed briefly in the section on linear models.\nUsing ``SVC(kernel=\"linear\")`` will also yield a linear predictor that is only different in minor technical aspects.", "_____no_output_____" ], [ "## Kernel SVMs\nThe real power of SVMs lies in using kernels, which allow for non-linear decision boundaries. A kernel defines a similarity measure between data points. The most common are:\n\n- **linear** will give linear decision frontiers. It is the most computationally efficient approach and the one that requires the least amount of data.\n\n- **poly** will give decision frontiers that are polynomial. The order of this polynomial is given by the 'order' argument.\n\n- **rbf** uses 'radial basis functions' centered at each support vector to assemble a decision frontier. The size of the RBFs ultimately controls the smoothness of the decision frontier. RBFs are the most flexible approach, but also the one that will require the largest amount of data.\n\nPredictions in a kernel-SVM are made using the formular\n\n$$\n\\hat{y} = \\text{sign}(\\alpha_0 + \\sum_{j}\\alpha_j y_j k(\\mathbf{x^{(j)}}, \\mathbf{x}))\n$$\n\nwhere $\\mathbf{x}^{(j)}$ are training samples, $\\mathbf{y}^{(j)}$ the corresponding labels, $\\mathbf{x}$ is a test-sample to predict on, $k$ is the kernel, and $\\alpha$ are learned parameters.\n\nWhat this says is \"if $\\mathbf{x}$ is similar to $\\mathbf{x}^{(j)}$ then they probably have the same label\", where the importance of each $\\mathbf{x}^{(j)}$ for this decision is learned. [Or something much less intuitive about an infinite dimensional Hilbert-space]\n\nOften only few samples have non-zero $\\alpha$, these are called the \"support vectors\" from which SVMs get their name.\nThese are the most discriminant samples.\n\nThe most important parameter of the SVM is the regularization parameter $C$, which bounds the influence of each individual sample:\n\n- Low C values: many support vectors... Decision frontier = mean(class A) - mean(class B)\n- High C values: small number of support vectors: Decision frontier fully driven by most discriminant samples\n\n", "_____no_output_____" ], [ "The other important parameters are those of the kernel. 
Let's look at the RBF kernel in more detail:\n\n$$k(\\mathbf{x}, \\mathbf{x'}) = \\exp(-\\gamma ||\\mathbf{x} - \\mathbf{x'}||^2)$$", "_____no_output_____" ] ], [ [ "from sklearn.metrics.pairwise import rbf_kernel\n\nline = np.linspace(-3, 3, 100)[:, np.newaxis]\nkernel_value = rbf_kernel(line, [[0]], gamma=1)\nplt.plot(line, kernel_value);", "_____no_output_____" ] ], [ [ "The rbf kernel has an inverse bandwidth-parameter gamma, where large gamma mean a very localized influence for each data point, and\nsmall values mean a very global influence.\nLet's see these two parameters in action:", "_____no_output_____" ] ], [ [ "from figures import plot_svm_interactive\nplot_svm_interactive()", "/home/sunny/anaconda2/lib/python2.7/site-packages/IPython/html.py:14: ShimWarning: The `IPython.html` package has been deprecated since IPython 4.0. You should import from `notebook` instead. `IPython.html.widgets` has moved to `ipywidgets`.\n \"`IPython.html.widgets` has moved to `ipywidgets`.\", ShimWarning)\n" ] ], [ [ "## Exercise: tune a SVM on the digits dataset", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_digits\nfrom sklearn.svm import SVC\n\ndigits = load_digits()\nX_digits, y_digits = digits.data, digits.target\n\n# split the dataset, apply grid-search", "_____no_output_____" ], [ "#%load solutions/18_svc_grid.py", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb114d90ced36ada021ae8ba3e8d6f7ac535950c
105,070
ipynb
Jupyter Notebook
examples/unreliable_soln.ipynb
chwebster/ThinkBayes2
49af0e36c38c2656d7b91117cfa2b019ead81988
[ "MIT" ]
1,337
2015-01-06T06:23:55.000Z
2022-03-31T21:06:21.000Z
examples/unreliable_soln.ipynb
chwebster/ThinkBayes2
49af0e36c38c2656d7b91117cfa2b019ead81988
[ "MIT" ]
43
2015-04-23T13:14:15.000Z
2022-01-04T12:55:59.000Z
examples/unreliable_soln.ipynb
chwebster/ThinkBayes2
49af0e36c38c2656d7b91117cfa2b019ead81988
[ "MIT" ]
1,497
2015-01-13T22:05:32.000Z
2022-03-30T09:19:53.000Z
395
34,140
0.940916
[ [ [ "# Think Bayes\n\nThis notebook presents example code and exercise solutions for Think Bayes.\n\nCopyright 2018 Allen B. Downey\n\nMIT License: https://opensource.org/licenses/MIT", "_____no_output_____" ] ], [ [ "# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import classes from thinkbayes2\nfrom thinkbayes2 import Hist, Pmf, Suite, Beta\nimport thinkplot", "_____no_output_____" ] ], [ [ "## Unreliable observation\n\nSuppose that instead of observing coin tosses directly, you measure the outcome using an instrument that is not always correct. Specifically, suppose there is a probability `y` that an actual heads is reported as tails, or actual tails reported as heads.\n\nWrite a class that estimates the bias of a coin given a series of outcomes and the value of `y`.\n\nHow does the spread of the posterior distribution depend on `y`?", "_____no_output_____" ] ], [ [ "# Solution\n\n# Here's a class that models an unreliable coin\n\nclass UnreliableCoin(Suite):\n \n def __init__(self, prior, y):\n \"\"\"\n prior: seq or map\n y: probability of accurate measurement\n \"\"\"\n super().__init__(prior)\n self.y = y\n \n def Likelihood(self, data, hypo):\n \"\"\"\n data: outcome of unreliable measurement, either 'H' or 'T'\n hypo: probability of heads, 0-100\n \"\"\"\n x = hypo / 100\n y = self.y\n if data == 'H':\n return x*y + (1-x)*(1-y)\n else:\n return x*(1-y) + (1-x)*y", "_____no_output_____" ], [ "# Solution\n\n# Now let's initialize an UnreliableCoin with `y=0.9`:\n\nprior = range(0, 101)\nsuite = UnreliableCoin(prior, y=0.9)\nthinkplot.Pdf(suite)", "_____no_output_____" ], [ "# Solution\n\n# And update with 3 heads and 7 tails.\n\nfor outcome in 'HHHTTTTTTT':\n suite.Update(outcome)\n \nthinkplot.Pdf(suite)", "_____no_output_____" ], [ "# Solution\n\n# Now let's try it out with different values of `y`:\n\ndef plot_posterior(y, data):\n prior = range(0, 101)\n suite = UnreliableCoin(prior, y=y)\n for outcome in data:\n suite.Update(outcome)\n \n thinkplot.Pdf(suite, label='y=%g' % y)", "_____no_output_____" ], [ "# Solution\n\n# The posterior distribution gets wider as the measurement gets less reliable.\n\ndata = 'HHHTTTTTTT'\nplot_posterior(1, data)\nplot_posterior(0.8, data)\nplot_posterior(0.6, data)\nthinkplot.decorate(xlabel='Probability of heads (x)',\n ylabel='PMF')", "_____no_output_____" ], [ "# Solution\n\n# At `y=0.5`, the measurement provides no information, so the posterior equals the prior:\n\nplot_posterior(0.5, data)\nthinkplot.decorate(xlabel='Probability of heads (x)',\n ylabel='PMF')", "_____no_output_____" ], [ "# Solution\n\n# As the coin gets less reliable (below `y=0.5`) the distribution gets narrower again. \n# In fact, a measurement with `y=0` is just as good as one with `y=1`, \n# provided that we know what `y` is.\n\nplot_posterior(0.4, data)\nplot_posterior(0.2, data)\nplot_posterior(0.0, data)\nthinkplot.decorate(xlabel='Probability of heads (x)',\n ylabel='PMF')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb1168b467225d7ed8994992add99a43cd9873d1
12,888
ipynb
Jupyter Notebook
YahooFinance/YahooFinance_Get_Stock_Update.ipynb
krajai/testt
3aaf5fd7fe85e712c8c1615852b50f9ccb6737e5
[ "BSD-3-Clause" ]
1
2022-03-24T07:46:45.000Z
2022-03-24T07:46:45.000Z
YahooFinance/YahooFinance_Get_Stock_Update.ipynb
PZawieja/awesome-notebooks
8ae86e5689749716e1315301cecdad6f8843dcf8
[ "BSD-3-Clause" ]
null
null
null
YahooFinance/YahooFinance_Get_Stock_Update.ipynb
PZawieja/awesome-notebooks
8ae86e5689749716e1315301cecdad6f8843dcf8
[ "BSD-3-Clause" ]
null
null
null
24.044776
296
0.534295
[ [ [ "<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>", "_____no_output_____" ], [ "# YahooFinance - Get Stock Update\n<a href=\"https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/YahooFinance/YahooFinance_Get_Stock_Update.ipynb\" target=\"_parent\"><img src=\"https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg\"/></a>", "_____no_output_____" ], [ "**Tags:** #yahoofinance #usdinr #plotly #investors #analytics #automation #plotly", "_____no_output_____" ], [ "**Author:** [Megha Gupta](https://github.com/megha2907)", "_____no_output_____" ], [ " Description: With this template you will get INR USD rate visualized on a chart", "_____no_output_____" ], [ "## Input", "_____no_output_____" ], [ "### Import Libraries", "_____no_output_____" ] ], [ [ "import naas \nfrom naas_drivers import yahoofinance, plotly\nimport markdown2\nfrom IPython.display import Markdown as md", "_____no_output_____" ] ], [ [ "### Setup Yahoo parameters", "_____no_output_____" ], [ "๐Ÿ‘‰ Here you can input:<br>\n- yahoo ticker : get tickers <a href='https://finance.yahoo.com/trending-tickers?.tsrc=fin-srch'>here</a>\n- date from\n- date to", "_____no_output_____" ] ], [ [ "TICKER = 'INR=X'\ndate_from = -30\ndate_to = 'today'", "_____no_output_____" ] ], [ [ "### Setup your email parameters\n๐Ÿ‘‰ Here you can input your sender email and destination email\n\nNote: emails are sent from [email protected] by default", "_____no_output_____" ] ], [ [ "email_to = [\"[email protected]\"]\nemail_from = None", "_____no_output_____" ] ], [ [ "## Model", "_____no_output_____" ], [ "### Get the data from yahoo finance using naas drivers", "_____no_output_____" ] ], [ [ "#data cleaning\ndf = yahoofinance.get(TICKER, date_from=date_from, date_to = date_to)\ndf = df.dropna()# drop the na values from the dataframe\ndf.reset_index(drop=True)\ndf = df.sort_values(\"Date\", ascending=False).reset_index(drop=True)\ndf.head()", "_____no_output_____" ] ], [ [ "### Extract value from data", "_____no_output_____" ] ], [ [ "LASTOPEN = round(df.loc[0, \"Open\"], 2)\nLASTCLOSE = round(df.loc[0, \"Close\"], 2)\nYESTERDAYOPEN = round(df.loc[1, \"Open\"], 2)\nYESTERDAYCLOSE = round(df.loc[1, \"Close\"], 2)\nMAXRATE = round(df['Open'].max(),2)\nMXDATEOPEN = df.loc[df['Open'].idxmax(), \"Date\"].strftime(\"%Y-%m-%d\")\nMINRATE = round(df['Open'].min(),2)\nMNDATEOPEN = df.loc[df['Open'].idxmin(), \"Date\"].strftime(\"%Y-%m-%d\")", "_____no_output_____" ] ], [ [ "### Plot the data", "_____no_output_____" ] ], [ [ "last_date = df.loc[df.index[0], \"Date\"].strftime(\"%Y-%m-%d\")\n\noutput = plotly.linechart(df,\n x=\"Date\",\n y=['Open','Close'],\n title=f\"<b>INR USD rates of last month</b><br><span style='font-size: 13px;'>Last value as of {last_date}: Open={LASTOPEN}, Close={LASTCLOSE}</span>\")", "_____no_output_____" ] ], [ [ "## Output", "_____no_output_____" ], [ "### Save the dataset in csv", "_____no_output_____" ] ], [ [ "df.to_csv(f\"{TICKER}_LastMonth.csv\", index=False)", "_____no_output_____" ] ], [ [ "### Create markdown template", "_____no_output_____" ] ], [ [ "%%writefile message.md\nHello world,\n\nThe **TICKER** price is Open LASTOPEN and Close LASTCLOSE right now. <br>\n**Yesterday Open**: YESTERDAYOPEN <br>\n**Yesterday Close**: YESTERDAYCLOSE <br> \nThe Max Open rate of **TICKER** was on MXDATEOPEN which was MAXRATE. 
<br>\nThe Min Open rate of **TICKER** was on MNDATEOPEN which was MINRATE. <br>\n\nAttached is the excel file for your reference. <br>\n\nHave a nice day.\n<br>\n\nPS: You can [send the email again](link_webhook) if you need a fresh update.<br>\n<div><strong>Full Name</strong></div>\n<div>Open source lover | <a href=\"http://www.naas.ai/\" target=\"_blank\">Naas</a></div>\n<div>+ 33 1 23 45 67 89</div>\n<div><small>This is an automated email from my Naas account</small></div>", "_____no_output_____" ] ], [ [ "### Add email template as dependency", "_____no_output_____" ] ], [ [ "naas.dependency.add(\"message.md\")", "_____no_output_____" ] ], [ [ "### Replace values in template", "_____no_output_____" ] ], [ [ "markdown_file = \"message.md\"\ncontent = open(markdown_file, \"r\").read()\nmd = markdown2.markdown(content)\nmd", "_____no_output_____" ], [ "post = md.replace(\"LASTOPEN\", str(LASTOPEN))\npost = post.replace(\"LASTCLOSE\", str(LASTCLOSE))\npost = post.replace(\"YESTERDAYOPEN\", str(YESTERDAYOPEN))\npost = post.replace(\"YESTERDAYCLOSE\", str(YESTERDAYCLOSE))\npost = post.replace(\"MXDATEOPEN\", str(MXDATEOPEN))\npost = post.replace(\"MAXRATE\", str(MAXRATE))\npost = post.replace(\"MNDATEOPEN\", str(MNDATEOPEN))\npost = post.replace(\"MINRATE\", str(MINRATE))\npost = post.replace(\"TICKER\", str(TICKER))\npost", "_____no_output_____" ] ], [ [ "### Add webhook to run your notebook again", "_____no_output_____" ] ], [ [ "link_webhook = naas.webhook.add()", "_____no_output_____" ] ], [ [ "### Send by email", "_____no_output_____" ] ], [ [ "subject = f\"๐Ÿ“ˆ {TICKER} Open and close rates as of today\"\ncontent = post\nfiles = [f\"{TICKER}_LastMonth.csv\"]\n\nnaas.notification.send(email_to=email_to,\n subject=subject,\n html=content,\n email_from=email_from,\n files=files)", "_____no_output_____" ] ], [ [ "### Schedule your notebook\nPlease uncomment and run the cell below to schedule your notebook everyday at 8:00 during business days", "_____no_output_____" ] ], [ [ "# import naas\n# naas.scheduler.add(\"0 8 1-5 * *\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb116f9f44d317ae9dc8a84686b4c6efb10b601a
11,759
ipynb
Jupyter Notebook
hpc/nways/nways_labs/nways_MD/English/Fortran/jupyter_notebook/openacc/nways_openacc_opt_2.ipynb
programmah/gpubootcamp
82c26e67c8f791e4b7db8e85b739a0f9372e035b
[ "Apache-2.0" ]
189
2020-11-19T21:12:48.000Z
2022-03-31T13:47:01.000Z
hpc/nways/nways_labs/nways_MD/English/Fortran/jupyter_notebook/openacc/nways_openacc_opt_2.ipynb
programmah/gpubootcamp
82c26e67c8f791e4b7db8e85b739a0f9372e035b
[ "Apache-2.0" ]
62
2021-05-05T17:03:18.000Z
2022-03-31T07:23:36.000Z
hpc/nways/nways_labs/nways_MD/English/Fortran/jupyter_notebook/openacc/nways_openacc_opt_2.ipynb
programmah/gpubootcamp
82c26e67c8f791e4b7db8e85b739a0f9372e035b
[ "Apache-2.0" ]
135
2020-11-19T21:32:21.000Z
2022-03-31T11:08:41.000Z
52.030973
605
0.670635
[ [ [ "Before we begin, let's execute the cell below to display information about the CUDA driver and GPUs running on the server by running the `nvidia-smi` command. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell.", "_____no_output_____" ] ], [ [ "!nvidia-smi", "_____no_output_____" ] ], [ [ "## Learning objectives\nThe **goal** of this lab is to:\n\n- Dig deeper into kernels by analyzing it with Nsight Compute", "_____no_output_____" ], [ "In the previous section, we learned to optimize the parallel [RDF](../serial/rdf_overview.ipynb) application using OpenACC. Moreover, we used NVIDIA Nsight Systems to get a system-wide performance analysis. Now, let's dig deeper and profile the kernel with the Nsight Compute profiler to get detailed performance metrics and find out how the OpenACC is mapped at the Compute Unified Device Architecture(CUDA) hardware level. Note: You will get a better understanding of the GPU architecture in the CUDA notebooks.\n\n\nTo do this, let's use the [solution](../../source_code/openacc/SOLUTION/rdf_collapse.f90) as a reference to get a similar report from Nsight Compute. Run the application, and profile it with the Nsight Systems first.\n\nNow, let's compile, and profile it with Nsight Systems first.", "_____no_output_____" ] ], [ [ "#compile the solution for Tesla GPU\n!cd ../../source_code/openacc && nvfortran -acc -ta=tesla,lineinfo -Minfo=accel -o rdf nvtx.f90 SOLUTION/rdf_collapse.f90 -L/opt/nvidia/hpc_sdk/Linux_x86_64/21.3/cuda/11.2/lib64 -lnvToolsExt", "_____no_output_____" ], [ "#profile the solution with Nsight Systems \n!cd ../../source_code/openacc && nsys profile -t nvtx,openacc --stats=true --force-overwrite true -o rdf_collapse_solution ./rdf", "_____no_output_____" ] ], [ [ "Let's checkout the profiler's report. Download and save the report file by holding down <mark>Shift</mark> and <mark>Right-Clicking</mark> [Here](../../source_code/openacc/rdf_collapse_solution.qdrep) and open it via the GUI. Now, right click on the kernel `rdf_98_gpu` and click on \"Analyze the Selected Kernel with NVIDIA Nsight Compute\" (see below screenshot). \n\n<img src=\"../images/f_compute_analyz.png\">\n\nThen, make sure to tick the radio button next to \"Display the command line to user NVIDIA Nsight Compute CLI\". \n\n<img src=\"../images/compute_command_line.png\" width=\"50%\" height=\"50%\">\n\nThen, you simply copy the command, run it and analyze the selected kernel. \n\n<img src=\"../images/f_compute_command.png\" width=\"50%\" height=\"50%\">\n\nTo profile the selected kernel, run the below cell (by adding `--set full` we make sure to capture all the sections in Nsight Compute profiler):", "_____no_output_____" ] ], [ [ "#profile the selected kernel in the solution with Nsight compute\n!cd ../../source_code/openacc && ncu --set full --launch-skip 1 --launch-count 1 -o rdf_collapse_solution ./rdf", "_____no_output_____" ] ], [ [ "Let's checkout the Nsight Compute profiler's report together. Download and save the report file by holding down <mark>Shift</mark> and <mark>Right-Clicking</mark> [Here](../../source_code/openacc/rdf_collapse_solution.ncu-rep) and open it via the GUI. Let's checkout the first section called \"GPU Speed Of Light\". This section gives an overview of the utilization for compute and memory resources on the GPU. 
As you can see from the below screenshot, the Speed of Light (SOL) reports the achieved percentage of utilization of 30.04% for SM and 70.10% for memory. \n\n\n<img src=\"../images/f_sol.png\">\n\n\n**Extra**: If you can use the baseline feature on the Nsight Compute and compare the analysis of the kernel from this version of the RDF (which uses data directives and collapse clause) with the very first parallel version where we only added parallel directives and used managed memory, you can see how much improvements we got (see the below screenshot for reference):\n\n<img src=\"../images/f_sol_baseline.png\">\n\nIt is clear that we were able to reduce the execution time to half(red rectangle) and increase the SM and memory utilization (green rectangle). However, as you see the device is still underutilized. Let's look at the roofline analysis which indicates that the application is bandwith bound and the kernel exhibits low compute throughput and memory is more heavily utilized than Compute and it is clear the the memory is the bottleneck.\n\n<img src=\"../images/f_roofline_collapse.png\">\n\nThe Nsight Compute profiler suggests us to checkout the \"Memory Workload Analysis\" report sections to see where the memory system bottleneck is. There are 9.85 M instructions loading from or storing to the global memory space. The link going from L1/TEX Cache to Global shows 8.47 M requests generated due to global load instructions.\n\n<img src=\"../images/f_memory_collapse.png\">\n\n\nLet's have a look at the table showing L1/TEX Cache. The \"Sectors/Req\" column shows the average ratio of sectors to requests for the L1 cache. For the same number of active threads in a warp, smaller numbers imply a more efficient memory access pattern. For warps with 32 active threads, the optimal ratios per access size are: `32-bit: 4`, `64-bit: 8`, `128-bit: 16`. Smaller ratios indicate some degree of uniformity or overlapped loads within a cache line. Checkout the [GPU Architecture Terminologies](../GPU_Architecture_Terminologies.ipynb) notebook to learn more about threads and warps.\n\nIn the example screenshot, we can see that this number is higher. This implies uncoalesced memory accesses and will result in increased memory traffic. We are not efficiently utilizing the bytes transferred.\n\n<img src=\"../images/f_memory_sec.png\">\n\nNow, let's have a look at the \"Source Counters\" section located at the end of \"Details\" page of the profiler report. The section contains tables indicating the N highest or lowest values of one or more metrics in the selected kernel source code. Hotspot tables point out performance problems in the source. \n\n<img src=\"../images/f_source_loc.png\">\n\nWe can select the location links to navigate directly to this location in the \"Source\" page. Moreover, you can hover the mouse over a value to see which metrics contribute to it.\n\n<img src=\"../images/f_source_hover.png\">\n\nThe \"Source\" page displays metrics that can be correlated with source code. It is filtered to only show (SASS) functions that were executed in the kernel launch.\n\n<!--<img src=\"../images/source_sass_collapse.png\">-->\n\n<img src=\"../images/f_source_sass.png\">\n\nThe \"Source\" section in the \"Details\" page indicates that the issue is *uncoalesced Global memory access*. \n\n<img src=\"../images/uncoalesced_hint.png\">\n\n**Memory Coalescing**\n\nOn GPUs, threads are executed in warps. 
When we have a group of 32 contiguous threads called *warp* accessing adjacent locations in memory, we have *Coalesced memory* access and as a result we have few transactions and higher utilization. However, if a warp of 32 threads accessing scattered memory locations, then we have *Uncoalesced memory* access and this results in high number of transactions and low utilization.\n\n\n<img src=\"../images/coalesced_mem.png\">\n\nWithout changing the data structure and refactoring the code, we cannot fix this issue and improve the performance further using OpenACC in a straightforward easier way. The next step would be to look into how to optimize this application further with CUDA and perhaps take advantage of shared memory.", "_____no_output_____" ], [ "## Post-Lab Summary\n\nIf you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below.", "_____no_output_____" ] ], [ [ "%%bash\ncd ..\nrm -f nways_files.zip\nzip -r nways_files.zip *", "_____no_output_____" ] ], [ [ "**After** executing the above zip command, you should be able to download and save the zip file [here] by holding down <mark>Shift</mark> and <mark>Right-Clicking</mark> [Here](../nways_files.zip).\nLet us now go back to parallelizing our code using other approaches.\n\n**IMPORTANT**: Please click on **HOME** to go back to the main notebook for *N ways of GPU programming for MD* code.\n\n-----\n\n# <p style=\"text-align:center;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em\"> <a href=../../../nways_MD_start.ipynb>HOME</a></p>\n\n-----\n\n\n# Links and Resources\n\n[NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/)\n\n[NVIDIA Nsight Compute](https://developer.nvidia.com/nsight-compute)\n\n[CUDA Toolkit Download](https://developer.nvidia.com/cuda-downloads)\n\n**NOTE**: To be able to see the Nsight System profiler output, please download Nsight System latest version from [here](https://developer.nvidia.com/nsight-systems).\n\nDon't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community.\n\n--- \n\n## Licensing \n\nThis material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb118903fa454318e4bb39faebef498450c663f7
3,034
ipynb
Jupyter Notebook
Exercicio_17_Catetos_e_Hipotenusa.ipynb
samucale/Curso_em_Video
4a3f09be80cd26202d3404d4b90d882556a623be
[ "MIT" ]
1
2021-03-01T14:30:09.000Z
2021-03-01T14:30:09.000Z
Exercicio_17_Catetos_e_Hipotenusa.ipynb
samucale/Curso_em_Video
4a3f09be80cd26202d3404d4b90d882556a623be
[ "MIT" ]
null
null
null
Exercicio_17_Catetos_e_Hipotenusa.ipynb
samucale/Curso_em_Video
4a3f09be80cd26202d3404d4b90d882556a623be
[ "MIT" ]
null
null
null
26.614035
274
0.486486
[ [ [ "<a href=\"https://colab.research.google.com/github/samucale/CURSO.PYTHON.PROF.GUSTAVO-GUANABARA/blob/main/Exercicio_17_Catetos_e_Hipotenusa.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "*FAร‡A UM PROGRAMA QUE LEIA O COMPRIMENTO DO CATETO OPOSTO E DO CATETO ADJACENTE DE UM TRIANGULO RETANGULO CALCULE E MOSTRE O CUMPRIMENTO DA HPOTENUSA*", "_____no_output_____" ] ], [ [ "from math import hypot", "_____no_output_____" ], [ "co = float(input('Comprimento do cateto oposto: '))\r\nca = float(input('Comprirmtno do cateto adjacente: '))\r\nhi = hypot(co,ca)\r\nprint('A hipotenusa vai medir {:.2f}'.format(hi))\r\n", "Comprimento do cateto oposto: 3\nComprirmtno do cateto adjacente: 2\nA hipotenusa vai medir 3.61\n" ], [ "co = float(input('Comprimento do cateto oposto: '))\r\nca = float(input('Comprimento do cateto adjacente: '))\r\nhi=hypot(co,ca)\r\nprint('A hipotenusa vai medir {:.2f}'.format(hi)) ", "Comprimento do cateto oposto: 3\nComprimento do cateto adjacente: 2\nA hipotenusa vai medir 3.61\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
cb118cbccbfe7ceef428b8b4456f9cc014b7d041
976
ipynb
Jupyter Notebook
statistics-notebook/tests/notebooks/iminuit.ipynb
madsbk/nbi-jupyter-docker-stacks
7f40eb6613ad829e850b00e5fdf2eecbda30aca9
[ "MIT" ]
2
2020-10-04T19:38:23.000Z
2021-04-23T17:26:25.000Z
statistics-notebook/tests/notebooks/iminuit.ipynb
madsbk/nbi-jupyter-docker-stacks
7f40eb6613ad829e850b00e5fdf2eecbda30aca9
[ "MIT" ]
8
2021-05-17T12:26:51.000Z
2022-03-10T09:25:05.000Z
statistics-notebook/tests/notebooks/iminuit.ipynb
madsbk/nbi-jupyter-docker-stacks
7f40eb6613ad829e850b00e5fdf2eecbda30aca9
[ "MIT" ]
2
2020-04-27T06:40:11.000Z
2021-03-04T10:09:17.000Z
19.52
62
0.465164
[ [ [ "from iminuit import Minuit\n\n\ndef f(x, y, z):\n return (x - 2) ** 2 + (y - 3) ** 2 + (z - 4) ** 2\n\n\nm = Minuit(f)\n\nm.migrad() # run optimiser\nprint(m.values) # {'x': 2,'y': 3,'z': 4}\n\nm.hesse() # run covariance estimator\nprint(m.errors) # {'x': 1,'y': 1,'z': 1}", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cb11964e1a80a2a5f08c45c39d0e03b8e9fb8e1c
827,403
ipynb
Jupyter Notebook
3. Facial Keypoint Detection, Complete Pipeline.ipynb
jenchen/facial-keypoint-detection
1844ec9a61b8d07aefed8d44d5d9cdbedf5e675b
[ "MIT" ]
null
null
null
3. Facial Keypoint Detection, Complete Pipeline.ipynb
jenchen/facial-keypoint-detection
1844ec9a61b8d07aefed8d44d5d9cdbedf5e675b
[ "MIT" ]
null
null
null
3. Facial Keypoint Detection, Complete Pipeline.ipynb
jenchen/facial-keypoint-detection
1844ec9a61b8d07aefed8d44d5d9cdbedf5e675b
[ "MIT" ]
null
null
null
2,304.743733
323,020
0.961266
[ [ [ "## Face and Facial Keypoint detection\n\nAfter you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input and, so, to detect any face, you'll first have to do some pre-processing.\n\n1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).\n2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was tp rescale, normalize, and turn any iimage into a Tensor to be accepted as input to your CNN.\n3. Use your trained model to detect facial keypoints on the image.\n\n---", "_____no_output_____" ], [ "In the next python cell we load in required libraries for this section of the project.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n%matplotlib inline", "_____no_output_____" ] ], [ [ "#### Select an image \n\nSelect an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.", "_____no_output_____" ] ], [ [ "import cv2\n# load in color image for face detection\nimage = cv2.imread('images/obamas.jpg')\n\n# switch red and blue color channels \n# --> by default OpenCV assumes BLUE comes first, not RED as in many images\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# plot the image\nfig = plt.figure(figsize=(9,9))\nplt.imshow(image)", "_____no_output_____" ] ], [ [ "## Detect all faces in an image\n\nNext, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.\n\nIn the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). 
You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.\n\nAn example of face detection on a variety of images is shown below.\n\n<img src='images/haar_cascade_ex.png' width=80% height=80%/>\n", "_____no_output_____" ] ], [ [ "# load in a haar cascade classifier for detecting frontal faces\nface_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')\n\n# run the detector\n# the output here is an array of detections; the corners of each detection box\n# if necessary, modify these parameters until you successfully identify every face in a given image\nfaces = face_cascade.detectMultiScale(image, 1.2, 2)\n\n# make a copy of the original image to plot detections on\nimage_with_detections = image.copy()\n\n# loop over the detected faces, mark the image where each face is found\nfor (x,y,w,h) in faces:\n # draw a rectangle around each detected face\n # you may also need to change the width of the rectangle drawn depending on image resolution\n cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3) \n\nfig = plt.figure(figsize=(9,9))\n\nplt.imshow(image_with_detections)", "_____no_output_____" ] ], [ [ "## Loading in a trained model\n\nOnce you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.\n\nFirst, load your best model by its filename.", "_____no_output_____" ] ], [ [ "import torch\nfrom models import Net\n\nnet = Net()\n\n## COMPLETED: load the best saved model parameters (by your path name)\n## You'll need to un-comment the line below and add the correct name for *your* saved model\nnet.load_state_dict(torch.load('saved_models/keypoints_model_rms_prop_20.pt'))\n\n## print out your net and prepare it for testing (uncomment the line below)\nnet.eval()", "_____no_output_____" ] ], [ [ "## Keypoint detection\n\nNow, we'll loop over each detected face in an image (again!) only this time, you'll transform those faces in Tensors that your CNN can accept as input images.\n\n### TODO: Transform each detected face into an input Tensor\n\nYou'll need to perform the following steps for each detected face:\n1. Convert the face from RGB to grayscale\n2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]\n3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)\n4. Reshape the numpy image into a torch image.\n\n**Hint**: The sizes of faces detected by a Haar detector and the faces your network has been trained on are of different sizes. If you find that your model is generating keypoints that are too small for a given face, try adding some padding to the detected `roi` before giving it as input to your model.\n\nYou may find it useful to consult to transformation code in `data_load.py` to help you perform these processing steps.\n\n\n### TODO: Detect and display the predicted keypoints\n\nAfter each face has been appropriately converted into an input Tensor for your network to see as input, you can apply your `net` to each face. The ouput should be the predicted the facial keypoints. These keypoints will need to be \"un-normalized\" for display, and you may find it helpful to write a helper function like `show_keypoints`. 
You should end up with an image like the following with facial keypoints that closely match the facial features on each individual face:\n\n<img src='images/michelle_detected.png' width=30% height=30%/>\n\n\n", "_____no_output_____" ] ], [ [ "def show_keypoints(image, keypoints):\n plt.figure(figsize=(5,5))\n \n keypoints = keypoints.data.numpy()\n keypoints = keypoints * 60.0 + 96 \n keypoints = np.reshape(keypoints, (68, -1)) # reshape to 2 X 68 keypoint\n\n image = image.numpy() \n image = np.transpose(image, (1, 2, 0)) # (H x W x C)\n image = np.squeeze(image)\n plt.imshow(image, cmap='gray')\n plt.scatter(keypoints[:, 0], keypoints[:, 1], s=40, marker='.', c='m')", "_____no_output_____" ], [ "from torch.autograd import Variable\n\nimage_copy = np.copy(image)\n\n# loop over the detected faces from your haar cascade\nfor (x,y,w,h) in faces:\n \n # Select the region of interest that is the face in the image \n# roi = image_copy[y:y+h, x:x+w]\n roi = image_copy[y-30:y+h+50, x-30:x+w+50]\n \n \n ## COMPLETED: Convert the face region from RGB to grayscale\n roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)\n \n ## COMPLETED: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]\n roi = roi/255\n \n ## COMPLETED: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)\n roi = cv2.resize(roi, (224, 224))\n \n ## COMPLETED: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)\n if(len(roi.shape) == 2):\n roi = roi.reshape(roi.shape[0], roi.shape[1], 1)\n roi = roi.transpose((2, 0, 1))\n print(roi.shape)\n \n ## COMPLETED: Make facial keypoint predictions using your loaded, trained network \n tensor = Variable(torch.from_numpy(roi))\n tensor = tensor.type(torch.FloatTensor)\n tensor.unsqueeze_(0)\n keypoints = net(tensor)\n \n ## COMPLETED: Display each detected face and the corresponding keypoints \n show_keypoints(tensor.squeeze(0), keypoints)\n", "(1, 224, 224)\n(1, 224, 224)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb11a3c6ebb334e29ebcab93342e61672aa13be7
3,332
ipynb
Jupyter Notebook
DSA/backtracking/wordSearch.ipynb
lance-lh/Data-Structures-and-Algorithms
c432654edaeb752536e826e88bcce3ed2ab000fb
[ "MIT" ]
1
2019-03-27T13:00:28.000Z
2019-03-27T13:00:28.000Z
DSA/backtracking/wordSearch.ipynb
lance-lh/Data-Structures-and-Algorithms
c432654edaeb752536e826e88bcce3ed2ab000fb
[ "MIT" ]
null
null
null
DSA/backtracking/wordSearch.ipynb
lance-lh/Data-Structures-and-Algorithms
c432654edaeb752536e826e88bcce3ed2ab000fb
[ "MIT" ]
null
null
null
28.724138
249
0.469388
[ [ [ "Given a 2D board and a word, find if the word exists in the grid.\n\nThe word can be constructed from letters of sequentially adjacent cell, where \"adjacent\" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.\n\nExample:\n\n board =\n [\n ['A','B','C','E'],\n ['S','F','C','S'],\n ['A','D','E','E']\n ]\n\n Given word = \"ABCCED\", return true.\n Given word = \"SEE\", return true.\n Given word = \"ABCB\", return false.", "_____no_output_____" ], [ "> ่ฟ™้“้ข˜ๆ˜ฏๅ…ธๅž‹็š„ๆทฑๅบฆไผ˜ๅ…ˆ้ๅކDFS็š„ๅบ”็”จ๏ผŒๅŽŸไบŒ็ปดๆ•ฐ็ป„ๅฐฑๅƒๆ˜ฏไธ€ไธช่ฟทๅฎซ๏ผŒๅฏไปฅไธŠไธ‹ๅทฆๅณๅ››ไธชๆ–นๅ‘่กŒ่ตฐ๏ผŒๆˆ‘ไปฌไปฅไบŒ็ปดๆ•ฐ็ป„ไธญๆฏไธ€ไธชๆ•ฐ้ƒฝไฝœไธบ่ตท็‚นๅ’Œ็ป™ๅฎšๅญ—็ฌฆไธฒๅšๅŒน้…๏ผŒๆˆ‘ไปฌ่ฟ˜้œ€่ฆไธ€ไธชๅ’ŒๅŽŸๆ•ฐ็ป„็ญ‰ๅคงๅฐ็š„visitedๆ•ฐ็ป„๏ผŒๆ˜ฏboolๅž‹็š„๏ผŒ็”จๆฅ่ฎฐๅฝ•ๅฝ“ๅ‰ไฝ็ฝฎๆ˜ฏๅฆๅทฒ็ป่ขซ่ฎฟ้—ฎ่ฟ‡๏ผŒๅ› ไธบ้ข˜็›ฎ่ฆๆฑ‚ไธ€ไธชcellๅช่ƒฝ่ขซ่ฎฟ้—ฎไธ€ๆฌกใ€‚ๅฆ‚ๆžœไบŒ็ปดๆ•ฐ็ป„board็š„ๅฝ“ๅ‰ๅญ—็ฌฆๅ’Œ็›ฎๆ ‡ๅญ—็ฌฆไธฒwordๅฏนๅบ”็š„ๅญ—็ฌฆ็›ธ็ญ‰๏ผŒๅˆ™ๅฏนๅ…ถไธŠไธ‹ๅทฆๅณๅ››ไธช้‚ปๅญ—็ฌฆๅˆ†ๅˆซ่ฐƒ็”จDFS็š„้€’ๅฝ’ๅ‡ฝๆ•ฐ๏ผŒๅช่ฆๆœ‰ไธ€ไธช่ฟ”ๅ›žtrue๏ผŒ้‚ฃไนˆๅฐฑ่กจ็คบๅฏไปฅๆ‰พๅˆฐๅฏนๅบ”็š„ๅญ—็ฌฆไธฒ๏ผŒๅฆๅˆ™ๅฐฑไธ่ƒฝๆ‰พๅˆฐ.\n\n[ref](https://www.cnblogs.com/grandyang/p/4332313.html)", "_____no_output_____" ] ], [ [ "class Solution(object):\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n visited = {}\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, word, i, j, visited):\n return True\n return False\n \n def dfs(self, board, word, i, j, visited, pos=0):\n if len(word) == pos:\n return True\n if i < 0 or i == len(board) or j < 0 or j == len(board[0]) or visited.get((i,j)) \\\n or word[pos] != board[i][j]:\n return False\n \n visited[(i,j)] = True\n \n res = self.dfs(board, word, i, j+1, visited, pos+1) \\\n or self.dfs(board, word, i, j-1, visited, pos+1) \\\n or self.dfs(board, word, i+1, j, visited, pos+1) \\\n or self.dfs(board, word, i-1, j,visited, pos+1)\n \n visited[(i,j)] = False\n \n return res\n \n# test\nboard =[\n ['A','B','C','E'],\n ['S','F','C','S'],\n ['A','D','E','E']\n]\nword = \"ABCCED\"\nSolution().exist(board,word)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ] ]
cb11d456cd2e36cd0ca4ab12ad5155a998af841a
944,241
ipynb
Jupyter Notebook
docs/experiments/recruitment_across_datasets.ipynb
AishwaryaSeth/ProgLearn
31b0f3d28ee4714f958e2680622793672cb8134c
[ "MIT" ]
18
2020-05-17T21:56:36.000Z
2020-09-18T17:39:26.000Z
docs/experiments/recruitment_across_datasets.ipynb
AishwaryaSeth/ProgLearn
31b0f3d28ee4714f958e2680622793672cb8134c
[ "MIT" ]
209
2020-06-05T19:08:51.000Z
2020-10-03T16:49:39.000Z
docs/experiments/recruitment_across_datasets.ipynb
AishwaryaSeth/ProgLearn
31b0f3d28ee4714f958e2680622793672cb8134c
[ "MIT" ]
33
2020-06-10T23:12:09.000Z
2020-09-28T05:09:44.000Z
1,430.668182
126,116
0.958306
[ [ [ "# Recruitment Across Datasets\n\nIn this notebook, we further examine the capability of ODIF to transfer across datasets, building upon the prior FTE/BTE experiments on MNIST and Fashion-MNIST. Using the datasets found in [this repo](https://github.com/neurodata/LLF_tidy_images), we perform a series of experiments to evaluate the transfer efficiency and recruitment capabilities of ODIF across five different datasets. The datasets and their content are as follows:\n- Caltech-101: contains images of objects in 101 categories\n- CIFAR-10: contains 32x32 color images of objects in 10 classes\n- CIFAR-100: contains 32x32 color images of objects in 100 classes\n- Food-101: contains images of dishes in 101 categories\n- DTD: contains images of describable textures", "_____no_output_____" ] ], [ [ "import functions.recruitacrossdatasets_functions as fn", "_____no_output_____" ] ], [ [ "**Note:** This notebook tutorial uses functions stored externally within `functions/recruitacrossdatasets_functions.py` to simplify presentation of code. These functions are imported above, along with other libraries.", "_____no_output_____" ], [ "## FTE/BTE Experiment\n\nWe begin our examination of ODIF's transfer capabilities across datasets with the FTE/BTE experiment, which provides background metrics for what the expected performance should be. This helps inform the later recruitment experiment.", "_____no_output_____" ], [ "### Base Experiment\n#### Import and Process Data\n\nLet's first import the data and perform some preprocessing so that it is in the correct format for feeding to ODIF. The following function does so for us:", "_____no_output_____" ] ], [ [ "data, classes = fn.import_data(normalize=False)", "_____no_output_____" ] ], [ [ "#### Define Hyperparameters\n\nWe then define the hyperparameters to be used for the experiment:\n- `model`: model to be used for FTE/BTE experiment\n- `num_tasks`: number of tasks\n- `num_trees`: nuber of trees\n- `reps`: number of repetitions, fewer than actual figures to reduce running time", "_____no_output_____" ] ], [ [ "##### MAIN HYPERPARAMS ##################\nmodel = \"odif\"\nnum_tasks = 5\nnum_trees = 10\nreps = 4\n#########################################", "_____no_output_____" ] ], [ [ "Taking each dataset as a separate task, we have `5` tasks, and we also set a default of `10` trees, with the experiment being run for `30` reps.\n\nNote, in comparison to previous FTE/BTE experiments, the lack of the `num_points_per_task` parameter. 
Here, we sample based on the label with the least number of samples and take 31 samples from each label.", "_____no_output_____" ], [ "#### Run Experiment and Plot Results\n\nFirst, we call the function to run the experiment:", "_____no_output_____" ] ], [ [ "accuracy_all_task = fn.ftebte_exp(\n data, classes, model, num_tasks, num_trees, reps, shift=0\n)", "_____no_output_____" ] ], [ [ "Using the accuracies over all tasks, we can calculate the error, the forwards transfer efficiency (FTE), the backwards transfer efficiency (BTE), and the overall transfer efficiency (TE).", "_____no_output_____" ] ], [ [ "err, bte, fte, te = fn.get_metrics(accuracy_all_task, num_tasks)", "_____no_output_____" ] ], [ [ "These results are therefore plotted using the function as follows:", "_____no_output_____" ] ], [ [ "fn.plot_ftebte(num_tasks, err, bte, fte, te)", "_____no_output_____" ] ], [ [ "As can be seen from above, there is generally positive forwards and backwards transfer efficiency when evaluating transfer across datasets, even though the datasets contained very different content.", "_____no_output_____" ], [ "### Varying the Number of Trees\n\nWe were also curious how changing the number of trees would affect the results of the FTE/BTE experiment across datasets, and therefore also reran the experiment using `50` trees:", "_____no_output_____" ] ], [ [ "##### MAIN HYPERPARAMS ##################\nmodel = \"odif\"\nnum_tasks = 5\nnum_trees = 50\nreps = 4\n#########################################", "_____no_output_____" ] ], [ [ "Running the experiment, we find the following results:", "_____no_output_____" ] ], [ [ "accuracy_all_task = fn.ftebte_exp(\n data, classes, model, num_tasks, num_trees, reps, shift=0\n)\nerr, bte, fte, te = fn.get_metrics(accuracy_all_task, num_tasks)\nfn.plot_ftebte(num_tasks, err, bte, fte, te)", "_____no_output_____" ] ], [ [ "It seems as if more trees leads to lower transfer efficiency. \n\nWe use `10` trees for the remainder of the experiments to save on computing power.", "_____no_output_____" ], [ "## Recruitment Experiment\n\nNow that we have roughly assessed the performance of ODIF via the FTE/BTE experiment, we are also interested in which recruitment scheme works the best for this set of data.\n\n### Base Experiment\n\nTo quickly reiterate some of the background on the recruitment experiment, there are generally two main schemes for developing lifelong learning algorithms: building and reallocating. The former involves adding new resources as new data comes in, whereas the latter involves compressing current representations to make room for new ones. We want to examine whether current resources could be better leveraged by testing a range of approaches:\n1. **Building (default for Omnidirectional Forest):** train `num_trees` new trees\n2. **Uncertainty forest:** ignore all prior trees\n3. **Recruiting:** select `num_trees` (out of all 450 existing trees) that perform best on the newly introduced 10th task\n4. 
**Hybrid:** builds `num_trees/2` new trees AND recruits `num_trees/2` best-forming trees\n\nWe compare the results of these approaches based on varying training sample sizes, in the range of `[1, 5, 10, 25]` samples per label.", "_____no_output_____" ], [ "#### Define Hyperparameters\n\nAs always, we define the hyperparameters:\n- `num_tasks`: number of tasks\n- `num_trees`: nuber of trees\n- `reps`: number of repetitions\n- `estimation_set`: size of set used to train for the last task, as a proportion (`1-estimation_set` is the size of the set used for validation, aka the selection of best trees)", "_____no_output_____" ] ], [ [ "############################\n### Main hyperparameters ###\n############################\nnum_tasks = 5\nnum_trees = 10\nreps = 4\nestimation_set = 0.63", "_____no_output_____" ] ], [ [ "#### Run Experiment and Plot Results\n\nWe call our experiment function and input the main hyperparameters:", "_____no_output_____" ] ], [ [ "# run recruitment experiment\nmeans, stds, last_task_sample = fn.recruitment_exp(\n data, classes, num_tasks, num_trees, reps, estimation_set, shift=0\n)", "_____no_output_____" ] ], [ [ "And then we plot the results:", "_____no_output_____" ] ], [ [ "# plot results\nfn.recruitment_plot(means, stds, last_task_sample, num_tasks)", "_____no_output_____" ] ], [ [ "We therefore see that though generalization error remains high on the final task, the lifelong learning algorithm still outperforms the other recruitment schemes overall.", "_____no_output_____" ], [ "### Shifting Dataset Order\n\nSince the above experiment involves fixing DTD as the final dataset, a further experiment involves shifting the order of datasets, so that there is a different dataset as task 5 each time. This allows us to see whether different dataset content would significantly impact the results on the final task. \n\nTo do so, we define the `shift` parameter in our call to the `recruitment_exp` function. This, in turn, calls the `shift_data` function, which moves the first task to the end and thus reorders the sequence of tasks.\n\nMore specifically, if we define `shift=1`, as done below, we would get the following order of datasets:\n1. CIFAR-10\n2. CIFAR-100\n3. Food-101\n4. DTD\n5. Caltech-101", "_____no_output_____" ] ], [ [ "# run recruitment experiment\nmeans, stds, last_task_sample = fn.recruitment_exp(\n data, classes, num_tasks, num_trees, reps, estimation_set, shift=1\n)\n# plot results\nfn.recruitment_plot(means, stds, last_task_sample, num_tasks)", "_____no_output_____" ] ], [ [ "A `shift=2` results in a dataset order of:\n1. CIFAR-100\n2. Food-101\n3. DTD\n4. Caltech-101\n5. CIFAR-10", "_____no_output_____" ] ], [ [ "# run recruitment experiment\nmeans, stds, last_task_sample = fn.recruitment_exp(\n data, classes, num_tasks, num_trees, reps, estimation_set, shift=2\n)\n# plot results\nfn.recruitment_plot(means, stds, last_task_sample, num_tasks)", "_____no_output_____" ] ], [ [ "`shift=3` gives us:\n1. Food-101\n2. DTD\n3. Caltech-101\n4. CIFAR-10\n5. CIFAR-100", "_____no_output_____" ] ], [ [ "# run recruitment experiment\nmeans, stds, last_task_sample = fn.recruitment_exp(\n data, classes, num_tasks, num_trees, reps, estimation_set, shift=3\n)\n# plot results\nfn.recruitment_plot(means, stds, last_task_sample, num_tasks)", "_____no_output_____" ] ], [ [ "And finally, `shift=4` yields:\n1. DTD\n2. Caltech-101\n3. CIFAR-10\n4. CIFAR-100\n5. 
Food-101", "_____no_output_____" ] ], [ [ "# run recruitment experiment\nmeans, stds, last_task_sample = fn.recruitment_exp(\n data, classes, num_tasks, num_trees, reps, estimation_set, shift=4\n)\n# plot results\nfn.recruitment_plot(means, stds, last_task_sample, num_tasks)", "_____no_output_____" ] ], [ [ "Throughout all the above experiments, even though generalization error remains high due to the sheer amount of different labels across all the different datsets, our lifelong learning algorithm still outperforms the other recruitment methods.", "_____no_output_____" ], [ "## Other Experiments", "_____no_output_____" ], [ "### Effect of Normalization\n\nWhen examining data across different datasets, normalization and standardization of data is often of interest. However, this can also lead to loss of information, as we are placing all the images on the same scale. As a final experiment, we also look into the effect of normalization on the FTE/BTE results.", "_____no_output_____" ], [ "#### Import and Process Data\n\nThe `import_data` function has a `normalize` parameter, where one can specify whether they want to normalize the data, normalize across the dataset, or just normalize across each image. Previously, for the original FTE/BTE experiment, we set `normalize=False`.\n\nHere, we look at the other two options.", "_____no_output_____" ] ], [ [ "# normalize across dataset\ndata1, classes1 = fn.import_data(normalize=\"dataset\")", "_____no_output_____" ], [ "# normalize across each image\ndata2, classes2 = fn.import_data(normalize=\"image\")", "_____no_output_____" ] ], [ [ "#### Define Hyperparameters\n\nWe use the same parameters as before:", "_____no_output_____" ] ], [ [ "##### MAIN HYPERPARAMS ##################\nmodel = \"odif\"\nnum_tasks = 5\nnum_trees = 10\nreps = 4\n#########################################", "_____no_output_____" ] ], [ [ "#### Run Experiment and Plot Results\n\nWe first run the FTE/BTE experiment by normalizing across each dataset, such that the images in each dataset have a range of [0,1] in each channel.", "_____no_output_____" ] ], [ [ "accuracy_all_task = fn.ftebte_exp(\n data1, classes1, model, num_tasks, num_trees, reps, shift=0\n)\nerr, bte, fte, te = fn.get_metrics(accuracy_all_task, num_tasks)\nfn.plot_ftebte(num_tasks, err, bte, fte, te)", "_____no_output_____" ] ], [ [ "We then run the FTE/BTE experiment with normalizing per image, so that each channel in each image is scaled to a range of [0,1].", "_____no_output_____" ] ], [ [ "accuracy_all_task = fn.ftebte_exp(\n data2, classes2, model, num_tasks, num_trees, reps, shift=0\n)\nerr, bte, fte, te = fn.get_metrics(accuracy_all_task, num_tasks)\nfn.plot_ftebte(num_tasks, err, bte, fte, te)", "_____no_output_____" ] ], [ [ "It seems as if normalizing both across the dataset and within each image yield relatively similar results to not normalizing, so we did not perform further experiments to explore this area more at the current point in time.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb11da8630ab0611ab3c55fe442de6c57566343a
103,347
ipynb
Jupyter Notebook
browser_libs.ipynb
gkovacs/habitlab-conservation-analysis
3ac52c4b5ab65d54cf6da0441bca829765ed21ec
[ "MIT" ]
null
null
null
browser_libs.ipynb
gkovacs/habitlab-conservation-analysis
3ac52c4b5ab65d54cf6da0441bca829765ed21ec
[ "MIT" ]
null
null
null
browser_libs.ipynb
gkovacs/habitlab-conservation-analysis
3ac52c4b5ab65d54cf6da0441bca829765ed21ec
[ "MIT" ]
null
null
null
39.17627
1,699
0.662767
[ [ [ "# noexport\n\nimport os\nos.system('export_notebook browser_libs.ipynb')", "_____no_output_____" ], [ "import os\nif 'R_HOME' not in os.environ:\n os.environ['R_HOME'] = '/usr/lib/R'", "_____no_output_____" ], [ "%load_ext rpy2.ipython", "_____no_output_____" ], [ "import json\nimport urllib.request as req\nfrom memoize import memoize # pip install memoize2\nfrom pymongo import MongoClient\nfrom getsecret import getsecret\nimport urllib.parse\nimport moment\nimport datetime\nimport pandas as pd", "_____no_output_____" ], [ "#def get_user_to_all_install_ids():\n# user_to_install = json.loads(req.urlopen(\"http://localhost:5001/get_user_to_all_install_ids\").read().decode(\"utf-8\"))\n# return user_to_install\n\n#def get_collection_names():\n# collection_names = json.loads(req.urlopen(\"http://localhost:5001/listcollections\").read().decode(\"utf-8\"))\n# return collection_names\n\ndef get_session_info_list_for_user(userid):\n output = json.loads(req.urlopen(\"http://localhost:5001/get_session_info_list_for_user?userid=\" + userid).read().decode(\"utf-8\"))\n return output", "_____no_output_____" ], [ "#collection_names = get_collection_items('collections')\n#print(len(collection_names))\n#print(get_collection_for_user('e0ea34c81d4b50cddc7bd752', 'synced:seconds_on_domain_per_session')[0])", "_____no_output_____" ], [ "@memoize\ndef download_url(url):\n return req.urlopen(url).read().decode(\"utf-8\")\n\ndef getjson(path, params={}):\n querystring = urllib.parse.urlencode(params)\n url = 'http://localhost:5001/' + path + '?' + querystring\n return json.loads(download_url(url))\n\ndef make_getjson_func(path, *param_list):\n def f(*arg_list):\n if len(param_list) != len(arg_list):\n print('missing some number of arguments. expected parameters: ' + str(param_list))\n param_dict = {}\n for param,arg in zip(param_list, arg_list):\n param_dict[param] = arg\n return getjson(path, param_dict)\n return f\n\ndef expose_getjson(func_name, *args):\n f = make_getjson_func(func_name, *args)\n globals()[func_name] = f\n return f \n", "_____no_output_____" ], [ "expose_getjson('get_session_info_list_for_user', 'userid')\n#expose_getjson('get_user_to_all_install_ids')\n\n#print(get_user_to_all_install_ids()['e0ea34c81d4b50cddc7bd752'])\n#get_session_info_list = make_getjson_func('get_session_info_list_for_user', 'userid')\n#print(get_session_info_list_for_user('e0ea34c81d4b50cddc7bd752')[0])\n#def get_user_to_all_install_ids(user):\n# return getjson", "_____no_output_____" ], [ "@memoize\ndef get_db(): # this is for the browser\n client = MongoClient(getsecret(\"EXT_URI\"))\n db = client[getsecret(\"DB_NAME\")]\n return db\n\n@memoize\ndef get_collection_items(collection_name):\n db = get_db()\n return [x for x in db[collection_name].find({})]\n\ndef get_collection_for_user(user, collection_name):\n return get_collection_items(user + '_' + collection_name)\n", "_____no_output_____" ], [ "def get_collection_names():\n collection_names = get_collection_items('collections')\n return [x['_id'] for x in collection_names]\n\ndef get_users_with_goal_frequency_set():\n output = []\n collection_names = get_collection_names()\n for collection_name in collection_names:\n if not collection_name.endswith('_synced:goal_frequencies'):\n continue\n username = collection_name.replace('_synced:goal_frequencies', '')\n output.append(username)\n return output\n", "_____no_output_____" ], [ "@memoize\ndef get_user_to_all_install_ids():\n install_info_list = get_collection_items('installs')\n output = {}\n for install_info 
in install_info_list:\n if 'user_id' not in install_info:\n continue\n user_id = install_info['user_id']\n install_id = install_info.get('install_id', None)\n if user_id not in output:\n output[user_id] = []\n if install_id not in output[user_id]:\n output[user_id].append(install_id)\n return output\n\n#print(get_user_to_all_install_ids()['e0ea34c81d4b50cddc7bd752'])", "_____no_output_____" ], [ "@memoize\ndef get_all_install_ids_for_user(user):\n seconds_on_domain_per_session = get_collection_for_user(user, 'synced:seconds_on_domain_per_session')\n interventions_active_for_domain_and_session = get_collection_for_user(user, 'synced:interventions_active_for_domain_and_session')\n user_to_all_install_ids = get_user_to_all_install_ids()\n output = []\n output_set = set()\n if user in user_to_all_install_ids:\n for install_id in user_to_all_install_ids[user]:\n if install_id not in output_set:\n output_set.add(install_id)\n output.append(install_id)\n for item in seconds_on_domain_per_session:\n if 'install_id' not in item:\n continue\n install_id = item['install_id']\n if install_id not in output_set:\n output_set.add(install_id)\n output.append(install_id)\n for item in interventions_active_for_domain_and_session:\n if 'install_id' not in item:\n continue\n install_id = item['install_id']\n if install_id not in output_set:\n output_set.add(install_id)\n output.append(install_id)\n return output\n\n@memoize\ndef get_is_user_unofficial(user):\n seconds_on_domain_per_session = get_collection_for_user(user, 'synced:seconds_on_domain_per_session')\n interventions_active_for_domain_and_session = get_collection_for_user(user, 'synced:interventions_active_for_domain_and_session')\n #print(seconds_on_domain_per_session[0])\n #print(seconds_on_domain_per_session[0]['developer_mode'])\n for item in seconds_on_domain_per_session:\n if 'unofficial_version' in item:\n return True\n if 'developer_mode' in item and item['developer_mode'] == True:\n return True\n return False\n\n@memoize\ndef get_is_valid_user(user):\n install_ids = get_all_install_ids_for_user(user)\n if len(install_ids) != 1:\n return False\n return True\n\n@memoize\ndef get_valid_user_list():\n user_list = get_users_with_goal_frequency_set()\n output = []\n for user in user_list:\n if not get_is_valid_user(user):\n continue\n output.append(user)\n return output\n\n#get_sessions_for_user('e0ea34c81d4b50cddc7bd752')\n#valid_user_list = get_valid_user_list()\n#print(len(valid_user_list))", "_____no_output_____" ], [ "#get_is_user_unofficial('e0ea34c81d4b50cddc7bd752')\n#get_is_user_unofficial('c11e5f2d93f249b5083989b2')", "_____no_output_____" ], [ "'''\nfunction convert_date_to_epoch(date) {\n let start_of_epoch = moment().year(2016).month(0).date(1).hours(0).minutes(0).seconds(0).milliseconds(0)\n let year = parseInt(date.substr(0, 4))\n let month = parseInt(date.substr(4, 2)) - 1\n let day = parseInt(date.substr(6, 2))\n let date_moment = moment().year(year).month(month).date(day).hours(0).minutes(0).seconds(0).milliseconds(0)\n return date_moment.diff(start_of_epoch, 'days')\n}\n\nfunction convert_epoch_to_date(epoch) {\n let start_of_epoch = moment().year(2016).month(0).date(1).hours(0).minutes(0).seconds(0).milliseconds(0)\n start_of_epoch.add(epoch, 'days')\n return start_of_epoch.format('YYYYMMDD')\n}\n\nfunction timestamp_to_epoch(timestamp) {\n let start_of_epoch = moment().year(2016).month(0).date(1).hours(0).minutes(0).seconds(0).milliseconds(0)\n return moment(timestamp).diff(start_of_epoch, 'days')\n}\n'''\n\ndef 
convert_date_to_epoch(date):\n #start_of_epoch = moment.now().timezone(\"US/Pacific\").replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n start_of_epoch = moment.now().replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n #date_moment = moment.now().timezone(\"US/Pacific\").replace(years=year, months=month, days=day, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n date_moment = moment.now().replace(years=year, months=month, days=day, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n return date_moment.diff(start_of_epoch).days\n\ndef convert_epoch_to_date(epoch):\n #start_of_epoch = moment.now().timezone(\"US/Pacific\").replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n start_of_epoch = moment.now().replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n start_of_epoch.add(days=epoch)\n return start_of_epoch.format('YYYYMMDD')\n\ndef timestamp_to_epoch(timestamp):\n #start_of_epoch = moment.now().timezone(\"US/Pacific\").replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n #return moment.unix(timestamp).timezone(\"US/Pacific\").diff(start_of_epoch).days\n start_of_epoch = moment.now().replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n return moment.unix(timestamp).diff(start_of_epoch).days\n\ndef timestamp_to_isoweek(timestamp):\n isoWeek = int(datetime.datetime.fromtimestamp(timestamp/1000).isocalendar()[1]) \n return isoWeek\n\ndef epoch_to_isoweek(epoch):\n #start_of_epoch = moment.now().timezone(\"US/Pacific\").replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n start_of_epoch = moment.now().replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n start_of_epoch.add(days=epoch)\n timestamp_seconds = start_of_epoch.epoch()\n isoWeek = int(datetime.datetime.fromtimestamp(timestamp_seconds).isocalendar()[1])\n return isoWeek\n\n#print(timestamp_to_epoch(1537059309631))\n#print(convert_epoch_to_date(988))\n#print(convert_date_to_epoch('20180915'))\n#print(convert_date_to_epoch('20180917'))\n#print(epoch_to_isoweek(990))", "_____no_output_____" ], [ "#a=moment.unix(1537221946630)\n#dir(a)\n#print(timestamp_to_isoweek(1537221946630))", "_____no_output_____" ], [ "'''\n@memoize\ndef get_frequency_info_for_user_epoch(user, epochnum):\n # returns a dictionary mapping goal name -> 1 if frequent, 0 if infrequent\n isoweek_input = epoch_to_isoweek(epochnum)\n goal_frequencies = get_collection_for_user(user, 'synced:goal_frequencies')\n output = {}\n conflict_info_list = []\n for item in goal_frequencies:\n timestamp_local = item['timestamp_local']\n isoweek_local = timestamp_to_isoweek(timestamp_local)\n algorithm_info = json.loads(item['val'])\n algorithm_name = algorithm_info['algorithm']\n onweeks = algorithm_info['onweeks']\n timestamp = algorithm_info['timestamp']\n if algorithm_name == 'isoweek_random':\n is_frequent = onweeks[isoweek_input] == 1\n elif algorithm_name == 'isoweek_alternating':\n is_frequent = isoweek_input % 2 == onweeks\n else:\n raise Exception('unknown frequency selection algorithm ' + algorithm)\n goal = item['key']\n if goal in output:\n conflict_info = {'item': item, 
'existing_is_frequent': output[goal], 'is_frequent': is_frequent}\n conflict_info_list.append(conflict_info)\n continue\n output[goal] = is_frequent\n #print(goal)\n #print(is_frequent)\n #print(algorithm_info)\n #print(item)\n return output\n'''\n\n@memoize\ndef get_frequency_info_for_user_epoch(user, epochnum):\n # returns a dictionary mapping goal name -> 1 if frequent, 0 if infrequent\n isoweek_input = epoch_to_isoweek(epochnum)\n goal_frequencies = get_collection_for_user(user, 'synced:goal_frequencies')\n output = {}\n conflict_info_list = []\n goal_frequencies.sort(key=lambda x: x['timestamp'])\n for item in goal_frequencies:\n timestamp_local = item['timestamp_local']\n isoweek_local = timestamp_to_isoweek(timestamp_local)\n algorithm_info = json.loads(item['val'])\n algorithm_name = algorithm_info['algorithm']\n onweeks = algorithm_info['onweeks']\n timestamp = algorithm_info['timestamp']\n if algorithm_name == 'isoweek_random':\n is_frequent = onweeks[isoweek_input] == 1\n elif algorithm_name == 'isoweek_alternating':\n is_frequent = isoweek_input % 2 == onweeks\n else:\n raise Exception('unknown frequency selection algorithm ' + algorithm)\n goal = item['key']\n #if goal in output:\n # conflict_info = {'item': item, 'existing_is_frequent': output[goal], 'is_frequent': is_frequent}\n # conflict_info_list.append(conflict_info)\n # continue\n output[goal] = is_frequent\n #print(goal)\n #print(is_frequent)\n #print(algorithm_info)\n #print(item)\n return output\n\n\n\ndef get_is_goal_frequent_for_user_on_domain_at_epoch(user, target_domain, epochnum):\n goal_to_frequency_info = get_frequency_info_for_user_epoch(user, epochnum)\n for goal_name,is_frequent in goal_to_frequency_info.items():\n domain = get_domain_for_goal(goal_name)\n if domain == target_domain:\n return is_frequent\n # we probably shouldn't have gotten here\n return False\n \n\n#def get_frequency_info_for_goal_on_timestamp(user, goal, )\n\n#print(get_frequency_info_for_user_epoch('c11e5f2d93f249b5083989b2', 990))\n#print(get_is_goal_frequent_for_user_on_domain_at_epoch('c11e5f2d93f249b5083989b2', 'www.youtube.com', 990))", "_____no_output_____" ], [ "@memoize\ndef get_goals_enabled_for_user_sorted_by_timestamp(user):\n goal_info_list = get_collection_for_user(user, 'logs:goals')\n goal_info_list_sorted = []\n for goal_info in goal_info_list:\n if 'timestamp_local' not in goal_info:\n continue\n goal_info_list_sorted.append(goal_info)\n goal_info_list_sorted.sort(key=lambda k: k['timestamp_local'])\n return goal_info_list_sorted\n\ndef get_goals_enabled_for_user_at_timestamp(user, target_timestamp_local):\n goal_info_list_sorted = get_goals_enabled_for_user_sorted_by_timestamp(user)\n enabled_goals = {}\n for goal_info in goal_info_list_sorted:\n # note this can be replaced with binary search if it is slow\n timestamp_local = goal_info['timestamp_local']\n if timestamp_local > target_timestamp_local:\n return enabled_goals\n enabled_goals = goal_info['enabled_goals']\n return enabled_goals\n\ndef get_is_goal_enabled_for_user_at_timestamp(user, target_goal_name, target_timestamp_local):\n goals_enabled_dictionary = get_goals_enabled_for_user_at_timestamp(user, target_timestamp_local)\n for goal_name,is_enabled in goals_enabled_dictionary.items():\n if goal_name == target_goal_name:\n return is_enabled\n return False\n\ndef get_is_goal_enabled_for_user_on_domain_at_timestamp(user, target_domain, target_timestamp_local):\n goals_enabled_dictionary = get_goals_enabled_for_user_at_timestamp(user, target_timestamp_local)\n 
for goal_name,is_enabled in goals_enabled_dictionary.items():\n domain = get_domain_for_goal(goal_name)\n if domain == target_domain:\n return is_enabled\n return False\n \n#print(get_goals_enabled_for_user_sorted_by_timestamp('c11e5f2d93f249b5083989b2')[0])\n#print(get_goals_active_for_user_at_timestep('c11e5f2d93f249b5083989b2', 1533450980492.0))", "_____no_output_____" ], [ "@memoize\ndef get_goal_intervention_info():\n return json.load(open('goal_intervention_info.json'))\n\n@memoize\ndef get_goal_info_list():\n goal_intervention_info = get_goal_intervention_info()\n return goal_intervention_info['goals']\n\n@memoize\ndef get_goal_info_dict():\n goal_info_list = get_goal_info_list()\n output = {}\n for goal_info in goal_info_list:\n goal_name = goal_info['name']\n output[goal_name] = goal_info\n return output\n\n@memoize\ndef get_domain_for_goal(goal_name):\n goal_info_dict = get_goal_info_dict()\n if goal_name in goal_info_dict:\n return goal_info_dict[goal_name]['domain']\n if goal_name.startswith('custom/spend_less_time_'): # custom/spend_less_time_www.tumblr.com\n return goal_name[23:] # 23 == len('custom/spend_less_time_www.tumblr.com')\n raise Exception('could not find domain for goal ' + goal_name)\n\n#get_goal_info_dict()\n#print(get_domain_for_goal('youtube/spend_less_time'))\n#print(get_domain_for_goal('custom/spend_less_time_www.tumblr.com'))", "_____no_output_____" ], [ "def get_sessions_for_user(user):\n seconds_on_domain_per_session = get_collection_for_user(user, 'synced:seconds_on_domain_per_session')\n interventions_active_for_domain_and_session = get_collection_for_user(user, 'synced:interventions_active_for_domain_and_session')\n #print(seconds_on_domain_per_session[0])\n #print(interventions_active_for_domain_and_session[0])\n output = []\n domain_to_session_id_to_duration_info = {}\n domain_to_session_id_to_intervention_info = {}\n interventions_deployed_with_no_duration_info = []\n seconds_on_domain_per_session.sort(key=lambda k: k['timestamp_local'])\n for item in seconds_on_domain_per_session:\n domain = item['key']\n session_id = item['key2']\n if domain not in domain_to_session_id_to_duration_info:\n domain_to_session_id_to_duration_info[domain] = {}\n domain_to_session_id_to_duration_info[domain][session_id] = item\n for item in interventions_active_for_domain_and_session:\n domain = item['key']\n session_id = item['key2']\n if domain not in domain_to_session_id_to_intervention_info:\n domain_to_session_id_to_intervention_info[domain] = {}\n domain_to_session_id_to_intervention_info[domain][session_id] = item\n domain_to_session_id_to_info = {}\n domain_session_id_pairs = []\n for item in seconds_on_domain_per_session:\n domain = item['key']\n session_id = item['key2']\n duration = item['val']\n timestamp_local = item['timestamp_local']\n timestamp = item['timestamp']\n if domain not in domain_to_session_id_to_info:\n domain_to_session_id_to_info[domain] = {}\n if session_id not in domain_to_session_id_to_info[domain]:\n domain_session_id_pairs.append([domain, session_id])\n domain_to_session_id_to_info[domain][session_id] = {\n 'duration': duration,\n 'timestamp_local': timestamp_local,\n 'timestamp': timestamp,\n 'timestamp_local_last': timestamp_local,\n 'timestamp_last': timestamp,\n }\n info = domain_to_session_id_to_info[domain][session_id]\n info['duration'] = max(duration, info['duration'])\n info['timestamp_local'] = min(timestamp_local, info['timestamp_local'])\n info['timestamp'] = min(timestamp, info['timestamp'])\n info['timestamp_local_last'] = 
max(timestamp_local, info['timestamp_local_last'])\n info['timestamp_last'] = max(timestamp, info['timestamp_last'])\n \n #for item in seconds_on_domain_per_session:\n # #print(item)\n # domain = item['key']\n # session_id = item['key2']\n # duration = item['val']\n for [domain, session_id] in domain_session_id_pairs:\n item = domain_to_session_id_to_info[domain][session_id]\n #print(item)\n timestamp_local = item['timestamp_local']\n timestamp_local_last = item['timestamp_local_last']\n timestamp = item['timestamp']\n timestamp_last = item['timestamp_last']\n duration = item['duration']\n epoch_local = timestamp_to_epoch(timestamp_local)\n epoch_local_last = timestamp_to_epoch(timestamp_local_last)\n epoch = timestamp_to_epoch(timestamp)\n epoch_last = timestamp_to_epoch(timestamp_last)\n interventions_active_info = None\n interventions_active_list = None\n intervention_active = None\n have_intervention_info = False\n is_preview_mode = False\n is_suggestion_mode = False\n if (domain in domain_to_session_id_to_intervention_info) and (session_id in domain_to_session_id_to_intervention_info[domain]):\n interventions_active_info = domain_to_session_id_to_intervention_info[domain][session_id]\n interventions_active_list = json.loads(interventions_active_info['val'])\n if len(interventions_active_list) > 0:\n intervention_active = interventions_active_list[0]\n is_preview_mode = get_is_intervention_preview_mode(user, intervention_active, session_id)\n is_suggestion_mode = get_is_intervention_suggestion_mode(user, intervention_active, session_id)\n goals_enabled = get_goals_enabled_for_user_at_timestamp(user, timestamp_local)\n is_goal_enabled = get_is_goal_enabled_for_user_on_domain_at_timestamp(user, domain, timestamp_local)\n is_goal_frequent = get_is_goal_frequent_for_user_on_domain_at_epoch(user, domain, epoch_local)\n goal_to_frequency_info = get_frequency_info_for_user_epoch(user, epoch_local)\n output.append({\n 'domain': domain,\n 'session_id': session_id,\n 'is_goal_enabled': is_goal_enabled,\n 'is_goal_frequent': is_goal_frequent,\n 'is_preview_mode': is_preview_mode,\n 'is_suggestion_mode': is_suggestion_mode,\n 'intervention_active': intervention_active,\n 'duration': duration,\n 'timestamp_local': timestamp_local,\n 'timestamp': timestamp,\n 'timestamp_last': timestamp_last,\n 'timestamp_local_last': timestamp_local_last,\n 'epoch_local': epoch_local,\n 'epoch': epoch,\n 'epoch_local_last': epoch_local_last,\n 'epoch_last': epoch_last,\n })\n #if interventions_active_info != None and interventions_active_list != None and len(interventions_active_list) > 0:\n # print(domain)\n # print(is_goal_enabled)\n # print(intervention_active)\n # print(duration)\n # print(is_goal_frequent)\n # print(goals_enabled)\n # print(goal_to_frequency_info)\n # return\n # duration = item['val']\n # print(duration)\n return output\n\ndef get_is_intervention_preview_mode(user, intervention_name, session_id):\n intervention_info_list = get_intervention_info_list_for_user_intervention_session_id(user, intervention_name, session_id)\n for x in intervention_info_list:\n if 'is_preview_mode' in x and x['is_preview_mode'] == True:\n return True\n return False\n\ndef get_is_intervention_suggestion_mode(user, intervention_name, session_id):\n intervention_info_list = get_intervention_info_list_for_user_intervention_session_id(user, intervention_name, session_id)\n for x in intervention_info_list:\n if 'is_suggestion_mode' in x and x['is_suggestion_mode'] == True:\n return True\n return False\n\ndef 
have_intervention_info_for_session_id(user, intervention_name, session_id):\n intervention_info_list = get_intervention_info_list_for_user_intervention_session_id(user, intervention_name, session_id)\n return len(intervention_info_list) > 0\n\ndef get_intervention_info_list_for_user_intervention_session_id(user, intervention_name, session_id):\n session_to_intervention_info_list = get_session_id_to_intervention_info_list_for_user_and_intervention(user, intervention_name)\n if session_id not in session_to_intervention_info_list:\n return []\n return session_to_intervention_info_list[session_id]\n\n@memoize\ndef get_session_id_to_intervention_info_list_for_user_and_intervention(user, intervention_name):\n output = {}\n intervention_info_list = get_collection_for_user(user, intervention_name.replace('/', ':'))\n for x in intervention_info_list:\n if 'session_id' not in x:\n continue\n session_id = x['session_id']\n if session_id not in output:\n output[session_id] = []\n output[session_id].append(x)\n return output\n\ndef get_intervention_info_for_user_and_session(user, intervention_name, session_id):\n intervention_collection = get_collection_for_user(user, intervention_name.replace('/', ':'))\n print(intervention_collection)\n\n#print(get_sessions_for_user('c11e5f2d93f249b5083989b2'))\n#for session_info in get_sessions_for_user('c11e5f2d93f249b5083989b2'):\n# print(session_info)\n# break\n#print(get_intervention_info_for_user_and_session('c11e5f2d93f249b5083989b2', 'generated_www.tumblr.com/toast_notifications', 0))\n#print(get_session_id_to_intervention_info_list_for_user_and_intervention('c11e5f2d93f249b5083989b2', 'generated_www.tumblr.com/toast_notifications'))", "_____no_output_____" ], [ "#print(get_sessions_for_user('c11e5f2d93f249b5083989b2'))", "_____no_output_____" ], [ "#all_sessions_info_list = []\n#for user in get_valid_user_list():\n# print(user)\n# for info in get_sessions_for_user(user):\n# info['user'] = user\n# all_sessions_info_list.append(info)\n ", "_____no_output_____" ], [ "#print(get_sessions_for_user_by_day_and_goal('c11e5f2d93f249b5083989b2'))", "_____no_output_____" ], [ "def group_sessions_by_domain(session_info_list):\n output = {}\n for item in session_info_list:\n domain = item['domain']\n if domain not in output:\n output[domain] = []\n output[domain].append(item)\n return output\n\ndef group_sessions_by_epoch(session_info_list):\n output = {}\n for item in session_info_list:\n epoch = item['epoch_local']\n if epoch not in output:\n output[epoch] = []\n output[epoch].append(item)\n return output\n\ndef get_total_time_on_other_goal_domains(session_info_list_for_day, domain_to_exclude):\n output = 0\n for domain,session_info_list in group_sessions_by_domain(session_info_list_for_day).items():\n if domain == domain_to_exclude:\n continue\n for session_info in session_info_list:\n is_goal_frequent = session_info['is_goal_frequent']\n is_goal_enabled = session_info['is_goal_enabled']\n duration = session_info['duration']\n if is_goal_enabled != True:\n continue\n output += duration\n return output\n\ndef get_total_time_on_all_other_domains(session_info_list_for_day, domain_to_exclude):\n output = 0\n for domain,session_info_list in group_sessions_by_domain(session_info_list_for_day).items():\n if domain == domain_to_exclude:\n continue\n for session_info in session_info_list:\n is_goal_frequent = session_info['is_goal_frequent']\n is_goal_enabled = session_info['is_goal_enabled']\n duration = session_info['duration']\n output += duration\n return output\n\ndef 
get_total_time_when_goal_is_enabled(session_info_list):\n output = 0\n for session_info in session_info_list:\n is_goal_frequent = session_info['is_goal_frequent']\n is_goal_enabled = session_info['is_goal_enabled']\n duration = session_info['duration']\n if is_goal_enabled != True:\n continue\n output += duration\n return output\n\ndef get_is_goal_enabled_from_session_info_list(session_info_list):\n output = None\n for session_info in session_info_list:\n is_goal_frequent = session_info['is_goal_frequent']\n is_goal_enabled = session_info['is_goal_enabled']\n if output == None:\n output = is_goal_enabled\n elif output != is_goal_enabled:\n return 'inconsistent'\n return output\n\ndef get_is_goal_frequent_from_session_info_list(session_info_list):\n output = None\n for session_info in session_info_list:\n is_goal_frequent = session_info['is_goal_frequent']\n is_goal_enabled = session_info['is_goal_enabled']\n if output == None:\n output = is_goal_frequent\n elif output != is_goal_frequent:\n return 'inconsistent'\n return output\n\n#def get_is_goal_frequent\n\n@memoize\ndef get_domain_to_epoch_to_time_for_user(user):\n seconds_on_domain_per_day_items = get_collection_for_user(user, 'synced:seconds_on_domain_per_day')\n output = {}\n for item in seconds_on_domain_per_day_items:\n domain = item['key']\n epoch = item['key2']\n duration = item['val']\n if domain not in output:\n output[domain] = {}\n if epoch not in output[domain]:\n output[domain][epoch] = duration\n output[domain][epoch] = max(duration, output[domain][epoch])\n return output\n #print(seconds_on_domain_per_day_items[0])\n\n@memoize\ndef get_epoch_to_domain_to_time_for_user(user):\n seconds_on_domain_per_day_items = get_collection_for_user(user, 'synced:seconds_on_domain_per_day')\n output = {}\n for item in seconds_on_domain_per_day_items:\n domain = item['key']\n epoch = item['key2']\n duration = item['val']\n if epoch not in output:\n output[epoch] = {}\n if domain not in output[epoch]:\n output[epoch][domain] = duration\n output[epoch][domain] = max(duration, output[epoch][domain])\n return output\n\ndef get_time_on_domain_on_epoch_for_user(user, domain, epoch):\n epoch_to_domain_to_time = get_epoch_to_domain_to_time_for_user(user)\n if epoch not in epoch_to_domain_to_time:\n return 0\n if domain not in epoch_to_domain_to_time[epoch]:\n return 0\n return epoch_to_domain_to_time[epoch][domain]\n\n#print(get_sessions_for_user_by_day_and_goal('c11e5f2d93f249b5083989b2'))\n#print(get_domain_to_epoch_to_time_for_user('c11e5f2d93f249b5083989b2'))\n#print(get_time_on_domain_on_epoch_for_user('c11e5f2d93f249b5083989b2', 'www.cnn.com', 953)) # 75\n\ndef get_time_on_all_other_domains_on_epoch_for_user(user, target_domain, epoch):\n epoch_to_domain_to_time = get_epoch_to_domain_to_time_for_user(user)\n if epoch not in epoch_to_domain_to_time:\n return 0\n domain_to_time = epoch_to_domain_to_time[epoch]\n output = 0\n for domain,time in domain_to_time.items():\n if domain == target_domain:\n continue\n output += time\n return output\n\ndef get_time_on_epoch_on_domains_in_set_for_user_except(user, epoch, enabled_domains_set, target_domain):\n epoch_to_domain_to_time = get_epoch_to_domain_to_time_for_user(user)\n if epoch not in epoch_to_domain_to_time:\n return 0\n domain_to_time = epoch_to_domain_to_time[epoch]\n output = 0\n for domain,time in domain_to_time.items():\n if domain == target_domain:\n continue\n if domain not in enabled_domains_set:\n continue\n output += time\n return output\n\ndef difference_ratio(a, b):\n diff = 
abs(a - b)\n smaller = min(abs(a), abs(b))\n if smaller == 0:\n return 1\n return diff / smaller\n\ndef get_enabled_goal_domains_set_in_session_info_list(session_info_list):\n #output = []\n output_set = set()\n for session_info in session_info_list:\n is_goal_enabled = session_info['is_goal_enabled']\n domain = session_info['domain']\n if is_goal_enabled:\n if domain not in output_set:\n output_set.add(domain)\n #output.append(domain)\n return output_set\n\ndef get_sessions_for_user_by_day_and_goal(user):\n output = []\n session_info_list = get_sessions_for_user(user)\n sessions_grouped_by_epoch = group_sessions_by_epoch(session_info_list)\n epoch_list = sessions_grouped_by_epoch.keys()\n if len(epoch_list) == 0:\n return output\n first_epoch_for_user = max(epoch_list)\n last_epoch_for_user = min(epoch_list)\n for epoch,session_info_list_for_day in sessions_grouped_by_epoch.items():\n info_for_epoch = {}\n info_for_epoch['epoch'] = epoch\n info_for_epoch['days_since_install'] = epoch - first_epoch_for_user\n info_for_epoch['days_until_last'] = last_epoch_for_user - epoch\n info_for_epoch['domains_and_sessions'] = []\n enabled_domains_set = get_enabled_goal_domains_set_in_session_info_list(session_info_list_for_day)\n for domain,session_info_list_for_domain in group_sessions_by_domain(session_info_list_for_day).items():\n info_for_domain = {}\n info_for_domain['domain'] = domain\n this_goal_domain_total_time = get_total_time_when_goal_is_enabled(session_info_list_for_domain)\n other_goal_domain_total_time = get_total_time_on_other_goal_domains(session_info_list_for_day, domain)\n other_all_domain_total_time = get_total_time_on_all_other_domains(session_info_list_for_day, domain)\n info_for_domain['time_on_domain_today'] = this_goal_domain_total_time\n info_for_domain['time_on_domain_today_ref'] = get_time_on_domain_on_epoch_for_user(user, domain, epoch)\n info_for_domain['time_on_all_other_domains_today'] = other_all_domain_total_time\n info_for_domain['time_on_all_other_domains_today_ref'] = get_time_on_all_other_domains_on_epoch_for_user(user, domain, epoch)\n info_for_domain['time_on_other_goal_domains_today'] = other_goal_domain_total_time\n info_for_domain['time_on_other_goal_domains_today_ref'] = get_time_on_epoch_on_domains_in_set_for_user_except(user, epoch, enabled_domains_set, domain)\n info_for_domain['is_goal_enabled'] = get_is_goal_enabled_from_session_info_list(session_info_list_for_domain)\n info_for_domain['is_goal_frequent'] = get_is_goal_frequent_from_session_info_list(session_info_list_for_domain)\n info_for_domain['session_info_list_for_domain'] = session_info_list_for_domain\n info_for_epoch['domains_and_sessions'].append(info_for_domain)\n #print(json.dumps(info_for_epoch))\n #print(epoch)\n #print(domain)\n #print(this_goal_domain_total_time)\n #print(other_goal_domain_total_time)\n #print(session_info_list_for_domain)\n #return\n output.append(info_for_epoch)\n return output\n\n#get_sessions_for_user_by_day_and_goal('c11e5f2d93f249b5083989b2')", "_____no_output_____" ], [ "# noexport\n\n'''\ndef timestamp_to_epoch(timestamp):\n #start_of_epoch = moment.now().timezone(\"US/Pacific\").replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n #return moment.unix(timestamp).timezone(\"US/Pacific\").diff(start_of_epoch).days\n start_of_epoch = moment.now().replace(years=2016, months=1, days=1, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)\n return moment.unix(timestamp).diff(start_of_epoch).days\n'''\n\ndef 
test_inconsistent():\n #a = {'domain': 'www.youtube.com', 'time_on_domain_today': 26, 'time_on_domain_today_ref': 26, 'time_on_all_other_domains_today': 5754, 'time_on_all_other_domains_today_ref': 6509, 'time_on_other_goal_domains_today': 429, 'time_on_other_goal_domains_today_ref': 429, 'is_goal_enabled': True, 'is_goal_frequent': False, 'session_info_list_for_domain': [{'domain': 'www.youtube.com', 'session_id': 0, 'is_goal_enabled': True, 'is_goal_frequent': False, 'intervention_active': 'youtube/toast_notifications', 'duration': 26, 'timestamp_local': 1533455041603.0, 'timestamp': 1533456061598.0, 'timestamp_last': 1533456061598.0, 'timestamp_local_last': 1533455041603.0, 'epoch_local': 947, 'epoch': 947, 'epoch_local_last': 947, 'epoch_last': 947}]}\n #b = {'user': 'c11e5f2d93f249b5083989b2', 'epoch': 947, 'domain': 'www.youtube.com', 'time_on_domain_today': 26, 'time_on_domain_today_ref': 26, 'time_on_all_other_domains_today': 5754, 'time_on_all_other_domains_today_ref': 6509, 'time_on_other_goal_domains_today': 429, 'time_on_other_goal_domains_today_ref': 429, 'is_goal_enabled': True, 'is_goal_frequent': False, 'intensity': 0.8333333333333334, 'intensity_other_goals': 0.8, 'num_days_available_for_user': 34, 'is_user_unofficial': False, 'have_preview_sessions': False, 'have_suggestion_sessions': False, 'consistent_item': False}\n a = {'domain': 'www.reddit.com', 'time_on_domain_today': 287, 'time_on_domain_today_ref': 333, 'time_on_all_other_domains_today': 1996, 'time_on_all_other_domains_today_ref': 2309, 'time_on_other_goal_domains_today': 386, 'time_on_other_goal_domains_today_ref': 386, 'is_goal_enabled': True, 'is_goal_frequent': True, 'session_info_list_for_domain': [{'domain': 'www.reddit.com', 'session_id': 5, 'is_goal_enabled': True, 'is_goal_frequent': True, 'is_preview_mode': False, 'is_suggestion_mode': False, 'intervention_active': 'reddit/toast_notifications', 'duration': 40, 'timestamp_local': 1534011266360.0, 'timestamp': 1534011282343.0, 'timestamp_last': 1534011531865.0, 'timestamp_local_last': 1534011266360.0, 'epoch_local': 953, 'epoch': 953, 'epoch_local_last': 953, 'epoch_last': 953}, {'domain': 'www.reddit.com', 'session_id': 6, 'is_goal_enabled': True, 'is_goal_frequent': True, 'is_preview_mode': False, 'is_suggestion_mode': False, 'intervention_active': 'reddit/block_after_interval_per_visit', 'duration': 2, 'timestamp_local': 1534012345961.0, 'timestamp': 1534012468728.0, 'timestamp_last': 1534012468728.0, 'timestamp_local_last': 1534012345961.0, 'epoch_local': 953, 'epoch': 953, 'epoch_local_last': 953, 'epoch_last': 953}, {'domain': 'www.reddit.com', 'session_id': 7, 'is_goal_enabled': True, 'is_goal_frequent': True, 'is_preview_mode': False, 'is_suggestion_mode': False, 'intervention_active': 'reddit/toast_notifications', 'duration': 245, 'timestamp_local': 1534014045708.0, 'timestamp': 1534014076469.0, 'timestamp_last': 1534014329949.0, 'timestamp_local_last': 1534014045708.0, 'epoch_local': 953, 'epoch': 953, 'epoch_local_last': 953, 'epoch_last': 953}]}\n b = {'user': 'c11e5f2d93f249b5083989b2', 'epoch': 953, 'domain': 'www.reddit.com', 'time_on_domain_today': 287, 'time_on_domain_today_ref': 333, 'time_on_all_other_domains_today': 1996, 'time_on_all_other_domains_today_ref': 2309, 'time_on_other_goal_domains_today': 386, 'time_on_other_goal_domains_today_ref': 386, 'is_goal_enabled': True, 'is_goal_frequent': True, 'intensity': 1.0, 'intensity_other_goals': 1.0, 'num_days_available_for_user': 34, 'is_user_unofficial': False, 'have_preview_sessions': 
False, 'have_suggestion_sessions': False, 'consistent_item': False}\n\n epoch_target = b['epoch']\n #print(a)\n user = b['user']\n seconds_on_domain_per_session = get_collection_for_user(user, 'synced:seconds_on_domain_per_session')\n print(seconds_on_domain_per_session[0])\n epoch_to_domain_to_time = get_epoch_to_domain_to_time_for_user(user)\n domain_to_time = epoch_to_domain_to_time[epoch_target]\n print(sum(domain_to_time.values()))\n total = 0\n domain_to_session_id_to_time = {}\n for x in seconds_on_domain_per_session:\n timestamp_local = x['timestamp_local']\n domain = x['key']\n session_id = x['key2']\n epoch = timestamp_to_epoch(timestamp_local)\n if epoch != epoch_target:\n continue\n duration = x['val']\n if domain not in domain_to_session_id_to_time:\n domain_to_session_id_to_time[domain] = {}\n if session_id not in domain_to_session_id_to_time[domain]:\n domain_to_session_id_to_time[domain][session_id] = duration\n domain_to_session_id_to_time[domain][session_id] = max(domain_to_session_id_to_time[domain][session_id], duration)\n for domain,session_id_to_time in domain_to_session_id_to_time.items():\n for session_id,time in session_id_to_time.items():\n total += time\n print(total)\n for domain,ref_time in domain_to_time.items():\n session_id_to_time = domain_to_session_id_to_time[domain]\n calc_time = sum(session_id_to_time.values())\n if ref_time != calc_time:\n print(domain)\n print(ref_time)\n print(calc_time)\n\ntest_inconsistent()", "{'_id': ObjectId('5b66aeb9d206af0013cd7c33'), 'key': 'account.momentumdash.com', 'key2': 0, 'val': 20, 'synced': 0, 'timestamp': 1533456057678.0, 'userid': 'c11e5f2d93f249b5083989b2', 'install_id': 'e3a8a5a39bda2ef7c98a116b', 'collection': 'seconds_on_domain_per_session', 'habitlab_version': '1.0.238', 'timestamp_local': 1533453248519.0}\n2642\n6483\ngagadaily.com\n401\n562\nwww.google.com\n30\n244\nwww.reddit.com\n333\n519\nwww.youtube.com\n386\n2326\nwww.tumblr.com\n503\n1445\nen.wikipedia.org\n20\n201\n" ], [ "def get_sessions_for_user_by_day_and_goal_for_all_users():\n output = []\n for user in get_valid_user_list():\n print(user)\n info_for_user = {}\n info_for_user['user'] = user\n info_for_user['is_user_unofficial'] = get_is_user_unofficial(user)\n info_for_user['days_domains_and_sessions'] = get_sessions_for_user_by_day_and_goal(user)\n output.append(info_for_user)\n return output\n\n", "_____no_output_____" ], [ "# noexport\n\nall_session_info = get_sessions_for_user_by_day_and_goal_for_all_users()\n\nimport json\n\njson.dump(all_session_info, open('browser_all_session_info_sept18_v4_dell.json', 'w'))", 
"c11e5f2d93f249b5083989b2\n5d73eb22468ea27cdfdd5e2a\n2b5972416d3df31ad7b1c25f\n46f96a6ab8753a767bdd1b1b\n30134f4f9d24141ae0bca891\n11f367d7c4c9743aabbda7ff\ne6be8edae93ade10b9c7f78d\nd45023abf8d0638072dfce0e\n14fd34ba7978f5c31557000d\n4af81d6f6b903b27f34c5f41\n921b2a944cc8faa54efa7e89\n8ad445f80101c40301f1ca52\n9514fe891b9ee2a083cb7cb6\n3aa9e9b4f04c7f4d5a61b58e\ndd6e625a532e9dda1fcfc08e\n44f163249790127e475a4d22\n17e7de4f422a37ce6b911178\n68ce22b94204c4959b3bca47\nb2f7cba6e365326b0867adaa\n5655fa9ee5d384a75feacd10\nfd208d6ea1f97ac6e8d3c647\n790019388193eb88b81aca56\n30f66edc6f4522a2466d5ce0\n6b719706061bd70657d8908a\nbefa7276e957100eebe6bf2c\n4695deda6b34b39075b78589\nc8b0e3c4f63d4dc18538d44f\n8aa90cc3b2a3d1318b82999a\nd0653b1b154105cae6b20f5a\ned86421a4167fd4d84b5dc4a\n8e445b95a635466551c7fe9a\n7de231e89272243eef3728c9\nd56a186f73ff9c6f157e22f6\n413790b746780d4712adad4b\n31a2b705e857b148ca832ca6\nc6424c6fc44ce87b7313e4ea\nf6ac752bff889550a3b3f023\neba693ff2cc67e6a1530697d\n12d99769ecc3de0f63b3352d\n2fbd7c5f1fdb8daea83144a8\naeefb312a051af83e92b4357\n2d70dfa69ebe2178669b477c\needc347c635e403ba503e7a5\n663e5814d27f6e4f070fce5a\n93f5b666051403f6dedc9490\n3d09d074a8cdfc2952feae28\nce15cd45f4aaf5d961f0ecb2\n411d9a9e2121f39f523c477d\nf3d072224df8638db0a2ceb7\n831c61009997cfeaef75f009\na56e6dcdee1fea4923161408\nea891d9cc50061f8a3a35d29\n91e189a42eaf674e95c15b6d\n514e1fad850f91ea92e42934\n2d145f0ba7ed542dc44a8f2e\n8b32c7a5580fa42ac389c73b\nfa7fdcd842e2ac62c878a6ee\na60b8c7c2366a17d698f17d1\nd74156304561149ee1b65cc7\ne3c3fb3cf66b1107b6e67ff4\ndcf2b298ee54df12c314a4ad\na609c71fafd8b6f5e93e1c08\n4b679daec625d8402e318289\n01dee40e4a7ba83f45807af6\n23e17e23064ac431217b9a53\n3392fa5bc920805e390c568a\n102e4cd168d07d1bb50c73ce\n74555da774c222ee8692a379\n2317cffae9bde9f0f7c701b1\na385baf9f4ac009257216dc5\nb2d8a6aef625675232a277c2\nd8fc45352a490fc82a75efb7\n93784828b6daf7ca96fd68a7\n2f9f806b84690ddda9b3dcd2\n0a7a8fa532acc5f50174bfe6\n31081e4bff9251e65dd2cf97\n330c18608b94ff41b3fdeedd\n28ad15ec833aaa34161791f8\n58de4c71549bcd42492672c8\n71c65316ee628039717185a9\n8003c4d51219e6c8d11e4502\n5029a554e21b0a61a55dc84d\n6c71b5b694b9235346d1a818\n3a4a59723d547a6be36780c5\n89f2289ab5c1a98ab3b9df52\n6232e889000db7282a3cd7ee\n9a0fd39cff1f92bc44cccfce\n65ad84cc3ec358f14c310e45\n443f484e50b80b0a6125e5e2\n906f260159671fbf4ec7bd07\n53f95f25d8cbec10911cb105\nf19648ae889c9903459380ec\n35f1a00b3bd94d56c469203f\nf01d6c1b0831d7d6c6489e0e\n2590a1520ad31478a16b3463\n01315e0bd0c4b8edc54e918f\n130d08d1edac012fa98ce918\naf61aff28282bacc5c6cc0df\nbed2fa1fa6de7a1039c32ade\nb46f9ac36ba8aac9382dbff8\n4315593ab82f26c1ff7dd9cf\n1427c75a5da0d8e5c0baf9e4\nee4472a931445659a862fddc\nf9052f638ef50a807fe57b5f\na09f4d8b6326097b44238b62\n47f705e30ecff4ebf2a5522c\na1c0a91106325e1d1c9ef2fd\n251866ed3f0835233d40b479\nc5d97b2a268cf541992057d0\n8272ef2aee16628f2d3c225b\n0704584502a6cec4ac0daf94\nb3f05d6a8367fd02513b1668\n0095622d8fe7c619b1b198f8\n3a07550ec8e0328a7f48e2ea\n784f92ee1c85e2851acd844b\n9a0642946b24f2b10af89ed4\n37615573eea8075a109f0bef\na9763f662478e2f47aa378eb\nd0ef81f2e0516dc6f5d1ee69\n143b8486e02549c025001472\n4dbf973a445791c7bccdd2ee\n44f89bdbde8ed3d4919ad564\na5db6f48951d23868a572f47\n699985170c49bffd2b28061c\n88d5c6fb13cadcf3712af332\nac36c891f5b25533e21167f9\n76121ca2633b06376dc5c549\n3204ce1b391c0a4112db2eb1\n1c0f0dcd2c9877f1175d4629\n4b0a4975fc4c8a684a9222f6\nfaba3f482177619a20758567\n51a6d0a3a33e937927f6b8b9\n28caea4cf40290f7af2469ca\n0b7900bd1602e9fbeb890122\nca32e419e784de2b33c2442e\n814fb345482c6a8af1e0d5de\na8532fa84f6776ed23
616344\n4a95a22e5c6f7025fd4e394e\n1f1c589b0868e8f83d145e50\n96b45c45fc641d54cb1d2003\n4112a44f73dc746f1c522390\n39f44b9b02a7e119138ad998\n86e952c1bb61a92264299b43\n1d1be55845f55f469fb9726f\n096c3370481cfb6ca33c9b8d\n9cc960742cdca03908a0ac4b\nd97f556dd49116e9275ea372\nf68581f7d50d50cbbb48473c\n548dddd2e7b14d909c1a151e\nb59b467d24b22067df6bd11b\n85727c7e5e73b4b422803284\n7403f922225a649c5fc303c4\n4e9c0399deb0c8f4069da737\nd49e6ce9c7fadc10f3baf7bd\n6ef82c56be7f71ea81b285c5\ne69c9285e43025701d19c746\nc765fb71d498eb3846f03cd9\n61d6546a142837d50838011b\n67a93aa06208e9db4e383f3f\n15a850b860c220a0c11a7671\nc85fae4f63998b6af2f04993\n2ce399815dfb502a1e4ebe7e\nf966c3f07cdaf149d95e8650\n34b3087e326426b27a53bb20\nb4e23c9a2c8743171e372546\n111cb1c257c679f652aaa1b2\n87022e9d39313bd55dae13d0\n93cd25fc909f54386f218db2\n7dacec5ffdcb913631b20e50\n07f73480290ae981fb5be881\n86b2a8c8b5c9073b8947d3fb\nf180aced4b5d40760f41d2b1\n76728d9f8116f8bbafee8b62\n9a701aeadc4e0edb63f1ab74\n6671d4774d8136762779a558\n10f825312d06430d9df1d699\n808de2c3e71ccb044e34725a\n3048d6534d0bb4853a71b0d8\n42098ce97cb8b11b1cc2bbf4\na0b1a2b40b6ea985e4c28f30\ne84f421b57117aedadd6c5a5\n51907c18ef5d5dc836a5bdde\nfab562f3f00a1966e0a1798a\n2972961a04c458f7f6e1c4ab\nb61d24aed4823357408a53ac\n90c7da6c5847773604a23cca\naabc76884918adb9034f992d\n72cf3e73e5e4ef20e77015e7\n0363146f6187a919f95c9e5a\n2b6d8566a009816440e6a293\ncf14c75a29c21b59cc3a7825\n844c5b3799242dc2b7b74a0a\n270232473297dec6637b3c6b\nc16baf2aa7d96f72c458f138\n6d95e6ac54205c0b22b1a94c\n0dc0a46b91bacab27312f431\n2bad2e1f8ccd980acc333278\n4095b65dbb14fc0707babc28\n8850e54eba81d480971d220b\ne3c18ddaa43bee0e741f291e\nb81d5c416fd640328c399741\ne806f5ea67ee455fda71af5f\n1be4d7384df24d6931896daa\ne40818fa77a7838f699823e1\ned9a3d96f11129c5b2b3547f\n83b89da434f9be86b84ee09f\n255f60e9ba89824b9db956af\nffc59254a5714ddb99024168\nd62fe6f215ed77bf1f8e0a68\n52d88e6c10e52e086adc82de\n77fd92721e6825dfdf769fce\nb69aa34e2c0fdf00886044f9\n2cf7669015370bc1f4999195\n340e96b0ea91dbfa42adf05b\n801437c34080d38d5e98b943\n0f82068a6b0d386a0500aafa\n4de0c06edddf4eab8a274b85\nfdf6b314bc7f0df6146c31d3\nc4a35374ce280d888515ab69\n62898d434b8217836f32c6c7\n830916fa437c12a5eddede9e\n8820761fbadea58f2873f181\nb5dea3df409fd76b3bdb87c0\n42fd386c9acb93871788ee25\n752869fa21afd6963705a260\nc1527297bee36a6bd7cf3198\nc8e5d039e659018198d39b55\n01a12d33c548ebf4a40823c5\n0cfcf76461af97a8741a3182\n2c09357feed7fbdb33498465\nc27d07aeb1a15d98a2395cfa\ncc85f6b8eaafe040b2e13c76\n4dc69d921355ed70bbb67cd9\n84f9973bf7176ec06d8f03e6\n26c093ddccb49a38c5b16e2d\n867e20d67c4a44fd99258fb0\n80e86ffb07804a780fc4571d\n1a15ab8c5d0ef03d6e813dc1\na4fcb48db691e2f28a359ca7\n10877eb0b037f41f45490fd3\n2cbb1fc83722b822e17804ed\nc9228af8f5af6f746b94b9d4\n4c1239c9a7e55ab8913b7f60\n8713954cf3a5de3f08a962ed\n86545035f71b8d2db6f2b0df\nddb36aef6f5bfb30b936792f\naf1b4d70cfc67aa9df57e59a\n93ea679386dbe88fc9f946fc\n9b7a23ba737ec1d60be25ad0\n339bc13c20a57093702ae580\nd019ce42906f82012853873b\n2c02172ac29a90090c398662\ne59575d60feb4da49d46cce6\n67128177612804b7a1e63541\n5c5f6cb12fc0f69fc739d34d\n94c005ce38e74b4c6373ec71\n4e31b94898a52f4c90acff42\nd16a122ead215d892cd95868\n75d2f3897e1f41b7f1a9f584\n82235603d3c577a64274cbb1\n0fbdbe4c0c690a1afe8afbfa\n99e076f002287a6c734b9741\nc5d69c74c2addd0ef01bc3f7\n5f43ffbc2a50eaebb477d6f9\nd0d0e47d7cf49257d859b69a\n8fd3e062e260ee46b69edbe3\n911c99b69f121e790e1053b7\nd45125630120ff97ebca0e48\nd869afabee3c84c0fb39cf5c\nd4adf63d2c732a44aee988b2\n37d7259047070ec5be5edb98\n06d9bec13dc49a925466eae8\n55a627e0d1347f80a30e5f9d\nacf7aec2f0f
d018cda806970\n9d7b94be92854a1bb1f449ea\n4dcd8687de59e7da3e4a7833\n4cfb238a2b4d089119649c10\n883c1ba5c4e5472d2f8067d6\nd6dce65891f8d62c46776734\nb0733c6ef7cdd5fff235eb75\n9ff869bd1bb5110f66f1ee19\n498ddb26082b1196ef321fda\n467abe7b126d6dfe7318ad15\n7290d3a2998fb74fa84e3416\n2520d7e4a5723f658397c456\n9a2d7fd18acc3baec3434a40\ndf5e004697838732a762ac28\ned604c323e3dc2432b5fe27e\n500a2c531037252cc61c45f8\n1e099fb1e04fb7159838d5af\n3e91776f98d25b45502ac2db\n0be3cc48ef2cf6bb10219f74\n81e65ac68af6c9f004b00f3f\n39abdd03cb06c7d03cdbfc2a\nafb768ff85b797e337376fc5\n27203d502fc92b0f7840821d\n23d0d8c2679d3d8922049d36\n2b84e6122153e7a0199e2132\nd63b9b6796f79ef2b9c50a80\n25ae1a17e3efe99d51f3dbb5\ne0b1e191d3d16a85569cdf28\n5ec4c042694ebb8de954b495\ndf0b4e455f45c5ae692175bc\ned1649f8ba47ece26dd14380\n6216c587462fb5fa9ca9f0e9\nb0b927ba3c9302dd9a955542\n4f2635a3dc6cc1cf83279cea\naf1d54f0da45b2cf2ea741ef\nfc233d8f8f09501bb2fb09c1\n21367b7608bc2bab1d9bb5a1\ne0ea71583ed30617dda9659c\n48a4fd0a7a1fed6b695f7073\nbb859820384fbf329be6eeac\ncd62691226dfe9dc950a87ac\nc968f30e24cf62ee34399d89\n7db754bd5d421eb2894a7fc0\n8b60b02dc1447c716ec05ade\n8c9ba278ef8c7873d60d8679\n0783e855e7b15834276de631\nafe5f9fc2fbe2fcc511278c9\n1401d72faa6d2196a9a2e981\n0b441b425c019b991354752d\n892c54298a5cf0bca4e83e08\n57379f39746672215d48b6e0\n7aa307679d508d574906a55b\n1d19fc005366cbb99d037a55\n76f531e30e91efbc36c14e54\n52d53bf09504aa2809bf89db\n" ], [ "def convert_list_of_dicts_into_dataframe(dict_list):\n output = {}\n for keyname in dict_list[0].keys():\n output[keyname] = []\n for item in dict_list:\n for k,v in item.items():\n output[k].append(v)\n return pd.DataFrame.from_dict(output)\n\n#print(convert_list_of_dicts_into_dataframe([\n# {'a': 3, 'b': 5},\n# {'a': 4, 'b': 6}\n#]))", "_____no_output_____" ], [ "def make_dataframe_days():\n sessions_for_user_by_day_and_goal_for_all_users = get_sessions_for_user_by_day_and_goal_for_all_users()\n output = []\n for sessions_for_user_by_day_and_goal in sessions_for_user_by_day_and_goal_for_all_users:\n user = sessions_for_user_by_day_and_goal['user']\n for day_domains_and_sessions in sessions_for_user_by_day_and_goal['days_domains_and_sessions']:\n epoch = day_domains_and_sessions['epoch']\n for domain_and_sessions in day_domains_and_sessions['domains_and_sessions']:\n domain = domain_and_sessions['domain']\n time_on_domain_today = domain_and_sessions['time_on_domain_today']\n time_on_all_other_domains_today = domain_and_sessions['time_on_all_other_domains_today']\n time_on_other_goal_domains_today = domain_and_sessions['time_on_other_goal_domains_today']\n is_goal_enabled = domain_and_sessions['is_goal_enabled']\n is_goal_frequent = domain_and_sessions['is_goal_frequent']\n output.append({\n 'user': user,\n 'epoch': epoch,\n 'domain': domain,\n 'time_on_domain_today': time_on_domain_today,\n 'time_on_all_other_domains_today': time_on_all_other_domains_today,\n 'time_on_other_goal_domains_today': time_on_other_goal_domains_today,\n 'is_goal_enabled': is_goal_enabled,\n 'is_goal_frequent': is_goal_frequent,\n })\n return convert_list_of_dicts_into_dataframe(output)\n\n#df = make_dataframe_days()\n#print(make_dataframe_days())", "_____no_output_____" ], [ "#df.to_csv('browser_time_on_domains_sept18.csv')", "_____no_output_____" ], [ "#%%R -i df -w 5 -h 5 --units in -r 200\n\n#install.packages('ez')\n#install.packages('lme4')\n\n#library(lme4)\n#library(sjPlot)\n#library(lmerTest)\n#library(ez)", "_____no_output_____" ], [ "# def get_days_and_sessions_for_user(user):\n# session_info_list = 
get_sessions_for_user(user)\n# min_epoch = min([x['local_epoch'] for x in session_info_list])\n# max_epoch = max([x['local'] for x in session_info_list])\n# for epoch in range(min_epoch, max_epoch + 1):\n \n\n# print(get_days_and_sessions_for_user('c11e5f2d93f249b5083989b2'))", "_____no_output_____" ], [ "#print(len(get_users_with_goal_frequency_set()))\n#print(valid_user_list[0])", "_____no_output_____" ], [ "def print_stats_on_install_records():\n user_to_all_install_ids = get_user_to_all_install_ids()\n users_with_goal_frequency_set = get_users_with_goal_frequency_set()\n users_with_missing_install_record = []\n users_with_zero_installs = []\n users_with_multiple_installs = []\n users_with_single_install = []\n\n for username in users_with_goal_frequency_set:\n if username not in user_to_all_install_ids:\n users_with_missing_install_record.append(username)\n continue\n install_ids = user_to_all_install_ids[username]\n if len(install_ids) == 0:\n users_with_zero_installs.append(username)\n continue\n if len(install_ids) > 1:\n users_with_multiple_installs.append(username)\n continue\n users_with_single_install.append(username)\n\n print('users with missing install record', len(users_with_missing_install_record))\n print('users with zero installs', len(users_with_zero_installs))\n print('users with multiple installs', len(users_with_multiple_installs))\n print('users with single install', len(users_with_single_install))\n\n\n#print_stats_on_install_records()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb11e25f22211265ae42c034164406a35b080e39
259,113
ipynb
Jupyter Notebook
PlanB/CPU_utilization_Experiment/version3_Experiment_style/Experiment1/Plot_Throughput_Experiment1.ipynb
phoophoo187/Privacy_SDN_Edge_IoT
3ee6e0fb36c6d86cf8caf4599a35c04b0ade9a8c
[ "MIT" ]
null
null
null
PlanB/CPU_utilization_Experiment/version3_Experiment_style/Experiment1/Plot_Throughput_Experiment1.ipynb
phoophoo187/Privacy_SDN_Edge_IoT
3ee6e0fb36c6d86cf8caf4599a35c04b0ade9a8c
[ "MIT" ]
null
null
null
PlanB/CPU_utilization_Experiment/version3_Experiment_style/Experiment1/Plot_Throughput_Experiment1.ipynb
phoophoo187/Privacy_SDN_Edge_IoT
3ee6e0fb36c6d86cf8caf4599a35c04b0ade9a8c
[ "MIT" ]
null
null
null
75.083454
20,136
0.505741
[ [ [ "## Plot the throughput of experiment 1 of version 3", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport requests\nimport io\nimport glob", "_____no_output_____" ] ], [ [ "## Function Read CSV files of Throughput from Iperf log\n", "_____no_output_____" ] ], [ [ "def getDataframeThru(df,start_row,measurement_interval,header_range):\n '''\n This functions will import the data from txt file and return the dataframe without the header of txt file.\n Input: \n measurement_interval = 30 (sec) : \n header_range = 10 lines\n start_row = 0 \n Return: \n df1t : dataframe of througput and jitter\n '''\n \n df1 = df.drop(labels=range(start_row, header_range), axis=0)\n df1t = df1.drop(labels=range(measurement_interval, len(df)), axis=0)\n \n return df1t", "_____no_output_____" ], [ "def getDatafromTxT(filename, headerrange):\n \"\"\"\n Get dataframe from txt file:\n filename : xxx.txt\n headerrange : number of lines that needed to be removed. \n \n return : df : datafame type\n \"\"\"\n h = headerrange + 1\n skip_1 = list(range(0,h, 1))\n df = pd.read_csv(filename,\n skiprows=skip_1,\n header=None,\n delimiter=' ',\n skipinitialspace=True,\n error_bad_lines=False)\n return df\n", "_____no_output_____" ], [ "## Find start row index of itteration\ndef getStartEndID(df,start_data,end_data):\n \"\"\"\n to clean dataframe and return the data with new header\n Input: \n df : datafram without header of txt file \n Output\n strat_indices_list : start indices list\n \"\"\"\n # creating and passing series to new column\n df[\"Start\"]= df[2].str.find(start_data)\n df[\"End\"]= df[2].str.find(end_data)\n\n index = df.index\n strat_indices = index[df[\"Start\"]==0.0]\n strat_indices_list = strat_indices.tolist()\n end_indices = index[df[\"End\"]==0.0]\n end_indices_list = end_indices.tolist()\n \n \n return strat_indices_list, end_indices_list\n ", "_____no_output_____" ], [ "def getCleanData(df,strat_indices_list,end_indices_list):\n \"\"\"\n \n \"\"\"\n df_all = df.drop(labels=range(1, len(df)), axis=0) # create new df\n start_row = 0\n c = 0\n for i in strat_indices_list: \n \n h = i\n print('h =',h)\n m = end_indices_list[c]\n print('m =', m)\n df1 = getDataframeThru(df,start_row,m,h)\n print('df1 = ', df1)\n result = pd.concat([df_all,df1])\n df_all = result\n c = c + 1\n if i == 0:\n \n df_all = df_all.drop(labels=0, axis=0)\n \n \n return df_all", "_____no_output_____" ], [ "def superClean(filename,headerrange,start_data,end_data):\n \"\"\"\n Clean Data from CSV file with remove the unnecessary header\n \"\"\"\n df = getDatafromTxT(filename, headerrange)\n strat_indices_list, end_indices_list = getStartEndID(df,start_data,end_data)\n df_all = getCleanData(df,strat_indices_list,end_indices_list)\n df_all_new = df_all.drop(df_all.columns[[0,1,3,5,7,9]], axis=1) # Replace new columns header\n df_all_new.rename({2 :'Interval', 4 : 'Transfer', 6 :'Bitrate', 8 :'Jitter', 10 :'Lost/Total Datagrams'}, axis=1, inplace=True)\n df = df_all_new.drop(range(0,1))\n df_all_new['Bitrate'] = df['Bitrate'].astype(float) \n time = np.array(range(len(df_all_new.index)))\n\n df_all_new['Time'] = time\n df_all_new['Time'] = df_all_new['Time'].astype(int) \n # avergae throughput \n sumThroughput = df_all_new['Bitrate'].sum()\n avgSumThroughput = sumThroughput/len(time)\n var_throughput = df_all_new['Bitrate'].var()\n \n return avgSumThroughput, var_throughput\n\n \n ", "_____no_output_____" ], [ "def 
readCSV2pd_Thru(directoryPath,tf_load,edge_name,start_data,end_data,headerrange):\n \"\"\"\n This function is to read a CSV file and return the average value and varience\n input: directoryPath : path of file names\n tf_load : list of traffic load\n \"\"\"\n avg_Thr = []\n var_Thr = []\n for tf in tf_load:\n cpu_data = pd.DataFrame()\n for file_name in glob.glob(directoryPath+edge_name+str(tf)+'.csv'):\n avg_thr,var_thr = superClean(file_name,headerrange,start_data,end_data)\n \n avg_Thr.append(avg_thr)\n var_Thr.append(var_thr)\n return avg_Thr, var_Thr", "_____no_output_____" ] ], [ [ "## Read File CSV", "_____no_output_____" ] ], [ [ "headerrange = 7\nstart_data = '9.0-10.0'\nend_data = '60.0-61.0'\ntf_load = [i*2 for i in range(2,20)]\nedge_name = 'edge1_M'\ndirectoryPath = '/Users/kalika/PycharmProjects/Privacy_SDN_Edge_IoT/PlanB/CPU_utilization_Experiment/version3_Experiment_style/Experiment1/Edge1_iperf_log/'\navg_thr, var_thr = readCSV2pd_Thru(directoryPath,tf_load,edge_name,start_data,end_data,headerrange)", "b'Skipping line 85: expected 9 fields, saw 12\\n'\nb'Skipping line 86: expected 9 fields, saw 12\\n'\nb'Skipping line 85: expected 9 fields, saw 12\\n'\nb'Skipping line 85: expected 9 fields, saw 12\\n'\nb'Skipping line 86: expected 9 fields, saw 12\\n'\nb'Skipping line 85: expected 9 fields, saw 12\\n'\nb'Skipping line 85: expected 9 fields, saw 12\\n'\nb'Skipping line 85: expected 9 fields, saw 12\\n'\nb'Skipping line 85: expected 9 fields, saw 12\\n'\nb'Skipping line 85: expected 9 fields, saw 12\\n'\nb'Skipping line 86: expected 9 fields, saw 12\\n'\nb'Skipping line 83: expected 9 fields, saw 12\\n'\n" ], [ "print('avg',avg_thr)\nprint('var',var_thr)", "avg [4.115961538461538, 6.169038461538463, 8.225000000000001, 10.298076923076923, 12.357692307692307, 14.417307692307698, 16.47692307692307, 18.536538461538463, 20.596153846153847, 22.653846153846146, 24.715384615384615, 26.765384615384612, 28.58846153846154, 28.503846153846162, 27.796153846153842, 28.853846153846153, 28.382692307692313, 28.70961538461539]\nvar [2.266666666666567e-05, 7.241743109928888e-30, 8.784313725489824e-05, 0.0, 1.5770907217178467e-28, 1.158678897588622e-28, 2.059873595713106e-28, 2.059873595713106e-28, 0.0, 0.18499607843137295, 0.0064000000000000124, 1.094901960784315, 0.6749490196078426, 1.6067843137254902, 3.6476705882352936, 1.0636078431372558, 2.6396313725490206, 1.02083137254902]\n" ], [ "headerrange = 7\nstart_data = '9.0-10.0'\nend_data = '60.0-61.0'\ntf_load = [i*2 for i in range(2,20)]\nedge_name = 'edge2_M'\ndirectoryPath = '/Users/kalika/PycharmProjects/Privacy_SDN_Edge_IoT/PlanB/CPU_utilization_Experiment/version3_Experiment_style/Experiment1/Edge2_iperf_log/'\navg_thr2, var_thr2 = readCSV2pd_Thru(directoryPath,tf_load,edge_name,start_data,end_data,headerrange)", "h = 8\nm = 59\ndf1 = 0 1 2 3 4 5 6 7 8 9 10 11 \\\n8 [ 3] 9.0-10.0 sec 511 KBytes 4.19 Mbits/sec 1.932 ms 0/ 356 \n9 [ 3] 10.0-11.0 sec 512 KBytes 4.20 Mbits/sec 1.491 ms 0/ 357 \n10 [ 3] 11.0-12.0 sec 512 KBytes 4.20 Mbits/sec 1.059 ms 0/ 357 \n11 [ 3] 12.0-13.0 sec 512 KBytes 4.20 Mbits/sec 0.576 ms 0/ 357 \n12 [ 3] 13.0-14.0 sec 511 KBytes 4.19 Mbits/sec 0.917 ms 0/ 356 \n13 [ 3] 14.0-15.0 sec 512 KBytes 4.20 Mbits/sec 0.233 ms 0/ 357 \n14 [ 3] 15.0-16.0 sec 512 KBytes 4.20 Mbits/sec 0.672 ms 0/ 357 \n15 [ 3] 16.0-17.0 sec 511 KBytes 4.19 Mbits/sec 0.451 ms 0/ 356 \n16 [ 3] 17.0-18.0 sec 512 KBytes 4.20 Mbits/sec 0.247 ms 0/ 357 \n17 [ 3] 18.0-19.0 sec 512 KBytes 4.20 Mbits/sec 0.470 ms 0/ 357 \n18 [ 3] 19.0-20.0 sec 511 
KBytes 4.19 Mbits/sec 0.432 ms 0/ 356 \n19 [ 3] 20.0-21.0 sec 512 KBytes 4.20 Mbits/sec 0.319 ms 0/ 357 \n20 [ 3] 21.0-22.0 sec 512 KBytes 4.20 Mbits/sec 0.219 ms 0/ 357 \n21 [ 3] 22.0-23.0 sec 511 KBytes 4.19 Mbits/sec 1.959 ms 0/ 356 \n22 [ 3] 23.0-24.0 sec 512 KBytes 4.20 Mbits/sec 0.543 ms 0/ 357 \n23 [ 3] 24.0-25.0 sec 512 KBytes 4.20 Mbits/sec 0.304 ms 0/ 357 \n24 [ 3] 25.0-26.0 sec 511 KBytes 4.19 Mbits/sec 0.118 ms 0/ 356 \n25 [ 3] 26.0-27.0 sec 512 KBytes 4.20 Mbits/sec 0.234 ms 0/ 357 \n26 [ 3] 27.0-28.0 sec 511 KBytes 4.19 Mbits/sec 0.837 ms 0/ 356 \n27 [ 3] 28.0-29.0 sec 512 KBytes 4.20 Mbits/sec 0.852 ms 0/ 357 \n28 [ 3] 29.0-30.0 sec 512 KBytes 4.20 Mbits/sec 0.571 ms 0/ 357 \n29 [ 3] 30.0-31.0 sec 511 KBytes 4.19 Mbits/sec 0.899 ms 0/ 356 \n30 [ 3] 31.0-32.0 sec 512 KBytes 4.20 Mbits/sec 1.324 ms 0/ 357 \n31 [ 3] 32.0-33.0 sec 512 KBytes 4.20 Mbits/sec 0.410 ms 0/ 357 \n32 [ 3] 33.0-34.0 sec 511 KBytes 4.19 Mbits/sec 0.627 ms 0/ 356 \n33 [ 3] 34.0-35.0 sec 511 KBytes 4.19 Mbits/sec 0.544 ms 0/ 356 \n34 [ 3] 35.0-36.0 sec 514 KBytes 4.21 Mbits/sec 1.010 ms 0/ 358 \n35 [ 3] 36.0-37.0 sec 510 KBytes 4.17 Mbits/sec 0.289 ms 0/ 355 \n36 [ 3] 37.0-38.0 sec 514 KBytes 4.21 Mbits/sec 0.328 ms 0/ 358 \n37 [ 3] 38.0-39.0 sec 512 KBytes 4.20 Mbits/sec 0.525 ms 0/ 357 \n38 [ 3] 39.0-40.0 sec 511 KBytes 4.19 Mbits/sec 0.180 ms 0/ 356 \n39 [ 3] 40.0-41.0 sec 512 KBytes 4.20 Mbits/sec 0.888 ms 0/ 357 \n40 [ 3] 41.0-42.0 sec 512 KBytes 4.20 Mbits/sec 0.257 ms 0/ 357 \n41 [ 3] 42.0-43.0 sec 511 KBytes 4.19 Mbits/sec 0.282 ms 0/ 356 \n42 [ 3] 43.0-44.0 sec 512 KBytes 4.20 Mbits/sec 0.149 ms 0/ 357 \n43 [ 3] 44.0-45.0 sec 512 KBytes 4.20 Mbits/sec 0.265 ms 0/ 357 \n44 [ 3] 45.0-46.0 sec 511 KBytes 4.19 Mbits/sec 0.140 ms 0/ 356 \n45 [ 3] 46.0-47.0 sec 512 KBytes 4.20 Mbits/sec 0.100 ms 0/ 357 \n46 [ 3] 47.0-48.0 sec 511 KBytes 4.19 Mbits/sec 0.476 ms 0/ 356 \n47 [ 3] 48.0-49.0 sec 511 KBytes 4.19 Mbits/sec 0.650 ms 0/ 356 \n48 [ 3] 49.0-50.0 sec 514 KBytes 4.21 Mbits/sec 0.653 ms 0/ 358 \n49 [ 3] 50.0-51.0 sec 511 KBytes 4.19 Mbits/sec 0.715 ms 0/ 356 \n50 [ 3] 51.0-52.0 sec 512 KBytes 4.20 Mbits/sec 0.452 ms 0/ 357 \n51 [ 3] 52.0-53.0 sec 512 KBytes 4.20 Mbits/sec 0.760 ms 0/ 357 \n52 [ 3] 53.0-54.0 sec 511 KBytes 4.19 Mbits/sec 0.636 ms 0/ 356 \n53 [ 3] 54.0-55.0 sec 512 KBytes 4.20 Mbits/sec 0.655 ms 0/ 357 \n54 [ 3] 55.0-56.0 sec 512 KBytes 4.20 Mbits/sec 0.501 ms 0/ 357 \n55 [ 3] 56.0-57.0 sec 511 KBytes 4.19 Mbits/sec 1.106 ms 0/ 356 \n56 [ 3] 57.0-58.0 sec 512 KBytes 4.20 Mbits/sec 0.608 ms 0/ 357 \n57 [ 3] 58.0-59.0 sec 512 KBytes 4.20 Mbits/sec 0.709 ms 0/ 357 \n58 [ 3] 59.0-60.0 sec 512 KBytes 4.20 Mbits/sec 0.668 ms 0/ 357 \n\n 12 13 Start End \n8 (0%) NaN 0 -1 \n9 (0%) NaN -1 -1 \n10 (0%) NaN -1 -1 \n11 (0%) NaN -1 -1 \n12 (0%) NaN -1 -1 \n13 (0%) NaN -1 -1 \n14 (0%) NaN -1 -1 \n15 (0%) NaN -1 -1 \n16 (0%) NaN -1 -1 \n17 (0%) NaN -1 -1 \n18 (0%) NaN -1 -1 \n19 (0%) NaN -1 -1 \n20 (0%) NaN -1 -1 \n21 (0%) NaN -1 -1 \n22 (0%) NaN -1 -1 \n23 (0%) NaN -1 -1 \n24 (0%) NaN -1 -1 \n25 (0%) NaN -1 -1 \n26 (0%) NaN -1 -1 \n27 (0%) NaN -1 -1 \n28 (0%) NaN -1 -1 \n29 (0%) NaN -1 -1 \n30 (0%) NaN -1 -1 \n31 (0%) NaN -1 -1 \n32 (0%) NaN -1 -1 \n33 (0%) NaN -1 -1 \n34 (0%) NaN -1 -1 \n35 (0%) NaN -1 -1 \n36 (0%) NaN -1 -1 \n37 (0%) NaN -1 -1 \n38 (0%) NaN -1 -1 \n39 (0%) NaN -1 -1 \n40 (0%) NaN -1 -1 \n41 (0%) NaN -1 -1 \n42 (0%) NaN -1 -1 \n43 (0%) NaN -1 -1 \n44 (0%) NaN -1 -1 \n45 (0%) NaN -1 -1 \n46 (0%) NaN -1 -1 \n47 (0%) NaN -1 -1 \n48 (0%) NaN -1 -1 \n49 (0%) NaN -1 -1 \n50 (0%) NaN -1 
-1 \n51 (0%) NaN -1 -1 \n52 (0%) NaN -1 -1 \n53 (0%) NaN -1 -1 \n54 (0%) NaN -1 -1 \n55 (0%) NaN -1 -1 \n56 (0%) NaN -1 -1 \n57 (0%) NaN -1 -1 \n58 (0%) NaN -1 -1 \navg : 4.115769230769231\nvar : 4.729411764705776e-05\nh = 8\nm = 59\ndf1 = 0 1 2 3 4 5 6 7 8 9 10 11 \\\n8 [ 3] 9.0-10.0 sec 768 KBytes 6.29 Mbits/sec 0.140 ms 0/ 535 \n9 [ 3] 10.0-11.0 sec 768 KBytes 6.29 Mbits/sec 0.274 ms 0/ 535 \n10 [ 3] 11.0-12.0 sec 768 KBytes 6.29 Mbits/sec 0.708 ms 0/ 535 \n11 [ 3] 12.0-13.0 sec 768 KBytes 6.29 Mbits/sec 0.146 ms 0/ 535 \n12 [ 3] 13.0-14.0 sec 768 KBytes 6.29 Mbits/sec 0.101 ms 0/ 535 \n13 [ 3] 14.0-15.0 sec 768 KBytes 6.29 Mbits/sec 0.190 ms 0/ 535 \n14 [ 3] 15.0-16.0 sec 768 KBytes 6.29 Mbits/sec 0.136 ms 0/ 535 \n15 [ 3] 16.0-17.0 sec 767 KBytes 6.28 Mbits/sec 0.151 ms 0/ 534 \n16 [ 3] 17.0-18.0 sec 769 KBytes 6.30 Mbits/sec 0.148 ms 0/ 536 \n17 [ 3] 18.0-19.0 sec 768 KBytes 6.29 Mbits/sec 0.165 ms 0/ 535 \n18 [ 3] 19.0-20.0 sec 768 KBytes 6.29 Mbits/sec 0.288 ms 0/ 535 \n19 [ 3] 20.0-21.0 sec 768 KBytes 6.29 Mbits/sec 0.305 ms 0/ 535 \n20 [ 3] 21.0-22.0 sec 768 KBytes 6.29 Mbits/sec 0.426 ms 0/ 535 \n21 [ 3] 22.0-23.0 sec 768 KBytes 6.29 Mbits/sec 0.533 ms 0/ 535 \n22 [ 3] 23.0-24.0 sec 768 KBytes 6.29 Mbits/sec 0.561 ms 0/ 535 \n23 [ 3] 24.0-25.0 sec 767 KBytes 6.28 Mbits/sec 0.859 ms 0/ 534 \n24 [ 3] 25.0-26.0 sec 768 KBytes 6.29 Mbits/sec 0.863 ms 0/ 535 \n25 [ 3] 26.0-27.0 sec 768 KBytes 6.29 Mbits/sec 0.786 ms 0/ 535 \n26 [ 3] 27.0-28.0 sec 768 KBytes 6.29 Mbits/sec 0.768 ms 0/ 535 \n27 [ 3] 28.0-29.0 sec 769 KBytes 6.30 Mbits/sec 0.348 ms 0/ 536 \n28 [ 3] 29.0-30.0 sec 767 KBytes 6.28 Mbits/sec 0.608 ms 0/ 534 \n29 [ 3] 30.0-31.0 sec 769 KBytes 6.30 Mbits/sec 0.069 ms 0/ 536 \n30 [ 3] 31.0-32.0 sec 767 KBytes 6.28 Mbits/sec 0.630 ms 0/ 534 \n31 [ 3] 32.0-33.0 sec 769 KBytes 6.30 Mbits/sec 0.663 ms 0/ 536 \n32 [ 3] 33.0-34.0 sec 767 KBytes 6.28 Mbits/sec 0.296 ms 0/ 534 \n33 [ 3] 34.0-35.0 sec 769 KBytes 6.30 Mbits/sec 0.873 ms 0/ 536 \n34 [ 3] 35.0-36.0 sec 768 KBytes 6.29 Mbits/sec 0.303 ms 0/ 535 \n35 [ 3] 36.0-37.0 sec 767 KBytes 6.28 Mbits/sec 0.552 ms 0/ 534 \n36 [ 3] 37.0-38.0 sec 769 KBytes 6.30 Mbits/sec 0.190 ms 0/ 536 \n37 [ 3] 38.0-39.0 sec 768 KBytes 6.29 Mbits/sec 0.208 ms 0/ 535 \n38 [ 3] 39.0-40.0 sec 768 KBytes 6.29 Mbits/sec 0.090 ms 0/ 535 \n39 [ 3] 40.0-41.0 sec 768 KBytes 6.29 Mbits/sec 0.239 ms 0/ 535 \n40 [ 3] 41.0-42.0 sec 768 KBytes 6.29 Mbits/sec 0.335 ms 0/ 535 \n41 [ 3] 42.0-43.0 sec 768 KBytes 6.29 Mbits/sec 0.455 ms 0/ 535 \n42 [ 3] 43.0-44.0 sec 768 KBytes 6.29 Mbits/sec 0.526 ms 0/ 535 \n43 [ 3] 44.0-45.0 sec 767 KBytes 6.28 Mbits/sec 0.219 ms 0/ 534 \n44 [ 3] 45.0-46.0 sec 767 KBytes 6.28 Mbits/sec 0.898 ms 0/ 534 \n45 [ 3] 46.0-47.0 sec 771 KBytes 6.32 Mbits/sec 0.989 ms 0/ 537 \n46 [ 3] 47.0-48.0 sec 767 KBytes 6.28 Mbits/sec 0.924 ms 0/ 534 \n47 [ 3] 48.0-49.0 sec 768 KBytes 6.29 Mbits/sec 1.029 ms 0/ 535 \n48 [ 3] 49.0-50.0 sec 768 KBytes 6.29 Mbits/sec 0.204 ms 0/ 535 \n49 [ 3] 50.0-51.0 sec 768 KBytes 6.29 Mbits/sec 0.736 ms 0/ 535 \n50 [ 3] 51.0-52.0 sec 768 KBytes 6.29 Mbits/sec 0.271 ms 0/ 535 \n51 [ 3] 52.0-53.0 sec 768 KBytes 6.29 Mbits/sec 0.108 ms 0/ 535 \n52 [ 3] 53.0-54.0 sec 767 KBytes 6.28 Mbits/sec 1.592 ms 0/ 534 \n53 [ 3] 54.0-55.0 sec 768 KBytes 6.29 Mbits/sec 0.317 ms 0/ 535 \n54 [ 3] 55.0-56.0 sec 769 KBytes 6.30 Mbits/sec 1.359 ms 0/ 536 \n55 [ 3] 56.0-57.0 sec 768 KBytes 6.29 Mbits/sec 0.072 ms 0/ 535 \n56 [ 3] 57.0-58.0 sec 768 KBytes 6.29 Mbits/sec 0.246 ms 0/ 535 \n57 [ 3] 58.0-59.0 sec 768 KBytes 6.29 Mbits/sec 1.403 
ms 0/ 535 \n58 [ 3] 59.0-60.0 sec 768 KBytes 6.29 Mbits/sec 0.167 ms 0/ 535 \n\n 12 13 Start End \n8 (0%) NaN 0 -1 \n9 (0%) NaN -1 -1 \n10 (0%) NaN -1 -1 \n11 (0%) NaN -1 -1 \n12 (0%) NaN -1 -1 \n13 (0%) NaN -1 -1 \n14 (0%) NaN -1 -1 \n15 (0%) NaN -1 -1 \n16 (0%) NaN -1 -1 \n17 (0%) NaN -1 -1 \n18 (0%) NaN -1 -1 \n19 (0%) NaN -1 -1 \n20 (0%) NaN -1 -1 \n21 (0%) NaN -1 -1 \n22 (0%) NaN -1 -1 \n23 (0%) NaN -1 -1 \n24 (0%) NaN -1 -1 \n25 (0%) NaN -1 -1 \n26 (0%) NaN -1 -1 \n27 (0%) NaN -1 -1 \n28 (0%) NaN -1 -1 \n29 (0%) NaN -1 -1 \n30 (0%) NaN -1 -1 \n31 (0%) NaN -1 -1 \n32 (0%) NaN -1 -1 \n33 (0%) NaN -1 -1 \n34 (0%) NaN -1 -1 \n35 (0%) NaN -1 -1 \n36 (0%) NaN -1 -1 \n37 (0%) NaN -1 -1 \n38 (0%) NaN -1 -1 \n39 (0%) NaN -1 -1 \n40 (0%) NaN -1 -1 \n41 (0%) NaN -1 -1 \n42 (0%) NaN -1 -1 \n43 (0%) NaN -1 -1 \n44 (0%) NaN -1 -1 \n45 (0%) NaN -1 -1 \n46 (0%) NaN -1 -1 \n47 (0%) NaN -1 -1 \n48 (0%) NaN -1 -1 \n49 (0%) NaN -1 -1 \n50 (0%) NaN -1 -1 \n51 (0%) NaN -1 -1 \n52 (0%) NaN -1 -1 \n53 (0%) NaN -1 -1 \n54 (0%) NaN -1 -1 \n55 (0%) NaN -1 -1 \n56 (0%) NaN -1 -1 \n57 (0%) NaN -1 -1 \n58 (0%) NaN -1 -1 \navg : 6.169038461538463\nvar : 5.199999999999885e-05\nh = 8\nm = 59\ndf1 = 0 1 2 3 4 5 6 7 8 9 10 11 \\\n8 [ 3] 9.0-10.0 sec 1.00 MBytes 8.40 Mbits/sec 0.270 ms 0/ 714 \n9 [ 3] 10.0-11.0 sec 1021 KBytes 8.36 Mbits/sec 0.224 ms 0/ 711 \n10 [ 3] 11.0-12.0 sec 1.00 MBytes 8.41 Mbits/sec 0.192 ms 0/ 715 \n11 [ 3] 12.0-13.0 sec 1.00 MBytes 8.40 Mbits/sec 0.245 ms 0/ 714 \n12 [ 3] 13.0-14.0 sec 1024 KBytes 8.38 Mbits/sec 0.233 ms 0/ 713 \n13 [ 3] 14.0-15.0 sec 1024 KBytes 8.38 Mbits/sec 0.202 ms 0/ 713 \n14 [ 3] 15.0-16.0 sec 1021 KBytes 8.36 Mbits/sec 1.631 ms 0/ 711 \n15 [ 3] 16.0-17.0 sec 1.00 MBytes 8.42 Mbits/sec 0.314 ms 0/ 716 \n16 [ 3] 17.0-18.0 sec 1024 KBytes 8.38 Mbits/sec 0.307 ms 0/ 713 \n17 [ 3] 18.0-19.0 sec 1024 KBytes 8.38 Mbits/sec 0.274 ms 0/ 713 \n18 [ 3] 19.0-20.0 sec 1.00 MBytes 8.40 Mbits/sec 0.279 ms 0/ 714 \n19 [ 3] 20.0-21.0 sec 1.00 MBytes 8.40 Mbits/sec 0.974 ms 0/ 714 \n20 [ 3] 21.0-22.0 sec 1022 KBytes 8.37 Mbits/sec 0.352 ms 0/ 712 \n21 [ 3] 22.0-23.0 sec 1.00 MBytes 8.40 Mbits/sec 0.260 ms 0/ 714 \n22 [ 3] 23.0-24.0 sec 1024 KBytes 8.38 Mbits/sec 0.764 ms 0/ 713 \n23 [ 3] 24.0-25.0 sec 1024 KBytes 8.38 Mbits/sec 0.771 ms 0/ 713 \n24 [ 3] 25.0-26.0 sec 1015 KBytes 8.31 Mbits/sec 2.205 ms 0/ 707 \n25 [ 3] 26.0-27.0 sec 1.01 MBytes 8.47 Mbits/sec 0.784 ms 0/ 720 \n26 [ 3] 27.0-28.0 sec 1015 KBytes 8.31 Mbits/sec 1.275 ms 0/ 707 \n27 [ 3] 28.0-29.0 sec 1.01 MBytes 8.47 Mbits/sec 0.225 ms 0/ 720 \n28 [ 3] 29.0-30.0 sec 1024 KBytes 8.38 Mbits/sec 1.032 ms 0/ 713 \n29 [ 3] 30.0-31.0 sec 1024 KBytes 8.38 Mbits/sec 0.935 ms 0/ 713 \n30 [ 3] 31.0-32.0 sec 1.00 MBytes 8.40 Mbits/sec 0.752 ms 0/ 714 \n31 [ 3] 32.0-33.0 sec 1024 KBytes 8.38 Mbits/sec 0.712 ms 0/ 713 \n32 [ 3] 33.0-34.0 sec 1022 KBytes 8.37 Mbits/sec 1.064 ms 0/ 712 \n33 [ 3] 34.0-35.0 sec 1.00 MBytes 8.41 Mbits/sec 0.429 ms 0/ 715 \n34 [ 3] 35.0-36.0 sec 1024 KBytes 8.38 Mbits/sec 0.362 ms 0/ 713 \n35 [ 3] 36.0-37.0 sec 1024 KBytes 8.38 Mbits/sec 0.309 ms 0/ 713 \n36 [ 3] 37.0-38.0 sec 1.00 MBytes 8.40 Mbits/sec 0.456 ms 0/ 714 \n37 [ 3] 38.0-39.0 sec 1022 KBytes 8.37 Mbits/sec 0.516 ms 0/ 712 \n38 [ 3] 39.0-40.0 sec 1.00 MBytes 8.40 Mbits/sec 0.622 ms 0/ 714 \n39 [ 3] 40.0-41.0 sec 1024 KBytes 8.38 Mbits/sec 1.747 ms 0/ 713 \n40 [ 3] 41.0-42.0 sec 1.00 MBytes 8.40 Mbits/sec 0.227 ms 0/ 714 \n41 [ 3] 42.0-43.0 sec 1024 KBytes 8.38 Mbits/sec 0.255 ms 0/ 713 \n42 [ 3] 43.0-44.0 sec 1.00 MBytes 8.40 Mbits/sec 0.669 ms 
0/ 714 \n43 [ 3] 44.0-45.0 sec 1024 KBytes 8.38 Mbits/sec 0.220 ms 0/ 713 \n44 [ 3] 45.0-46.0 sec 1024 KBytes 8.38 Mbits/sec 0.239 ms 0/ 713 \n45 [ 3] 46.0-47.0 sec 1024 KBytes 8.38 Mbits/sec 1.133 ms 0/ 713 \n46 [ 3] 47.0-48.0 sec 1.00 MBytes 8.40 Mbits/sec 0.448 ms 0/ 714 \n47 [ 3] 48.0-49.0 sec 1024 KBytes 8.38 Mbits/sec 0.455 ms 0/ 713 \n48 [ 3] 49.0-50.0 sec 1024 KBytes 8.38 Mbits/sec 0.088 ms 0/ 713 \n49 [ 3] 50.0-51.0 sec 1.00 MBytes 8.40 Mbits/sec 0.382 ms 0/ 714 \n50 [ 3] 51.0-52.0 sec 1024 KBytes 8.38 Mbits/sec 0.643 ms 0/ 713 \n51 [ 3] 52.0-53.0 sec 1024 KBytes 8.38 Mbits/sec 1.250 ms 0/ 713 \n52 [ 3] 53.0-54.0 sec 1.00 MBytes 8.40 Mbits/sec 0.188 ms 0/ 714 \n53 [ 3] 54.0-55.0 sec 1024 KBytes 8.38 Mbits/sec 0.188 ms 0/ 713 \n54 [ 3] 55.0-56.0 sec 1024 KBytes 8.38 Mbits/sec 0.522 ms 0/ 713 \n55 [ 3] 56.0-57.0 sec 1024 KBytes 8.38 Mbits/sec 0.364 ms 0/ 713 \n56 [ 3] 57.0-58.0 sec 1.00 MBytes 8.40 Mbits/sec 0.398 ms 0/ 714 \n57 [ 3] 58.0-59.0 sec 1024 KBytes 8.38 Mbits/sec 0.733 ms 0/ 713 \n58 [ 3] 59.0-60.0 sec 1.00 MBytes 8.40 Mbits/sec 0.590 ms 0/ 714 \n\n 12 13 Start End \n8 (0%) NaN 0 -1 \n9 (0%) NaN -1 -1 \n10 (0%) NaN -1 -1 \n11 (0%) NaN -1 -1 \n12 (0%) NaN -1 -1 \n13 (0%) NaN -1 -1 \n14 (0%) NaN -1 -1 \n15 (0%) NaN -1 -1 \n16 (0%) NaN -1 -1 \n17 (0%) NaN -1 -1 \n18 (0%) NaN -1 -1 \n19 (0%) NaN -1 -1 \n20 (0%) NaN -1 -1 \n21 (0%) NaN -1 -1 \n22 (0%) NaN -1 -1 \n23 (0%) NaN -1 -1 \n24 (0%) NaN -1 -1 \n25 (0%) NaN -1 -1 \n26 (0%) NaN -1 -1 \n27 (0%) NaN -1 -1 \n28 (0%) NaN -1 -1 \n29 (0%) NaN -1 -1 \n30 (0%) NaN -1 -1 \n31 (0%) NaN -1 -1 \n32 (0%) NaN -1 -1 \n33 (0%) NaN -1 -1 \n34 (0%) NaN -1 -1 \n35 (0%) NaN -1 -1 \n36 (0%) NaN -1 -1 \n37 (0%) NaN -1 -1 \n38 (0%) NaN -1 -1 \n39 (0%) NaN -1 -1 \n40 (0%) NaN -1 -1 \n41 (0%) NaN -1 -1 \n42 (0%) NaN -1 -1 \n43 (0%) NaN -1 -1 \n44 (0%) NaN -1 -1 \n45 (0%) NaN -1 -1 \n46 (0%) NaN -1 -1 \n47 (0%) NaN -1 -1 \n48 (0%) NaN -1 -1 \n49 (0%) NaN -1 -1 \n50 (0%) NaN -1 -1 \n51 (0%) NaN -1 -1 \n52 (0%) NaN -1 -1 \n53 (0%) NaN -1 -1 \n54 (0%) NaN -1 -1 \n55 (0%) NaN -1 -1 \n56 (0%) NaN -1 -1 \n57 (0%) NaN -1 -1 \n" ], [ "print('avg',avg_thr2)\nprint('var',var_thr2)", "avg [4.115769230769231, 6.169038461538463, 8.225961538461538, 10.294230769230769, 12.35576923076923, 14.419230769230772, 16.474999999999998, 18.53076923076923, 20.582692307692312, 22.63461538461538, 24.68846153846154, 26.744230769230775, 28.550000000000004, 28.500000000000004, 27.778846153846153, 28.84038461538462, 28.375000000000004, 28.680769230769222]\nvar [4.729411764705776e-05, 5.199999999999885e-05, 0.000676313725490197, 0.001984313725490182, 0.00019607843137254766, 0.0009960784313725558, 0.00019607843137255435, 0.0009647058823529143, 0.002007843137254957, 0.5161254901960786, 0.29523137254901954, 2.2269960784313723, 0.7025019607843137, 1.5084705882352945, 3.477035294117647, 0.9821647058823518, 2.5673960784313734, 0.9965019607843137]\n" ] ], [ [ "## Plot Throughput", "_____no_output_____" ] ], [ [ "fig = plt.figure()\n\nax = fig.add_subplot(1, 1, 1)\n\n\nax.plot(tf_load, avg_thr, color='green', linestyle='dashed', linewidth = 2,\n marker='o', markerfacecolor='green', markersize=10,label=\"Edge 1\")\nax.plot(tf_load, avg_thr2, color='red', linestyle='dashed', linewidth = 2,\n marker='x', markerfacecolor='red', markersize=10,label=\"Edge 2\")\nplt.ylim(0,30)\nplt.xlim(0,40)\nplt.xlabel('Traffic load $\\lambda_{1,2}$ (Mbps)')\n# naming the y axis\nplt.ylabel('Average of Throughput (Mbps)')\n\nplt.legend()\n\nplt.show()", "_____no_output_____" ], [ "fig = plt.figure()\n\nax = 
fig.add_subplot(1, 1, 1)\n\nax.plot(tf_load, var_thr, color='green', linestyle='dashed', linewidth = 2,\n marker='o', markerfacecolor='green', markersize=10,label=\"Edge 1\")\n\nax.plot(tf_load, var_thr2, color='red', linestyle='dashed', linewidth = 2,\n marker='x', markerfacecolor='red', markersize=10,label=\"Edge 2\")\n\nplt.ylim(0,20)\nplt.xlim(0,40)\nplt.xlabel('Traffic load $\\lambda_{1,2}$ (Mbps)')\n# naming the y axis\nplt.ylabel('Variance of Throughput')\n\nplt.legend()\n\nplt.show()\n ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb11eec333aaa54295b5406119e0b27395e427e5
2,013
ipynb
Jupyter Notebook
Untitled.ipynb
ugarteg/handson-ml2
bcde76f0f3a859be784a5688b08f9bb719abe2a3
[ "Apache-2.0" ]
null
null
null
Untitled.ipynb
ugarteg/handson-ml2
bcde76f0f3a859be784a5688b08f9bb719abe2a3
[ "Apache-2.0" ]
null
null
null
Untitled.ipynb
ugarteg/handson-ml2
bcde76f0f3a859be784a5688b08f9bb719abe2a3
[ "Apache-2.0" ]
null
null
null
22.366667
88
0.532538
[ [ [ "# Python โ‰ฅ3.5 is required\nimport sys\nassert sys.version_info >= (3, 5)", "_____no_output_____" ], [ "# Scikit-Learn โ‰ฅ0.20 is required\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"", "_____no_output_____" ], [ "import os\ndatapath = os.path.join(\"datasets\", \"lifesat\", \"\")", "_____no_output_____" ], [ "# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"fundamentals\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb11ef24a00de2e0ab18d529a0249f1713e05f13
3,756
ipynb
Jupyter Notebook
Supporting_Notebooks/Taylor-Series.ipynb
cxlldhty/Kalman-and-Bayesian-Filters-in-Python
ebfb45ba4115ff5a177d42b22182f8e42deb1e24
[ "CC-BY-4.0" ]
1
2019-09-03T00:47:42.000Z
2019-09-03T00:47:42.000Z
Supporting_Notebooks/Taylor-Series.ipynb
cxlldhty/Kalman-and-Bayesian-Filters-in-Python
ebfb45ba4115ff5a177d42b22182f8e42deb1e24
[ "CC-BY-4.0" ]
null
null
null
Supporting_Notebooks/Taylor-Series.ipynb
cxlldhty/Kalman-and-Bayesian-Filters-in-Python
ebfb45ba4115ff5a177d42b22182f8e42deb1e24
[ "CC-BY-4.0" ]
1
2020-04-18T15:47:06.000Z
2020-04-18T15:47:06.000Z
29.116279
333
0.484824
[ [ [ "#format the book\n%matplotlib inline\nfrom __future__ import division, print_function\nimport sys\nsys.path.insert(0, '..')\nimport book_format\nbook_format.set_style()", "_____no_output_____" ] ], [ [ "# Linearizing with Taylor Series\n\nTaylor series represents a function as an infinite sum of terms. The terms are linear, even for a nonlinear function, so we can express any arbitrary nonlinear function using linear algebra. The cost of this choice is that unless we use an infinite number of terms the value we compute will be approximate rather than exact.\n\nThe Taylor series for a real or complex function f(x) at x=a is the infinite series\n\n$$f(x) = f(a) + f'(a)(x-a) + \\frac{f''(a)}{2!}(x-a)^2 + \\, ...\\, + \\frac{f^{(n)}(a)}{n!}(x-a)^n + \\, ...$$\n\nwhere $f^{n}$ is the nth derivative of f. To compute the Taylor series for $f(x)=sin(x)$ at $x=0$ let's first work out the terms for f.\n\n$$\\begin{aligned}\nf^{0}(x) &= sin(x) ,\\ \\ &f^{0}(0) &= 0 \\\\\nf^{1}(x) &= cos(x),\\ \\ &f^{1}(0) &= 1 \\\\\nf^{2}(x) &= -sin(x),\\ \\ &f^{2}(0) &= 0 \\\\\nf^{3}(x) &= -cos(x),\\ \\ &f^{3}(0) &= -1 \\\\\nf^{4}(x) &= sin(x),\\ \\ &f^{4}(0) &= 0 \\\\\nf^{5}(x) &= cos(x),\\ \\ &f^{5}(0) &= 1\n\\end{aligned}\n$$\n\nNow we can substitute these values into the equation.\n\n$$\\sin(x) = \\frac{0}{0!}(x)^0 + \\frac{1}{1!}(x)^1 + \\frac{0}{2!}(x)^2 + \\frac{-1}{3!}(x)^3 + \\frac{0}{4!}(x)^4 + \\frac{-1}{5!}(x)^5 + ... $$\n\nAnd let's test this with some code:", "_____no_output_____" ] ], [ [ "import numpy as np\n\nx = .3\nestimate = x + x**3/6 + x**5/120\nexact = np.sin(.3)\n\nprint('estimate of sin(.3) is', estimate)\nprint('exact value of sin(.3) is', exact)", "estimate of sin(.3) is 0.30452025\nexact value of sin(.3) is 0.29552020666133955\n" ] ], [ [ "This is not bad for only three terms. If you are curious, go ahead and implement this as a Python function to compute the series for an arbitrary number of terms.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
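The Taylor-series notebook in the record above ends by inviting the reader to implement the series as a Python function for an arbitrary number of terms. Below is a minimal sketch of one way to do that; the function name `taylor_sin` and its structure are my own choices, not code from that notebook. Note that the series alternates in sign (x − x³/3! + x⁵/5! − …), so the three-term estimate is ≈0.29552, very close to the exact value of sin(0.3).

```python
# One possible implementation of the exercise suggested in the notebook above:
# approximate sin(x) with the first n_terms odd-power Taylor terms around 0.
from math import factorial, sin

def taylor_sin(x, n_terms=5):
    total = 0.0
    for k in range(n_terms):
        # terms alternate in sign and only odd powers of x appear
        total += (-1) ** k * x ** (2 * k + 1) / factorial(2 * k + 1)
    return total

print(taylor_sin(0.3, 3))   # ~0.2955202, three-term approximation
print(sin(0.3))             # ~0.2955202, exact value for comparison
```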
cb11f0fbb7db96aaf22fc95753f99463572792c6
126,929
ipynb
Jupyter Notebook
assignments/assignment_0/gnp_simulation_experiment.ipynb
biqar/Fall-2020-ITCS-8010-MLGraph
ebc2000f6e354f02c84af768261ccec24b988c6a
[ "MIT" ]
3
2021-12-19T06:55:22.000Z
2021-12-19T06:57:21.000Z
assignments/assignment_0/gnp_simulation_experiment.ipynb
biqar/Fall-2020-ITCS-8010-MLGraph
ebc2000f6e354f02c84af768261ccec24b988c6a
[ "MIT" ]
null
null
null
assignments/assignment_0/gnp_simulation_experiment.ipynb
biqar/Fall-2020-ITCS-8010-MLGraph
ebc2000f6e354f02c84af768261ccec24b988c6a
[ "MIT" ]
null
null
null
445.364912
64,656
0.940739
[ [ [ "$\\newcommand{\\xv}{\\mathbf{x}}\n \\newcommand{\\wv}{\\mathbf{w}}\n \\newcommand{\\Chi}{\\mathcal{X}}\n \\newcommand{\\R}{\\rm I\\!R}\n \\newcommand{\\sign}{\\text{sign}}\n \\newcommand{\\Tm}{\\mathbf{T}}\n \\newcommand{\\Xm}{\\mathbf{X}}\n \\newcommand{\\Im}{\\mathbf{I}}\n \\newcommand{\\Ym}{\\mathbf{Y}}\n$\n### ITCS8010\n\n# G_np Simulation Experiment\n\nIn this experiment I like to replicate the behaviour of `Fraction of node in largest CC` and `Fraction of isolated nodes` over the `p*log(n)` in `Erdรถs-Renyi random graph model`.", "_____no_output_____" ] ], [ [ "import networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport collections as collec\n%matplotlib inline", "_____no_output_____" ], [ "# Fraction of node in largest CC Vs. {p*log(n)}\nn = 100000\nx1 = []\ny1 = []\n\nfor kave in np.arange(0.5, 3.0, 0.1):\n G = nx.fast_gnp_random_graph(n, kave / (n - 1))\n largest_cc = max(nx.connected_components(G), key=len)\n x1.append(kave)\n y1.append(len(largest_cc)/n)\n # print(kave)\n # print(len(largest_cc)/n)\n\nfig, ax = plt.subplots()\nax.plot(x1, y1)\n\nax.set(xlabel='p*(n-1)', ylabel='Fraction of node in largest CC',\n title='Fraction of node in largest CC Vs. p*(n-1)')\nax.grid()\n\n# fig.savefig(\"test.png\")\nplt.show()", "_____no_output_____" ], [ "# Fraction of isolated nodes Vs. {p*log(n)}\nx2 = []\ny2 = []\n\nfor kave in np.arange(0.3, 1.5, 0.1):\n p = kave / (n - 1)\n G = nx.fast_gnp_random_graph(n, p)\n isolates = len(list(nx.isolates(G)))\n x2.append(p * np.log10(n))\n y2.append(isolates/n)\n # print(kave)\n # print(isolates/n)\n\nfig, ax = plt.subplots()\nax.plot(x2, y2)\n\nax.set(xlabel='p*log(n)', ylabel='Fraction of isolated nodes',\n title='Fraction of isolated nodes Vs. p*log(n)')\nax.grid()\n\n# fig.savefig(\"test.png\")\nplt.show()", "_____no_output_____" ], [ "# Fraction of isolated nodes Vs. {p*log(n)}\nx2 = []\ny2 = []\n\nfor kave in np.arange(0.3, 10, 0.1):\n p = kave / (n - 1)\n G = nx.fast_gnp_random_graph(n, p)\n isolates = len(list(nx.isolates(G)))\n x2.append(p * np.log10(n))\n y2.append(isolates/n)\n # print(kave)\n # print(isolates/n)\n\nfig, ax = plt.subplots()\nax.plot(x2, y2)\n\nax.set(xlabel='p*log(n)', ylabel='Fraction of isolated nodes',\n title='Fraction of isolated nodes Vs. p*log(n)')\nax.grid()\n\n# fig.savefig(\"test.png\")\nplt.show()", "_____no_output_____" ] ], [ [ "### Observation:\n\n1. The result of the first experiment (i.e. `fraction of node in largest CC` varying `p*(n-1)`) gives somewhat similar behaviour we observed in the class slide.\n2. In the second experiment (i.e. plotting `fraction of isolated nodes` on varying `p*log(n)`) gives somewhat different result comparing to the one we found in the class slide. When we plot the graph for p*(n-1) within the range of 0.3 to 1.5 we don't get the long tail; which we can get when we increase the range of p*(n-1) from 0.3 to 10. Just to inform, in this experiment we do run the loop on the different values of `p*(n-1)`, but plot it on `p*log(n)` scale. 
I am not sure the reason behind type of behaviour.", "_____no_output_____" ], [ "## Key Network Properties\n\nNow we like to use the networkx [[2]](https://networkx.github.io/documentation/stable/) library support to ovserve the values of the key network properties in Erdรถs-Renyi random graph.", "_____no_output_____" ] ], [ [ "# plotting degree distribution\n\nn1 = 180\np1 = 0.11\n\nG = nx.fast_gnp_random_graph(n1, p1)\n\ndegree_sequence = sorted([d for n, d in G.degree()], reverse=True) # degree sequence\ndegreeCount = collec.Counter(degree_sequence)\ndeg, cnt = zip(*degreeCount.items())\n\nfig, ax = plt.subplots()\nplt.bar(deg, cnt, width=0.80, color=\"b\")\n\nplt.title(\"Degree Histogram\")\nplt.ylabel(\"Count\")\nplt.xlabel(\"Degree\")\nax.set_xticks([d + 0.4 for d in deg])\nax.set_xticklabels(deg)\n\n# draw graph in inset\nplt.axes([0.4, 0.4, 0.5, 0.5])\nGcc = G.subgraph(sorted(nx.connected_components(G), key=len, reverse=True)[0])\npos = nx.spring_layout(G)\nplt.axis(\"off\")\nnx.draw_networkx_nodes(G, pos, node_size=20)\nnx.draw_networkx_edges(G, pos, alpha=0.4)\nplt.show()\n\n# diameter and path length\ndia = nx.diameter(G)\nprint(dia)\navg_path_len = nx.average_shortest_path_length(G)\nprint(avg_path_len)", "_____no_output_____" ] ], [ [ "# References\n\n\n[1] Erdล‘s, Paul, and Alfrรฉd Rรฉnyi. 1960. โ€œOn the Evolution of Random Graphs.โ€ Bull. Inst. Internat. Statis. 38 (4): 343โ€“47.\n\n[2] NetworkX, โ€œSoftware for Complex Networks,โ€ https://networkx.github.io/documentation/stable/, 2020, accessed: 2020-10.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
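A possible follow-up on the isolated-node observation in the record above: in G(n, p) the probability that a fixed node is isolated is (1 − p)^(n−1) ≈ e^(−p(n−1)), and the classical isolated-node threshold p = ln(n)/n is stated with the natural logarithm, whereas the notebook's plot uses log10(n), which rescales the x-axis by a factor of ln(10) ≈ 2.3. The short sketch below is my own illustration (not the notebook's code) of how the analytical expectation could be overlaid for comparison.

```python
# Hedged sketch: plot the analytical expectation exp(-p*(n-1)) for the fraction
# of isolated nodes in G(n, p), against p*ln(n) (natural log), for the same
# average-degree range used in the notebook above.
import numpy as np
import matplotlib.pyplot as plt

n = 100000
kave = np.arange(0.3, 10, 0.1)            # same k-average range as the notebook
p = kave / (n - 1)
expected_isolated = np.exp(-p * (n - 1))  # P(a given node has degree 0), to good approximation

plt.plot(p * np.log(n), expected_isolated)
plt.xlabel('p*ln(n)')
plt.ylabel('Expected fraction of isolated nodes')
plt.title('Analytical expectation for isolated nodes in G(n, p)')
plt.grid()
plt.show()
```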
cb1200e5f226e06d999bb75bc96554aa4c60f417
336,922
ipynb
Jupyter Notebook
Project_1_Babynames.ipynb
maximcondon/Project_BabyNames
73176b6a2473830275db53dcd0d05e0c6f751978
[ "MIT" ]
null
null
null
Project_1_Babynames.ipynb
maximcondon/Project_BabyNames
73176b6a2473830275db53dcd0d05e0c6f751978
[ "MIT" ]
null
null
null
Project_1_Babynames.ipynb
maximcondon/Project_BabyNames
73176b6a2473830275db53dcd0d05e0c6f751978
[ "MIT" ]
null
null
null
64.767782
32,684
0.701379
[ [ [ "# Project 1: Babynames", "_____no_output_____" ], [ "## I. Characterise One File\n\n### 1. Read the data\n\n - Read the file yob2000.txt\n - Name the columns\n - Print the first 10 entries", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom matplotlib import pyplot as plt", "_____no_output_____" ], [ "popular_names = pd.read_csv('yob2000.csv', \n names = ['Names', 'Sex', 'Birth Count'])", "_____no_output_____" ], [ "len(popular_names)", "_____no_output_____" ], [ "popular_names.head(10)", "_____no_output_____" ], [ "top_1000 = popular_names.sort_values(by = 'Birth Count',\n ascending=False).reset_index().drop('index', axis=1)", "_____no_output_____" ], [ "top_1000.head(10)", "_____no_output_____" ] ], [ [ "### 2. Calculate total births\n - Calculate the sum of the birth count column in the file yob2000.txt.\\", "_____no_output_____" ] ], [ [ "top_1000['Birth Count'].sum()", "_____no_output_____" ] ], [ [ "### 3. Separate boys / girls\n - Calculate separate sums for boys and girls.\n - Plot both sums in a bar plot", "_____no_output_____" ] ], [ [ "top_1000.groupby('Sex')['Birth Count'].sum()", "_____no_output_____" ], [ "plot_boys_girls = top_1000.groupby('Sex')['Birth Count'].sum()", "_____no_output_____" ], [ "plot_boys_girls.plot.bar()\nplt.ylabel('Birth Count')\nplt.title('Total births of females and males in Year 2000')\nplt.show()", "_____no_output_____" ] ], [ [ "But there's a greater amount of female names!", "_____no_output_____" ] ], [ [ "top_1000['Sex'].value_counts() # counts column values", "_____no_output_____" ] ], [ [ "### 4. Frequent names\n - Count how many names occur at least 1000 times in the file yob2000.txt.", "_____no_output_____" ] ], [ [ "top_1000[top_1000['Birth Count'] > 1000].head(10)", "_____no_output_____" ], [ "top_1000[top_1000['Birth Count'] > 1000]['Birth Count'].count()", "_____no_output_____" ] ], [ [ "### 5. Relative amount\n - Create a new column containing the percentage of a name on the total births of that year.\n - Verify that the sum of percentages is 100%.", "_____no_output_____" ] ], [ [ "(top_1000['Birth Count']/(top_1000['Birth Count'].sum()) * 100).head()", "_____no_output_____" ], [ "top_1000['Percentage of total count'] = top_1000['Birth Count']/(top_1000['Birth Count'].sum()) * 100", "_____no_output_____" ], [ "top_1000.head()", "_____no_output_____" ], [ "top_1000['Percentage of total count'].sum().round()", "_____no_output_____" ] ], [ [ "### 6. Search your name\n - Identify and print all lines containing your name in the year 2000.", "_____no_output_____" ] ], [ [ "top_1000[top_1000['Names'].str.contains('Max')]", "_____no_output_____" ] ], [ [ "### 7. Bar plot\n - Create a bar plot showing 5 selected names for the year 2000.", "_____no_output_____" ] ], [ [ "peppermint = top_1000.set_index('Names').loc[['Max', 'Eric','Josh','Daniela','Michael']]", "_____no_output_____" ], [ "peppermint", "_____no_output_____" ], [ "peppermint_5 = peppermint.groupby('Names')[['Birth Count']].sum()\npeppermint_5", "_____no_output_____" ], [ "peppermint_5.plot.bar(stacked=True, colormap='Accent')", "_____no_output_____" ] ], [ [ "## II. Characterize all files\n\n### 1. 
Read all names\nTo read the complete dataset, you need to loop though all file names:\n\n yob1880.txt\n yob1881.txt\n yob1882.txt\n ...\n\nComplete the code below by inserting _csv, data````df, names=['name', 'gender', 'count'], y and 2017:\n\n years = range(1880, ____, 10)\n data = []\n for y in years:\n fn = f'yob{_____}.txt'\n df = pd.read____(fn, ____)\n df['year'] = y\n data.append(____)\n df = pd.concat(____)\n \nRun the code and check the size of the resulting data frame.\n\nHint: In addition to some pandas functions, you may need to look up Python format strings.", "_____no_output_____" ] ], [ [ "years = range(1880, 2017)\ndata = []\nfor y in years:\n fn = f'yob{y}.txt'\n df = pd.read_csv(fn, names =['Names', 'Sex', 'Birth Count'])\n df['year'] = y\n data.append(df)\nusa_names = pd.concat(data)", "_____no_output_____" ], [ "usa_names.head(10)", "_____no_output_____" ], [ "len(usa_names)", "_____no_output_____" ] ], [ [ "### 2. Plot a time series\n\n- extract all rows containing your name from the variable df\n- plot the number of babies having your name and gender over time\n- make the plot nicer by adding row/column labels and a title\n- change the color and thickness of the line\n- save the plot as a high-resolution diagram", "_____no_output_____" ] ], [ [ "my_name = usa_names[(usa_names['Names']=='Max') \n & (usa_names['Sex'] == 'M')]\nmy_name.head(10)", "_____no_output_____" ], [ "my_name = my_name.set_index(['Names', 'Sex', 'year']).stack()\nmy_name = my_name.unstack((0,1,3))\nmy_name.head(10)", "_____no_output_____" ], [ "plt.plot(my_name)", "_____no_output_____" ], [ "plt.plot(my_name, linewidth=3, color= 'red')\nplt.xlabel('Year')\nplt.ylabel('Birth Count')\nplt.title('Popularity of the name Max over time')\nplt.savefig('Max_over_time.png', dpi = 300)\nplt.show()", "_____no_output_____" ] ], [ [ "### 3. Name diversity\n- Have the baby names become more diverse over time?\n- What assumptions is your calculation based upon?", "_____no_output_____" ] ], [ [ "usa_names.head(5)", "_____no_output_____" ], [ "name_diversity = usa_names.groupby('year')[['Names']].count()", "_____no_output_____" ], [ "name_diversity.head()", "_____no_output_____" ], [ "plt.plot(name_diversity)\nplt.xlabel('Year')\nplt.ylabel('Number of different names')\nplt.title('Variation of the number of given names over time')\nplt.show()", "_____no_output_____" ] ], [ [ "The SSA files that we are extracting our data from are for the 'Top 1000' names, therefore, there are a certain number of unique names (names with a yearly frequency of less than 5) that will not be included in the data. \n\nOur calculation essentially assumes that the number of names that has a frequency of less than 5 in the 1880s up until 2017 has probably increased too, or are at least equal! i.e. The number of names not present in the Top 1000 list does not affect the data enough that we can't conclude that in the present day there is a greater amount of name diversity.", "_____no_output_____" ], [ "### 4. Long names\n- add an extra column that contains the length of the name\n- print the 10 longest names to the screen.\n\nHint: If having the name in the index was useful so far, it is not so much useful for this task. 
With df.reset_index(inplace=True) you can move the index to a regular column.", "_____no_output_____" ] ], [ [ "usa_names.head()", "_____no_output_____" ], [ "long_names = list()\n\nfor i in usa_names['Names']:\n long_names.append(len(i))\n \nusa_names['Length of name'] = long_names\nusa_names.head(5)", "_____no_output_____" ], [ "long_names_10 = usa_names.sort_values(by='Length of name', ascending=False).head(10)\nlong_names_10", "_____no_output_____" ] ], [ [ "## III. Plot Celebrities\n\n### 1. Plotting Madonna\n- plot time lines of names of celebrities\n- try actors, presidents, princesses, Star Wars & GoT characters, boot camp participantsโ€ฆ\n\nHint: When was the hit single โ€œLike a Prayerโ€ released?", "_____no_output_____" ] ], [ [ "usa_names.drop(columns='Length of name').head()", "_____no_output_____" ], [ "celebrity = usa_names[usa_names['Names'] == 'Madonna']\ncelebrity = celebrity.drop(columns='Length of name')\nceleb_stacked = celebrity.set_index(['Names', 'Sex', 'year']).stack()\n\nmadonna = celeb_stacked.unstack((0,1,3))", "_____no_output_____" ], [ "plt.plot(madonna, linewidth=2.5)\nplt.xlabel('Year')\nplt.ylabel('Birth Count')\nplt.title('Popularity of Madonna year on year')\nplt.show()", "_____no_output_____" ] ], [ [ "### 2. Total births over time\n- create a plot that shows the total birth rate in the U.S. over time\n- plot the total birth rate for girls/boys separately", "_____no_output_____" ] ], [ [ "year_sum = usa_names.groupby('year')['Birth Count'].sum()\nyear_sum = pd.DataFrame(year_sum)\nyear_sum.head()", "_____no_output_____" ], [ "year_sum.plot()\nplt.xlabel('Year')\nplt.ylabel('Birth Count')\nplt.title('Total Birth Rate in the USA over time')\nplt.show()", "_____no_output_____" ], [ "usa_names.head()", "_____no_output_____" ], [ "usa_females_males = usa_names.groupby(['year','Sex'])['Birth Count'].sum().unstack()\nusa_females_males.head()", "_____no_output_____" ], [ "usa_names_males = usa_females_males.groupby('year')['M'].sum()\nusa_names_females = usa_females_males.groupby('year')['F'].sum()\n\nplt.plot(year_sum)\nplt.plot(usa_names_males)\nplt.plot(usa_names_females)\nplt.xlabel('Year')\nplt.ylabel('Birth Count')\nplt.title('Total, female, and male birth count year on year')\nplt.show()", "_____no_output_____" ] ], [ [ "### 3. Normalize\n- divide the number of births by the total number of births in each year to obtain the relative frequency\n- plot the time series of your name or the celebrity names again.\n\nHint: To reshape the data for plotting, you may find a combination of df.groupby( ) and df.unstack( ) useful.", "_____no_output_____" ] ], [ [ "year_sum = usa_names.groupby('year')[['Birth Count']].sum().reset_index()\nyear_sum.head()", "_____no_output_____" ], [ "usa_names = usa_names.drop(columns='Length of name')\nusa_names.head()", "_____no_output_____" ] ], [ [ "####ย Now let's merge! Almost always 'left' and you will merge 'on' a point they have in common, eg year!\n\nCan change sufixes too!", "_____no_output_____" ] ], [ [ "merged_usa_names = usa_names.merge(year_sum, how='left', on='year',\n suffixes=('_name', '_total'))\nmerged_usa_names.head(10)", "_____no_output_____" ], [ "merged_usa_names['Name Rel. %'] = merged_usa_names['Birth Count_name']/merged_usa_names['Birth Count_total']*100", "_____no_output_____" ], [ "merged_usa_names = merged_usa_names.sort_values(by='Name Rel. 
%', ascending=False)\nmerged_usa_names.head(10)", "_____no_output_____" ], [ "my_name = merged_usa_names[(merged_usa_names['Names']=='Max') \n & (merged_usa_names['Sex'] == 'M')]\nmy_name.head()", "_____no_output_____" ], [ "my_name = my_name.drop(columns=['Birth Count_name','Birth Count_total'])", "_____no_output_____" ], [ "my_names_stacked = my_name.set_index(['Names','Sex','year']).stack()\nmy_names_stacked.head()", "_____no_output_____" ], [ "my_name = my_names_stacked.unstack((0,1,3))\nmy_name.head()", "_____no_output_____" ], [ "plt.plot(my_name, linewidth=3, color= 'green')\nplt.xlabel('Year')\nplt.ylabel('Name Relativity %')\nplt.title('Percentage of people named Max relative to the total number of births over time')\nplt.show()", "_____no_output_____" ] ], [ [ "## II. Letter Statistics\n\n### 1. First letter statistics\n- use df.apply(func) to add an extra column that contains the first letter of the name.\n- count how many names start with โ€˜Aโ€™.\n- plot the relative occurence of initials over time.\n- what can you conclude from your observations?\n\nHint: You may need to iterate over the names with df.iterrows(). A more elegant solution is possible by writing a Python function and using df.apply()", "_____no_output_____" ] ], [ [ "merged_usa_names.head()", "_____no_output_____" ], [ "def initial(name):\n return name[0]\n\nmerged_usa_names['initial'] = merged_usa_names['Names'].apply(initial)", "_____no_output_____" ], [ "merged_usa_names.head()", "_____no_output_____" ], [ "merged_usa_names[merged_usa_names['initial']== 'A']['initial'].count()", "_____no_output_____" ], [ "first_letter_sum = merged_usa_names.groupby('year')['initial'].value_counts()", "_____no_output_____" ], [ "first_letter_sum.head()", "_____no_output_____" ], [ "df = pd.DataFrame(first_letter_sum)\ndf.head()", "_____no_output_____" ], [ "df = df.reset_index(0)", "_____no_output_____" ], [ "df.columns=['year','sum of initials']", "_____no_output_____" ], [ "df = df.reset_index()\ndf.head()", "_____no_output_____" ], [ "merge = merged_usa_names.merge(df, how='left', on=['year', 'initial'])", "_____no_output_____" ], [ "merge.head()", "_____no_output_____" ], [ "merge['initial Rel. %'] = merge['sum of initials']/merge['Birth Count_total']*100", "_____no_output_____" ], [ "merge.head()", "_____no_output_____" ], [ "merge = merge.sort_values(by='initial Rel. %', ascending=False)\nmerge.head()", "_____no_output_____" ], [ "initials = merge.drop(columns=['Birth Count_name', 'Birth Count_total', 'Name Rel. %', 'Sex', 'Names'])", "_____no_output_____" ], [ "initials.head()", "_____no_output_____" ], [ "initials = initials.drop_duplicates()", "_____no_output_____" ], [ "#initials_s = initials.set_index(['sum of initials', 'initial', 'year']).stack()", "_____no_output_____" ], [ "#initials_s.unstack((0,1, 3))", "_____no_output_____" ], [ "#plt.plot(initials_s, linewidth=3)\n", "_____no_output_____" ] ], [ [ "### 2. Last letter statistics\n- try the same for the final character\n- separate by boys/girls\n- what can you conclude from your observations?", "_____no_output_____" ] ], [ [ "def last_letter(name):\n return name[-1]\n\nmerged_usa_names['last letter'] = merged_usa_names['Names'].apply(last_letter)\n\nmerged_usa_names.head(5)", "_____no_output_____" ] ], [ [ "### 3. 
e-rich Names\n- Find all names that contain the character โ€˜eโ€™ at least four times.", "_____no_output_____" ] ], [ [ "usa_names.head()", "_____no_output_____" ] ], [ [ "### USE .APPLY to apply a function!\n", "_____no_output_____" ] ], [ [ "def four_e(input):\n check = []\n for i in input:\n if i == 'e' or i == 'E':\n check.append(i)\n return len(check)\n\nusa_names['e occurences'] = usa_names['Names'].apply(four_e)\n \nusa_names.head()", "_____no_output_____" ], [ "many_es = usa_names[usa_names['e occurences'] > 3]\nmany_es.head()", "_____no_output_____" ], [ "len(many_es)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb122238122ccd01472ed1c76e32d6437f015dac
266,794
ipynb
Jupyter Notebook
radio-signal-classification.ipynb
biswajitpawl/radio-signal-classification
94f4a59f841ea09790a6749a24d7016810ab4e2e
[ "MIT" ]
1
2020-11-21T08:24:56.000Z
2020-11-21T08:24:56.000Z
radio-signal-classification.ipynb
biswajitpawl/radio-signal-classification
94f4a59f841ea09790a6749a24d7016810ab4e2e
[ "MIT" ]
null
null
null
radio-signal-classification.ipynb
biswajitpawl/radio-signal-classification
94f4a59f841ea09790a6749a24d7016810ab4e2e
[ "MIT" ]
null
null
null
190.567143
165,174
0.853273
[ [ [ "## Classify Radio Signals from Space using Keras\n\nIn this experiment, we attempt to classify radio signals from space.\n\nDataset has been provided by SETI. Details can be found here:\nhttps://github.com/setiQuest/ML4SETI/blob/master/tutorials/Step_1_Get_Data.ipynb\n", "_____no_output_____" ], [ "## Import necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nimport seaborn as sns\nimport tensorflow as tf\n\n%matplotlib inline", "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ], [ "# Mount google drive to get data\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n!ls -l '/content/drive/My Drive/datasets/seti'", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly&response_type=code\n\nEnter your authorization code:\nยทยทยทยทยทยทยทยทยทยท\nMounted at /content/drive\ntotal 8\ndrwx------ 2 root root 4096 Aug 20 18:37 train\ndrwx------ 2 root root 4096 Aug 20 18:37 validation\n" ] ], [ [ "## Load data", "_____no_output_____" ] ], [ [ "# Load dataset from CSV\ntrain_images = pd.read_csv('/content/drive/My Drive/datasets/seti/train/images.csv', header=None)\ntrain_labels = pd.read_csv('/content/drive/My Drive/datasets/seti/train/labels.csv', header=None)\nval_images = pd.read_csv('/content/drive/My Drive/datasets/seti/validation/images.csv', header=None)\nval_labels = pd.read_csv('/content/drive/My Drive/datasets/seti/validation/labels.csv', header=None)", "_____no_output_____" ], [ "train_images.head()", "_____no_output_____" ], [ "train_labels.head()", "_____no_output_____" ], [ "# Check shape of train_images, train_labels, val_images nad val_labels\n\nprint(\"train_images shape:\", train_images.shape)\nprint(\"train_labels shape:\", train_labels.shape)\n\nprint(\"val_images shape:\", val_images.shape)\nprint(\"val_labels shape:\", val_labels.shape)", "train_images shape: (3200, 8192)\ntrain_labels shape: (3200, 4)\nval_images shape: (800, 8192)\nval_labels shape: (800, 4)\n" ], [ "# Reshape the image sets\n# Get the values as numpy array\n\nx_train = train_images.values.reshape(3200, 64, 128, 1)\nx_val = val_images.values.reshape(800, 64, 128, 1)\n\ny_train = train_labels.values\ny_val = val_labels.values", "_____no_output_____" ] ], [ [ "## Plot 2D spectrogram data", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(15,15))\n\nfor i in range(1,4):\n plt.subplot(1,3,i)\n img = np.squeeze(x_train[np.random.randint(x_train.shape[0])])\n plt.imshow(img, cmap='gray')", "_____no_output_____" ] ], [ [ "## Preprocess data", "_____no_output_____" ] ], [ [ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n\ndatagen_train = ImageDataGenerator(horizontal_flip=True)\ndatagen_train.fit(x_train)\n\ndatagen_val = ImageDataGenerator(horizontal_flip=True)\ndatagen_val.fit(x_val)", "_____no_output_____" ] ], [ [ "## Build model", "_____no_output_____" ] ], [ [ "from tensorflow.keras.layers import 
Conv2D, MaxPooling2D, Dense, Flatten\nfrom tensorflow.keras.layers import BatchNormalization, Dropout, Activation \n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import Adam", "_____no_output_____" ], [ "# Initialize model\nmodel = Sequential()\n\n# 1st CNN block\nmodel.add(Conv2D(32, (5,5), padding='same', input_shape=(64,128,1)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\n# 2nd CNN block\nmodel.add(Conv2D(64, (5,5), padding='same'))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\n# Falatter CNN output to feed to FC layer\nmodel.add(Flatten())\n\n# Fully connected layer\nmodel.add(Dense(1024))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.4))\n\n# Softmax layer\nmodel.add(Dense(4, activation='softmax'))", "_____no_output_____" ] ], [ [ "## Compile the model", "_____no_output_____" ] ], [ [ "# Schedule learnning rate decay\nlr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n 0.005,\n decay_steps=5,\n decay_rate=0.9,\n staircase=True)", "_____no_output_____" ], [ "model.compile(optimizer=Adam(lr_schedule), loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 64, 128, 32) 832 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 64, 128, 32) 128 \n_________________________________________________________________\nactivation (Activation) (None, 64, 128, 32) 0 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 32, 64, 32) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 32, 64, 32) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 32, 64, 64) 51264 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 32, 64, 64) 256 \n_________________________________________________________________\nactivation_1 (Activation) (None, 32, 64, 64) 0 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 16, 32, 64) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 16, 32, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 32768) 0 \n_________________________________________________________________\ndense (Dense) (None, 1024) 33555456 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 1024) 4096 \n_________________________________________________________________\nactivation_2 (Activation) (None, 1024) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 1024) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 4) 4100 \n=================================================================\nTotal params: 33,616,132\nTrainable params: 33,613,892\nNon-trainable params: 2,240\n_________________________________________________________________\n" ] ], [ [ "## Train the model", "_____no_output_____" ] 
], [ [ "batch_size = 32\nhistory = model.fit(\n datagen_train.flow(x_train, y_train, batch_size=batch_size, shuffle=True),\n steps_per_epoch=len(x_train)//batch_size,\n validation_data = datagen_val.flow(x_val, y_val, batch_size=batch_size, shuffle=True),\n validation_steps = len(x_val)//batch_size,\n epochs=10,\n)", "Epoch 1/10\n100/100 [==============================] - 115s 1s/step - loss: 0.5255 - accuracy: 0.7044 - val_loss: 5.4126 - val_accuracy: 0.2500\nEpoch 2/10\n100/100 [==============================] - 118s 1s/step - loss: 0.4004 - accuracy: 0.7391 - val_loss: 6.4884 - val_accuracy: 0.2500\nEpoch 3/10\n100/100 [==============================] - 115s 1s/step - loss: 0.3882 - accuracy: 0.7412 - val_loss: 5.6150 - val_accuracy: 0.2675\nEpoch 4/10\n100/100 [==============================] - 115s 1s/step - loss: 0.3914 - accuracy: 0.7409 - val_loss: 4.6394 - val_accuracy: 0.4913\nEpoch 5/10\n100/100 [==============================] - 115s 1s/step - loss: 0.3956 - accuracy: 0.7456 - val_loss: 4.2016 - val_accuracy: 0.5000\nEpoch 6/10\n100/100 [==============================] - 114s 1s/step - loss: 0.3904 - accuracy: 0.7416 - val_loss: 3.1053 - val_accuracy: 0.5013\nEpoch 7/10\n100/100 [==============================] - 118s 1s/step - loss: 0.3828 - accuracy: 0.7378 - val_loss: 1.6581 - val_accuracy: 0.4963\nEpoch 8/10\n100/100 [==============================] - 114s 1s/step - loss: 0.3839 - accuracy: 0.7516 - val_loss: 0.6483 - val_accuracy: 0.6625\nEpoch 9/10\n100/100 [==============================] - 114s 1s/step - loss: 0.4086 - accuracy: 0.7347 - val_loss: 0.4419 - val_accuracy: 0.7250\nEpoch 10/10\n100/100 [==============================] - 114s 1s/step - loss: 0.4000 - accuracy: 0.7466 - val_loss: 0.4041 - val_accuracy: 0.7400\n" ] ], [ [ "## Evaluation", "_____no_output_____" ] ], [ [ "plt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['training', 'validation'])\nplt.show()", "_____no_output_____" ], [ "plt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['training', 'validation'])\nplt.show()", "_____no_output_____" ], [ "model.evaluate(x_val, y_val)", "25/25 [==============================] - 6s 235ms/step - loss: 0.4045 - accuracy: 0.7462\n" ], [ "y_true = np.argmax(y_val, 1)\ny_pred = np.argmax(model.predict(x_val), 1)\n\nprint(metrics.classification_report(y_true, y_pred))\nprint(\"Classification accuracy: %.2f\" % metrics.accuracy_score(y_true, y_pred))", " precision recall f1-score support\n\n 0 1.00 0.95 0.98 200\n 1 0.50 0.88 0.64 200\n 2 0.54 0.15 0.23 200\n 3 1.00 1.00 1.00 200\n\n accuracy 0.75 800\n macro avg 0.76 0.75 0.71 800\nweighted avg 0.76 0.75 0.71 800\n\nClassification accuracy: 0.75\n" ], [ "plt.figure(figsize=(8,8))\n\nlabels = [\"squiggle\", \"narrowband\", \"noise\", \"narrowbanddrd\"]\n\nax = plt.subplot()\nsns.heatmap(metrics.confusion_matrix(y_true, y_pred, normalize='true'), annot=True, ax=ax, cmap=plt.cm.Blues)\n\nax.set_title('Confusion Matrix')\nax.xaxis.set_ticklabels(labels)\nax.yaxis.set_ticklabels(labels)", "_____no_output_____" ] ], [ [ "## Conclusions\n\nWinning submission has used ResNet based architechure (WRN) on primary (full) dataset, and achieved a classification accuracy of 94.99%.\nReference: https://github.com/sgrvinod/Wide-Residual-Nets-for-SETI\n\nHere we have used a simple CNN based model. 
The model did not learn much after the first 2 epochs (accuracy is around 74% after 10 epochs).\n\nReasons:\n\n* The signals in the dataset have a noise factor added to them.\n* Even though the dataset we have used here is simpler than the other datasets provided by SETI, it's a bit challenging to extract features using a simple model like ours. So it is essentially an underfitting problem.\n\nPossible improvements:\n\n* Add additional CNN blocks and change filter sizes (e.g. 7x7, 5x5, etc.) to learn more features.\n* Add additional fully connected layers.\n* Here we have used the Adam optimizer, which has convergence issues. We can change it to SGD and see what happens.\n* Use a different architecture altogether.\n\n\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
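One of the improvements listed in the conclusions of the record above is trying SGD instead of Adam. Below is a minimal sketch of that swap; it assumes the `model` object built earlier in that notebook, and the learning-rate and momentum values are arbitrary placeholders rather than tuned settings.

```python
# Sketch: recompile the same Keras model with SGD + momentum instead of Adam.
# Hyperparameter values here are illustrative guesses only; `model` is assumed
# to be the Sequential model defined earlier in the notebook.
from tensorflow.keras.optimizers import SGD

sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
```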
cb122cbe003bd9194edf5c9a481f517a819f0626
161,753
ipynb
Jupyter Notebook
notebooks/da_from_delayed_array_vs_df.ipynb
data-exp-lab/yt-dask-experiments
d63ab954ffacb10795a87c856e4388b86b0f957f
[ "BSD-3-Clause" ]
1
2021-02-17T13:53:52.000Z
2021-02-17T13:53:52.000Z
notebooks/da_from_delayed_array_vs_df.ipynb
data-exp-lab/yt-dask-experiments
d63ab954ffacb10795a87c856e4388b86b0f957f
[ "BSD-3-Clause" ]
null
null
null
notebooks/da_from_delayed_array_vs_df.ipynb
data-exp-lab/yt-dask-experiments
d63ab954ffacb10795a87c856e4388b86b0f957f
[ "BSD-3-Clause" ]
null
null
null
106.346483
30,260
0.81494
[ [ [ "### building a dask array without knowing sizes\n\n\n#### from dask.dataframe", "_____no_output_____" ] ], [ [ "from dask import array as da, dataframe as ddf, delayed, compute\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "da.from_delayed", "_____no_output_____" ], [ "def get_chunk_df(array_size,n_cols):\n col_names = [f\"col_{i}\" for i in range(n_cols)]\n pd_df = pd.DataFrame(\n {nm:pd.Series(np.arange(array_size[0])) for ic,nm in enumerate(col_names)}\n )\n return pd_df\n\ndef get_meta(n_cols):\n col_names = [f\"col_{i}\" for i in range(n_cols)]\n return {nm:pd.Series([], dtype=np.float64) for nm in col_names}\n\nn_cols = 5\nmeta_dict = get_meta(n_cols)", "_____no_output_____" ], [ "delayed_chunks = [delayed(get_chunk_df)((10000+10*ch,),n_cols) for ch in range(0,5)]", "_____no_output_____" ], [ "df_delayed = ddf.from_delayed(delayed_chunks,meta=meta_dict)", "_____no_output_____" ], [ "df_delayed", "_____no_output_____" ], [ "df = df_delayed.compute()", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.size", "_____no_output_____" ], [ "type(df)", "_____no_output_____" ], [ "col0 = df_delayed['col_0'].to_dask_array()", "_____no_output_____" ], [ "col0", "_____no_output_____" ], [ "col0.min()", "_____no_output_____" ], [ "col0.max().compute()", "_____no_output_____" ], [ "col0.max().compute()", "_____no_output_____" ], [ "col0np = col0.compute()\n", "_____no_output_____" ], [ "col0np.shape", "_____no_output_____" ], [ "col0np.max()", "_____no_output_____" ] ], [ [ "### direct from_array?", "_____no_output_____" ] ], [ [ "delayed_arrays=[]\nfor ichunk in range(0,5):\n ra_size = 10000+10*ichunk\n delayed_array = delayed(np.arange)(ra_size)\n delayed_arrays.append(da.from_delayed(delayed_array, (ra_size,), dtype=float)) ", "_____no_output_____" ], [ "delayed_arrays", "_____no_output_____" ], [ "hda = da.hstack(delayed_arrays)", "_____no_output_____" ], [ "hda", "_____no_output_____" ], [ "def get_delayed_array(base_chunk_size,n_chunks):\n delayed_arrays = []\n for ichunk in range(0,n_chunks):\n ra_size = base_chunk_size+10*ichunk\n delayed_array = delayed(np.arange)(ra_size)\n delayed_arrays.append(da.from_delayed(delayed_array, (ra_size,), dtype=float)) \n return da.hstack(delayed_arrays)\n\ndef get_delayed_array_from_df(base_chunk_size,n_chunks): \n meta_dict = get_meta(1)\n delayed_chunks = [delayed(get_chunk_df)((base_chunk_size+10*ch,),1) for ch in range(0,n_chunks)]\n df_delayed = ddf.from_delayed(delayed_chunks,meta=meta_dict)\n return df_delayed[list(meta_dict.keys())[0]].to_dask_array()", "_____no_output_____" ], [ "n_chunks = 5\nbase_chunk_size = 1000", "_____no_output_____" ], [ "array_from_hstack = get_delayed_array(base_chunk_size,n_chunks)\narray_from_df = get_delayed_array_from_df(base_chunk_size,n_chunks)", "_____no_output_____" ], [ "array_from_hstack", "_____no_output_____" ], [ "array_from_df", "_____no_output_____" ], [ "h_array = array_from_hstack.compute()\n", "_____no_output_____" ], [ "df_array = array_from_df.compute()", "_____no_output_____" ], [ "h_array.shape", "_____no_output_____" ], [ "df_array.shape", "_____no_output_____" ], [ "np.all(h_array==df_array)", "_____no_output_____" ] ], [ [ "### comparison", "_____no_output_____" ] ], [ [ "def array_construct_compute(base_chunk_size,n_chunks,find_mean=False):\n res1 = get_delayed_array(base_chunk_size,n_chunks)\n if find_mean:\n r = res1.mean().compute()\n else:\n r = res1.compute()\n return\n\ndef 
df_construct_compute(base_chunk_size,n_chunks,find_mean=False):\n res1 = get_delayed_array_from_df(base_chunk_size,n_chunks)\n if find_mean:\n r = res1.mean().compute()\n else:\n r = res1.compute()\n return", "_____no_output_____" ], [ "base_chunk_size = 100000\ntest_chunks = np.arange(2,100,5)\nresults = pd.DataFrame()\nfor n_chunks in test_chunks: \n time_result_ar = %timeit -n10 -r5 -o array_construct_compute(base_chunk_size,n_chunks)\n time_result_df = %timeit -n10 -r5 -o df_construct_compute(base_chunk_size,n_chunks) \n new_row = {\n 'chunks':n_chunks,'base_size':base_chunk_size,\"actual_size\":n_chunks * (base_chunk_size + 10),\n 'direct_mean':time_result_ar.average,'direct_std':time_result_ar.stdev,\n 'indirect_mean':time_result_df.average,'indirect_std':time_result_df.stdev,\n }\n results = results.append([new_row],ignore_index=True)\n ", "1.43 ms ยฑ 224 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n8.54 ms ยฑ 1.99 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n5.62 ms ยฑ 273 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n32.3 ms ยฑ 7.26 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n7.16 ms ยฑ 2.42 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n54.8 ms ยฑ 10.1 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n11.5 ms ยฑ 936 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n71.6 ms ยฑ 15 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n15.4 ms ยฑ 1.04 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n76.8 ms ยฑ 18.6 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n9.97 ms ยฑ 647 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n118 ms ยฑ 12.6 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n14.4 ms ยฑ 1.79 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n168 ms ยฑ 11 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n16.1 ms ยฑ 2.97 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n144 ms ยฑ 32 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n16.9 ms ยฑ 1.09 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n157 ms ยฑ 19.8 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n17.2 ms ยฑ 434 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n178 ms ยฑ 17.3 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n18.6 ms ยฑ 411 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n213 ms ยฑ 9.87 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n20.5 ms ยฑ 780 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n222 ms ยฑ 13.4 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n21.7 ms ยฑ 658 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n235 ms ยฑ 11.1 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n23.2 ms ยฑ 569 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n247 ms ยฑ 13.5 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n24.7 ms ยฑ 311 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n257 ms ยฑ 19.4 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n26.5 ms ยฑ 472 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n246 ms ยฑ 13.2 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n28.7 ms ยฑ 639 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n298 ms ยฑ 28.1 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n29.4 ms ยฑ 427 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n285 ms ยฑ 16.3 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n31.9 ms ยฑ 794 ยตs per loop (mean ยฑ std. dev. 
of 5 runs, 10 loops each)\n319 ms ยฑ 21.1 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n32.5 ms ยฑ 214 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n340 ms ยฑ 17.8 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n" ], [ "results.head()", "_____no_output_____" ], [ "def plot_results(results,xvar='chunks',log_x=False,log_y=True,second_x=None):\n fig = plt.figure()\n clrs = [[0,0,0,1],[1,0,0,1]] \n ax1 = plt.subplot(2,1,1)\n xvals = results[xvar]\n for fld,clr in zip(['direct','indirect'],clrs): \n plt.plot(xvals,results[fld+'_mean'],color=clr,marker='.')\n clr[3] = 0.5 \n for pm in [1,-1]:\n std_pm = results[fld+'_mean'] + results[fld+'_std']* pm *2 \n plt.plot(xvals,std_pm,color=clr)\n\n if log_y:\n plt.yscale('log')\n if log_x:\n plt.xscale('log')\n \n plt.ylabel('time [s]')\n \n plt.subplot(2,1,2)\n plt.plot(xvals,results.indirect_mean/results.direct_mean,color='k',marker='.')\n plt.ylabel('indirect / direct ')\n plt.xlabel(xvar)\n if log_x:\n plt.xscale('log')\n return fig\n", "_____no_output_____" ], [ "fig = plot_results(results,xvar='chunks')", "_____no_output_____" ], [ "base_chunk_size = 100000\ntest_chunks = np.arange(2,100,5)\nresults_wmn = pd.DataFrame()\nfor n_chunks in test_chunks: \n time_result_ar = %timeit -n10 -r5 -o array_construct_compute(base_chunk_size,n_chunks,True)\n time_result_df = %timeit -n10 -r5 -o df_construct_compute(base_chunk_size,n_chunks, True) \n new_row = {\n 'chunks':n_chunks,'base_size':base_chunk_size,\"actual_size\":n_chunks * (base_chunk_size + 10),\n 'direct_mean':time_result_ar.average,'direct_std':time_result_ar.stdev,\n 'indirect_mean':time_result_df.average,'indirect_std':time_result_df.stdev,\n }\n results_wmn = results.append([new_row],ignore_index=True)", "2.64 ms ยฑ 560 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n7.94 ms ยฑ 1.76 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n6.87 ms ยฑ 3.09 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n40.9 ms ยฑ 12.5 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n11.5 ms ยฑ 3.63 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n78.4 ms ยฑ 13.8 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n20.6 ms ยฑ 3.45 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n95.6 ms ยฑ 19.2 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n14.7 ms ยฑ 987 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n113 ms ยฑ 29.1 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n12.9 ms ยฑ 935 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n149 ms ยฑ 21.9 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n15.7 ms ยฑ 3.6 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n142 ms ยฑ 32.6 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n15.9 ms ยฑ 602 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n215 ms ยฑ 35.2 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n17.7 ms ยฑ 970 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n260 ms ยฑ 12.6 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n22.3 ms ยฑ 4.89 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n286 ms ยฑ 12.5 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n21.7 ms ยฑ 3.7 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n310 ms ยฑ 16.7 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n22.1 ms ยฑ 741 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n301 ms ยฑ 99.5 ms per loop (mean ยฑ std. dev. 
of 5 runs, 10 loops each)\n23.9 ms ยฑ 291 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n382 ms ยฑ 10.3 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n26.8 ms ยฑ 3.09 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n392 ms ยฑ 22.4 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n28.7 ms ยฑ 1.94 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n400 ms ยฑ 20.4 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n29.9 ms ยฑ 1.28 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n428 ms ยฑ 35.8 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n31.2 ms ยฑ 741 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n461 ms ยฑ 39.7 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n32.4 ms ยฑ 438 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n442 ms ยฑ 106 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n34.9 ms ยฑ 935 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n468 ms ยฑ 60.8 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n35.9 ms ยฑ 1.29 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n554 ms ยฑ 44.5 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n" ], [ "fig = plot_results(results_wmn)", "_____no_output_____" ], [ "test_sizes = np.logspace(3,6,9-3+1)\nn_chunks = 10\nresults_by_size = pd.DataFrame()\nfor base_chunk_size in test_sizes: \n time_result_ar = %timeit -n10 -r5 -o array_construct_compute(base_chunk_size,n_chunks,True)\n time_result_df = %timeit -n10 -r5 -o df_construct_compute(base_chunk_size,n_chunks,True)\n new_row = {\n 'chunks':n_chunks,'base_size':base_chunk_size,\"actual_size\":n_chunks * (base_chunk_size + 10),\n 'direct_mean':time_result_ar.average,'direct_std':time_result_ar.stdev,\n 'indirect_mean':time_result_df.average,'indirect_std':time_result_df.stdev,\n }\n results_by_size = results_by_size.append([new_row],ignore_index=True)\n ", "6.41 ms ยฑ 1.97 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n24.7 ms ยฑ 3.7 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n9.9 ms ยฑ 3.92 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n26.8 ms ยฑ 4.61 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n12.5 ms ยฑ 3.55 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n40.8 ms ยฑ 7.36 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n17 ms ยฑ 2.43 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n76.8 ms ยฑ 3.5 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n14.6 ms ยฑ 5.19 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n77.5 ms ยฑ 6.73 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n11.3 ms ยฑ 3.36 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n71.3 ms ยฑ 7.27 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n12 ms ยฑ 972 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n29.2 ms ยฑ 280 ยตs per loop (mean ยฑ std. dev. 
of 5 runs, 10 loops each)\n" ], [ "results_by_size", "_____no_output_____" ], [ "fig = plot_results(results_by_size,xvar='actual_size',log_x=True)", "_____no_output_____" ], [ "test_sizes = np.logspace(3,6,9-3+1)\nn_chunks = 10\nresults_by_size_nomn = pd.DataFrame()\nfor base_chunk_size in test_sizes: \n time_result_ar = %timeit -n10 -r5 -o array_construct_compute(base_chunk_size,n_chunks)\n time_result_df = %timeit -n10 -r5 -o df_construct_compute(base_chunk_size,n_chunks)\n new_row = {\n 'chunks':n_chunks,'base_size':base_chunk_size,\"actual_size\":n_chunks * (base_chunk_size + 10),\n 'direct_mean':time_result_ar.average,'direct_std':time_result_ar.stdev,\n 'indirect_mean':time_result_df.average,'indirect_std':time_result_df.stdev,\n }\n results_by_size_nomn = results_by_size.append([new_row],ignore_index=True)", "2.84 ms ยฑ 107 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n21.8 ms ยฑ 5.04 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n4.18 ms ยฑ 1.11 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n22.9 ms ยฑ 5.14 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n7.98 ms ยฑ 2.15 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n39.3 ms ยฑ 3.47 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n6.96 ms ยฑ 2.63 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n71.2 ms ยฑ 5.24 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n7.28 ms ยฑ 4.1 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n56.6 ms ยฑ 15.9 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n10.1 ms ยฑ 3.85 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n59.2 ms ยฑ 19.5 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n19.6 ms ยฑ 1.44 ms per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n37.9 ms ยฑ 573 ยตs per loop (mean ยฑ std. dev. of 5 runs, 10 loops each)\n" ], [ "fig = plot_results(results_by_size_nomn,xvar='actual_size',log_x=True)", "_____no_output_____" ] ], [ [ "the question is: \n\npre-compute time for counting particles + direct array <= indirect array from dataframe ?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb1242f4e4e59bec6944cb2faf420e6c8eddc912
45,941
ipynb
Jupyter Notebook
week 1/02-linear-regression.ipynb
anuraglahon16/DeepLearning-with-PyTorch
f46d44251cae1a2ae3e027533ae839d96b2cb908
[ "MIT" ]
1
2020-07-22T05:05:21.000Z
2020-07-22T05:05:21.000Z
week 1/02-linear-regression.ipynb
anuraglahon16/DeepLearning-with-PyTorch
f46d44251cae1a2ae3e027533ae839d96b2cb908
[ "MIT" ]
null
null
null
week 1/02-linear-regression.ipynb
anuraglahon16/DeepLearning-with-PyTorch
f46d44251cae1a2ae3e027533ae839d96b2cb908
[ "MIT" ]
1
2021-04-02T17:35:54.000Z
2021-04-02T17:35:54.000Z
30.066099
511
0.544633
[ [ [ "## Linear Regression withย PyTorch\n\n#### Part 2 of \"PyTorch: Zero toย GANs\"\n\n*This post is the second in a series of tutorials on building deep learning models with PyTorch, an open source neural networks library developed and maintained by Facebook. Check out the full series:*\n\n\n1. [PyTorch Basics: Tensors & Gradients](https://jovian.ml/aakashns/01-pytorch-basics)\n2. [Linear Regression & Gradient Descent](https://jovian.ml/aakashns/02-linear-regression)\n3. [Image Classfication using Logistic Regression](https://jovian.ml/aakashns/03-logistic-regression) \n4. [Training Deep Neural Networks on a GPU](https://jovian.ml/aakashns/04-feedforward-nn)\n5. [Image Classification using Convolutional Neural Networks](https://jovian.ml/aakashns/05-cifar10-cnn)\n6. [Data Augmentation, Regularization and ResNets](https://jovian.ml/aakashns/05b-cifar10-resnet)\n7. [Generating Images using Generative Adverserial Networks](https://jovian.ml/aakashns/06-mnist-gan)\n\nContinuing where the [previous tutorial](https://jvn.io/aakashns/3143ceb92b4f4cbbb4f30e203580b77b) left off, we'll discuss one of the foundational algorithms of machine learning in this post: *Linear regression*. We'll create a model that predicts crop yields for apples and oranges (*target variables*) by looking at the average temperature, rainfall and humidity (*input variables or features*) in a region. Here's the training data:\n\n![linear-regression-training-data](https://i.imgur.com/6Ujttb4.png)\n\nIn a linear regression model, each target variable is estimated to be a weighted sum of the input variables, offset by some constant, known as a biasย :\n\n```\nyield_apple = w11 * temp + w12 * rainfall + w13 * humidity + b1\nyield_orange = w21 * temp + w22 * rainfall + w23 * humidity + b2\n```\n\nVisually, it means that the yield of apples is a linear or planar function of temperature, rainfall and humidity:\n\n![linear-regression-graph](https://i.imgur.com/4DJ9f8X.png)\n\nThe *learning* part of linear regression is to figure out a set of weights `w11, w12,... w23, b1 & b2` by looking at the training data, to make accurate predictions for new data (i.e. to predict the yields for apples and oranges in a new region using the average temperature, rainfall and humidity). This is done by adjusting the weights slightly many times to make better predictions, using an optimization technique called *gradient descent*.", "_____no_output_____" ], [ "## System setup\n\nThis tutorial takes a code-first approach towards learning PyTorch, and you should try to follow along by running and experimenting with the code yourself. The easiest way to start executing this notebook is to click the **\"Run\"** button at the top of this page, and select **\"Run on Binder\"**. 
This will run the notebook on [mybinder.org](https://mybinder.org), a free online service for running Jupyter notebooks.\n\n**NOTE**: *If you're running this notebook on Binder, please skip ahead to the next section.*\n\n### Running on your computer locally\n\nYou can clone this notebook hosted on [Jovian.ml](https://www.jovian.ml), install the required dependencies, and start Jupyter by running the following commands on the terminal:\n\n```bash\npip install jovian --upgrade # Install the jovian library \njovian clone aakashns/02-linear-regression # Download notebook & dependencies\ncd 02-linear-regression # Enter the created directory \njovian install # Install the dependencies\nconda activate 02-linear-regression # Activate virtual environment\njupyter notebook # Start Jupyter\n```\n\nOn older versions of conda, you might need to run `source activate 02-linear-regression` to activate the environment. For a more detailed explanation of the above steps, check out the *System setup* section in the [previous notebook](https://jovian.ml/aakashns/01-pytorch-basics).", "_____no_output_____" ], [ "We begin by importing Numpy and PyTorch:", "_____no_output_____" ] ], [ [ "# Uncomment the command below if Numpy or PyTorch is not installed\n# !conda install numpy pytorch cpuonly -c pytorch -y", "_____no_output_____" ], [ "import numpy as np\nimport torch", "_____no_output_____" ] ], [ [ "## Training data\n\nThe training data can be represented using 2 matrices: `inputs` and `targets`, each with one row per observation, and one column per variable.", "_____no_output_____" ] ], [ [ "# Input (temp, rainfall, humidity)\ninputs = np.array([[73, 67, 43], \n [91, 88, 64], \n [87, 134, 58], \n [102, 43, 37], \n [69, 96, 70]], dtype='float32')", "_____no_output_____" ], [ "# Targets (apples, oranges)\ntargets = np.array([[56, 70], \n [81, 101], \n [119, 133], \n [22, 37], \n [103, 119]], dtype='float32')", "_____no_output_____" ] ], [ [ "We've separated the input and target variables, because we'll operate on them separately. Also, we've created numpy arrays, because this is typically how you would work with training data: read some CSV files as numpy arrays, do some processing, and then convert them to PyTorch tensors as follows:", "_____no_output_____" ] ], [ [ "# Convert inputs and targets to tensors\ninputs = torch.from_numpy(inputs)\ntargets = torch.from_numpy(targets)\nprint(inputs)\nprint(targets)", "tensor([[ 73., 67., 43.],\n [ 91., 88., 64.],\n [ 87., 134., 58.],\n [102., 43., 37.],\n [ 69., 96., 70.]])\ntensor([[ 56., 70.],\n [ 81., 101.],\n [119., 133.],\n [ 22., 37.],\n [103., 119.]])\n" ] ], [ [ "## Linear regression model fromย scratch\n\nThe weights and biases (`w11, w12,... w23, b1 & b2`) can also be represented as matrices, initialized as random values. The first row of `w` and the first element of `b` are used to predict the first target variable i.e. 
yield of apples, and similarly the second for oranges.", "_____no_output_____" ] ], [ [ "# Weights and biases\nw = torch.randn(2, 3, requires_grad=True)\nb = torch.randn(2, requires_grad=True)\nprint(w)\nprint(b)", "tensor([[-0.2269, 0.2678, 1.5796],\n [-1.1205, 0.8164, 0.2107]], requires_grad=True)\ntensor([-0.4371, -0.5532], requires_grad=True)\n" ] ], [ [ "`torch.randn` creates a tensor with the given shape, with elements picked randomly from a [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) with mean 0 and standard deviation 1.\n\nOur *model* is simply a function that performs a matrix multiplication of the `inputs` and the weights `w` (transposed) and adds the bias `b` (replicated for each observation).\n\n![matrix-mult](https://i.imgur.com/WGXLFvA.png)\n\nWe can define the model as follows:", "_____no_output_____" ] ], [ [ "def model(x):\n return x @ w.t() + b", "_____no_output_____" ] ], [ [ "`@` represents matrix multiplication in PyTorch, and theย `.t` method returns the transpose of a tensor.\n\nThe matrix obtained by passing the input data into the model is a set of predictions for the target variables.", "_____no_output_____" ] ], [ [ "# Generate predictions\npreds = model(inputs)\nprint(preds)", "tensor([[ 68.8666, -18.5934],\n [103.5783, -17.1936],\n [107.3293, 23.5771],\n [ 46.3805, -71.9462],\n [120.1905, 15.2536]], grad_fn=<AddBackward0>)\n" ] ], [ [ "Let's compare the predictions of our model with the actual targets.", "_____no_output_____" ] ], [ [ "# Compare with targets\nprint(targets)", "tensor([[ 56., 70.],\n [ 81., 101.],\n [119., 133.],\n [ 22., 37.],\n [103., 119.]])\n" ] ], [ [ "You can see that there's a huge difference between the predictions of our model, and the actual values of the target variables. Obviously, this is because we've initialized our model with random weights and biases, and we can't expect it to *just work*.", "_____no_output_____" ], [ "## Loss function\n\nBefore we improve our model, we need a way to evaluate how well our model is performing. We can compare the model's predictions with the actual targets, using the following method:\n\n* Calculate the difference between the two matrices (`preds` and `targets`).\n* Square all elements of the difference matrix to remove negative values.\n* Calculate the average of the elements in the resulting matrix.\n\nThe result is a single number, known as the **mean squared error** (MSE).", "_____no_output_____" ] ], [ [ "# MSE loss\ndef mse(t1, t2):\n diff = t1 - t2\n return torch.sum(diff * diff) / diff.numel()", "_____no_output_____" ] ], [ [ "`torch.sum` returns the sum of all the elements in a tensor, and theย `.numel` method returns the number of elements in a tensor. Let's compute the mean squared error for the current predictions of our model.", "_____no_output_____" ] ], [ [ "# Compute loss\nloss = mse(preds, targets)\nprint(loss)", "tensor(5812.5947, grad_fn=<DivBackward0>)\n" ] ], [ [ "Hereโ€™s how we can interpret the result: *On average, each element in the prediction differs from the actual target by about 145 (square root of the loss 20834)*. And thatโ€™s pretty bad, considering the numbers we are trying to predict are themselves in the range 50โ€“200. Also, the result is called the *loss*, because it indicates how bad the model is at predicting the target variables. Lower the loss, better the model.", "_____no_output_____" ], [ "## Compute gradients\n\nWith PyTorch, we can automatically compute the gradient or derivative of the loss w.r.t. 
to the weights and biases, because they have `requires_grad` set to `True`.", "_____no_output_____" ] ], [ [ "# Compute gradients\nloss.backward()", "_____no_output_____" ] ], [ [ "The gradients are stored in theย `.grad` property of the respective tensors. Note that the derivative of the loss w.r.t. the weights matrix is itself a matrix, with the same dimensions.", "_____no_output_____" ] ], [ [ "# Gradients for weights\nprint(w)\nprint(w.grad)", "tensor([[-0.2269, 0.2678, 1.5796],\n [-1.1205, 0.8164, 0.2107]], requires_grad=True)\ntensor([[ 1130.2988, 796.7463, 685.3582],\n [-9002.7490, -9128.7617, -5802.7393]])\n" ] ], [ [ "The loss is a [quadratic function](https://en.wikipedia.org/wiki/Quadratic_function) of our weights and biases, and our objective is to find the set of weights where the loss is the lowest. If we plot a graph of the loss w.r.t any individual weight or bias element, it will look like the figure shown below. A key insight from calculus is that the gradient indicates the rate of change of the loss, or the [slope](https://en.wikipedia.org/wiki/Slope) of the loss function w.r.t. the weights and biases.\n\nIf a gradient element is **positive**:\n* **increasing** the element's value slightly will **increase** the loss.\n* **decreasing** the element's value slightly will **decrease** the loss\n\n![postive-gradient](https://i.imgur.com/hFYoVgU.png)\n\nIf a gradient element is **negative**:\n* **increasing** the element's value slightly will **decrease** the loss.\n* **decreasing** the element's value slightly will **increase** the loss.\n\n![negative=gradient](https://i.imgur.com/w3Wii7C.png)\n\nThe increase or decrease in loss by changing a weight element is proportional to the value of the gradient of the loss w.r.t. that element. This forms the basis for the optimization algorithm that we'll use to improve our model.", "_____no_output_____" ], [ "Before we proceed, we reset the gradients to zero by callingย `.zero_()` method. We need to do this, because PyTorch accumulates, gradients i.e. the next time we callย `.backward` on the loss, the new gradient values will get added to the existing gradient values, which may lead to unexpected results.", "_____no_output_____" ] ], [ [ "w.grad.zero_()\nb.grad.zero_()\nprint(w.grad)\nprint(b.grad)", "tensor([[0., 0., 0.],\n [0., 0., 0.]])\ntensor([0., 0.])\n" ] ], [ [ "## Adjust weights and biases using gradientย descent\n\nWe'll reduce the loss and improve our model using the gradient descent optimization algorithm, which has the following steps:\n\n1. Generate predictions\n\n2. Calculate the loss\n\n3. Compute gradients w.r.t the weights and biases\n\n4. Adjust the weights by subtracting a small quantity proportional to the gradient\n\n5. Reset the gradients to zero\n\nLet's implement the above step by step.", "_____no_output_____" ] ], [ [ "# Generate predictions\npreds = model(inputs)\nprint(preds)", "tensor([[ 68.8666, -18.5934],\n [103.5783, -17.1936],\n [107.3293, 23.5771],\n [ 46.3805, -71.9462],\n [120.1905, 15.2536]], grad_fn=<AddBackward0>)\n" ] ], [ [ "Note that the predictions are same as before, since we haven't made any changes to our model. 
The same holds true for the loss and gradients.", "_____no_output_____" ] ], [ [ "# Calculate the loss\nloss = mse(preds, targets)\nprint(loss)", "tensor(5812.5947, grad_fn=<DivBackward0>)\n" ], [ "# Compute gradients\nloss.backward()\nprint(w.grad)\nprint(b.grad)", "tensor([[ 1130.2988, 796.7463, 685.3582],\n [-9002.7490, -9128.7617, -5802.7393]])\ntensor([ 13.0690, -105.7805])\n" ] ], [ [ "Finally, we update the weights and biases using the gradients computed above.", "_____no_output_____" ] ], [ [ "# Adjust weights & reset gradients\nwith torch.no_grad():\n w -= w.grad * 1e-5\n b -= b.grad * 1e-5\n w.grad.zero_()\n b.grad.zero_()", "_____no_output_____" ] ], [ [ "A few things to note above:\n\n* We use `torch.no_grad` to indicate to PyTorch that we shouldn't track, calculate or modify gradients while updating the weights and biases.ย \n\n* We multiply the gradients with a really small number (`10^-5` in this case), to ensure that we don't modify the weights by a really large amount, since we only want to take a small step in the downhill direction of the gradient. This number is called the *learning rate* of the algorithm.ย \n\n* After we have updated the weights, we reset the gradients back to zero, to avoid affecting any future computations.", "_____no_output_____" ], [ "Let's take a look at the new weights and biases.", "_____no_output_____" ] ], [ [ "print(w)\nprint(b)", "tensor([[-0.2382, 0.2599, 1.5727],\n [-1.0305, 0.9077, 0.2688]], requires_grad=True)\ntensor([-0.4372, -0.5521], requires_grad=True)\n" ] ], [ [ "With the new weights and biases, the model should have lower loss.", "_____no_output_____" ] ], [ [ "# Calculate loss\npreds = model(inputs)\nloss = mse(preds, targets)\nprint(loss)", "tensor(3987.4089, grad_fn=<DivBackward0>)\n" ] ], [ [ "We have already achieved a significant reduction in the loss, simply by adjusting the weights and biases slightly using gradient descent.", "_____no_output_____" ], [ "## Train for multipleย epochs\n\nTo reduce the loss further, we can repeat the process of adjusting the weights and biases using the gradients multiple times. Each iteration is called an epoch. Let's train the model for 100 epochs.", "_____no_output_____" ] ], [ [ "# Train for 100 epochs\nfor i in range(100):\n preds = model(inputs)\n loss = mse(preds, targets)\n loss.backward()\n with torch.no_grad():\n w -= w.grad * 1e-5\n b -= b.grad * 1e-5\n w.grad.zero_()\n b.grad.zero_()", "_____no_output_____" ] ], [ [ "Once again, let's verify that the loss is now lower:", "_____no_output_____" ] ], [ [ "# Calculate loss\npreds = model(inputs)\nloss = mse(preds, targets)\nprint(loss)", "tensor(92.2694, grad_fn=<DivBackward0>)\n" ] ], [ [ "As you can see, the loss is now much lower than what we started out with. Let's look at the model's predictions and compare them with the targets.", "_____no_output_____" ] ], [ [ "# Predictions\npreds", "_____no_output_____" ], [ "# Targets\ntargets", "_____no_output_____" ] ], [ [ "The prediction are now quite close to the target variables, and we can get even better results by training for a few more epochs. 
\n\nAt this point, we can save our notebook and upload it to [Jovian.ml](https://www.jovian.ml) for future reference and sharing.", "_____no_output_____" ] ], [ [ "!pip install jovian --upgrade -q", "_____no_output_____" ], [ "import jovian", "_____no_output_____" ], [ "jovian.commit()", "_____no_output_____" ] ], [ [ "`jovian.commit` uploads the notebook to [Jovian.ml](https://www.jovian.ml), captures the Python environment and creates a sharable link for the notebook. You can use this link to share your work and let anyone reproduce it easily with the `jovian clone` command. Jovian also includes a powerful commenting interface, so you (and others) can discuss & comment on specific parts of your notebook:\n\n![commenting on jovian](https://cdn-images-1.medium.com/max/1600/1*b4snnr_5Ve5Nyq60iDtuuw.png)", "_____no_output_____" ], [ "## Linear regression using PyTorch built-ins\n\nThe model and training process above were implemented using basic matrix operations. But since this such a common patternย , PyTorch has several built-in functions and classes to make it easy to create and train models.\n\nLet's begin by importing the `torch.nn` package from PyTorch, which contains utility classes for building neural networks.", "_____no_output_____" ] ], [ [ "import torch.nn as nn", "_____no_output_____" ] ], [ [ "As before, we represent the inputs and targets and matrices.", "_____no_output_____" ] ], [ [ "# Input (temp, rainfall, humidity)\ninputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58], \n [102, 43, 37], [69, 96, 70], [73, 67, 43], \n [91, 88, 64], [87, 134, 58], [102, 43, 37], \n [69, 96, 70], [73, 67, 43], [91, 88, 64], \n [87, 134, 58], [102, 43, 37], [69, 96, 70]], \n dtype='float32')\n\n# Targets (apples, oranges)\ntargets = np.array([[56, 70], [81, 101], [119, 133], \n [22, 37], [103, 119], [56, 70], \n [81, 101], [119, 133], [22, 37], \n [103, 119], [56, 70], [81, 101], \n [119, 133], [22, 37], [103, 119]], \n dtype='float32')\n\ninputs = torch.from_numpy(inputs)\ntargets = torch.from_numpy(targets)", "_____no_output_____" ], [ "inputs", "_____no_output_____" ] ], [ [ "We are using 15 training examples this time, to illustrate how to work with large datasets in small batches.", "_____no_output_____" ], [ "## Dataset and DataLoader\n\nWe'll create a `TensorDataset`, which allows access to rows from `inputs` and `targets` as tuples, and provides standard APIs for working with many different types of datasets in PyTorch.", "_____no_output_____" ] ], [ [ "from torch.utils.data import TensorDataset", "_____no_output_____" ], [ "# Define dataset\ntrain_ds = TensorDataset(inputs, targets)\ntrain_ds[0:3]", "_____no_output_____" ] ], [ [ "The `TensorDataset` allows us to access a small section of the training data using the array indexing notation (`[0:3]` in the above code). It returns a tuple (or pair), in which the first element contains the input variables for the selected rows, and the second contains the targets.", "_____no_output_____" ], [ "We'll also create a `DataLoader`, which can split the data into batches of a predefined size while training. It also provides other utilities like shuffling and random sampling of the data.", "_____no_output_____" ] ], [ [ "from torch.utils.data import DataLoader", "_____no_output_____" ], [ "# Define data loader\nbatch_size = 5\ntrain_dl = DataLoader(train_ds, batch_size, shuffle=True)", "_____no_output_____" ] ], [ [ "The data loader is typically used in a `for-in` loop. 
Let's look at an example.", "_____no_output_____" ] ], [ [ "for xb, yb in train_dl:\n print(xb)\n print(yb)\n break", "tensor([[73., 67., 43.],\n [73., 67., 43.],\n [91., 88., 64.],\n [91., 88., 64.],\n [69., 96., 70.]])\ntensor([[ 56., 70.],\n [ 56., 70.],\n [ 81., 101.],\n [ 81., 101.],\n [103., 119.]])\n" ] ], [ [ "In each iteration, the data loader returns one batch of data, with the given batch size. If `shuffle` is set to `True`, it shuffles the training data before creating batches. Shuffling helps randomize the input to the optimization algorithm, which can lead to faster reduction in the loss.", "_____no_output_____" ], [ "## nn.Linear\n\nInstead of initializing the weights & biases manually, we can define the model using the `nn.Linear` class from PyTorch, which does it automatically.", "_____no_output_____" ] ], [ [ "# Define model\nmodel = nn.Linear(3, 2)\nprint(model.weight)\nprint(model.bias)", "Parameter containing:\ntensor([[-0.1238, 0.1825, -0.4556],\n [-0.4230, -0.0333, -0.2770]], requires_grad=True)\nParameter containing:\ntensor([ 0.1319, -0.2120], requires_grad=True)\n" ] ], [ [ "PyTorch models also have a helpful `.parameters` method, which returns a list containing all the weights and bias matrices present in the model. For our linear regression model, we have one weight matrix and one bias matrix.", "_____no_output_____" ] ], [ [ "# Parameters\nlist(model.parameters())", "_____no_output_____" ] ], [ [ "We can use the model to generate predictions in the exact same way as before:", "_____no_output_____" ] ], [ [ "# Generate predictions\npreds = model(inputs)\npreds", "_____no_output_____" ] ], [ [ "## Loss Function\n\nInstead of defining a loss function manually, we can use the built-in loss function `mse_loss`.", "_____no_output_____" ] ], [ [ "# Import nn.functional\nimport torch.nn.functional as F", "_____no_output_____" ] ], [ [ "The `nn.functional` package contains many useful loss functions and several other utilities. ", "_____no_output_____" ] ], [ [ "# Define loss function\nloss_fn = F.mse_loss", "_____no_output_____" ] ], [ [ "Let's compute the loss for the current predictions of our model.", "_____no_output_____" ] ], [ [ "loss = loss_fn(model(inputs), targets)\nprint(loss)", "tensor(16434.4238, grad_fn=<MseLossBackward>)\n" ] ], [ [ "## Optimizer\n\nInstead of manually manipulating the model's weights & biases using gradients, we can use the optimizer `optim.SGD`. SGD stands for `stochastic gradient descent`. It is called `stochastic` because samples are selected in batches (often with random shuffling) instead of as a single group.", "_____no_output_____" ] ], [ [ "# Define optimizer\nopt = torch.optim.SGD(model.parameters(), lr=1e-5)", "_____no_output_____" ] ], [ [ "Note that `model.parameters()` is passed as an argument to `optim.SGD`, so that the optimizer knows which matrices should be modified during the update step. Also, we can specify a learning rate which controls the amount by which the parameters are modified.", "_____no_output_____" ], [ "## Train the model\n\nWe are now ready to train the model. We'll follow the exact same process to implement gradient descent:\n\n1. Generate predictions\n\n2. Calculate the loss\n\n3. Compute gradients w.r.t the weights and biases\n\n4. Adjust the weights by subtracting a small quantity proportional to the gradient\n\n5. Reset the gradients to zero\n\nThe only change is that we'll work batches of data, instead of processing the entire training data in every iteration. 
Let's define a utility function `fit` which trains the model for a given number of epochs.", "_____no_output_____" ] ], [ [ "# Utility function to train the model\ndef fit(num_epochs, model, loss_fn, opt, train_dl):\n \n # Repeat for given number of epochs\n for epoch in range(num_epochs):\n \n # Train with batches of data\n for xb,yb in train_dl:\n \n # 1. Generate predictions\n pred = model(xb)\n \n # 2. Calculate loss\n loss = loss_fn(pred, yb)\n \n # 3. Compute gradients\n loss.backward()\n \n # 4. Update parameters using gradients\n opt.step()\n \n # 5. Reset the gradients to zero\n opt.zero_grad()\n \n # Print the progress\n if (epoch+1) % 10 == 0:\n print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))", "_____no_output_____" ] ], [ [ "Some things to note above:\n\n* We use the data loader defined earlier to get batches of data for every iteration.\n\n* Instead of updating parameters (weights and biases) manually, we use `opt.step` to perform the update, and `opt.zero_grad` to reset the gradients to zero.\n\n* We've also added a log statement which prints the loss from the last batch of data for every 10th epoch, to track the progress of training. `loss.item` returns the actual value stored in the loss tensor.\n\nLet's train the model for 100 epochs.", "_____no_output_____" ] ], [ [ "fit(100, model, loss_fn, opt,train_dl)", "Epoch [10/100], Loss: 103.0426\nEpoch [20/100], Loss: 162.5573\nEpoch [30/100], Loss: 198.4801\nEpoch [40/100], Loss: 60.2136\nEpoch [50/100], Loss: 166.9063\nEpoch [60/100], Loss: 90.8259\nEpoch [70/100], Loss: 80.0923\nEpoch [80/100], Loss: 25.8551\nEpoch [90/100], Loss: 49.2282\nEpoch [100/100], Loss: 64.7186\n" ] ], [ [ "Let's generate predictions using our model and verify that they're close to our targets.", "_____no_output_____" ] ], [ [ "# Generate predictions\npreds = model(inputs)\npreds", "_____no_output_____" ], [ "# Compare with targets\ntargets", "_____no_output_____" ] ], [ [ "Indeed, the predictions are quite close to our targets, and now we have a fairly good model to predict crop yields for apples and oranges by looking at the average temperature, rainfall and humidity in a region.", "_____no_output_____" ], [ "## Commit and update the notebook\n\nAs a final step, we can record a new version of the notebook using the `jovian` library.", "_____no_output_____" ] ], [ [ "import jovian", "_____no_output_____" ], [ "jovian.commit()", "_____no_output_____" ] ], [ [ "Note that running `jovian.commit` a second time records a new version of your existing notebook. With Jovian.ml, you can avoid creating copies of your Jupyter notebooks and keep versions organized. Jovian also provides a visual diff ([example](https://jovian.ml/aakashns/keras-mnist-jovian/diff?base=8&remote=2)) so you can inspect what has changed between different versions:\n\n![jovian-commenting](https://i.imgur.com/HF1cOVt.png)", "_____no_output_____" ], [ "## Further Reading\n\nWe've covered a lot of ground this this tutorial, including *linear regression* and the *gradient descent* optimization algorithm. Here are a few resources if you'd like to dig deeper into these topics:\n\n* For a more detailed explanation of derivates and gradient descent, see [these notes from a Udacity course](https://storage.googleapis.com/supplemental_media/udacityu/315142919/Gradient%20Descent.pdf). 
\n\n* For an animated visualization of how linear regression works, [see this post](https://hackernoon.com/visualizing-linear-regression-with-pytorch-9261f49edb09).\n\n* For a more mathematical treatment of matrix calculus, linear regression and gradient descent, you should check out [Andrew Ng's excellent course notes](https://github.com/Cleo-Stanford-CS/CS229_Notes/blob/master/lectures/cs229-notes1.pdf) from CS229 at Stanford University.\n\n* To practice and test your skills, you can participate in the [Boston Housing Price Prediction](https://www.kaggle.com/c/boston-housing) competition on Kaggle, a website that hosts data science competitions.", "_____no_output_____" ], [ "With this, we complete our discussion of linear regression in PyTorch, and we're ready to move on to the next topic: *Logistic regression*.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cb12436964dd35c1c170d6e06f90e096bfbd5daa
291,266
ipynb
Jupyter Notebook
aiida_bigdft/PyBigDFT/source/notebooks/Spin-Polarization.ipynb
mikiec84/aiida-bigdft-plugin
ce6ddc69def97977fe0209861ea7f1637090b60f
[ "MIT" ]
1
2020-08-05T19:01:51.000Z
2020-08-05T19:01:51.000Z
aiida_bigdft/PyBigDFT/source/notebooks/Spin-Polarization.ipynb
mikiec84/aiida-bigdft-plugin
ce6ddc69def97977fe0209861ea7f1637090b60f
[ "MIT" ]
null
null
null
aiida_bigdft/PyBigDFT/source/notebooks/Spin-Polarization.ipynb
mikiec84/aiida-bigdft-plugin
ce6ddc69def97977fe0209861ea7f1637090b60f
[ "MIT" ]
null
null
null
536.401473
38,904
0.945466
[ [ [ "# Spin-polarized calculations with BigDFT\n\nThe goal of this notebook is to explain how to do a spin-polarized calculation with BigDFT (`nspin=2`).\nWe start with the molecule O$_2$ and a non-spin polarized calculation, which is the code default.\nTo do that we only have to specify the atomic positions of the molecule.", "_____no_output_____" ] ], [ [ "from BigDFT import Calculators as C\ncalc = C.SystemCalculator()\nposO1=3*[0.0]\nposO2=[0.0, 0.0, 1.2075] # in angstroem\ninpt={'posinp': \n { 'positions': [ {'O': posO1 }, {'O': posO2 }], 'units': 'angstroem' }}\nlogNSP = calc.run(input=inpt)", "Initialize a Calculator with OMP_NUM_THREADS=2 and command mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\nCreating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\n" ] ], [ [ "Such calculation produced a converged set of KS LDA orbitals, with the following density of states:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nDoS=logNSP.get_dos(label='NSP')\nDoS.plot()", "_____no_output_____" ] ], [ [ "Now we do the same calculation but with spin-polarized specifying `nspin=2`, in the `dft` field.", "_____no_output_____" ] ], [ [ "inpt['dft']={'nspin': 2}\nlogSP = calc.run(input=inpt)", "Creating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\n" ] ], [ [ "We may see that this run did not produce any difference with respect to the previous one. Even though we doubled the number of orbitals, the input guess wavefunctions and densities are identical in both the spin sectors. As a consequence the energy and the DoS are identical to the NSP case:", "_____no_output_____" ] ], [ [ "print logNSP.energy,logSP.energy\nDoS.append_from_bandarray(logSP.evals,label='SP (m 0)')\nDoS.plot()", "-31.8014240962 -31.8014240962\n" ] ], [ [ "This is due to the fact that:\n 1. We had the same input guess for up and down subspaces;\n 2. We had the same number of orbitals in both the sectors and no empty orbitals during the minimization.\n \nSuch problems can be solved at the same time by performing mixing scheme with *random* initialization of the wavefunctions:", "_____no_output_____" ] ], [ [ "inpt['import']='mixing'\ninpt['mix']={'iscf': 12, 'itrpmax': 20} # mixing on the potential, just 20 Hamiltonian iterations for a quick look\ninpt['dft']['inputpsiid']= 'RANDOM' #for random initialization\nlogSP_mix = calc.run(input=inpt)", "Creating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\n" ] ], [ [ "We see that with these input parameters the DoS is different from the NSP case, the energy is lower and the net polarization is 2:", "_____no_output_____" ] ], [ [ "print logNSP.energy,logSP_mix.energy\nDoS.append_from_bandarray(logSP_mix.evals,label='SP mix(m 0, RAND)')\nDoS.plot()\nprint 'Magnetic Polarization', logSP_mix.magnetization", "-31.8014240962 -31.86269676\n" ] ], [ [ "We see that to break the symmetry it is therefore necessary to have different IG subspaces between up and down orbitals, otherwise the results will be identical to the NSP case.\nNow that we know the polarization of the molecule, we may perform a direct minimization calculation of the molecule by specifying from the beginning the `mpol: 2` condition. 
We can also add some empty orbitals using the keyword `norbsempty`.", "_____no_output_____" ] ], [ [ "inpt={'dft': { 'nspin': 2, 'mpol': 2},\n 'mix': { 'norbsempty': 2 },\n 'posinp': \n { 'positions': [ {'O': posO1 }, {'O': posO2 }], 'units': 'angstroem' } }\nlogSP_m2 = calc.run(input=inpt)\nprint logSP_mix.energy,logSP_m2.energy\n\nDoS.append_from_bandarray(logSP_m2.evals,label='SP m 2')\nDoS.plot()", "Creating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\n-31.86269676 -31.8634464905\n" ] ], [ [ "We show that the total magnetization is 2 in the case of the oxygen dimer. The DoS is not exactly the same because the mixing scheme was not fully converged (check increasing the value of `itrpmax`).", "_____no_output_____" ] ], [ [ "DoS=logSP_mix.get_dos(label='SP mix')\nDoS.append_from_bandarray(logSP_m2.evals,label='SP m 2')\nDoS.plot()", "_____no_output_____" ] ], [ [ "## Odd electron system: the N atom\nWhat does happen when the number of electrons is odd as in the case of N?\nIf we do a NSP calculation, the occupation of the last state is 1. Switching only the parameter `nspin` to the value 2, we do the same calculation with averaged-occupation (0.5 for the last up and down state).\n\nTo do a spin-polarisation calculation, we need to change mpol which is the difference between the number of occupied electrons of different spins.\nIn the same way, we can look for the total magnetization using the mixing scheme.", "_____no_output_____" ] ], [ [ "inpt = { 'dft': { 'nspin': 1}, \n 'posinp': { 'units': 'angstroem', \n 'positions': [ {'N': 3*[0.0] } ] } }\nlogNSP = calc.run(input=inpt)\n\ninpt['dft']['nspin'] = 2\nlogSP = calc.run(input=inpt)\n\nprint logNSP.energy,logSP.energy\nprint logNSP.fermi_level,logSP.fermi_level\nDoS=logNSP.get_dos(label='NSP')\nDoS.append_from_bandarray(logSP.evals,label='SP')\nDoS.plot()", "Creating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\nCreating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\n-9.59484936065 -9.59484935017\n-0.240952278062 -0.240952993423\n" ], [ "inpt['dft']['inputpsiid']='RANDOM' #Random input guess\ninpt['mix']={'iscf': 12, 'itrpmax': 30} # mixing on the potential, just 30 Hamiltonian iterations for a quick look\ninpt['import'] = 'mixing'\nlogSP_mix = calc.run(input=inpt)\nprint logSP_mix.magnetization\n\nDoS.append_from_bandarray(logSP_mix.evals,label='SP mix')\nDoS.plot()", "Creating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\n3.0\n" ] ], [ [ "We found a total magnetization of 3 following the Hund's rule.", "_____no_output_____" ], [ "## Defining the input guess (*ig_occupation* keyword)\nWe have shown that by default, the input guess is LCAO (localised atomic orbitals) defining by the pseudo-orbitals.\nThe occupation is sphere symmetry (same occupation per orbital moment).\nWe have used random input guess to break the spin symmetry.\nWe can also use an LCAO input guess and indicate the occupation number for the input guess using the keyword `ig_occupation` in order to break the spin symmetry", "_____no_output_____" ] ], [ [ "inpt['dft']['inputpsiid']='LCAO' #LCAO input guess\ninpt['ig_occupation'] = { 'N': { '2s': { 'up': 1, 'down': 1}, '2p': {'up': [1,1,1], 'down': 0} } }\nlogLCAO_mix 
= calc.run(input=inpt)\nprint logSP_mix.energy,logLCAO_mix.energy\n\nDoS=logSP_mix.get_dos(label='SP RAN')\nDoS.append_from_bandarray(logLCAO_mix.evals,label='SP LCAO')\nDoS.plot()", "Creating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\n-9.74145176349 -9.74158042631\n" ] ], [ [ "Instead of `ig_occupation`, it is also possible to specify the keyword `IGSpin` per atom in the `posinp` dictionary.", "_____no_output_____" ] ], [ [ "inpt = { 'dft': { 'nspin': 2, 'mpol': 3}, \n 'posinp': { 'units': 'angstroem', \n 'positions': [ {'N': 3*[0.0], 'IGSpin': 3 } ] },\n 'ig_occupation': { 'N': { '2s': { 'up': 1, 'down': 1}, \n '2p': { 'up': [1,1,1], 'down': 0} } } }\n\nlogIG = calc.run(input=inpt)\nprint logSP_mix.energy,logLCAO_mix.energy,logIG.energy\n\nDoS=logLCAO_mix.get_dos(label='LCAO ig_occ')\nDoS.append_from_bandarray(logIG.evals,label='LCAO IGSpin')\nDoS.plot()", "Creating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\n-9.74145176349 -9.74158042631 -9.74158042096\n" ] ], [ [ "## Occupation numbers\nFinally, it is possible to set the occupation numbers for each state by the parameter `occup`.\nIn this case, the direct minimization is done with this occupation number.\nIn the case of N, there are 8 orbitals, the first 4 are up and the other ones down.\nHere we do a calculation following the Hund's rule.", "_____no_output_____" ] ], [ [ "del inpt['ig_occupation']\ninpt['occupation'] = { 'up': { 'Orbital 1': 1, 'Orbital 2': 1, 'Orbital 3': 1, 'Orbital 4': 1 }, # up\n 'down': { 'Orbital 1': 1, 'Orbital 2': 0, 'Orbital 3': 0, 'Orbital 4': 0 } }# down\nlogS = calc.run(input=inpt)\nprint logSP_mix.energy,logLCAO_mix.energy,logIG.energy,logS.energy\n\nDoS.append_from_bandarray(logS.evals,label='SP occup')\nDoS.plot()", "Creating the yaml input file \"input.yaml\"\nExecuting command: mpirun -np 2 /local/deutsch/Forge/BigDFT/build-mpif90-openmp/install/bin/bigdft\n-9.74145176349 -9.74158042631 -9.74158042096 -9.7415803697\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb12462423b65973a09f5aff6437b1e46b2e7f2a
415,053
ipynb
Jupyter Notebook
gc-ai-notebook-tutorials/tutorials/fast-and-lean-data-science/01_MNIST_TPU_Keras.ipynb
kalona/training-data-analyst
fd619ea2c63519463b759393e818078c5a60df15
[ "Apache-2.0" ]
7
2019-05-10T14:13:40.000Z
2022-01-19T16:59:04.000Z
gc-ai-notebook-tutorials/tutorials/fast-and-lean-data-science/01_MNIST_TPU_Keras.ipynb
kalona/training-data-analyst
fd619ea2c63519463b759393e818078c5a60df15
[ "Apache-2.0" ]
11
2020-01-28T22:39:44.000Z
2022-03-11T23:42:53.000Z
gc-ai-notebook-tutorials/tutorials/fast-and-lean-data-science/01_MNIST_TPU_Keras.ipynb
kalona/training-data-analyst
fd619ea2c63519463b759393e818078c5a60df15
[ "Apache-2.0" ]
8
2020-02-03T18:31:37.000Z
2021-08-13T13:58:54.000Z
374.258792
66,428
0.899726
[ [ [ "<a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/training-data-analyst/blob/master/courses/fast-and-lean-data-science/01_MNIST_TPU_Keras.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "## MNIST on TPU (Tensor Processing Unit)<br>or GPU using tf.Keras and tf.data.Dataset\n<table><tr><td><img valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/keras-tensorflow-tpu300px.png\" width=\"300\" alt=\"Keras+Tensorflow+Cloud TPU\"></td></tr></table>\n\n\nThis sample trains an \"MNIST\" handwritten digit \nrecognition model on a GPU or TPU backend using a Keras\nmodel. Data are handled using the tf.data.Datset API. This is\na very simple sample provided for educational purposes. Do\nnot expect outstanding TPU performance on a dataset as\nsmall as MNIST.\n\n<h3><a href=\"https://cloud.google.com/gpu/\"><img valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/gpu-hexagon.png\" width=\"50\"></a> &nbsp;&nbsp;Train on GPU or TPU&nbsp;&nbsp; <a href=\"https://cloud.google.com/tpu/\"><img valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png\" width=\"50\"></a></h3>\n\n 1. Select a GPU or TPU backend (Runtime > Change runtime type) \n 1. Runtime > Run All (Watch out: the \"Colab-only auth\" cell requires user input)\n\n<h3><a href=\"https://cloud.google.com/ml-engine/\"><img valign=\"middle\" src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/mlengine-hexagon.png\" width=\"50\"></a> &nbsp;&nbsp;Deploy to ML Engine</h3>\n1. At the bottom of this notebook you can deploy your trained model to ML Engine for a serverless, autoscaled, REST API experience. You will need a GCP project and a GCS bucket for this last part.\n\nTPUs are located in Google Cloud, for optimal performance, they read data directly from Google Cloud Storage (GCS)", "_____no_output_____" ], [ "### Parameters", "_____no_output_____" ] ], [ [ "BATCH_SIZE = 128 # On TPU, this will be the per-core batch size. A Cloud TPU has 8 cores so tha global TPU batch size is 1024\n\ntraining_images_file = 'gs://mnist-public/train-images-idx3-ubyte'\ntraining_labels_file = 'gs://mnist-public/train-labels-idx1-ubyte'\nvalidation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte'\nvalidation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte'", "_____no_output_____" ] ], [ [ "### Imports", "_____no_output_____" ] ], [ [ "import os, re, math, json, shutil, pprint\nimport PIL.Image, PIL.ImageFont, PIL.ImageDraw\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nfrom tensorflow.python.platform import tf_logging\nprint(\"Tensorflow version \" + tf.__version__)", "Tensorflow version 1.13.0-rc1\n" ], [ "#@title visualization utilities [RUN ME]\n\"\"\"\nThis cell contains helper functions used for visualization\nand downloads only. You can skip reading it. 
There is very\nlittle useful Keras/Tensorflow code here.\n\"\"\"\n\n# Matplotlib config\nplt.rc('image', cmap='gray_r')\nplt.rc('grid', linewidth=0)\nplt.rc('xtick', top=False, bottom=False, labelsize='large')\nplt.rc('ytick', left=False, right=False, labelsize='large')\nplt.rc('axes', facecolor='F8F8F8', titlesize=\"large\", edgecolor='white')\nplt.rc('text', color='a8151a')\nplt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts\nMATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), \"mpl-data/fonts/ttf\")\n\n# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)\ndef dataset_to_numpy_util(training_dataset, validation_dataset, N):\n \n # get one batch from each: 10000 validation digits, N training digits\n unbatched_train_ds = training_dataset.apply(tf.data.experimental.unbatch())\n v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next()\n t_images, t_labels = unbatched_train_ds.batch(N).make_one_shot_iterator().get_next()\n \n # Run once, get one batch. Session.run returns numpy results\n with tf.Session() as ses:\n (validation_digits, validation_labels,\n training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels])\n \n # these were one-hot encoded in the dataset\n validation_labels = np.argmax(validation_labels, axis=1)\n training_labels = np.argmax(training_labels, axis=1)\n \n return (training_digits, training_labels,\n validation_digits, validation_labels)\n\n# create digits from local fonts for testing\ndef create_digits_from_local_fonts(n):\n font_labels = []\n img = PIL.Image.new('LA', (28*n, 28), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1\n font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)\n font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)\n d = PIL.ImageDraw.Draw(img)\n for i in range(n):\n font_labels.append(i%10)\n d.text((7+i*28,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2)\n font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded)\n font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28])\n return font_digits, font_labels\n\n# utility to display a row of digits with their predictions\ndef display_digits(digits, predictions, labels, title, n):\n plt.figure(figsize=(13,3))\n digits = np.reshape(digits, [n, 28, 28])\n digits = np.swapaxes(digits, 0, 1)\n digits = np.reshape(digits, [28, 28*n])\n plt.yticks([])\n plt.xticks([28*x+14 for x in range(n)], predictions)\n for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):\n if predictions[i] != labels[i]: t.set_color('red') # bad predictions in red\n plt.imshow(digits)\n plt.grid(None)\n plt.title(title)\n \n# utility to display multiple rows of digits, sorted by unrecognized/recognized status\ndef display_top_unrecognized(digits, predictions, labels, n, lines):\n idx = np.argsort(predictions==labels) # sort order: unrecognized first\n for i in range(lines):\n display_digits(digits[idx][i*n:(i+1)*n], predictions[idx][i*n:(i+1)*n], labels[idx][i*n:(i+1)*n],\n \"{} sample validation digits out of {} with bad predictions in red and sorted first\".format(n*lines, len(digits)) if i==0 else \"\", n)\n \n# utility to display training and validation curves\ndef display_training_curves(training, validation, title, subplot):\n if subplot%10==1: # set up the subplots on the 
first call\n plt.subplots(figsize=(10,10), facecolor='#F0F0F0')\n plt.tight_layout()\n ax = plt.subplot(subplot)\n ax.grid(linewidth=1, color='white')\n ax.plot(training)\n ax.plot(validation)\n ax.set_title('model '+ title)\n ax.set_ylabel(title)\n ax.set_xlabel('epoch')\n ax.legend(['train', 'valid.'])", "_____no_output_____" ] ], [ [ "### Colab-only auth for this notebook and the TPU", "_____no_output_____" ] ], [ [ "IS_COLAB_BACKEND = 'COLAB_GPU' in os.environ # this is always set on Colab, the value is 0 or 1 depending on GPU presence\nif IS_COLAB_BACKEND:\n from google.colab import auth\n auth.authenticate_user() # Authenticates the backend and also the TPU using your credentials so that they can access your private GCS buckets", "_____no_output_____" ] ], [ [ "### tf.data.Dataset: parse files and prepare training and validation datasets\nPlease read the [best practices for building](https://www.tensorflow.org/guide/performance/datasets) input pipelines with tf.data.Dataset", "_____no_output_____" ] ], [ [ "def read_label(tf_bytestring):\n label = tf.decode_raw(tf_bytestring, tf.uint8)\n label = tf.reshape(label, [])\n label = tf.one_hot(label, 10)\n return label\n \ndef read_image(tf_bytestring):\n image = tf.decode_raw(tf_bytestring, tf.uint8)\n image = tf.cast(image, tf.float32)/256.0\n image = tf.reshape(image, [28*28])\n return image\n \ndef load_dataset(image_file, label_file):\n imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16)\n imagedataset = imagedataset.map(read_image, num_parallel_calls=16)\n labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8)\n labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16)\n dataset = tf.data.Dataset.zip((imagedataset, labelsdataset))\n return dataset \n \ndef get_training_dataset(image_file, label_file, batch_size):\n dataset = load_dataset(image_file, label_file)\n dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset\n dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)\n dataset = dataset.repeat() # Mandatory for Keras for now\n dataset = dataset.batch(batch_size, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed\n dataset = dataset.prefetch(-1) # fetch next batches while training on the current one (-1: autotune prefetch buffer size)\n return dataset\n \ndef get_validation_dataset(image_file, label_file):\n dataset = load_dataset(image_file, label_file)\n dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset\n dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch\n dataset = dataset.repeat() # Mandatory for Keras for now\n return dataset\n\n# instantiate the datasets\ntraining_dataset = get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)\nvalidation_dataset = get_validation_dataset(validation_images_file, validation_labels_file)\n\n# For TPU, we will need a function that returns the dataset\ntraining_input_fn = lambda: get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)\nvalidation_input_fn = lambda: get_validation_dataset(validation_images_file, validation_labels_file)", "_____no_output_____" ] ], [ [ "### Let's have a look at the data", "_____no_output_____" ] ], [ [ "N = 24\n(training_digits, training_labels,\n 
validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N)\ndisplay_digits(training_digits, training_labels, training_labels, \"training digits and their labels\", N)\ndisplay_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], \"validation digits and their labels\", N)\nfont_digits, font_labels = create_digits_from_local_fonts(N)", "_____no_output_____" ] ], [ [ "### Keras model: 3 convolutional layers, 2 dense layers\nIf you are not sure what cross-entropy, dropout, softmax or batch-normalization mean, head here for a crash-course: [Tensorflow and deep learning without a PhD](https://github.com/GoogleCloudPlatform/tensorflow-without-a-phd/#featured-code-sample)", "_____no_output_____" ] ], [ [ "# This model trains to 99.4% sometimes 99.5% accuracy in 10 epochs (with a batch size of 32)\n\nl = tf.keras.layers\nmodel = tf.keras.Sequential(\n [\n l.Reshape(input_shape=(28*28,), target_shape=(28, 28, 1)),\n\n l.Conv2D(filters=6, kernel_size=3, padding='same', use_bias=False), # no bias necessary before batch norm\n l.BatchNormalization(scale=False, center=True), # no batch norm scaling necessary before \"relu\"\n l.Activation('relu'), # activation after batch norm\n\n l.Conv2D(filters=12, kernel_size=6, padding='same', use_bias=False, strides=2),\n l.BatchNormalization(scale=False, center=True),\n l.Activation('relu'),\n\n l.Conv2D(filters=24, kernel_size=6, padding='same', use_bias=False, strides=2),\n l.BatchNormalization(scale=False, center=True),\n l.Activation('relu'),\n\n l.Flatten(),\n l.Dense(200, use_bias=False),\n l.BatchNormalization(scale=False, center=True),\n l.Activation('relu'),\n l.Dropout(0.5), # Dropout on dense layer only\n\n l.Dense(10, activation='softmax')\n ])\n\nmodel.compile(optimizer='adam', # learning rate will be set by LearningRateScheduler\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n# print model layers\nmodel.summary()\n\n# set up learning rate decay\nlr_decay = tf.keras.callbacks.LearningRateScheduler(lambda epoch: 0.0001 + 0.02 * math.pow(0.5, 1+epoch), verbose=True)", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nreshape_4 (Reshape) (None, 28, 28, 1) 0 \n_________________________________________________________________\nconv2d_12 (Conv2D) (None, 28, 28, 6) 54 \n_________________________________________________________________\nbatch_normalization_v1_16 (B (None, 28, 28, 6) 18 \n_________________________________________________________________\nactivation_16 (Activation) (None, 28, 28, 6) 0 \n_________________________________________________________________\nconv2d_13 (Conv2D) (None, 14, 14, 12) 2592 \n_________________________________________________________________\nbatch_normalization_v1_17 (B (None, 14, 14, 12) 36 \n_________________________________________________________________\nactivation_17 (Activation) (None, 14, 14, 12) 0 \n_________________________________________________________________\nconv2d_14 (Conv2D) (None, 7, 7, 24) 10368 \n_________________________________________________________________\nbatch_normalization_v1_18 (B (None, 7, 7, 24) 72 \n_________________________________________________________________\nactivation_18 (Activation) (None, 7, 7, 24) 0 \n_________________________________________________________________\nflatten_4 (Flatten) (None, 1176) 0 \n_________________________________________________________________\ndense_8 
(Dense) (None, 200) 235200 \n_________________________________________________________________\nbatch_normalization_v1_19 (B (None, 200) 600 \n_________________________________________________________________\nactivation_19 (Activation) (None, 200) 0 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 200) 0 \n_________________________________________________________________\ndense_9 (Dense) (None, 10) 2010 \n=================================================================\nTotal params: 250,950\nTrainable params: 250,466\nNon-trainable params: 484\n_________________________________________________________________\n" ] ], [ [ "### Train and validate the model", "_____no_output_____" ] ], [ [ "EPOCHS = 10\nsteps_per_epoch = 60000//BATCH_SIZE # 60,000 items in this dataset\ntpu = None\ntrained_model = model\n\n# Counting steps and batches on TPU: the tpu.keras_to_tpu_model API regards the batch size of the input dataset\n# as the per-core batch size. The effective batch size is 8x more because Cloud TPUs have 8 cores. It increments\n# the step by +8 everytime a global batch (8 per-core batches) is processed. Therefore batch size and steps_per_epoch\n# settings can stay as they are for TPU training. The training will just go faster.\n# Warning: this might change in the final version of the Keras/TPU API.\n\ntry: # TPU detection\n tpu = tf.contrib.cluster_resolver.TPUClusterResolver() # Picks up a connected TPU on Google's Colab, ML Engine, Kubernetes and Deep Learning VMs accessed through the 'ctpu up' utility\n #tpu = tf.contrib.cluster_resolver.TPUClusterResolver('MY_TPU_NAME') # If auto-detection does not work, you can pass the name of the TPU explicitly (tip: on a VM created with \"ctpu up\" the TPU has the same name as the VM)\nexcept ValueError:\n print('Training on GPU/CPU')\n \nif tpu: # TPU training\n strategy = tf.contrib.tpu.TPUDistributionStrategy(tpu)\n trained_model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)\n # Work in progress: reading directly from dataset object not yet implemented\n # for Keras/TPU. 
Keras/TPU needs a function that returns a dataset.\n history = trained_model.fit(training_input_fn, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,\n validation_data=validation_input_fn, validation_steps=1, callbacks=[lr_decay])\nelse: # GPU/CPU training\n history = trained_model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,\n validation_data=validation_dataset, validation_steps=1, callbacks=[lr_decay]) ", "INFO:tensorflow:Querying Tensorflow master (grpc://10.84.162.34:8470) for TPU system metadata.\nINFO:tensorflow:Found TPU system:\nINFO:tensorflow:*** Num TPU Cores: 8\nINFO:tensorflow:*** Num TPU Workers: 1\nINFO:tensorflow:*** Num TPU Cores Per Worker: 8\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:CPU:0, CPU, -1, 11566067736949806097)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:XLA_CPU:0, XLA_CPU, 17179869184, 9640771594661886412)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:0, TPU, 17179869184, 934470963966597236)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:1, TPU, 17179869184, 10577843733729534585)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:2, TPU, 17179869184, 8735154692529739305)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:3, TPU, 17179869184, 17139903440165161182)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:4, TPU, 17179869184, 14146987140115808650)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:5, TPU, 17179869184, 183212961565301714)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:6, TPU, 17179869184, 617792811587803339)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:7, TPU, 17179869184, 14610573210226086519)\nINFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU_SYSTEM:0, TPU_SYSTEM, 17179869184, 12186081503673171976)\nWARNING:tensorflow:tpu_model (from tensorflow.contrib.tpu.python.tpu.keras_support) is experimental and may change or be removed at any time, and without warning.\nINFO:tensorflow:Cloning Adam {'lr': 0.0010000000474974513, 'beta_1': 0.8999999761581421, 'beta_2': 0.9990000128746033, 'decay': 0.0, 'epsilon': 1e-07, 'amsgrad': False}\nINFO:tensorflow:Cloning Adam {'lr': 0.0010000000474974513, 'beta_1': 0.8999999761581421, 'beta_2': 0.9990000128746033, 'decay': 0.0, 'epsilon': 1e-07, 'amsgrad': False}\n\nEpoch 00001: LearningRateScheduler reducing learning rate to 0.0101.\nEpoch 1/10\nINFO:tensorflow:New input shapes; (re-)compiling: mode=train (# of cores 8), [TensorSpec(shape=(128,), dtype=tf.int32, name=None), TensorSpec(shape=(128, 784), dtype=tf.float32, name=None), TensorSpec(shape=(128, 10), dtype=tf.float32, name=None)]\nINFO:tensorflow:Overriding default placeholder.\nINFO:tensorflow:Cloning Adam {'lr': 0.010099999606609344, 'beta_1': 0.8999999761581421, 'beta_2': 0.9990000128746033, 'decay': 0.0, 'epsilon': 1e-07, 'amsgrad': False}\nINFO:tensorflow:Remapping placeholder for reshape_4_input\nINFO:tensorflow:KerasCrossShard: <tensorflow.python.keras.optimizers.Adam object at 0x7f69b8639c50> []\nINFO:tensorflow:Started compiling\nINFO:tensorflow:Finished compiling. 
Time elapsed: 9.458511114120483 secs\nINFO:tensorflow:Setting weights on TPU model.\nINFO:tensorflow:CPU -> TPU lr: 0.010099999606609344 {0.0101}\nINFO:tensorflow:CPU -> TPU beta_1: 0.8999999761581421 {0.9}\nINFO:tensorflow:CPU -> TPU beta_2: 0.9990000128746033 {0.999}\nINFO:tensorflow:CPU -> TPU decay: 0.0 {0.0}\nWARNING:tensorflow:Cannot update non-variable config: epsilon\nWARNING:tensorflow:Cannot update non-variable config: amsgrad\n463/468 [============================>.] - ETA: 0s - loss: 0.0601 - acc: 0.9817INFO:tensorflow:New input shapes; (re-)compiling: mode=eval (# of cores 8), [TensorSpec(shape=(10000,), dtype=tf.int32, name=None), TensorSpec(shape=(10000, 784), dtype=tf.float32, name=None), TensorSpec(shape=(10000, 10), dtype=tf.float32, name=None)]\nINFO:tensorflow:Overriding default placeholder.\nINFO:tensorflow:Cloning Adam {'lr': 0.010099999606609344, 'beta_1': 0.8999999761581421, 'beta_2': 0.9990000128746033, 'decay': 0.0, 'epsilon': 1e-07, 'amsgrad': False}\nINFO:tensorflow:Remapping placeholder for reshape_4_input\nINFO:tensorflow:KerasCrossShard: <tensorflow.python.keras.optimizers.Adam object at 0x7f69b63db080> []\nINFO:tensorflow:Started compiling\nINFO:tensorflow:Finished compiling. Time elapsed: 9.295300960540771 secs\n468/468 [==============================] - 64s 137ms/step - loss: 0.0596 - acc: 0.9818 - val_loss: 0.0319 - val_acc: 0.9898\n\nEpoch 00002: LearningRateScheduler reducing learning rate to 0.0051.\nEpoch 2/10\n468/468 [==============================] - 5s 11ms/step - loss: 0.0083 - acc: 0.9975 - val_loss: 0.0226 - val_acc: 0.9924\n\nEpoch 00003: LearningRateScheduler reducing learning rate to 0.0026.\nEpoch 3/10\n468/468 [==============================] - 5s 10ms/step - loss: 0.0030 - acc: 0.9992 - val_loss: 0.0264 - val_acc: 0.9924\n\nEpoch 00004: LearningRateScheduler reducing learning rate to 0.00135.\nEpoch 4/10\n468/468 [==============================] - 5s 10ms/step - loss: 0.0016 - acc: 0.9996 - val_loss: 0.0220 - val_acc: 0.9934\n\nEpoch 00005: LearningRateScheduler reducing learning rate to 0.0007250000000000001.\nEpoch 5/10\n468/468 [==============================] - 5s 11ms/step - loss: 0.0011 - acc: 0.9998 - val_loss: 0.0221 - val_acc: 0.9937\n\nEpoch 00006: LearningRateScheduler reducing learning rate to 0.0004125.\nEpoch 6/10\n468/468 [==============================] - 5s 11ms/step - loss: 8.7117e-04 - acc: 0.9999 - val_loss: 0.0217 - val_acc: 0.9946\n\nEpoch 00007: LearningRateScheduler reducing learning rate to 0.00025625.\nEpoch 7/10\n468/468 [==============================] - 5s 11ms/step - loss: 7.7994e-04 - acc: 0.9999 - val_loss: 0.0225 - val_acc: 0.9945\n\nEpoch 00008: LearningRateScheduler reducing learning rate to 0.000178125.\nEpoch 8/10\n468/468 [==============================] - 5s 11ms/step - loss: 6.8725e-04 - acc: 0.9999 - val_loss: 0.0230 - val_acc: 0.9941\n\nEpoch 00009: LearningRateScheduler reducing learning rate to 0.0001390625.\nEpoch 9/10\n468/468 [==============================] - 5s 11ms/step - loss: 6.2393e-04 - acc: 0.9999 - val_loss: 0.0225 - val_acc: 0.9942\n\nEpoch 00010: LearningRateScheduler reducing learning rate to 0.00011953125000000001.\nEpoch 10/10\n468/468 [==============================] - 5s 11ms/step - loss: 5.6719e-04 - acc: 0.9999 - val_loss: 0.0230 - val_acc: 0.9943\n" ] ], [ [ "### Visualize training and validation curves", "_____no_output_____" ] ], [ [ "print(history.history.keys())\ndisplay_training_curves(history.history['acc'], history.history['val_acc'], 'accuracy', 
211)\ndisplay_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 212)", "dict_keys(['val_loss', 'val_acc', 'loss', 'acc', 'lr'])\n" ] ], [ [ "### Visualize predictions", "_____no_output_____" ] ], [ [ "# recognize digits from local fonts\nprobabilities = trained_model.predict(font_digits, steps=1)\npredicted_labels = np.argmax(probabilities, axis=1)\ndisplay_digits(font_digits, predicted_labels, font_labels, \"predictions from local fonts (bad predictions in red)\", N)\n\n# recognize validation digits\nprobabilities = trained_model.predict(validation_digits, steps=1)\npredicted_labels = np.argmax(probabilities, axis=1)\ndisplay_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7)", "_____no_output_____" ] ], [ [ "## Deploy the trained model to ML Engine\n\nPush your trained model to production on ML Engine for a serverless, autoscaled, REST API experience.\n\nYou will need a GCS bucket and a GCP project for this.\nModels deployed on ML Engine autoscale to zero if not used. There will be no ML Engine charges after you are done testing.\nGoogle Cloud Storage incurs charges. Empty the bucket after deployment if you want to avoid these. Once the model is deployed, the bucket is not useful anymore.", "_____no_output_____" ], [ "### Configuration", "_____no_output_____" ] ], [ [ "PROJECT = \"\" #@param {type:\"string\"}\nBUCKET = \"gs://\" #@param {type:\"string\", default:\"jddj\"}\nNEW_MODEL = True #@param {type:\"boolean\"}\nMODEL_NAME = \"colabmnist\" #@param {type:\"string\"}\nMODEL_VERSION = \"v0\" #@param {type:\"string\"}\n\nassert PROJECT, 'For this part, you need a GCP project. Head to http://console.cloud.google.com/ and create one.'\nassert re.search(r'gs://.+', BUCKET), 'For this part, you need a GCS bucket. Head to http://console.cloud.google.com/storage and create one.'", "_____no_output_____" ] ], [ [ "### Export the model for serving from ML Engine", "_____no_output_____" ] ], [ [ "class ServingInput(tf.keras.layers.Layer):\n # the important detail in this boilerplate code is \"trainable=False\"\n def __init__(self, name, dtype, batch_input_shape=None):\n super(ServingInput, self).__init__(trainable=False, name=name, dtype=dtype, batch_input_shape=batch_input_shape)\n def get_config(self):\n return {'batch_input_shape': self._batch_input_shape, 'dtype': self.dtype, 'name': self.name }\n\n def call(self, inputs):\n # When the deployed model is called through its REST API,\n # the JSON payload is parsed automatically, transformed into\n # a tensor and passed to this input layer. You can perform\n # additional transformations, such as decoding JPEGs for example,\n # before sending the data to your model. However, you can only\n # use tf.xxxx operations.\n return inputs\n\n# little wrinkle: must copy the model from TPU to CPU manually. 
This is a temporary workaround.\ntf_logging.set_verbosity(tf_logging.INFO)\nrestored_model = model\nrestored_model.set_weights(trained_model.get_weights()) # this copies the weights from TPU, does nothing on GPU\ntf_logging.set_verbosity(tf_logging.WARN)\n\n# add the serving input layer\nserving_model = tf.keras.Sequential()\nserving_model.add(ServingInput('serving', tf.float32, (None, 28*28)))\nserving_model.add(restored_model)\nexport_path = tf.contrib.saved_model.save_keras_model(serving_model, os.path.join(BUCKET, 'keras_export')) # export the model to your bucket\nexport_path = export_path.decode('utf-8')\nprint(\"Model exported to: \", export_path)", "_____no_output_____" ] ], [ [ "### Deploy the model\nThis uses the command-line interface. You can do the same thing through the ML Engine UI at https://console.cloud.google.com/mlengine/models\n", "_____no_output_____" ] ], [ [ "# Create the model\nif NEW_MODEL:\n !gcloud ml-engine models create {MODEL_NAME} --project={PROJECT} --regions=us-central1", "_____no_output_____" ], [ "# Create a version of this model (you can add --async at the end of the line to make this call non blocking)\n# Additional config flags are available: https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions\n# You can also deploy a model that is stored locally by providing a --staging-bucket=... parameter\n!echo \"Deployment takes a couple of minutes. You can watch your deployment here: https://console.cloud.google.com/mlengine/models/{MODEL_NAME}\"\n!gcloud ml-engine versions create {MODEL_VERSION} --model={MODEL_NAME} --origin={export_path} --project={PROJECT} --runtime-version=1.10", "_____no_output_____" ] ], [ [ "### Test the deployed model\nYour model is now available as a REST API. Let us try to call it. The cells below use the \"gcloud ml-engine\"\ncommand line tool but any tool that can send a JSON payload to a REST endpoint will work.", "_____no_output_____" ] ], [ [ "# prepare digits to send to online prediction endpoint\ndigits = np.concatenate((font_digits, validation_digits[:100-N]))\nlabels = np.concatenate((font_labels, validation_labels[:100-N]))\nwith open(\"digits.json\", \"w\") as f:\n for digit in digits:\n # the format for ML Engine online predictions is: one JSON object per line\n data = json.dumps({\"serving_input\": digit.tolist()}) # \"serving_input\" because the ServingInput layer was named \"serving\". 
Keras appends \"_input\"\n f.write(data+'\\n')", "_____no_output_____" ], [ "# Request online predictions from deployed model (REST API) using the \"gcloud ml-engine\" command line.\npredictions = !gcloud ml-engine predict --model={MODEL_NAME} --json-instances digits.json --project={PROJECT} --version {MODEL_VERSION}\nprint(predictions)\n\nprobabilities = np.stack([json.loads(p) for p in predictions[1:]]) # first line is the name of the input layer: drop it, parse the rest\npredictions = np.argmax(probabilities, axis=1)\ndisplay_top_unrecognized(digits, predictions, labels, N, 100//N)", "['SEQUENTIAL', '[0.9999998807907104, 7.513082631071047e-14, 1.139144067963116e-08, 3.898011064320042e-14, 1.4082402168750889e-14, 1.2195139070401897e-14, 1.4076502186100015e-09, 6.281215239774263e-13, 1.0418918172661051e-08, 7.728890949465494e-08]', '[1.2769176294114004e-07, 0.9999390840530396, 2.8117156034568325e-05, 2.037909325736109e-06, 3.2849829523229346e-08, 3.794116878452769e-07, 1.2950034100622787e-10, 3.0006724045961164e-05, 2.5415941573569967e-10, 2.944753418887558e-07]', '[5.513777767696126e-10, 4.462803016025418e-09, 0.9999988079071045, 2.5445606866014714e-07, 3.362800218408707e-13, 3.4929680675643837e-12, 1.970903258305401e-12, 1.0858573062932919e-07, 8.707166898602736e-07, 1.5860496249686662e-09]', '[1.2159731028563858e-16, 3.6171125773407087e-13, 8.479520118833891e-14, 1.0, 1.7065597338781782e-18, 4.052971847023912e-11, 2.5456686010569502e-18, 1.1113750978536396e-12, 3.3342556146620517e-13, 3.0945648062281894e-13]', '[1.3914041030460567e-09, 6.120958406796717e-09, 5.256171609069327e-10, 1.1468791165714152e-15, 0.999997615814209, 1.0388691859888888e-12, 4.991534529458219e-12, 8.313129629122784e-10, 6.561372378107171e-16, 2.342358584428439e-06]', '[6.637519581274148e-15, 5.0352647398832495e-12, 1.3441835500858184e-17, 9.253346699988896e-11, 6.521864008590152e-15, 1.0, 5.962111880586374e-12, 1.850795047717024e-16, 4.415692444803337e-15, 9.377286273870578e-12]', '[2.5150718556687934e-06, 2.672544694229395e-12, 6.456924838554867e-12, 6.445683559379994e-14, 1.789606548889544e-12, 6.351873162202537e-05, 0.9999325275421143, 1.9653237912954646e-14, 1.4588888461730676e-06, 2.6729005675463213e-09]', '[1.3009648469619606e-08, 1.0242138159810565e-05, 4.813148916582577e-05, 9.45357242017053e-05, 2.892611671023726e-11, 2.753130274868454e-07, 7.0496594672953e-11, 0.9998446702957153, 4.193605036562076e-07, 1.6763161738708732e-06]', '[2.006222743489161e-09, 6.483021449321322e-14, 3.3234978658036596e-10, 2.5485607579867064e-07, 3.0008507495988797e-15, 2.0648953658053415e-09, 2.7201746063965082e-11, 2.7763540164787992e-12, 0.9999997615814209, 5.347550402490242e-09]', '[2.2925538800677714e-08, 3.3706939149558135e-12, 2.244333385803543e-09, 1.2352420526440255e-06, 3.2652991421855404e-09, 5.3598081528605235e-09, 3.929717879228023e-14, 1.3695869091612245e-11, 3.07038128255499e-08, 0.9999986886978149]', '[0.9905782341957092, 1.2632285688596312e-06, 2.2750856487618876e-07, 2.26209895259899e-08, 5.969365702185314e-06, 2.44452820652441e-07, 3.3346211125717673e-07, 0.009037256240844727, 3.565766348856414e-08, 0.00037631983286701143]', '[8.668889606155972e-09, 0.9999990463256836, 5.03338640100992e-07, 3.2048503850745647e-09, 4.716088852774192e-09, 5.212509108787344e-07, 7.077729957671863e-09, 2.8121615258669408e-08, 5.1107045678788765e-11, 3.0758329910840487e-12]', '[6.631124733758043e-07, 1.0421677387739692e-07, 0.9966933727264404, 0.0033033641520887613, 3.2713046681231983e-12, 6.621821840857578e-11, 
3.491223803109289e-12, 1.4349275261338335e-06, 8.594417408858135e-07, 8.163811315853309e-08]', '[1.5095230295782136e-13, 5.683729095012913e-12, 8.220679345583015e-12, 1.0, 1.4089570396825265e-15, 5.657471366382616e-11, 1.2118471259609713e-16, 1.9514740517978524e-11, 2.1724666336708776e-12, 8.06582245438392e-10]', '[7.317008665630453e-10, 8.063854693318717e-06, 3.693021355388737e-08, 1.0824310198165321e-11, 0.9999914169311523, 2.9507969401265655e-09, 4.1460063160414506e-10, 4.6828635191786816e-08, 2.3902275991805055e-11, 5.136867002875078e-07]', '[3.740127374474156e-10, 5.785096401922374e-09, 3.291797043281086e-11, 0.0015708121936768293, 6.850056546298111e-13, 0.9983634352684021, 1.100056490344059e-09, 6.70269006963764e-10, 3.2275556804961525e-07, 6.531834515044466e-05]', '[0.12690246105194092, 2.250766506506352e-08, 9.370008241527117e-11, 6.571281713219079e-11, 3.222530864377404e-08, 0.0001403938076691702, 0.8729522228240967, 8.728854083983606e-09, 2.41645807363966e-07, 4.653611085814191e-06]', '[6.434384403064541e-08, 5.9664885156962555e-06, 0.00010832849511643872, 0.018944410607218742, 3.996034336761767e-10, 1.1033920088721061e-07, 1.6453674533956075e-10, 0.9808399081230164, 5.454069764709857e-07, 0.0001005647427518852]', '[1.0419754659096725e-07, 9.006556351415229e-13, 3.4015994465619315e-09, 6.155378429184566e-09, 7.600395313112074e-10, 2.654578281635622e-07, 3.639417300860259e-08, 2.9406147655786086e-11, 0.9999995231628418, 6.487294967882917e-08]', '[7.643710705451667e-05, 7.699491599844066e-10, 8.652096425976197e-07, 1.8006395521297236e-06, 5.5878972489153966e-05, 7.400533831969369e-06, 3.7410552433669864e-10, 7.490304199109232e-08, 7.470750460925046e-06, 0.9998500347137451]', '[0.9905782341957092, 1.2632285688596312e-06, 2.2750856487618876e-07, 2.26209895259899e-08, 5.969365702185314e-06, 2.44452820652441e-07, 3.3346211125717673e-07, 0.009037256240844727, 3.565766348856414e-08, 0.00037631983286701143]', '[8.668889606155972e-09, 0.9999990463256836, 5.03338640100992e-07, 3.2048503850745647e-09, 4.716088852774192e-09, 5.212509108787344e-07, 7.077729957671863e-09, 2.8121615258669408e-08, 5.1107045678788765e-11, 3.0758329910840487e-12]', '[6.631124733758043e-07, 1.0421677387739692e-07, 0.9966933727264404, 0.0033033641520887613, 3.2713046681231983e-12, 6.621821840857578e-11, 3.491223803109289e-12, 1.4349275261338335e-06, 8.594417408858135e-07, 8.163811315853309e-08]', '[1.5095230295782136e-13, 5.683729095012913e-12, 8.220679345583015e-12, 1.0, 1.4089570396825265e-15, 5.657471366382616e-11, 1.2118471259609713e-16, 1.9514740517978524e-11, 2.1724666336708776e-12, 8.06582245438392e-10]', '[1.23173332644555e-10, 1.6169119376741037e-08, 1.6274865899390534e-09, 1.0270231776132732e-08, 1.0425180718698357e-08, 3.3234479057675514e-10, 2.5102663003817582e-12, 1.0, 4.615344911806929e-11, 4.685018595296242e-09]', '[1.0167626740553715e-09, 9.756616847766963e-09, 1.0, 1.6320973435582364e-12, 3.86473452795421e-14, 4.216184421989427e-15, 2.296446366401028e-09, 4.974486100817188e-12, 7.555724709090716e-13, 9.236253721141784e-16]', '[8.741151136248959e-10, 1.0, 7.2459260813673154e-09, 1.7211329583766144e-10, 1.150615069889227e-08, 1.2079128808295536e-08, 3.484500021855297e-09, 2.2740338501137103e-09, 1.2176815999964674e-08, 9.584719129485109e-11]', '[0.9999994039535522, 1.6767537358575169e-09, 3.659017266954834e-08, 3.711788931770599e-10, 7.472673868580415e-11, 2.8492402881497014e-10, 5.016930799683905e-07, 4.675063003389823e-09, 4.972287581672674e-10, 6.183902723222445e-09]', '[6.501149618642899e-10, 
1.2261880399933034e-08, 3.362249020866237e-11, 6.056831251821659e-12, 0.9999996423721313, 1.1205203431785549e-09, 1.0305799547083438e-09, 3.50193832265866e-11, 4.394444808042408e-10, 3.391791381091025e-07]', '[6.547003217338698e-11, 1.0, 2.428389087039129e-11, 3.0837588342255695e-13, 9.449256932470007e-10, 2.4173673479621627e-11, 1.4302745619809709e-11, 6.824233178548411e-09, 4.092511399211851e-11, 9.444328132046653e-12]', '[1.668963437842009e-15, 1.4136006254439337e-10, 4.210665561880933e-13, 1.6192076637291074e-15, 0.9999995231628418, 4.177872214849998e-10, 6.763080763819151e-13, 1.2469456023289638e-11, 5.980914608016974e-08, 4.2309414993724204e-07]', '[2.6180408752365936e-13, 9.47449338426008e-16, 1.1213074332981979e-13, 9.611299793153491e-15, 2.7574875976349444e-12, 2.8541967780123384e-14, 1.9940378151311583e-17, 2.3227357331942125e-17, 2.6149292800536905e-11, 1.0]', '[4.773129447244173e-09, 5.684238149616938e-11, 3.626382614335677e-12, 5.600708698899615e-13, 5.2576281106553324e-09, 0.9991376399993896, 0.0008622082532383502, 6.407548103215532e-11, 1.3724461211950256e-07, 2.443211855052141e-08]', '[7.896538241000672e-13, 8.878188542140158e-15, 4.623315996164009e-16, 3.907789456608635e-12, 5.656613843996183e-09, 8.949951023351499e-12, 8.045193577799821e-16, 2.8718931233129297e-08, 3.3290543932640304e-11, 1.0]', '[1.0, 1.4377010693067405e-10, 2.1650892190194782e-10, 2.699146719391081e-14, 1.3493431198774442e-11, 8.364135772875869e-11, 1.7319630105094852e-11, 8.617400196198055e-12, 1.5089797900796897e-12, 6.8086056792537875e-09]', '[1.0692073094953347e-11, 1.1632406035039263e-17, 1.5655844536047138e-19, 1.3242256070385426e-20, 4.2871376191238415e-16, 3.513095034064079e-13, 1.0, 1.59012118925916e-22, 4.4138781171709773e-13, 4.221150642333628e-18]', '[7.595191836573534e-11, 2.82873789496374e-12, 1.808227504361548e-13, 2.0292494795626226e-08, 3.8910329180907866e-08, 1.2678431637880294e-09, 2.057010811684634e-12, 7.863884543546362e-11, 1.040583086364677e-08, 0.9999998807907104]', '[1.0, 1.2081904920968611e-10, 1.1886547301998007e-09, 2.2129636664813823e-11, 1.9533881456812452e-11, 2.310148738970952e-09, 3.8401872792803715e-09, 4.92702212362417e-10, 2.779515229089924e-10, 4.0637511133923e-09]', '[1.6051888550999704e-12, 1.0, 6.431716053842407e-13, 4.065481458426223e-12, 1.5511689155367492e-11, 1.130847568364679e-10, 3.3869257225205285e-12, 8.835383119576434e-11, 6.332122326480061e-13, 6.953437798424764e-14]', '[2.6973666356067127e-12, 1.3987716540597717e-09, 1.5021863690022758e-13, 2.7496105303725926e-06, 2.170709792470582e-11, 0.9999972581863403, 3.1595330518552345e-11, 1.714980657485654e-10, 1.7235007865323837e-09, 5.8074146186415376e-11]', '[2.373209406769661e-10, 1.3890851359790735e-12, 1.9045336054762663e-12, 1.1944842892575025e-09, 5.842029082714362e-10, 5.780565262569759e-11, 4.9034483071142e-12, 4.563267541612959e-09, 6.583650513647399e-09, 1.0]', '[3.8402933610903744e-11, 3.2117515758045556e-08, 2.375619923000727e-09, 9.507600928770898e-09, 1.9336081347187672e-11, 1.6876781222530113e-11, 2.0765783840734353e-13, 1.0, 7.707845321204554e-14, 6.91492141324801e-11]', '[1.6771906530266278e-07, 1.0102221281726997e-08, 2.642930496676854e-07, 0.999756395816803, 8.554565056329011e-09, 0.00024051597574725747, 4.092471250771723e-09, 3.5999033087819043e-08, 1.936731223395327e-06, 6.518237682939798e-07]', '[5.348785108404142e-13, 1.9750518998051803e-09, 1.1641352487025414e-13, 7.79018065062264e-15, 1.0, 5.046648862694347e-12, 9.017092628127443e-14, 7.69931007837954e-10, 1.037744759915804e-12, 
6.548723802818346e-12]', '[1.2732290916028788e-11, 6.921182377217505e-13, 4.190578954807109e-14, 3.5922311791836137e-09, 8.01054067522955e-09, 3.428108352743209e-10, 2.055565665056401e-14, 1.1037659675139366e-08, 2.983930325051176e-10, 1.0]', '[4.060818001305755e-11, 1.2375484751553367e-12, 1.3717386872658804e-13, 6.0044412272656535e-15, 1.2915426920220158e-13, 9.659758859470458e-08, 0.9999998807907104, 8.561188579212604e-16, 5.500839714289718e-11, 1.7043715072086185e-13]', '[8.04214972394135e-10, 7.963991965898032e-11, 3.123994812836983e-11, 6.154825997820024e-14, 2.8394875339898817e-10, 1.5396336427997426e-09, 1.0, 1.2179421103516172e-15, 4.316936530468496e-10, 1.0827288501533139e-12]', '[1.5638746011750098e-15, 3.721255888069347e-13, 5.4754678844646445e-18, 2.6411751258281768e-11, 5.940714279962503e-16, 1.0, 6.932531263457997e-13, 4.8482971452031625e-17, 2.1534982591652277e-15, 8.416578545222819e-11]', '[8.1026201373402e-12, 3.38000460953225e-10, 1.0465055211295038e-10, 2.1918903324313899e-13, 1.0, 2.889972462727375e-11, 1.146634783649736e-11, 1.4221172850437114e-10, 9.309943788116115e-11, 4.488796889745572e-08]', '[0.9999977350234985, 1.9617313416070425e-12, 4.986706048093481e-10, 4.573133191576595e-11, 1.5675002962289852e-10, 1.8488208186617783e-10, 2.214640517195221e-06, 9.699029079879296e-11, 3.515346458371482e-09, 2.7794522239332764e-09]', '[8.260165468287894e-11, 1.9260861794379025e-09, 7.335677204567403e-12, 4.386742080697559e-10, 1.8532902643086935e-11, 2.7192000387477044e-11, 3.5185751275921596e-16, 1.0, 1.0808459738385987e-14, 9.14967684950696e-11]', '[5.719687554530187e-14, 1.9487018335789807e-12, 8.851878851765492e-15, 6.0555212579822484e-18, 1.0, 5.959877638064479e-14, 1.134207424271152e-16, 1.0711217603184115e-15, 1.680025306968324e-14, 2.0674986028756948e-11]', '[1.0, 5.3635498820092664e-12, 6.284221476526852e-10, 8.904000453613392e-11, 5.715251947917277e-13, 1.0672559364044432e-10, 8.919697662770898e-13, 1.039434094352032e-09, 1.1225117363400372e-10, 2.683614663823164e-09]', '[1.6733800456414372e-10, 1.0, 2.998886472482809e-09, 2.53692622464996e-09, 4.671978914849717e-10, 2.9389257694134585e-09, 7.01906172073663e-11, 1.1275340661143218e-08, 2.1741893008186963e-10, 3.57076417045743e-11]', '[2.8720636251335996e-15, 2.9735572728076254e-13, 1.6430376482100273e-13, 1.0, 1.2586803717080496e-16, 5.102717293842263e-13, 4.337484393104897e-19, 2.430094320216014e-11, 4.290853764421683e-15, 9.659519711879838e-12]', '[2.730927983751741e-10, 0.9999983310699463, 2.4402957432556605e-10, 8.70782959627725e-12, 1.6773925608504214e-06, 9.175513127068768e-10, 1.0407689293723266e-10, 9.36913213678281e-09, 1.8637995591319623e-09, 4.096648353879573e-09]', '[2.2642381608431288e-14, 3.456913644228621e-10, 3.335734813370017e-11, 1.0, 2.5317213603326394e-12, 2.3606927967989577e-08, 1.0161615066105537e-13, 4.972138811787374e-10, 3.9210815150347855e-11, 8.299745890560928e-10]', '[1.89496245184273e-06, 4.4884071459527775e-10, 2.928084857911628e-11, 1.6307536757122492e-15, 0.9999980926513672, 1.1779060365979532e-11, 9.853828863981562e-09, 3.7851742139966005e-13, 1.3621513754457932e-14, 2.996773662555796e-11]', '[3.675501607713558e-14, 5.469290020876372e-10, 1.2819681427522767e-10, 1.018629486315703e-10, 2.424896763421336e-13, 1.784392945902713e-12, 6.685311430089502e-17, 1.0, 1.3184028860497611e-14, 3.426258097225833e-14]', '[2.4916554930420887e-10, 8.458215239315336e-10, 1.0, 6.570922972404247e-11, 3.1954331487695636e-14, 1.0530449125537023e-14, 5.389114501874737e-12, 1.426847529018005e-10, 
5.90221871377139e-10, 2.2840544559865616e-12]', '[7.35575879720618e-13, 2.3879982435914826e-08, 1.0447481145092752e-05, 4.1255124316741387e-10, 4.821991694825556e-09, 1.6794703648626008e-13, 5.056208815350238e-13, 0.9999895095825195, 5.919684145948073e-15, 1.4456829017326506e-13]', '[3.038281431999579e-11, 1.0, 9.778161613738234e-11, 3.600704153242096e-12, 1.0365184266447613e-08, 1.507293623248529e-09, 1.1147581469028722e-10, 1.9874551782095295e-08, 3.0707318632305913e-11, 3.1718471599218034e-11]', '[1.1206127581431247e-08, 1.1482989066280425e-05, 0.9999880790710449, 2.478421912144313e-09, 1.5679636755638882e-10, 9.636545034164001e-13, 6.33189056742367e-09, 4.920768219562888e-07, 1.7318342593330982e-10, 3.152207011039576e-12]', '[5.642473155376138e-10, 1.0, 9.527973028611303e-12, 6.960564347124887e-10, 3.3664306897662755e-09, 9.999152617012896e-10, 2.149089795011605e-09, 2.0135721978675747e-09, 1.7665425788848665e-09, 5.456723961572152e-10]', '[2.8729296719376407e-11, 1.0, 2.2721673501036044e-11, 3.261986600971989e-12, 1.2380556579927315e-09, 4.474653703123721e-10, 1.4176507190377663e-11, 9.173599380130071e-11, 1.123081419529548e-10, 4.524554775980905e-12]', '[1.5814414664958032e-12, 7.815143199252361e-09, 5.480912932398496e-07, 8.608811996602128e-10, 5.376245226784704e-09, 4.97261194894183e-13, 1.2098575450942423e-13, 0.9999994039535522, 1.148111231872315e-15, 6.044108898269063e-13]', '[1.5078819540648852e-14, 1.3843835799942639e-10, 5.4981375136142115e-17, 9.525554647902938e-16, 0.9999998807907104, 1.1580063297156329e-11, 1.2981616107597682e-13, 2.865326465678608e-12, 9.085062888623818e-13, 9.250923937997868e-08]', '[1.9110974847080797e-07, 9.949619561666623e-05, 0.9931163191795349, 3.261784797814471e-08, 0.006746601313352585, 4.327002045556583e-08, 3.74654664483387e-05, 2.011578015270743e-09, 1.0930571114897703e-08, 2.8557098907810996e-09]', '[1.3288871392180823e-10, 9.651486010398003e-08, 1.7211411318385217e-07, 0.9999994039535522, 2.1714019471374968e-11, 4.1627450286796375e-07, 2.050921715443521e-12, 7.080932284964092e-09, 5.12374621897127e-11, 1.4239661894066558e-08]', '[4.445829121513256e-11, 7.446665679922138e-12, 3.2650566312317306e-15, 3.4874202050477754e-12, 1.8495098573316493e-13, 1.0, 2.0452901594580908e-08, 4.0605816235481096e-13, 3.248182278703382e-10, 1.3290785843850933e-13]', '[1.0987572657272793e-10, 1.0, 4.749957582816933e-08, 2.9316490568476183e-08, 8.643487703352548e-09, 2.457561620872184e-08, 4.2404701972031944e-10, 7.252641820443273e-10, 2.552014821688431e-09, 9.572099779475707e-10]', '[9.50055701156357e-15, 1.2414469570005689e-16, 1.0, 4.533793655715941e-14, 4.897161929991059e-18, 2.768367724803804e-21, 1.8206004407297764e-16, 3.67357593654149e-17, 9.757341419211474e-13, 1.4419766057736245e-18]', '[4.403815853648574e-11, 3.42824186816415e-08, 1.9651467952908064e-12, 1.754121570218814e-10, 0.999988317489624, 4.002535547442676e-08, 2.0235153830316932e-10, 1.5550012832932225e-08, 2.5211532417301896e-08, 1.1572844414331485e-05]', '[1.511114844365835e-11, 8.204208978845884e-10, 1.0619712574599927e-12, 7.98773753304255e-15, 1.0, 1.433656526828031e-10, 1.8680172426260855e-12, 1.5354845216489221e-12, 4.0439334281390515e-13, 3.810600723852531e-09]', '[5.384853327128347e-11, 2.2212837309538297e-13, 4.604066219525589e-14, 9.461094381825702e-16, 1.3025275302780415e-12, 1.1947486333596657e-09, 1.0, 6.388059907978284e-16, 1.2815414961869775e-12, 5.4306484033013816e-15]', '[7.409729080309901e-12, 1.4004025369884765e-11, 1.9612812762748177e-10, 1.0, 2.60755809305957e-11, 
2.7500673613345405e-11, 2.154048830580943e-15, 9.704514969399725e-10, 1.3235733342664702e-10, 4.819147685075631e-12]', '[7.89853310523541e-15, 6.857906034501635e-14, 1.9050908357091475e-18, 1.9853901508937177e-12, 3.923261726341415e-13, 1.0, 7.438073594545624e-13, 3.7481909365162235e-16, 1.139086519335794e-14, 1.269629179567744e-09]', '[7.551798474210968e-15, 3.507589523194833e-14, 1.911059357419024e-18, 1.8543899055201152e-11, 4.425795938501481e-14, 1.0, 1.6804316618768134e-11, 4.922505503188765e-17, 6.895708098498057e-14, 1.4811863541241976e-11]', '[7.733314856539497e-12, 4.455843818206441e-16, 5.368597079900982e-19, 9.712758061320101e-17, 9.944574435371352e-16, 1.623672168937773e-10, 1.0, 7.84311783437011e-20, 1.3447108010433695e-11, 5.434471158945291e-19]', '[1.0, 3.0758801061736563e-10, 3.798552861145055e-10, 2.6293240994830144e-14, 5.000539370601798e-13, 2.716376290567979e-10, 2.0774836712034173e-11, 5.307381201191674e-10, 9.534713513517645e-13, 3.223070699220898e-10]', '[1.2374677020934866e-13, 1.318876563516369e-09, 1.020299148208443e-15, 1.2313765458947155e-17, 1.0, 6.359374138398266e-11, 4.617010915838708e-14, 8.454637814500621e-13, 1.053973260663893e-14, 2.6605400174628535e-11]', '[8.452522015645059e-10, 1.0, 4.994303859362503e-10, 1.892183024848615e-12, 2.2018056711203826e-08, 5.711969230937086e-10, 4.665932085146096e-10, 1.0217789281341538e-08, 2.6690458732048228e-09, 5.385438622829142e-11]', '[1.283352243919289e-11, 2.1489368173376738e-14, 2.2460521940589895e-14, 9.780082993460226e-11, 5.553747683961774e-09, 4.173056067369174e-10, 1.0386056435458119e-13, 2.6540589170842566e-10, 2.253628490420101e-12, 1.0]', '[2.3885868283279876e-10, 3.3493396858069735e-12, 5.69019920361436e-15, 1.7436677102189435e-11, 4.469350445290843e-10, 0.9999947547912598, 5.429675372509157e-12, 2.0213855478345977e-08, 4.9688961278882005e-11, 5.207192771194968e-06]', '[1.8649690680661024e-09, 1.8264570966763927e-09, 2.1149026974143226e-10, 2.940401877538079e-08, 1.4676232451549254e-11, 1.8648739441573525e-08, 5.616153277431283e-15, 1.0, 3.1541297893823705e-14, 5.879937092778675e-10]', '[2.0831446376906593e-12, 1.2808210717954482e-15, 5.7784127704962884e-08, 2.2213187733655104e-08, 6.839052859864943e-15, 4.950448939666785e-10, 2.8960927216494306e-14, 4.0887150436208497e-13, 0.9999998807907104, 2.350258043737341e-10]', '[0.0002640413003973663, 1.2716811397694983e-05, 0.0001927968842210248, 2.571404593254556e-06, 0.0002137310366379097, 0.36109480261802673, 0.0006786751328036189, 0.06606225669384003, 0.12096087634563446, 0.4505175054073334]', '[3.4145832862597647e-12, 2.0942297895842898e-10, 0.00010293548984918743, 0.9998968839645386, 8.162392338634597e-14, 1.2850992492374758e-10, 1.1076217017408439e-14, 3.834443518258013e-08, 5.358652188647284e-08, 7.219355779852776e-09]', '[2.5680177605848795e-15, 1.4320339358775414e-09, 1.8668187828918548e-10, 1.611920374955389e-08, 8.625010039509107e-09, 5.996619758912025e-13, 7.583951183144251e-17, 1.0, 1.7355673325505092e-15, 6.843540057560604e-12]', '[4.216574463720979e-13, 1.7450229039539522e-09, 3.858748173421467e-13, 6.1033272416599095e-15, 1.0, 5.90041487891213e-11, 1.012255871572082e-13, 1.1880499839120318e-12, 1.0083868419094588e-12, 4.4044341507287754e-09]', '[2.3980811647561495e-07, 2.9403093293467464e-09, 2.1491428428555004e-12, 2.9632487436037636e-11, 3.257128344813509e-09, 4.289516652988823e-08, 0.9999995231628418, 3.16763461164285e-13, 2.8483049163696705e-07, 4.6763877492583816e-11]', '[3.398048231684214e-10, 1.2109329361464916e-07, 4.788302566949287e-12, 
3.7089014043138746e-13, 0.9999855756759644, 1.2746397715091007e-07, 3.7916794703996004e-10, 2.8918305527980692e-09, 1.5938245168101162e-10, 1.4195421499607619e-05]', '[7.377585502180031e-17, 6.877384404156694e-14, 1.026370422029313e-13, 1.0, 2.742977762735735e-16, 8.957649075616581e-13, 1.5381229619967482e-19, 6.7305818036988985e-12, 5.635678013669876e-13, 2.439743719551135e-12]', '[1.0, 1.4868949170868118e-12, 1.242594915851214e-10, 2.503847552470101e-13, 7.108961856683305e-13, 2.709067992157088e-08, 1.9801223771764853e-08, 2.151343339584777e-11, 1.7949895042557173e-11, 1.188511955518834e-09]', '[5.581490478134832e-11, 1.0597416277846605e-08, 3.8600347918027467e-10, 1.054462517302568e-09, 1.4848983154180928e-10, 1.6904422306396327e-11, 1.8537726692233192e-13, 1.0, 5.223953864971764e-13, 3.175732388172037e-08]', '[1.0, 1.0330529834345903e-11, 2.9360822800805764e-11, 2.7415317398519178e-14, 1.6047533852973916e-13, 1.9744650792824503e-12, 2.9999301653926835e-11, 1.7720168865240082e-12, 2.7506520824081837e-13, 2.7061164420416617e-10]', '[2.925083231186676e-10, 1.6385235182547753e-11, 1.0, 2.2559907067454255e-11, 1.6926287178251265e-16, 9.392988909459957e-15, 2.7912918338328504e-15, 1.774279399617551e-12, 1.3006078419114386e-13, 2.070494620751758e-14]', '[2.618952450739176e-11, 3.5503750547144497e-12, 1.1222653767395657e-14, 3.370738410612972e-11, 1.448609565635195e-10, 1.1638671797153943e-12, 1.0156087667904934e-14, 1.373137603621899e-08, 1.1529613708205488e-08, 1.0]', '[2.7663693818319457e-12, 1.0, 1.4656894404760368e-12, 4.113166784166372e-13, 2.8009319308353042e-09, 2.0561462255042073e-10, 5.551062734476808e-11, 3.118617897257536e-08, 5.28550467282507e-12, 8.208485432169288e-13]', '[7.673783493975877e-18, 3.446496560366441e-11, 2.2541893007700826e-14, 5.261899249653368e-11, 1.0812182312663898e-12, 3.098576487674075e-16, 5.035751169013523e-21, 1.0, 6.09824430944e-18, 1.9415174220931142e-14]']\n" ] ], [ [ "## License", "_____no_output_____" ], [ "\n\n---\n\n\nauthor: Martin Gorner<br>\ntwitter: @martin_gorner\n\n\n---\n\n\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\n---\n\n\nThis is not an official Google product but sample code provided for an educational purpose\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
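The record above drives training with a LearningRateScheduler of the form 0.0001 + 0.02 * 0.5**(1 + epoch). A minimal, framework-free sketch of that schedule (standard library only; not part of the notebook itself) tabulates it and reproduces the values reported in the training log, 0.0101 for epoch 1, 0.0051 for epoch 2, and so on:

```python
import math

# Sketch of the learning-rate schedule used by the notebook above:
# a floor of 1e-4 plus a term that halves every epoch.
def lr(epoch):
    return 0.0001 + 0.02 * math.pow(0.5, 1 + epoch)

for epoch in range(10):
    print("epoch %2d: lr = %.6f" % (epoch + 1, lr(epoch)))
```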
cb124b5f3c208faaf5bad647acfcfe42c8273121
936,013
ipynb
Jupyter Notebook
Clase_folium_Carto.ipynb
camiloceacarvajal/folium-python
3f953a959fd6ab01ee14e46b9418f4b797c95621
[ "MIT" ]
null
null
null
Clase_folium_Carto.ipynb
camiloceacarvajal/folium-python
3f953a959fd6ab01ee14e46b9418f4b797c95621
[ "MIT" ]
null
null
null
Clase_folium_Carto.ipynb
camiloceacarvajal/folium-python
3f953a959fd6ab01ee14e46b9418f4b797c95621
[ "MIT" ]
null
null
null
840.22711
258,184
0.981786
[ [ [ "import folium\nfrom folium.plugins import MarkerCluster\nimport pandas as pd\nimport branca\nimport json\nimport numpy as np\nimport vincent\nimport os\nfrom folium.plugins import Draw\nimport numpy as np\nfrom folium.plugins import HeatMap\nprint(folium.__version__)#muestra la version actual de la libreria ", "0.5.0\n" ] ], [ [ "#### conda install -c conda-forge folium \n#### conda install -c anaconda pandas \n#### conda install -c conda-forge json-c \n#### conda install -c anaconda numpy \n#### conda install -c anaconda vincent\n#### conda install -c conda-forge branca \n#### conda install -c anaconda simplejson \n#### conda install -c anaconda jinja2\n#### conda install -c anaconda pytest ", "_____no_output_____" ], [ "### Datos de Python. Leaflet.js Maps. \n#### Folium se basa en los puntos fuertes del ecosistema Python y las fortalezas de mapeo de la biblioteca Leaflet.js. Manipule sus datos en Python, luego visualรญcelos en un mapa de folletos a travรฉs de Folium.\n\n### Conceptos \n#### Folium facilita la visualizaciรณn de datos manipulados en Python en un mapa interactivo de folletos. Permite tanto el enlace de datos a un mapa para visualizaciones de coropletas como el paso de visualizaciones de Vincent / Vega como marcadores en el mapa.\n\n#### La biblioteca tiene una serie de tilesets incorporados de OpenStreetMap, Mapbox y Stamen, y admite tilesets personalizados con Mapbox o las claves API de Cloudmade. Folium admite superposiciones GeoJSON y TopoJSON, asรญ como la uniรณn de datos a esas superposiciones para crear mapas de coropletas con esquemas de color color-brewer.\n\n<img width=650px src='http://www.reactiongifs.com/r/dph.gif'>\n\n\n\nhttp://folium.readthedocs.io/en/latest/\n", "_____no_output_____" ] ], [ [ "#Definir coordenadas de donde queremos centrar nuestro mapa\nSantiago_coords = [-33.448653, -70.656910] # en geografica \n\n#Crear mapa\nmi_map = folium.Map(location = Santiago_coords, zoom_start = 13)#Cuanto mayor sea el nรบmero de zoom, mรกs cerca se encuentra\n\n#mostrar el mapa\nmi_map", "_____no_output_____" ], [ "#Define las coordenadas que queremos que sean nuestros marcadores\nCartografia_coords = [-33.448653, -70.656910]\nU_Central_Campus_coords = [-33.451471, -70.654607]\nInstituto_Geografico_coords = [-33.450637, -70.657675]\n\n\n#Agregar marcadores al mapa\nfolium.Marker(Cartografia_coords, popup = 'Escuela').add_to(mi_map)\nfolium.Marker(U_Central_Campus_coords, popup = 'central Campus').add_to(mi_map)\nfolium.Marker(Instituto_Geografico_coords, popup = 'IGM').add_to(mi_map)\n\n#muestra el mapa\nmi_map", "_____no_output_____" ], [ "#Agregar un juego de fichas a nuestro mapa\nmap_with_tiles = folium.Map(location = Santiago_coords , tiles = 'stamenwatercolor')\nmap_with_tiles\n# Stamen Terrain ,stamenwatercolo,openstreetmap,cartodbpositron,cartodbdark_matter,mapboxbright,mapboxcontrolroom\n#guardar mapa \n# marca interactiva\nmap_with_tiles.add_child(folium.ClickForMarker(popup=\"papu\"))\n #guardar mapa \n#map_with_tiles.save('bluemap.html')", "_____no_output_____" ], [ "#Usar marcadores de polรญgono con colores en lugar de marcadores predeterminados\npolygon_map = folium.Map(location = Santiago_coords, zoom_start = 16)\n\nCartografia_coords = [-33.448653, -70.656910]\nU_Central_Campus_coords = [-33.451471, -70.654607]\nInstituto_Geografico_coords = [-33.450637, -70.657675]\n\n#agregar marcadaores en el mapa\nfolium.RegularPolygonMarker(Cartografia_coords, popup = 'Carto-geomatica', fill_color = '#00ff40',\n number_of_sides = 3, radius = 
10).add_to(polygon_map)\nfolium.RegularPolygonMarker(U_Central_Campus_coords, popup = 'campus central', fill_color = '#bf00ff',\n number_of_sides = 5, radius = 10).add_to(polygon_map)\nfolium.RegularPolygonMarker(Instituto_Geografico_coords, popup = 'IGM', fill_color = '#ff0000',\n number_of_sides = 8, radius = 10).add_to(polygon_map)\n\n#circulo con relleno azul\nfolium.features.CircleMarker(\n location=[-33.448653, -70.656910],\n radius=50,\n popup='utem',\n color='#55cc31',#green\n fill=True,\n fill_color='#3186cc'\n).add_to(polygon_map)\n\n# Interactive marker\npolygon_map.add_child(folium.ClickForMarker(popup=\"papu 1\"))\n\n\n\n#mostrar mapa\npolygon_map", "_____no_output_____" ], [ "m = folium.Map(\n location=[-33.448653, -70.656910],\n tiles='Stamen Toner',\n zoom_start=13\n)\n\n#circulo carmesi\nfolium.features.Circle(\n radius=100,\n location=[-33.451471, -70.654607],\n popup='cartografia',\n color='crimson',\n fill=False,#relleno del circulo\n).add_to(m)\n\n#circulo con relleno azul\nfolium.features.CircleMarker(\n location=[-33.448653, -70.656910],\n radius=50,\n popup='utem',\n color='#3186cc',\n fill=True,\n fill_color='#c131cc'\n).add_to(m)\n\n\nm", "_____no_output_____" ], [ "\n#crear mapa interactivo\nmap_hooray = folium.Map(location=[-33.448653, -70.656910],\n zoom_start = 11) \n# Agrega herramienta a la esquina superior derecha\nfrom folium.plugins import MeasureControl\nmap_hooray.add_child(MeasureControl())\n\nmap_hooray", "_____no_output_____" ], [ "map_hooray = folium.Map(location=[-33.448653, -70.656910],\n tiles = \"Stamen Toner\",\n zoom_start = 15)\n\nfolium.Marker([-33.448653, -70.656910],\n popup='carto',\n icon=folium.Icon(color='green')\n ).add_to(map_hooray)\nfolium.Marker([-33.451471, -70.654607], \n popup='igm',\n icon=folium.Icon(color='red',icon='university', prefix='fa') \n ).add_to(map_hooray)\n #icon=folium.Icon(color='blue',icon='bar-chart', prefix='fa') se remplaza por cualquiera\n #cloud\nfolium.Marker([-33.450637, -70.657675], \n popup='u central',\n icon=folium.Icon(color='red',icon='bicycle', prefix='fa')\n ).add_to(map_hooray)\n \n\nmap_hooray.add_child(folium.ClickForMarker(popup=\"bici papu\"))\n\nmap_hooray", "_____no_output_____" ], [ "import folium\nfrom folium import plugins\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\ndata =[[ -33.4073, -70.6531, 1900. ],\n [ -33.4185, -70.6556, 3200. ],\n [ -33.4116, -70.6509, 5800. ],\n [ -33.4184, -70.6548, 2900. ],\n [ -33.4178, -70.6515, 3312. ],\n [ -33.4159, -70.6574, 2600. ],\n [ -33.4192, -70.6537, 4299. ],\n [ -33.4184, -70.6582 , 5750. ],\n [ -33.4112, -70.6596, 3595. 
]]\n\nma = folium.Map([-33.4426, -70.6568], \n control_scale = True, zoom_start=12,tiles='mapboxbright')\n\nplugins.HeatMap(data, radius = 20, min_opacity = 0.1, max_val = 50,gradient={.6: 'blue', .98: 'lime', 1: 'red'}).add_to(ma)\n", "_____no_output_____" ], [ "ma", "_____no_output_____" ], [ "center_pos = [-33.418043, -70.648273]\nfmap = folium.Map(location=center_pos, zoom_start=15,tiles='cartodbdark_matter')\nfmap.add_child(folium.Circle(location=center_pos,\n color='green', # Circle color\n radius=30, # Circle ancho\n popup='Skytree', # contenido emergente\n fill=True, #llenar area media\n fill_opacity=0.5 #establecer trasparencia\n ))\n\npoints = [[-33.4073,-70.6531],\n [-33.417417,-70.652628],\n [-33.418043, -70.648273]]\n\nfmap.add_child(folium.PolyLine(locations=points, # Lista de coordenadas\n weight=8)) #Ancho de lรญnea", "_____no_output_____" ], [ "import numpy as np\nfrom folium.plugins import HeatMap\n\nfmap = folium.Map(location=[-33.448653, -70.656910], zoom_start=12)\n\n#Crear datos aleatorios\ndata = (np.random.normal(size=(100, 3)) * 0.02 *\n np.array([[1, 1, 1]]) +\n np.array([[-33.448653, -70.656910, 1]])).tolist()\n\nfmap.add_child(HeatMap(data=data))", "_____no_output_____" ], [ "import numpy as np\nfrom folium.plugins import HeatMapWithTime\n\ncenter_pos = [-33.448653, -70.656910]\n\n #Use numpy para crear datos iniciales\ninitial_data = (np.random.normal(size=(200, 2)) *\n np.array([[0.02, 0.02]]) +\n np.array([center_pos]))\n\n# Crear datos continuos\ndata = [initial_data.tolist()]\nfor i in range(20):\n data.append((data[i] + np.random.normal(size=(200, 2)) * 0.001).tolist())\n\nfmap = folium.Map(center_pos, zoom_start=11)\nfmap.add_child(HeatMapWithTime(data)) # Mostrar mapa de calor continuo\n#fmap.save('heatmap1.html')\n\n", "_____no_output_____" ], [ "import json\n\nbuoy_map = folium.Map(\n [-33.055721,-71.708766],\n zoom_start=11,\n tiles='Stamen Terrain'\n )\n\nfolium.RegularPolygonMarker(\n [-33.055721,-71.708766],\n fill_color='#43d9de',\n radius=12,\n popup=folium.Popup(max_width=450).add_child(\n folium.Vega(json.load(open('vis1.json')), width=450, height=250))\n ).add_to(buoy_map)\n\nfolium.RegularPolygonMarker(\n [-33.017004,-71.613186],\n fill_color='#43d9de',\n radius=12,\n popup=folium.Popup(max_width=450).add_child(\n folium.Vega(json.load(open('vis2.json')), width=450, height=250))\n ).add_to(buoy_map)\n\nfolium.RegularPolygonMarker(\n [-32.941004,-71.600571],\n fill_color='#43d9de',\n radius=12,\n popup=folium.Popup(max_width=450).add_child(\n folium.Vega(json.load(open('vis3.json')), width=450, height=250))\n ).add_to(buoy_map)\n\nbuoy_map\n#guardar mapa \n#buoy_map.save('mapacongrafico.html')\n", "_____no_output_____" ], [ "m = folium.Map([-33.448653, -70.656910], zoom_start=12,control_scale=True,# controlar escala\n prefer_canvas=True\n)\n\nhtml = \"\"\"\n <h1> Esta es una gran ventana emergente</h1><br>\n Con unas pocas lรญneas de cรณdigo...\n <p>\n <code>\n from folium import *<br>\n html\n </code>\n </p>\n \"\"\"\n\nfolium.Marker([-33.448653, -70.656910], popup=html).add_to(m)\n\nm", "_____no_output_____" ], [ "# Vamos a crear una figura, con un mapa dentro.\nf = branca.element.Figure()\nfolium.Map([-33.448653, -70.656910], zoom_start=10).add_to(f)\n\n# Pongamos la figura en un IFrame.\niframe = branca.element.IFrame(width=500, height=300)\nf.add_to(iframe)\n\n# Pongamos el IFrame en una ventana emergente\npopup = folium.Popup(iframe, max_width=2650)\n\n# Vamos a crear otro mapa\nm = folium.Map([-33.448653, -70.656910], zoom_start=4)\n\n# 
Pongamos el Popup en un marcador, en el segundo mapa.\nfolium.Marker([-33.448653, -70.656910], popup=popup).add_to(m)\n\n\n\nm", "_____no_output_____" ], [ "\nfrom folium.plugins import Draw\n\nattr = ('&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> '\n 'creadores, &copy; <a href=\"http://cartodb.com/attributions\">Camilo</a>')\ntiles = 'https://tile.thunderforest.com/mobile-atlas/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0'\n\nm = folium.Map(location=[ -33.448653, -70.656910], tiles=tiles, attr=attr, zoom_start=14,control_scale=True,# controlar escala\n prefer_canvas=True\n)\n\n#Crear datos aleatorios\ndata = (np.random.normal(size=(100, 3)) * 0.02 *\n np.array([[1, 1, 1]]) +\n np.array([[-33.448653, -70.656910, 1]])).tolist()\n\nm.add_child(HeatMap(data=data))\nm.add_child(HeatMap(data=data))\n\n\n# Agrega herramienta a la esquina superior derecha\nfrom folium.plugins import MeasureControl\nm.add_child(MeasureControl())\n\nm.add_child(folium.LatLngPopup())\n#plugins.ScrollZoomToggler().add_to(m)\nplugins.Fullscreen(\n position='topright',\n title='expander',\n title_cancel='salir',\n force_separate_button=True).add_to(m)\n\ndraw = Draw()\ndraw.add_to(m)\nm.save(os.path.join('results', 'trabajofinal.html'))#carpeta donde guardar ,nombre del html\nm\n", "_____no_output_____" ] ], [ [ "penCycleMap\nhttps://tile.thunderforest.com/cycle/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0\nTransporte\nhttps://tile.thunderforest.com/transport/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0\nPaisaje\nhttps://tile.thunderforest.com/landscape/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0\nAl aire libre\nhttps://tile.thunderforest.com/outdoors/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0\nTransporte oscuro\nhttps://tile.thunderforest.com/transport-dark/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0\nMapa espinal\nhttps://tile.thunderforest.com/spinal-map/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0\nPionero\nhttps://tile.thunderforest.com/pioneer/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0\nAtlas mรณvil\nhttps://tile.thunderforest.com/mobile-atlas/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0\nBarrio\nhttps://tile.thunderforest.com/neighbourhood/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0", "_____no_output_____" ] ], [ [ "#!jupyter nbconvert --to tex Clase_folium_M.ipynb", "_____no_output_____" ], [ "from folium.plugins import Draw\n\nattr = ('&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> '\n 'creadores, &copy; <a href=\"http://cartodb.com/attributions\">Camilo</a>')\ntiles = 'https://tile.thunderforest.com/mobile-atlas/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0'\n\n\ncenter_pos = [-33.448653, -70.656910]\n\n #Use numpy para crear datos iniciales\ninitial_data = (np.random.normal(size=(200, 2)) *\n np.array([[0.02, 0.02]]) +\n np.array([center_pos]))\n\n# Crear datos continuos\ndata = [initial_data.tolist()]\nfor i in range(20):\n data.append((data[i] + np.random.normal(size=(200, 2)) * 0.001).tolist())\n\nm = folium.Map(center_pos,tiles='Stamen Terrain',attr=attr, zoom_start=12,control_scale=True,prefer_canvas=True) #control del mapa\nm.add_child(HeatMapWithTime(data)) # Mostrar mapa de calor continuo\n\n# Agrega herramienta a la esquina superior derecha\nfrom folium.plugins import MeasureControl\nm.add_child(MeasureControl())\n\n#lineas de info\npoints = [[-33.4073,-70.6531],\n [-33.417417,-70.652628],\n [-33.418043, 
-70.648273]]\n\nm.add_child(folium.PolyLine(locations=points, # Lista de coordenadas\n weight=8)) #Ancho de lรญnea\n\n#circulo con relleno azul\nfolium.features.CircleMarker(\n location=[-33.448653, -70.656910],\n radius=190,\n popup='circulo de interes',\n color='#3186cc',\n fill=True,\n fill_color='#c131cc'\n).add_to(m)\n\n\n\nm.add_child(folium.LatLngPopup())\n#plugins.ScrollZoomToggler().add_to(m)\nplugins.Fullscreen(\n position='topright',\n title='agranda',\n title_cancel='by papu',\n force_separate_button=True).add_to(m)\n\ndraw = Draw()\ndraw.add_to(m)\nm.save(os.path.join('results', 'mapafinalcurso.html'))#carpeta donde guardar ,nombre del html\nm", "_____no_output_____" ], [ "from folium.plugins import Draw\n\nattr = ('&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> '\n 'creadores, &copy; <a href=\"http://cartodb.com/attributions\">Camilo</a>')\ntiles = ' https://tile.thunderforest.com/cycle/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0 '\n\nm = folium.Map(location=[ -33.448653, -70.656910], tiles=tiles, attr=attr, zoom_start=14,control_scale=True,# controlar escala\n prefer_canvas=True\n)\n\n\n# Agrega herramienta a la esquina superior derecha\nfrom folium.plugins import MeasureControl\nm.add_child(MeasureControl())\n\nm.add_child(folium.LatLngPopup())\n#plugins.ScrollZoomToggler().add_to(m)\nplugins.Fullscreen(\n position='topright',\n title='expander',\n title_cancel='salir',\n force_separate_button=True).add_to(m)\n\ndraw = Draw()\ndraw.add_to(m)\nm\n", "_____no_output_____" ], [ "from folium.plugins import Draw\n\nattr = ('&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> '\n 'creadores, &copy; <a href=\"http://cartodb.com/attributions\">Camilo</a>')\ntiles = ' https://tile.thunderforest.com/transport/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0 '\n\nm = folium.Map(location=[ -33.448653, -70.656910], tiles=tiles, attr=attr, zoom_start=12,control_scale=True,# controlar escala\n prefer_canvas=True\n)\n\n\n# Agrega herramienta a la esquina superior derecha\nfrom folium.plugins import MeasureControl\nm.add_child(MeasureControl())\n\nm.add_child(folium.LatLngPopup())\n#plugins.ScrollZoomToggler().add_to(m)\nplugins.Fullscreen(\n position='topright',\n title='expander',\n title_cancel='salir',\n force_separate_button=True).add_to(m)\n\ndraw = Draw()\ndraw.add_to(m)\nm", "_____no_output_____" ], [ "from folium.plugins import Draw\n\nattr = ('&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> '\n 'creadores, &copy; <a href=\"http://cartodb.com/attributions\">Camilo</a>')\ntiles = ' https://tile.thunderforest.com/spinal-map/{z}/{x}/{y}.png?apikey=3cd85f11f4744c0c8c3bdaab8483cde0 '\n\nm = folium.Map(location=[ -33.448653, -70.656910], tiles=tiles, attr=attr, zoom_start=12,control_scale=True,# controlar escala\n prefer_canvas=True\n)\n\n\n# Agrega herramienta a la esquina superior derecha\nfrom folium.plugins import MeasureControl\nm.add_child(MeasureControl())\n\nm.add_child(folium.LatLngPopup())\n#plugins.ScrollZoomToggler().add_to(m)\nplugins.Fullscreen(\n position='topright',\n title='expander',\n title_cancel='salir',\n force_separate_button=True).add_to(m)\n\ndraw = Draw()\ndraw.add_to(m)\nm", "_____no_output_____" ], [ "from folium.plugins import Draw\n\nattr = ('&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> '\n 'creadores, &copy; <a href=\"http://cartodb.com/attributions\">Camilo</a>')\ntiles = 
'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}'\n\nm = folium.Map(location=[ -33.448653, -70.656910], tiles=tiles, attr=attr, zoom_start=14,control_scale=True,# controlar escala\n prefer_canvas=True\n)\n\n\n# Agrega herramienta a la esquina superior derecha\nfrom folium.plugins import MeasureControl\nm.add_child(MeasureControl())\n\nm.add_child(folium.LatLngPopup())\n#plugins.ScrollZoomToggler().add_to(m)\nplugins.Fullscreen(\n position='topright',\n title='expander',\n title_cancel='salir',\n force_separate_button=True).add_to(m)\n\ndraw = Draw()\ndraw.add_to(m)\nm", "_____no_output_____" ], [ "from folium.plugins import Draw\n\nattr = ('&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> '\n 'creadores, &copy; <a href=\"http://cartodb.com/attributions\">Camilo</a>')\ntiles = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}'\n\nm = folium.Map(location=[ -33.4873, -70.4675], tiles=tiles, attr=attr, zoom_start=14,control_scale=True,# controlar escala\n prefer_canvas=True\n)\npoints = [[-33.4892,-70.4689],\n [-33.4850,-70.4676],\n [-33.4771,-70.4663]]\n\nm.add_child(folium.PolyLine(locations=points, # Lista de coordenadas\n weight=8)) #Ancho de lรญnea\n\n# Agrega herramienta a la esquina superior derecha\nfrom folium.plugins import MeasureControl\nm.add_child(MeasureControl())\n\nm.add_child(folium.LatLngPopup())\n#plugins.ScrollZoomToggler().add_to(m)\nplugins.Fullscreen(\n position='topright',\n title='expander',\n title_cancel='salir',\n force_separate_button=True).add_to(m)\n\ndraw = Draw()\ndraw.add_to(m)\nm", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
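The folium record above repeats one basic pattern: build a folium.Map centred on a coordinate, attach markers or shapes with .add_to(), and write the result to an HTML file. A minimal standalone sketch of that pattern follows; the coordinates are taken from the notebook, while the popup text and the output file name are placeholders:

```python
import folium

# Minimal form of the pattern used throughout the notebook above:
# centre a map on a coordinate, add a marker with a popup, save to HTML.
santiago = [-33.448653, -70.656910]   # coordinates taken from the notebook
m = folium.Map(location=santiago, zoom_start=13)
folium.Marker(santiago, popup='Escuela de Cartografia').add_to(m)
m.save('minimal_map.html')            # placeholder output file name
```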
cb124e6026867b509a8da9724b19812896c4baef
6,423
ipynb
Jupyter Notebook
scrape_zipcode.ipynb
camhwilson/DogNamez
78edda5b377e962f0980d1779a7c6b5e0fe52c5a
[ "MIT" ]
null
null
null
scrape_zipcode.ipynb
camhwilson/DogNamez
78edda5b377e962f0980d1779a7c6b5e0fe52c5a
[ "MIT" ]
null
null
null
scrape_zipcode.ipynb
camhwilson/DogNamez
78edda5b377e962f0980d1779a7c6b5e0fe52c5a
[ "MIT" ]
null
null
null
23.877323
85
0.422544
[ [ [ "import requests\nfrom bs4 import BeautifulSoup\nimport pandas", "_____no_output_____" ], [ "URL = \"https://www.zip-codes.com/county/pa-allegheny.asp\"\nr = requests.get(URL)\n \nsoup = BeautifulSoup(r.content, 'html5lib')", "_____no_output_____" ], [ "quotes=[] # a list to store quotes\n \ntable = soup.find('table', attrs = {'class':'statTable'}) ", "_____no_output_____" ], [ "print(type(table))", "<class 'bs4.element.Tag'>\n" ], [ "table_body = table.find('tbody')", "_____no_output_____" ], [ "data = []\nrows = table_body.find_all('tr')\nfor row in rows:\n cols = row.find_all('td')\n cols = [ele.text.strip() for ele in cols]\n data.append([ele for ele in cols if ele]) # Get rid of empty values", "_____no_output_____" ], [ "#get column names\n\ncolnames = data[0]\n\ndata = data[1:]", "_____no_output_____" ], [ "print(data[130])", "['ZIP Code 15276', 'General', 'Pittsburgh', '0', 'Eastern', '412/724']\n" ], [ "#convert zip code names\nconverted_data = []\nfor i in data:\n if i[3] != '0':\n zip = i[0].split(' ')[2]\n converted_data.append([zip, *i[1:]])\n \n", "_____no_output_____" ], [ "df = pandas.DataFrame(converted_data, columns = colnames)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.to_csv('zcodepopulationdata.csv')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
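The scraping record above locates a statTable on zip-codes.com and flattens each table row into a list of non-empty cell texts. The sketch below reproduces that extraction offline against a tiny made-up HTML snippet (the figures in it are illustrative only), and uses the built-in html.parser in place of the notebook's html5lib:

```python
from bs4 import BeautifulSoup

# Same row/column extraction as the notebook above, run on an inline snippet.
html = """
<table class="statTable">
  <tr><td>ZIP Code</td><td>Type</td><td>City</td><td>Population</td></tr>
  <tr><td>ZIP Code 15213</td><td>General</td><td>Pittsburgh</td><td>29349</td></tr>
</table>
"""
table = BeautifulSoup(html, 'html.parser').find('table', attrs={'class': 'statTable'})
data = []
for row in table.find_all('tr'):
    cols = [td.text.strip() for td in row.find_all('td')]
    data.append([c for c in cols if c])  # drop empty cells, as in the notebook
print(data)
```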
cb12507bfa9d9de679e2f10d46c5ec38aeef9bc1
1,603
ipynb
Jupyter Notebook
data/final/ISOT/isot_analysis.ipynb
SkBlaz/KBNR
4c37fe3fdfa7719572affd617e2dab43a54ba1d5
[ "MIT" ]
1
2022-02-04T07:57:55.000Z
2022-02-04T07:57:55.000Z
data/final/ISOT/isot_analysis.ipynb
SkBlaz/KBNR
4c37fe3fdfa7719572affd617e2dab43a54ba1d5
[ "MIT" ]
1
2022-02-24T13:00:33.000Z
2022-02-24T13:00:33.000Z
data/final/ISOT/isot_analysis.ipynb
SkBlaz/KBNR
4c37fe3fdfa7719572affd617e2dab43a54ba1d5
[ "MIT" ]
1
2022-02-04T08:00:16.000Z
2022-02-04T08:00:16.000Z
21.958904
77
0.529008
[ [ [ "import pandas as pd\nsplits = ['train.csv', 'dev.csv', 'test.csv']", "_____no_output_____" ], [ "texts = []", "_____no_output_____" ], [ "for split in splits:\n df = pd.read_csv(split)\n total = len(df)\n texts = texts + df['text_a'].to_list()\n #ids = ids + df['Unnamed: 0'].to_list()\n real = sum(df.label.to_list())\n print(split,total, real, real/total, total - real, 1 - real/total)", "train.csv 29328 15932 0.5432351336606656 13396 0.45676486633933444\ndev.csv 7332 3976 0.5422804146208402 3356 0.4577195853791598\ntest.csv 2445 1289 0.52719836400818 1156 0.47280163599182\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
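The analysis record above prints, for each ISOT split, the row total, the count of rows with label == 1, and both class proportions. A small sketch re-derives those proportions from the counts in the notebook's own output, with no need for the CSV files:

```python
# Class balance of the ISOT splits, recomputed from the counts reported above.
counts = {
    'train.csv': (29328, 15932),  # (total rows, rows with label == 1)
    'dev.csv':   (7332, 3976),
    'test.csv':  (2445, 1289),
}
for split, (total, real) in counts.items():
    fake = total - real
    print(split, total, real, round(real / total, 4), fake, round(fake / total, 4))
```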