hexsha (stringlengths 40..40) | size (int64 6..14.9M) | ext (stringclasses 1 value) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 6..260) | max_stars_repo_name (stringlengths 6..119) | max_stars_repo_head_hexsha (stringlengths 40..41) | max_stars_repo_licenses (sequence) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 6..260) | max_issues_repo_name (stringlengths 6..119) | max_issues_repo_head_hexsha (stringlengths 40..41) | max_issues_repo_licenses (sequence) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 6..260) | max_forks_repo_name (stringlengths 6..119) | max_forks_repo_head_hexsha (stringlengths 40..41) | max_forks_repo_licenses (sequence) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | avg_line_length (float64 2..1.04M) | max_line_length (int64 2..11.2M) | alphanum_fraction (float64 0..1) | cells (sequence) | cell_types (sequence) | cell_type_groups (sequence) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e7c69542b88e4f6050dfca8a867fa19a13610cd3 | 74,881 | ipynb | Jupyter Notebook | student-projects/fall-2020/Kinesso-AdShift-Diversifies-Marketing-Audiences/eda/[DEPRECATED] international_eda/germany/germany_eda.ipynb | UCBerkeley-SCET/DataX-Berkeley | f912d22c838b511d3ada4ecfa3548afd80437b74 | [
"Apache-2.0"
] | 28 | 2020-06-15T23:53:36.000Z | 2022-03-19T09:27:02.000Z | student-projects/fall-2020/Kinesso-AdShift-Diversifies-Marketing-Audiences/eda/[DEPRECATED] international_eda/germany/germany_eda.ipynb | UCBerkeley-SCET/DataX-Berkeley | f912d22c838b511d3ada4ecfa3548afd80437b74 | [
"Apache-2.0"
] | 4 | 2020-06-24T22:20:31.000Z | 2022-02-28T01:37:36.000Z | student-projects/fall-2020/Kinesso-AdShift-Diversifies-Marketing-Audiences/eda/[DEPRECATED] international_eda/germany/germany_eda.ipynb | UCBerkeley-SCET/DataX-Berkeley | f912d22c838b511d3ada4ecfa3548afd80437b74 | [
"Apache-2.0"
] | 78 | 2020-06-19T09:41:01.000Z | 2022-02-05T00:13:29.000Z | 38.049289 | 9,632 | 0.440032 | [
[
[
"import csv\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport json\nimport zipfile\nimport seaborn as sns",
"_____no_output_____"
]
],
[
[
"**READING IN KINESSO DATA**",
"_____no_output_____"
]
],
[
[
"imp = pd.read_csv('impressions_one_hour.csv')\nimp = imp[imp['country'] == 'Germany']\nimp = imp[~imp['zip_code'].isna()]\nimp['zip_code'] = imp['zip_code'].astype(str)",
"_____no_output_____"
]
],
[
[
"**READING IN 2011 GERMANY CENSUS DATA**",
"_____no_output_____"
]
],
[
[
"# data from: https://www.suche-postleitzahl.org/downloads\nzip_codes = pd.read_csv(\"plz_einwohner.csv\")\n",
"_____no_output_____"
],
[
"def add_zero(x):\n if len(x) == 4:\n return '0'+ x\n else:\n return x",
"_____no_output_____"
]
],
[
[
"**CORRECTING FORMATTING ERROR THAT REMOVED INITIAL '0' FROM ZIPCODES**",
"_____no_output_____"
]
],
[
[
"zip_codes['zipcode'] = zip_codes['zipcode'].astype(str).apply(add_zero)\nzip_codes.head()",
"_____no_output_____"
]
],
[
[
"Real Population of Germany is 83.02 million",
"_____no_output_____"
]
],
[
[
"np.sum(zip_codes['population'])",
"_____no_output_____"
]
],
[
[
"**CALCULATING VALUE COUNTS FROM KINESSO DATA**",
"_____no_output_____"
]
],
[
[
"val_cou = imp['zip_code'].value_counts()\nval_counts = pd.DataFrame(columns=['zipcode', 'count'], data=val_cou)\nval_counts['zipcode'] = val_cou.index.astype(str)\nval_counts['count'] = val_cou.values.astype(int)\nval_counts",
"_____no_output_____"
]
],
[
[
"**MERGING KINESSO VALUE COUNTS WITH CENSUS DATA**\n\n\n*ONLY 19 ZIPCODES DO NOT HAVE CENSUS DATA*",
"_____no_output_____"
]
],
[
[
"population_count = val_counts.merge(right=zip_codes, right_on='zipcode', left_on='zipcode', how='outer')\npopulation_count_f = population_count.dropna()",
"_____no_output_____"
],
[
"#only 19 zipcodes without data\nlen(population_count[population_count['population'].isna()])",
"_____no_output_____"
]
],
[
[
"Here, count is the observed number from the Kinesso dataset and population is the expected number from the census dataset",
"_____no_output_____"
]
],
[
[
"population_count_f",
"_____no_output_____"
]
],
[
[
"**CALCULATING DEVICE FREQUENCIES FOR EACH ZIPCODE**",
"_____no_output_____"
]
],
[
[
"imp['count'] = [1] * len(imp)\ndevice_model_make_counts = imp.groupby(['zip_code', 'device_make', 'device_model'], as_index=False).count()[['zip_code', 'device_make', 'device_model', 'count']]\ntotal_calc = device_model_make_counts.groupby(['zip_code']).sum()\n",
"_____no_output_____"
],
[
"percent_calc = []\nfor i in device_model_make_counts.index:\n zipc = device_model_make_counts.iloc[i]['zip_code']\n percent_calc = np.append(percent_calc, device_model_make_counts.iloc[i]['count']/total_calc.loc[zipc])",
"_____no_output_____"
],
[
"device_model_make_counts['device % freq']= percent_calc *100\ndevice_model_make_counts['combined'] = device_model_make_counts['device_make'] + ' ' + device_model_make_counts['device_model']\ndevice_model_make_counts['zip_code'] = device_model_make_counts['zip_code'].astype(str).apply(add_zero)\ndevice_model_make_counts\n ",
"_____no_output_____"
]
],
[
[
"**CALCULATING PERCENT DIFFERENCE BETWEEN EXPECTED AND OBSERVED POPULATIONS FOR EACH ZIPCODE**",
"_____no_output_____"
]
],
[
[
"population_count_f['population % expected'] = (population_count_f['population']/sum(population_count_f['population']))*100\npopulation_count_f['population % observed'] = (population_count_f['count']/sum(population_count_f['count']))*100\npopulation_count_f['% difference'] = population_count_f['population % observed'] - population_count_f['population % expected']\npopulation_count_f = population_count_f.rename(columns={'count':'observed population', 'population':'expected population'})\npopulation_count_f",
"<ipython-input-46-cfa73adf08fd>:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n population_count_f['population % expected'] = (population_count_f['population']/sum(population_count_f['population']))*100\n<ipython-input-46-cfa73adf08fd>:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n population_count_f['population % observed'] = (population_count_f['count']/sum(population_count_f['count']))*100\n<ipython-input-46-cfa73adf08fd>:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n population_count_f['% difference'] = population_count_f['population % observed'] - population_count_f['population % expected']\n"
]
],
[
[
"*MERGING TOGETHER WITH DEVICE FREQUENCY DATA*",
"_____no_output_____"
]
],
[
[
"combo = device_model_make_counts.merge(right=population_count_f, right_on='zipcode', left_on='zip_code', how='outer').drop(['count', 'device_make', 'device_model'], axis=1)\ncombined_impressions = combo.sort_values('% difference', ascending=False)\n",
"_____no_output_____"
],
[
"combined_impressions",
"_____no_output_____"
]
],
[
[
"*GROUPING TO IDENTIFY MOST COMMONLY USED DEVICE*",
"_____no_output_____"
]
],
[
[
"most_common_device = combined_impressions.groupby(['zip_code']).max()\nmost_common_device",
"_____no_output_____"
]
],
[
[
"*IDENTIFYING MOST UNDER REPRESENTED ZIP CODES*",
"_____no_output_____"
]
],
[
[
"underrepresented = most_common_device.sort_values('% difference').head(1000)\nunderrepresented.head(10)",
"_____no_output_____"
],
[
"underrepresented['combined'].value_counts()",
"_____no_output_____"
]
],
[
[
"*IDENTIFYING MOST OVER REPRESENTED ZIP CODES*",
"_____no_output_____"
]
],
[
[
"overrepresented = most_common_device.sort_values('% difference', ascending=False).head(1000)\noverrepresented.head(10)",
"_____no_output_____"
],
[
"overrepresented['combined'].value_counts()",
"_____no_output_____"
]
],
[
[
"**The device frequency numbers are not examined too closely here: for the underrepresented zip codes, Kinesso only advertised to roughly 8-9 people each (interestingly, mostly Apple users), which is too small a sample to interpret. Instead, the top 10 and bottom 10 zip codes are investigated in a separate Google Doc titled: top 10 zipcode investigation**\n\n*Quick summary:*\n\n**Over-represented** zip codes belong to large urban cities with more industries and probably higher incomes, though this is unverified because zip-code-specific salary data was not available.\n\n**Under-represented** zip codes belong to small cities with industries such as coal, tourism, and power plants; lower incomes are suspected but not confirmed.",
"_____no_output_____"
]
],
[
[
"sns.distplot(most_common_device['% difference'])",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7c69ba41e5257e0d66a9b4f81837c8c91fb2157 | 9,492 | ipynb | Jupyter Notebook | notebooks/1.0-jf-fetching-tweets-example.ipynb | joaopfonseca/solve-iwmi | f5bd309513c6752d1472e13089bcfbf54ec7e7a2 | [
"MIT"
] | 2 | 2020-11-14T22:35:15.000Z | 2021-09-04T15:02:08.000Z | notebooks/1.0-jf-fetching-tweets-example.ipynb | kanav-mehra/solve-iwmi | d1db518a71f3343f39bfa14eb9234b033e7335eb | [
"MIT"
] | null | null | null | notebooks/1.0-jf-fetching-tweets-example.ipynb | kanav-mehra/solve-iwmi | d1db518a71f3343f39bfa14eb9234b033e7335eb | [
"MIT"
] | 3 | 2020-11-22T20:56:28.000Z | 2022-01-24T07:48:53.000Z | 42.756757 | 1,013 | 0.59134 | [
[
[
"# Fetching Twitter data\n\nThis notebook is a simple demonstration of how to fetch tweets using a Twitter Sandbox Environment. The sample data is saved as a JSON file, which must then be preprocessed.",
"_____no_output_____"
]
],
[
[
"import os\nfrom os.path import join\nfrom searchtweets import load_credentials, gen_rule_payload, ResultStream, collect_results\nimport json\n\nproject_dir = join(os.getcwd(), os.pardir)\nraw_dir = join(project_dir, 'data', 'raw')\ntwitter_creds_path = join(project_dir, 'twitter_creds.yaml')",
"_____no_output_____"
],
[
"search_args = load_credentials(twitter_creds_path, yaml_key='sample_tweets_api')\n\n# this should probably be moved to the configs.yaml\nquery = \"((cyclone amphan) OR amphan)\"\n\n\n##Cyclone amphan\n#Formed:16 May 2020\n#Dissipated:21 May 2020\n\nfrom_date=\"2020-05-14\"\nto_date=\"2020-06-15\"\n\n# I defined results_per_call as 100 which is default for free users. These can be 500 for paid tiers.\nrule = gen_rule_payload(query, results_per_call=100, from_date=\"2020-05-14\", to_date=\"2020-06-15\")\n\nrs = ResultStream(rule_payload=rule,\n max_results=200,\n **search_args)",
"cannot read file /home/jovyan/work/notebooks/../configs.yaml\nError parsing YAML file; searching for valid environment variables\nAccount type is not specified and cannot be inferred.\n Please check your credential file, arguments, or environment variables\n for issues. The account type must be 'premium' or 'enterprise'.\n \n"
],
[
"fname = f'SAMPLE_DATA_QUERY_{query}_FROMDATE_{from_date}_TODATE_{to_date}.json'\nwith open(join(raw_dir, fname), 'a', encoding='utf-8') as f:\n for tweet in rs.stream():\n json.dump(tweet, f)\n f.write('\\n')\nprint('done')",
"done\n"
]
],
[
[
"## Count existing tweets for a given request",
"_____no_output_____"
]
],
[
[
"search_args = load_credentials(twitter_creds_path, yaml_key='search_tweets_api')\nquery = \"(cyclone amphan)\"\n\ncount_rule = gen_rule_payload(query, from_date=\"2020-05-14\", to_date=\"2020-06-15\", count_bucket=\"day\", results_per_call=500)\n\ncounts = collect_results(count_rule, result_stream_args=search_args)\n\ncounts",
"Grabbing bearer token from OAUTH\n"
],
[
"tweets = 0\nfor day in counts:\n tweets+=day['count']\n\ntweets",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7c69dfc26e6da36beaf236d6961df4e03c8187c | 322,178 | ipynb | Jupyter Notebook | CSE_321_Software Engineering/Lecture_13_17.080.2020.ipynb | officialrafsan/CS32S20 | 56194694f3a7be299b02a5acc34b1ada2f495e58 | [
"MIT"
] | null | null | null | CSE_321_Software Engineering/Lecture_13_17.080.2020.ipynb | officialrafsan/CS32S20 | 56194694f3a7be299b02a5acc34b1ada2f495e58 | [
"MIT"
] | null | null | null | CSE_321_Software Engineering/Lecture_13_17.080.2020.ipynb | officialrafsan/CS32S20 | 56194694f3a7be299b02a5acc34b1ada2f495e58 | [
"MIT"
] | null | null | null | 2,753.65812 | 209,236 | 0.964107 | [
[
[
"<div class=\"alert alert-success\">\n <b>Author</b>:\n\n Rashik Rahman\n [email protected]\n\n</div>\n\n# [Click here to see class lecture](https://photos.app.goo.gl/rZaFT2Ct6uETfyww6)",
"_____no_output_____"
],
[
"Until now we have learned the UML/class diagram. Today we'll learn how to draw an object diagram from a class diagram. In OOP we learned about objects: in code we created an object of a class with its initialized values. Here it's the same thing. We have a base class like `dog`, and to make an object of it we give the values and create an object.\n\n\n\nHere we create an object of the `Department` class whose name is mathStat. mathStat's attribute is degree, so the degree can be either undergraduate or graduate.\n\n\n\n\n\n",
"_____no_output_____"
],
[
"**USE CASE Diagram**\n\n\n\n\n\n\nHere in the example we can see that the customer is the actor. In the exam, however, they won't tell you who the actor is; you have to figure it out.\n\n\n\n\n\n\n\n",
"_____no_output_____"
],
[
"\n\n\n\n\n\n\n`How to find actor and use cases`\n\n\n\n\n\n",
"_____no_output_____"
],
[
"# That's all for this lecture!",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7c6a9f233d13a6e9486ffc672c60fd3ba9da85a | 2,168 | ipynb | Jupyter Notebook | capture_array.ipynb | trucabrac/Blob-process---tests | cc14460e86b29fa01a2468a449459db216493761 | [
"CC0-1.0"
] | null | null | null | capture_array.ipynb | trucabrac/Blob-process---tests | cc14460e86b29fa01a2468a449459db216493761 | [
"CC0-1.0"
] | null | null | null | capture_array.ipynb | trucabrac/Blob-process---tests | cc14460e86b29fa01a2468a449459db216493761 | [
"CC0-1.0"
] | null | null | null | 24.088889 | 103 | 0.574723 | [
[
[
"https://pyimagesearch.com/2015/03/30/accessing-the-raspberry-pi-camera-with-opencv-and-python/\nWhy: reads the image directly as an np.array, so it can be image-processed right away\nNeeds: add steps for saving (one step for the original capture and one for the processed image)",
"_____no_output_____"
]
],
[
[
"# import the necessary packages\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport time\nimport cv2\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\nrawCapture = PiRGBArray(camera)\n# allow the camera to warmup\ntime.sleep(0.1)\n# grab an image from the camera\ncamera.capture(rawCapture, format=\"bgr\")\nimage = rawCapture.array\n# display the image on screen and wait for a keypress\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)",
"_____no_output_____"
],
[
"#save the raw image, as captured\ncv2.imwrite(\"/path to storage folder/img_name_raw.tiff\", image)",
"_____no_output_____"
],
[
"#process\n\n\n",
"_____no_output_____"
],
[
"#save the processed image, ready to show\ncv2.imwrite(\"/path to slideshow folder/img_name.jpg\", img)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7c6acbb848c31dfc3fea0f722287bf1c529adb4 | 17,663 | ipynb | Jupyter Notebook | 1 - Python/1 - Python Syntax [exercise-syntax-variables-and-number].ipynb | AkashKumarSingh11032001/Kaggle_Course_Repository | aa983db044565c24bbbff940412b437320e1e2de | [
"MIT"
] | null | null | null | 1 - Python/1 - Python Syntax [exercise-syntax-variables-and-number].ipynb | AkashKumarSingh11032001/Kaggle_Course_Repository | aa983db044565c24bbbff940412b437320e1e2de | [
"MIT"
] | null | null | null | 1 - Python/1 - Python Syntax [exercise-syntax-variables-and-number].ipynb | AkashKumarSingh11032001/Kaggle_Course_Repository | aa983db044565c24bbbff940412b437320e1e2de | [
"MIT"
] | null | null | null | 17,663 | 17,663 | 0.678254 | [
[
[
"**This notebook is an exercise in the [Python](https://www.kaggle.com/learn/python) course. You can reference the tutorial at [this link](https://www.kaggle.com/colinmorris/hello-python).**\n\n---\n",
"_____no_output_____"
],
[
"Welcome to your first set of Python coding problems. If this is your first time using Kaggle Notebooks, welcome! \n\nNotebooks are composed of blocks (called \"cells\") of text and code. Each of these is editable, though you'll mainly be editing the code cells to answer some questions.\n\nTo get started, try running the code cell below (by pressing the ► button, or clicking on the cell and pressing ctrl+enter on your keyboard).",
"_____no_output_____"
]
],
[
[
"print(\"You've successfully run some Python code\")\nprint(\"Congratulations!\")",
"You've successfully run some Python code\nCongratulations!\n"
]
],
[
[
"Try adding another line of code in the cell above and re-running it. \n\nNow let's get a little fancier: Add a new code cell by clicking on an existing code cell, hitting the escape key, and then hitting the `a` or `b` key. The `a` key will add a cell above the current cell, and `b` adds a cell below.\n\nGreat! Now you know how to use Notebooks.\n\nEach hands-on exercise starts by setting up our feedback and code checking mechanism. Run the code cell below to do that. Then you'll be ready to move on to question 0.",
"_____no_output_____"
]
],
[
[
"from learntools.core import binder; binder.bind(globals())\nfrom learntools.python.ex1 import *\nprint(\"Setup complete! You're ready to start question 0.\")",
"Setup complete! You're ready to start question 0.\n"
]
],
[
[
"# 0.\n\n*This is a silly question intended as an introduction to the format we use for hands-on exercises throughout all Kaggle courses.*\n\n**What is your favorite color? **\n\nTo complete this question, create a variable called `color` in the cell below with an appropriate value. The function call `q0.check()` (which we've already provided in the cell below) will check your answer.",
"_____no_output_____"
]
],
[
[
"# create a variable called color with an appropriate value on the line below\n# (Remember, strings in Python must be enclosed in 'single' or \"double\" quotes)\ncolor = \"blue\"\n\n# Check your answer\nq0.check()",
"_____no_output_____"
]
],
[
[
"Didn't get the right answer? How do you not even know your own favorite color?!\n\nDelete the `#` in the line below to make one of the lines run. You can choose between getting a hint or the full answer by choosing which line to remove the `#` from. \n\nRemoving the `#` is called uncommenting, because it changes that line from a \"comment\" which Python doesn't run to code, which Python does run.",
"_____no_output_____"
]
],
[
[
"# q0.hint()\n# q0.solution()",
"_____no_output_____"
]
],
[
[
"The upcoming questions work the same way. The only thing that will change are the question numbers. For the next question, you'll call `q1.check()`, `q1.hint()`, `q1.solution()`, for question 2, you'll call `q2.check()`, and so on.",
"_____no_output_____"
],
[
"<hr/>\n\n# 1.\n\nComplete the code below. In case it's helpful, here is the table of available arithmetic operations:\n\n\n\n| Operator | Name | Description |\n|--------------|----------------|--------------------------------------------------------|\n| ``a + b`` | Addition | Sum of ``a`` and ``b`` |\n| ``a - b`` | Subtraction | Difference of ``a`` and ``b`` |\n| ``a * b`` | Multiplication | Product of ``a`` and ``b`` |\n| ``a / b`` | True division | Quotient of ``a`` and ``b`` |\n| ``a // b`` | Floor division | Quotient of ``a`` and ``b``, removing fractional parts |\n| ``a % b`` | Modulus | Integer remainder after division of ``a`` by ``b`` |\n| ``a ** b`` | Exponentiation | ``a`` raised to the power of ``b`` |\n| ``-a`` | Negation | The negative of ``a`` |\n\n<span style=\"display:none\"></span>\n",
"_____no_output_____"
]
],
[
[
"pi = 3.14159 # approximate\ndiameter = 3\n\n# Create a variable called 'radius' equal to half the diameter\nradius = diameter/2\n\n# Create a variable called 'area', using the formula for the area of a circle: pi times the radius squared\narea = pi * (radius ** 2)\n\n# Check your answer\nq1.check()",
"_____no_output_____"
],
[
"# Uncomment and run the lines below if you need help.\n#q1.hint()\n#q1.solution()",
"_____no_output_____"
]
],
[
[
"<hr/>\n\n# 2.\n\nAdd code to the following cell to swap variables `a` and `b` (so that `a` refers to the object previously referred to by `b` and vice versa).",
"_____no_output_____"
]
],
[
[
"########### Setup code - don't touch this part ######################\n# If you're curious, these are examples of lists. We'll talk about \n# them in depth a few lessons from now. For now, just know that they're\n# yet another type of Python object, like int or float.\na = [1, 2, 3]\nb = [3, 2, 1]\nq2.store_original_ids()\n######################################################################\n\n# Your code goes here. Swap the values to which a and b refer.\n# If you get stuck, you can always uncomment one or both of the lines in\n# the next cell for a hint, or to peek at the solution.\n\na,b = b,a\n######################################################################\n\n# Check your answer\nq2.check()",
"_____no_output_____"
],
[
"#q2.hint()",
"_____no_output_____"
],
[
"#q2.solution()",
"_____no_output_____"
]
],
[
[
"<hr/>\n\n# 3a.\n\nAdd parentheses to the following expression so that it evaluates to 1.",
"_____no_output_____"
]
],
[
[
"(5 - 3) // 2",
"_____no_output_____"
],
[
"#q3.a.hint()",
"_____no_output_____"
],
[
"# Check your answer (Run this code cell to receive credit!)\nq3.a.solution()",
"_____no_output_____"
]
],
[
[
"# 3b. <span title=\"A bit spicy\" style=\"color: darkgreen \">🌶️</span>\n\n<small>Questions, like this one, marked a spicy pepper are a bit harder.</small>\n\nAdd parentheses to the following expression so that it evaluates to 0.",
"_____no_output_____"
]
],
[
[
"(8 - (3 * 2)) - (1 + 1)",
"_____no_output_____"
],
[
"#q3.b.hint()",
"_____no_output_____"
],
[
"# Check your answer (Run this code cell to receive credit!)\nq3.b.solution()",
"_____no_output_____"
]
],
[
[
"<hr/>\n\n# 4. \nAlice, Bob and Carol have agreed to pool their Halloween candy and split it evenly among themselves.\nFor the sake of their friendship, any candies left over will be smashed. For example, if they collectively\nbring home 91 candies, they'll take 30 each and smash 1.\n\nWrite an arithmetic expression below to calculate how many candies they must smash for a given haul.",
"_____no_output_____"
]
],
[
[
"# Variables representing the number of candies collected by alice, bob, and carol\nalice_candies = 121\nbob_candies = 77\ncarol_candies = 109\n\n# Your code goes here! Replace the right-hand side of this assignment with an expression\n# involving alice_candies, bob_candies, and carol_candies\nto_smash = (alice_candies + bob_candies + carol_candies) % 3\nprint(to_smash)\n\n# Check your answer\nq4.check()",
"1\n"
],
[
"#q4.hint()\n#q4.solution()",
"_____no_output_____"
]
],
[
[
"# Keep Going\n\nNext up, you'll **[learn to write new functions and understand functions others write](https://www.kaggle.com/colinmorris/functions-and-getting-help)**. This will make you at least 10 times more productive as a Python programmer. ",
"_____no_output_____"
],
[
"---\n\n\n\n\n*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161283) to chat with other Learners.*",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7c6b01121bb2d4145fd52db7f572dc1dbaaee1a | 171,954 | ipynb | Jupyter Notebook | covid_data_analysis_solution.ipynb | rahulkumbhar8888/DataScience | edd152c36d5d5abe140a5eee8721257ddb221214 | [
"MIT"
] | 4 | 2021-05-08T03:40:33.000Z | 2021-06-13T08:18:55.000Z | covid_data_analysis_solution.ipynb | rahulkumbhar8888/DataScience | edd152c36d5d5abe140a5eee8721257ddb221214 | [
"MIT"
] | null | null | null | covid_data_analysis_solution.ipynb | rahulkumbhar8888/DataScience | edd152c36d5d5abe140a5eee8721257ddb221214 | [
"MIT"
] | 3 | 2021-05-01T13:54:35.000Z | 2021-05-15T16:12:55.000Z | 141.293344 | 35,824 | 0.845121 | [
[
[
"### Assignment\n\n#### Data Description\n- COVID data of daily cumulative cases for India as reported from January 2020 to 8th August 2020\n- Source: https://www.kaggle.com/sudalairajkumar/covid19-in-india\n\n#### Conduct the insight investigation below\n1. Find which state has the highest mean of cumulative confirmed cases since reporting began in Jan 2020\n<br>- Plot a line graph of the means for the top 10 states with the highest daily confirmed cases\n2. Which state has the highest Death Rate for the months of June, July & Aug\n<br> - Plot a bar graph of Death Rates for the top 5 states\n\n#### Key steps to adopt to solve the above questions\n- Load data --> Clean data / data munging --> Group data by state --> Explore using plots",
"_____no_output_____"
],
[
"#### Load Packages",
"_____no_output_____"
]
],
[
[
"import pandas as pd # for cleaning and loading data from csv file\nimport numpy as np\nfrom matplotlib import pyplot as plt # package for plotting graphs\nimport datetime\nimport seaborn as sns; sns.set(color_codes=True)\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"#### Load data",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"covid_19_india.csv\")\ndf.head() # Preview first 5 rows of dataframe",
"_____no_output_____"
],
[
"# Convert Date column which is a string into datetime object\ndf[\"Date\"] = pd.to_datetime(df[\"Date\"], format = \"%d/%m/%y\")\ndf.head()",
"_____no_output_____"
]
],
[
[
"#### Cleaning of data\n- The dataset consists of cumulative values; the aim is to create columns with the daily reported deaths and confirmed cases.\n- The method below is a helper function that creates a column of daily reported cases from the cumulative frequency column",
"_____no_output_____"
]
],
[
[
"ex = np.unique(df['State/UnionTerritory'])\nex",
"_____no_output_____"
]
],
[
[
"From the unique state values above it is clear that Telangana is represented in multiple ways. We will replace each occurrence of the Telangana state name with the standard spelling",
"_____no_output_____"
]
],
[
[
"def clean_stateName(stateName):\n if stateName == 'Telangana***':\n stateName = 'Telangana'\n elif stateName == 'Telengana':\n stateName = 'Telangana'\n elif stateName == 'Telengana***':\n stateName = 'Telangana'\n \n return stateName",
"_____no_output_____"
]
],
[
[
"- The apply method is used to apply either a user-defined or built-in function across every cell of a dataframe\n- Commonly, a lambda function is used with the apply method across each cell\n- A lambda function is a small anonymous function.\n- A lambda function can take any number of arguments, but can only have one expression.",
"_____no_output_____"
]
],
[
[
"df[\"State/UnionTerritory\"] = df[\"State/UnionTerritory\"].apply(lambda x: clean_stateName(x))\nnp.unique(df[\"State/UnionTerritory\"]) # to identify all unique values in a column of dataframe or array",
"_____no_output_____"
],
[
"def daily_cases(dframe, stateColumn,dateColumn, cummColumn):\n # Sort column containing state and then by date in ascending order\n dframe.sort_values(by = [stateColumn, dateColumn], inplace = True)\n newColName = 'daily_' + cummColumn\n dframe[newColName] = dframe[cummColumn].diff() # diff is pandas method to caclucate difference between consecutive values\n# print(dframe.tail())\n '''\n Below line uses shift method of pandas to compare consecutive state names and if they are not different\n as shown by using ! symbol then create list of boolean, True for if they are different else False\n ''' \n mask = dframe[stateColumn] != dframe[stateColumn].shift(1)\n dframe[newColName][mask] = np.nan # where value of mask =True the cell value will be replaced by NaN\n dframe[newColName] = dframe[newColName].apply(lambda x: 0 if x < 0 else x) # replace negative values by 0\n# dframe.drop('diffs',axis=1, inplace = True)\n \n return dframe\n ",
"_____no_output_____"
],
[
"df_new = daily_cases(dframe= df, stateColumn= 'State/UnionTerritory',dateColumn= 'Date', cummColumn= 'Confirmed')",
"<ipython-input-7-4efcf8e0fcc5>:12: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n dframe[newColName][mask] = np.nan # where value of mask =True the cell value will be replaced by NaN\n"
],
[
"df_new[df_new[\"State/UnionTerritory\"]==\"Maharashtra\"].tail(n=5)",
"_____no_output_____"
]
],
[
[
"#### Q1. Find which state has the highest mean of cumulative confirmed cases since reporting began in Jan 2020",
"_____no_output_____"
]
],
[
[
"# Hint : Groupby state names to find their means for confirmed cases\n\ndf_group = df_new.groupby([\"State/UnionTerritory\"])['daily_Confirmed'].mean()",
"_____no_output_____"
],
[
"df_group = df_group.sort_values(ascending= False)[0:10]\ndf_group",
"_____no_output_____"
],
[
"df_group.index",
"_____no_output_____"
],
[
"ax = sns.lineplot(x=df_group.index, y= df_group.values)\nplt.scatter(x=df_group.index, y= df_group.values, c = 'r')\nax.figure.set_figwidth(12)\nax.figure.set_figheight(4)\nax.set_ylabel(\"Mean of Daily Confirmed Cases\")",
"_____no_output_____"
]
],
[
[
"#### Q2. Which state has the highest Death Rate for the months of June, July & Aug",
"_____no_output_____"
]
],
[
[
"# Hint - explore how a datetime column of dataframe can be filtered using specific months\ndf_months = df_new['Date'].apply(lambda x: x.month in [6,7,8]) # this will create boolean basis comparison of months",
"_____no_output_____"
],
[
"df_final = df_new[df_months] # Filtered dataframe consisting of data from June, July & Aug",
"_____no_output_____"
],
[
"df_final.tail()",
"_____no_output_____"
],
[
"df_final['death_rate'] = df_final['Deaths'] / df_final['Confirmed'] *100",
"<ipython-input-17-199836f307be>:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df_final['death_rate'] = df_final['Deaths'] / df_final['Confirmed'] *100\n"
],
[
"df_final.tail()",
"_____no_output_____"
],
[
"df_groups_deaths = df_final.groupby([\"State/UnionTerritory\"])['death_rate'].mean()",
"_____no_output_____"
],
[
"top_10_deathrates = df_groups_deaths.sort_values(ascending= False)[0:10]",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nfig.set_figwidth(15)\nfig.set_figheight(6)\nax.bar(x = top_10_deathrates.index, height = top_10_deathrates.values)\nax.set_xlabel('States')\nax.set_ylabel('Death Rates %')\nax.set_title('Top 10 States with Highest Death Rate since June 2020')\nfor i, v in enumerate(top_10_deathrates.values):\n ax.text(i, v, s = (\"%.2f\" % v), color='blue', fontweight='bold', fontsize = 12) # %.2f will print decimals upto 2 places\nplt.xticks(rotation=45) # this line will rotate the x axis label in 45 degrees to make it more readable",
"_____no_output_____"
]
],
[
[
"### Q3. Explore Trend in Confirmed Cases for the state of Maharashtra\n- Plot a line graph with the Date column on the x axis and daily confirmed cases on the y axis. Such a graph is also called a Time Series Plot",
"_____no_output_____"
],
[
"#### Hint - explore on google or in matplotlib for Time series graph from a dataframe",
"_____no_output_____"
]
],
[
[
"df_mah = df_new[df_new[\"State/UnionTerritory\"]=='Maharashtra']",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nfig.set_figwidth(15)\nfig.set_figheight(6)\nax.plot(df_mah[\"Date\"],df_mah[\"daily_Confirmed\"])",
"_____no_output_____"
],
[
"df_mah = df_final[df_final[\"State/UnionTerritory\"]=='Maharashtra']\nfig, ax = plt.subplots()\nfig.set_figwidth(15)\nfig.set_figheight(6)\nax.plot(df_mah[\"Date\"],df_mah[\"death_rate\"])\nax.scatter(df_mah[\"Date\"],df_mah[\"death_rate\"])\nax.set_xlabel('Date')\nax.set_ylabel('Death Rate')\nax.set_title('Death Rate in Maharastra')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7c6b4e5937f24aaa2d0039cea0a722d195932f9 | 1,198 | ipynb | Jupyter Notebook | solar-learn.ipynb | anasir514/colab | fc75014432ae608ce1afa9b595cbaa1cb74e21e6 | [
"MIT"
] | null | null | null | solar-learn.ipynb | anasir514/colab | fc75014432ae608ce1afa9b595cbaa1cb74e21e6 | [
"MIT"
] | null | null | null | solar-learn.ipynb | anasir514/colab | fc75014432ae608ce1afa9b595cbaa1cb74e21e6 | [
"MIT"
] | null | null | null | 23.490196 | 223 | 0.471619 | [
[
[
"<a href=\"https://colab.research.google.com/github/anasir514/colab/blob/main/solar-learn.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"print((4 + 8) / 2)",
"6.0\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
e7c6babb3aaeca24396358a7aab686fab8e4db64 | 34,823 | ipynb | Jupyter Notebook | notebooks/TestAndTrainingDataForFeatureSelection.ipynb | isabelladegen/mlp-2021 | f577d97d060156bc09cdd03635cdefa7a7f7d839 | [
"BSD-2-Clause"
] | null | null | null | notebooks/TestAndTrainingDataForFeatureSelection.ipynb | isabelladegen/mlp-2021 | f577d97d060156bc09cdd03635cdefa7a7f7d839 | [
"BSD-2-Clause"
] | null | null | null | notebooks/TestAndTrainingDataForFeatureSelection.ipynb | isabelladegen/mlp-2021 | f577d97d060156bc09cdd03635cdefa7a7f7d839 | [
"BSD-2-Clause"
] | null | null | null | 56.166129 | 1,289 | 0.477386 | [
[
[
"# Check values before feature selection in both training and test data\n\n- nan\n- different enough values",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport glob\nimport os\n\ntraining_df = pd.concat(map(pd.read_csv, glob.glob(os.path.join('../Data/Train', \"*.csv\"))), ignore_index=True)\ntest_df = test_data = pd.read_csv('../data/test.csv')",
"_____no_output_____"
],
[
"training_df_shape = training_df.shape\ntest_df_shape = test_df.shape\nall_stations = set(training_df['station'])\n\ndef nan_analysis(column_name):\n training_with_null_df = training_df[training_df[column_name].isnull()]\n training_nan = training_with_null_df.shape\n print(f'Number of Nan for {column_name}: {training_nan} of {training_df_shape}')\n test_nan = test_df[test_df[column_name].isnull()].shape\n print(f'Number of Nan for {column_name}: {test_nan} of {test_df_shape}')\n return training_with_null_df[['station']]\n\n\ndef value_analysis(column_name):\n return pd.merge(training_df[[column_name]].describe(),\n test_df[[column_name]].describe(),\n left_index=True,\n right_index=True,\n suffixes=('training', 'test'))\n\ndef station_ids_for_non_nan(column_name):\n training_not_null = training_df[training_df[column_name].notnull()]\n training_not_null_stations = set(training_not_null['station'])\n print(f'Not nan for {column_name}: {training_not_null.shape} of {training_df_shape}')\n print(f'Station with only null values: {all_stations - training_not_null_stations}')\n",
"_____no_output_____"
]
],
[
[
"# Weather Data",
"_____no_output_____"
]
],
[
[
"precipitation = 'precipitation.l.m2'\nprecipitation_nan = nan_analysis(precipitation)\nvalue_analysis(precipitation)\n\n# -> Training data has no values for precipitation not a good feature",
"Number of Nan for precipitation.l.m2: (75, 25) of (55875, 25)\nNumber of Nan for precipitation.l.m2: (0, 25) of (2250, 25)\n"
],
[
"column = 'temperature.C'\ntemperature_nan = nan_analysis(column)\nvalue_analysis(column)\n\n# min temperature is quite different between training and test but there seems to be enough data",
"Number of Nan for temperature.C: (75, 25) of (55875, 25)\nNumber of Nan for temperature.C: (0, 25) of (2250, 25)\n"
],
[
"column = 'windMaxSpeed.m.s'\nwindmax_nan = nan_analysis(column)\nvalue_analysis(column)",
"Number of Nan for windMaxSpeed.m.s: (75, 25) of (55875, 25)\nNumber of Nan for windMaxSpeed.m.s: (0, 25) of (2250, 25)\n"
],
[
"column = 'windMeanSpeed.m.s'\nwindmean_nan = nan_analysis(column)\nvalue_analysis(column)",
"Number of Nan for windMeanSpeed.m.s: (75, 25) of (55875, 25)\nNumber of Nan for windMeanSpeed.m.s: (0, 25) of (2250, 25)\n"
],
[
"column = 'windDirection.grades'\nwinddir_nan = nan_analysis(column)\nvalue_analysis(column)",
"Number of Nan for windDirection.grades: (375, 25) of (55875, 25)\nNumber of Nan for windDirection.grades: (0, 25) of (2250, 25)\n"
],
[
"column = 'relHumidity.HR'\nrelhum_nan = nan_analysis(column)\nvalue_analysis(column)",
"Number of Nan for relHumidity.HR: (75, 25) of (55875, 25)\nNumber of Nan for relHumidity.HR: (0, 25) of (2250, 25)\n"
],
[
"column = 'airPressure.mb'\nairpressure_nan = nan_analysis(column)\nvalue_analysis(column)",
"Number of Nan for airPressure.mb: (75, 25) of (55875, 25)\nNumber of Nan for airPressure.mb: (0, 25) of (2250, 25)\n"
],
[
"# all weather measure are missing 75\ndiff = set(airpressure_nan.index) - set(relhum_nan.index)\n",
"_____no_output_____"
],
[
"diff = set(winddir_nan.index) - set(relhum_nan.index)",
"_____no_output_____"
]
],
[
[
"# Is Holiday\n",
"_____no_output_____"
]
],
[
[
"column = 'isHoliday'\nnan_analysis(column)\nvalue_analysis(column)",
"Number of Nan for isHoliday: (0, 25) of (55875, 25)\nNumber of Nan for isHoliday: (0, 25) of (2250, 25)\n"
]
],
[
[
"# Bikes Profile Data",
"_____no_output_____"
]
],
[
[
"column = 'full_profile_3h_diff_bikes'\nnan_analysis(column)\nstation_ids_for_non_nan(column)\nvalue_analysis(column)\n# each station has none null values!",
"Number of Nan for full_profile_3h_diff_bikes: (12825, 25) of (55875, 25)\nNumber of Nan for full_profile_3h_diff_bikes: (0, 25) of (2250, 25)\nNot nan for full_profile_3h_diff_bikes: (43050, 25) of (55875, 25)\nStation with only null values: set()\n"
],
[
"column = 'full_profile_bikes'\nnan_analysis(column)\nstation_ids_for_non_nan(column)\nvalue_analysis(column)",
"Number of Nan for full_profile_bikes: (12600, 25) of (55875, 25)\nNumber of Nan for full_profile_bikes: (0, 25) of (2250, 25)\nNot nan for full_profile_bikes: (43275, 25) of (55875, 25)\nStation with only null values: set()\n"
],
[
"# select the non-nan",
"_____no_output_____"
],
[
"column = 'short_profile_3h_diff_bikes'\nnan_analysis(column)\nstation_ids_for_non_nan(column)\nvalue_analysis(column)",
"Number of Nan for short_profile_3h_diff_bikes: (12825, 25) of (55875, 25)\nNumber of Nan for short_profile_3h_diff_bikes: (0, 25) of (2250, 25)\nNot nan for short_profile_3h_diff_bikes: (43050, 25) of (55875, 25)\nStation with only null values: set()\n"
],
[
"column = 'short_profile_bikes'\nnan_analysis(column)\nstation_ids_for_non_nan(column)\nvalue_analysis(column)",
"Number of Nan for short_profile_bikes: (12600, 25) of (55875, 25)\nNumber of Nan for short_profile_bikes: (0, 25) of (2250, 25)\nNot nan for short_profile_bikes: (43275, 25) of (55875, 25)\nStation with only null values: set()\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7c6d38b632f1726eb404ce978a46aec8754c7f0 | 68,253 | ipynb | Jupyter Notebook | matrix_one/day5.ipynb | aamator7/dw_matrix | 353ddaa2ababbe96568bf81335f2f45229018d88 | [
"MIT"
] | null | null | null | matrix_one/day5.ipynb | aamator7/dw_matrix | 353ddaa2ababbe96568bf81335f2f45229018d88 | [
"MIT"
] | null | null | null | matrix_one/day5.ipynb | aamator7/dw_matrix | 353ddaa2ababbe96568bf81335f2f45229018d88 | [
"MIT"
] | null | null | null | 68,253 | 68,253 | 0.673421 | [
[
[
"!pip install eli5",
"Collecting eli5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/97/2f/c85c7d8f8548e460829971785347e14e45fa5c6617da374711dec8cb38cc/eli5-0.10.1-py2.py3-none-any.whl (105kB)\n\r\u001b[K |███ | 10kB 14.0MB/s eta 0:00:01\r\u001b[K |██████▏ | 20kB 1.8MB/s eta 0:00:01\r\u001b[K |█████████▎ | 30kB 2.7MB/s eta 0:00:01\r\u001b[K |████████████▍ | 40kB 1.8MB/s eta 0:00:01\r\u001b[K |███████████████▌ | 51kB 2.2MB/s eta 0:00:01\r\u001b[K |██████████████████▋ | 61kB 2.6MB/s eta 0:00:01\r\u001b[K |█████████████████████▊ | 71kB 3.0MB/s eta 0:00:01\r\u001b[K |████████████████████████▊ | 81kB 2.3MB/s eta 0:00:01\r\u001b[K |███████████████████████████▉ | 92kB 2.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████████ | 102kB 2.9MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 112kB 2.9MB/s \n\u001b[?25hRequirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5) (0.10.1)\nRequirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.22.1)\nRequirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.8.6)\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (1.17.5)\nRequirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (19.3.0)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5) (1.12.0)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from eli5) (1.4.1)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5) (2.11.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.18->eli5) (0.14.1)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5) (1.1.1)\nInstalling collected packages: eli5\nSuccessfully installed eli5-0.10.1\n"
],
[
"import pandas as pd\nimport numpy as np\n\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import mean_absolute_error\n\nimport eli5\nfrom eli5.sklearn import PermutationImportance\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom ast import literal_eval\nfrom tqdm import tqdm_notebook\n\n",
"_____no_output_____"
],
[
"cd \"/content/drive/My Drive/Colab Notebooks/dw_matrix\"",
"/content/drive/My Drive/Colab Notebooks/dw_matrix\n"
],
[
"ls data",
"men_shoes.csv\n"
],
[
"df = pd.read_csv('data/men_shoes.csv', low_memory=False)",
"_____no_output_____"
],
[
"def run_model (feats, model = DecisionTreeRegressor(max_depth=5)):\n X= df[feats].values\n y=df['prices_amountmin'].values\n\n scores=cross_val_score(model, X, y, scoring='neg_mean_absolute_error')\n return np.mean(scores),np.std(scores)",
"_____no_output_____"
],
[
"df['brand_cat']=df['brand'].map(lambda x: str(x).lower()).factorize()[0]\nrun_model(['brand_cat'])",
"_____no_output_____"
],
[
"model = RandomForestRegressor(max_depth=5,n_estimators=100,random_state=0)\nrun_model(['brand_cat'], model)",
"_____no_output_____"
],
[
"df.features.head().values",
"_____no_output_____"
],
[
"str_dict = '[{\"key\":\"Gender\",\"value\":[\"Men\"]},{\"key\":\"Shoe Size\",\"value\":[\"M\"]},{\"key\":\"Shoe Category\",\"value\":[\"Men\\'s Shoes\"]},{\"key\":\"Color\",\"value\":[\"Multicolor\"]},{\"key\":\"Manufacturer Part Number\",\"value\":[\"8190-W-NAVY-7.5\"]},{\"key\":\"Brand\",\"value\":[\"Josmo\"]}]'\nliteral_eval(str_dict)",
"_____no_output_____"
],
[
"def parse_features(x):\n output_dict ={}\n if str(x)=='nan':return output_dict\n features = literal_eval(x.replace('\\\\\"','\"'))\n for item in features:\n key = item['key'].lower().strip()\n value = item['value'][0].lower().strip()\n output_dict[key]=value\n return output_dict\n\ndf['features_parsed'] = df['features'].map(parse_features)",
"_____no_output_____"
],
[
"keys = set()\ndf['features_parsed'].map(lambda x: keys.update(x.keys()))\nlen(keys)",
"_____no_output_____"
],
[
"[{'key': 'Gender', 'value': ['Men']}, {'key': 'Shoe Size', 'value': ['M']}, {'key': 'Shoe Category', 'value': [\"Men's Shoes\"]}, {'key': 'Color', 'value': ['Multicolor']}, {'key': 'Manufacturer Part Number', 'value': ['8190-W-NAVY-7.5']}, {'key': 'Brand', 'value': ['Josmo']}]",
"_____no_output_____"
],
[
"df.features_parsed.head().values",
"_____no_output_____"
],
[
"def get_name_feat(key):\n return 'feat_'+key\n\nfor key in tqdm_notebook(keys):\n df[get_name_feat(key)] = df.features_parsed.map(lambda feats: feats[key] if key in feats else np.nan)",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"df[False == df['feat_athlete'].isnull()].shape[0]/df.shape[0]*100",
"_____no_output_____"
],
[
"df.shape[0]",
"_____no_output_____"
],
[
"keys_stat = {}\nfor key in keys:\n keys_stat[key] = df[False == df[get_name_feat(key)].isnull()].shape[0]/df.shape[0]*100",
"_____no_output_____"
],
[
"keys_stat",
"_____no_output_____"
],
[
"{k:v for k,v in keys_stat.items() if v > 30}",
"_____no_output_____"
],
[
"df['feat_brand_cat']=df['feat_brand'].factorize()[0]\ndf['feat_color_cat']=df['feat_color'].factorize()[0]\ndf['feat_gender_cat']=df['feat_gender'].factorize()[0]\ndf['feat_material_cat']=df['feat_material'].factorize()[0]\ndf['feat_manufacturer part number_cat']=df['feat_manufacturer part number'].factorize()[0]\n\nfor key in keys:\n df[get_name_feat(key)+'_cat']= df[get_name_feat(key)].factorize()[0]",
"_____no_output_____"
],
[
"df ['brand']=df['brand'].map(lambda x: str(x).lower())\ndf [ df.brand != df.feat_brand][['brand','feat_brand']].head()",
"_____no_output_____"
],
[
"feats = ['brand_cat','feat_material_cat','feat_adjustable_cat','feat_brand_cat','feat_movement_cat','feat_fabric content_cat']\n#feats +=[ ]\n\n\nmodel = RandomForestRegressor (max_depth=5, n_estimators=100)\nresult = run_model(feats,model)\nresult",
"_____no_output_____"
],
[
"feats_cat = [x for x in df.columns if '_cat' in x]\nfeats_cat",
"_____no_output_____"
],
[
"X=df[feats].values\ny=df['prices_amountmin'].values\n\nm= RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)\nm.fit(X,y)\nprint (result)\nperm = PermutationImportance(m,random_state=1).fit(X,y);\neli5.show_weights(perm, feature_names=feats)",
"(-57.15104696094037, 4.179350915464255)\n"
],
[
"df['brand'].value_counts()",
"_____no_output_____"
],
[
"ls matrix_one",
"day3.ipynb day5.ipynb\n"
],
[
"!git add matrix_one/day5.ipynb",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c6d94a25340121fa419eb248e92fa7293b001a | 23,937 | ipynb | Jupyter Notebook | 1.Study/2. with computer/4.Programming/2.Python/3. Study/01_Python/0408_1_Lecture_python.ipynb | jskim0406/Study | 07b559b95f8f658303ee53114107ae35940a6080 | [
"MIT"
] | null | null | null | 1.Study/2. with computer/4.Programming/2.Python/3. Study/01_Python/0408_1_Lecture_python.ipynb | jskim0406/Study | 07b559b95f8f658303ee53114107ae35940a6080 | [
"MIT"
] | null | null | null | 1.Study/2. with computer/4.Programming/2.Python/3. Study/01_Python/0408_1_Lecture_python.ipynb | jskim0406/Study | 07b559b95f8f658303ee53114107ae35940a6080 | [
"MIT"
] | null | null | null | 20.706747 | 124 | 0.457785 | [
[
[
"# 17. Module, Package, Try_except, Numpy1_20191011_014_Day4_2부",
"_____no_output_____"
],
[
"## Magic method summary\n\n- Note: after creating a class, magic methods can be used to extend the basic operator behavior of the class's objects!\n- When adding objects, instead of calling a plus(1,2) function, the operation can also be written as num1 + num2!\n\n- Comparison\n - \\__eq__(==), \\__ne__(!=)\n - \\__lt__(<, less than), \\__gt__(>, greater than), \\__le__(<=, less or equal), \\__ge__(>=, greater or equal)\n- Arithmetic\n - \\__add__(+), \\__sub__(-), \\__mul__(*), \\__truediv__(/)\n - \\__floordiv__(//), \\__mod__(%), \\__pow__(**)\n- Others\n - \\__repr__(plain representation of the object), \\__str__(print form of the object) **----> return str( ) -----> must return string data**",
"_____no_output_____"
]
],
[
[
"# Defining a class using magic methods and operating on its objects\n# e.g.) creating an Integer class",
"_____no_output_____"
]
],
[
[
"class Integer:\n \n def __init__(self,number):\n self.num = number\n \n# def __add__(self,unit):\n# return self.num + unit.num\n\n# def __str__(self):\n# return str(self.num)\n\n# def __repr__(self):\n# return str(self.num)\n\nnum1 = Integer(1)\nnum2 = Integer(2)\n\nnum1+num2\n\n# 그냥 num1 + num2 하면,, 각 변수에는 1과 2가 들어가있으니, 당연히 + 연산 되어야 하는 거 아닌가 하겠지만,,\n# num1과 num2의 클래스(사용자정의 데이터타입), 데이터타입이 Integer라는 내가 정의 내린 타입이기 때문에,, __add__ 가 따로 없다.\n# 따라서, 저렇게 기본 연산자 활용하려면 magic method를 다시 재정의 해줘야 한다.",
"_____no_output_____"
]
],
[
[
"a = 1\na.__add__(2) # ====> a.num + 2.num ==== self.num + unit.num ===== def __add__(self, unit):",
"_____no_output_____"
],
[
"num1",
"_____no_output_____"
],
[
"print(num1)",
"<__main__.Integer object at 0x104eb7c90>\n"
]
],
[
[
"# 1. Class example\n\n- Build an account class\n- Variables: asset, interest rate (interest)\n- Functions: withdraw (draw), deposit (insert), add interest (add_interest)\n- When withdrawing, you cannot withdraw more money than the asset balance.",
"_____no_output_____"
]
],
[
[
"class Account:\n def __init__(self,asset,interest=1.05):\n self.asset = asset\n self.interest = interest\n \n def draw(self,amount):\n if self.asset >= amount:\n self.asset -= amount\n print(\"{}원이 인출되었습니다.\".format(amount))\n \n else:\n print('{}원이 부족합니다.'.format((amount-self.asset)))\n \n def insert(self,amount):\n self.asset += amount\n print('{}원이 입금되었습니다.'.format(amount))\n \n def add_interest(self):\n self.asset *= self.interest\n print('{}원의 이자가 입금되었습니다.'.format((self.asset*(self.interest-1))))\n \n def __repr__(self):\n return \"asset : {}, interest : {}\".format(self.asset, self.interest)\n \nacc1 = Account(10000)\nacc1.asset",
"_____no_output_____"
],
[
"acc1",
"_____no_output_____"
],
[
"acc1.draw(12000)",
"2000원이 부족합니다.\n"
],
[
"acc1.draw(3000)",
"3000원이 인출되었습니다.\n"
],
[
"acc1",
"_____no_output_____"
],
[
"acc1.insert(5000)\nacc1",
"5000원이 입금되었습니다.\n"
],
[
"acc1.add_interest(),1\nacc1",
"630.0000000000006원의 이자가 입금되었습니다.\n"
]
],
[
[
"# Module, package\n\n* variable, function < class < module < package\n\n- Module: a file with the ( .py ) extension that collects variables, functions, and classes (a slightly larger scope than a class)\n- Package: one level larger than a module; the concept of organizing module functionality into directories",
"_____no_output_____"
],
[
"1. Creating a module\n2. Importing a module",
"_____no_output_____"
],
[
"# 1. Creating a module (creating a file)",
"_____no_output_____"
]
],
[
[
"!ls",
"0401_Lecture_python.ipynb 0406_Lecture_python.ipynb dss.py\r\n0402_Lecture_python.ipynb 0407_Lecture_python.ipynb \u001b[34mschool\u001b[m\u001b[m\r\n0403_Lecture_python.ipynb 0408_Lecture_python.ipynb\r\n"
],
[
"%%writefile dss.py\n\n# 모듈 파일 생성 (매직 커맨드 사용)\n\n# 1) %% -> 이 셀에 있는 내용에 전부다 writefile 을 적용하겠다.\n# 2) dss.py 라는 파일을 만들어서, 써있는 코드들을 이 파일에 저장하겠다.\n\n# 모듈 생성 -> 파일 저장\n# 1. 모듈 생성 (모듈 = 클래스, 함수, 변수의 set)\n\nnum = 1234\n\ndef disp1(msg):\n print(\"disp1\", msg)\n \ndef disp2(msg):\n print('disp2', msg)\n \nclass Calc:\n def plus(self, *args):\n return sum(args)\n",
"Overwriting dss.py\n"
],
[
"!ls",
"0401_Lecture_python.ipynb 0406_Lecture_python.ipynb dss.py\r\n0402_Lecture_python.ipynb 0407_Lecture_python.ipynb \u001b[34mschool\u001b[m\u001b[m\r\n0403_Lecture_python.ipynb 0408_Lecture_python.ipynb\r\n"
],
[
"%reset",
"Once deleted, variables cannot be recovered. Proceed (y/[n])? \nNothing done.\n"
],
[
"%whos",
"Variable Type Data/Info\n------------------------------\ndss module <module 'school.dss.data1<...>업/school/dss/data1.py'>\nschool module <module 'school' (namespace)>\nurl module <module 'school.web.url' <...>수업/school/web/url.py'>\n"
]
],
[
[
"# 2. Importing a module",
"_____no_output_____"
]
],
[
[
"# 모듈 호출 : import ( .py 제외한 파일명 )\n\nimport dss\n%whos",
"Variable Type Data/Info\n--------------------------------\nCalc type <class 'dss.Calc'>\ncalc Calc <dss.Calc object at 0x109abb690>\ndisp1 function <function disp1 at 0x109a88ef0>\ndisp2 function <function disp2 at 0x109ab75f0>\ndss module <module 'dss' from '/User<...>ᆼ/0. 스쿨 수업/dss.py'>\nnum int 1234\nschool module <module 'school' (namespace)>\nurl module <module 'school.web.url' <...>수업/school/web/url.py'>\n"
],
[
"dss.num",
"_____no_output_____"
],
[
"dss.disp1('안녕')",
"disp1 안녕\n"
],
[
"calc = dss.Calc()",
"_____no_output_____"
],
[
"calc.plus(1,2,3,4,5,6)",
"_____no_output_____"
]
],
[
[
"# 3. Importing specific variables and functions from a module",
"_____no_output_____"
]
],
[
[
"# import random --> random 모듈을 불러온 것 (random.py 라는 파일의 코드(모듈 적어놓은) 가져온 것)\n# random.randint(1,5) --> random 모듈 내 randint라는 함수를 가져온 것.\n# calc.plus --> dss 라는 모듈의 plus라는 함수 가져온 것.",
"_____no_output_____"
],
[
"# 모듈 안에 특정 함수, 변수, 클래스 호출\n# '모듈.변수' 로 적지 않고, '모듈' 로 바로 호출 가능\n\nfrom dss import num, disp2",
"_____no_output_____"
],
[
"%whos",
"Variable Type Data/Info\n--------------------------------\nCalc type <class 'dss.Calc'>\ncalc Calc <dss.Calc object at 0x109baed10>\ndisp1 function <function disp1 at 0x109a88ef0>\ndisp2 function <function disp2 at 0x109ab75f0>\ndss module <module 'dss' from '/User<...>ᆼ/0. 스쿨 수업/dss.py'>\nnum int 1234\nschool module <module 'school' (namespace)>\nurl module <module 'school.web.url' <...>수업/school/web/url.py'>\n"
],
[
"dss.num",
"_____no_output_____"
],
[
"num",
"_____no_output_____"
]
],
[
[
"# 4. Importing all variables and functions from a module",
"_____no_output_____"
]
],
[
[
"%reset",
"Once deleted, variables cannot be recovered. Proceed (y/[n])? \nNothing done.\n"
],
[
"from dss import *\n\n%whos",
"Variable Type Data/Info\n--------------------------------\nCalc type <class 'dss.Calc'>\ncalc Calc <dss.Calc object at 0x109baed10>\ndisp1 function <function disp1 at 0x109a88ef0>\ndisp2 function <function disp2 at 0x109ab75f0>\ndss module <module 'dss' from '/User<...>ᆼ/0. 스쿨 수업/dss.py'>\nnum int 1234\nschool module <module 'school' (namespace)>\nurl module <module 'school.web.url' <...>수업/school/web/url.py'>\n"
]
],
[
[
"# 5. Packages\n\n- Creating a package\n- Importing a package\n- Making a setup.py package installation file\n\n- package (directory) : module (file)",
"_____no_output_____"
],
[
"## 1) Creating the package ( directories (dss / web) )",
"_____no_output_____"
]
],
[
[
"# !mkdir p- ---> school 밑에 dss 디렉토리 생성\n!mkdir -p school/dss",
"_____no_output_____"
],
[
"# !mkdir p- ---> school 밑에 web 디렉토리 생성\n!mkdir -p school/web",
"_____no_output_____"
],
[
"!tree school",
"\u001b[01;34mschool\u001b[00m\r\n├── \u001b[01;34mdss\u001b[00m\r\n│ ├── __init__.py\r\n│ ├── data1.py\r\n│ └── data2.py\r\n└── \u001b[01;34mweb\u001b[00m\r\n ├── __init__.py\r\n └── url.py\r\n\r\n2 directories, 5 files\r\n"
]
],
[
[
"### Installing tree\n- Install homebrew\n - homebrew : https://brew.sh/index_ko\n - homebrew : a package management / install tool for OSX\n - /bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)\"\n - brew install tree",
"_____no_output_____"
],
[
"## 2) 모듈(파일) 생성",
"_____no_output_____"
]
],
[
[
"# 이 단계는 파이썬 3.8버젼 이후 부터는 안해도 됨\n# !touch --> 파일 생성\n!touch school/dss/__init__.py\n!touch school/web/__init__.py",
"_____no_output_____"
],
[
"!tree school",
"\u001b[01;34mschool\u001b[00m\r\n├── \u001b[01;34mdss\u001b[00m\r\n│ ├── __init__.py\r\n│ ├── data1.py\r\n│ └── data2.py\r\n└── \u001b[01;34mweb\u001b[00m\r\n ├── __init__.py\r\n └── url.py\r\n\r\n2 directories, 5 files\r\n"
],
[
"%%writefile school/dss/data1.py\n# dss라는 패키지 안에 모듈(파일)을 추가\n# web이라는 디렉토리 안에 모듈(파일)을 추가\n\ndef plus(*args):\n print('data1')\n return sum(args)",
"Overwriting school/dss/data1.py\n"
],
[
"%%writefile school/dss/data2.py\n\ndef plus2(*args):\n print('data2')\n return sum(args)",
"Overwriting school/dss/data2.py\n"
],
[
"%%writefile school/web/url.py\n\ndef make(url):\n return url if url[:7] == 'http://' else 'http://'+url",
"Overwriting school/web/url.py\n"
],
[
"!tree school",
"\u001b[01;34mschool\u001b[00m\r\n├── \u001b[01;34mdss\u001b[00m\r\n│ ├── __init__.py\r\n│ ├── data1.py\r\n│ └── data2.py\r\n└── \u001b[01;34mweb\u001b[00m\r\n ├── __init__.py\r\n └── url.py\r\n\r\n2 directories, 5 files\r\n"
]
],
[
[
"## 3) 패키지 경로 안에 있는 모듈을 찾아들어가 사용",
"_____no_output_____"
]
],
[
[
"import school.dss.data1",
"_____no_output_____"
],
[
"%whos",
"Variable Type Data/Info\n------------------------------\nschool module <module 'school' (namespace)>\n"
],
[
"# school 디렉토리 - dss 디렉토리 - data1 모듈 - plus 함수 호출\nschool.dss.data1.plus(1,2,3)",
"data1\n"
],
[
"# 모듈 호출 명령어 너무 길다 import school.dss.data1\n# alias 로 단축명 생성\n\nimport school.dss.data1 as dss",
"_____no_output_____"
],
[
"dss.plus(1,2,3)",
"data1\n"
],
[
"# school web : 디렉토리\n# url : 모듈\nfrom school.web import url",
"_____no_output_____"
],
[
"url.make('google.com')",
"_____no_output_____"
],
[
"# 패키지의 위치 : 특정 디렉토리에 있는 패키지는 어디에서나 import 가능",
"_____no_output_____"
],
[
"import random",
"_____no_output_____"
],
[
"import sys\n\nfor path in sys.path:\n print(path)",
"/Users/kimjeongseob/Desktop/0. 데이터사이언스스쿨/2. 프로그래밍/0. 스쿨 수업\n/Users/kimjeongseob/opt/anaconda3/lib/python37.zip\n/Users/kimjeongseob/opt/anaconda3/lib/python3.7\n/Users/kimjeongseob/opt/anaconda3/lib/python3.7/lib-dynload\n\n/Users/kimjeongseob/opt/anaconda3/lib/python3.7/site-packages\n/Users/kimjeongseob/opt/anaconda3/lib/python3.7/site-packages/aeosa\n/Users/kimjeongseob/opt/anaconda3/lib/python3.7/site-packages/IPython/extensions\n/Users/kimjeongseob/.ipython\n"
],
[
"# !ls /Users/kimjeongseob/opt/anaconda3/lib/python3.7\n\n# 아래의 출력 결과를 변수에다 넣을 수 있음\n\nA = !ls /Users/kimjeongseob/opt/anaconda3/lib/python3.7\nlen(A), A[-5:]",
"_____no_output_____"
],
[
"# setup.py 를 작성해서 패키지를 설치해서 사용\n# setuptools 를 이용",
"_____no_output_____"
]
]
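The last cell only mentions writing a setup.py with setuptools but never shows one. Below is a minimal sketch of what such a file could look like; the distribution name, version and description are illustrative assumptions, not values from the original notebook, and the file would sit in the directory that contains the school/ package.

```python
# setup.py -- minimal packaging sketch using setuptools (illustrative values only)
from setuptools import setup, find_packages

setup(
    name="school",                # hypothetical distribution name
    version="0.0.1",              # hypothetical version
    description="Example package containing the dss and web sub-packages",
    packages=find_packages(),     # discovers school, school.dss and school.web
)
```

The package could then be installed from that directory with `pip install -e .` (editable mode).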
] | [
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c6f0a4c61778e1f7c5267b1513c629db92a98f | 1,632 | ipynb | Jupyter Notebook | Mundo01/Desafio030.ipynb | BrunaKuntz/PythonMundo01 | 998eb7ffff97b692de65a6ce522bae57f6582279 | [
"Apache-2.0"
] | null | null | null | Mundo01/Desafio030.ipynb | BrunaKuntz/PythonMundo01 | 998eb7ffff97b692de65a6ce522bae57f6582279 | [
"Apache-2.0"
] | null | null | null | Mundo01/Desafio030.ipynb | BrunaKuntz/PythonMundo01 | 998eb7ffff97b692de65a6ce522bae57f6582279 | [
"Apache-2.0"
] | null | null | null | 25.5 | 247 | 0.484069 | [
[
[
"<a href=\"https://colab.research.google.com/github/BrunaKuntz/Python-Curso-em-Video/blob/main/Mundo01/Desafio030.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"\n\n\n# **Desafio 030**\n**Python 3 - 1º Mundo**\n\nDescrição: Crie um programa que leia um número inteiro e mostre na tela se ele é PAR ou ÍMPAR.\n\nLink: https://www.youtube.com/watch?v=4vFCzKuHOn4&t=4s",
"_____no_output_____"
]
],
[
[
"num = int(input('Digite um número: '))\nif num % 2 == 0:\n print('O número é par.')\nelse:\n print('O número é ímpar.')",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7c70f0939aa000ed47e9d9445f0026151b49156 | 347,086 | ipynb | Jupyter Notebook | examples/open-loop_simulation_example.ipynb | KenedyMatiasso/PyAAT | 4c2940797ac3b882e6b0ed70ed2c5f37bbd4b22e | [
"MIT"
] | 1 | 2021-03-03T20:44:15.000Z | 2021-03-03T20:44:15.000Z | examples/open-loop_simulation_example.ipynb | KenedyMatiasso/PyAAT | 4c2940797ac3b882e6b0ed70ed2c5f37bbd4b22e | [
"MIT"
] | null | null | null | examples/open-loop_simulation_example.ipynb | KenedyMatiasso/PyAAT | 4c2940797ac3b882e6b0ed70ed2c5f37bbd4b22e | [
"MIT"
] | null | null | null | 353.808359 | 36,624 | 0.941187 | [
[
[
"# Example 5 - Open-loop simulation",
"_____no_output_____"
],
[
"An open-loop simulation is the case where no state-feedback control is used. It means that only time-dependent control is used or not control at all. This kind of simulation is mainly useful for stability analysis and for cheching the trimmed behaviior (including perturbations around the trimmed conditions).",
"_____no_output_____"
],
[
"### Import atmosphere model",
"_____no_output_____"
]
],
[
[
"from pyaat.atmosphere import atmosCOESA\natm = atmosCOESA()",
"_____no_output_____"
]
],
[
[
"### Import gravity model",
"_____no_output_____"
]
],
[
[
"from pyaat.gravity import VerticalConstant\ngrav = VerticalConstant()",
"_____no_output_____"
]
],
[
[
"### Import Aircraft model",
"_____no_output_____"
]
],
[
[
"from pyaat.aircraft import Aircraft\nairc = Aircraft()",
"_____no_output_____"
]
],
[
[
"### Import propulsion model",
"_____no_output_____"
]
],
[
[
"from pyaat.propulsion import SimpleModel\nprop = SimpleModel()",
"_____no_output_____"
]
],
[
[
"### Create a system",
"_____no_output_____"
]
],
[
[
"from pyaat.system import system\nComplete_system = system(atmosphere = atm, propulsion = prop, aircraft = airc, gravity = grav)",
"_____no_output_____"
]
],
[
[
"### Trimm at cruize condition",
"_____no_output_____"
]
],
[
[
"Xe, Ue = Complete_system.trimmer(condition='cruize', HE = 10000., VE = 200)",
"_____no_output_____"
]
],
[
[
"### Printing equilibrium states and controls",
"_____no_output_____"
]
],
[
[
"from pyaat.tools import printInfo\nprintInfo(Xe, Ue, frame ='body')",
"--------------------------------\n------------ STATES ------------\n------------- BODY -------------\n--------------------------------\nx\n0.0\n-------------\ny\n0.0\n-------------\nz\n-10000.0\n-------------\nu\n199.90512762194552\n-------------\nv\n-1.4802192864744595e-21\n-------------\nw\n6.159541415857848\n-------------\nphi\n0.0\n-------------\ntheta\n1.7648577035616007\n-------------\npsi\n0.0\n-------------\np\n4.415466257582898e-22\n-------------\nq\n-8.573425743483335e-15\n-------------\nr\n3.275803472664854e-22\n"
],
[
"printInfo(Xe, Ue, frame ='aero')",
"--------------------------------\n------------ STATES ------------\n------------- AERO -------------\n--------------------------------\nV\n200.0\n-------------\nalpha\n1.7648577035615969\n-------------\nbeta\n-4.240515893442633e-22\n-------------\nphi\n0.0\n-------------\ntheta\n1.7648577035616007\n-------------\npsi\n0.0\n-------------\np\n4.415466257582898e-22\n-------------\nq\n-8.573425743483335e-15\n-------------\nr\n3.275803472664854e-22\n-------------\nx0\n0.0\n-------------\ny0\n0.0\n-------------\nH\n10000.0\n"
],
[
"printInfo(Xe, Ue, frame='controls')",
"--------------------------------\n----------- CONTROLS -----------\n--------------------------------\ndelta_p\n34.65222851433093\n-------------\ndelta_e\n-2.208294991778133\n-------------\ndelta_a\n4.978810759532202e-22\n-------------\ndelta_r\n-8.268303092392625e-22\n"
]
],
[
[
"## Simulation",
"_____no_output_____"
],
[
"The open-loop simulation is carried out using the method 'propagate'. Mandatory inputs are the time of simulation TF, the equilibrium states Xe, the equilibrium control Ue, and a bolean variable called 'perturbation' which defines is applied during the simulation or not.",
"_____no_output_____"
],
[
"### Equilibrium simulation",
"_____no_output_____"
]
],
[
[
"solution, control = Complete_system.propagate(Xe, Ue, TF = 180, perturbation = False)",
"_____no_output_____"
]
],
[
[
"The outputs are two multidimentional arrays, containing the states over time and control over time.",
"_____no_output_____"
]
],
[
[
"print('Solution')\nsolution",
"Solution\n"
],
[
"print('control')\ncontrol",
"control\n"
]
],
[
[
"The time array can be accessed directly on the system.",
"_____no_output_____"
]
],
[
[
"time = Complete_system.time\ntime",
"_____no_output_____"
]
],
[
[
"Check out the documentation for more information about the outputs.",
"_____no_output_____"
],
[
"### Ploting the results\nSome plots can be generated directly using the plotter util embeeded within PyAAT.",
"_____no_output_____"
]
],
[
[
"from pyaat.tools import plotter\npltr = plotter()",
"_____no_output_____"
],
[
"pltr.states = solution\npltr.time = Complete_system.time\npltr.control = control",
"_____no_output_____"
],
[
"pltr.LinVel(frame = 'body')",
"_____no_output_____"
],
[
"pltr.LinPos()",
"_____no_output_____"
],
[
"pltr.Attitude()",
"_____no_output_____"
],
[
"pltr.AngVel()",
"_____no_output_____"
],
[
"pltr.Controls()",
"_____no_output_____"
],
[
"pltr.LinPos3D()",
"_____no_output_____"
]
],
[
[
"All states and controls remain constant over time, as expected.",
"_____no_output_____"
],
[
"## Out-of-equilibrium simulations\n\nSometimes we may be interested in verifying the behavior of the aircraft out of the equilibrium states. It can be done by applying perturbations.\n\nNote that you would obtain the same result if you input a vector Xe out of equilibrium, but consider that it may cause confusion and in more advanced simulations (considering closed-loop control) it might lead to errors.",
"_____no_output_____"
],
[
"### Perturbation on states",
"_____no_output_____"
]
],
[
[
"solution, control = Complete_system.propagate(Xe, Ue, T0 = 0.0, TF = 30.0, dt = 0.01, perturbation = True, state = {'beta':2., 'alpha':2.})",
"_____no_output_____"
],
[
"pltr.states = solution\npltr.time = Complete_system.time\npltr.control = control",
"_____no_output_____"
],
[
"pltr.LinVel(frame = 'aero')",
"_____no_output_____"
],
[
"pltr.LinPos()",
"_____no_output_____"
],
[
"pltr.Attitude()",
"_____no_output_____"
],
[
"pltr.AngVel()",
"_____no_output_____"
],
[
"pltr.Controls()",
"_____no_output_____"
]
],
[
[
"### open-loop control",
"_____no_output_____"
],
[
"Some usual control inputs are also embeeded within the toolbox, such as the doublet and step.",
"_____no_output_____"
]
],
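Conceptually, a doublet is simply a pulse that holds +amplitude for the first half of its duration and -amplitude for the second half. Below is a standalone sketch of such a signal as a plain function of time; it is only an illustration of the idea, not the actual PyAAT implementation, although the parameter names mirror the attributes used in the cells that follow.

```python
import numpy as np
import matplotlib.pyplot as plt

def doublet_signal(t, amplitude=3.0, t_init=2.0, T=1.0):
    # +amplitude for the first half of the pulse, -amplitude for the second half, 0 elsewhere
    t = np.asarray(t, dtype=float)
    out = np.zeros_like(t)
    out[(t >= t_init) & (t < t_init + T / 2)] = amplitude
    out[(t >= t_init + T / 2) & (t < t_init + T)] = -amplitude
    return out

t = np.linspace(0.0, 6.0, 600)
plt.plot(t, doublet_signal(t))
plt.xlabel('time, s')
plt.ylabel('commanded deflection, deg')
plt.show()
```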
[
[
"from pyaat.control import doublet, step",
"_____no_output_____"
]
],
[
[
"#### Doublet input on elevator",
"_____no_output_____"
]
],
[
[
"doub = doublet()\ndoub.command = 'elevator'\ndoub.amplitude = 3\ndoub.T = 1\ndoub.t_init = 2",
"_____no_output_____"
]
],
[
[
"#### Step input on aileron",
"_____no_output_____"
]
],
[
[
"st =step()\nst.command = 'aileron'\nst.amplitude = 1\nst.t_init = 2",
"_____no_output_____"
],
[
"solution, control = Complete_system.propagate(Xe, Ue, TF = 50, perturbation=True, control = [doub, st])",
"_____no_output_____"
]
],
[
[
"One can input as many control perturbation as we want, and we can combine it with states perturbations is desired.",
"_____no_output_____"
]
],
[
[
"pltr.states = solution\npltr.time = Complete_system.time\npltr.control = control",
"_____no_output_____"
],
[
"pltr.Controls()",
"_____no_output_____"
],
[
"pltr.LinVel(frame = 'aero')",
"_____no_output_____"
],
[
"pltr.LinPos()",
"_____no_output_____"
],
[
"pltr.Attitude()",
"_____no_output_____"
],
[
"pltr.AngVel()",
"_____no_output_____"
],
[
"pltr.LinPos3D()",
"_____no_output_____"
]
],
[
[
"#### Yes, the aircraft is falling! The lateral-directional dynamics is instable (check it out using the tools from pyaat.flight_mechanics)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7c71287878de49abf98551ded208f10ec2fd75f | 481,095 | ipynb | Jupyter Notebook | 04-Visualization-Matplotlib-Pandas/04-02-Pandas Visualization/Pandas Built-in Data Visualization.ipynb | rikimarutsui/Python-for-Finance-Repo | cd4553da2df56e3552251fdcaeb5c0dcfc378bc5 | [
"Apache-2.0"
] | 29 | 2019-01-03T15:19:16.000Z | 2022-02-25T03:03:13.000Z | 04-Visualization-Matplotlib-Pandas/04-02-Pandas Visualization/Pandas Built-in Data Visualization.ipynb | rikimarutsui/Python-for-Finance-Repo | cd4553da2df56e3552251fdcaeb5c0dcfc378bc5 | [
"Apache-2.0"
] | null | null | null | 04-Visualization-Matplotlib-Pandas/04-02-Pandas Visualization/Pandas Built-in Data Visualization.ipynb | rikimarutsui/Python-for-Finance-Repo | cd4553da2df56e3552251fdcaeb5c0dcfc378bc5 | [
"Apache-2.0"
] | 10 | 2017-04-25T05:49:31.000Z | 2018-11-28T13:37:12.000Z | 554.256912 | 94,962 | 0.937216 | [
[
[
"___\n\n<a href='http://www.pieriandata.com'> <img src='../../Pierian_Data_Logo.png' /></a>\n___\n# Pandas Built-in Data Visualization\n\nIn this lecture we will learn about pandas built-in capabilities for data visualization! It's built-off of matplotlib, but it baked into pandas for easier usage! \n\nLet's take a look!",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## The Data\n\nThere are some fake data csv files you can read in as dataframes:",
"_____no_output_____"
]
],
[
[
"df1 = pd.read_csv('df1',index_col=0)\ndf2 = pd.read_csv('df2')",
"_____no_output_____"
]
],
[
[
"## Style Sheets\n\nMatplotlib has [style sheets](http://matplotlib.org/gallery.html#style_sheets) you can use to make your plots look a little nicer. These style sheets include plot_bmh,plot_fivethirtyeight,plot_ggplot and more. They basically create a set of style rules that your plots follow. I recommend using them, they make all your plots have the same look and feel more professional. You can even create your own if you want your company's plots to all have the same look (it is a bit tedious to create on though).\n\nHere is how to use them.\n\n**Before plt.style.use() your plots look like this:**",
"_____no_output_____"
]
],
[
[
"df1['A'].hist()",
"_____no_output_____"
]
],
[
[
"Call the style:",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nplt.style.use('ggplot')",
"_____no_output_____"
]
],
[
[
"Now your plots look like this:",
"_____no_output_____"
]
],
[
[
"df1['A'].hist()",
"_____no_output_____"
],
[
"plt.style.use('bmh')\ndf1['A'].hist()",
"_____no_output_____"
],
[
"plt.style.use('dark_background')\ndf1['A'].hist()",
"_____no_output_____"
],
[
"plt.style.use('fivethirtyeight')\ndf1['A'].hist()",
"_____no_output_____"
],
[
"plt.style.use('ggplot')",
"_____no_output_____"
]
],
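If you want to see every style sheet shipped with your matplotlib installation, or try one out without changing the global default, something along these lines should work (it assumes the df1 loaded earlier in this notebook):

```python
import matplotlib.pyplot as plt

print(plt.style.available)      # list every installed style sheet

# apply a style to a single figure only, leaving the global default untouched
with plt.style.context('bmh'):
    df1['A'].hist()
```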
[
[
"Let's stick with the ggplot style and actually show you how to utilize pandas built-in plotting capabilities!",
"_____no_output_____"
],
[
"# Plot Types\n\nThere are several plot types built-in to pandas, most of them statistical plots by nature:\n\n* df.plot.area \n* df.plot.barh \n* df.plot.density \n* df.plot.hist \n* df.plot.line \n* df.plot.scatter\n* df.plot.bar \n* df.plot.box \n* df.plot.hexbin \n* df.plot.kde \n* df.plot.pie\n\nYou can also just call df.plot(kind='hist') or replace that kind argument with any of the key terms shown in the list above (e.g. 'box','barh', etc..)\n___",
"_____no_output_____"
],
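As noted above, the named accessors and the `kind` argument are interchangeable; for example, these two calls should produce the same area plot:

```python
df2.plot.area(alpha=0.4)
df2.plot(kind='area', alpha=0.4)   # equivalent call via the kind argument
```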
[
"Let's start going through them!\n\n## Area",
"_____no_output_____"
]
],
[
[
"df2.plot.area(alpha=0.4)",
"_____no_output_____"
]
],
[
[
"## Barplots",
"_____no_output_____"
]
],
[
[
"df2.head()",
"_____no_output_____"
],
[
"df2.plot.bar()",
"_____no_output_____"
],
[
"df2.plot.bar(stacked=True)",
"_____no_output_____"
]
],
[
[
"## Histograms",
"_____no_output_____"
]
],
[
[
"df1['A'].plot.hist(bins=50)",
"_____no_output_____"
]
],
[
[
"## Line Plots",
"_____no_output_____"
]
],
[
[
"df1.plot.line(x=df1.index,y='B',figsize=(12,3),lw=1)",
"_____no_output_____"
]
],
[
[
"## Scatter Plots",
"_____no_output_____"
]
],
[
[
"df1.plot.scatter(x='A',y='B')",
"_____no_output_____"
]
],
[
[
"You can use c to color based off another column value\nUse cmap to indicate colormap to use. \nFor all the colormaps, check out: http://matplotlib.org/users/colormaps.html",
"_____no_output_____"
]
],
[
[
"df1.plot.scatter(x='A',y='B',c='C',cmap='coolwarm')",
"_____no_output_____"
]
],
[
[
"Or use s to indicate size based off another column. s parameter needs to be an array, not just the name of a column:",
"_____no_output_____"
]
],
[
[
"df1.plot.scatter(x='A',y='B',s=df1['C']*200)",
"C:\\Users\\Marcial\\Anaconda3\\lib\\site-packages\\matplotlib\\collections.py:877: RuntimeWarning: invalid value encountered in sqrt\n scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor\n"
]
],
[
[
"## BoxPlots",
"_____no_output_____"
]
],
[
[
"df2.plot.box() # Can also pass a by= argument for groupby",
"_____no_output_____"
]
],
[
[
"## Hexagonal Bin Plot\n\nUseful for Bivariate Data, alternative to scatterplot:",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])\ndf.plot.hexbin(x='a',y='b',gridsize=25,cmap='Oranges')",
"_____no_output_____"
]
],
[
[
"____",
"_____no_output_____"
],
[
"## Kernel Density Estimation plot (KDE)",
"_____no_output_____"
]
],
[
[
"df2['a'].plot.kde()",
"_____no_output_____"
],
[
"df2.plot.density()",
"_____no_output_____"
]
],
[
[
"That's it! Hopefully you can see why this method of plotting will be a lot easier to use than full-on matplotlib, it balances ease of use with control over the figure. A lot of the plot calls also accept additional arguments of their parent matplotlib plt. call. \n\n\n# Great Job!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7c7137909a290317debc69d1e56ee915bc20201 | 281,834 | ipynb | Jupyter Notebook | _notebooks/2022-02-01-EDA-test.ipynb | christopherGuan/sample-ds-blog | 52859b8801125ee8a5a330557e1248937952f43f | [
"Apache-2.0"
] | null | null | null | _notebooks/2022-02-01-EDA-test.ipynb | christopherGuan/sample-ds-blog | 52859b8801125ee8a5a330557e1248937952f43f | [
"Apache-2.0"
] | null | null | null | _notebooks/2022-02-01-EDA-test.ipynb | christopherGuan/sample-ds-blog | 52859b8801125ee8a5a330557e1248937952f43f | [
"Apache-2.0"
] | null | null | null | 358.111817 | 155,549 | 0.931453 | [
[
[
"# Amazon Shure MV7 EDA and Sentement Analysis\n\n- toc: true\n- branch: master\n- badges: true\n- comments: true\n- categories: [Fastpages, Jupyter, Python, Selenium, Stoc]\n- annotations: true\n- hide: false\n- image: images/diagram.png\n- layout: post\n- search_exclude: true",
"_____no_output_____"
],
[
"### Required Packages\n\n[wordcloud](https://github.com/amueller/word_cloud), \n[geopandas](https://geopandas.org/en/stable/getting_started/install.html), \n[nbformat](https://pypi.org/project/nbformat/), \n[seaborn](https://seaborn.pydata.org/installing.html), \n[scikit-learn](https://scikit-learn.org/stable/install.html)",
"_____no_output_____"
],
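If any of these are missing, one way to install them from a notebook cell is shown below; the package names are the usual PyPI ones, and plotly is added because the notebook imports it as well:

```python
!pip install wordcloud geopandas nbformat seaborn scikit-learn plotly
```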
[
"",
"_____no_output_____"
],
[
"### Now let's get started!\nFirst thing first, you need to load all the necessary libraries:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom wordcloud import WordCloud\nfrom wordcloud import STOPWORDS\nimport re\nimport plotly.graph_objects as go\nimport seaborn as sns",
"_____no_output_____"
]
],
[
[
"## Read the Data",
"_____no_output_____"
]
],
[
[
"#Import Data\ndf = pd.read_csv(\"/Users/zeyu/Desktop/DS/Ebay & Amazon/Amazon_reviews_scraping/Amazon_reviews_scraping/full_reviews.csv\")",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"## Data Cleaning\n\nStep 1:\n- Splite column Date to Country and Date\n- Combine the two rating columns to one\n- Convert type of date from string to datetime",
"_____no_output_____"
]
],
[
[
"#Clean Data\ninfo = []\nfor i in df[\"date\"]:\n x = re.sub(\"Reviewed in \", \"\", i)\n x1 = re.sub(\" on \", \"*\", x)\n info.append(x1)\n\ndf[\"date\"] = pd.DataFrame({\"date\": info})\ndf[['country','date']] = df.date.apply(\n lambda x: pd.Series(str(x).split(\"*\")))\n\nstar = []\nstar = df.stars1.combine_first(df.stars2)\ndf[\"star\"] = pd.DataFrame({\"star\": star})\n\ndel df['stars1']\ndel df['stars2']\n\n#Convert String to Date\ndf.date = pd.to_datetime(df.date)",
"_____no_output_____"
]
],
[
[
"Step 2:\n- Two methods to verify if column \"star\" contain any NaN\n- Converted the type of column \"star\" from string to Int",
"_____no_output_____"
]
],
[
[
"\"nan\" in df['star']",
"_____no_output_____"
],
[
"df_no_star = df[df['star'].isna()]\ndf_no_star",
"_____no_output_____"
],
[
"#Convert 2.0 out of 5 stars to 2\ndf_int = []\n#df_with_star[\"stars\"] = [str(x).replace(':',' ') for x in df[\"stars\"]]\n\nfor i in df[\"star\"]:\n x = re.sub(\".0 out of 5 stars\", \"\", i)\n df_int.append(x)\n\ndf[\"rating\"] = pd.DataFrame({\"rating\": df_int})\ndf[\"rating\"] = df[\"rating\"].astype(int)\ndel df['star']",
"_____no_output_____"
]
],
[
[
"This is the data looks like after cleaning.\n",
"_____no_output_____"
],
[
"## EDA",
"_____no_output_____"
]
],
[
[
"temp = df['rating'].value_counts()\nfig = go.Figure(go.Bar(\n x=temp,\n y=temp.index,\n orientation='h'))\n\nfig.show()",
"_____no_output_____"
],
[
"df_country = df['country'].value_counts()\nfig = go.Figure(go.Bar(\n x=df_country,\n y=df_country.index,\n orientation='h'))\n\nfig.show()",
"_____no_output_____"
],
[
"mean_rating = df['rating'].mean()\nmean_rating",
"_____no_output_____"
],
[
"\"\"\"fig = px.line(df, x=temp.index, y=temp.rating, title='Life expectancy in Canada')\nfig.show()\"\"\"\nimport plotly.express as px\ntemp = df.groupby([df['date'].dt.date]).mean()\ntemp",
"_____no_output_____"
],
[
"#Average rating each month\ntemp = df.groupby(df['date'].dt.strftime('%B'))['rating'].mean().sort_values()\norder_temp = temp.reindex([\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"November\", \"December\"])\norder_temp.plot()",
"_____no_output_____"
],
[
"#Quantity of reviews in each month.\ntemp = df.groupby(df['date'].dt.strftime('%B'))['rating'].count().sort_values()\norder_temp = temp.reindex([\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"November\", \"December\"])\norder_temp.plot()",
"_____no_output_____"
],
[
"#Many words are useless so create a stopword list\nstopwords = set(STOPWORDS)\nstopwords.update([\"Mic\", \"Microphone\", \"using\",\"sound\",\"use\"])\n\n\ndef cleaned_visualise_word_map(x):\n words=\" \"\n for msg in x:\n msg = str(msg).lower()\n words = words+msg+\" \"\n wordcloud = WordCloud(stopwords = stopwords, width=3000, height=2500, background_color='white').generate(words)\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 14\n fig_size[1] = 7\n #Display image appear more smoothly\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis(\"off\")\n plt.show(wordcloud)\ncleaned_visualise_word_map(df[\"review\"])",
"_____no_output_____"
],
[
"df = df[df['rating'] != 3]\ndf['sentiment'] = df['rating'].apply(lambda rating : +1 if rating > 3 else -1)",
"_____no_output_____"
],
[
"positive = df[df['sentiment'] == 1]\nnegative = df[df['sentiment'] == -1]",
"_____no_output_____"
],
[
"df['sentimentt'] = df['sentiment'].replace({-1 : 'negative'})\ndf['sentimentt'] = df['sentimentt'].replace({1 : 'positive'})\nfig = px.histogram(df, x=\"sentimentt\")\nfig.update_traces(marker_color=\"indianred\",marker_line_color='rgb(8,48,107)',\n marker_line_width=1.5)\nfig.update_layout(title_text='Product Sentiment')\nfig.show()",
"_____no_output_____"
],
[
"stopwords = set(STOPWORDS)\n#stopwords.update([\"Mic\", \"Microphone\", \"using\", \"sound\", \"use\"]) \n\n## good and great removed because they were included in negative sentiment\npos = \" \".join(review for review in positive.title)\nwordcloud2 = WordCloud(stopwords=stopwords).generate(pos)\nplt.imshow(wordcloud2, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()",
"_____no_output_____"
],
[
"pos = \" \".join(review for review in negative.title)\nwordcloud2 = WordCloud(stopwords=stopwords).generate(pos)\nplt.imshow(wordcloud2, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Sentiment Analysis",
"_____no_output_____"
]
],
[
[
"def remove_punctuation(text):\n final = \"\".join(u for u in text if u not in (\"?\", \".\", \";\", \":\", \"!\",'\"'))\n return final\n\ndf['review'] = df['review'].apply(remove_punctuation)\ndf = df.dropna(subset=['title'])\ndf['title'] = df['title'].apply(remove_punctuation)",
"_____no_output_____"
],
[
"dfNew = df[['title','sentiment']]\ndfNew.head()",
"_____no_output_____"
],
[
"dfLong = df[['review','sentiment']]\ndfLong.head()",
"_____no_output_____"
],
[
"index = df.index\ndf['random_number'] = np.random.randn(len(index))\ntrain = df[df['random_number'] <= 0.8]\ntest = df[df['random_number'] > 0.8]",
"_____no_output_____"
],
[
"#change df frame to a bag of words\nfrom sklearn.feature_extraction.text import CountVectorizer\nvectorizer = CountVectorizer(token_pattern=r'\\b\\w+\\b')",
"_____no_output_____"
]
],
[
[
"[Vectorizer](https://towardsdatascience.com/hacking-scikit-learns-vectorizers-9ef26a7170af) &\n[Bag-of-Words](https://towardsdatascience.com/hacking-scikit-learns-vectorizers-9ef26a7170af)\n",
"_____no_output_____"
]
],
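To make the bag-of-words idea concrete, here is a tiny standalone example of what `CountVectorizer` does to a couple of made-up review titles (on older scikit-learn versions use `get_feature_names()` instead of `get_feature_names_out()`):

```python
from sklearn.feature_extraction.text import CountVectorizer

sample_titles = ["great mic for podcasting", "great sound great build quality"]
vec = CountVectorizer(token_pattern=r'\b\w+\b')
matrix = vec.fit_transform(sample_titles)

print(vec.get_feature_names_out())  # vocabulary learned from the two titles
print(matrix.toarray())             # word counts per title
```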
[
[
"train_matrix = vectorizer.fit_transform(train['title'])\ntest_matrix = vectorizer.transform(test['title'])",
"_____no_output_____"
],
[
"train_matrix_l = vectorizer.fit_transform(train['review'])\ntest_matrix_l = vectorizer.transform(test['review'])",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression\nlr = LogisticRegression()",
"_____no_output_____"
],
[
"X_train = train_matrix\nX_test = test_matrix\ny_train = train['sentiment']\ny_test = test['sentiment']",
"_____no_output_____"
],
[
"X_train_l = train_matrix_l\nX_test_l = test_matrix_l\ny_train_l = train['sentiment']\ny_test_l = test['sentiment']",
"_____no_output_____"
],
[
"lr.fit(X_train,y_train)",
"_____no_output_____"
],
[
"lr.fit(X_train_l,y_train_l)",
"_____no_output_____"
],
[
"predictions = lr.predict(X_test)",
"_____no_output_____"
],
[
"predictions_l = lr.predict(X_test_l)",
"_____no_output_____"
],
[
"# find accuracy, precision, recall:\nfrom sklearn.metrics import confusion_matrix,classification_report\nnew = np.asarray(y_test)\nconfusion_matrix(predictions,y_test)",
"_____no_output_____"
],
[
"long = np.asarray(y_test_l)\nconfusion_matrix(predictions_l,y_test_l)",
"_____no_output_____"
],
[
"print(classification_report(predictions,y_test))\n#0.88 Accuracy",
" precision recall f1-score support\n\n -1 0.00 0.00 0.00 0\n 1 1.00 0.89 0.94 116\n\n accuracy 0.89 116\n macro avg 0.50 0.44 0.47 116\nweighted avg 1.00 0.89 0.94 116\n\n"
],
[
"print(classification_report(predictions_l,y_test_l))",
" precision recall f1-score support\n\n -1 0.00 0.00 0.00 0\n 1 1.00 0.89 0.94 116\n\n accuracy 0.89 116\n macro avg 0.50 0.44 0.47 116\nweighted avg 1.00 0.89 0.94 116\n\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c717121f218b8d5774e94d0493cf7a11fab315 | 832,008 | ipynb | Jupyter Notebook | Labs/Labs5-8/Lab7.ipynb | jeff-abe/PHYS434 | 56b71785f609f1ddf722cfbc5ed60b28b243f338 | [
"MIT"
] | null | null | null | Labs/Labs5-8/Lab7.ipynb | jeff-abe/PHYS434 | 56b71785f609f1ddf722cfbc5ed60b28b243f338 | [
"MIT"
] | 6 | 2020-10-10T15:49:04.000Z | 2020-12-14T00:36:17.000Z | Labs/Labs5-8/Lab7.ipynb | jeff-abe/PHYS434 | 56b71785f609f1ddf722cfbc5ed60b28b243f338 | [
"MIT"
] | null | null | null | 819.712315 | 359,556 | 0.952011 | [
[
[
"#Setup\n%matplotlib inline\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport scipy\nfrom scipy import stats\nimport pickle\nimport pandas as pd\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import tree\nplt.rcParams[\"figure.figsize\"] = (20,10)",
"_____no_output_____"
],
[
"data = open('higgs_100000_pt_250_500.pkl','rb')\nnew_dict = pickle.load(data)\ndata2 = open('qcd_100000_pt_250_500.pkl','rb')\nqcd_dict = pickle.load(data2)",
"_____no_output_____"
],
[
"high_lumi = pd.read_hdf('data_highLumi_pt_250_500.h5')",
"_____no_output_____"
],
[
"low_lumi = pd.read_hdf('data_lowLumi_pt_250_500.h5')",
"_____no_output_____"
],
[
"#Random Samples using expected number of events for a given run\nstate = 123\nhiggs_events = new_dict.sample(n=100, random_state = state)\nqcd_events = qcd_dict.sample(n=20000, random_state = state)",
"_____no_output_____"
],
[
"#Making lists of labels\nhiggs_labels = [1]*100000\nqcd_labels = [0]*100000",
"_____no_output_____"
],
[
"#Labeling and combining sampled data\nnew_dict['label'] = higgs_labels\nqcd_dict['label'] = qcd_labels\nsample = pd.concat([new_dict,qcd_dict])",
"_____no_output_____"
]
],
[
[
"## **Part 1:** Event Selection Optimization",
"_____no_output_____"
],
[
"#### 1) Make a stacked histogram plot for the feature variable: mass",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(1,1)\nax.hist(higgs_events['mass'],density = True,alpha = 0.8, label = 'higgs')\nax.hist(qcd_events['mass'],density = True,alpha = 0.8, label = 'qcd')\nplt.legend(fontsize = 18)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Expected events in background is 20,000 and is poisson distirbuted",
"_____no_output_____"
],
[
"#### $\\cdot$ Use Poisson statistics for significance calculation",
"_____no_output_____"
]
],
[
[
"np.random.seed(123)\ndist = stats.poisson.rvs(20000, size = 10000)\nplt.hist(dist,density = True, bins = np.linspace(19450,20550,50), label = 'Expected Yield Distribution')\nplt.axvline(20100,color = 'red',label = 'Observed Yield')\nplt.legend(fontsize = 18)\nplt.show()",
"_____no_output_____"
],
[
"print('Significance of 20100 events:', np.round(stats.norm.isf(stats.poisson.sf(20100,20000)),3),'sigma')",
"Significance of 20100 events: 0.711 sigma\n"
]
],
[
[
"$\\frac{\\textbf{N}_{Higgs}}{\\sqrt{\\textbf{N}_{QCD}}} = \\frac{100}{\\sqrt{20000}} = 0.707$\n\nThis value is different than the value obtained in the previous calculation. This is because the value $\\frac{\\textbf{N}_{Higgs}}{\\sqrt{\\textbf{N}_{QCD}}}$ is the number of standard deviations away from the mean the measurment is, while the number from the above calculation is how the probability of the background producing a value larger than the observed value corresponds to the standard normal distributions $\\sigma$.",
"_____no_output_____"
]
],
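The two numbers quoted above can be reproduced side by side; a small sketch comparing the naive $s/\sqrt{b}$ estimate with the Poisson tail probability converted to an equivalent normal-distribution sigma:

```python
import numpy as np
from scipy import stats

n_bkg, n_sig = 20000, 100

naive = n_sig / np.sqrt(n_bkg)                                  # ~0.707 standard deviations
exact = stats.norm.isf(stats.poisson.sf(n_bkg + n_sig, n_bkg))  # ~0.711 sigma
print(naive, exact)
```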
[
[
"def mult_cut(qcd,higgs,features,cuts):\n '''\n Parameters:\n qcd - qcd data dictionary\n higgs - higgs data dictionary\n features (list) - the features to apply cuts to\n cuts (list of touples) - in format ((min,max),(min,max)) \n Returns:\n number of qcd and higgs events\n cut min and max\n significance\n '''\n qcd_factor = 20000/len(qcd)\n higgs_factor = 100/len(higgs)\n mu = qcd\n signal = higgs\n for i in range(0,len(features)):\n a = np.array(mu[features[i]])\n b = np.array(signal[features[i]])\n mu = mu[:][np.logical_and(a>cuts[i][0], a<cuts[i][1])]\n signal = signal[:][np.logical_and(b>cuts[i][0], b<cuts[i][1])]\n mu = len(mu)*qcd_factor\n signal = len(signal)*higgs_factor\n sig = np.round(stats.norm.isf(stats.poisson.sf(mu + signal,mu)),3)\n print(features,'cuts', cuts ,'leaves',mu,'expected qcd events and',signal,'expected higgs events')\n print('Significance of', mu+signal ,'events:',sig,'sigma')\n print('---------------------------------------------\\n')",
"_____no_output_____"
]
],
[
[
"#### 2) Identify mass cuts to optimize the expected significance",
"_____no_output_____"
]
],
[
[
"s = 120\nfor n in range(0,7):\n mult_cut(qcd_dict,new_dict,['mass'],[(s,150)])\n s+=1",
"['mass'] cuts [(120, 150)] leaves 2381.6 expected qcd events and 78.065 expected higgs events\nSignificance of 2459.665 events: 1.591 sigma\n---------------------------------------------\n\n['mass'] cuts [(121, 150)] leaves 2262.4 expected qcd events and 77.038 expected higgs events\nSignificance of 2339.438 events: 1.615 sigma\n---------------------------------------------\n\n['mass'] cuts [(122, 150)] leaves 2157.4 expected qcd events and 75.605 expected higgs events\nSignificance of 2233.005 events: 1.632 sigma\n---------------------------------------------\n\n['mass'] cuts [(123, 150)] leaves 2052.6 expected qcd events and 73.643 expected higgs events\nSignificance of 2126.243 events: 1.625 sigma\n---------------------------------------------\n\n['mass'] cuts [(124, 150)] leaves 1953.0 expected qcd events and 70.109 expected higgs events\nSignificance of 2023.109 events: 1.59 sigma\n---------------------------------------------\n\n['mass'] cuts [(125, 150)] leaves 1852.6000000000001 expected qcd events and 59.393 expected higgs events\nSignificance of 1911.9930000000002 events: 1.365 sigma\n---------------------------------------------\n\n['mass'] cuts [(126, 150)] leaves 1755.2 expected qcd events and 31.146 expected higgs events\nSignificance of 1786.346 events: 0.749 sigma\n---------------------------------------------\n\n"
],
[
"s = 132\nfor n in range(0,7):\n mult_cut(qcd_dict,new_dict,['mass'],[(124,s)])\n s-=1",
"['mass'] cuts [(124, 132)] leaves 724.6 expected qcd events and 69.554 expected higgs events\nSignificance of 794.154 events: 2.563 sigma\n---------------------------------------------\n\n['mass'] cuts [(124, 131)] leaves 640.6 expected qcd events and 68.992 expected higgs events\nSignificance of 709.592 events: 2.682 sigma\n---------------------------------------------\n\n['mass'] cuts [(124, 130)] leaves 551.6 expected qcd events and 67.891 expected higgs events\nSignificance of 619.491 events: 2.842 sigma\n---------------------------------------------\n\n['mass'] cuts [(124, 129)] leaves 469.20000000000005 expected qcd events and 65.21600000000001 expected higgs events\nSignificance of 534.416 events: 2.956 sigma\n---------------------------------------------\n\n['mass'] cuts [(124, 128)] leaves 382.8 expected qcd events and 60.361000000000004 expected higgs events\nSignificance of 443.161 events: 3.034 sigma\n---------------------------------------------\n\n['mass'] cuts [(124, 127)] leaves 291.40000000000003 expected qcd events and 53.394 expected higgs events\nSignificance of 344.79400000000004 events: 3.032 sigma\n---------------------------------------------\n\n['mass'] cuts [(124, 126)] leaves 197.8 expected qcd events and 38.963 expected higgs events\nSignificance of 236.763 events: 2.68 sigma\n---------------------------------------------\n\n"
]
],
[
[
"Cut optimization was performed on the unsampled data in order to not overfit the cuts to the sample selected. The optimal cuts kept data with a mass between 124 and 128, and with those cuts yielded a measurement significance of 3.034 sigma.",
"_____no_output_____"
],
[
"#### 3) Make stacked histogram plots for the rest of the features\n##### With and without optimal mass cuts",
"_____no_output_____"
]
],
[
[
"plt.rcParams[\"figure.figsize\"] = (20,50)\nfig, ((ax1,ax2),(ax3,ax4),(ax5,ax6),(ax7,ax8),(ax9,ax10),(ax11,ax12),(ax13,ax14),(ax15,ax16),(ax17,ax18),(ax19,ax20),(ax21,ax22),(ax23,ax24),(ax25,ax26),(ax27,ax28)) = plt.subplots(14,2)\naxes = ((ax1,ax2),(ax3,ax4),(ax5,ax6),(ax7,ax8),(ax9,ax10),(ax11,ax12),(ax13,ax14),(ax15,ax16),(ax17,ax18),(ax19,ax20),(ax21,ax22),(ax23,ax24),(ax25,ax26),(ax27,ax28))\nlabels = ['pt', 'eta', 'phi', 'mass', 'ee2', 'ee3', 'd2', 'angularity', 't1', 't2', 't3', 't21', 't32', 'KtDeltaR']\na = np.array(new_dict['mass'])\nb = np.array(qcd_dict['mass'])\nfor i in range(0,14):\n axes[i][0].hist(new_dict[labels[i]],density = True, alpha = 0.7,label = 'higgs')\n axes[i][0].hist(qcd_dict[labels[i]],density = True, alpha = 0.7,label = 'qcd')\n axes[i][0].set_xlabel(labels[i])\n axes[i][0].legend()\n axes[i][1].hist(new_dict[labels[i]][np.logical_and(a<135, a>124)],density = True, alpha = 0.7,label = 'higgs with mass cuts')\n axes[i][1].hist(qcd_dict[labels[i]][np.logical_and(b<135, b>124)],density = True, alpha = 0.7,label = 'qcd with mass cuts')\n axes[i][1].set_xlabel(labels[i])\n axes[i][1].legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### 4) Optimize event selections using multiple features",
"_____no_output_____"
]
],
[
[
"mult_cut(qcd_dict,new_dict,['d2'],[(0,1.42)])",
"['d2'] cuts [(0, 1.42)] leaves 2029.6000000000001 expected qcd events and 64.296 expected higgs events\nSignificance of 2093.896 events: 1.415 sigma\n---------------------------------------------\n\n"
],
[
"mult_cut(qcd_dict,new_dict,['t3'],[(0,0.17)])\nmult_cut(qcd_dict,new_dict,['KtDeltaR'],[(0.48,0.93)])\nmult_cut(qcd_dict,new_dict,['ee2'],[(0.11,0.21)])\nmult_cut(qcd_dict,new_dict,['d2'],[(0,1.42)])",
"['t3'] cuts [(0, 0.17)] leaves 712.6 expected qcd events and 46.548 expected higgs events\nSignificance of 759.148 events: 1.744 sigma\n---------------------------------------------\n\n['KtDeltaR'] cuts [(0.48, 0.93)] leaves 4809.2 expected qcd events and 72.474 expected higgs events\nSignificance of 4881.674 events: 1.042 sigma\n---------------------------------------------\n\n['ee2'] cuts [(0.11, 0.21)] leaves 4373.6 expected qcd events and 73.07300000000001 expected higgs events\nSignificance of 4446.673000000001 events: 1.102 sigma\n---------------------------------------------\n\n['d2'] cuts [(0, 1.42)] leaves 2029.6000000000001 expected qcd events and 64.296 expected higgs events\nSignificance of 2093.896 events: 1.415 sigma\n---------------------------------------------\n\n"
],
[
"mult_cut(qcd_events,higgs_events,['mass','d2'],[(124,128),(0,1.42)])\nmult_cut(qcd_events,higgs_events,['mass','KtDeltaR'],[(124,128),(0.48,0.93)])\nmult_cut(qcd_events,higgs_events,['mass','ee2'],[(124,128),(0.11,0.21)])\nmult_cut(qcd_events,higgs_events,['mass','t3'],[(124,128),(0,0.17)])",
"['mass', 'd2'] cuts [(124, 128), (0, 1.42)] leaves 28.0 expected qcd events and 55.0 expected higgs events\nSignificance of 83.0 events: 8.48 sigma\n---------------------------------------------\n\n['mass', 'KtDeltaR'] cuts [(124, 128), (0.48, 0.93)] leaves 150.0 expected qcd events and 60.0 expected higgs events\nSignificance of 210.0 events: 4.666 sigma\n---------------------------------------------\n\n['mass', 'ee2'] cuts [(124, 128), (0.11, 0.21)] leaves 266.0 expected qcd events and 61.0 expected higgs events\nSignificance of 327.0 events: 3.648 sigma\n---------------------------------------------\n\n['mass', 't3'] cuts [(124, 128), (0, 0.17)] leaves 18.0 expected qcd events and 39.0 expected higgs events\nSignificance of 57.0 events: 7.418 sigma\n---------------------------------------------\n\n"
],
[
"mult_cut(qcd_events,higgs_events,['mass','d2','KtDeltaR'],[(124,128),(0,1.42),(0.48,0.93)])",
"['mass', 'd2', 'KtDeltaR'] cuts [(124, 128), (0, 1.42), (0.48, 0.93)] leaves 18.0 expected qcd events and 51.0 expected higgs events\nSignificance of 69.0 events: 9.238 sigma\n---------------------------------------------\n\n"
]
],
[
[
"#### 5) Plot 2-dimensional scattering plots between top two most discriminative features",
"_____no_output_____"
]
],
[
[
"plt.rcParams[\"figure.figsize\"] = (20,10)\nfig, (ax1,ax2) = plt.subplots(1,2)\nax1.plot(qcd_dict['mass'],qcd_dict['d2'],color = 'red', label = 'QCD',ls='',marker='.',alpha=0.5)\nax1.plot(new_dict['mass'],qcd_dict['d2'],color = 'blue',label = 'Higgs',ls='',marker='.',alpha=0.5)\nax1.legend(fontsize = 18)\nax1.set_xlabel('mass',fontsize = 18)\nax1.set_ylabel('d2',fontsize = 18)\n\nax2.plot(qcd_dict['mass'],qcd_dict['KtDeltaR'],color = 'red', label = 'QCD',ls='',marker='.',alpha=0.5)\nax2.plot(new_dict['mass'],qcd_dict['KtDeltaR'],color = 'blue',label = 'Higgs',ls='',marker='.',alpha=0.5)\nax2.legend(fontsize = 18)\nax2.set_xlabel('mass',fontsize = 18)\nax2.set_ylabel('KtDeltaR',fontsize = 18)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Using Maching Learning to predict",
"_____no_output_____"
]
],
[
[
"sample_train, sample_test = train_test_split(sample,test_size = 0.2)\n\nX_train = sample_train.drop('label',axis = 1)\ny_train = sample_train['label']\n\nX_test = sample_test.drop('label',axis = 1)\ny_test = sample_test['label']",
"_____no_output_____"
],
[
"mdl = MLPClassifier(hidden_layer_sizes = (8,20,20,8,8,4),max_iter=200,alpha = 10**-6,learning_rate = 'invscaling')\nmdl.fit(X_train,y_train)",
"_____no_output_____"
],
[
"sum(mdl.predict(X_test) == y_test)/len(y_test)",
"_____no_output_____"
],
[
"from sklearn.metrics import confusion_matrix\nconf = confusion_matrix(y_test,mdl.predict(X_test))\nprint([conf[1]*100/sum(y_test == 1),conf[0]*20000/sum(y_test == 0)])",
"[array([10.91109111, 89.08890889]), array([18293.17068293, 1706.82931707])]\n"
],
[
"true_higgs = conf[1][1]*100/sum(y_test == 1)\nfalse_higgs = conf[0][1]*20000/sum(y_test == 0)\nprint(false_higgs,true_higgs)",
"1706.8293170682932 89.08890889088909\n"
],
[
"sig = stats.norm.isf(stats.poisson.sf(k = true_higgs+false_higgs, mu = false_higgs))\nprint(\"significance using neural network is\",np.round(sig,3),'sigma')",
"significance using neural network is 2.132 sigma\n"
]
],
[
[
"Machine learning model chosen was less effective than the cuts that I had determined. With a more optimized loss function I'm sure machine learning would out perform manually selected cuts, but in this instance it didn't.",
"_____no_output_____"
],
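One way to test that hunch would be to swap in a tree-based classifier on the same train/test split; a hedged sketch (the hyperparameters are guesses, it reuses the X_train, y_train, X_test, y_test defined above, and it may take a while on the full sample):

```python
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix

gbc = GradientBoostingClassifier(n_estimators=200, max_depth=3)
gbc.fit(X_train, y_train)
print(confusion_matrix(y_test, gbc.predict(X_test)))
```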
[
"## **Part 2:** Pseudo-experiment data analysis",
"_____no_output_____"
]
],
[
[
"#Defining a function to make cuts and return the cut data, not calculating significance like previous function\ndef straight_cut(data,features,cuts):\n for i in range(0,len(features)):\n a = np.array(data[features[i]])\n data = data[:][np.logical_and(a>cuts[i][0], a<cuts[i][1])]\n return data",
"_____no_output_____"
]
],
[
[
"#### 1) High Luminosity",
"_____no_output_____"
]
],
[
[
"plt.rcParams[\"figure.figsize\"] = (20,30)\nfig, ((ax1,ax2),(ax3,ax4),(ax5,ax6)) = plt.subplots(3,2)\naxes = (ax1,ax2,ax3,ax4,ax5,ax6)\nfeatures = ['mass','d2','KtDeltaR','ee2','t3','ee3']\nfor i in range(0,6):\n counts,bins = np.histogram(new_dict[features[i]],bins = 50)\n axes[i].hist(bins[:-1],bins, weights = counts*40344/100000, color = 'red',label = 'Higgs',alpha = 0.7)\n counts,bins = np.histogram(qcd_dict[features[i]],bins = 50)\n axes[i].hist(bins[:-1],bins, weights = counts*40344/100000, color = 'blue',label = 'QCD',alpha = 0.7)\n axes[i].hist(high_lumi[features[i]], color = 'green',label = 'data', bins = 50,alpha = 0.7)\n axes[i].legend()\nplt.show()",
"_____no_output_____"
],
[
"plt.rcParams[\"figure.figsize\"] = (20,30)\nfig, ((ax1,ax2),(ax3,ax4),(ax5,ax6)) = plt.subplots(3,2)\naxes = (ax1,ax2,ax3,ax4,ax5,ax6)\nfeatures = ['mass','d2','KtDeltaR','ee2','t3','ee3']\ncut_higgs = straight_cut(new_dict,['mass','d2','KtDeltaR'],[(124,128),(0,1.42),(0.48,0.93)])\ncut_qcd = straight_cut(qcd_dict,['mass','d2','KtDeltaR'],[(124,128),(0,1.42),(0.48,0.93)])\ncut_high = straight_cut(high_lumi,['mass','d2','KtDeltaR'],[(124,128),(0,1.42),(0.48,0.93)])\nfor i in range(0,6):\n counts,bins = np.histogram(cut_higgs[features[i]])\n axes[i].hist(bins[:-1],bins, weights = counts*40344/100000, color = 'red',label = 'Higgs',alpha = 0.7)\n counts,bins = np.histogram(cut_qcd[features[i]])\n axes[i].hist(bins[:-1],bins, weights = counts*40344/100000, color = 'blue',label = 'QCD',alpha = 0.7)\n axes[i].hist(cut_high[features[i]], color = 'green',label = 'data',alpha = 0.7)\n axes[i].legend()\n axes[i].set_yscale('log')\nplt.show()",
"_____no_output_____"
],
[
"n_qcd = len(cut_qcd)*40344/100000\nn_observed = len(cut_high)\nsig = np.round(stats.norm.isf(stats.poisson.sf(n_observed,n_qcd)),3)\nprint('Significance of', n_observed ,'events:',sig,'sigma')",
"Significance of 128 events: 10.724 sigma\n"
]
],
[
[
"The same cuts made on the simulated data gave a lower significance of $9.2\\sigma$",
"_____no_output_____"
],
[
"#### 2) Low Luminosity",
"_____no_output_____"
]
],
[
[
"plt.rcParams[\"figure.figsize\"] = (20,30)\nfig, ((ax1,ax2),(ax3,ax4),(ax5,ax6)) = plt.subplots(3,2)\naxes = (ax1,ax2,ax3,ax4,ax5,ax6)\nfeatures = ['mass','d2','KtDeltaR','ee2','t3','ee3']\nfor i in range(0,6):\n counts,bins = np.histogram(new_dict[features[i]],bins = 50)\n axes[i].hist(bins[:-1],bins, weights = counts*4060/100000, color = 'red',label = 'Higgs',alpha = 0.7)\n counts,bins = np.histogram(qcd_dict[features[i]],bins = 50)\n axes[i].hist(bins[:-1],bins, weights = counts*4060/100000, color = 'blue',label = 'QCD',alpha = 0.7)\n axes[i].hist(low_lumi[features[i]], color = 'green',label = 'data', bins = 50,alpha = 0.7)\n axes[i].legend()\nplt.show()",
"_____no_output_____"
],
[
"plt.rcParams[\"figure.figsize\"] = (20,30)\nfig, ((ax1,ax2),(ax3,ax4),(ax5,ax6)) = plt.subplots(3,2)\naxes = (ax1,ax2,ax3,ax4,ax5,ax6)\nfeatures = ['mass','d2','KtDeltaR','ee2','t3','ee3']\ncut_low = straight_cut(low_lumi,['mass','d2','KtDeltaR'],[(124,128),(0,1.42),(0.48,0.93)])\nfor i in range(0,6):\n counts,bins = np.histogram(cut_higgs[features[i]])\n axes[i].hist(bins[:-1],bins, weights = counts*4060/100000, color = 'red',label = 'Higgs',alpha = 0.7)\n counts,bins = np.histogram(cut_qcd[features[i]])\n axes[i].hist(bins[:-1],bins, weights = counts*4060/100000, color = 'blue',label = 'QCD',alpha = 0.7)\n axes[i].hist(cut_low[features[i]], color = 'green',label = 'data',alpha = 0.7)\n axes[i].legend()\n axes[i].set_yscale('log')\nplt.show()",
"_____no_output_____"
],
[
"n_qcd = len(cut_qcd)*4060/100000\nn_observed = len(cut_low)\nsig = np.round(stats.norm.isf(stats.poisson.sf(n_observed,n_qcd)),3)\nprint('Significance of', n_observed ,'events:',sig,'sigma')",
"Significance of 9 events: 2.273 sigma\n"
]
],
[
[
"#### 3) Confidence Levels of signal yield\n\n95% Upper limit for signal yield low luminosity\n\n$$\\sum_{k = 9}^{\\infty}P(\\mu,k) = 0.95$$\n$$P(\\mu,k) = \\frac{e^{-\\mu}\\mu^k}{k!}$$\n$$\\sum_{k = 0}^{9}\\frac{e^{-\\mu}\\mu^k}{k!} = 0.05$$\n$$\\mu = 15.71$$",
"_____no_output_____"
]
],
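The quoted $\mu = 15.71$ can be recovered numerically by solving $P(k \le 9 \,|\, \mu) = 0.05$ for $\mu$; a sketch using a root finder:

```python
from scipy import stats
from scipy.optimize import brentq

# find mu such that the Poisson probability of observing 9 or fewer events is 5%
mu_upper = brentq(lambda mu: stats.poisson.cdf(9, mu) - 0.05, 9.0, 40.0)
print(mu_upper)   # ~15.7
```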
[
[
"print('With a true signal of 15.71, the probability seeing something stronger than 9 events is:',np.round(stats.poisson.sf(9,15.71),4))",
"With a true signal of 15.71, the probability seeing something stronger than 9 events is: 0.9501\n"
]
],
[
[
"This means that 95% of the time would see more than 9 events if there were a true signal strength of 15.71 events.",
"_____no_output_____"
],
[
"For the low luminosity data we expected to see 4.22 events, since the data is poisson distributed we will round up to 5 events in order to get more than 95%\n\n$$\\sum_{k = 5}^{\\infty}P(\\mu,k) = 0.95$$\n$$P(\\mu,k) = \\frac{e^{-\\mu}\\mu^k}{k!}$$\n$$\\sum_{k = 0}^{5}\\frac{e^{-\\mu}\\mu^k}{k!} = 0.05$$\n$$\\mu = 10.51$$",
"_____no_output_____"
]
],
[
[
"prob = 0\nmu = 128\nwhile prob>0.05:\n prob = stats.poisson.cdf(128,mu)\n mu+=0.02\nprint(mu,prob)",
"128 0\n"
],
[
"print('With a true signal of 10.513, the probability seeing something stronger than 4.22 events is:',np.round(stats.poisson.sf(4.22,10.513),4))",
"With a true signal of 10.513, the probability seeing something stronger than 4.22 events is: 0.9791\n"
]
],
[
[
"The expected upper limit of 10.513 is lower than the observed upper limit of 15.71, this means that while there were more observed events than expected, we cannot say with 95% certainty that there was no signal present, but we cannot also say with certainty that we have seen a signal.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7c71b72018db35066eaf429c9a225c287eb2937 | 13,081 | ipynb | Jupyter Notebook | docs/weighting.ipynb | MaxGhenis/taxcalc-helpers | 85d2739d1c96882424cb51ef1806c9e51f88c085 | [
"MIT"
] | 6 | 2019-06-26T14:37:49.000Z | 2020-08-10T22:26:34.000Z | docs/weighting.ipynb | MaxGhenis/taxcalc-helpers | 85d2739d1c96882424cb51ef1806c9e51f88c085 | [
"MIT"
] | 94 | 2019-06-22T14:57:58.000Z | 2020-09-08T16:35:38.000Z | docs/weighting.ipynb | MaxGhenis/taxcalc-helpers | 85d2739d1c96882424cb51ef1806c9e51f88c085 | [
"MIT"
] | 6 | 2020-09-08T18:29:36.000Z | 2021-04-01T18:31:42.000Z | 27.082816 | 822 | 0.432994 | [
[
[
"# Weighting in taxcalc_helpers\n\n## Setup",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\nimport taxcalc as tc\nimport microdf as mdf",
"_____no_output_____"
],
[
"tc.__version__",
"_____no_output_____"
]
],
[
[
"## Load data\n\nStart with a `DataFrame` with `nu18` and `XTOT`, and also calculate `XTOT_m`.",
"_____no_output_____"
]
],
[
[
"df = mdf.calc_df(group_vars=['nu18'], metric_vars=['XTOT'])\ndf.columns",
"_____no_output_____"
]
],
[
[
"From this we can calculate the number of people and tax units by the tax unit's number of children.",
"_____no_output_____"
]
],
[
[
"df.groupby('nu18')[['s006_m', 'XTOT_m']].sum()",
"_____no_output_____"
]
],
[
[
"What if we also want to calculate the total number of *children* by the tax unit's number of children?\n\nFor this we can use `add_weighted_metrics`, the function called within `calc_df`.",
"_____no_output_____"
]
],
[
[
"mdf.add_weighted_metrics(df, ['nu18'])",
"_____no_output_____"
]
],
[
[
"Now we can do the same thing as before, with the new `nu18_m` column.",
"_____no_output_____"
]
],
[
[
"df.groupby('nu18')[['nu18_m']].sum()",
"_____no_output_____"
]
],
[
[
"We can also calculate weighted sums without adding the weighted metric.",
"_____no_output_____"
]
],
[
[
"total_children = mdf.weighted_sum(df, 'nu18', 's006')\n# Fix this decimal.\n'Total children: ' + str(round(total_children / 1e6)) + 'M.'",
"_____no_output_____"
]
],
[
[
"We can also calculate the weighted mean and median.",
"_____no_output_____"
]
],
[
[
"mdf.weighted_mean(df, 'nu18', 's006')",
"_____no_output_____"
],
[
"mdf.weighted_median(df, 'nu18', 's006')",
"_____no_output_____"
]
],
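As a quick cross-check, a weighted mean can also be computed with plain numpy using the weight column directly (this assumes the same df built earlier in this notebook):

```python
import numpy as np

manual_mean = np.average(df['nu18'], weights=df['s006'])
print(manual_mean)   # should agree with mdf.weighted_mean(df, 'nu18', 's006')
```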
[
[
"We can also look at more quantiles.\n\n*Note that weighted quantiles have a different interface.*",
"_____no_output_____"
]
],
[
[
"decile_bounds = np.arange(0, 1.1, 0.1)\ndeciles = mdf.weighted_quantile(df, 'nu18', 's006', decile_bounds)\npd.DataFrame(deciles, index=decile_bounds)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7c72ce080189c1d23a3db4a93112fc7dcf387d2 | 499,034 | ipynb | Jupyter Notebook | docs/source/examples/Natural and artificial perturbations.ipynb | helgee/poliastro | a8e2200e82ba67c976514048383de7b832a90705 | [
"MIT"
] | null | null | null | docs/source/examples/Natural and artificial perturbations.ipynb | helgee/poliastro | a8e2200e82ba67c976514048383de7b832a90705 | [
"MIT"
] | null | null | null | docs/source/examples/Natural and artificial perturbations.ipynb | helgee/poliastro | a8e2200e82ba67c976514048383de7b832a90705 | [
"MIT"
] | null | null | null | 49.704582 | 17,944 | 0.697772 | [
[
[
"# Natural and artificial perturbations",
"_____no_output_____"
]
],
[
[
"import functools\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.ion()\n\nfrom astropy import units as u\nfrom astropy.time import Time\nfrom astropy.coordinates import solar_system_ephemeris\n\nfrom poliastro.twobody.propagation import propagate, cowell\nfrom poliastro.ephem import build_ephem_interpolant\nfrom poliastro.core.elements import rv2coe\n\nfrom poliastro.core.util import norm\nfrom poliastro.util import time_range\nfrom poliastro.core.perturbations import (\n atmospheric_drag, third_body, J2_perturbation\n)\nfrom poliastro.bodies import Earth, Moon\nfrom poliastro.twobody import Orbit\nfrom poliastro.plotting import OrbitPlotter2D, OrbitPlotter3D",
"_____no_output_____"
]
],
[
[
"### Atmospheric drag ###\nThe poliastro package now has several commonly used natural perturbations. One of them is atmospheric drag! See how one can monitor decay of the near-Earth orbit over time using our new module poliastro.twobody.perturbations!",
"_____no_output_____"
]
],
[
[
"R = Earth.R.to(u.km).value\nk = Earth.k.to(u.km**3 / u.s**2).value\n\norbit = Orbit.circular(Earth, 250 * u.km, epoch=Time(0.0, format='jd', scale='tdb'))\n\n# parameters of a body\nC_D = 2.2 # dimentionless (any value would do)\nA = ((np.pi / 4.0) * (u.m**2)).to(u.km**2).value # km^2\nm = 100 # kg\nB = C_D * A / m\n\n# parameters of the atmosphere\nrho0 = Earth.rho0.to(u.kg / u.km**3).value # kg/km^3\nH0 = Earth.H0.to(u.km).value\ntof = (100000 * u.s).to(u.day).value\ntr = time_range(0.0, periods=2000, end=tof, format='jd', scale='tdb')\ncowell_with_ad = functools.partial(cowell, ad=atmospheric_drag,\n R=R, C_D=C_D, A=A, m=m, H0=H0, rho0=rho0)\n\nrr = propagate(\n orbit, (tr - orbit.epoch).to(u.s), method=cowell_with_ad\n)",
"_____no_output_____"
],
[
"plt.ylabel('h(t)')\nplt.xlabel('t, days')\nplt.plot(tr.value, rr.data.norm() - Earth.R);",
"_____no_output_____"
]
],
[
[
"### Evolution of RAAN due to the J2 perturbation ###\nWe can also see how the J2 perturbation changes RAAN over time!",
"_____no_output_____"
]
],
[
[
"r0 = np.array([-2384.46, 5729.01, 3050.46]) * u.km\nv0 = np.array([-7.36138, -2.98997, 1.64354]) * u.km / u.s\n\norbit = Orbit.from_vectors(Earth, r0, v0)\n\ntof = 48.0 * u.h\n\n# This will be easier with propagate\n# when this is solved:\n# https://github.com/poliastro/poliastro/issues/257\nrr, vv = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n np.linspace(0, tof, 2000),\n ad=J2_perturbation,\n J2=Earth.J2.value,\n R=Earth.R.to(u.km).value\n)\n\nk = Earth.k.to(u.km**3 / u.s**2).value\nrr = rr.to(u.km).value\nvv = vv.to(u.km / u.s).value\n\nraans = [rv2coe(k, r, v)[3] for r, v in zip(rr, vv)]\nplt.ylabel('RAAN(t)')\nplt.xlabel('t, h')\nplt.plot(np.linspace(0, tof, 2000), raans);",
"_____no_output_____"
]
],
[
[
"### 3rd body ###\nApart from time-independent perturbations such as atmospheric drag and J2/J3, we have time-dependent perturbations. Let's see how the Moon changes the orbit of a GEO satellite over time!",
"_____no_output_____"
]
],
[
[
"# database keeping positions of bodies in Solar system over time\nsolar_system_ephemeris.set('de432s')\n\nj_date = 2454283.0 * u.day # setting the exact event date is important\n\ntof = (60 * u.day).to(u.s).value\n\n# create interpolant of 3rd body coordinates (calling in on every iteration will be just too slow)\nbody_r = build_ephem_interpolant(Moon, 28 * u.day, (j_date, j_date + 60 * u.day), rtol=1e-2)\n\nepoch = Time(j_date, format='jd', scale='tdb')\ninitial = Orbit.from_classical(Earth, 42164.0 * u.km, 0.0001 * u.one, 1 * u.deg, \n 0.0 * u.deg, 0.0 * u.deg, 0.0 * u.rad, epoch=epoch)\n\n# multiply Moon gravity by 400 so that effect is visible :)\ncowell_with_3rdbody = functools.partial(cowell, rtol=1e-6, ad=third_body,\n k_third=400 * Moon.k.to(u.km**3 / u.s**2).value, \n third_body=body_r)\n\ntr = time_range(j_date.value, periods=1000, end=j_date.value + 60, format='jd', scale='tdb')\n\nrr = propagate(\n initial, (tr - initial.epoch).to(u.s), method=cowell_with_3rdbody\n)",
"_____no_output_____"
],
[
"frame = OrbitPlotter3D()\n\nframe.set_attractor(Earth)\nframe.plot_trajectory(rr, label='orbit influenced by Moon')",
"_____no_output_____"
]
],
[
[
"### Thrusts ###\nApart from natural perturbations, there are artificial thrusts aimed at intentionally changing orbit parameters. One such change is a simultaneous change of eccentricity and inclination.",
"_____no_output_____"
]
],
[
[
"from poliastro.twobody.thrust import change_inc_ecc\n\necc_0, ecc_f = 0.4, 0.0\na = 42164 # km\ninc_0 = 0.0 # rad, baseline\ninc_f = (20.0 * u.deg).to(u.rad).value # rad\nargp = 0.0 # rad, the method is efficient for 0 and 180\nf = 2.4e-6 # km / s2\n\nk = Earth.k.to(u.km**3 / u.s**2).value\ns0 = Orbit.from_classical(\n Earth,\n a * u.km, ecc_0 * u.one, inc_0 * u.deg,\n 0 * u.deg, argp * u.deg, 0 * u.deg,\n epoch=Time(0, format='jd', scale='tdb')\n)\n \na_d, _, _, t_f = change_inc_ecc(s0, ecc_f, inc_f, f)\n\ncowell_with_ad = functools.partial(cowell, rtol=1e-6, ad=a_d)\n\ntr = time_range(0.0, periods=1000, end=(t_f * u.s).to(u.day).value, format='jd', scale='tdb')\n\nrr2 = propagate(\n s0, (tr - s0.epoch).to(u.s), method=cowell_with_ad\n)",
"_____no_output_____"
],
[
"frame = OrbitPlotter3D()\n\nframe.set_attractor(Earth)\nframe.plot_trajectory(rr2, label='orbit with artificial thrust')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7c7403ab72c80c700146b2cb28382a111cc9208 | 32,815 | ipynb | Jupyter Notebook | code/dataset/verify_split_tests.ipynb | frankaging/Reason-SCAN | 1e6726aab70c915a3d3068bed77ae255feeaf6f1 | [
"CC-BY-4.0"
] | 14 | 2021-07-05T19:05:53.000Z | 2022-02-08T10:09:36.000Z | code/dataset/verify_split_tests.ipynb | frankaging/Reason-SCAN | 1e6726aab70c915a3d3068bed77ae255feeaf6f1 | [
"CC-BY-4.0"
] | 1 | 2021-11-21T10:44:51.000Z | 2021-12-02T15:11:51.000Z | code/dataset/verify_split_tests.ipynb | frankaging/Reason-SCAN | 1e6726aab70c915a3d3068bed77ae255feeaf6f1 | [
"CC-BY-4.0"
] | 2 | 2021-06-17T08:16:36.000Z | 2021-06-21T17:54:54.000Z | 32.108611 | 445 | 0.568399 | [
[
[
"#### Reason for these tests\nAn issue was raised in [ISSUE_1](https://github.com/frankaging/Reason-SCAN/issues/1), in which the reporter finds some discrepancies in split numbers. Specifically, the `test` split in our main data frame does not match up with our sub-test splits `p1`, `p2`, and `p3`. This report further exposes another issue with our documentation about the splits (i.e., how we generate our splits). Thus, we use this live debug notebook to address these comments.",
"_____no_output_____"
],
[
"#### The Issue",
"_____no_output_____"
]
],
[
[
"import os, json\np1_test_path_to_data = \"../../ReaSCAN-v1.0/ReaSCAN-compositional-p1-test/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {p1_test_path_to_data}...\")\np1_test_data = json.load(open(p1_test_path_to_data, \"r\"))\nprint(len(p1_test_data[\"examples\"][\"test\"]))\n\np2_test_path_to_data = \"../../ReaSCAN-v1.0/ReaSCAN-compositional-p2-test/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {p2_test_path_to_data}...\")\np2_test_data = json.load(open(p2_test_path_to_data, \"r\"))\nprint(len(p2_test_data[\"examples\"][\"test\"]))\n\np3_test_path_to_data = \"../../ReaSCAN-v1.0/ReaSCAN-compositional-p3-test/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {p3_test_path_to_data}...\")\np3_test_data = json.load(open(p3_test_path_to_data, \"r\"))\nprint(len(p3_test_data[\"examples\"][\"test\"]))",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-p1-test/data-compositional-splits.txt...\n921\nReading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-p2-test/data-compositional-splits.txt...\n2120\nReading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-p3-test/data-compositional-splits.txt...\n2712\n"
],
[
"len(p1_test_data[\"examples\"][\"test\"]) + len(p2_test_data[\"examples\"][\"test\"]) + len(p3_test_data[\"examples\"][\"test\"])",
"_____no_output_____"
],
[
"ReaSCAN_path_to_data = \"../../ReaSCAN-v1.0/ReaSCAN-compositional/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {ReaSCAN_path_to_data}...\")\nReaSCAN_data = json.load(open(ReaSCAN_path_to_data, \"r\"))",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional/data-compositional-splits.txt...\n"
],
[
"p1_test_example_filtered = []\np2_test_example_filtered = []\np3_test_example_filtered = []\nfor example in ReaSCAN_data[\"examples\"][\"test\"]:\n if example['derivation'] == \"$OBJ_0\":\n p1_test_example_filtered += [example]\n elif example['derivation'] == \"$OBJ_0 ^ $OBJ_1\":\n p2_test_example_filtered += [example]\n elif example['derivation'] == \"$OBJ_0 ^ $OBJ_1 & $OBJ_2\":\n p3_test_example_filtered += [example]\nprint(f\"p1 test example count={len(p1_test_example_filtered)}\")\nprint(f\"p2 test example count={len(p2_test_example_filtered)}\")\nprint(f\"p3 test example count={len(p3_test_example_filtered)}\")",
"p1 test example count=907\np2 test example count=2122\np3 test example count=2724\n"
],
[
"len(p1_test_example_filtered) + len(p2_test_example_filtered) + len(p3_test_example_filtered)",
"_____no_output_____"
]
],
[
[
"For instance, as you can see, `p1 test example count` should be equal to `921`, but it is not. However, the total number of test examples does match up. The **root cause** is potentially that our sub-test splits were created asynchronously with the test split in the main data.\n\nBefore confirming the **root cause**, we first need to analyze the actual **impact** on performance numbers: does it change our results qualitatively, or just quantitatively? We come up with some tests around this issue, starting from basic and moving to more complex.",
"_____no_output_____"
],
[
"#### Test-1: Validity\nWe need to ensure our sub-test splits **only** contain commands that appear in the training set. Otherwise, our test splits become compositional splits.",
"_____no_output_____"
]
],
[
[
"train_command_set = set([])\nfor example in ReaSCAN_data[\"examples\"][\"train\"]:\n train_command_set.add(example[\"command\"])",
"_____no_output_____"
],
[
"for example in p1_test_data[\"examples\"][\"test\"]:\n assert example[\"command\"] in train_command_set\nfor example in p2_test_data[\"examples\"][\"test\"]:\n assert example[\"command\"] in train_command_set\nfor example in p3_test_data[\"examples\"][\"test\"]:\n assert example[\"command\"] in train_command_set\nprint(\"Test-1 Passed\")",
"Test-1 Passed\n"
]
],
[
[
"#### Test-2: Overestimating?\nWhat about the shape world? Are there overlaps between train and test?",
"_____no_output_____"
]
],
[
[
"import hashlib\ntrain_example_hash = set([])\nfor example in ReaSCAN_data[\"examples\"][\"train\"]:\n example_hash_object = hashlib.md5(json.dumps(example).encode('utf-8'))\n train_example_hash.add(example_hash_object.hexdigest())\nassert len(train_example_hash) == len(ReaSCAN_data[\"examples\"][\"train\"])",
"_____no_output_____"
],
[
"p1_test_example_hash = set([])\nfor example in p1_test_data[\"examples\"][\"test\"]:\n example_hash_object = hashlib.md5(json.dumps(example).encode('utf-8'))\n p1_test_example_hash.add(example_hash_object.hexdigest())\nassert len(p1_test_example_hash) == len(p1_test_data[\"examples\"][\"test\"])\n\np2_test_example_hash = set([])\nfor example in p2_test_data[\"examples\"][\"test\"]:\n example_hash_object = hashlib.md5(json.dumps(example).encode('utf-8'))\n p2_test_example_hash.add(example_hash_object.hexdigest())\nassert len(p2_test_example_hash) == len(p2_test_data[\"examples\"][\"test\"])\n\np3_test_example_hash = set([])\nfor example in p3_test_data[\"examples\"][\"test\"]:\n example_hash_object = hashlib.md5(json.dumps(example).encode('utf-8'))\n p3_test_example_hash.add(example_hash_object.hexdigest())\nassert len(p3_test_example_hash) == len(p3_test_data[\"examples\"][\"test\"])",
"_____no_output_____"
],
[
"p1_test_dup_count = 0\nfor hash_str in p1_test_example_hash:\n if hash_str in train_example_hash:\n p1_test_dup_count += 1\n \np2_test_dup_count = 0\nfor hash_str in p2_test_example_hash:\n if hash_str in train_example_hash:\n p2_test_dup_count += 1\n\np3_test_dup_count = 0\nfor hash_str in p3_test_example_hash:\n if hash_str in train_example_hash:\n p3_test_dup_count += 1",
"_____no_output_____"
],
[
"print(f\"p1_test_dup_count={p1_test_dup_count}\")\nprint(f\"p2_test_dup_count={p2_test_dup_count}\")\nprint(f\"p3_test_dup_count={p3_test_dup_count}\")",
"p1_test_dup_count=858\np2_test_dup_count=1982\np3_test_dup_count=2548\n"
],
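[
"# One way to put the duplicate counts above in perspective: express them as a\n# share of each sub-test split (a small follow-up sketch, using only variables\n# already defined earlier in this notebook).\nfor name, dup_count, split_data in [('p1', p1_test_dup_count, p1_test_data), ('p2', p2_test_dup_count, p2_test_data), ('p3', p3_test_dup_count, p3_test_data)]:\n    total = len(split_data['examples']['test'])\n    print(name, 'duplicated in train:', dup_count, '/', total, '=', round(dup_count / total, 3))",
"_____no_output_____"
],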
[
"main_p1_test_example_hash = set([])\nfor example in p1_test_example_filtered:\n example_hash_object = hashlib.md5(json.dumps(example).encode('utf-8'))\n main_p1_test_example_hash.add(example_hash_object.hexdigest())\nassert len(main_p1_test_example_hash) == len(p1_test_example_filtered)",
"_____no_output_____"
],
[
"main_p1_test_dup_count = 0\nfor hash_str in main_p1_test_example_hash:\n if hash_str in train_example_hash:\n main_p1_test_dup_count += 1",
"_____no_output_____"
],
[
"print(f\"main_p1_test_dup_count={main_p1_test_dup_count}\")",
"main_p1_test_dup_count=0\n"
]
],
[
[
"**Conclusion**: Yes. As you can see, we have many duplicated examples in our random test splits. This means that we need to use updated test splits for evaluating performance. As a result, **Table 3** in the paper needs to be updated, since it currently overestimates model performance for the non-generalizing test splits (e.g., `p1`, `p2`, and `p3`).",
"_____no_output_____"
],
[
"**Action Required**: Need to re-evaluate model performance on those splits.",
"_____no_output_____"
],
[
"#### Test-3: Does this issue affect any other generalization splits?\nDo our generalization splits contain duplicates?",
"_____no_output_____"
]
],
[
[
"def get_example_hash_set(split):\n split_test_path_to_data = f\"../../ReaSCAN-v1.0/ReaSCAN-compositional-{split}/data-compositional-splits.txt\"\n print(f\"Reading dataset from file: {split_test_path_to_data}...\")\n split_test_data = json.load(open(split_test_path_to_data, \"r\"))\n split_test_data_test_example_hash = set([])\n for example in split_test_data[\"examples\"][\"test\"]:\n example_hash_object = hashlib.md5(json.dumps(example).encode('utf-8'))\n split_test_data_test_example_hash.add(example_hash_object.hexdigest())\n assert len(split_test_data_test_example_hash) == len(split_test_data[\"examples\"][\"test\"])\n return split_test_data_test_example_hash\n ",
"_____no_output_____"
],
[
"a1_hash = get_example_hash_set(\"a1\")\na2_hash = get_example_hash_set(\"a2\")\na3_hash = get_example_hash_set(\"a3\")\n\nb1_hash = get_example_hash_set(\"b1\")\nb2_hash = get_example_hash_set(\"b2\")\n\nc1_hash = get_example_hash_set(\"c1\")\nc2_hash = get_example_hash_set(\"c2\")",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-a1/data-compositional-splits.txt...\nReading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-a2/data-compositional-splits.txt...\nReading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-a3/data-compositional-splits.txt...\nReading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-b1/data-compositional-splits.txt...\nReading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-b2/data-compositional-splits.txt...\nReading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-c1/data-compositional-splits.txt...\nReading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-c2/data-compositional-splits.txt...\n"
],
[
"a1_dup_count = 0\nfor hash_str in a1_hash:\n if hash_str in train_example_hash:\n a1_dup_count += 1\na2_dup_count = 0\nfor hash_str in a2_hash:\n if hash_str in train_example_hash:\n a2_dup_count += 1\na3_dup_count = 0\nfor hash_str in a3_hash:\n if hash_str in train_example_hash:\n a3_dup_count += 1",
"_____no_output_____"
],
[
"print(f\"a1_dup_count={a1_dup_count}\")\nprint(f\"a2_dup_count={a2_dup_count}\")\nprint(f\"a3_dup_count={a3_dup_count}\")",
"a1_dup_count=0\na2_dup_count=0\na3_dup_count=0\n"
],
[
"b1_dup_count = 0\nfor hash_str in b1_hash:\n if hash_str in train_example_hash:\n b1_dup_count += 1\nb2_dup_count = 0\nfor hash_str in b2_hash:\n if hash_str in train_example_hash:\n b2_dup_count += 1",
"_____no_output_____"
],
[
"print(f\"b1_dup_count={b1_dup_count}\")\nprint(f\"b2_dup_count={b2_dup_count}\")",
"b1_dup_count=0\nb2_dup_count=0\n"
],
[
"c1_dup_count = 0\nfor hash_str in c1_hash:\n if hash_str in train_example_hash:\n c1_dup_count += 1\nc2_dup_count = 0\nfor hash_str in c2_hash:\n if hash_str in train_example_hash:\n c2_dup_count += 1",
"_____no_output_____"
],
[
"print(f\"c1_dup_count={c1_dup_count}\")\nprint(f\"c2_dup_count={c2_dup_count}\")",
"c1_dup_count=0\nc2_dup_count=0\n"
]
],
[
[
"**Conclusion**: No.",
"_____no_output_____"
],
[
"#### Test-4: What about the correctness of generalization splits in general?\nWe see there are no duplicates, but what about general correctness? Are they created correctly? In this section, we add more sanity checks to show the correctness of each generalization split.\n\nFor each split, we verify two things:\n* the generalization split ONLY contains test examples that it is designed to test.\n* the training split DOES NOT contain examples that are aligned with the generalization split.",
"_____no_output_____"
],
[
"A1: novel color modifier",
"_____no_output_____"
]
],
[
[
"split_test_path_to_data = f\"../../ReaSCAN-v1.0/ReaSCAN-compositional-a1/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {split_test_path_to_data}...\")\nsplit_test_data = json.load(open(split_test_path_to_data, \"r\"))",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-a1/data-compositional-splits.txt...\n"
],
[
"for example in split_test_data[\"examples\"][\"test\"]:\n assert \"yellow,square\" in example[\"command\"]",
"_____no_output_____"
],
[
"for example in ReaSCAN_data[\"examples\"][\"train\"]:\n assert \"yellow,square\" not in example[\"command\"]",
"_____no_output_____"
]
],
[
[
"A2: novel color attribute",
"_____no_output_____"
]
],
[
[
"# this test may be a little too weak for now. maybe improve it to verify the shape world?\nsplit_test_path_to_data = f\"../../ReaSCAN-v1.0/ReaSCAN-compositional-a2/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {split_test_path_to_data}...\")\nsplit_test_data = json.load(open(split_test_path_to_data, \"r\"))",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-a2/data-compositional-splits.txt...\n"
],
[
"for example in ReaSCAN_data[\"examples\"][\"train\"]:\n assert \"red,square\" not in example[\"command\"]",
"_____no_output_____"
],
[
"for example in split_test_data[\"examples\"][\"test\"]:\n if \"red,square\" not in example[\"command\"]:\n # then, some background object referred in the command needs to be a red square!!\n if example[\"derivation\"] == \"$OBJ_0\":\n assert example['situation']['placed_objects']['0']['object']['shape'] == \"square\"\n assert example['situation']['placed_objects']['0']['object']['color'] == \"red\"\n elif example[\"derivation\"] == \"$OBJ_0 ^ $OBJ_1\":\n assert example['situation']['placed_objects']['0']['object']['shape'] == \"square\" or example['situation']['placed_objects']['1']['object']['shape'] == \"square\"\n assert example['situation']['placed_objects']['0']['object']['color'] == \"red\" or example['situation']['placed_objects']['1']['object']['color'] == \"red\"\n elif example[\"derivation\"] == \"$OBJ_0 ^ $OBJ_1 & $OBJ_2\":\n assert example['situation']['placed_objects']['0']['object']['shape'] == \"square\" or example['situation']['placed_objects']['1']['object']['shape'] == \"square\" or example['situation']['placed_objects']['2']['object']['shape'] == \"square\"\n assert example['situation']['placed_objects']['0']['object']['color'] == \"red\" or example['situation']['placed_objects']['1']['object']['color'] == \"red\" or example['situation']['placed_objects']['2']['object']['color'] == \"red\"\n else:\n pass",
"_____no_output_____"
]
],
[
[
"A3: novel size attribute",
"_____no_output_____"
]
],
[
[
"# this test may be a little too weak for now. maybe improve it to verify the shape world?\nsplit_test_path_to_data = f\"../../ReaSCAN-v1.0/ReaSCAN-compositional-a3/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {split_test_path_to_data}...\")\nsplit_test_data = json.load(open(split_test_path_to_data, \"r\"))",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-a3/data-compositional-splits.txt...\n"
],
[
"for example in split_test_data[\"examples\"][\"test\"]:\n assert \"small,cylinder\" in example['command'] or \\\n \"small,red,cylinder\" in example['command'] or \\\n \"small,blue,cylinder\" in example['command'] or \\\n \"small,yellow,cylinder\" in example['command'] or \\\n \"small,green,cylinder\" in example['command']",
"_____no_output_____"
],
[
"for example in ReaSCAN_data[\"examples\"][\"train\"]:\n assert not (\"small,cylinder\" in example['command'] or \\\n \"small,red,cylinder\" in example['command'] or \\\n \"small,blue,cylinder\" in example['command'] or \\\n \"small,yellow,cylinder\" in example['command'] or \\\n \"small,green,cylinder\" in example['command'])",
"_____no_output_____"
]
],
[
[
"B1: novel co-occurrence of objects",
"_____no_output_____"
]
],
[
[
"# this test may be a little too weak for now. maybe improve it to verify the shape world?\nsplit_test_path_to_data = f\"../../ReaSCAN-v1.0/ReaSCAN-compositional-b1/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {split_test_path_to_data}...\")\nsplit_test_data = json.load(open(split_test_path_to_data, \"r\"))",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-b1/data-compositional-splits.txt...\n"
],
[
"from collections import namedtuple, OrderedDict\nseen_command_structs = {}\nseen_concepts = {} # add in seen concepts, so we can select concepts that are seen, but new composites!\nseen_object_co = set([])\nseen_rel_co = set([])\n\nfor example_selected in ReaSCAN_data[\"examples\"][\"train\"]:\n rel_map = OrderedDict({})\n for ele in example_selected[\"relation_map\"]:\n rel_map[tuple(ele[0])] = ele[1]\n example_struct = OrderedDict({\n 'obj_pattern_map': example_selected[\"object_pattern_map\"],\n 'rel_map': rel_map,\n 'obj_map': example_selected[\"object_expression\"],\n 'grammer_pattern': example_selected['grammer_pattern'],\n 'adverb': example_selected['adverb_in_command'],\n 'verb': example_selected['verb_in_command']\n })\n obj_co = []\n for k, v in example_selected[\"object_expression\"].items():\n if v not in seen_concepts:\n seen_concepts[v] = 1\n else:\n seen_concepts[v] += 1\n obj_co += [v]\n obj_co.sort()\n seen_object_co.add(tuple(obj_co))\n \n rel_co = []\n for k, v in rel_map.items():\n if v not in seen_concepts:\n seen_concepts[v] = 1\n else:\n seen_concepts[v] += 1\n rel_co += [v]\n rel_co.sort()\n seen_rel_co.add(tuple(rel_co))",
"_____no_output_____"
],
[
"test_seen_command_structs = {}\ntest_seen_concepts = {} # add in seen concepts, so we can select concepts that are seen, but new composites!\ntest_seen_object_co = set([])\ntest_seen_rel_co = set([])\n\nfor example_selected in split_test_data[\"examples\"][\"test\"]:\n rel_map = OrderedDict({})\n for ele in example_selected[\"relation_map\"]:\n rel_map[tuple(ele[0])] = ele[1]\n example_struct = OrderedDict({\n 'obj_pattern_map': example_selected[\"object_pattern_map\"],\n 'rel_map': rel_map,\n 'obj_map': example_selected[\"object_expression\"],\n 'grammer_pattern': example_selected['grammer_pattern'],\n 'adverb': example_selected['adverb_in_command'],\n 'verb': example_selected['verb_in_command']\n })\n obj_co = []\n for k, v in example_selected[\"object_expression\"].items():\n if v not in test_seen_concepts:\n test_seen_concepts[v] = 1\n else:\n test_seen_concepts[v] += 1\n obj_co += [v]\n obj_co.sort()\n test_seen_object_co.add(tuple(obj_co))\n \n rel_co = []\n for k, v in rel_map.items():\n if v not in test_seen_concepts:\n test_seen_concepts[v] = 1\n else:\n test_seen_concepts[v] += 1\n rel_co += [v]\n rel_co.sort()\n test_seen_rel_co.add(tuple(rel_co))",
"_____no_output_____"
],
[
"test_seen_object_co.intersection(seen_object_co)",
"_____no_output_____"
]
],
[
[
"B2: novel co-occurrence of relations",
"_____no_output_____"
]
],
[
[
"# this test may be a little too weak for now. maybe improve it to verify the shape world?\nsplit_test_path_to_data = f\"../../ReaSCAN-v1.0/ReaSCAN-compositional-b2/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {split_test_path_to_data}...\")\nsplit_test_data = json.load(open(split_test_path_to_data, \"r\"))",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-b2/data-compositional-splits.txt...\n"
],
[
"test_seen_command_structs = {}\ntest_seen_concepts = {} # add in seen concepts, so we can select concepts that are seen, but new composites!\ntest_seen_object_co = set([])\ntest_seen_rel_co = set([])\n\nfor example_selected in split_test_data[\"examples\"][\"test\"]:\n rel_map = OrderedDict({})\n for ele in example_selected[\"relation_map\"]:\n rel_map[tuple(ele[0])] = ele[1]\n example_struct = OrderedDict({\n 'obj_pattern_map': example_selected[\"object_pattern_map\"],\n 'rel_map': rel_map,\n 'obj_map': example_selected[\"object_expression\"],\n 'grammer_pattern': example_selected['grammer_pattern'],\n 'adverb': example_selected['adverb_in_command'],\n 'verb': example_selected['verb_in_command']\n })\n obj_co = []\n for k, v in example_selected[\"object_expression\"].items():\n if v not in test_seen_concepts:\n test_seen_concepts[v] = 1\n else:\n test_seen_concepts[v] += 1\n obj_co += [v]\n obj_co.sort()\n test_seen_object_co.add(tuple(obj_co))\n \n rel_co = []\n for k, v in rel_map.items():\n if v not in test_seen_concepts:\n test_seen_concepts[v] = 1\n else:\n test_seen_concepts[v] += 1\n rel_co += [v]\n rel_co.sort()\n test_seen_rel_co.add(tuple(rel_co))",
"_____no_output_____"
],
[
"test_seen_rel_co",
"_____no_output_____"
]
],
[
[
"C1: novel conjunctive clause length",
"_____no_output_____"
]
],
[
[
"# this test may be a little too weak for now. maybe improve it to verify the shape world?\nsplit_test_path_to_data = f\"../../ReaSCAN-v1.0/ReaSCAN-compositional-c1/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {split_test_path_to_data}...\")\nsplit_test_data = json.load(open(split_test_path_to_data, \"r\"))",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-c1/data-compositional-splits.txt...\n"
],
[
"for example in split_test_data[\"examples\"][\"test\"]:\n assert example[\"derivation\"] == \"$OBJ_0 ^ $OBJ_1 & $OBJ_2 & $OBJ_3\"\n assert (example[\"command\"].count(\"and\")) == 2",
"_____no_output_____"
]
],
[
[
"C2: novel relative clauses",
"_____no_output_____"
]
],
[
[
"# this test may be a little too weak for now. maybe improve it to verify the shape world?\nsplit_test_path_to_data = f\"../../ReaSCAN-v1.0/ReaSCAN-compositional-c2/data-compositional-splits.txt\"\nprint(f\"Reading dataset from file: {split_test_path_to_data}...\")\nsplit_test_data = json.load(open(split_test_path_to_data, \"r\"))",
"Reading dataset from file: ../../ReaSCAN-v1.0/ReaSCAN-compositional-c2/data-compositional-splits.txt...\n"
],
[
"for example in split_test_data[\"examples\"][\"test\"]:\n assert example[\"derivation\"] == \"$OBJ_0 ^ $OBJ_1 ^ $OBJ_2\"\n assert (example[\"command\"].count(\"that,is\")) == 2",
"_____no_output_____"
]
],
[
[
"**Conclusion**: No.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
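"code",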
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7c747f3a4d010563d6326b006c2e585dabf31ac | 19,718 | ipynb | Jupyter Notebook | profiling.ipynb | tusharhero/pytorch-wavenet | b101c93ceda5c9c0ec722da8261e4497a7924826 | [
"MIT"
] | 858 | 2017-04-20T13:18:15.000Z | 2022-03-23T21:07:51.000Z | profiling.ipynb | tusharhero/pytorch-wavenet | b101c93ceda5c9c0ec722da8261e4497a7924826 | [
"MIT"
] | 41 | 2017-06-22T02:26:39.000Z | 2022-03-27T23:29:37.000Z | profiling.ipynb | tusharhero/pytorch-wavenet | b101c93ceda5c9c0ec722da8261e4497a7924826 | [
"MIT"
] | 225 | 2017-06-21T06:32:03.000Z | 2022-03-27T23:28:00.000Z | 50.688946 | 121 | 0.48438 | [
[
[
"import time\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom wavenet_model import *\nfrom audio_data import WavenetDataset\nfrom wavenet_training import *\nfrom model_logging import *\nfrom scipy.io import wavfile\n\ndtype = torch.FloatTensor\nltype = torch.LongTensor\n\nuse_cuda = torch.cuda.is_available()\nif use_cuda:\n print('use gpu')\n dtype = torch.cuda.FloatTensor\n ltype = torch.cuda.LongTensor\nelse: \n print(\"no gpu found\")",
"no gpu found\n"
],
[
"model = WaveNetModel(layers=10,\n blocks=4,\n dilation_channels=32,\n residual_channels=32,\n skip_channels=32,\n output_length=64,\n dtype=dtype)\n#model = load_latest_model_from('snapshots', use_cuda=use_cuda)\nif use_cuda:\n model.cuda()\n\nprint('model: ', model)\nprint('receptive field: ', model.receptive_field)\nprint('parameter count: ', model.parameter_count())",
"model: WaveNetModel(\n (filter_convs): ModuleList(\n (0): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (1): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (2): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (3): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (4): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (5): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (6): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (7): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (8): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (9): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (10): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (11): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (12): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (13): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (14): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (15): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (16): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (17): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (18): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (19): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (20): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (21): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (22): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (23): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (24): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (25): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (26): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (27): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (28): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (29): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (30): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (31): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (32): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (33): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (34): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (35): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (36): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (37): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (38): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (39): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n )\n (gate_convs): ModuleList(\n (0): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (1): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (2): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (3): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (4): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (5): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (6): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (7): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (8): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (9): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (10): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (11): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n 
(12): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (13): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (14): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (15): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (16): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (17): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (18): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (19): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (20): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (21): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (22): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (23): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (24): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (25): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (26): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (27): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (28): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (29): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (30): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (31): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (32): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (33): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (34): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (35): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (36): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (37): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (38): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n (39): Conv1d (32, 32, kernel_size=(2,), stride=(1,), bias=False)\n )\n (residual_convs): ModuleList(\n (0): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (1): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (2): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (3): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (4): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (5): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (6): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (7): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (8): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (9): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (10): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (11): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (12): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (13): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (14): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (15): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (16): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (17): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (18): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (19): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (20): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (21): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (22): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (23): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (24): Conv1d (32, 32, kernel_size=(1,), 
stride=(1,), bias=False)\n (25): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (26): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (27): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (28): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (29): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (30): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (31): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (32): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (33): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (34): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (35): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (36): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (37): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (38): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (39): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (skip_convs): ModuleList(\n (0): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (1): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (2): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (3): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (4): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (5): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (6): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (7): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (8): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (9): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (10): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (11): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (12): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (13): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (14): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (15): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (16): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (17): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (18): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (19): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (20): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (21): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (22): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (23): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (24): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (25): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (26): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (27): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (28): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (29): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (30): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (31): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (32): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (33): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (34): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (35): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (36): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (37): Conv1d (32, 32, 
kernel_size=(1,), stride=(1,), bias=False)\n (38): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n (39): Conv1d (32, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (start_conv): Conv1d (1, 32, kernel_size=(1,), stride=(1,), bias=False)\n (end_conv): Conv1d (32, 256, kernel_size=(1,), stride=(1,))\n)\nreceptive field: 4093\nparameter count: 254240\n"
],
[
"batch_size = 32\ninput_data = Variable(torch.zeros([batch_size, 1, model.receptive_field + model.output_length - 1]))\nprint(input_data)",
"Variable containing:\n( 0 ,.,.) = \n 0 0 0 ... 0 0 0\n\n( 1 ,.,.) = \n 0 0 0 ... 0 0 0\n\n( 2 ,.,.) = \n 0 0 0 ... 0 0 0\n ... \n\n( 29 ,.,.) = \n 0 0 0 ... 0 0 0\n\n( 30 ,.,.) = \n 0 0 0 ... 0 0 0\n\n( 31 ,.,.) = \n 0 0 0 ... 0 0 0\n[torch.FloatTensor of size 32x1x4156]\n\n"
],
[
"with torch.autograd.profiler.profile(enabled=True, use_cuda=True) as prof:\n out = model(input_data)\n loss = F.cross_entropy(out.squeeze(), Variable(torch.zeros([batch_size * model.output_length]).type(ltype)))\n loss.backward()\nprint(prof.key_averages().table(sort_by='cpu_time_total'))",
"_____no_output_____"
],
[
"prof.export_chrome_trace('profiling/latest_trace.json')",
"_____no_output_____"
],
[
"with torch.autograd.profiler.profile() as prof:\n model.generate_fast(num_samples=100)\nprint(prof.key_averages().table(sort_by='cpu_time_total'))",
"one generating step does take approximately 0.012284069061279297 seconds)\n--------------- --------------- --------------- --------------- --------------- ---------------\nName CPU time CUDA time Calls CPU total CUDA total\n--------------- --------------- --------------- --------------- --------------- ---------------\nview 6.082us 0.000us 1 6.082us 0.000us\nsqueeze 1.857us 0.000us 100 185.680us 0.000us\nthreshold 3.185us 0.000us 100 318.454us 0.000us\ndiv 3.349us 0.000us 100 334.918us 0.000us\nsoftmax 6.903us 0.000us 100 690.307us 0.000us\nunsqueeze 1.704us 0.000us 4000 6816.902us 0.000us\nmul 2.180us 0.000us 4000 8721.320us 0.000us\nsigmoid 2.209us 0.000us 4000 8837.406us 0.000us\ntanh 2.279us 0.000us 4000 9115.436us 0.000us\nadd 2.339us 0.000us 8000 18713.918us 0.000us\ncat 5.684us 0.000us 3508 19939.675us 0.000us\nSetItem 10.601us 0.000us 4000 42403.212us 0.000us\nIndex 8.634us 0.000us 15408 133035.871us 0.000us\nConvForward 16.638us 0.000us 16200 269537.283us 0.000us\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c7492ab819996b46cc884bde9098cbbf54de06 | 9,522 | ipynb | Jupyter Notebook | testpy/Model Choice Loop.ipynb | vitorpavinato/abcranger | 71f950817bedeebed12d13610d8747c6dcc75a72 | [
"MIT"
] | 1 | 2020-07-29T13:31:43.000Z | 2020-07-29T13:31:43.000Z | testpy/Model Choice Loop.ipynb | vitorpavinato/abcranger | 71f950817bedeebed12d13610d8747c6dcc75a72 | [
"MIT"
] | null | null | null | testpy/Model Choice Loop.ipynb | vitorpavinato/abcranger | 71f950817bedeebed12d13610d8747c6dcc75a72 | [
"MIT"
] | null | null | null | 31.634551 | 97 | 0.509662 | [
[
[
"from scipy.ndimage.filters import gaussian_filter1d\nimport pandas as pd\nimport seaborn as sn\nimport sys\nsys.path.insert(0, '../build')\n\nimport pyabcranger\nimport sys\nimport elfi\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.stattools import acf, pacf\nimport math\nimport numpy as np\n\n\ndef MAq(t, n_obs=10000, batch_size=1, random_state=None):\n # Make inputs 2d arrays for numpy broadcasting with w\n s = t.shape[1]\n assert t.shape[0] == batch_size\n random_state = random_state or np.random\n w = random_state.randn(batch_size, n_obs+s) # i.i.d. sequence ~ N(0,1)\n x = w[:, s:]\n for p in range(s):\n x = x + np.repeat(np.reshape(t[:, p], (batch_size, 1)),\n n_obs, axis=1)*w[:, (s-p-1):(-p-1)]\n return x\n\ndef generate_maq_priors(q, tq , batch_size=1, random_state=None):\n assert tq.shape[0] == batch_size\n d = q // 2\n if (q % 2) == 0:\n d = d - 1\n random_state = random_state or np.random\n nc = random_state.randint(q, size=batch_size)\n nd = random_state.randint(d, size=batch_size)\n #r = np.random.uniform(min, max, (batch_size, 1))\n genr = np.exp(random_state.dirichlet(\n np.ones(q), batch_size)*np.log(np.abs(1/tq[:,np.newaxis])))\n # genr = genr * randSign(q,(r <= 0),batch_size)\n genr[:, -1] = -genr[:, -1]\n alphas = np.zeros((batch_size, q))\n for i in range(batch_size):\n gen = random_state.uniform(0, math.pi, nd[i])\n d2 = (q - (2*nd[i])) // 2\n if (q % 2) == 0:\n d2 = d2 - 1\n nq = random_state.randint(d2)\n alphas[i, :nd[i]] = gen\n alphas[i, nd[i]:(2*nd[i])] = -gen\n alphas[i, -(2*nq+1):] = -1\n roots = np.zeros((batch_size, q), dtype=complex)\n roots.real = np.cos(alphas)\n roots.imag = np.sin(alphas)\n if (q % 2) != 0:\n roots[:, nc] = -roots[:, nc]\n roots = roots / genr\n assert np.min(np.abs(roots)) > 1, str(roots) # Prior constraint checking\n poly = np.apply_along_axis(\n np.polynomial.polynomial.polyfromroots, 1, roots).real[:, 1:]\n return poly * np.reshape(tq, (batch_size, 1))\n\nNcovmult=4\n\ndef pautocorr(x, to=1):\n C = np.zeros((x.shape[0], to*Ncovmult))\n for i in range(x.shape[0]):\n C[i, 0::Ncovmult] = acf(x[i][1:], True, nlags=to, fft=True)[1:]\n res = pacf(x[i][1:], nlags=to, method='ols', alpha=0.05)\n C[i, 1::Ncovmult] = res[0][1:]\n C[i, 2::Ncovmult] = res[1][1:, 0]\n C[i, 3::Ncovmult] = res[1][1:, 1]\n return C\n\nclass ClassPrior(elfi.Distribution):\n def rvs(n, size=1, random_state=None):\n random_state = random_state or np.random\n return random_state.choice(n,size,p=np.arange(n,0,-1)/(n*(n+1)/2))\n \nclass GlobalPrior(elfi.Distribution):\n def rvs(qp, tq, qpriors, size=1, random_state=None):\n class_count = np.zeros(qpriors.shape[0], dtype='int')\n res = np.zeros((size[0], maxt))\n for q in range(qpriors.shape[0]):\n qr = qpriors[q]\n class_count[q] = np.sum(qp == q)\n if (class_count[q] > 0):\n res[qp == q, :qr] = generate_maq_priors(\n qr, tq[qp == q], class_count[q],random_state)\n return res\n \ndef listvar(prefix, s):\n return [prefix+str(i) for i in range(1, s+1)]\n\ndef listvarautocorr(s):\n arr = []\n for i in range(1, s//Ncovmult+1):\n arr.append(\"acf\"+str(i))\n arr.append(\"pacf\"+str(i))\n arr.append(\"pacfq1_\"+str(i))\n arr.append(\"pacfq2_\"+str(i))\n return arr",
"_____no_output_____"
],
[
"minprior = 1\nmaxprior = 2\nntree = 500\nNy = 200 # Length of the serie\nNcov = 20 # Maximum of autocorrelation lag\nq = 10 # Our chosen q for the observed data\nnref = 2000 # Number of expected simulated data from ABC\nbatchsize = 100\n\n#qpriors = np.array([6,7,8,9,10,11,12,13,14,15,16])\nqpriors = np.arange(6,17,dtype=np.int)\nnclasses = qpriors.shape[0]\nmaxt = np.max(qpriors)",
"_____no_output_____"
],
[
"tq = elfi.Prior('uniform',1,1)\nqp = elfi.Prior(ClassPrior, nclasses)\nt = elfi.Prior(GlobalPrior, qp, tq, qpriors)\n\nY = elfi.Simulator(MAq, t)\nS = elfi.Summary(pautocorr, Y, Ncov)\nd = elfi.Distance('euclidean', S)\n\nelfi.set_client('multiprocessing')\nrej = elfi.Rejection(d, batch_size=batchsize, output_names=['S'])",
"_____no_output_____"
],
[
"from tqdm.notebook import tqdm, trange\npredicted = []\npostproba = []\n\n\nwith trange(100) as tr:\n for k in tr:\n # Generation of the observed data\n modsimple = generate_maq_priors(q, np.random.uniform(low=1.0,high=2.0,size=(1)))\n y_obs = MAq(modsimple, Ny)\n\n Y.become(elfi.Simulator(MAq,t,observed=y_obs))\n result = rej.sample(nref, quantile=1.0,bar=False)\n\n rf = pyabcranger.reftable(\n nref,\n [np.sum(result.samples['qp'] == i) for i in range(nclasses)],\n qpriors,\n listvar('t', maxt),\n listvarautocorr(result.outputs['S'].shape[1]),\n result.outputs['S'],\n result.samples['t'],\n result.samples['qp']+1\n )\n\n postres = pyabcranger.modelchoice(\n rf, S.observed[0], \"--ntree \"+str(ntree), True)\n\n tr.set_postfix(model=qpriors[postres.predicted_model])\n predicted.append(qpriors[postres.predicted_model])\n postproba.append(postres.post_proba)",
"_____no_output_____"
],
[
"plt.figure()\nplt.hist(predicted,np.arange(6,18),weights=postproba,align='left')\nplt.xticks(np.arange(6,17));",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7c74c5ad743b27042521f235c6e254d53af1908 | 26,145 | ipynb | Jupyter Notebook | hypothesis_testing/10_HypothesisTesting/13_Simulating From the Null Hypothesis.ipynb | Zabamund/datasci-nano | ec8d8e4b04670b1827ea7e0e304dde5c3ab11fd7 | [
"MIT"
] | null | null | null | hypothesis_testing/10_HypothesisTesting/13_Simulating From the Null Hypothesis.ipynb | Zabamund/datasci-nano | ec8d8e4b04670b1827ea7e0e304dde5c3ab11fd7 | [
"MIT"
] | null | null | null | hypothesis_testing/10_HypothesisTesting/13_Simulating From the Null Hypothesis.ipynb | Zabamund/datasci-nano | ec8d8e4b04670b1827ea7e0e304dde5c3ab11fd7 | [
"MIT"
] | 1 | 2018-03-11T22:03:02.000Z | 2018-03-11T22:03:02.000Z | 93.042705 | 6,666 | 0.848231 | [
[
[
"### Simulating From the Null Hypothesis\n\nLoad in the data below, and follow the questions to assist with answering the quiz questions below.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nnp.random.seed(42)\n\nfull_data = pd.read_csv('../data/coffee_dataset.csv')\nsample_data = full_data.sample(200)",
"_____no_output_____"
]
],
[
[
"`1.` If you were interested in whether the average height for coffee drinkers is the same as for non-coffee drinkers, what would the null and alternative hypotheses be? Place them in the cell below, and use your answer to answer the first quiz question below.",
"_____no_output_____"
],
[
"**Since there is no directional component associated with this statement, a not-equal-to alternative seems most reasonable.**\n\n$$H_0: \\mu_{coff} - \\mu_{no} = 0$$\n\n\n$$H_1: \\mu_{coff} - \\mu_{no} \\neq 0$$\n\n\n**$\\mu_{coff}$ and $\\mu_{no}$ are the population mean values for coffee drinkers and non-coffee drinkers, respectively.**",
"_____no_output_____"
],
[
"`2.` If you were interested in whether the average height for coffee drinkers is less than that of non-coffee drinkers, what would the null and alternative hypotheses be? Place them in the cell below, and use your answer to answer the second quiz question below.",
"_____no_output_____"
],
[
"**In this case, there is a question associated with a direction - that is, that the average height for coffee drinkers is less than that of non-coffee drinkers. Below is one of the ways you could write the null and alternative. Since the mean for coffee drinkers is listed first here, the alternative suggests that this difference is negative.**\n\n$$H_0: \\mu_{coff} - \\mu_{no} \\geq 0$$\n\n\n$$H_1: \\mu_{coff} - \\mu_{no} < 0$$\n\n\n**$\\mu_{coff}$ and $\\mu_{no}$ are the population mean values for coffee drinkers and non-coffee drinkers, respectively.**",
"_____no_output_____"
],
[
"`3.` For 10,000 iterations: bootstrap the sample data, calculate the mean height for coffee drinkers and non-coffee drinkers, and calculate the difference in means for each sample. You will want to have three arrays at the end of the iterations - one for each mean and one for the difference in means. Use the results of your sampling distribution to answer the third quiz question below.",
"_____no_output_____"
]
],
[
[
"nocoff_means, coff_means, diffs = [], [], []\n\nfor _ in range(10000):\n bootsamp = sample_data.sample(200, replace = True)\n coff_mean = bootsamp[bootsamp['drinks_coffee'] == True]['height'].mean()\n nocoff_mean = bootsamp[bootsamp['drinks_coffee'] == False]['height'].mean()\n # append the info \n coff_means.append(coff_mean)\n nocoff_means.append(nocoff_mean)\n diffs.append(coff_mean - nocoff_mean) \n ",
"_____no_output_____"
],
[
"np.std(nocoff_means) # the standard deviation of the sampling distribution for nocoff",
"_____no_output_____"
],
[
"np.std(coff_means) # the standard deviation of the sampling distribution for coff",
"_____no_output_____"
],
[
"np.std(diffs) # the standard deviation for the sampling distribution for difference in means",
"_____no_output_____"
],
[
"plt.hist(nocoff_means, alpha = 0.5);\nplt.hist(coff_means, alpha = 0.5); # They look pretty normal to me!",
"_____no_output_____"
],
[
"plt.hist(diffs, alpha = 0.5); # again normal - this is by the central limit theorem",
"_____no_output_____"
]
],
[
[
"`4.` Now, use your sampling distribution for the difference in means and [the docs](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.normal.html) to simulate what you would expect if your sampling distribution were centered on zero. Also, calculate the observed sample mean difference in `sample_data`. Use your solutions to answer the last questions in the quiz below.",
"_____no_output_____"
],
[
"**We would expect the sampling distribution to be normal by the Central Limit Theorem, and we know the standard deviation of the sampling distribution of the difference in means from the previous question, so we can use this to simulate draws from the sampling distribution under the null hypothesis. If there is truly no difference, then the difference between the means should be zero.**",
"_____no_output_____"
]
],
[
[
"null_vals = np.random.normal(0, np.std(diffs), 10000) # Here are 10000 draws from the sampling distribution under the null",
"_____no_output_____"
],
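[
"# The prompt also asks for the observed sample mean difference in `sample_data`.\n# This is a small sketch of one way to compute it (using the same columns as the\n# bootstrap cell above) so that it can be compared against null_vals.\nsample_coff_mean = sample_data[sample_data['drinks_coffee'] == True]['height'].mean()\nsample_nocoff_mean = sample_data[sample_data['drinks_coffee'] == False]['height'].mean()\nobs_diff = sample_coff_mean - sample_nocoff_mean\nobs_diff",
"_____no_output_____"
],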
[
"plt.hist(null_vals); #Here is the sampling distribution of the difference under the null",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
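"code",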
"code",
"code"
]
] |
e7c74f137c481d7ea9b6d67f72d17f7e26d0c365 | 33,682 | ipynb | Jupyter Notebook | iterators-generators-and-uncertainty.ipynb | python-supply/iterators-generators-and-uncertainty | 8ba7133d9337d0256a4bf55da0dd7dbc802dd1cc | [
"MIT"
] | null | null | null | iterators-generators-and-uncertainty.ipynb | python-supply/iterators-generators-and-uncertainty | 8ba7133d9337d0256a4bf55da0dd7dbc802dd1cc | [
"MIT"
] | null | null | null | iterators-generators-and-uncertainty.ipynb | python-supply/iterators-generators-and-uncertainty | 8ba7133d9337d0256a4bf55da0dd7dbc802dd1cc | [
"MIT"
] | null | null | null | 27.097345 | 861 | 0.564485 | [
[
[
"# Iterators, Generators, and Uncertainty",
"_____no_output_____"
],
[
"Suppose you are working on a Python API that provides access to a real-time data stream (perhaps from an array of sensors or from a web service that handles user requests). You would like to deliver to the consumers of your API a simple but flexible abstraction that allows them to operate on new items from the stream when they choose to do so. Furthermore, you would like the API to allow users to do the following three things:\n\n* specify fall-back or default data streams (*e.g.*, if their first choice of stream is exhausted);\n* interleave items coming from multiple streams (presenting them as a single, new stream); and\n* process the items from a stream in parallel using multiprocessing.\n\nWhat abstraction should you use? How much of it must be custom-built and how much can be done using native Python features? When working with data streams, state spaces, and other abstractions that represent large or unbounded structures, it can be tempting to custom-build solutions that may become increasingly complex and difficult to maintain. Understanding a range of features that are already available in a language or its built-in libraries can help mitigate this while saving significant time and effort (both your own and that of others who build upon your work).",
"_____no_output_____"
],
[
"Iterators and generators are powerful tools in the Python language that have compelling applications in a number of contexts. This article reviews how they are defined, how they can be used, how they are related to one another, and how they can help you work in an elegant and flexible way with data structures and data streams of an unknown or infinite size.",
"_____no_output_____"
],
[
"## Iterables, Iterators, and Generators",
"_____no_output_____"
],
[
"When discussing Python, the terms *iterable*, *iterator*, and *generator* often appear in similar contexts or are even used interchangeably. These language features also solve similar problems. This can lead to some confusion, but there is a reason that on occasion these are conflated.",
"_____no_output_____"
],
[
"One way to understand the term *iterator* is that it refers to *any* Python data structure that *has an interface* that supports iteration over objects one at a time. A data structure is *iterable* if there is *at least one way* to construct an iterator that traverses it in some way. On the other hand, a *generator* is a particular kind of data structure, defined in a specific way within Python, that maintains an internal state and constructs or retrieves zero or more objects or values one at a time.",
"_____no_output_____"
],
[
"Thus, a generator by virtue of its characteristics can have an interface that allows it to qualify as an iterator, which consequently also makes it an iterable. In fact, all generators are iterators and iterable. However, not all iterators or iterable data structures are generators because there exist other approaches for building a Python object that possesses the kind of interface an iterator or iterable is expected to have.",
"_____no_output_____"
],
[
"## Iterators",
"_____no_output_____"
],
[
"If you want to implement an iterator data structure directly, you need to include a special method `__next__` in the class definition, which will be invoked whenever the built-in [`next`](https://docs.python.org/3/library/functions.html#next) function is applied to an instance of that data structure. The `skips` data structure below can emit every other positive integer via its definition of a `__next__` method.",
"_____no_output_____"
]
],
[
[
"class skips:\n def __init__(self):\n self.integer = 0\n\n def __next__(self):\n self.integer += 2\n return self.integer",
"_____no_output_____"
]
],
[
[
"Now it is possible to use the built-in [`next`](https://docs.python.org/3/library/functions.html#next) function to retrieve each item one at a time from an instance of the `skips` data structure.",
"_____no_output_____"
]
],
[
[
"ns = skips()\n[next(ns), next(ns), next(ns)]",
"_____no_output_____"
]
],
[
[
"The number of items over which the data structure will iterate can be limited by raising the `StopIteration` exception when more items can not (or should not) be returned.",
"_____no_output_____"
]
],
[
[
"class skips:\n def __init__(self, start, end):\n self.integer = start-2\n self.end = end\n\n def __next__(self):\n self.integer += 2\n if self.integer > self.end:\n raise StopIteration\n return self.integer",
"_____no_output_____"
]
],
[
[
"It is then the responsibility of any code that uses an instance of this iterator to catch this exception and handle it appropriately. It is worth acknowledging that this is a somewhat unusual use of a language feature normally associated with catching errors (because an iterator being exhausted is not always an error condition).",
"_____no_output_____"
]
],
[
[
"ns = skips(0, 10)\nwhile True:\n try:\n print(next(ns))\n except StopIteration:\n break",
"0\n2\n4\n6\n8\n10\n"
]
],
[
[
"## Iterables",
"_____no_output_____"
],
[
"In Python, there is a distinction between an *iterator* and an *iterable data structure*. This distinction is useful to maintain for a variety of reasons, including the ones below.\n\n* You may not want to clutter a data structure (as it may represent a spreadsheet, a database table, a large graph, and so on) with the state necessary to keep track of an iteration process.\n* You may want the data structure to support *multiple* iterators, either semantically (*e.g.*, iteration over rows versus over columns) or in terms of implementation (*e.g.*, breadth-first search versus depth-first search).\n* You may want to make it easy to *reset* iteration without fiddling with the internal state of a data structure instance (*i.e.*, resetting a traversal of the data structure instance could involve simply creating a fresh iterator).",
"_____no_output_____"
],
[
"As an example, consider a data structure `interval` that represents all positive integers in some range. Users might be allowed to obtain two different kinds of iterators for an instance of this data structure: those that iterate over only the even integers and those that iterate over only the odd integers.",
"_____no_output_____"
]
],
[
[
"class interval:\n def __init__(self, lower, upper):\n self.lower = lower\n self.upper = upper\n\n def evens(self):\n return skips(\n self.lower + (0 if (self.lower % 2) == 0 else 1),\n self.upper\n )\n \n def odds(self):\n return skips(\n self.lower + (0 if (self.lower % 2) == 1 else 1),\n self.upper\n )",
"_____no_output_____"
]
],
[
[
"The example below illustrates how an iterator returned by one of the methods in the definition of `interval` can be used.",
"_____no_output_____"
]
],
[
[
"ns = interval(0, 10).odds()\nwhile True: # Keep iterating and printing until exhaustion.\n try:\n print(next(ns))\n except StopIteration:\n break",
"1\n3\n5\n7\n9\n"
]
],
[
[
"So far in this article, the distinction between *iterators* and *iterable data structures* has been explicit for clarity. However, the convention that is supported (and sometimes expected) throughout Python is that an iterable data structure has a *single* iterator that can be used to iterate over it. This iterator is returned by a special method [`__iter__`](https://docs.python.org/3/reference/datamodel.html#object.__iter__) that is included in the class definition. In the example below, the `interval` class supports the creation of an iterator that visits every integer in the interval.",
"_____no_output_____"
]
],
[
[
"class every:\n def __init__(self, start, end):\n self.integer = start - 1\n self.end = end\n\n def __next__(self):\n self.integer += 1\n if self.integer > self.end:\n raise StopIteration\n return self.integer\n\nclass interval:\n def __init__(self, lower, upper):\n self.lower = lower\n self.upper = upper\n\n def __iter__(self):\n return every(self.lower, self.upper)",
"_____no_output_____"
]
],
[
[
"Python's built-in [`iter`](https://docs.python.org/3/library/functions.html#iter) function can be used to invoke `__iter__` for an instance of this data structure.",
"_____no_output_____"
]
],
[
[
"ns = iter(interval(1, 3))\nwhile True: # Keep iterating and printing until exhaustion.\n try:\n print(next(ns))\n except StopIteration:\n break",
"1\n2\n3\n"
]
],
[
[
"Including a definition for an `__iter__` method also makes it possible to use many of Python's built-in functions and language constructs that expect an iterable data structure. This includes functions such as [`list`](https://docs.python.org/3/library/functions.html#func-list) and [`set`](https://docs.python.org/3/library/functions.html#func-set), which use `iter` to obtain an iterator for their inputs.",
"_____no_output_____"
]
],
[
[
"list(interval(0, 10)), set(interval(0, 10))",
"_____no_output_____"
]
],
[
[
"This also includes comprehensions and `for` loops.",
"_____no_output_____"
]
],
[
[
"for n in interval(1, 4):\n print([k for k in interval(1, n)])",
"[1]\n[1, 2]\n[1, 2, 3]\n[1, 2, 3, 4]\n"
]
],
[
[
"There is nothing stopping you from making the iterator itself an iterable by having it return itself, as in the variant below.",
"_____no_output_____"
]
],
[
[
"class every:\n def __init__(self, start, end):\n self.integer = start - 1\n self.end = end\n\n def __next__(self):\n self.integer += 1\n if self.integer > self.end:\n raise StopIteration\n return self.integer\n\n def __iter__(self):\n return self",
"_____no_output_____"
]
],
[
[
"This approach ensures that there is no ambiguity (from a programmer's perspective) about what will happen when built-in functions such as `list` are applied to an instance of the data structure.",
"_____no_output_____"
]
],
[
[
"list(every(0, 10))",
"_____no_output_____"
]
],
[
[
"This practice is common and is the cause of some of the confusion and conflation that occurs between iterators and iterables. In addition to the potential for confusion, users of such a data structure must be careful to use the iterator as an iterable only once (or, alternatively, the object must reset its internal state every time `__iter__` is invoked).",
"_____no_output_____"
]
],
[
[
"ns = every(0, 10)\nlist(ns), list(ns) # Only returns contents the first time.",
"_____no_output_____"
]
],
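[
[
"One possible way to implement the alternative mentioned above is for `__iter__` to reset the internal state so that every new traversal starts from the beginning. The sketch below (in which the class name `every_restartable` is chosen only for illustration) is one such variant.\n\n```python\nclass every_restartable:\n    def __init__(self, start, end):\n        self.start = start\n        self.end = end\n        self.integer = start - 1\n\n    def __next__(self):\n        self.integer += 1\n        if self.integer > self.end:\n            raise StopIteration\n        return self.integer\n\n    def __iter__(self):\n        self.integer = self.start - 1  # Reset the position for each new traversal.\n        return self\n\nns = every_restartable(0, 10)\nlist(ns), list(ns)  # Both conversions now return the full contents.\n```",
"_____no_output_____"
]
],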
[
[
"Nevertheless, this can also be a useful practice. Going back to the example with `evens` and `odds`, ensuring the iterators returned by these methods are also iterable means they can be fed directly into contexts that expect an iterable.",
"_____no_output_____"
]
],
[
[
"class skips:\n def __init__(self, start, end):\n self.integer = start - 2\n self.end = end\n\n def __next__(self):\n self.integer += 2\n if self.integer > self.end:\n raise StopIteration\n return self.integer\n\n def __iter__(self):\n return self\n\nclass interval:\n def __init__(self, lower, upper):\n self.lower = lower\n self.upper = upper\n\n def evens(self):\n return skips(\n self.lower + (0 if (self.lower % 2) == 0 else 1),\n self.upper\n )\n \n def odds(self):\n return skips(\n self.lower + (0 if (self.lower % 2) == 1 else 1),\n self.upper\n )",
"_____no_output_____"
]
],
[
[
"The example below illustrates how this kind of interface can be used.",
"_____no_output_____"
]
],
[
[
"i = interval(0, 10)\nlist(i.evens()), set(i.odds())",
"_____no_output_____"
]
],
[
[
"## Generators",
"_____no_output_____"
],
[
"Generators are data structures defined using either the `yield` statement or comprehension notation (also known as a [generator expression](https://docs.python.org/3/glossary.html#term-generator-expression)). The example below defines a generator `skips` using both approaches. ",
"_____no_output_____"
]
],
[
[
"def skips(start, end):\n integer = start\n while integer <= end:\n yield integer\n integer += 2\n\ndef skips(start, end):\n return (\n integer\n for integer in range(start, end)\n if (integer - start) % 2 == 0\n )",
"_____no_output_____"
]
],
[
[
"When it is evaluated, a generator returns an iterator (more precisely called a [generator iterator](https://docs.python.org/3/glossary.html#term-generator-iterator)). These are technically both iterators and iterables. For example, as with any iterator, `next` can be applied directly to instances of this data structure.",
"_____no_output_____"
]
],
[
[
"ns = skips(0, 10)\nnext(ns), next(ns), next(ns)",
"_____no_output_____"
]
],
[
[
"As with any iterator, exhaustion can be detected by catching the `StopIteration` exception.",
"_____no_output_____"
]
],
[
[
"ns = skips(0, 2)\ntry:\n next(ns), next(ns), next(ns)\nexcept StopIteration:\n print(\"Exhausted generator iterator.\")",
"Exhausted generator iterator.\n"
]
],
[
[
"Finally, an instance of the data structure can be used in any context that expects an iterable.",
"_____no_output_____"
]
],
[
[
"list(skips(0, 10))",
"_____no_output_____"
]
],
[
[
"It is possible to confirm that the result of evaluating `skips` is indeed a generator by checking its type.\n",
"_____no_output_____"
]
],
[
[
"import types\nisinstance(skips(0, 10), types.GeneratorType)",
"_____no_output_____"
]
],
[
[
"It is also possible to inspect its type to confirm that `skips` indeed evaluates to an iterator.",
"_____no_output_____"
]
],
[
[
"import collections\nisinstance(skips(0, 10), collections.abc.Iterator)",
"_____no_output_____"
]
],
[
[
"## Data Structures of Infinite or Unknown Size",
"_____no_output_____"
],
[
"Among the use cases that demonstrate how iterators/generators serve as a powerful language feature are scenarios involving data structures whose size is unknown or unbounded/infinite (such as streams, very large files, databases, and so on). You have already seen that you can define an iterable that can produce new objects or values indefinitely, so iterables are an effective way to represent and encapsulate such structures.",
"_____no_output_____"
],
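[
"For instance, a generator with no terminating condition can stand in for a stream that never runs out of items. The sketch below simulates such a stream (the incrementing value is only a placeholder for real readings or requests), and a consumer simply draws as many items as it needs.\n\n```python\ndef sensor_stream():\n    reading = 0\n    while True:  # No terminating condition; items can be drawn indefinitely.\n        yield reading\n        reading += 1\n\ns = sensor_stream()\n[next(s) for _ in range(5)]  # Draw only as many items as are needed.\n```",
"_____no_output_____"
],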
[
"Returning to the example described at the beginning of the article, recall that you are faced with creating a Python API for working with data streams that might (or might not) run out of items that can be drawn from them. The advantages of leveraging iterables and generators should be evident at this point, so suppose you move ahead with this option and implement an iterable to represent a data stream. How can you address the three specific requirements (*i.e.*, default/fall-back streams, interleaving, and splitting for parallelism) using these features?",
"_____no_output_____"
],
[
"To satisfy the first requirement, you must allow a user to exhaust one iterable and then switch to another one. This is straightforward to do by constructing a generator that concatenates two iterables.",
"_____no_output_____"
]
],
[
[
"def concatenate(xs, ys):\n for x in xs:\n yield x\n for y in ys:\n yield y",
"_____no_output_____"
]
],
[
[
"Concatenating two instances of an iterable data structure is now straightforward.",
"_____no_output_____"
]
],
[
[
"list(concatenate(skips(0,5), skips(6,11)))",
"_____no_output_____"
]
],
[
[
"Notice that if the first iterable is never exhausted, the second one will never be used.",
"_____no_output_____"
],
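[
"A small sketch makes this concrete: if the first argument is a stream that never ends, items from the second argument never appear.\n\n```python\ndef zeros():\n    while True:  # An unbounded stream of zeros.\n        yield 0\n\nns = concatenate(zeros(), skips(0, 10))\n[next(ns) for _ in range(5)]  # Only zeros; skips(0, 10) is never reached.\n```",
"_____no_output_____"
],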
[
"To address the second requirement, first consider a simpler scenario. What if you would like to \"line up\" or \"pair up\" entries in two or more iterables? You can use the built-in [`zip`](https://docs.python.org/3/library/functions.html#zip) function.",
"_____no_output_____"
]
],
[
[
"list(zip(skips(0,5), skips(6,11)))",
"_____no_output_____"
]
],
[
[
"Notice that the result of evaluating `zip` is indeed an iterator.",
"_____no_output_____"
]
],
[
[
"import collections\nisinstance(\n zip(skips(0,5), skips(6,11)),\n collections.abc.Iterator\n)",
"_____no_output_____"
]
],
[
[
"Combining `zip` with comprehension syntax, you can now define a generator that *interleaves* two iterables (switching back and forth between emitting an item from one and then the other).",
"_____no_output_____"
]
],
[
[
"def interleave(xs, ys):\n return (\n z \n for (x, y) in zip(xs, ys) \n for z in (x, y)\n )",
"_____no_output_____"
]
],
[
[
"As with concatenation, interleaving is now concise and straightforward.",
"_____no_output_____"
]
],
[
[
"list(interleave(skips(0,5), skips(6,11)))",
"_____no_output_____"
]
],
[
[
"Finally, how can you help users process items from a stream in parallel? Because you are already using iterables, users have some options available to them from the built-in [`itertools`](https://docs.python.org/3/library/itertools.html) library.",
"_____no_output_____"
],
[
"One option is [`islice`](https://docs.python.org/3/library/itertools.html#itertools.islice), which behaves in a similar manner to Python [slice notation](https://docs.python.org/3/library/functions.html?highlight=slice#slice) (such as `xs[0:10]` to extract the first ten entries from a list `xs`). Users can use this function to extract items in batches and (1) pass each item in a batch to its own separate thread or (2) pass batches of items to separate threads. A basic batching method is presented below.",
"_____no_output_____"
]
],
[
[
"from itertools import islice\ndef batch(xs, size):\n ys = list(islice(xs, 0, size))\n while len(ys) > 0:\n yield ys\n ys = list(islice(xs, 0, size))",
"_____no_output_____"
]
],
[
[
"Notice that this method inherits the graceful behavior of slice notation when the boundaries of the slices do not line up exactly with the number entries in the data structure instance.",
"_____no_output_____"
]
],
[
[
"list(batch(skips(0,21), 3))",
"_____no_output_____"
]
],
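[
[
"One possible way to combine `batch` with multiprocessing is sketched below. The per-item function `work` is only a placeholder for whatever computation a consumer of the API might supply, and in practice it would be defined in an importable module so that worker processes can unpickle it.\n\n```python\nfrom multiprocessing import Pool\n\ndef work(item):\n    return item * item  # Placeholder for a real per-item computation.\n\nif __name__ == '__main__':\n    with Pool(4) as pool:\n        for group in batch(skips(0, 21), 3):\n            print(pool.map(work, group))  # Each batch is processed by the pool.\n```",
"_____no_output_____"
]
],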
[
[
"Can you define a generator that returns batches of batches (*e.g.*, at most `n` batches each of size at most `k`)?",
"_____no_output_____"
],
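[
"One possible answer (a sketch, not the only approach) simply applies `batch` twice: inner batches of size at most `k` are themselves batched into groups of at most `n`.\n\n```python\ndef batches_of_batches(xs, n, k):\n    # At most n inner batches per outer group, each of size at most k.\n    return batch(batch(xs, k), n)\n\nlist(batches_of_batches(skips(0, 21), 2, 3))\n```",
"_____no_output_____"
],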
[
"Another option is to use the [`tee`](https://docs.python.org/3/library/itertools.html#itertools.tee) function, which can duplicate a single iterable into multiple iterables. However, this function is really only simulating this effect by storing a large amount of auxiliary information from one of the iterables. Thus, it may use a significant amount of memory and is not safe to use with multiprocessing. It is best suited for situations in which the iterables are known to have a small number of items, as in the example below.",
"_____no_output_____"
]
],
[
[
"from itertools import tee\n(a, b) = tee(skips(0,11), 2)\n(list(a), list(b))",
"_____no_output_____"
]
],
[
[
"The example above is arguably implemented in a more clear and familiar way by simply wrapping the iterables using `list`.",
"_____no_output_____"
]
],
[
[
"ns = list(skips(0,11))\n(ns, ns)",
"_____no_output_____"
]
],
[
[
"## Further Reading",
"_____no_output_____"
],
[
"This article presents the relationships and distinctions between a number of related Python language constructs, and illustrates via the use case of data streams how these constructs can be leveraged. You can visit the Python Wiki if you are looking for additional discussion and examples of both [iterators](https://wiki.python.org/moin/Iterator) and [generators](https://wiki.python.org/moin/Generators). The Python documentation also contains a [Functional Programming HOWTO](https://docs.python.org/3/howto/functional.html) that discusses how iterators and generators offer new kinds of modularity and composability. Furthermore, the Python documentation entry for the built-in [itertools](https://docs.python.org/3/library/itertools.html) library contains many built-in functions and recommended patterns that are specialized to particular scenarios.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7c7532e6b317f73a46f791f42c752ea415743c8 | 35,741 | ipynb | Jupyter Notebook | Gravedad.ipynb | Urakami97/Gravedad-Tarea- | f0b59954d0fd0833d2be2eeb8cfab73fe7ece89b | [
"MIT"
] | null | null | null | Gravedad.ipynb | Urakami97/Gravedad-Tarea- | f0b59954d0fd0833d2be2eeb8cfab73fe7ece89b | [
"MIT"
] | null | null | null | Gravedad.ipynb | Urakami97/Gravedad-Tarea- | f0b59954d0fd0833d2be2eeb8cfab73fe7ece89b | [
"MIT"
] | null | null | null | 281.425197 | 33,042 | 0.918777 | [
[
[
"## TAREA GRAVEDAD",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"m = 1 \nx_0 = .5\nx_0_dot = .1",
"_____no_output_____"
],
[
"t = np.linspace(0, 50, 300)",
"_____no_output_____"
],
[
"gravedad=np.array([9.81,2.78,8.87,3.72,22.88])\ngravedad",
"_____no_output_____"
],
[
"plt.figure(figsize = (7, 4))\nfor indx, g in enumerate (gravedad):\n omega_0 = np.sqrt(g/m)\n x_t = x_0 *np.cos(omega_0 *t) + (x_0_dot/omega_0) * np.sin(omega_0 *t)\n x_t_dot = -omega_0 * x_0 * np.sin(omega_0 * t) + x_0_dot * np.cos(omega_0 * t)\n plt.plot(x_t, x_t_dot/omega_0, 'ro', ms = 2)\n plt.legend(loc='best', bbox_to_anchor=(1.01, 0.5), prop={'size': 14})\n plt.scatter (x_t , (x_t_dot/omega_0), cmap = \"viridis\", label = g)\nplt.show()",
"C:\\Users\\MaríaEsther\\Anaconda3\\lib\\site-packages\\matplotlib\\axes\\_axes.py:545: UserWarning: No labelled objects found. Use label='...' kwarg on individual plots.\n warnings.warn(\"No labelled objects found. \"\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7c7542bac23bf7a6eb0da727ff0b15dbeaeb03f | 79,742 | ipynb | Jupyter Notebook | code/Project_1_US_Children_9-30_final2.ipynb | cwilklow/ModSimPy | 793421d9610cedfa47ce976091ef917932577ce6 | [
"MIT"
] | null | null | null | code/Project_1_US_Children_9-30_final2.ipynb | cwilklow/ModSimPy | 793421d9610cedfa47ce976091ef917932577ce6 | [
"MIT"
] | null | null | null | code/Project_1_US_Children_9-30_final2.ipynb | cwilklow/ModSimPy | 793421d9610cedfa47ce976091ef917932577ce6 | [
"MIT"
] | null | null | null | 123.822981 | 21,920 | 0.854832 | [
[
[
"# Modeling and Simulation in Python-Project 1\n\n\nDhara Patel and Corinne Wilklow \n\nCopyright 2018 Allen Downey\n\nLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)\n",
"_____no_output_____"
]
],
[
[
"# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import functions from the modsim library\nfrom modsim import *\n\nfrom pandas import read_html\n\nprint('done')",
"done\n"
],
[
"# Importing Population Data\nfilename = 'data/US_Population_data.html'\ntables = read_html(filename, header=0, index_col=0, decimal='M')\ntable = tables[3]\ntable1 = table[1910.0:2010.0]\ntable1.columns = ['population']\nprint(table1)",
" population\nCensusyear \n1910.0 92228496.0\n1920.0 106021537.0\n1930.0 123202624.0\n1940.0 132164569.0\n1950.0 151325798.0\n1960.0 179323175.0\n1970.0 203211926.0\n1980.0 226545805.0\n1990.0 248709873.0\n2000.0 281421906.0\n2010.0 308745538.0\n"
]
],
[
[
"<bk> \n The state: initial child population, initial United States population\n\nThe system: birth rates, child mortality rates, mature rates(birth rates 18 years prior)\n\nMetrics: annual child population",
"_____no_output_____"
]
],
[
[
"def plot_results(population, childseries, title):\n \"\"\"Plot the estimates and the model.\n \n population: TimeSeries of historical population data\n childseries: TimeSeries of child population estimates\n title: string\n \"\"\"\n plot(population, ':', label='US Population')\n if len(childseries):\n plot(childseries, color='gray', label='US Children')\n # plot(ratioseries, label='Ratio of children')\n decorate(xlabel='Year', \n ylabel='Population (million)',\n title=title)",
"_____no_output_____"
],
[
"def plot_ratio(ratioseries, title):\n \"\"\"Plot the estimates and the model.\n \n population: TimeSeries of historical population data\n childseries: TimeSeries of child population estimates\n title: string\n \"\"\"\n if len(ratioseries):\n plot(ratioseries, color='gray', label='Ratio of Children')\n # plot(ratioseries, label='Ratio of children')\n decorate(xlabel='Year', \n ylabel='Population (million)',\n title=title)",
"_____no_output_____"
],
[
"population = table1.population / 1e6\nchildseries = TimeSeries()\nratioseries = TimeSeries()\nplot_results(population, childseries, 'U.S. population')",
"_____no_output_____"
]
],
[
[
"## Why is the proportion of children in the United States decreasing?\n<bk>\n Over the past two decades, the United States population grew by about 20%. During the same time frame, the nation’s child population grew by only 5%. The population all around the world is aging, and children represent a smaller and smaller share of it. There are other countries in which this decrease is more dramatic, such as Germany or Japan which no longer have a positive natural increase in population. A decreasing proportion of children is a problem because the issue will only compound over time, until the population as a whole begins to decline. \n<bk>\nThe decreasing ratio of children could be due to several factors: declining fertility rates, an aging population, and a drop in net immigration levels. Our model focuses on the effects of fertility rates and child mortality rates on proportions of children in the US. Specifically, if we sweep birthrates and child mortality rates, what effects does that have on the population as a whole? Could changing birth rates and death rates account for the entirety of the changing demographics? We will use US Census data from 1910-2010 to compare to our results.\n",
"_____no_output_____"
]
],
[
[
"#sweeping both the mortality rate and the birth rate will make the model more accurate \nbirthrate = [29.06, 25.03, 19.22, 22.63, 24.86, 20.33, 15.57, 15.83, 15.08, 13.97]\ndeathrate = linspace(0.0065, 0.0031, 10)\nmaturerate = [31.5, 29.06, 25.03, 19.22, 22.63, 24.86, 20.33, 15.57, 15.83, 15.08]\nprint(birthrate)\nprint(deathrate)\nprint(maturerate)",
"[29.06, 25.03, 19.22, 22.63, 24.86, 20.33, 15.57, 15.83, 15.08, 13.97]\n[0.0065 0.00612222 0.00574444 0.00536667 0.00498889 0.00461111\n 0.00423333 0.00385556 0.00347778 0.0031 ]\n[31.5, 29.06, 25.03, 19.22, 22.63, 24.86, 20.33, 15.57, 15.83, 15.08]\n"
],
[
"state = State(children = 47.3, t_pop= 151325798.0/1e6, ratio = 47.3/151325798.0/1e6)",
"_____no_output_____"
]
],
[
[
"Parameters:",
"_____no_output_____"
]
],
[
[
"system = System(birthrate = birthrate,\n maturerate = maturerate,\n deathrate = deathrate,\n t_0 = 1910.0,\n t_end = 2010.0,\n state=state)",
"_____no_output_____"
]
],
[
[
"Our update function computes the updated state of these parameters at the end of each ten year increment. ",
"_____no_output_____"
]
],
[
[
"def update_func1(state, t, system):\n t_pop=151325798.0\n\n if t == 1910:\n i = int((t-1910)/10)\n else: \n i = int((t-1910)/10 - 1)\n \n mrate = system.maturerate \n brate = system.birthrate\n drate = system.deathrate\n\n \n births = brate[i]/100 * state.children #metric\n maturings = mrate[i]/100 * state.children #metric\n deaths = drate[i]/100 * state.children #metric\n\n population = state.children + births - maturings - deaths\n #print('children',children)\n \n return State(children=population)",
"_____no_output_____"
]
],
[
[
"To test our update function, we'll input the initial condition:",
"_____no_output_____"
]
],
[
[
"update_func1(state,system.t_0,system)",
"_____no_output_____"
],
[
"def run_simulation(state, system, update_func):\n \"\"\"Simulate the system using any update function.\n \n state: initial State object\n system: System object\n update_func: function that computes the population next year\n \n returns: TimeSeries of Ratios\n \"\"\"\n #t_pop=151325798.0\n results = TimeSeries()\n state = system.state\n results[system.t_0] = state.children\n \n \n for t in linrange(1910.0, 2020.0):\n if t%10 == 0:\n '''if t == 1910:\n i = int((t-1910)/10)\n else: \n i = int((t-1910)/10 - 1)'''\n state.children = update_func1(state, t, system)\n results[t] = state.children\n \n return results",
"_____no_output_____"
],
[
"print(population[1910])",
"92.228496\n"
],
[
"'''def update_ratio(state, t, system):\n childpop = state.children\n popu = population[t]\n \n ratio = childpop/popu \n return State(ratio = ratio)\n \ndef run_ratio(state, system, update_ratio):\n results = TimeSeries()\n results[system.t_0] = state.ratio\n \n for t in linrange(1910.0, 2020.0):\n if t%10 == 0:\n results[t] = update_ratio(state, t, system)'''",
"_____no_output_____"
],
[
"childseries = run_simulation(state, system, update_func1)\n\nfor t in linrange(1910.0, 2020.0):\n if t%10 == 0:\n ratioseries[t] = childseries[t]/population[t]\n\nprint(ratioseries)",
"1910 0.500310\n1920 0.424573\n1930 0.350618\n1940 0.307835\n1950 0.278010\n1960 0.239825\n1970 0.202035\n1980 0.172592\n1990 0.157614\n2000 0.138243\n2010 0.124606\ndtype: float64\n"
],
[
"empty = TimeSeries()\nfig1 = plot_results(population, childseries, 'Population of children in U.S.')\n",
"_____no_output_____"
],
[
"fig2 = plot_ratio(ratioseries, 'Ratio of children in the U.S.')",
"_____no_output_____"
]
],
[
[
"## Interpretation \n<bk> \n The model above uses birth rates and child mortality rates to model the decline of child population in the United States. According to the model, the population of children shrunk from 46.14 million in 1910 to 38.47 million in 2010. The first model predicts that even as the population as a whole grows, the lower birth rate will cause the population of children to decrease. The second model, showing a ratio of the overall population to the child population, allows us to conclude that a decreased birth rate over time could easily lead to the declining population that we see today. \n<bk>\nAlthough this model allows us to conclude that birth rate and mortality rate has a definite effect on the child population as a whole, it does not allow us to conclude decisively why the population of children is decreasing. \n<bk>\nIn the future, we could build a model that takes into account more parameters, such as immigration rate, and attempt to build a model that fits all facets of the data. We could also explore how different portions of the population, not just children, are aging and changing the demographics of our society. \n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7c76af59e07a427c1466645d92b1b7d7b87b79f | 7,811 | ipynb | Jupyter Notebook | tfidf_tokenizer/jupyter_tokenizer.ipynb | koaNam/yata2 | 59608eed03df5ed4fc4eb5c3e75c448bd26e30b0 | [
"MIT"
] | null | null | null | tfidf_tokenizer/jupyter_tokenizer.ipynb | koaNam/yata2 | 59608eed03df5ed4fc4eb5c3e75c448bd26e30b0 | [
"MIT"
] | null | null | null | tfidf_tokenizer/jupyter_tokenizer.ipynb | koaNam/yata2 | 59608eed03df5ed4fc4eb5c3e75c448bd26e30b0 | [
"MIT"
] | null | null | null | 22.190341 | 150 | 0.454871 | [
[
[
"corpus = [\n 'this is the first document',\n 'this document is the second document',\n 'and this is the third one',\n 'is this the first document',\n]",
"_____no_output_____"
],
[
"from gensim.models import TfidfModel\nfrom gensim.corpora import Dictionary\n\ncorp = [x.split(\" \") for x in corpus]\ndct = Dictionary(corp) ",
"_____no_output_____"
],
[
"for id in dct:\n print(f\"{id}: {dct[id]}\")",
"0: document\n1: first\n2: is\n3: the\n4: this\n5: second\n6: and\n7: one\n8: third\n"
],
[
"c = [dct.doc2bow(line) for line in corp] \n\nmodel = TfidfModel(c) ",
"_____no_output_____"
],
[
"\"this is a test document\".split(\" \")",
"_____no_output_____"
],
[
"dct.doc2bow(['EXPERT', 'WARN', 'BACKLASH', 'DONALD', 'TRUMPS', 'CHINA', 'TRADE', 'POLICIES', 'NEW', 'YORK', 'TIMES'])",
"_____no_output_____"
],
[
"[(dct[i[0]] + \": \" + str(i[1])) for i in model.__getitem__(dct.doc2bow(\"this is a test document\".split(\" \")), eps=-1)]",
"_____no_output_____"
],
[
"model.__getitem__(dct.doc2bow(\"this is a test document\".split(\" \")), eps=-1)",
"_____no_output_____"
],
[
"[(dct[i[0]] + \": \" + str(i[1])) for i in model.__getitem__(dct.doc2bow(\"this is test\".split(\" \")), eps=0)]",
"_____no_output_____"
],
[
"[(dct[i[0]] + \": \" + str(i[1])) for i in model[c[0]]]\n",
"_____no_output_____"
],
[
"from elasticsearch import Elasticsearch\nfrom elasticsearch import helpers\nimport spacy\nimport re\n\nclass ElasticCorpus:\n \n def __init__(self, host, port, username, password):\n self.elastic = Elasticsearch([{'host':\"localhost\" , 'port': 9200}], http_auth=(\"elastic\",\"elastic\"))\n self.space = spacy.load('en_core_web_sm')\n self.dictonary = Dictionary()\n \n def __iter__(self):\n for entry in helpers.scan(self.elastic, query={\"query\": {\"match_all\": {}}}, _source=[\"title\"], index=\"documents\", size=2000):\n text = entry[\"_source\"][\"title\"]\n text = re.sub('[^A-Za-z0-9 ]+', '', text)\n text = re.sub(' +', ' ', text)\n \n doc = self.space(text)\n tokens = [t.lemma_.upper() for t in doc if not t.is_stop]\n self.dictonary.add_documents([tokens])\n \n print(self.counter)\n \n yield self.dictonary.doc2bow(tokens)",
"_____no_output_____"
],
[
"ec = ElasticCorpus(\"localhost\", 9200, \"elastic\", \"elastic\")",
"_____no_output_____"
],
[
"model = TfidfModel(ec) ",
"_____no_output_____"
],
[
"from elasticsearch import Elasticsearch\nfrom elasticsearch import helpers\n\nes = Elasticsearch([{'host':\"localhost\" , 'port': 9200}], http_auth=(\"elastic\",\"elastic\"))\n\ni=0\nfor entry in helpers.scan(es, query={\"query\": {\"match_all\": {}}}, _source=[\"title\"], index=\"documents\", size=2000):\n i = i +1\n print(entry[\"_source\"][\"title\"])\n break\n \nprint(i)",
"Experts Warn of Backlash in Donald Trump’s China Trade Policies - The New York Times\n1\n"
],
[
"numList = [12, 13, 14, 15, 16]\nnumList[:3]",
"_____no_output_____"
],
[
"es.count(index='documents', body={'query': {\"match_all\": {}}})[\"count\"]",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c77a4026f0b93525cac645f10cf471297705fe | 48,469 | ipynb | Jupyter Notebook | eval/diff_3d_ROC_for_training.ipynb | searobbersduck/FattyLiver_Solution | 7b8542e70cdb4417889799ea6da2c794e9eae392 | [
"MIT"
] | null | null | null | eval/diff_3d_ROC_for_training.ipynb | searobbersduck/FattyLiver_Solution | 7b8542e70cdb4417889799ea6da2c794e9eae392 | [
"MIT"
] | null | null | null | eval/diff_3d_ROC_for_training.ipynb | searobbersduck/FattyLiver_Solution | 7b8542e70cdb4417889799ea6da2c794e9eae392 | [
"MIT"
] | null | null | null | 122.706329 | 35,708 | 0.851183 | [
[
[
"import os\nos.environ['CUDA_VISIBLE_DEVICES']='1'\nimport sys\n\nsys.path.append('../')\n\nimport csv\nimport numpy as np\nimport sys\nimport scipy.ndimage as nd\nimport json\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torch.utils.data import Dataset, DataLoader\nfrom models.resnet import *\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport time\nimport math\nfrom utils.utils import AverageMeter\nfrom datasets.FattyLiverDatasets import FattyLiverClsDatasetsDiff3D \nfrom train.train_3d_cls2 import test\n\nimport torch.nn.functional as F\n\nimport scipy.ndimage as nd\nimport json\nimport pickle\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"config_file = '../config/config_diff_3d.json'\nwith open(config_file,encoding='gb2312') as f:\n config = json.load(f)\nconfig",
"_____no_output_____"
],
[
"batch_size = 2\nnum_workers = 4\nphase = 'train'\nepochs = 10000\ndisplay = 2\ncrop_size = [16, 384, 512]",
"_____no_output_____"
],
[
"model = resnet34(num_classes=2, \n shortcut_type=True, \n sample_size_y=crop_size[1], \n sample_size_x=crop_size[2], sample_duration=crop_size[0])\n\n\n# pretrained_weights = '../data/experiment_0/9.model_cls2_exp1/ct_pos_recogtion_20200819110631/ct_pos_recognition_0047_best.pth'\n# pretrained_weights = '../data/experiment_0/9.model_cls2_exp1/ct_pos_recogtion_20200820103204/ct_pos_recognition_0007_best.pth'\n# pretrained_weights = '../data/experiment_0/9.model_cls2_exp1/ct_pos_recogtion_20200820135922/ct_pos_recognition_0022_best.pth'\n\npretrained_weights = '../data/z16_zhenni_Fattyliver_v3_cls2/raw_diff_Fattyliver/raw_diff_0.7647058823529411_55_Fattyliver.pth'\n# pretrained_weights = '../data/z16_zhenni_Fattyliver_v3_cls2/raw_diff_Fattyliver/raw_diff_0.7058823529411765_27_Fattyliver.pth'\n# pretrained_weights = '/home/zhangwd/code/work/FattyLiver_Solution/data/experiment_Oct_cls2/fattyliver_task_raw_diff_best.pth/fattyliver_z16_raw_diff_9.pth'\n\n# pretrained_weights = '../data/z16_zhenni_Fattyliver_v3_cls2/cut_diff_Fattyliver/cut_diff_0.7058823529411765_13_Fattyliver.pth'\n\nmodel.load_state_dict(torch.load(pretrained_weights))",
"../models/resnet.py:233: UserWarning: nn.init.kaiming_normal is now deprecated in favor of nn.init.kaiming_normal_.\n m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')\n"
],
[
"data_root = '../data/experiment_0/0.ori'\nconfig_test = '../data/config/config_train.txt'\ntest_ds = FattyLiverClsDatasetsDiff3D(data_root, config_test,crop_size)\ntest_dataloader = DataLoader(test_ds, batch_size=batch_size, shuffle=False,\n num_workers=num_workers, pin_memory=False)",
"====> fatty liver count is:115\n"
],
[
"criterion = nn.CrossEntropyLoss().cuda()\nacc, logger, tot_pred, tot_label, tot_prob = test(test_dataloader, nn.DataParallel(model).cuda(), criterion, 0, 20)\nprint(acc)\n# print(tot_prob)",
"../train/train_3d_cls2.py:115: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n tot_prob = np.append(tot_prob, F.softmax(output).cpu().detach().numpy()[:,1])\n"
],
[
"def acu_curve(y,prob):\n from sklearn import metrics\n fpr,tpr,threshold = metrics.roc_curve(y,prob) ###计算真正率和假正率\n roc_auc = metrics.auc(fpr,tpr) ###计算auc的值\n \n plt.figure()\n lw = 2\n plt.figure(figsize=(10,10))\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.3f)' % roc_auc) ###假正率为横坐标,真正率为纵坐标做曲线\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic train')\n plt.legend(loc=\"lower right\")\n \n plt.show()\n\n\ndef plot_roc(y_true, y_pred, class_name='dr'):\n print('\\n====> plot {} info:\\n'.format(class_name))\n log = []\n from sklearn import metrics\n def calc_metrics_table(y_true, y_pred, thresholds):\n metrics_list = list()\n for threshold in thresholds:\n y_pred_binary = np.zeros(y_pred.shape, dtype=np.uint8)\n y_pred_binary[y_pred>threshold] = 1\n tn, fp, fn, tp = metrics.confusion_matrix(y_true, y_pred_binary).ravel()\n# print('tn:{:.3f}\\tfp:{:.3f}\\tfn:{:.3f}\\ttp:{:.3f}\\t'.format(tn, fp, fn, tp))\n accuracy = (tp+tn)/(tn+fp+fn+tp)\n sensitivity = tp/(tp+fn)\n specificity = tn/(fp+tn)\n ppv = tp/(tp+fp)\n npv = tn/(tn+fn)\n metrics_list.append([threshold, accuracy, sensitivity, specificity, ppv, npv])\n metrics_table = pd.DataFrame(np.array(metrics_list), columns=['threshold','accuracy','sensitivity','specificity','ppv','npv'])\n return metrics_table\n\n\n fpr, tpr, thres = metrics.roc_curve(y_true, y_pred)\n# print('fpr\\t\\t\\t','tpr')\n# for i in range(len(fpr)):\n# print(fpr[i],'\\t',tpr[i])\n\n auc = metrics.auc(fpr, tpr)\n\n thresholds = np.arange(0.01, 1., 0.01)\n metrics_table = calc_metrics_table(y_true, y_pred, thresholds)\n\n print('\\nAUC:%.4f\\n'% auc)\n log.append('AUC:%.4f'% auc)\n\n# plt.figure()\n# plt.title('{} roc curve'.format(class_name))\n# plt.plot(fpr, tpr, 'r')\n# plt.xlabel('fpr')\n# plt.ylabel('tpr')\n# plt.xticks(np.arange(0, 1.1, step=0.1))\n# plt.yticks(np.arange(0, 1.1, step=0.1))\n# plt.grid(ls='--')\n# plt.show()\n acu_curve(y_true, y_pred)\n\n print(metrics_table)\n log.append(metrics_table)\n \n metrics_table.to_csv('/home/zhangwd/code/work/FattyLiver_Solution/train/Train_3D_0.54.csv')\n\n return log",
"_____no_output_____"
],
[
"log = plot_roc(np.array(tot_label, dtype=np.float32), np.array(tot_prob), 'fatty liver classification 2')",
"\n====> plot fatty liver classification 2 info:\n\n\nAUC:0.5421\n\n"
],
[
"# from sklearn import metrics\n# ?metrics.roc_curve",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c783d19ddd8ec04e59386be7393d437daa1ff3 | 5,210 | ipynb | Jupyter Notebook | big-docs-example2/big-docs-example08.ipynb | DavidLeoni/jubuild | 0ae2c9c5e78a74bb9d7dbedd00ca2696b03f3e8e | [
"Apache-2.0"
] | null | null | null | big-docs-example2/big-docs-example08.ipynb | DavidLeoni/jubuild | 0ae2c9c5e78a74bb9d7dbedd00ca2696b03f3e8e | [
"Apache-2.0"
] | 85 | 2017-09-20T12:29:11.000Z | 2022-02-22T09:42:33.000Z | big-docs-example2/big-docs-example08.ipynb | DavidLeoni/jubuild | 0ae2c9c5e78a74bb9d7dbedd00ca2696b03f3e8e | [
"Apache-2.0"
] | 1 | 2017-09-20T18:11:28.000Z | 2017-09-20T18:11:28.000Z | 20.11583 | 53 | 0.46334 | [
[
[
"# Big docs example 9",
"_____no_output_____"
],
[
"## reasonable paragraph \n\n### reasonable sub paragraph\n\n## reasonable paragraph \n\n### reasonable sub paragraph\n\n## paragraph with long text\n\n### sub paragraph with long text\n\n## paragraph with long text\n\n### sub paragraph with long text\n\n## paragraph with long text\n\n### sub paragraph with long text\n\n## paragraph with extra super long text\n\n### sub paragraph with extra super long text\n\n## paragraph with extra super long text\n\n### sub paragraph with extra super long text\n\n## paragraph with extra super long text\n\n### sub paragraph with extra super long text\n\n## paragraph with extra super long text\n\n### sub paragraph with extra super long text\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph",
"_____no_output_____"
],
[
"## reasonable paragraph \n\n### reasonable sub paragraph\n\n## reasonable paragraph \n\n### reasonable sub paragraph\n\n## paragraph with long text\n\n### sub paragraph with long text\n\n## paragraph with long text\n\n### sub paragraph with long text\n\n## paragraph with long text\n\n### sub paragraph with long text\n\n## paragraph with extra super long text\n\n### sub paragraph with extra super long text\n\n## paragraph with extra super long text\n\n### sub paragraph with extra super long text\n\n## paragraph with extra super long text\n\n### sub paragraph with extra super long text\n\n## paragraph with extra super long text\n\n### sub paragraph with extra super long text\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph\n\n## paragraph\n\n### sub paragraph",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
]
] |
e7c78d807f4c791646b25264dc026370e0218990 | 10,899 | ipynb | Jupyter Notebook | Miscellaneous_Tools/Napari_Software/Napari_Getting_Started.ipynb | The-Kristina/CellComp | 29ec7690e0d9adb1a6214937ca41fd1dadce18c6 | [
"CNRI-Python",
"RSA-MD",
"Xnet",
"Net-SNMP",
"X11"
] | 7 | 2019-05-13T10:07:44.000Z | 2022-03-01T16:20:48.000Z | Miscellaneous_Tools/Napari_Software/Napari_Getting_Started.ipynb | The-Kristina/CellComp | 29ec7690e0d9adb1a6214937ca41fd1dadce18c6 | [
"CNRI-Python",
"RSA-MD",
"Xnet",
"Net-SNMP",
"X11"
] | null | null | null | Miscellaneous_Tools/Napari_Software/Napari_Getting_Started.ipynb | The-Kristina/CellComp | 29ec7690e0d9adb1a6214937ca41fd1dadce18c6 | [
"CNRI-Python",
"RSA-MD",
"Xnet",
"Net-SNMP",
"X11"
] | 3 | 2020-04-23T18:13:20.000Z | 2020-11-11T18:46:48.000Z | 123.852273 | 1,597 | 0.68676 | [
[
[
"from skimage import data\nimport napari\n",
"_____no_output_____"
],
[
"directory = '/Volumes/lowegrp/Data/Kristina/MDCK_90WT_10Sc_NoComp/17_07_24/pos0/density_matlab_python_comparison/s_0_before.tif'\n\nwith napari.gui_qt():\n viewer = napari.Viewer()\n viewer = napari.view_image(directory, rgb=False)\n\n ",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e7c7a98058bb8f2388be00cbd7858c2a0b3d285c | 7,457 | ipynb | Jupyter Notebook | lectures/week1/Numbers_1.Integers .ipynb | yh2420/MATH50003NumericalAnalysis | a899aa6459cf1cdf1b028ccfd431cff2ba2bcadb | [
"MIT"
] | null | null | null | lectures/week1/Numbers_1.Integers .ipynb | yh2420/MATH50003NumericalAnalysis | a899aa6459cf1cdf1b028ccfd431cff2ba2bcadb | [
"MIT"
] | null | null | null | lectures/week1/Numbers_1.Integers .ipynb | yh2420/MATH50003NumericalAnalysis | a899aa6459cf1cdf1b028ccfd431cff2ba2bcadb | [
"MIT"
] | null | null | null | 19.071611 | 118 | 0.486791 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7c7adf571f0f8e4e78ec9dd8a5cf053e0e6b463 | 9,543 | ipynb | Jupyter Notebook | .ipynb_checkpoints/learnJupyter-checkpoint.ipynb | kalz2q/files | 272cd4326d89feb2300397ee3f50ce0e5484da1e | [
"MIT"
] | null | null | null | .ipynb_checkpoints/learnJupyter-checkpoint.ipynb | kalz2q/files | 272cd4326d89feb2300397ee3f50ce0e5484da1e | [
"MIT"
] | null | null | null | .ipynb_checkpoints/learnJupyter-checkpoint.ipynb | kalz2q/files | 272cd4326d89feb2300397ee3f50ce0e5484da1e | [
"MIT"
] | null | null | null | 24.220812 | 1,784 | 0.587027 | [
[
[
"# Jupyter (iPython) Notebookを使って技術ノート環境を構築する方法\n\nmyenigma.hatenablog.com",
"_____no_output_____"
]
],
[
[
"from sympy import *",
"_____no_output_____"
],
[
"x=Symbol('x')",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"init_printing()",
"_____no_output_____"
],
[
"expand((x - 3)**5)",
"_____no_output_____"
]
],
[
[
"しかし、下記のJupyter Notebooksのextensionをインストールすることで、\n\n様々な拡張機能が使えるようになり、\n\n画像のドラッグアンドドロップもできるようになります。\n\nインストールは、READMEにある通り、\n\n下記のコマンドでOKです。\n\n(下記のインストールをする場合は\n\nJupyter Notebookのプロセスはkillしておきましょう)\n\n\n$ pip install https://github.com/ipython-contrib/IPython-notebook-extensions/archive/master.zip –user\n\n\n続いて、Jupyter Notebookを起動し、",
"_____no_output_____"
],
[
"URLに/nbextensions/を追加して、移動すると\n\n下記のような拡張機能の追加画面に移動します。\n\nあとは、Drag and DropのチェックボタンをONして下さい。\n\nすると、Jupyter Notebookで画像をドラッグアンドドロップすると、\n\n画像が挿入されるようになります。\n\nちなみに画像ファイルはD&Dすると\n\n.ipynbファイルと\n\n同じ場所にコピーされます。\n\n目次を見出し情報から自動生成する\n上記のIPython-notebook-extensionsの一機能を使うことで、\n\nMarkdownの見出し情報から、\n\n上記のように自動で目次を生成することができます。\n\n使い方としては、\n\n先ほどの/nbextensions/の設定画面で、\n\nTable Contents(2)にチェックをいれます。\n\n加えて、同じ設定画面で、\n\n“Add a Table of Contents at the top of the notebook”\n\nにチェックをいれます。\n\n最後に下記のように\n\n目次ウインドウを表示させて、\n\ntボタンを押すと、\n\n自動的に目次情報が先頭のセルに追加されます。\n\nレポートタイトルを入力する方法\n残念ながら、今のところ\n\nレポートのタイトルのようなものを\n\n入力するツールは見つかっていませんが、\n\nJupyterのMarkdownのモードでは、\n\nHTMLを入力するとマークアップされた文字を入力できます。\n\nこの機能を使うことで、レポートタイトルっぽい文字を表示できました。\n\n例えば、下記のようなhtmlを\n\nJupyterのMarkdownモードのセルに入力すると、\n\n<br />\n\n<div style=\"text-align: center;\">\n<font size=\"7\">Jupyter Report</font>\n</div>\n<br />\n<div style=\"text-align: right;\">\n<font size=\"4\">Atsushi Sakai</font>\n</div>\n\n<br />\n下記のように表示されます。\n\nPDFに出力する\n上記の方法である程度のレポート形式の技術ノートを作ることは\n\nできるはずなので、あとはPDFなどに変換すれば、\n\nプログラマ以外の他の人と資料を共有することができます。\n\nMacの場合\nJupyter NotebookのデータをPDFに変換する方法は色々ありますが、\n\nMacで一番シンプルなのは、Jupyter Notebookをブラウザで開いた状態で、\n\nCtrl-Pで、Macの標準機能でPDFに変換するのが一番簡単だと思います。\n\n下記のように詳細設定の部分で、ヘッダーとフッターに\n\n時刻や、ファイル名、ページなどを入力する設定ができます。\n\nあとは普通にPDFを出力すると、\n\n下記のようにそれなりのレポートがPDFで作成できます。\n\nJupyterでプレゼン資料を作る方法\n下記を参照下さい。\n\nmyenigma.hatenablog.com\n\nJupyterのサーバをHerokuの無料枠で構築する方法\n下記を参照下さい。\n\nmyenigma.hatenablog.com\n\nJupyter Markdown数式の入力テンプレート\n最近、数式の入力はすべてJupyterで実施していますが、\n\n複雑でいつもググっている数式コマンドをメモとして残しておきます。\n\n行列\n\\begin{equation*}\n\\begin{bmatrix}\n1 & 0 & \\cdots \\\\\n0 & 1 & \\cdots \\\\\n\\vdots & \\vdots & \\ddots \\\\\n-1 & 0 & \\cdots \\\\\n0 & -1 & \\cdots \\\\\n\\vdots & \\vdots & \\ddots \\\\\n\\end{bmatrix}\n\\end{equation*}\n数式参考リンク\nできれば改善してもらいたい部分\nもう少しで完全にWordをおさらばできそうなので、\n\n下記のような機能を実現してもらいたいですね。\n\n参考文献管理\n\n図のタイトル入力\n\n自動図番号割り当て\n\n上記のような機能を実現できる拡張などを知っている方は\n\nコメントなどで教えて頂けると幸いです。\n\n(Python使えるから、自分で作ればいいのか。。。)\n\nvimユーザのためのJupyterプラグイン\n下記を参照下さい\n\nlambdalisue.hatenablog.com\n\nなにかおかしいので云々。\nbookmark",
"_____no_output_____"
],
[
"上記のプラグインを使うことで、\n\nJupyterのエディタの部分でvimのキーバインドが使えます。\n\nJupyter Notebook上でコードの処理時間を計測する\nJupyter notebookでは、\n\n%timeit\n\nの後にコードを記述すると、そのコードの計算時間を計測してくれます。\n\nコードやアルゴリズムのレポートに便利ですね。\n\nmoqada.hatenablog.com\n\nJupyter上でモジュールや関数のdocstringを確認する\nJupyter notebookでは、\n\nモジュールや関数の後ろに?(はてな)をつけると、\n\ndocstringを表示してくれるので、\n\nマニュアル確認も簡単です。\n\n更にJupyterを学びたい人は\n下記の資料がおすすめです。\n\n\n\nまた、どうしても解決できない問題などが\n\nある場合は下記のようなQ&Aサイトで質問してみると、\n\nかなりの確率で回答がもらえると思います。\n\n参考資料\nmyenigma.hatenablog.com\n\nmyenigma.hatenablog.com\n\nmyenigma.hatenablog.com\n\nmyenigma.hatenablog.com\n\nmyenigma.hatenablog.com\n\nMyEnigma Supporters\nもしこの記事が参考になり、\n\nブログをサポートしたいと思われた方は、\n\nこちらからよろしくお願いします。",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e7c7b0241505ff2e073cf9c6a8647e24a2c200b1 | 173,159 | ipynb | Jupyter Notebook | IBM_AI/4_Pytorch/4.2.multiple_linear_regression_training_v2.ipynb | merula89/cousera_notebooks | caa529a7abd3763d26f3f2add7c3ab508fbb9bd2 | [
"MIT"
] | null | null | null | IBM_AI/4_Pytorch/4.2.multiple_linear_regression_training_v2.ipynb | merula89/cousera_notebooks | caa529a7abd3763d26f3f2add7c3ab508fbb9bd2 | [
"MIT"
] | null | null | null | IBM_AI/4_Pytorch/4.2.multiple_linear_regression_training_v2.ipynb | merula89/cousera_notebooks | caa529a7abd3763d26f3f2add7c3ab508fbb9bd2 | [
"MIT"
] | null | null | null | 282.017915 | 77,136 | 0.928349 | [
[
[
"<a href=\"http://cocl.us/pytorch_link_top\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png\" width=\"750\" alt=\"IBM Product \" />\n</a> ",
"_____no_output_____"
],
[
"<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png\" width=\"200\" alt=\"cognitiveclass.ai logo\" />",
"_____no_output_____"
],
[
"<h1>Linear Regression Multiple Outputs</h1> ",
"_____no_output_____"
],
[
"<h2>Table of Contents</h2>\n<p>In this lab, you will create a model the PyTroch way. This will help you more complicated models.</p>\n\n<ul>\n <li><a href=\"#Makeup_Data\">Make Some Data</a></li>\n <li><a href=\"#Model_Cost\">Create the Model and Cost Function the PyTorch way</a></li>\n <li><a href=\"#BGD\">Train the Model: Batch Gradient Descent</a></li>\n</ul>\n<p>Estimated Time Needed: <strong>20 min</strong></p>\n\n<hr>",
"_____no_output_____"
],
[
"<h2>Preparation</h2>",
"_____no_output_____"
],
[
"We'll need the following libraries:",
"_____no_output_____"
]
],
[
[
"# Import the libraries we need for this lab\n\nfrom torch import nn,optim\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom torch.utils.data import Dataset, DataLoader",
"_____no_output_____"
]
],
[
[
"Set the random seed:",
"_____no_output_____"
]
],
[
[
"# Set the random seed to 1. \n\ntorch.manual_seed(1)",
"_____no_output_____"
]
],
[
[
"Use this function for plotting: ",
"_____no_output_____"
]
],
[
[
"# The function for plotting 2D\n\ndef Plot_2D_Plane(model, dataset, n=0):\n w1 = model.state_dict()['linear.weight'].numpy()[0][0]\n w2 = model.state_dict()['linear.weight'].numpy()[0][1]\n b = model.state_dict()['linear.bias'].numpy()\n\n # Data\n x1 = data_set.x[:, 0].view(-1, 1).numpy()\n x2 = data_set.x[:, 1].view(-1, 1).numpy()\n y = data_set.y.numpy()\n\n # Make plane\n X, Y = np.meshgrid(np.arange(x1.min(), x1.max(), 0.05), np.arange(x2.min(), x2.max(), 0.05))\n yhat = w1 * X + w2 * Y + b\n\n # Plotting\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n ax.plot(x1[:, 0], x2[:, 0], y[:, 0],'ro', label='y') # Scatter plot\n \n ax.plot_surface(X, Y, yhat) # Plane plot\n \n ax.set_xlabel('x1 ')\n ax.set_ylabel('x2 ')\n ax.set_zlabel('y')\n plt.title('estimated plane iteration:' + str(n))\n ax.legend()\n\n plt.show()",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<h2 id=\" #Makeup_Data\" > Make Some Data </h2>",
"_____no_output_____"
],
[
"Create a dataset class with two-dimensional features:",
"_____no_output_____"
]
],
[
[
"# Create a 2D dataset\n\nclass Data2D(Dataset):\n \n # Constructor\n def __init__(self):\n self.x = torch.zeros(20, 2)\n self.x[:, 0] = torch.arange(-1, 1, 0.1)\n self.x[:, 1] = torch.arange(-1, 1, 0.1)\n self.w = torch.tensor([[1.0], [1.0]])\n self.b = 1\n self.f = torch.mm(self.x, self.w) + self.b \n self.y = self.f + 0.1 * torch.randn((self.x.shape[0],1))\n self.len = self.x.shape[0]\n\n # Getter\n def __getitem__(self, index): \n return self.x[index], self.y[index]\n \n # Get Length\n def __len__(self):\n return self.len",
"_____no_output_____"
]
],
[
[
"Create a dataset object:",
"_____no_output_____"
]
],
[
[
"# Create the dataset object\n\ndata_set = Data2D()",
"_____no_output_____"
]
],
[
[
"<h2 id=\"Model_Cost\">Create the Model, Optimizer, and Total Loss Function (Cost)</h2>",
"_____no_output_____"
],
[
"Create a customized linear regression module: ",
"_____no_output_____"
]
],
[
[
"# Create a customized linear\n\nclass linear_regression(nn.Module):\n \n # Constructor\n def __init__(self, input_size, output_size):\n super(linear_regression, self).__init__()\n self.linear = nn.Linear(input_size, output_size)\n \n # Prediction\n def forward(self, x):\n yhat = self.linear(x)\n return yhat",
"_____no_output_____"
]
],
[
[
"Create a model. Use two features: make the input size 2 and the output size 1: ",
"_____no_output_____"
]
],
[
[
"# Create the linear regression model and print the parameters\n\nmodel = linear_regression(2,1)\nprint(\"The parameters: \", list(model.parameters()))",
"The parameters: [Parameter containing:\ntensor([[ 0.6209, -0.1178]], requires_grad=True), Parameter containing:\ntensor([0.3026], requires_grad=True)]\n"
]
],
[
[
"Create an optimizer object. Set the learning rate to 0.1. <b>Don't forget to enter the model parameters in the constructor.</b>",
"_____no_output_____"
],
[
"<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter2/2.6.2paramater_hate.png\" width = \"100\" alt=\"How the optimizer works\" />",
"_____no_output_____"
]
],
[
[
"# Create the optimizer\n\noptimizer = optim.SGD(model.parameters(), lr=0.1)",
"_____no_output_____"
]
],
[
[
"Create the criterion function that calculates the total loss or cost:",
"_____no_output_____"
]
],
[
[
"# Create the cost function\n\ncriterion = nn.MSELoss()",
"_____no_output_____"
]
],
[
[
"Create a data loader object. Set the batch_size equal to 2: ",
"_____no_output_____"
]
],
[
[
"# Create the data loader\n\ntrain_loader = DataLoader(dataset=data_set, batch_size=2)",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<h2 id=\"BGD\">Train the Model via Mini-Batch Gradient Descent</h2>",
"_____no_output_____"
],
[
"Run 100 epochs of Mini-Batch Gradient Descent and store the total loss or cost for every iteration. Remember that this is an approximation of the true total loss or cost:",
"_____no_output_____"
]
],
[
[
"# Train the model\n\nLOSS = []\nprint(\"Before Training: \")\nPlot_2D_Plane(model, data_set) \nepochs = 100\n \ndef train_model(epochs): \n for epoch in range(epochs):\n for x,y in train_loader:\n yhat = model(x)\n loss = criterion(yhat, y)\n LOSS.append(loss.item())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() \ntrain_model(epochs)\nprint(\"After Training: \")\nPlot_2D_Plane(model, data_set, epochs) ",
"Before Training: \n"
],
[
"# Plot out the Loss and iteration diagram\n\nplt.plot(LOSS)\nplt.xlabel(\"Iterations \")\nplt.ylabel(\"Cost/total loss \")",
"_____no_output_____"
]
],
[
[
"<h3>Practice</h3>",
"_____no_output_____"
],
[
"Create a new <code>model1</code>. Train the model with a batch size 30 and learning rate 0.1, store the loss or total cost in a list <code>LOSS1</code>, and plot the results.",
"_____no_output_____"
]
],
[
[
"# Practice create model1. Train the model with batch size 30 and learning rate 0.1, store the loss in a list <code>LOSS1</code>. Plot the results.\n\ndata_set = Data2D()",
"_____no_output_____"
]
],
[
[
"Double-click <b>here</b> for the solution.\n\n<!-- Your answer is below:\ntrain_loader = DataLoader(dataset = data_set, batch_size = 30)\nmodel1 = linear_regression(2, 1)\noptimizer = optim.SGD(model1.parameters(), lr = 0.1)\nLOSS1 = []\nepochs = 100\ndef train_model(epochs): \n for epoch in range(epochs):\n for x,y in train_loader:\n yhat = model1(x)\n loss = criterion(yhat,y)\n LOSS1.append(loss.item())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() \ntrain_model(epochs)\nPlot_2D_Plane(model1 , data_set) \nplt.plot(LOSS1)\nplt.xlabel(\"iterations \")\nplt.ylabel(\"Cost/total loss \")\n-->",
"_____no_output_____"
],
[
"Use the following validation data to calculate the total loss or cost for both models:",
"_____no_output_____"
]
],
[
[
"torch.manual_seed(2)\n\nvalidation_data = Data2D()\nY = validation_data.y\nX = validation_data.x",
"_____no_output_____"
]
],
[
[
"Double-click <b>here</b> for the solution.\n<!-- Your answer is below:\nprint(\"total loss or cost for model: \",criterion(model(X),Y))\nprint(\"total loss or cost for model: \",criterion(model1(X),Y))\n-->",
"_____no_output_____"
],
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<a href=\"http://cocl.us/pytorch_link_bottom\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png\" width=\"750\" alt=\"PyTorch Bottom\" />\n</a>",
"_____no_output_____"
],
[
"<h2>About the Authors:</h2> \n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD. ",
"_____no_output_____"
],
[
"Other contributors: <a href=\"https://www.linkedin.com/in/michelleccarey/\">Michelle Carey</a>, <a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>",
"_____no_output_____"
],
[
"<hr>",
"_____no_output_____"
],
[
"Copyright © 2018 <a href=\"cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu\">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href=\"https://bigdatauniversity.com/mit-license/\">MIT License</a>.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7c7b1049348624d839eb7a3aeee3bb921de2a16 | 6,643 | ipynb | Jupyter Notebook | notebooks/Django Models.ipynb | warplydesigned/django_jupyter | 8a26c19862f6f7d7535ac0407c2e5c7ff43a9ba6 | [
"MIT"
] | 1 | 2020-03-08T04:19:07.000Z | 2020-03-08T04:19:07.000Z | notebooks/Django Models.ipynb | warplydesigned/django_jupyter | 8a26c19862f6f7d7535ac0407c2e5c7ff43a9ba6 | [
"MIT"
] | null | null | null | notebooks/Django Models.ipynb | warplydesigned/django_jupyter | 8a26c19862f6f7d7535ac0407c2e5c7ff43a9ba6 | [
"MIT"
] | 1 | 2020-10-12T04:35:49.000Z | 2020-10-12T04:35:49.000Z | 21.223642 | 94 | 0.481258 | [
[
[
"from candidates.models import SavedCandidate\nfrom jobs.models import Job\nfrom groups.models import Group",
"_____no_output_____"
]
],
[
[
"## Creating groups\nWe are going to create a simple group.",
"_____no_output_____"
]
],
[
[
"group = Group.objects.create(name='My First Group')\ngroup.pk",
"_____no_output_____"
]
],
[
[
"Now lets create a group that is a child of the first group.",
"_____no_output_____"
]
],
[
[
"group_child = Group.objects.create(name='Child of (My First Group)', parent_group=group)",
"_____no_output_____"
],
[
"group_child.parent_group.name",
"_____no_output_____"
]
],
[
[
"### Creating jobs\nNow that we have a few groups lets create some jobs to add to the groups.",
"_____no_output_____"
]
],
[
[
"job_1 = Job.objects.create(title='Job 1')\njob_2 = Job.objects.create(title='Job 2')",
"_____no_output_____"
]
],
[
[
"### Adding jobs to a group",
"_____no_output_____"
]
],
[
[
"group.jobs.add(job_1)\ngroup_child.jobs.add(job_2)",
"_____no_output_____"
]
],
[
[
"### Ok now lets add some saved candidates to a new group",
"_____no_output_____"
]
],
[
[
"candidate_1 = SavedCandidate.objects.create(name='Candidate 1')\ncandidate_2 = SavedCandidate.objects.create(name='Candidate 2')\ngroup_2 = Group.objects.create(name='Group 2')\ngroup_2_child = Group.objects.create(name='Group 2 Child', parent_group=group_2)\ngroup_2_child.saved_candidates.add(candidate_1)\ngroup_2_child.saved_candidates.add(candidate_2)",
"_____no_output_____"
]
],
[
[
"Lets loop all the groups and display there names, jobs and saved candiates for each.",
"_____no_output_____"
]
],
[
[
"for group in Group.objects.all():\n print(\"Group: {}\".format(group.name))\n print(\"jobs: {}\".format(group.jobs.count()))\n for job in group.jobs.all():\n print(job.title)\n \n print(\"saved candidates: {}\".format(group.saved_candidates.count()))\n for candidate in group.saved_candidates.all():\n print(candidate.name)\n print(\"\\n\")",
"Group: My First Group\njobs: 1\nJob 1\nsaved candidates: 0\n\n\nGroup: Child of (My First Group)\njobs: 1\nJob 2\nsaved candidates: 0\n\n\nGroup: Group 2\njobs: 0\nsaved candidates: 0\n\n\nGroup: Group 2 Child\njobs: 0\nsaved candidates: 2\nCandidate 1\nCandidate 2\n\n\nGroup: My First Group\njobs: 1\nJob 1\nsaved candidates: 0\n\n\nGroup: Child of (My First Group)\njobs: 1\nJob 2\nsaved candidates: 0\n\n\nGroup: Group 2\njobs: 0\nsaved candidates: 0\n\n\nGroup: Group 2 Child\njobs: 0\nsaved candidates: 2\nCandidate 1\nCandidate 2\n\n\nGroup: My First Group\njobs: 1\nJob 1\nsaved candidates: 0\n\n\nGroup: Child of (My First Group)\njobs: 1\nJob 2\nsaved candidates: 0\n\n\nGroup: Group 2\njobs: 0\nsaved candidates: 0\n\n\nGroup: Group 2 Child\njobs: 0\nsaved candidates: 0\n\n\nGroup: My First Group\njobs: 1\nJob 1\nsaved candidates: 0\n\n\nGroup: Child of (My First Group)\njobs: 1\nJob 2\nsaved candidates: 0\n\n\nGroup: Group 2\njobs: 0\nsaved candidates: 0\n\n\nGroup: Group 2 Child\njobs: 0\nsaved candidates: 0\n\n\nGroup: My First Group\njobs: 1\nJob 1\nsaved candidates: 0\n\n\nGroup: Child of (My First Group)\njobs: 1\nJob 2\nsaved candidates: 0\n\n\nGroup: Group 2\njobs: 0\nsaved candidates: 0\n\n\nGroup: Group 2 Child\njobs: 0\nsaved candidates: 2\nCandidate 1\nCandidate 2\n\n\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7c7b147ad8b715cb2b207ffc00aa8c30557f6db | 702,681 | ipynb | Jupyter Notebook | Notebooks/Sample-Notebooks/Example - Guided Investigation - Process-Alerts.ipynb | h0tp0ck3t/Sentinel | a138dd6a9f22411af597714fe9c8f25c1dfd09ca | [
"MIT"
] | 1 | 2020-12-10T21:00:42.000Z | 2020-12-10T21:00:42.000Z | Notebooks/Sample-Notebooks/Example - Guided Investigation - Process-Alerts.ipynb | h0tp0ck3t/Sentinel | a138dd6a9f22411af597714fe9c8f25c1dfd09ca | [
"MIT"
] | null | null | null | Notebooks/Sample-Notebooks/Example - Guided Investigation - Process-Alerts.ipynb | h0tp0ck3t/Sentinel | a138dd6a9f22411af597714fe9c8f25c1dfd09ca | [
"MIT"
] | 1 | 2022-01-27T15:57:32.000Z | 2022-01-27T15:57:32.000Z | 205.763104 | 86,319 | 0.699571 | [
[
[
"# Title: Alert Investigation (Windows Process Alerts)\n\n**Notebook Version:** 1.0<br>\n**Python Version:** Python 3.6 (including Python 3.6 - AzureML)<br>\n**Required Packages**: kqlmagic, msticpy, pandas, numpy, matplotlib, networkx, ipywidgets, ipython, scikit_learn<br>\n**Platforms Supported**:<br>\n- Azure Notebooks Free Compute\n- Azure Notebooks DSVM\n- OS Independent\n\n**Data Sources Required**:<br>\n- Log Analytics - SecurityAlert, SecurityEvent (EventIDs 4688 and 4624/25)\n- (Optional) - VirusTotal (with API key)\n\n## Description:\nThis notebook is intended for triage and investigation of security alerts. It is specifically targeted at alerts triggered by suspicious process activity on Windows hosts. Some of the sections will work on other types of alerts but this is not guaranteed.\n",
"_____no_output_____"
],
[
"<a id='toc'></a>\n## Table of Contents\n- [Setup and Authenticate](#setup)\n\n- [Get Alerts List](#getalertslist)\n- [Choose an Alert to investigate](#enteralertid)\n - [Extract Properties and entities from alert](#extractalertproperties)\n - [Entity Graph](#entitygraph)\n- [Related Alerts](#related_alerts)\n- [Session Process Tree](#processtree)\n - [Process Timeline](#processtimeline)\n- [Other Process on Host](#process_clustering)\n- [Check for IOCs in Commandline](#cmdlineiocs)\n - [VirusTotal lookup](#virustotallookup)\n- [Alert command line - Occurrence on other hosts in subscription](#cmdlineonotherhosts)\n- [Host Logons](#host_logons)\n - [Alert Account](#logonaccount)\n - [Failed Logons](#failed_logons)\n- [Appendices](#appendices)\n - [Saving data to Excel](#appendices)\n",
"_____no_output_____"
],
[
"<a id='setup'></a>[Contents](#toc)\n# Setup\n\n1. Make sure that you have installed packages specified in the setup (uncomment the lines to execute)\n2. There are some manual steps up to selecting the alert ID. After this most of the notebook can be executed sequentially\n3. Major sections should be executable independently (e.g. Alert Command line and Host Logons can be run skipping Session Process Tree)\n\n## Install Packages\nThe first time this cell runs for a new Azure Notebooks project or local Python environment it will take several minutes to download and install the packages. In subsequent runs it should run quickly and confirm that package dependencies are already installed. Unless you want to upgrade the packages you can feel free to skip execution of the next cell.\n\nIf you see any import failures (```ImportError```) in the notebook, please re-run this cell and answer 'y', then re-run the cell where the failure occurred. \n\nNote you may see some warnings about package incompatibility with certain packages. This does not affect the functionality of this notebook but you may need to upgrade the packages producing the warnings to a more recent version.",
"_____no_output_____"
]
],
[
[
"import sys\nimport warnings\n\nwarnings.filterwarnings(\"ignore\",category=DeprecationWarning)\n\nMIN_REQ_PYTHON = (3,6)\nif sys.version_info < MIN_REQ_PYTHON:\n print('Check the Kernel->Change Kernel menu and ensure that Python 3.6')\n print('or later is selected as the active kernel.')\n sys.exit(\"Python %s.%s or later is required.\\n\" % MIN_REQ_PYTHON)\n \n# Package Installs - try to avoid if they are already installed\ntry:\n import msticpy.sectools as sectools\n import Kqlmagic\n print('If you answer \"n\" this cell will exit with an error in order to avoid the pip install calls,')\n print('This error can safely be ignored.')\n resp = input('msticpy and Kqlmagic packages are already loaded. Do you want to re-install? (y/n)')\n if resp.strip().lower() != 'y':\n sys.exit('pip install aborted - you may skip this error and continue.')\n else:\n print('After installation has completed, restart the current kernel and run '\n 'the notebook again skipping this cell.')\nexcept ImportError:\n pass\n\nprint('\\nPlease wait. Installing required packages. This may take a few minutes...')\n!pip install git+https://github.com/microsoft/msticpy --upgrade --user\n!pip install Kqlmagic --no-cache-dir --upgrade --user\n\nprint('\\nTo ensure that the latest versions of the installed libraries '\n 'are used, please restart the current kernel and run '\n 'the notebook again skipping this cell.')",
"_____no_output_____"
]
],
[
[
"### Import Python Packages",
"_____no_output_____"
],
[
"### Get WorkspaceId\nTo find your Workspace Id go to [Log Analytics](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.OperationalInsights%2Fworkspaces). Look at the workspace properties to find the ID.",
"_____no_output_____"
]
],
[
[
"# Imports\nimport sys\nMIN_REQ_PYTHON = (3,6)\nif sys.version_info < MIN_REQ_PYTHON:\n print('Check the Kernel->Change Kernel menu and ensure that Python 3.6')\n print('or later is selected as the active kernel.')\n sys.exit(\"Python %s.%s or later is required.\\n\" % MIN_REQ_PYTHON)\n\nimport numpy as np\nfrom IPython import get_ipython\nfrom IPython.display import display, HTML, Markdown\nimport ipywidgets as widgets\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport networkx as nx\nsns.set()\nimport pandas as pd\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 50)\npd.set_option('display.max_colwidth', 100)\n \nimport msticpy.sectools as sectools\nimport msticpy.nbtools as mas\nimport msticpy.nbtools.kql as qry\nimport msticpy.nbtools.nbdisplay as nbdisp\n\n# Some of our dependencies (networkx) still use deprecated Matplotlib\n# APIs - we can't do anything about it so suppress them from view\nfrom matplotlib import MatplotlibDeprecationWarning\nwarnings.simplefilter(\"ignore\", category=MatplotlibDeprecationWarning)\n",
"_____no_output_____"
],
[
"import os\nfrom msticpy.nbtools.wsconfig import WorkspaceConfig\nws_config_file = 'config.json'\n\nWORKSPACE_ID = None\nTENANT_ID = None\ntry:\n ws_config = WorkspaceConfig(ws_config_file)\n display(Markdown(f'Read Workspace configuration from local config.json for workspace **{ws_config[\"workspace_name\"]}**'))\n for cf_item in ['tenant_id', 'subscription_id', 'resource_group', 'workspace_id', 'workspace_name']:\n display(Markdown(f'**{cf_item.upper()}**: {ws_config[cf_item]}'))\n \n if ('cookiecutter' not in ws_config['workspace_id'] or\n 'cookiecutter' not in ws_config['tenant_id']):\n WORKSPACE_ID = ws_config['workspace_id']\n TENANT_ID = ws_config['tenant_id']\nexcept:\n pass\n\nif not WORKSPACE_ID or not TENANT_ID:\n display(Markdown('**Workspace configuration not found.**\\n\\n'\n 'Please go to your Log Analytics workspace, copy the workspace ID'\n ' and/or tenant Id and paste here.<br> '\n 'Or read the workspace_id from the config.json in your Azure Notebooks project.'))\n ws_config = None\n ws_id = mas.GetEnvironmentKey(env_var='WORKSPACE_ID',\n prompt='Please enter your Log Analytics Workspace Id:', auto_display=True)\n ten_id = mas.GetEnvironmentKey(env_var='TENANT_ID',\n prompt='Please enter your Log Analytics Tenant Id:', auto_display=True)\n\n ",
"_____no_output_____"
]
],
[
[
"### Authenticate to Log Analytics\nIf you are using user/device authentication, run the following cell. \n- Click the 'Copy code to clipboard and authenticate' button.\n- This will pop up an Azure Active Directory authentication dialog (in a new tab or browser window). The device code will have been copied to the clipboard. \n- Select the text box and paste (Ctrl-V/Cmd-V) the copied value. \n- You should then be redirected to a user authentication page where you should authenticate with a user account that has permission to query your Log Analytics workspace.\n\nUse the following syntax if you are authenticating using an Azure Active Directory AppId and Secret:\n```\n%kql loganalytics://tenant(aad_tenant).workspace(WORKSPACE_ID).clientid(client_id).clientsecret(client_secret)\n```\ninstead of\n```\n%kql loganalytics://code().workspace(WORKSPACE_ID)\n```\n\nNote: you may occasionally see a JavaScript error displayed at the end of the authentication - you can safely ignore this.<br>\nOn successful authentication you should see a ```popup schema``` button.",
"_____no_output_____"
]
],
[
[
"if not WORKSPACE_ID or not TENANT_ID:\n try:\n WORKSPACE_ID = ws_id.value\n TENANT_ID = ten_id.value\n except NameError:\n raise ValueError('No workspace or Tenant Id.')\n\nmas.kql.load_kql_magic()\n%kql loganalytics://code().tenant(TENANT_ID).workspace(WORKSPACE_ID)",
"_____no_output_____"
]
],
[
[
"<a id='getalertslist'></a>[Contents](#toc)\n# Get Alerts List\n\nSpecify a time range to search for alerts. One this is set run the following cell to retrieve any alerts in that time window.\nYou can change the time range and re-run the queries until you find the alerts that you want.",
"_____no_output_____"
]
],
[
[
"alert_q_times = mas.QueryTime(units='day', max_before=20, before=5, max_after=1)\nalert_q_times.display()",
"_____no_output_____"
],
[
"alert_counts = qry.list_alerts_counts(provs=[alert_q_times])\nalert_list = qry.list_alerts(provs=[alert_q_times])\nprint(len(alert_counts), ' distinct alert types')\nprint(len(alert_list), ' distinct alerts')\ndisplay(HTML('<h2>Alert Timeline</h2>'))\nnbdisp.display_timeline(data=alert_list, source_columns = ['AlertName', 'CompromisedEntity'], title='Alerts', height=200)\ndisplay(HTML('<h2>Top alerts</h2>'))\nalert_counts.head(20) # remove '.head(20)'' to see the full list grouped by AlertName",
"12 distinct alert types\n51 distinct alerts\n"
]
],
[
[
"<a id='enteralertid'></a>[Contents](#toc)\n# Choose Alert to Investigate\nEither pick an alert from a list of retrieved alerts or paste the SystemAlertId into the text box in the following section.",
"_____no_output_____"
],
[
"### Select alert from list\nAs you select an alert, the main properties will be shown below the list.\n\nUse the filter box to narrow down your search to any substring in the AlertName.",
"_____no_output_____"
]
],
[
[
"alert_select = mas.AlertSelector(alerts=alert_list, action=nbdisp.display_alert)\nalert_select.display()",
"_____no_output_____"
]
],
[
[
"### Or paste in an alert ID and fetch it\n**Skip this if you selected from the above list**",
"_____no_output_____"
]
],
[
[
"# Allow alert to be selected\n# Allow subscription to be selected\nget_alert = mas.GetSingleAlert(action=nbdisp.display_alert)\nget_alert.display()",
"_____no_output_____"
]
],
[
[
"<a id='extractalertproperties'></a>[Contents](#toc)\n## Extract properties and entities from Alert\nThis section extracts the alert information and entities into a SecurityAlert object allowing us to query the properties more reliably. \n\nIn particular, we use the alert to automatically provide parameters for queries and UI elements.\nSubsequent queries will use properties like the host name and derived properties such as the OS family (Linux or Windows) to adapt the query. Query time selectors like the one above will also default to an origin time that matches the alert selected.\n\nThe alert view below shows all of the main properties of the alert plus the extended property dictionary (if any) and JSON representations of the Entity.",
"_____no_output_____"
]
],
[
[
"# Extract entities and properties into a SecurityAlert class\nif alert_select.selected_alert is None and get_alert.selected_alert is None:\n sys.exit(\"Please select an alert before executing remaining cells.\")\n\nif get_alert.selected_alert is not None:\n security_alert = mas.SecurityAlert(get_alert.selected_alert)\nelif alert_select.selected_alert is not None:\n security_alert = mas.SecurityAlert(alert_select.selected_alert)\n \nmas.disp.display_alert(security_alert, show_entities=True)",
"_____no_output_____"
]
],
[
[
"<a id='entitygraph'></a>[Contents](#toc)\n## Entity Graph\nDepending on the type of alert there may be one or more entities attached as properties. Entities are things like Host, Account, IpAddress, Process, etc. - essentially the 'nouns' of security investigation. Events and alerts are the things that link them in actions so can be thought of as the verbs. Entities are often related to other entities - for example a process will usually have a related file entity (the process image) and an Account entity (the context in which the process was running). Endpoint alerts typically always have a host entity (which could be a physical or virtual machine).",
"_____no_output_____"
],
[
"### Plot using Networkx/Matplotlib",
"_____no_output_____"
]
],
[
[
"# Draw the graph using Networkx/Matplotlib\n%matplotlib inline\nalertentity_graph = mas.create_alert_graph(security_alert)\nnbdisp.draw_alert_entity_graph(alertentity_graph, width=15)",
"_____no_output_____"
]
],
[
[
"<a id='related_alerts'></a>[Contents](#toc)\n# Related Alerts\nFor a subset of entities in the alert we can search for any alerts that have that entity in common. Currently this query looks for alerts that share the same Host, Account or Process and lists them below. \n**Notes:**\n- Some alert types do not include all of these entity types.\n- The original alert will be included in the \"Related Alerts\" set if it occurs within the query time boundary set below.\n\nThe query time boundaries default to a longer period than when searching for the alert. You can extend the time boundary searched before or after the alert time. If the widget doesn't support the time boundary that you want you can change the max_before and max_after parameters in the call to QueryTime below to extend the possible time boundaries.",
"_____no_output_____"
]
],
[
[
"# set the origin time to the time of our alert\nquery_times = mas.QueryTime(units='day', origin_time=security_alert.TimeGenerated, \n max_before=28, max_after=1, before=5)\nquery_times.display()",
"_____no_output_____"
],
[
"related_alerts = qry.list_related_alerts(provs=[query_times, security_alert])\n\nif related_alerts is not None and not related_alerts.empty:\n host_alert_items = related_alerts\\\n .query('host_match == @True')[['AlertType', 'StartTimeUtc']]\\\n .groupby('AlertType').StartTimeUtc.agg('count').to_dict()\n acct_alert_items = related_alerts\\\n .query('acct_match == @True')[['AlertType', 'StartTimeUtc']]\\\n .groupby('AlertType').StartTimeUtc.agg('count').to_dict()\n proc_alert_items = related_alerts\\\n .query('proc_match == @True')[['AlertType', 'StartTimeUtc']]\\\n .groupby('AlertType').StartTimeUtc.agg('count').to_dict()\n\n def print_related_alerts(alertDict, entityType, entityName):\n if len(alertDict) > 0:\n print('Found {} different alert types related to this {} (\\'{}\\')'\n .format(len(alertDict), entityType, entityName))\n for (k,v) in alertDict.items():\n print(' {}, Count of alerts: {}'.format(k, v))\n else:\n print('No alerts for {} entity \\'{}\\''.format(entityType, entityName))\n\n print_related_alerts(host_alert_items, 'host', security_alert.hostname)\n print_related_alerts(acct_alert_items, 'account', \n security_alert.primary_account.qualified_name \n if security_alert.primary_account\n else None)\n print_related_alerts(proc_alert_items, 'process', \n security_alert.primary_process.ProcessFilePath \n if security_alert.primary_process\n else None)\n nbdisp.display_timeline(data=related_alerts, source_columns = ['AlertName'], title='Alerts', height=100)\nelse:\n display(Markdown('No related alerts found.'))",
"Found 8 different alert types related to this host ('msticalertswin1')\n Detected potentially suspicious use of Telegram tool, Count of alerts: 2\n Detected the disabling of critical services, Count of alerts: 2\n Digital currency mining related behavior detected, Count of alerts: 2\n Potential attempt to bypass AppLocker detected, Count of alerts: 4\n Security incident detected, Count of alerts: 2\n Security incident with shared process detected, Count of alerts: 3\n Suspicious system process executed, Count of alerts: 2\n Suspiciously named process detected, Count of alerts: 2\nFound 13 different alert types related to this account ('msticalertswin1\\msticadmin')\n An history file has been cleared, Count of alerts: 12\n Azure Security Center test alert (not a threat), Count of alerts: 13\n Detected potentially suspicious use of Telegram tool, Count of alerts: 2\n Detected the disabling of critical services, Count of alerts: 2\n Digital currency mining related behavior detected, Count of alerts: 2\n New SSH key added, Count of alerts: 13\n Possible credential access tool detected, Count of alerts: 11\n Possible suspicious scheduling tasks access detected, Count of alerts: 1\n Potential attempt to bypass AppLocker detected, Count of alerts: 3\n Suspicious Download Then Run Activity, Count of alerts: 13\n Suspicious binary detected, Count of alerts: 13\n Suspicious system process executed, Count of alerts: 2\n Suspiciously named process detected, Count of alerts: 2\nFound 2 different alert types related to this process ('c:\\w!ndows\\system32\\suchost.exe')\n Digital currency mining related behavior detected, Count of alerts: 2\n Suspiciously named process detected, Count of alerts: 2\n"
]
],
[
[
"### Show these related alerts on a graph\nThis should indicate which entities the other alerts are related to.\n\nThis can be unreadable with a lot of alerts. Use the matplotlib interactive zoom control to zoom in to part of the graph.",
"_____no_output_____"
]
],
[
[
"# Draw a graph of this (add to entity graph)\n%matplotlib notebook\n%matplotlib inline\n\nif related_alerts is not None and not related_alerts.empty:\n rel_alert_graph = mas.add_related_alerts(related_alerts=related_alerts,\n alertgraph=alertentity_graph)\n nbdisp.draw_alert_entity_graph(rel_alert_graph, width=15)\nelse:\n display(Markdown('No related alerts found.'))",
"_____no_output_____"
]
],
[
[
"### Browse List of Related Alerts\nSelect an Alert to view details. \n\nIf you want to investigate that alert - copy its *SystemAlertId* property and open a new instance of this notebook to investigate this alert.",
"_____no_output_____"
]
],
[
[
"\ndef disp_full_alert(alert):\n global related_alert\n related_alert = mas.SecurityAlert(alert)\n nbdisp.display_alert(related_alert, show_entities=True)\n\nif related_alerts is not None and not related_alerts.empty:\n related_alerts['CompromisedEntity'] = related_alerts['Computer']\n print('Selected alert is available as \\'related_alert\\' variable.')\n rel_alert_select = mas.AlertSelector(alerts=related_alerts, action=disp_full_alert)\n rel_alert_select.display()\nelse:\n display(Markdown('No related alerts found.'))",
"Selected alert is available as 'related_alert' variable.\n"
]
],
[
[
"<a id='processtree'></a>[Contents](#toc)\n# Get Process Tree\nIf the alert has a process entity this section tries to retrieve the entire process tree to which that process belongs.\n\nNotes:\n- The alert must have a process entity\n- Only processes started within the query time boundary will be included\n- Ancestor and descented processes are retrieved to two levels (i.e. the parent and grandparent of the alert process plus any child and grandchild processes).\n- Sibling processes are the processes that share the same parent as the alert process\n- This can be a long-running query, especially if a wide time window is used! Caveat Emptor!\n\nThe source (alert) process is shown in red.\n\nWhat's shown for each process:\n- Each process line is indented according to its position in the tree hierarchy\n- Top line fields:\n - \\[relationship to source process:lev# - where # is the hops away from the source process\\]\n - Process creation date-time (UTC)\n - Process Image path\n - PID - Process Id\n - SubjSess - the session Id of the process spawning the new process\n - TargSess - the new session Id if the process is launched in another context/session. If 0/0x0 then the process is launched in the same session as its parent\n- Second line fields:\n - Process command line\n - Account - name of the account context in which the process is running",
"_____no_output_____"
]
],
[
[
"# set the origin time to the time of our alert\nquery_times = mas.QueryTime(units='minute', origin_time=security_alert.origin_time)\nquery_times.display()",
"_____no_output_____"
],
[
"from msticpy.nbtools.query_defns import DataFamily\n\nif security_alert.data_family != DataFamily.WindowsSecurity:\n raise ValueError('The remainder of this notebook currently only supports Windows. '\n 'Linux support is in development but not yet implemented.')\n\ndef extract_missing_pid(security_alert):\n for pid_ext_name in ['Process Id', 'Suspicious Process Id']:\n pid = security_alert.ExtendedProperties.get(pid_ext_name, None)\n if pid:\n return pid\n\ndef extract_missing_sess_id(security_alert):\n sess_id = security_alert.ExtendedProperties.get('Account Session Id', None)\n if sess_id:\n return sess_id\n for session in [e for e in security_alert.entities if\n e['Type'] == 'host-logon-session' or e['Type'] == 'hostlogonsession']:\n return session['SessionId']\n \nif (security_alert.primary_process):\n # Do some patching up if the process entity doesn't have a PID\n pid = security_alert.primary_process.ProcessId\n if not pid:\n pid = extract_missing_pid(security_alert)\n if pid:\n security_alert.primary_process.ProcessId = pid\n else:\n raise ValueError('Could not find the process Id for the alert process.')\n \n # Do the same if we can't find the account logon ID\n if not security_alert.get_logon_id():\n sess_id = extract_missing_sess_id(security_alert)\n if sess_id and security_alert.primary_account:\n security_alert.primary_account.LogonId = sess_id\n else:\n raise ValueError('Could not find the session Id for the alert process.')\n \n # run the query\n process_tree = qry.get_process_tree(provs=[query_times, security_alert])\n\n if len(process_tree) > 0:\n # Print out the text view of the process tree\n nbdisp.display_process_tree(process_tree)\n else:\n display(Markdown('No processes were returned so cannot obtain a process tree.'\n '\\n\\nSkip to [Other Processes](#process_clustering) later in the'\n ' notebook to retrieve all processes'))\nelse:\n display(Markdown('This alert has no process entity so cannot obtain a process tree.'\n '\\n\\nSkip to [Other Processes](#process_clustering) later in the'\n ' notebook to retrieve all processes'))\n process_tree = None\n",
"_____no_output_____"
]
],
[
[
"<a id='processtimeline'></a>[Contents](#toc)\n## Process TimeLine\nThis shows each process in the process tree on a timeline view.\n\nLabelling of individual process is very performance intensive and often results in nothing being displayed at all! Besides, for large numbers of processes it would likely result in an unreadable mess. \n\nYour main tools for negotiating the timeline are the Hover tool (toggled on and off by the speech bubble icon) and the wheel-zoom and pan tools (the former is an icon with an elipse and a magnifying glass, the latter is the crossed-arrows icon). The wheel zoom is particularly useful.\n\nAs you hover over each process it will display the image name, PID and commandline.\n\nAlso shown on the graphic is the timestamp line of the source/alert process.",
"_____no_output_____"
]
],
[
[
"# Show timeline of events\nif process_tree is not None and not process_tree.empty:\n nbdisp.display_timeline(data=process_tree, alert=security_alert, \n title='Alert Process Session', height=250)",
"_____no_output_____"
]
],
[
[
"<a id='process_clustering'></a>[Contents](#toc)\n# Other Processes on Host - Clustering\nSometimes you don't have a source process to work with. Other times it's just useful to see what else is going on on the host. This section retrieves all processes on the host within the time bounds\nset in the query times widget.\n\nYou can display the raw output of this by looking at the *processes_on_host* dataframe. Just copy this into a new cell and hit Ctrl-Enter.\n\nUsually though, the results return a lot of very repetitive and unintersting system processes so we attempt to cluster these to make the view easier to negotiate. \nTo do this we process the raw event list output to extract a few features that render strings (such as commandline)into numerical values. The default below uses the following features:\n- commandLineTokensFull - this is a count of common delimiters in the commandline \n (given by this regex r'[\\s\\-\\\\/\\.,\"\\'|&:;%$()]'). The aim of this is to capture the commandline structure while ignoring variations on what is essentially the same pattern (e.g. temporary path GUIDs, target IP or host names, etc.)\n- pathScore - this sums the ordinal (character) value of each character in the path (so /bin/bash and /bin/bosh would have similar scores).\n- isSystemSession - 1 if this is a root/system session, 0 if anything else.\n\nThen we run a clustering algorithm (DBScan in this case) on the process list. The result groups similar (noisy) processes together and leaves unique process patterns as single-member clusters.",
"_____no_output_____"
],
[
"### Clustered Processes (i.e. processes that have a cluster size > 1)",
"_____no_output_____"
]
],
[
[
"from msticpy.sectools.eventcluster import dbcluster_events, add_process_features\n\nprocesses_on_host = qry.list_processes(provs=[query_times, security_alert])\n\nif processes_on_host is not None and not processes_on_host.empty:\n feature_procs = add_process_features(input_frame=processes_on_host,\n path_separator=security_alert.path_separator)\n\n # you might need to play around with the max_cluster_distance parameter.\n # decreasing this gives more clusters.\n (clus_events, dbcluster, x_data) = dbcluster_events(data=feature_procs,\n cluster_columns=['commandlineTokensFull', \n 'pathScore', \n 'isSystemSession'],\n max_cluster_distance=0.0001)\n print('Number of input events:', len(feature_procs))\n print('Number of clustered events:', len(clus_events))\n clus_events[['ClusterSize', 'processName']][clus_events['ClusterSize'] > 1].plot.bar(x='processName', \n title='Process names with Cluster > 1', \n figsize=(12,3));\nelse:\n display(Markdown('Unable to obtain any processes for this host. This feature'\n ' is currently only supported for Windows hosts.'\n '\\n\\nIf this is a Windows host skip to [Host Logons](#host_logons)'\n ' later in the notebook to examine logon events.'))",
"Number of input events: 190\nNumber of clustered events: 24\n"
]
],
[
[
"### Variability in Command Lines and Process Names\nThe top chart shows the variability of command line content for a give process name. The wider the box, the more instances were found with different command line structure \n\nNote, the 'structure' in this case is measured by the number of tokens or delimiters in the command line and does not look at content differences. This is done so that commonly varying instances of the same command line are grouped together.<br>\nFor example `updatepatch host1.mydom.com` and `updatepatch host2.mydom.com` will be grouped together.\n\nThe second chart shows the variability in executable path. This does compare content so `c:\\windows\\system32\\net.exe` and `e:\\windows\\system32\\net.exe` are treated as distinct. You would normally not expect to see any variability in this chart unless you have multiple copies of the same name executable or an executable is trying masquerade as another well-known binary.",
"_____no_output_____"
]
],
[
[
"# Looking at the variability of commandlines and process image paths\nimport seaborn as sns\nsns.set(style=\"darkgrid\")\n\nif processes_on_host is not None and not processes_on_host.empty:\n proc_plot = sns.catplot(y=\"processName\", x=\"commandlineTokensFull\", \n data=feature_procs.sort_values('processName'),\n kind='box', height=10)\n proc_plot.fig.suptitle('Variability of Commandline Tokens', x=1, y=1)\n\n proc_plot = sns.catplot(y=\"processName\", x=\"pathLogScore\", \n data=feature_procs.sort_values('processName'),\n kind='box', height=10, hue='isSystemSession')\n proc_plot.fig.suptitle('Variability of Path', x=1, y=1);",
"_____no_output_____"
]
],
[
[
"The top graph shows that, for a given process, some have a wide variability in their command line content while the majority have little or none. Looking at a couple of examples - like cmd.exe, powershell.exe, reg.exe, net.exe - we can recognize several common command line tools.\n\nThe second graph shows processes by full process path content. We wouldn't normally expect to see variation here - as is the cast with most. There is also quite a lot of variance in the score making it a useful proxy feature for unique path name (this means that proc1.exe and proc2.exe that have the same commandline score won't get collapsed into the same cluster).\n\nAny process with a spread of values here means that we are seeing the same process name (but not necessarily the same file) is being run from different locations.",
"_____no_output_____"
]
],
[
[
"if not clus_events.empty:\n resp = input('View the clustered data? y/n')\n if resp == 'y':\n display(clus_events.sort_values('TimeGenerated')[['TimeGenerated', 'LastEventTime',\n 'NewProcessName', 'CommandLine', \n 'ClusterSize', 'commandlineTokensFull',\n 'pathScore', 'isSystemSession']])",
"View the clustered data? y/ny\n"
],
[
"# Look at clusters for individual process names\ndef view_cluster(exe_name):\n display(clus_events[['ClusterSize', 'processName', 'CommandLine', 'ClusterId']][clus_events['processName'] == exe_name])\n \ndisplay(Markdown('You can view the cluster members for individual processes'\n 'by inserting a new cell and entering:<br>'\n '`>>> view_cluster(process_name)`<br></div>'\n 'where process_name is the unqualified process binary. E.g<br>'\n '`>>> view_cluster(\\'reg.exe\\')`'))",
"_____no_output_____"
]
],
[
[
"### Time showing clustered vs. original data",
"_____no_output_____"
]
],
[
[
"# Show timeline of events - clustered events\nif not clus_events.empty:\n nbdisp.display_timeline(data=clus_events, \n overlay_data=processes_on_host, \n alert=security_alert, \n title='Distinct Host Processes (top) and All Proceses (bottom)')",
"_____no_output_____"
]
],
[
[
"<a id='cmdlineiocs'></a>[Contents](#toc)\n# Base64 Decode and Check for IOCs\nThis section looks for Indicators of Compromise (IoC) within the data sets passed to it.\n\nThe first section looks at the commandline for the alert process (if any). It also looks for base64 encoded strings within the data - this is a common way of hiding attacker intent. It attempts to decode any strings that look like base64. Additionally, if the base64 decode operation returns any items that look like a base64 encoded string or file, a gzipped binary sequence, a zipped or tar archive, it will attempt to extract the contents before searching for potentially interesting items.",
"_____no_output_____"
]
],
[
[
"process = security_alert.primary_process\nioc_extractor = sectools.IoCExtract()\n\nif process:\n # if nothing is decoded this just returns the input string unchanged\n base64_dec_str, _ = sectools.b64.unpack_items(input_string=process[\"CommandLine\"])\n if base64_dec_str and '<decoded' in base64_dec_str:\n print('Base64 encoded items found.')\n print(base64_dec_str)\n \n # any IoCs in the string?\n iocs_found = ioc_extractor.extract(base64_dec_str)\n \n if iocs_found:\n print('\\nPotential IoCs found in alert process:')\n display(iocs_found)\nelse:\n print('Nothing to process')\n",
"\nPotential IoCs found in alert process:\n"
]
],
[
[
"### If we have a process tree, look for IoCs in the whole data set\nYou can replace the data=process_tree parameter to ioc_extractor.extract() to pass other data frames.\nuse the columns parameter to specify which column or columns that you want to search.",
"_____no_output_____"
]
],
[
[
"ioc_extractor = sectools.IoCExtract()\n\ntry:\n if not process_tree.empty:\n source_processes = process_tree\n else:\n source_processes = clus_events\nexcept NameError:\n source_processes = None\nif source_processes is not None: \n\n ioc_df = ioc_extractor.extract(data=source_processes, \n columns=['CommandLine'], \n os_family=security_alert.os_family,\n ioc_types=['ipv4', 'ipv6', 'dns', 'url',\n 'md5_hash', 'sha1_hash', 'sha256_hash'])\n if len(ioc_df):\n display(HTML(\"<h3>IoC patterns found in process tree.</h3>\"))\n display(ioc_df)\nelse:\n ioc_df = None",
"_____no_output_____"
]
],
[
[
"### If any Base64 encoded strings, decode and search for IoCs in the results.\nFor simple strings the Base64 decoded output is straightforward. However for nested encodings this can get a little complex and difficult to represent in a tabular format.\n\n**Columns**\n - reference - The index of the row item in dotted notation in depth.seq pairs (e.g. 1.2.2.3 would be the 3 item at depth 3 that is a child of the 2nd item found at depth 1). This may not always be an accurate notation - it is mainly use to allow you to associate an individual row with the reference value contained in the full_decoded_string column of the topmost item).\n - original_string - the original string before decoding.\n - file_name - filename, if any (only if this is an item in zip or tar file).\n - file_type - a guess at the file type (this is currently elementary and only includes a few file types).\n - input_bytes - the decoded bytes as a Python bytes string.\n - decoded_string - the decoded string if it can be decoded as a UTF-8 or UTF-16 string. Note: binary sequences may often successfully decode as UTF-16 strings but, in these cases, the decodings are meaningless.\n - encoding_type - encoding type (UTF-8 or UTF-16) if a decoding was possible, otherwise 'binary'.\n - file_hashes - collection of file hashes for any decoded item.\n - md5 - md5 hash as a separate column.\n - sha1 - sha1 hash as a separate column.\n - sha256 - sha256 hash as a separate column.\n - printable_bytes - printable version of input_bytes as a string of \\xNN values\n - src_index - the index of the row in the input dataframe from which the data came.\n - full_decoded_string - the full decoded string with any decoded replacements. This is only really useful for top-level items, since nested items will only show the 'full' string representing the child fragment.",
"_____no_output_____"
]
],
[
[
"if source_processes is not None:\n dec_df = sectools.b64.unpack_items(data=source_processes, column='CommandLine')\n \nif source_processes is not None and not dec_df.empty:\n display(HTML(\"<h3>Decoded base 64 command lines</h3>\"))\n display(HTML(\"Warning - some binary patterns may be decodable as unicode strings\"))\n display(dec_df[['full_decoded_string', 'original_string', 'decoded_string', 'input_bytes', 'file_hashes']])\n\n ioc_dec_df = ioc_extractor.extract(data=dec_df, columns=['full_decoded_string'])\n if len(ioc_dec_df):\n display(HTML(\"<h3>IoC patterns found in base 64 decoded data</h3>\"))\n display(ioc_dec_df)\n if ioc_df is not None:\n ioc_df = ioc_df.append(ioc_dec_df ,ignore_index=True)\n else:\n ioc_df = ioc_dec_df\nelse:\n print(\"No base64 encodings found.\")\n ioc_df = None",
"_____no_output_____"
]
],
[
[
"<a id='virustotallookup'></a>[Contents](#toc)\n## Virus Total Lookup\nThis section uses the popular Virus Total service to check any recovered IoCs against VTs database.\n\nTo use this you need an API key from virus total, which you can obtain here: https://www.virustotal.com/.\n\nNote that VT throttles requests for free API keys to 4/minute. If you are unable to process the entire data set, try splitting it and submitting smaller chunks.\n\n**Things to note:**\n- Virus Total lookups include file hashes, domains, IP addresses and URLs.\n- The returned data is slightly different depending on the input type\n- The VTLookup class tries to screen input data to prevent pointless lookups. E.g.:\n - Only public IP Addresses will be submitted (no loopback, private address space, etc.)\n - URLs with only local (unqualified) host parts will not be submitted.\n - Domain names that are unqualified will not be submitted.\n - Hash-like strings (e.g 'AAAAAAAAAAAAAAAAAA') that do not appear to have enough entropy to be a hash will not be submitted.\n\n**Output Columns**\n - Observable - The IoC observable submitted\n - IoCType - the IoC type\n - Status - the status of the submission request\n - ResponseCode - the VT response code\n - RawResponse - the entire raw json response\n - Resource - VT Resource\n - SourceIndex - The index of the Observable in the source DataFrame. You can use this to rejoin to your original data.\n - VerboseMsg - VT Verbose Message\n - ScanId - VT Scan ID if any\n - Permalink - VT Permanent URL describing the resource\n - Positives - If this is not zero, it indicates the number of malicious reports that VT holds for this observable.\n - MD5 - The MD5 hash, if any\n - SHA1 - The MD5 hash, if any\n - SHA256 - The MD5 hash, if any\n - ResolvedDomains - In the case of IP Addresses, this contains a list of all domains that resolve to this IP address\n - ResolvedIPs - In the case Domains, this contains a list of all IP addresses resolved from the domain.\n - DetectedUrls - Any malicious URLs associated with the observable.",
"_____no_output_____"
]
],
[
[
"vt_key = mas.GetEnvironmentKey(env_var='VT_API_KEY',\n help_str='To obtain an API key sign up here https://www.virustotal.com/',\n prompt='Virus Total API key:')\nvt_key.display()",
"_____no_output_____"
],
[
"if vt_key.value and ioc_df is not None and not ioc_df.empty:\n vt_lookup = sectools.VTLookup(vt_key.value, verbosity=2)\n\n print(f'{len(ioc_df)} items in input frame')\n supported_counts = {}\n for ioc_type in vt_lookup.supported_ioc_types:\n supported_counts[ioc_type] = len(ioc_df[ioc_df['IoCType'] == ioc_type])\n print('Items in each category to be submitted to VirusTotal')\n print('(Note: items have pre-filtering to remove obvious erroneous '\n 'data and false positives, such as private IPaddresses)')\n print(supported_counts)\n print('-' * 80)\n vt_results = vt_lookup.lookup_iocs(data=ioc_df, type_col='IoCType', src_col='Observable')\n \n pos_vt_results = vt_results.query('Positives > 0')\n if len(pos_vt_results) > 0:\n display(HTML(f'<h3>{len(pos_vt_results)} Positive Results Found</h3>'))\n display(pos_vt_results[['Observable', 'IoCType','Permalink', \n 'ResolvedDomains', 'ResolvedIPs', \n 'DetectedUrls', 'RawResponse']])\n display(HTML('<h3>Other results</h3>'))\n display(vt_results.query('Status == \"Success\"'))",
"5 items in input frame\nItems in each category to be submitted to VirusTotal\n(Note: items have pre-filtering to remove obvious erroneous data and false positives, such as private IPaddresses)\n{'ipv4': 0, 'dns': 2, 'url': 2, 'md5_hash': 0, 'sha1_hash': 0, 'sh256_hash': 0}\n--------------------------------------------------------------------------------\nInvalid observable format: \"wh401k.org\", type \"dns\", status: Observable does not match expected pattern for dns - skipping. (Source index 4)\nInvalid observable format: \"wh401k.org\", type \"dns\", status: Observable does not match expected pattern for dns - skipping. (Source index 0)\nSubmitting observables: \"http://wh401k.org/getps\"\", type \"url\" to VT. (Source index 4)\nError in response submitting observables: \"http://wh401k.org/getps\"\", type \"url\" http status is 403. Response: None (Source index 4)\nSubmitting observables: \"http://wh401k.org/getps\"</decoded>\", type \"url\" to VT. (Source index 0)\nError in response submitting observables: \"http://wh401k.org/getps\"</decoded>\", type \"url\" http status is 403. Response: None (Source index 0)\nSubmission complete. 4 responses from 5 input rows\n"
]
],
[
[
"To view the raw response for a specific row.\n```\nimport json\nrow_idx = 0 # The row number from one of the above dataframes\nraw_response = json.loads(pos_vt_results['RawResponse'].loc[row_idx])\nraw_response\n```",
"_____no_output_____"
],
[
"<a id='cmdlineonotherhosts'></a>[Contents](#toc)\n# Alert command line - Occurrence on other hosts in workspace\nTo get a sense of whether the alert process is something that is occuring on other hosts, run this section.\n\nThis might tell you that the alerted process is actually a commonly-run process and the alert is a false positive. Alternatively, it may tell you that a real infection or attack is happening on other hosts in your environment.",
"_____no_output_____"
]
],
[
[
"# set the origin time to the time of our alert\nquery_times = mas.QueryTime(units='day', before=5, max_before=20,\n after=1, max_after=10,\n origin_time=security_alert.origin_time)\nquery_times.display()",
"_____no_output_____"
],
[
"# API ILLUSTRATION - Find the query to use\nqry.list_queries()",
"_____no_output_____"
],
[
"# API ILLUSTRATION - What does the query look like?\nqry.query_help('list_hosts_matching_commandline')",
"Query: list_hosts_matching_commandline\nRetrieves processes on other hosts with matching commandline\nDesigned to be executed with data_source: process_create\nSupported data families: DataFamily.WindowsSecurity, DataFamily.LinuxSecurity\nSupported data environments: DataEnvironment.LogAnalytics\nQuery parameters:\n['add_query_items', 'subscription_filter', 'process_name', 'start', 'end', 'host_filter_neq', 'commandline']\nOptional parameters:\nadd_query_items\nQuery:\n{table}\n{query_project}\n| where {subscription_filter}\n| where {host_filter_neq}\n| where TimeGenerated >= datetime({start})\n| where TimeGenerated <= datetime({end})\n| where NewProcessName endswith '{process_name}'\n| where CommandLine =~ '{commandline}'\n{add_query_items}\n"
],
[
"# This query needs a commandline parameter which isn't supplied\n# by default from the the alert \n# - so extract and escape this from the process\nif not security_alert.primary_process:\n raise ValueError('This alert has no process entity. This section is not applicable.')\n\nproc_match_in_ws = None\ncommandline = security_alert.primary_process.CommandLine\ncommandline = mas.utility.escape_windows_path(commandline)\n\nif commandline.strip():\n proc_match_in_ws = qry.list_hosts_matching_commandline(provs=[query_times, security_alert],\n commandline=commandline)\nelse:\n print('process has empty commandline')\n# Check the results\nif proc_match_in_ws is None or proc_match_in_ws.empty:\n print('No proceses with matching commandline found in on other hosts in workspace')\n print('between', query_times.start, 'and', query_times.end)\nelse:\n hosts = proc_match_in_ws['Computer'].drop_duplicates().shape[0]\n processes = proc_match_in_ws.shape[0]\n print('{numprocesses} proceses with matching commandline found on {numhosts} hosts in workspace'\\\n .format(numprocesses=processes, numhosts=hosts))\n print('between', query_times.start, 'and', query_times.end)\n print('To examine these execute the dataframe \\'{}\\' in a new cell'.format('proc_match_in_ws'))\n print(proc_match_in_ws[['TimeCreatedUtc','Computer', 'NewProcessName', 'CommandLine']].head())\n ",
"No proceses with matching commandline found in on other hosts in workspace\nbetween 2019-02-08 22:04:16 and 2019-02-14 22:04:16\n"
]
],
[
[
"<a id='host_logons'></a>[Contents](#toc)\n# Host Logons\nThis section retrieves the logon events on the host in the alert.\n\nYou may want to use the query times to search over a broader range than the default.",
"_____no_output_____"
]
],
[
[
"# set the origin time to the time of our alert\nquery_times = mas.QueryTime(units='day', origin_time=security_alert.origin_time,\n before=1, after=0, max_before=20, max_after=1)\nquery_times.display()",
"_____no_output_____"
]
],
[
[
"<a id='logonaccount'></a>[Contents](#toc)\n## Alert Logon Account\nThe logon associated with the process in the alert.",
"_____no_output_____"
]
],
[
[
"logon_id = security_alert.get_logon_id()\n\nif logon_id:\n if logon_id in ['0x3e7', '0X3E7', '-1', -1]:\n print('Cannot retrieve single logon event for system logon id '\n '- please continue with All Host Logons below.')\n else:\n logon_event = qry.get_host_logon(provs=[query_times, security_alert])\n nbdisp.display_logon_data(logon_event, security_alert)\nelse:\n print('No account entity in the source alert or the primary account had no logonId value set.')",
"### Account Logon\nAccount: MSTICAdmin\nAccount Domain: MSTICAlertsWin1\nLogon Time: 2019-02-13 22:03:42.283000\nLogon type: 4 (Batch)\nUser Id/SID: S-1-5-21-996632719-2361334927-4038480536-500\n SID S-1-5-21-996632719-2361334927-4038480536-500 is administrator\n SID S-1-5-21-996632719-2361334927-4038480536-500 is local machine or domain account\nSession id '0x1e821b5' \nSubject (source) account: WORKGROUP/MSTICAlertsWin1$\nLogon process: Advapi \nAuthentication: Negotiate\nSource IpAddress: -\nSource Host: MSTICAlertsWin1\nLogon status: \n\n"
]
],
[
[
"### All Host Logons\nSince the number of logon events may be large and, in the case of system logons, very repetitive, we use clustering to try to identity logons with unique characteristics.\n\nIn this case we use the numeric score of the account name and the logon type (i.e. interactive, service, etc.). The results of the clustered logons are shown below along with a more detailed, readable printout of the logon event information. The data here will vary depending on whether this is a Windows or Linux host.",
"_____no_output_____"
]
],
[
[
"from msticpy.sectools.eventcluster import dbcluster_events, add_process_features, _string_score\n\nhost_logons = qry.list_host_logons(provs=[query_times, security_alert])\nif host_logons is not None and not host_logons.empty:\n logon_features = host_logons.copy()\n logon_features['AccountNum'] = host_logons.apply(lambda x: _string_score(x.Account), axis=1)\n logon_features['LogonHour'] = host_logons.apply(lambda x: x.TimeGenerated.hour, axis=1)\n\n # you might need to play around with the max_cluster_distance parameter.\n # decreasing this gives more clusters.\n (clus_logons, _, _) = dbcluster_events(data=logon_features, time_column='TimeGenerated',\n cluster_columns=['AccountNum',\n 'LogonType'],\n max_cluster_distance=0.0001)\n print('Number of input events:', len(host_logons))\n print('Number of clustered events:', len(clus_logons))\n print('\\nDistinct host logon patterns:')\n display(clus_logons.sort_values('TimeGenerated'))\nelse:\n print('No logon events found for host.')",
"Number of input events: 22\nNumber of clustered events: 3\n\nDistinct host logon patterns:\n"
],
[
"# Display logon details\nnbdisp.display_logon_data(clus_logons, security_alert)",
"### Account Logon\nAccount: MSTICAdmin\nAccount Domain: MSTICAlertsWin1\nLogon Time: 2019-02-13 22:03:42.283000\nLogon type: 4 (Batch)\nUser Id/SID: S-1-5-21-996632719-2361334927-4038480536-500\n SID S-1-5-21-996632719-2361334927-4038480536-500 is administrator\n SID S-1-5-21-996632719-2361334927-4038480536-500 is local machine or domain account\nSession id '0x1e821b5' \nSubject (source) account: WORKGROUP/MSTICAlertsWin1$\nLogon process: Advapi \nAuthentication: Negotiate\nSource IpAddress: -\nSource Host: MSTICAlertsWin1\nLogon status: \n\n### Account Logon\nAccount: SYSTEM\nAccount Domain: NT AUTHORITY\nLogon Time: 2019-02-13 21:10:58.540000\nLogon type: 5 (Service)\nUser Id/SID: S-1-5-18\n SID S-1-5-18 is LOCAL_SYSTEM\nSession id '0x3e7' System logon session\n\nSubject (source) account: WORKGROUP/MSTICAlertsWin1$\nLogon process: Advapi \nAuthentication: Negotiate\nSource IpAddress: -\nSource Host: -\nLogon status: \n\n### Account Logon\nAccount: DWM-2\nAccount Domain: Window Manager\nLogon Time: 2019-02-12 22:22:21.240000\nLogon type: 2 (Interactive)\nUser Id/SID: S-1-5-90-0-2\nSession id '0x106b458' \nSubject (source) account: WORKGROUP/MSTICAlertsWin1$\nLogon process: Advapi \nAuthentication: Negotiate\nSource IpAddress: -\nSource Host: -\nLogon status: \n\n"
]
],
[
[
"### Comparing All Logons with Clustered results relative to Alert time line",
"_____no_output_____"
]
],
[
[
"# Show timeline of events - all logons + clustered logons\nif host_logons is not None and not host_logons.empty:\n nbdisp.display_timeline(data=host_logons, overlay_data=clus_logons,\n alert=security_alert, \n source_columns=['Account', 'LogonType'],\n title='All Host Logons')",
"_____no_output_____"
]
],
[
[
"### View Process Session and Logon Events in Timelines\nThis shows the timeline of the clustered logon events with the process tree obtained earlier. This allows you to get a sense of which logon was responsible for the process tree session whether any additional logons (e.g. creating a process as another user) might be associated with the alert timeline.\n\n*Note you should use the pan and zoom tools to align the timelines since the data may be over different time ranges.*",
"_____no_output_____"
]
],
[
[
"# Show timeline of events - all events\nif host_logons is not None and not host_logons.empty:\n nbdisp.display_timeline(data=clus_logons, source_columns=['Account', 'LogonType'],\n alert=security_alert,\n title='Clustered Host Logons', height=200)\n try:\n nbdisp.display_timeline(data=process_tree, alert=security_alert, title='Alert Process Session', height=200)\n except NameError:\n print('process_tree not available for this alert.')",
"_____no_output_____"
],
[
"# Counts of Logon types by Account\nif host_logons is not None and not host_logons.empty:\n display(host_logons[['Account', 'LogonType', 'TimeGenerated']]\n .groupby(['Account','LogonType']).count()\n .rename(columns={'TimeGenerated': 'LogonCount'}))",
"_____no_output_____"
]
],
[
[
"<a id='failed logons'></a>[Contents](#toc)\n## Failed Logons",
"_____no_output_____"
]
],
[
[
"failedLogons = qry.list_host_logon_failures(provs=[query_times, security_alert])\nif failedLogons.shape[0] == 0:\n display(print('No logon failures recorded for this host between {security_alert.start} and {security_alert.start}'))\n\nfailedLogons",
"_____no_output_____"
]
],
[
[
"<a id='appendices'></a>[Contents](#toc)\n# Appendices",
"_____no_output_____"
],
[
"## Available DataFrames",
"_____no_output_____"
]
],
[
[
"print('List of current DataFrames in Notebook')\nprint('-' * 50)\ncurrent_vars = list(locals().keys())\nfor var_name in current_vars:\n if isinstance(locals()[var_name], pd.DataFrame) and not var_name.startswith('_'):\n print(var_name)",
"List of current DataFrames in Notebook\n--------------------------------------------------\nmydf\nalert_counts\nalert_list\nrelated_alerts\nprocess_tree\nprocesses_on_host\nfeature_procs\nclus_events\nsource_processes\nioc_df\ndec_df\nioc_dec_df\nvt_results\npos_vt_results\nproc_match_in_ws\nlogon_event\nhost_logons\nlogon_features\nclus_logons\nfailedLogons\n"
]
],
[
[
"## Saving Data to CSV\nTo save the contents of a pandas DataFrame to an CSV\nuse the following syntax\n```\nhost_logons.to_csv('host_logons.csv')\n```",
"_____no_output_____"
],
[
"## Saving Data to Excel\nTo save the contents of a pandas DataFrame to an Excel spreadsheet\nuse the following syntax\n```\nwriter = pd.ExcelWriter('myWorksheet.xlsx')\nmy_data_frame.to_excel(writer,'Sheet1')\nwriter.save()\n```",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7c7b1ee34b3815dea84d44068f1a80eb36bfe2d | 3,159 | ipynb | Jupyter Notebook | .ipynb_checkpoints/8 - Real World Implementation Problems-checkpoint.ipynb | alexsludds/MIT-6.058-Notebook | c9b062e350d3f1655f7545dbca2ea67c3d1f22ee | [
"MIT"
] | 4 | 2018-02-05T16:28:11.000Z | 2021-07-28T14:42:24.000Z | .ipynb_checkpoints/8 - Real World Implementation Problems-checkpoint.ipynb | alexsludds/MIT-6.058-Notebook | c9b062e350d3f1655f7545dbca2ea67c3d1f22ee | [
"MIT"
] | null | null | null | .ipynb_checkpoints/8 - Real World Implementation Problems-checkpoint.ipynb | alexsludds/MIT-6.058-Notebook | c9b062e350d3f1655f7545dbca2ea67c3d1f22ee | [
"MIT"
] | 2 | 2018-12-28T00:46:05.000Z | 2021-07-28T14:42:25.000Z | 26.546218 | 177 | 0.528965 | [
[
[
"import numpy as np\nglobal np\nimport scipy as sp\nimport scipy.signal as signal\nimport matplotlib.pyplot as plt\nimport IPython.display as ipd\nfrom ipywidgets import interact\nimport sys\nimport wave\nsys.path.append(\"../backend/\")\n%matplotlib inline\n\ndef load_wav(filepath, t_start = 0, t_end = 2**32) :\n \"\"\"Load a wave file, which must be 22050Hz and 16bit and must be either\n mono or stereo. \n Inputs:\n filepath: audio file\n t_start, t_end: (optional) subrange of file to load (in seconds)\n Returns:\n a numpy floating-point array with a range of [-1, 1]\n \"\"\"\n wf = wave.open(filepath)\n num_channels, sampwidth, fs, end, comptype, compname = wf.getparams()\n \n # for now, we will only accept 16 bit files at 22k\n assert(sampwidth == 2)\n assert(fs == 22050)\n\n # start frame, end frame, and duration in frames\n f_start = int(t_start * fs)\n f_end = min(int(t_end * fs), end)\n frames = f_end - f_start\n\n wf.setpos(f_start)\n raw_bytes = wf.readframes(frames)\n # convert raw data to numpy array, assuming int16 arrangement\n samples = np.fromstring(raw_bytes, dtype = np.int16)\n\n # convert from integer type to floating point, and scale to [-1, 1]\n samples = samples.astype(np.float)\n samples *= (1 / 32768.0)\n\n if num_channels == 1:\n return samples\n\n elif num_channels == 2:\n return 0.5 * (samples[0::2] + samples[1::2])\n\n else:\n raise('Can only handle mono or stereo wave files') ",
"_____no_output_____"
]
],
[
[
"Today, in preparation for our final projects, we are going to talk about problems that can arise with hardware implementations, and how we can avoid this with good design.",
"_____no_output_____"
],
[
"## Eliminating 60Hz Noise",
"_____no_output_____"
],
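[
"A sketch of one common remedy (my addition, not part of the original notes): 60Hz mains hum is narrowband, so an IIR notch filter removes it with little damage to the rest of the signal. Assuming the 22050Hz files produced by `load_wav` above and the `scipy.signal` import (`signal`):\n```\nfs = 22050.0           # sample rate of our wave files\nf0, Q = 60.0, 30.0     # notch frequency (Hz) and quality factor (larger Q gives a narrower notch)\nb, a = signal.iirnotch(f0, Q, fs)\n# x = load_wav('recording.wav')          # hypothetical input file\n# x_clean = signal.filtfilt(b, a, x)     # zero-phase filtering avoids adding phase distortion\n```",
"_____no_output_____"
],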
[
"## Impedance Mathing",
"_____no_output_____"
],
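[
"A quick numeric sketch (my addition, using assumed example values): when a source with output impedance Z_out drives a load with input impedance Z_in, the connection behaves like a voltage divider, so for signal (voltage) transfer we want Z_in to be much larger than Z_out.\n```\nZ_out = 600.0    # assumed source output impedance (ohms)\nZ_in = 10e3      # assumed load input impedance (ohms)\nprint('fraction of the signal reaching the load:', Z_in / (Z_out + Z_in))\n```",
"_____no_output_____"
],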
[
"## Ground Feedback Loops",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7c7b309527a0a8a6b2054ffdc65c10a9a85524d | 5,250 | ipynb | Jupyter Notebook | 03_email.ipynb | eandreas/secretsanta | 85fbe5aad9945100f34138daabc7b06dea8b5f3e | [
"Apache-2.0"
] | 1 | 2021-10-31T20:32:28.000Z | 2021-10-31T20:32:28.000Z | 03_email.ipynb | eandreas/secretsanta | 85fbe5aad9945100f34138daabc7b06dea8b5f3e | [
"Apache-2.0"
] | null | null | null | 03_email.ipynb | eandreas/secretsanta | 85fbe5aad9945100f34138daabc7b06dea8b5f3e | [
"Apache-2.0"
] | null | null | null | 25.362319 | 114 | 0.516952 | [
[
[
"#hide\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"# default_exp email",
"_____no_output_____"
]
],
[
[
"# Compose and send emails\n\n> Compose and send html emails through an SMTP server using TLS.",
"_____no_output_____"
]
],
[
[
"#hide\nfrom nbdev.showdoc import *",
"_____no_output_____"
],
[
"#export\nimport smtplib\nfrom email.message import EmailMessage\nimport mimetypes\nfrom pathlib2 import Path\nimport re",
"_____no_output_____"
]
],
[
[
"## Complose a message",
"_____no_output_____"
]
],
[
[
"#export\ndef create_html_message(from_address, to_addresses, subject, html, text = None, image_path = Path.cwd()):\n msg = EmailMessage()\n msg['From'] = from_address\n msg['To'] = to_addresses\n msg['Subject'] = subject\n if text is not None:\n msg.set_content(text)\n msg.add_alternative(html, subtype='html')\n \n cid_images = list(re.findall(fr'<img src=\"cid:(.*?)\"', html))\n cid_images.extend(list(re.findall(fr'url\\(cid:(.*?)\\)', html)))\n cid_images = list(set(cid_images))\n for cid_img in cid_images:\n with open(image_path / cid_img, 'rb') as img:\n msg.get_payload()[-1].add_related(img.read(),'image', 'jpeg', cid = cid_img)\n return msg",
"_____no_output_____"
]
],
[
[
"### Add an attachment to a message",
"_____no_output_____"
]
],
[
[
"#export\ndef add_attachmet(msg, path):\n \"Add an attachment with location `path` to the cunnet message `msg`.\"\n # Guess the content type based on the file's extension. Encoding\n # will be ignored, although we should check for simple things like\n # gzip'd or compressed files.\n ctype, encoding = mimetypes.guess_type(path)\n if ctype is None or encoding is not None:\n # No guess could be made, or the file is encoded (compressed), so\n # use a generic bag-of-bits type.\n ctype = 'application/octet-stream'\n maintype, subtype = ctype.split('/', 1)\n with open(path, 'rb') as f:\n msg.add_attachment(f.read(), maintype = maintype, subtype = subtype, filename = path.name)\n return msg",
"_____no_output_____"
]
],
[
[
"## Send a message using SMTP and TLS",
"_____no_output_____"
]
],
[
[
"#export\ndef send_smtp_email(server, tls_port, user, pw, msg):\n \"Send the message `msg` using the specified `server` and `port` - login using `user` and `pw`.\"\n # Create a secure SSL context\n try:\n smtp = smtplib.SMTP(server, tls_port)\n smtp.starttls()\n smtp.login(user, pw)\n smtp.send_message(msg)\n except Exception as e:\n print(e)\n finally:\n smtp.quit()",
"_____no_output_____"
]
],
[
[
"## Examples",
"_____no_output_____"
],
[
"The following is an example on how to compose and send a html-email message.",
"_____no_output_____"
]
],
[
[
"## set user and password of the smtp server\n#user = ''\n#pw = ''\n#\n## send email from and to myself\n#from_email = user\n#to_email = ''\n#\n#html = \"\"\"\n#Hello, this is a test message!\n#<h1>Hello 22!</h1>\n#<img src=\"cid:email.jpg\">\n#<h1>Hello 23!</h1>\n#<img src=\"cid:iceberg.jpg\">\n#\"\"\"\n#\n#msg = create_html_message(from_email, to_email, 'subject', html, image_path=Path(''))\n#add_attachmet(msg, Path(''))\n#\n## uncomment after setting user and pw above\n##send_smtp_email('', 587, user, pw, msg)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7c7b5ec92c67edde976e2385b971534ee537534 | 30,015 | ipynb | Jupyter Notebook | week04_approx_rl/prioritized_replay_dqn.ipynb | hsl89/Practical_RL | 2d9692faf02bfe5523be04d75d03a6c0281c8794 | [
"Unlicense"
] | null | null | null | week04_approx_rl/prioritized_replay_dqn.ipynb | hsl89/Practical_RL | 2d9692faf02bfe5523be04d75d03a6c0281c8794 | [
"Unlicense"
] | null | null | null | week04_approx_rl/prioritized_replay_dqn.ipynb | hsl89/Practical_RL | 2d9692faf02bfe5523be04d75d03a6c0281c8794 | [
"Unlicense"
] | null | null | null | 38.579692 | 1,945 | 0.518941 | [
[
[
"# DQN With Prioritized Replay Buffer\nUse prioritized replay buffer to train a DQN agent.",
"_____no_output_____"
]
],
[
[
"import random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport utils\n\nimport gym\nimport numpy as np\n\nfrom gym.core import ObservationWrapper\nfrom gym.spaces import Box\nimport cv2\nimport os\n\nimport atari_wrappers # adjust env\nfrom framebuffer import FrameBuffer # stack 4 consec images \n",
"_____no_output_____"
],
[
"ENV_NAME = \"BreakoutNoFrameskip-v4\"\n\n# create break-out env\nenv = gym.make(ENV_NAME)\nenv.reset()",
"_____no_output_____"
]
],
[
[
"## Preprocessing\nCrop the important part of the image, then resize to 64 x 64",
"_____no_output_____"
]
],
[
[
"class PreprocessAtariObs(ObservationWrapper):\n def __init__(self, env):\n \"\"\"A gym wrapper that crops, scales image into the desired shapes and grayscales it.\"\"\"\n ObservationWrapper.__init__(self, env)\n\n self.image_size = (1, 64, 64)\n self.observation_space = Box(0.0, 1.0, self.image_size)\n\n def observation(self, img):\n \"\"\"what happens to each observation\"\"\"\n\n # Here's what you need to do:\n # * crop image, remove irrelevant parts\n # * resize image to self.img_size\n # (use imresize from any library you want,\n # e.g. opencv, skimage, PIL, keras)\n # * cast image to grayscale\n # * convert image pixels to (0,1) range, float32 type\n\n # crop the image \n # remove the top part\n img = img[50:]\n\n # resize the image\n img = cv2.resize(img, dsize=(self.image_size[1], self.image_size[2]))\n\n # gray scale\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n # normalize to (0, 1)\n img = img.astype(np.float32) / 255.0\n\n # add channel dimension\n return img[None]\n\n# adjust the env by some wrappers\ndef PrimaryAtariWrap(env, clip_rewards=True):\n assert 'NoFrameskip' in env.spec.id\n\n # This wrapper holds the same action for <skip> frames and outputs\n # the maximal pixel value of 2 last frames (to handle blinking\n # in some envs)\n env = atari_wrappers.MaxAndSkipEnv(env, skip=4)\n\n # This wrapper sends done=True when each life is lost\n # (not all the 5 lives that are givern by the game rules).\n # It should make easier for the agent to understand that losing is bad.\n env = atari_wrappers.EpisodicLifeEnv(env)\n\n # This wrapper laucnhes the ball when an episode starts.\n # Without it the agent has to learn this action, too.\n # Actually it can but learning would take longer.\n env = atari_wrappers.FireResetEnv(env)\n\n # This wrapper transforms rewards to {-1, 0, 1} according to their sign\n if clip_rewards:\n env = atari_wrappers.ClipRewardEnv(env)\n\n # This wrapper is yours :)\n env = PreprocessAtariObs(env)\n return env\n \ndef make_env(clip_rewards=True, seed=None):\n env = gym.make(ENV_NAME) # create raw env\n if seed is not None:\n env.seed(seed)\n env = PrimaryAtariWrap(env, clip_rewards)\n env = FrameBuffer(env, n_frames=4, dim_order='pytorch')\n return env\n\nenv = make_env()\nenv.reset()\nn_actions = env.action_space.n\nstate_shape = env.observation_space.shape\nprint(\"adjusted env with 4 consec images stacked can be created\")",
"adjusted env with 4 consec images stacked can be created\n"
]
],
[
[
"## Model",
"_____no_output_____"
]
],
[
[
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef conv2d_size_out(size, kernel_size, stride):\n \"\"\"\n common use case:\n cur_layer_img_w = conv2d_size_out(cur_layer_img_w, kernel_size, stride)\n cur_layer_img_h = conv2d_size_out(cur_layer_img_h, kernel_size, stride)\n to understand the shape for dense layer's input\n \"\"\"\n return (size - (kernel_size - 1) - 1) // stride + 1\n\nclass DuelingDQNAgent(nn.Module):\n def __init__(self, state_shape, n_actions, epsilon=0):\n super().__init__()\n self.epsilon = epsilon\n self.n_actions = n_actions\n self.state_shape = state_shape\n\n # Define your network body here. Please make sure agent is fully contained here\n # nn.Flatten() can be useful\n kernel_size = 3\n stride = 2\n self.conv1 = nn.Conv2d(4, 16, kernel_size, stride)\n out_size = conv2d_size_out(state_shape[1], kernel_size, stride)\n self.conv2 = nn.Conv2d(16, 32, kernel_size, stride)\n out_size = conv2d_size_out(out_size, kernel_size, stride)\n self.conv3 = nn.Conv2d(32, 64, kernel_size, stride)\n out_size = conv2d_size_out(out_size, kernel_size, stride)\n\n # size of the output tensor after convolution batch_size x 64 x out_size x out_size\n self.linear = nn.Linear(64*out_size*out_size, 256)\n \n # advantage\n self.advantage = nn.Sequential(\n nn.Linear(256, 512),\n nn.ReLU(),\n nn.Linear(512, self.n_actions)\n )\n \n # state value\n self.value = nn.Sequential(\n nn.Linear(256, 512),\n nn.ReLU(),\n nn.Linear(512, 1)\n )\n \n def forward(self, state_t):\n \"\"\"\n takes agent's observation (tensor), returns qvalues (tensor)\n :param state_t: a batch of 4-frame buffers, shape = [batch_size, 4, h, w]\n \"\"\"\n # Use your network to compute qvalues for given state\n # qvalues = <YOUR CODE>\n t = self.conv1(state_t)\n t = F.relu(t)\n t = self.conv2(t)\n t = F.relu(t)\n t = self.conv3(t)\n t = F.relu(t)\n\n t = t.view(state_t.shape[0], -1)\n t = self.linear(t)\n t = F.relu(t)\n \n # compute advantage and state value as different heads\n advantage = self.advantage(t)\n value = self.value(t)\n \n qvalues = value + advantage - advantage.mean(dim=1, keepdim=True)\n\n assert qvalues.requires_grad, \"qvalues must be a torch tensor with grad\"\n assert len(\n qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == n_actions\n\n return qvalues\n\n def get_qvalues(self, states):\n \"\"\"\n like forward, but works on numpy arrays, not tensors\n \"\"\"\n model_device = next(self.parameters()).device\n states = torch.tensor(states, device=model_device, dtype=torch.float)\n qvalues = self.forward(states)\n return qvalues.data.cpu().numpy()\n\n def sample_actions(self, qvalues):\n \"\"\"pick actions given qvalues. Uses epsilon-greedy exploration strategy. \"\"\"\n epsilon = self.epsilon\n batch_size, n_actions = qvalues.shape\n\n random_actions = np.random.choice(n_actions, size=batch_size)\n best_actions = qvalues.argmax(axis=-1)\n\n should_explore = np.random.choice(\n [0, 1], batch_size, p=[1-epsilon, epsilon])\n return np.where(should_explore, random_actions, best_actions)\n \n# Evaluate the agent\ndef evaluate(env, agent, n_games=1, greedy=False, t_max=10000):\n rewards = []\n for _ in range(n_games):\n reward = 0.0\n s = env.reset()\n for _ in range(t_max):\n qvalues = agent.get_qvalues([s])\n action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(\n qvalues)[0]\n s, r, done, _ = env.step(action)\n reward += r\n if done:\n break\n \n rewards.append(reward)\n return np.mean(rewards)\n",
"_____no_output_____"
]
],
[
[
"## Compute TD loss",
"_____no_output_____"
]
],
[
[
"def compute_td_loss(states, actions, rewards, next_states, is_done,\n agent, target_network,\n gamma=0.99,\n device=device, check_shapes=False):\n \"\"\" Compute td loss using torch operations only. Use the formulae above. '''\n \n objective of agent is \n \\hat Q(s_t, a_t) = r_t + \\gamma Target(s_{t+1}, argmax_{a} Q(s_{t+1}, a)) \n \"\"\"\n states = torch.tensor(states, device=device, dtype=torch.float) # shape: [batch_size, *state_shape]\n\n # for some torch reason should not make actions a tensor\n actions = torch.tensor(actions, device=device, dtype=torch.long) # shape: [batch_size]\n rewards = torch.tensor(rewards, device=device, dtype=torch.float) # shape: [batch_size]\n # shape: [batch_size, *state_shape]\n next_states = torch.tensor(next_states, device=device, dtype=torch.float)\n \n is_done = torch.tensor(\n is_done,\n device=device,\n dtype=torch.float\n ) # shape: [batch_size]\n \n is_not_done = 1 - is_done\n \n # get q-values for all actions in current states\n predicted_qvalues = agent(states)\n \n # compute q-values for all actions in next states\n predicted_next_qvalues = target_network(next_states)\n \n # best action in next state\n next_best_actions = torch.argmax(agent(states), dim=1)\n\n # select q-values for chosen actions\n predicted_qvalues_for_actions = predicted_qvalues[range(\n len(actions)), actions]\n \n # compute the objective of the agent\n next_state_values = predicted_next_qvalues[range(\n len(actions)), next_best_actions] \n \n # assert next_state_values.dim(\n # == 1 and next_state_values.shape[0] == states.shape[0], \"must predict one value per state\"\n\n # compute \"target q-values\" for loss - it's what's inside square parentheses in the above formula.\n # at the last state use the simplified formula: Q(s,a) = r(s,a) since s' doesn't exist\n # you can multiply next state values by is_not_done to achieve this.\n # target_qvalues_for_actions = <YOUR CODE>\n\n target_qvalues_for_actions = rewards + next_state_values * is_not_done\n\n # mean squared error loss adjusted by importance sampling weights to minimize\n #loss = torch.mean(\n # weights * torch.pow(predicted_qvalues_for_actions - target_qvalues_for_actions.detach(), 2)\n #)\n \n # return the TD-loss\n \n if check_shapes:\n assert predicted_next_qvalues.data.dim(\n ) == 2, \"make sure you predicted q-values for all actions in next state\"\n assert next_state_values.data.dim(\n ) == 1, \"make sure you computed V(s') as maximum over just the actions axis and not all axes\"\n assert target_qvalues_for_actions.data.dim(\n ) == 1, \"there's something wrong with target q-values, they must be a vector\"\n \n return target_qvalues_for_actions - predicted_qvalues_for_actions",
"_____no_output_____"
]
],
[
[
"## Test the memory need of the replay buffer",
"_____no_output_____"
],
[
"Init DQN agent and play a total 10^4 time steps",
"_____no_output_____"
]
],
[
[
"def play_and_record(initial_state, agent, env, exp_replay, n_steps=1):\n \"\"\"\n Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer. \n Whenever game ends, add record with done=True and reset the game.\n It is guaranteed that env has done=False when passed to this function.\n\n PLEASE DO NOT RESET ENV UNLESS IT IS \"DONE\"\n\n :returns: return sum of rewards over time and the state in which the env stays\n \"\"\"\n s = initial_state\n sum_rewards = 0\n\n # Play the game for n_steps as per instructions above\n sum_rewards = 0.0 \n for _ in range(n_steps):\n qvalues = agent.get_qvalues([s])\n action = agent.sample_actions(qvalues)[0] \n next_s, r, done, _ = env.step(action)\n\n exp_replay.add((s, action, r, next_s, done))\n sum_rewards += r\n if done:\n s = env.reset()\n else:\n s = next_s\n\n return sum_rewards, s\n\n",
"_____no_output_____"
],
[
"import utils\nimport imp\nimport replay_buffer\nimp.reload(replay_buffer)\n\nfrom replay_buffer import PrioritizedReplayBuffer\n\n\n#n_actions = env.action_space.n\n#state_shape = env.observation_space.shape\n\nagent = DuelingDQNAgent(state_shape=state_shape, n_actions=n_actions)\nexp_replay = PrioritizedReplayBuffer(10**4)\n\n'''\nfor i in range(100):\n state = env.reset()\n if not utils.is_enough_ram(min_available_gb=0.1):\n print(\"\"\"\n Less than 100 Mb RAM available. \n Make sure the buffer size in not too huge.\n Also check, maybe other processes consume RAM heavily.\n \"\"\"\n )\n break\n play_and_record(state, agent, env, exp_replay, n_steps=10**2)\n if len(exp_replay) == 10**4:\n break\nprint(len(exp_replay))\n\ndel exp_replay\n'''\n",
"_____no_output_____"
],
[
"seed = 42\n\n# env\nn_lives = 5\n\n\n# training params\nT = 1 # number of experiences to get from env before each update\nbatch_size = 16\ntotal_steps = 3 * 10**1 # total steps to train the agent\ndecay_steps = 10**1 # steps to decay the epsilon, \n # after the decay_steps, epsilon stops decaying\n # and the agent explores with a fixed probability\nmax_grad_norm = 50 \n\n \nrefresh_target_network_freq = 5000 # freqency to update the target network\nlearning_rate = 1e-4\n\n\n# agent \ngamma = 0.99 # discount factor\ninit_epsilon = 1.0\nfinal_epsilon = 0.1\n\n# buffer\nbuffer_size = 10**4\n\n# eval\nloss_freq = 50 \neval_freq = 5000\n\n# logs \nckpt_dir = 'logs'\nckpt_file = 'prioritized_experience_replay_ckpt.pth'\nmetrics_file = 'prioritized_experience_replay_metrics.pth'\nckpt_freq = 10*5000 # Debug param",
"_____no_output_____"
],
[
"# main loop\n\nenv = make_env(seed)\n\nstate_shape = env.observation_space.shape\nn_actions = env.action_space.n\nstate = env.reset()\n\nagent = DuelingDQNAgent(state_shape, n_actions, epsilon=1).to(device)\ntarget_network = DuelingDQNAgent(state_shape, n_actions).to(device)\ntarget_network.load_state_dict(agent.state_dict())\n\nexp_replay = PrioritizedReplayBuffer(buffer_size)\n\n\n\nopt = torch.optim.Adam(agent.parameters(), lr=learning_rate)\n\nmean_rw_history = []\ntd_loss_history = []\ngrad_norm_history = []\ninitial_state_v_history = []\n\nprint(\"Starts training on {}\".format(next(agent.parameters()).device))\n\n# populate the buffer with 128 samples\ninit_size = 128\nplay_and_record(state, agent, env, exp_replay, init_size)\n\nfor step in range(total_steps):\n agent.epsilon = utils.linear_decay(init_epsilon, final_epsilon, step, decay_steps)\n \n # play for $T time steps and cache the exprs to the buffer\n _, state = play_and_record(state, agent, env, exp_replay, T)\n \n b_idx, obses_t, actions, rewards, obses_tp1, dones, weights = exp_replay.sample(\n batch_size)\n \n # td loss for each sample\n td_loss = compute_td_loss(\n states=obses_t, \n actions=actions, \n rewards=rewards, \n next_states=obses_tp1, \n is_done=dones,\n agent=agent,\n target_network=target_network,\n gamma=gamma,\n device=device,\n check_shapes=True)\n \n '''\n A batch of samples from prioritized replay looks like:\n (states, actions, rewards, next_states, weights, is_done)\n weights here are importance sampling weights\n\n Basically:\n Loss = weights * MSE\n \n '''\n \n # compute MSE adjusted by importance sampling weights\n # and backprop\n weights = torch.tensor(weights, dtype=torch.float32)\n #print(weights, torch.pow(td_loss, 2))\n loss = torch.mean(weights * torch.pow(td_loss, 2))\n loss.backward()\n grad_norm = nn.utils.clip_grad_norm_(agent.parameters(), max_grad_norm)\n opt.step()\n opt.zero_grad()\n \n # update the priorities of sampled exprs\n exp_replay.batch_update(b_idx, np.abs(td_loss.detach().cpu().numpy()))\n \n # increase the importance sampling hyperparameter b gradually to 1\n exp_replay.increment_b()\n \n if step % loss_freq == 0:\n # save MSE without importance sampling\n loss = torch.mean(torch.pow(td_loss, 2))\n td_loss_history.append(loss.cpu().item())\n \n if step % refresh_target_network_freq == 0:\n target_network.load_state_dict(agent.state_dict())\n \n if step % eval_freq == 0:\n mean_rw_history.append(evaluate(\n make_env(clip_rewards=True, seed=step),\n agent, n_games=3*n_lives, greedy=True\n ))\n \n initial_state_q_values = agent.get_qvalues(\n [make_env(seed=step).reset()]\n )\n \n initial_state_v_history.append(np.max(initial_state_q_values))\n \n print(\"buffer size = %i, epsilon: %.5f\" % \n (len(exp_replay), agent.epsilon))\n \n \n # TODO \n # checkpointing\n if step % ckpt_freq == 0:\n print(\"checkpointing ...\")\n \n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n \n # check point model and optimizer\n checkpoint = {\n \"step\": step,\n \"agent\": agent.state_dict(),\n \"epsilon\": agent.epsilon,\n \"target_network\": target_network.state_dict(),\n \"optimizer\": opt.state_dict(),\n \"replay_buffer\": exp_replay\n }\n \n torch.save(checkpoint, os.path.join(ckpt_dir, ckpt_file))\n \n # save the performance metric \n metrics = {\n \"mean_rw_history\": mean_rw_history,\n \"td_loss_history\": td_loss_history,\n \"grad_norm_history\": grad_norm_history,\n \"initial_state_v_history\": initial_state_v_history\n }\n \n torch.save(metrics, os.path.join(ckpt_dir, 
metrics_file))\n \n \n# check point model and optimizer\ncheckpoint = {\n \"step\": step,\n \"agent\": agent.state_dict(),\n \"epsilon\": agent.epsilon,\n \"target_network\": target_network.state_dict(),\n \"optimizer\": opt.state_dict(),\n \"replay_buffer\": exp_replay\n}\n\ntorch.save(checkpoint, os.path.join(ckpt_dir, ckpt_file))\n\n# save the performance metric \nmetrics = {\n \"mean_rw_history\": mean_rw_history,\n \"td_loss_history\": td_loss_history,\n \"grad_norm_history\": grad_norm_history,\n \"initial_state_v_history\": initial_state_v_history\n}\n\ntorch.save(metrics, os.path.join(ckpt_dir, metrics_file))",
"Starts training on cpu\nbuffer size = 129, epsilon: 1.00000\ncheckpointing ...\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7c7ddc2e7df9544ed0092d27db5deaa28ac3163 | 707,567 | ipynb | Jupyter Notebook | tf.version.1/06.images/01.image_segmentation.ipynb | jinhwanhan/tensorflow.tutorials | f6a2c98a204174a76d75f7a6665936347079db35 | [
"Apache-2.0"
] | 57 | 2018-09-12T16:48:15.000Z | 2021-02-19T10:51:04.000Z | tf.version.1/06.images/01.image_segmentation.ipynb | jinhwanhan/tensorflow.tutorials | f6a2c98a204174a76d75f7a6665936347079db35 | [
"Apache-2.0"
] | null | null | null | tf.version.1/06.images/01.image_segmentation.ipynb | jinhwanhan/tensorflow.tutorials | f6a2c98a204174a76d75f7a6665936347079db35 | [
"Apache-2.0"
] | 15 | 2018-10-10T07:27:42.000Z | 2020-02-02T09:08:32.000Z | 696.424213 | 297,016 | 0.945406 | [
[
[
"# Image Segmentation\n\n* This code is **only** `tensorflow API` version for [TensorFlow tutorials/Image Segmentation](https://github.com/tensorflow/models/blob/master/samples/outreach/blogs/segmentation_blogpost/image_segmentation.ipynb) which is made of `tf.keras`.\n* You can see the detail description [tutorial link](https://github.com/tensorflow/models/blob/master/samples/outreach/blogs/segmentation_blogpost/image_segmentation.ipynb) \n\n* I use below dataset instead of [carvana-image-masking-challenge dataset](https://www.kaggle.com/c/carvana-image-masking-challenge/rules) in TensorFlow Tutorials which is a kaggle competition dataset.\n * carvana-image-masking-challenge dataset: Too large dataset (14GB)\n* [Gastrointestinal Image ANAlys Challenges (GIANA)](https://giana.grand-challenge.org) Dataset (345MB)\n * Train data: 300 images with RGB channels (bmp format)\n * Train lables: 300 images with 1 channels (bmp format)\n * Image size: 574 x 500",
"_____no_output_____"
]
],
[
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport functools\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport matplotlib as mpl\nmpl.rcParams['axes.grid'] = False\nmpl.rcParams['figure.figsize'] = (12,12)\n\nfrom sklearn.model_selection import train_test_split\nfrom PIL import Image\nfrom IPython.display import clear_output\n\nimport tensorflow as tf\nslim = tf.contrib.slim\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nsess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"",
"/home/lab4all/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n"
]
],
[
[
"# Get all the files \nSince this tutorial will be using a dataset from [Giana Dataset](https://giana.grand-challenge.org/Dates/).",
"_____no_output_____"
]
],
[
[
"# Unfortunately you cannot downlaod GIANA dataset from website\n# So I upload zip file on my dropbox\n# if you want to download from my dropbox uncomment below\n#!wget https://goo.gl/mxikqa\n#!mv mxikqa sd_train.zip\n#!unzip sd_train.zip\n#!mkdir ../../datasets\n#!mv sd_train ../../datasets\n#!rm sd_train.zip",
"_____no_output_____"
],
[
"dataset_dir = '../../datasets/sd_train'\nimg_dir = os.path.join(dataset_dir, \"train\")\nlabel_dir = os.path.join(dataset_dir, \"train_labels\")",
"_____no_output_____"
],
[
"x_train_filenames = [os.path.join(img_dir, filename) for filename in os.listdir(img_dir)]\nx_train_filenames.sort()\ny_train_filenames = [os.path.join(label_dir, filename) for filename in os.listdir(label_dir)]\ny_train_filenames.sort()",
"_____no_output_____"
],
[
"x_train_filenames, x_test_filenames, y_train_filenames, y_test_filenames = \\\n train_test_split(x_train_filenames, y_train_filenames, test_size=0.2, random_state=219)",
"_____no_output_____"
],
[
"num_train_examples = len(x_train_filenames)\nnum_test_examples = len(x_test_filenames)\n\nprint(\"Number of training examples: {}\".format(num_train_examples))\nprint(\"Number of test examples: {}\".format(num_test_examples))",
"Number of training examples: 240\nNumber of test examples: 60\n"
]
],
[
[
"### Here's what the paths look like",
"_____no_output_____"
]
],
[
[
"x_train_filenames[:10]",
"_____no_output_____"
],
[
"y_train_filenames[:10]",
"_____no_output_____"
],
[
"y_test_filenames[:10]",
"_____no_output_____"
]
],
[
[
"# Visualize\nLet's take a look at some of the examples of different images in our dataset. ",
"_____no_output_____"
]
],
[
[
"display_num = 5\n\nr_choices = np.random.choice(num_train_examples, display_num)\n\nplt.figure(figsize=(10, 15))\nfor i in range(0, display_num * 2, 2):\n img_num = r_choices[i // 2]\n x_pathname = x_train_filenames[img_num]\n y_pathname = y_train_filenames[img_num]\n \n plt.subplot(display_num, 2, i + 1)\n plt.imshow(Image.open(x_pathname))\n plt.title(\"Original Image\")\n \n example_labels = Image.open(y_pathname)\n label_vals = np.unique(example_labels)\n \n plt.subplot(display_num, 2, i + 2)\n plt.imshow(example_labels)\n plt.title(\"Masked Image\")\n \nplt.suptitle(\"Examples of Images and their Masks\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Set up",
"_____no_output_____"
],
[
"Let’s begin by setting up some parameters. We’ll standardize and resize all the shapes of the images. We’ll also set up some training parameters: ",
"_____no_output_____"
]
],
[
[
"# Set hyperparameters\nimage_size = 128\nimg_shape = (image_size, image_size, 3)\nbatch_size = 8\nmax_epochs = 100\nprint_steps = 50\nsave_epochs = 50\ntrain_dir = 'train/exp1'",
"_____no_output_____"
]
],
[
[
"# Build our input pipeline with `tf.data`\nSince we begin with filenames, we will need to build a robust and scalable data pipeline that will play nicely with our model. If you are unfamiliar with **tf.data** you should check out my other tutorial introducing the concept! \n\n### Our input pipeline will consist of the following steps:\n1. Read the bytes of the file in from the filename - for both the image and the label. Recall that our labels are actually images with each pixel annotated as car or background (1, 0). \n2. Decode the bytes into an image format\n3. Apply image transformations: (optional, according to input parameters)\n * `resize` - Resize our images to a standard size (as determined by eda or computation/memory restrictions)\n * The reason why this is optional is that U-Net is a fully convolutional network (e.g. with no fully connected units) and is thus not dependent on the input size. However, if you choose to not resize the images, you must use a batch size of 1, since you cannot batch variable image size together\n * Alternatively, you could also bucket your images together and resize them per mini-batch to avoid resizing images as much, as resizing may affect your performance through interpolation, etc.\n * `hue_delta` - Adjusts the hue of an RGB image by a random factor. This is only applied to the actual image (not our label image). The `hue_delta` must be in the interval `[0, 0.5]` \n * `horizontal_flip` - flip the image horizontally along the central axis with a 0.5 probability. This transformation must be applied to both the label and the actual image. \n * `width_shift_range` and `height_shift_range` are ranges (as a fraction of total width or height) within which to randomly translate the image either horizontally or vertically. This transformation must be applied to both the label and the actual image. \n * `rescale` - rescale the image by a certain factor, e.g. 1/ 255.\n4. Shuffle the data, repeat the data (so we can iterate over it multiple times across epochs), batch the data, then prefetch a batch (for efficiency).\n\nIt is important to note that these transformations that occur in your data pipeline must be symbolic transformations. ",
"_____no_output_____"
],
[
"#### Why do we do these image transformations?\nThis is known as **data augmentation**. Data augmentation \"increases\" the amount of training data by augmenting them via a number of random transformations. During training time, our model would never see twice the exact same picture. This helps prevent [overfitting](https://developers.google.com/machine-learning/glossary/#overfitting) and helps the model generalize better to unseen data.",
"_____no_output_____"
],
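[
"The augmentation options described above (hue shifts, flips, translations) are not wired into the baseline pipeline below, which only resizes and rescales. As a rough, hypothetical sketch (the helper name `_augment` is my own, not part of this notebook), the `hue_delta` and `horizontal_flip` transforms could look like this with the TF1 API:\n```\ndef _augment(img, label_img, hue_delta=0.1, horizontal_flip=True):\n    if hue_delta:\n        # hue jitter is applied to the image only, never to the label mask\n        img = tf.image.random_hue(img, hue_delta)\n    if horizontal_flip:\n        flip_prob = tf.random_uniform([], 0.0, 1.0)\n        img, label_img = tf.cond(flip_prob > 0.5,\n                                 lambda: (tf.image.flip_left_right(img),\n                                          tf.image.flip_left_right(label_img)),\n                                 lambda: (img, label_img))\n    return img, label_img\n```\nSuch a function would be mapped onto the training dataset only (e.g. with `dataset.map`), leaving the test pipeline untouched.",
"_____no_output_____"
],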
[
"## Processing each pathname",
"_____no_output_____"
]
],
[
[
"def _process_pathnames(fname, label_path):\n # We map this function onto each pathname pair\n img_str = tf.read_file(fname)\n img = tf.image.decode_bmp(img_str, channels=3)\n\n label_img_str = tf.read_file(label_path)\n label_img = tf.image.decode_bmp(label_img_str, channels=1)\n \n resize = [image_size, image_size]\n img = tf.image.resize_images(img, resize)\n label_img = tf.image.resize_images(label_img, resize)\n \n scale = 1 / 255.\n img = tf.to_float(img) * scale\n label_img = tf.to_float(label_img) * scale\n \n return img, label_img",
"_____no_output_____"
],
[
"def get_baseline_dataset(filenames,\n labels,\n threads=5,\n batch_size=batch_size,\n max_epochs=max_epochs,\n shuffle=True):\n num_x = len(filenames)\n # Create a dataset from the filenames and labels\n dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))\n # Map our preprocessing function to every element in our dataset, taking\n # advantage of multithreading\n dataset = dataset.map(_process_pathnames, num_parallel_calls=threads)\n \n if shuffle:\n dataset = dataset.shuffle(num_x * 10)\n \n # It's necessary to repeat our data for all epochs \n dataset = dataset.repeat(max_epochs).batch(batch_size)\n return dataset",
"_____no_output_____"
]
],
[
[
"## Set up train and test datasets\nNote that we apply image augmentation to our training dataset but not our validation dataset.",
"_____no_output_____"
]
],
[
[
"train_ds = get_baseline_dataset(x_train_filenames,\n y_train_filenames)\ntest_ds = get_baseline_dataset(x_test_filenames,\n y_test_filenames,\n shuffle=False)",
"_____no_output_____"
],
[
"train_ds",
"_____no_output_____"
]
],
[
[
"### Plot some train data",
"_____no_output_____"
]
],
[
[
"temp_ds = get_baseline_dataset(x_train_filenames,\n y_train_filenames,\n batch_size=1,\n max_epochs=1,\n shuffle=False)\n# Let's examine some of these augmented images\ntemp_iter = temp_ds.make_one_shot_iterator()\nnext_element = temp_iter.get_next()\nwith tf.Session() as sess:\n batch_of_imgs, label = sess.run(next_element)\n\n # Running next element in our graph will produce a batch of images\n plt.figure(figsize=(10, 10))\n img = batch_of_imgs[0]\n\n plt.subplot(1, 2, 1)\n plt.imshow(img)\n\n plt.subplot(1, 2, 2)\n plt.imshow(label[0, :, :, 0])\n plt.show()",
"_____no_output_____"
]
],
[
[
"# Build the model\nWe'll build the U-Net model. U-Net is especially good with segmentation tasks because it can localize well to provide high resolution segmentation masks. In addition, it works well with small datasets and is relatively robust against overfitting as the training data is in terms of the number of patches within an image, which is much larger than the number of training images itself. Unlike the original model, we will add batch normalization to each of our blocks. \n\nThe Unet is built with an encoder portion and a decoder portion. The encoder portion is composed of a linear stack of [`Conv`](https://developers.google.com/machine-learning/glossary/#convolution), `BatchNorm`, and [`Relu`](https://developers.google.com/machine-learning/glossary/#ReLU) operations followed by a [`MaxPool`](https://developers.google.com/machine-learning/glossary/#pooling). Each `MaxPool` will reduce the spatial resolution of our feature map by a factor of 2. We keep track of the outputs of each block as we feed these high resolution feature maps with the decoder portion. The Decoder portion is comprised of UpSampling2D, Conv, BatchNorm, and Relus. Note that we concatenate the feature map of the same size on the decoder side. Finally, we add a final Conv operation that performs a convolution along the channels for each individual pixel (kernel size of (1, 1)) that outputs our final segmentation mask in grayscale. ",
"_____no_output_____"
]
],
[
[
"def conv_block(inputs, num_outputs, is_training, scope):\n batch_norm_params = {'decay': 0.9,\n 'epsilon': 0.001,\n 'is_training': is_training,\n 'scope': 'batch_norm'}\n with tf.variable_scope(scope) as scope:\n with slim.arg_scope([slim.conv2d],\n num_outputs=num_outputs,\n kernel_size=[3, 3],\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n encoder = slim.conv2d(inputs, scope='conv1')\n encoder = slim.conv2d(encoder, scope='conv2')\n return encoder\n\ndef encoder_block(inputs, num_outputs, is_training, scope):\n with tf.variable_scope(scope) as scope:\n encoder = conv_block(inputs, num_outputs, is_training, scope)\n encoder_pool = slim.max_pool2d(encoder, kernel_size=[2, 2], scope='pool')\n \n return encoder_pool, encoder\n\ndef decoder_block(inputs, concat_tensor, num_outputs, is_training, scope):\n batch_norm_params = {'decay': 0.9,\n 'epsilon': 0.001,\n 'is_training': is_training,\n 'scope': 'batch_norm'}\n with tf.variable_scope(scope) as scope:\n decoder = slim.conv2d_transpose(inputs, num_outputs,\n kernel_size=[2, 2], stride=[2, 2],\n activation_fn=None, scope='convT')\n decoder = tf.concat([concat_tensor, decoder], axis=-1)\n decoder = slim.batch_norm(decoder, **batch_norm_params)\n decoder = tf.nn.relu(decoder)\n with slim.arg_scope([slim.conv2d],\n num_outputs=num_outputs,\n kernel_size=[3, 3],\n stride=[1, 1],\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n decoder = slim.conv2d(decoder, scope='conv1')\n decoder = slim.conv2d(decoder, scope='conv2')\n return decoder",
"_____no_output_____"
],
[
"class UNet(object):\n def __init__(self, train_ds, test_ds):\n self.train_ds = train_ds\n self.test_ds = test_ds\n \n def build_images(self):\n # tf.data.Iterator.from_string_handle의 output_shapes는 default = None이지만 꼭 값을 넣는 게 좋음\n self.handle = tf.placeholder(tf.string, shape=[])\n self.iterator = tf.data.Iterator.from_string_handle(self.handle,\n self.train_ds.output_types,\n self.train_ds.output_shapes)\n self.input_images, self.targets = self.iterator.get_next()\n \n def inference(self, inputs, is_training, reuse=False):\n with tf.variable_scope('', reuse=reuse) as scope:\n # inputs: [128, 128, 3]\n encoder0_pool, encoder0 = encoder_block(inputs, 32, is_training, 'encoder0')\n # encoder0_pool: [64, 64, 32], encoder0: [128, 128, 32]\n encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64, is_training, 'encoder1')\n # encoder1_pool: [32, 32, 64], encoder1: [64, 64, 64]\n encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128, is_training, 'encoder2')\n # encoder2_pool: [16, 16, 128], encoder2: [32, 32, 128]\n encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256, is_training, 'encoder3')\n # encoder3_pool: [8, 8, 256], encoder3: [16, 16, 256]\n center = conv_block(encoder3_pool, 512, is_training, 'center')\n # center: [8, 8, 512]\n decoder3 = decoder_block(center, encoder3, 256, is_training, 'decoder3')\n # decoder3 = [16, 16, 256]\n decoder2 = decoder_block(decoder3, encoder2, 128, is_training, 'decoder2')\n # decoder2 = [32, 32, 128]\n decoder1 = decoder_block(decoder2, encoder1, 64, is_training, 'decoder1')\n # decoder1 = [64, 64, 64]\n decoder0 = decoder_block(decoder1, encoder0, 32, is_training, 'decoder0')\n # decoder0 = [128, 128, 32]\n logits = slim.conv2d(decoder0, 1, [1, 1], activation_fn=None, scope='outputs')\n # logits = [128, 128, 1]\n\n return logits\n \n def dice_coeff(self, y_true, y_logits):\n smooth = 1.\n # Flatten\n y_true_f = tf.reshape(y_true, [-1])\n y_pred_f = tf.reshape(tf.nn.sigmoid(y_logits), [-1])\n intersection = tf.reduce_sum(y_true_f * y_pred_f)\n score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)\n return score\n \n def dice_loss(self, y_true, y_logits):\n loss = 1 - self.dice_coeff(y_true, y_logits)\n return loss\n \n def bce_dice_loss(self, y_true, y_logits):\n loss = tf.losses.sigmoid_cross_entropy(y_true, y_logits) + self.dice_loss(y_true, y_logits)\n return loss\n \n def build(self):\n self.global_step = tf.train.get_or_create_global_step()\n \n self.build_images()\n self.logits = self.inference(self.input_images, is_training=True)\n self.logits_val = self.inference(self.input_images, is_training=False, reuse=True)\n self.predicted_images = tf.nn.sigmoid(self.logits_val)\n\n self.loss = self.bce_dice_loss(self.targets, self.logits)\n \n print(\"complete model build.\")",
"_____no_output_____"
]
],
[
[
"### Create a model (UNet)",
"_____no_output_____"
]
],
[
[
"model = UNet(train_ds=train_ds, test_ds=test_ds)\nmodel.build()\n\n# show info for trainable variables\nt_vars = tf.trainable_variables()\nslim.model_analyzer.analyze_vars(t_vars, print_info=True)",
"complete model build.\n---------\nVariables: name (type shape) [size]\n---------\nencoder0/conv1/weights:0 (float32_ref 3x3x3x32) [864, bytes: 3456]\nencoder0/conv1/batch_norm/beta:0 (float32_ref 32) [32, bytes: 128]\nencoder0/conv2/weights:0 (float32_ref 3x3x32x32) [9216, bytes: 36864]\nencoder0/conv2/batch_norm/beta:0 (float32_ref 32) [32, bytes: 128]\nencoder1/conv1/weights:0 (float32_ref 3x3x32x64) [18432, bytes: 73728]\nencoder1/conv1/batch_norm/beta:0 (float32_ref 64) [64, bytes: 256]\nencoder1/conv2/weights:0 (float32_ref 3x3x64x64) [36864, bytes: 147456]\nencoder1/conv2/batch_norm/beta:0 (float32_ref 64) [64, bytes: 256]\nencoder2/conv1/weights:0 (float32_ref 3x3x64x128) [73728, bytes: 294912]\nencoder2/conv1/batch_norm/beta:0 (float32_ref 128) [128, bytes: 512]\nencoder2/conv2/weights:0 (float32_ref 3x3x128x128) [147456, bytes: 589824]\nencoder2/conv2/batch_norm/beta:0 (float32_ref 128) [128, bytes: 512]\nencoder3/conv1/weights:0 (float32_ref 3x3x128x256) [294912, bytes: 1179648]\nencoder3/conv1/batch_norm/beta:0 (float32_ref 256) [256, bytes: 1024]\nencoder3/conv2/weights:0 (float32_ref 3x3x256x256) [589824, bytes: 2359296]\nencoder3/conv2/batch_norm/beta:0 (float32_ref 256) [256, bytes: 1024]\ncenter/conv1/weights:0 (float32_ref 3x3x256x512) [1179648, bytes: 4718592]\ncenter/conv1/batch_norm/beta:0 (float32_ref 512) [512, bytes: 2048]\ncenter/conv2/weights:0 (float32_ref 3x3x512x512) [2359296, bytes: 9437184]\ncenter/conv2/batch_norm/beta:0 (float32_ref 512) [512, bytes: 2048]\ndecoder3/convT/weights:0 (float32_ref 2x2x256x512) [524288, bytes: 2097152]\ndecoder3/convT/biases:0 (float32_ref 256) [256, bytes: 1024]\ndecoder3/batch_norm/beta:0 (float32_ref 512) [512, bytes: 2048]\ndecoder3/conv1/weights:0 (float32_ref 3x3x512x256) [1179648, bytes: 4718592]\ndecoder3/conv1/batch_norm/beta:0 (float32_ref 256) [256, bytes: 1024]\ndecoder3/conv2/weights:0 (float32_ref 3x3x256x256) [589824, bytes: 2359296]\ndecoder3/conv2/batch_norm/beta:0 (float32_ref 256) [256, bytes: 1024]\ndecoder2/convT/weights:0 (float32_ref 2x2x128x256) [131072, bytes: 524288]\ndecoder2/convT/biases:0 (float32_ref 128) [128, bytes: 512]\ndecoder2/batch_norm/beta:0 (float32_ref 256) [256, bytes: 1024]\ndecoder2/conv1/weights:0 (float32_ref 3x3x256x128) [294912, bytes: 1179648]\ndecoder2/conv1/batch_norm/beta:0 (float32_ref 128) [128, bytes: 512]\ndecoder2/conv2/weights:0 (float32_ref 3x3x128x128) [147456, bytes: 589824]\ndecoder2/conv2/batch_norm/beta:0 (float32_ref 128) [128, bytes: 512]\ndecoder1/convT/weights:0 (float32_ref 2x2x64x128) [32768, bytes: 131072]\ndecoder1/convT/biases:0 (float32_ref 64) [64, bytes: 256]\ndecoder1/batch_norm/beta:0 (float32_ref 128) [128, bytes: 512]\ndecoder1/conv1/weights:0 (float32_ref 3x3x128x64) [73728, bytes: 294912]\ndecoder1/conv1/batch_norm/beta:0 (float32_ref 64) [64, bytes: 256]\ndecoder1/conv2/weights:0 (float32_ref 3x3x64x64) [36864, bytes: 147456]\ndecoder1/conv2/batch_norm/beta:0 (float32_ref 64) [64, bytes: 256]\ndecoder0/convT/weights:0 (float32_ref 2x2x32x64) [8192, bytes: 32768]\ndecoder0/convT/biases:0 (float32_ref 32) [32, bytes: 128]\ndecoder0/batch_norm/beta:0 (float32_ref 64) [64, bytes: 256]\ndecoder0/conv1/weights:0 (float32_ref 3x3x64x32) [18432, bytes: 73728]\ndecoder0/conv1/batch_norm/beta:0 (float32_ref 32) [32, bytes: 128]\ndecoder0/conv2/weights:0 (float32_ref 3x3x32x32) [9216, bytes: 36864]\ndecoder0/conv2/batch_norm/beta:0 (float32_ref 32) [32, bytes: 128]\noutputs/weights:0 (float32_ref 1x1x32x1) [32, bytes: 128]\noutputs/biases:0 (float32_ref 1) 
[1, bytes: 4]\nTotal size of variables: 7761057\nTotal bytes of variables: 31044228\n"
],
[
"opt = tf.train.AdamOptimizer(learning_rate=2e-4)\nwith tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n opt_op = opt.minimize(model.loss, global_step=model.global_step)",
"_____no_output_____"
],
[
"saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)",
"_____no_output_____"
]
],
[
[
"### Train a model",
"_____no_output_____"
]
],
[
[
"%%time\nsess = tf.Session(config=sess_config)\nsess.run(tf.global_variables_initializer())\ntf.logging.info('Start Session.')\n\ntrain_iterator = train_ds.make_one_shot_iterator()\ntrain_handle = sess.run(train_iterator.string_handle())\ntest_iterator = test_ds.make_one_shot_iterator()\ntest_handle = sess.run(test_iterator.string_handle())\n\n# save loss values for plot\nloss_history = []\npre_epochs = 0\nwhile True:\n try:\n start_time = time.time()\n _, global_step_, loss = sess.run([opt_op,\n model.global_step,\n model.loss],\n feed_dict={model.handle: train_handle})\n\n epochs = global_step_ * batch_size / float(num_train_examples)\n duration = time.time() - start_time\n\n if global_step_ % print_steps == 0:\n clear_output(wait=True)\n examples_per_sec = batch_size / float(duration)\n print(\"Epochs: {:.2f} global_step: {} loss: {:.3f} ({:.2f} examples/sec; {:.3f} sec/batch)\".format(\n epochs, global_step_, loss, examples_per_sec, duration))\n\n loss_history.append([epochs, loss])\n\n # print sample image\n img, label, predicted_label = sess.run([model.input_images, model.targets, model.predicted_images],\n feed_dict={model.handle: test_handle})\n plt.figure(figsize=(10, 20))\n plt.subplot(1, 3, 1)\n plt.imshow(img[0,: , :, :])\n plt.title(\"Input image\")\n \n plt.subplot(1, 3, 2)\n plt.imshow(label[0, :, :, 0])\n plt.title(\"Actual Mask\")\n \n plt.subplot(1, 3, 3)\n plt.imshow(predicted_label[0, :, :, 0])\n plt.title(\"Predicted Mask\")\n plt.show()\n\n # save model checkpoint periodically\n if int(epochs) % save_epochs == 0 and pre_epochs != int(epochs):\n tf.logging.info('Saving model with global step {} (= {} epochs) to disk.'.format(global_step_, int(epochs)))\n saver.save(sess, train_dir + 'model.ckpt', global_step=global_step_)\n pre_epochs = int(epochs)\n\n except tf.errors.OutOfRangeError:\n print(\"End of dataset\") # ==> \"End of dataset\"\n tf.logging.info('Saving model with global step {} (= {} epochs) to disk.'.format(global_step_, int(epochs)))\n saver.save(sess, train_dir + 'model.ckpt', global_step=global_step_)\n break\n\ntf.logging.info('complete training...')",
"Epochs: 98.33 global_step: 2950 loss: 0.090 (248.52 examples/sec; 0.032 sec/batch)\n"
]
],
[
[
"### Plot the loss",
"_____no_output_____"
]
],
[
[
"loss_history = np.asarray(loss_history)\nplt.plot(loss_history[:,0], loss_history[:,1])\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Evaluate the test dataset and Plot",
"_____no_output_____"
]
],
[
[
"test_ds_eval = get_baseline_dataset(x_test_filenames,\n y_test_filenames,\n batch_size=num_test_examples,\n max_epochs=1,\n shuffle=False)\n\ntest_iterator_eval = test_ds_eval.make_one_shot_iterator()\ntest_handle_eval = sess.run(test_iterator_eval.string_handle())",
"_____no_output_____"
],
[
"mean_iou, mean_iou_op = tf.metrics.mean_iou(labels=tf.to_int32(tf.round(model.targets)),\n predictions=tf.to_int32(tf.round(model.predicted_images)),\n num_classes=2,\n name='mean_iou')\nsess.run(tf.local_variables_initializer())\n\nsess.run(mean_iou_op, feed_dict={model.handle: test_handle_eval})\nprint(\"mean iou:\", sess.run(mean_iou))",
"mean iou: 0.87998605\n"
]
],
[
[
"#### Visualize testset",
"_____no_output_____"
]
],
[
[
"test_ds_visual = get_baseline_dataset(x_test_filenames, \n y_test_filenames,\n batch_size=1,\n max_epochs=1,\n shuffle=False)\n\ntest_iterator_visual = test_ds_visual.make_one_shot_iterator()\ntest_handle_visual = sess.run(test_iterator_visual.string_handle())",
"_____no_output_____"
],
[
"# Let's visualize some of the outputs \n\n# Running next element in our graph will produce a batch of images\nplt.figure(figsize=(10, 20))\nfor i in range(5):\n #img, label, predicted_label = sess.run([model.input_images, model.targets, model.predicted_images],\n img, label, predicted_label = sess.run([model.input_images,\n tf.to_int32(tf.round(model.targets)),\n tf.to_int32(tf.round(model.predicted_images))],\n feed_dict={model.handle: test_handle_visual})\n\n plt.subplot(5, 3, 3 * i + 1)\n plt.imshow(img[0,: , :, :])\n plt.title(\"Input image\")\n\n plt.subplot(5, 3, 3 * i + 2)\n plt.imshow(label[0, :, :, 0])\n plt.title(\"Actual Mask\")\n\n plt.subplot(5, 3, 3 * i + 3)\n plt.imshow(predicted_label[0, :, :, 0])\n plt.title(\"Predicted Mask\")\nplt.suptitle(\"Examples of Input Image, Label, and Prediction\")\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7c7e6dcf67a50c061d8e3472839134570b4caca | 10,424 | ipynb | Jupyter Notebook | PythonCodes/Exercises/Class-SEAS/green/.ipynb_checkpoints/green-checkpoint.ipynb | Nicolucas/C-Scripts | 2608df5c2e635ad16f422877ff440af69f98f960 | [
"MIT"
] | 1 | 2020-02-25T08:05:13.000Z | 2020-02-25T08:05:13.000Z | PythonCodes/Exercises/Class-SEAS/green/.ipynb_checkpoints/green-checkpoint.ipynb | Nicolucas/TEAR | bbeb599cf2bab70fd7a82041336a1a918e8727f2 | [
"MIT"
] | null | null | null | PythonCodes/Exercises/Class-SEAS/green/.ipynb_checkpoints/green-checkpoint.ipynb | Nicolucas/TEAR | bbeb599cf2bab70fd7a82041336a1a918e8727f2 | [
"MIT"
] | null | null | null | 47.167421 | 5,476 | 0.73254 | [
[
[
"Green's function\n==============",
"_____no_output_____"
],
[
"Fundamental solution\n-------------------------------",
"_____no_output_____"
]
],
[
[
"from sympy import *\ninit_printing()",
"_____no_output_____"
],
[
"x1, x2, xi1, xi2 = symbols('x_1 x_2 xi_1 xi_2')\nE = -1/(2*pi) * log(sqrt((x1-xi1)**2 + (x2-xi2)**2))\nE",
"_____no_output_____"
]
],
[
[
"**Task**: Check that $\\nabla^2_\\xi E = 0$ for $x \\neq \\xi$.\n\n*Hint*: https://docs.sympy.org/latest/tutorial/calculus.html#derivatives",
"_____no_output_____"
]
],
[
[
"diff(E,x,2)",
"_____no_output_____"
]
],
[
[
"Directional derivative\n------------------------------",
"_____no_output_____"
]
],
[
[
"n1, n2 = symbols('n_1 n_2')",
"_____no_output_____"
]
],
[
[
"**Task**: Compute the directional derivative $\\frac{\\partial E}{\\partial n}$.",
"_____no_output_____"
],
[
"**Task** (optional): Write a function which returns the directional derivative of an expression.",
"_____no_output_____"
]
],
[
[
"def ddn(expr):\n pass",
"_____no_output_____"
]
],
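[
[
"One possible implementation of `ddn` (a sketch, not the official solution): the directional derivative with respect to $\\xi$ in the direction $n=(n_1,n_2)$ is $n\\cdot\\nabla_\\xi$, so\n```\ndef ddn(expr):\n    return n1*diff(expr, xi1) + n2*diff(expr, xi2)\n```\nApplying `ddn(E)` then gives $\\frac{\\partial E}{\\partial n}$ in terms of $x$, $\\xi$ and $n$.",
"_____no_output_____"
]
],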
[
[
"Reflection principle\n----------------------------",
"_____no_output_____"
],
[
"For simple geometries Green's function can sometimes be found by reflecting the fundamental solution at the boundary and linearly combining the fundamental solution with its reflection. ",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"**Task**: Based on $E$, find the solution for the half-space problem\n\\begin{align*}\n \\nabla^2G(x,\\xi) &= -\\delta(x-\\xi), & \\xi\\in\\Omega \\\\\n G(x,\\xi) &= 0, & \\xi\\in\\partial\\Omega \\\\\n \\Omega &= \\{\\xi\\in\\mathbb{R}^2 : \\xi_2 > 0\\}\n\\end{align*}\n\n*Hint*: https://docs.sympy.org/latest/tutorial/basic_operations.html#substitution",
"_____no_output_____"
],
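[
"A possible sketch using the reflection idea (one way to approach this, not necessarily the intended solution): mirror the singularity across the boundary $\\xi_2 = 0$ and subtract, e.g.\n```\nG = E - E.subs(xi2, -xi2)\nsimplify(G.subs(xi2, 0))  # evaluates to 0 on the boundary, as required\n```\nThe reflected term is harmonic inside $\\Omega$ because its singularity lies in the lower half-plane.",
"_____no_output_____"
],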
[
"**Task**: Based on $E$, find the solution for the half-space problem\n\\begin{align*}\n \\nabla^2G(x,\\xi) &= -\\delta(x-\\xi), & \\xi\\in\\Omega \\\\\n \\frac{\\partial G(x,\\xi)}{\\partial n} &= 0, & \\xi\\in\\partial\\Omega \\\\\n \\Omega &= \\{\\xi\\in\\mathbb{R}^2 : \\xi_2 > 0\\}\n\\end{align*}",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7c7e7dd3b905dc373cbdbcdc3e6612622913760 | 5,965 | ipynb | Jupyter Notebook | Assignment-3 Q1 (1).ipynb | Akanksha019/Hypothesis_Testing | 84babebd624be8541ce737832d1289f81674d6c4 | [
"Apache-2.0"
] | null | null | null | Assignment-3 Q1 (1).ipynb | Akanksha019/Hypothesis_Testing | 84babebd624be8541ce737832d1289f81674d6c4 | [
"Apache-2.0"
] | null | null | null | Assignment-3 Q1 (1).ipynb | Akanksha019/Hypothesis_Testing | 84babebd624be8541ce737832d1289f81674d6c4 | [
"Apache-2.0"
] | null | null | null | 21.690909 | 93 | 0.381559 | [
[
[
"import pandas as pd\nimport scipy\nimport numpy\nfrom scipy import stats",
"_____no_output_____"
],
[
"data1=pd.read_csv(\"Cutlets.csv\")",
"_____no_output_____"
],
[
"data1.head()",
"_____no_output_____"
],
[
"unit_A=pd.Series(data1.iloc[:,0])\nunit_A",
"_____no_output_____"
],
[
"unit_B=pd.Series(data1.iloc[:,1])\nunit_B",
"_____no_output_____"
],
[
"p_value=stats.ttest_ind(unit_A,unit_B)[1]\np_value",
"_____no_output_____"
],
[
"#compare p value with 0.05\n#p value is > than 0.05 accept null hypothesis(there is no difffrence between two unit)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c7ea114654b116d8e632444fa0976976e47842 | 908,173 | ipynb | Jupyter Notebook | code/04_Mining_CuboidB.ipynb | JulienLeprince/multidimensional-building-data-cube-pattern-identification | ab9e63aae853c6c045dd7abc903f38a14e189c1c | [
"MIT"
] | 1 | 2021-07-29T00:28:26.000Z | 2021-07-29T00:28:26.000Z | code/04_Mining_CuboidB.ipynb | JulienLeprince/multidimensional-building-data-cube-pattern-identification | ab9e63aae853c6c045dd7abc903f38a14e189c1c | [
"MIT"
] | null | null | null | code/04_Mining_CuboidB.ipynb | JulienLeprince/multidimensional-building-data-cube-pattern-identification | ab9e63aae853c6c045dd7abc903f38a14e189c1c | [
"MIT"
] | 1 | 2021-12-20T08:41:43.000Z | 2021-12-20T08:41:43.000Z | 2,647.734694 | 248,669 | 0.879479 | [
[
[
"# Multidimensional Pattern identification with SAX\n## In-site view\n\nThis script performs pattern identification over the {time, attribute} cuboid, that covers the intra-building frame. It serves for within-site exploration on how a given building operates across time and building-specific attributes.\n\nThe data is first normalized then transformed using SAX over normalized daily sequences. Motifs are identified across buildings, and a final clustering phase is executed over the reduced counts of sequences. \n\nResults are presented visually allowing interpretable analytics.",
"_____no_output_____"
]
],
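  [
    [
"For readers unfamiliar with SAX, the cell below gives a minimal, self-contained sketch of the idea (piecewise aggregate approximation followed by binning into letters). The transformation actually used in this notebook is `utils.SAX_mining`, whose implementation is not shown here, so treat this purely as an illustration.",
"_____no_output_____"
    ],
    [
"import numpy as np\n\ndef sax_sketch(series, n_pieces=4, alphabet='abc'):\n    \"\"\"Illustrative SAX: z-normalize, PAA, then symbolize with Gaussian breakpoints.\"\"\"\n    x = np.asarray(series, dtype=float)\n    x = (x - x.mean()) / (x.std() + 1e-12)      # z-normalize the daily sequence\n    paa = x.reshape(n_pieces, -1).mean(axis=1)  # piecewise aggregate approximation\n    breakpoints = [-0.43, 0.43]                 # equiprobable N(0,1) regions for a 3-letter alphabet\n    return ''.join(alphabet[s] for s in np.digitize(paa, breakpoints))\n\n# Example: a 24-value daily profile -> a 4-letter word such as 'abca'\n# sax_sketch(np.sin(np.linspace(0, 2*np.pi, 24)))",
"_____no_output_____"
    ]
  ],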
[
[
"# Import modules\nimport pandas as pd\nimport numpy as np\nimport time\nfrom sklearn.cluster import KMeans\nimport sklearn.metrics as metrics\nfrom collections import Counter\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n# Plotting modules\nfrom plotly.offline import init_notebook_mode\ninit_notebook_mode(connected = True)\nimport matplotlib.pyplot as plt\nplt.rcdefaults()\n# Importing utility script\nimport utils as ut\n\n# Version\nversion = \"v1.0\"\n\n# Path definition\npath_data = \"..\\\\data\\\\cube\\\\\"\npath_fig_out = \"..\\\\figures\\\\insite_view\\\\\"",
"_____no_output_____"
]
],
[
[
"## Read",
"_____no_output_____"
]
],
[
[
"# Read Cuboid\nblg_id = \"Fox_education_Melinda\"\ndf = pd.read_csv(path_data + \"cuboid_B_\"+blg_id+\".csv\", index_col=\"timestamp\")\n\n# Format index to datetime object\ndf.index = pd.to_datetime(df.index, format='%Y-%m-%d %H:%M:%S')\ndf.head()",
"_____no_output_____"
]
],
[
[
"# Pre-Mining\n## Motifs identification",
"_____no_output_____"
]
],
[
[
"# SAX Parameters\nday_number_of_pieces = 4\nalphabet_size = 3\nscaler_function = StandardScaler()\n\n# Normalize per attribute\ndf_normalized = ut.scale_df_columns_NanRobust(df, df.columns, scaler=scaler_function)\n\n# Perform SAX transformation\nsax_dict, counts, sax_data = ut.SAX_mining(df_normalized, W=day_number_of_pieces, A=alphabet_size)\n\n# Plot the sequence counts per attribute\nfor meter in df.columns.values:\n fig = ut.counter_plot(counts[meter], title=meter)\n fig.savefig(path_fig_out+\"SAXcounts_StandardScaler_blg_\"+blg_id+\"_meter_\"+meter+\"_\"+version+\".jpg\", dpi=300, bbox_inches='tight')",
"_____no_output_____"
],
[
"# Reformating sax results for plotting\nsax_dict_data, index_map_dictionary = dict(), dict()\nfor meter in sax_data:\n sax_dict_data[meter], index_map_dictionary[meter] = ut.sax_df_reformat(sax_data, sax_dict, meter)\n\n# Plotting all SAX sequences and saving figure\nfig = ut.SAX_dailyhm_visualization(sax_dict_data, sax_dict, index_map_dictionary)\nut.png_output([len(sax_dict.keys())*250, 800])\nfig.show()\nfig.write_image(path_fig_out+\"SAX_blg_\"+blg_id+\"_\"+version+\".png\")",
"_____no_output_____"
],
[
"# Filter discords from established threshold\nthreshold = 10 # motif number threshold\nindexes = dict()\nfor meter in df.columns.values:\n df_count = pd.DataFrame.from_dict(Counter(sax_dict[meter]), orient='index').rename(columns={0:'count'})\n df_count.fillna(0)\n motifs = df_count[df_count.values > threshold]\n indexes[meter] = [i for i,x in enumerate(sax_dict[meter]) if x in list(motifs.index)] # returns all indexes",
"_____no_output_____"
]
],
[
[
"# Mining\n## Attribute motifs clustering\nAttribute daily profile motifs are clustered together resulting in a reduced number of typical patterns from the previous motif identification thanks to SAX trasnformation.",
"_____no_output_____"
]
],
[
[
"# Identify optimal cluster number\nwcss, sil = [], []\nfor meter in sax_data:\n wcss_l, sil_l = ut.elbow_method(sax_data[meter].iloc[indexes[meter]].interpolate(method='linear').transpose(), n_cluster_max=20)\n wcss.append(wcss_l)\n sil.append(sil_l)\n# Get similarity index quantiles (cross attributes)\narr_sil, arr_wcss = np.array(sil), np.array(wcss)\nwcss_med = np.quantile(arr_wcss, .5, axis=0)\nsil_med = np.quantile(arr_sil, .5, axis=0)\nerr_wcss = [np.quantile(arr_wcss, .25, axis=0), np.quantile(arr_wcss, .75, axis=0)]\nerr_sil = [np.quantile(arr_sil, .25, axis=0), np.quantile(arr_sil, .75, axis=0)]\n\n# Plots\nplt.rcParams.update({'font.size': 12})\nplt.rcParams['font.sans-serif'] = ['Times New Roman']\nfig = ut.similarity_index_werror_plot(wcss_med, sil_med, err_wcss, err_sil)\nfig.savefig(path_fig_out+\"blg_\"+blg_id+\"_cluster_SimilarityIndex_\"+version+\".jpg\", dpi=300, bbox_inches='tight')",
"_____no_output_____"
],
[
"## Clustering identified motifs\n\n# Cluster the identified motifs\nnb_clusters_opt = 4\nkmeans = KMeans(n_clusters=nb_clusters_opt, init='k-means++', max_iter=300, n_init=10, random_state=0)\nkmeans_pred_y, clust_sax_data = dict(), dict()\nfor meter in df.columns.values:\n clust_sax_data[meter] = sax_data[meter].iloc[indexes[meter]]\n kmeans_pred_y[meter] = kmeans.fit_predict(clust_sax_data[meter].interpolate(method='linear', limit_direction='both'))\n\n# Reformating cluster results for plotting\nclust_dict_data, index_map_dictionary = dict(), dict()\nmax_shape = 0\nfor meter in sax_data:\n clust_dict_data[meter], index_map_dictionary[meter] = ut.sax_df_reformat(clust_sax_data, kmeans_pred_y, meter)\n max_shape = max(max_shape, max(np.shape(clust_dict_data[meter])))\n# Adjusting reformaating from variable attribute motifs lengths\nfor meter in sax_data:\n # Defining width of empty dataframe to add\n space_btw_saxseq = max_shape - max(np.shape(clust_dict_data[meter]))\n # Creating empty frame\n empty_sax_df = pd.DataFrame(columns=sax_data[meter].columns, index=[' ']*space_btw_saxseq)\n # Adding empty frame to the df\n clust_dict_data[meter] = clust_dict_data[meter].append(empty_sax_df)\n\n\n# Plotting cluster results results\nfig = ut.SAX_dailyhm_visualization(clust_dict_data, sax_dict, index_map_dictionary)\nut.png_output([len(clust_dict_data.keys())*250, 800])\nfig.show()\nfig.write_image(path_fig_out+\"clust_blg_\"+blg_id+\"_\"+version+\".png\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7c7ec7680fab4d6521760b84e89afa7f6a760b0 | 21,012 | ipynb | Jupyter Notebook | notebooks/train_basic_rnn.ipynb | rateixei/hls-rnn-btag | 6b3643137d5bdcfef5c8c7b692126dad0be95cc3 | [
"MIT"
] | null | null | null | notebooks/train_basic_rnn.ipynb | rateixei/hls-rnn-btag | 6b3643137d5bdcfef5c8c7b692126dad0be95cc3 | [
"MIT"
] | null | null | null | notebooks/train_basic_rnn.ipynb | rateixei/hls-rnn-btag | 6b3643137d5bdcfef5c8c7b692126dad0be95cc3 | [
"MIT"
] | null | null | null | 31.175074 | 123 | 0.5168 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py",
"_____no_output_____"
],
[
"from tensorflow.keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelEncoder",
"_____no_output_____"
],
[
"data_loc = '/gpfs/slac/atlas/fs1/d/rafaeltl/public/ML/L1RNN/datasets_2020_ff/'",
"_____no_output_____"
],
[
"file_str = 'Jan06_FlavFix_smear_1_std_xtd_zst.h5'",
"_____no_output_____"
],
[
"f5 = h5py.File(data_loc+file_str, 'r')",
"_____no_output_____"
],
[
"x_train = np.array( f5['x_train'] )\ny_train = to_categorical ( np.array( f5['y_train'] ) )\nw_train = np.array( f5['w_train'] )",
"_____no_output_____"
],
[
"y_train",
"_____no_output_____"
],
[
"from tensorflow.keras.layers import Dense, Activation, BatchNormalization, LSTM, Masking, Input, GRU\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.regularizers import l1",
"_____no_output_____"
],
[
"from tensorflow.keras import regularizers",
"_____no_output_____"
],
[
"def lstmmodel(max_len, n_var, rec_units, ndense=[10], l1_reg=0,\n l2_reg=0, rec_act='sigmoid', extra_lab='none', rec_kernel_init='VarianceScaling',\n dense_kernel_init='lecun_uniform', domask=False):\n \n rec_layer = 'LSTM'\n \n track_inputs = Input(shape=(max_len, n_var,))\n \n if domask:\n hidden = Masking( mask_value=0, name=\"masking_1\")(track_inputs)\n else:\n hidden = track_inputs\n \n if l1_reg > 1e-6 and l2_reg > 1e-6:\n hidden = LSTM(units=rec_units,\n recurrent_activation = rec_act,\n kernel_initializer = rec_kernel_init, \n kernel_regularizer = regularizers.l1_l2(l1 = l1_reg, l2 = l2_reg),\n name = 'lstm1_l1l2')(hidden)\n elif l1_reg > 1e-6:\n hidden = LSTM(units=rec_units,\n recurrent_activation = rec_act,\n kernel_initializer = rec_kernel_init, \n kernel_regularizer = regularizers.l1(l1 = l1_reg),\n name = 'lstm1_l1')(hidden)\n elif l2_reg > 1e-6:\n hidden = LSTM(units=rec_units,\n recurrent_activation = rec_act,\n kernel_initializer = rec_kernel_init, \n kernel_regularizer = regularizers.l2(l2 = l2_reg),\n name = 'lstm1_l2')(hidden)\n else:\n hidden = LSTM(units=rec_units,\n recurrent_activation = rec_act,\n kernel_initializer = rec_kernel_init, \n name = 'lstm1')(hidden)\n\n for ind,nd in enumerate(ndense):\n hidden = Dense(nd, activation='relu', kernel_initializer=dense_kernel_init, name=f'dense_{ind}' )(hidden)\n \n output = Dense(3, activation='softmax', kernel_initializer=dense_kernel_init, name = 'output_softmax')(hidden)\n \n model = Model(inputs=track_inputs, outputs=output)\n \n d_layers = ''.join([ str(dl) for dl in ndense ])\n \n if domask:\n mname = f'MASKED_rnn_{rec_layer}.{rec_units}_Dense.{d_layers}_'\n else:\n mname = f'rnn_{rec_layer}.{rec_units}_Dense.{d_layers}_'\n mname += f'LSTMKernelInit.{rec_kernel_init}_DenseKernelInit.{dense_kernel_init}'\n mname += f'KRl1.{l1_reg}_KRl2.{l2_reg}_recAct.{rec_act}' #LSTM kernel regularizer\n \n if 'none' not in extra_lab:\n mname += f'_{extra_lab}'\n \n return model, mname\n\n# mask = Masking( mask_value=0, name=\"masking_1\")(track_inputs)\n##########################################\n# use_bias=False,\n# activation='relu',\n# recurrent_activation='relu',\n# kernel_regularizer = regularizers.l1_l2(l1= 0.001, l2 = 0.0001), \n# bias_regularizer = regularizers.l1_l2(l1= 1, l2 = 1), \n# activity_regularizer=regularizers.l1_l2(l1= 0.001, l2 = 0.0001),\n##########################################\n",
"_____no_output_____"
],
[
"def grumodel(max_len, n_var, rec_units, ndense=[10], l1_reg=0,\n l2_reg=0, rec_act='sigmoid', extra_lab='none', rec_kernel_init='VarianceScaling',\n dense_kernel_init='lecun_uniform', domask=False):\n \n rec_layer = 'GRU'\n \n track_inputs = Input(shape=(max_len, n_var,))\n \n if domask:\n hidden = Masking( mask_value=0, name=\"masking_1\")(track_inputs)\n else:\n hidden = track_inputs\n \n\n if l1_reg > 1e-6 and l2_reg > 1e-6:\n hidden = GRU(units=rec_units,\n kernel_initializer = rec_kernel_init, \n kernel_regularizer = regularizers.l1_l2(l1 = l1_reg, l2 = l2_reg),\n name = 'gru_l1l2')(hidden)\n elif l1_reg > 1e-6:\n hidden = GRU(units=rec_units,\n recurrent_activation = rec_act,\n kernel_initializer = rec_kernel_init, \n kernel_regularizer = regularizers.l1(l1 = l1_reg),\n name = 'gru_l1')(hidden)\n elif l2_reg > 1e-6:\n hidden = GRU(units=rec_units,\n recurrent_activation = rec_act,\n kernel_initializer = rec_kernel_init, \n kernel_regularizer = regularizers.l2(l2 = l2_reg),\n name = 'gru_l2')(hidden)\n else:\n hidden = GRU(units=rec_units,\n recurrent_activation = rec_act,\n kernel_initializer = rec_kernel_init, \n name = 'gru')(hidden)\n \n\n for ind,nd in enumerate(ndense):\n hidden = Dense(nd, activation='relu', kernel_initializer=dense_kernel_init, name=f'dense_{ind}' )(hidden)\n \n output = Dense(3, activation='softmax', kernel_initializer=dense_kernel_init, name = 'output_softmax')(hidden)\n \n model = Model(inputs=track_inputs, outputs=output)\n \n d_layers = ''.join([ str(dl) for dl in ndense ])\n \n if domask:\n mname = f'MASKED_rnn_{rec_layer}.{rec_units}_Dense.{d_layers}_'\n else:\n mname = f'rnn_{rec_layer}.{rec_units}_Dense.{d_layers}_'\n mname += f'LSTMKernelInit.{rec_kernel_init}_DenseKernelInit.{dense_kernel_init}'\n mname += f'KRl1.{l1_reg}_KRl2.{l2_reg}_recAct.{rec_act}' #LSTM kernel regularizer\n \n if 'none' not in extra_lab:\n mname += f'_{extra_lab}'\n \n return model, mname\n\n# mask = Masking( mask_value=0, name=\"masking_1\")(track_inputs)\n##########################################\n# use_bias=False,\n# activation='relu',\n# recurrent_activation='relu',\n# kernel_regularizer = regularizers.l1_l2(l1= 0.001, l2 = 0.0001), \n# bias_regularizer = regularizers.l1_l2(l1= 1, l2 = 1), \n# activity_regularizer=regularizers.l1_l2(l1= 0.001, l2 = 0.0001),\n##########################################\n",
"_____no_output_____"
],
[
"l1_reg = 0\nl2_reg = 0\n\n## GRU Model\n\n# model, model_name = grumodel(15, 6, 120, [50, 10], l1_reg=l1_reg, l2_reg=l2_reg)\n\n## LSTM Model\n\nmodel, model_name = lstmmodel(15, 6, 120, [50, 10], l1_reg=l1_reg, l2_reg=l2_reg)\n\n# Masked model\n\n# model, model_name = lstmmodel(15, 6, 20, [10], l1_reg=l1_reg, l2_reg=l2_reg, domask=True)\n\n\n# ## Very very tiny model\n\n# model, model_name = lstmmodel(15, 6, 2, [], l1_reg=l1_reg, l2_reg=l2_reg)\n\n# ## Very tiny model\n\n# model, model_name = lstmmodel(15, 6, 10, [], l1_reg=l1_reg, l2_reg=l2_reg)\n\n# ## Tiny model\n\n# model, model_name = lstmmodel(15, 6, 10, [10], l1_reg=l1_reg, l2_reg=l2_reg)\n\n# ## Small model\n\n# model, model_name = lstmmodel(15, 6, 20, [10], l1_reg=l1_reg, l2_reg=l2_reg)\n\n# ## Little model\n\n# model, model_name = lstmmodel(15, 6, 50, [10], l1_reg=l1_reg, l2_reg=l2_reg)\n\n# ## Intermediate model\n\n# model, model_name = lstmmodel(15, 6, 50, [10, 10], l1_reg=l1_reg, l2_reg=l2_reg)\n\n# ## Large model\n\n# model, model_name = lstmmodel(15, 6, 100, [50, 10], l1_reg=l1_reg, l2_reg=l2_reg)\n\n# model, model_name = lstmmodel(15, 6, 100, [10], l1_reg=l1_reg, l2_reg=l2_reg)\n\n# model, model_name = lstmmodel(15, 6, 100, [10], l1_reg=l1_reg, l2_reg=l2_reg)",
"_____no_output_____"
],
[
"model.summary()\nprint(model_name)",
"Model: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 15, 6)] 0 \n_________________________________________________________________\nlstm1 (LSTM) (None, 120) 60960 \n_________________________________________________________________\ndense_0 (Dense) (None, 50) 6050 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 510 \n_________________________________________________________________\noutput_softmax (Dense) (None, 3) 33 \n=================================================================\nTotal params: 67,553\nTrainable params: 67,553\nNon-trainable params: 0\n_________________________________________________________________\nrnn_LSTM.120_Dense.5010_LSTMKernelInit.VarianceScaling_DenseKernelInit.lecun_uniformKRl1.0_KRl2.0_recAct.sigmoid\n"
],
[
"model_json = model.to_json()\nwith open(f'keras/model_{model_name}_arch.json', \"w\") as json_file:\n json_file.write(model_json)",
"_____no_output_____"
],
[
"# adam = Adam(learning_rate=0.01)\nmodel.compile(optimizer='adam', loss=['categorical_crossentropy'], metrics=['accuracy'])",
"_____no_output_____"
],
[
"from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint",
"_____no_output_____"
],
[
"model_output = f'keras/model_{model_name}_weights.h5'",
"_____no_output_____"
],
[
"train = True",
"_____no_output_____"
],
[
"if train:\n history = model.fit( x_train , y_train,\n batch_size=2**14,\n # epochs=10,\n epochs=150,\n validation_split=0.1,\n shuffle = True,\n sample_weight= w_train,\n callbacks = [\n EarlyStopping(verbose=True, patience=20, monitor='val_accuracy'),\n ModelCheckpoint(model_output, monitor='val_accuracy', verbose=True, save_best_only=True)\n ],\n verbose=True\n )\n \nmodel.load_weights(model_output)",
"_____no_output_____"
],
[
"model.summary()",
"_____no_output_____"
],
[
"x_test = np.array( f5['x_test'] )\ny_test = to_categorical ( np.array( f5['y_test'] ) )",
"_____no_output_____"
],
[
"import plotting\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score",
"_____no_output_____"
],
[
"import importlib",
"_____no_output_____"
],
[
"importlib.reload(plotting)",
"_____no_output_____"
],
[
"pred_test = model.predict(x_test, batch_size=2**10)",
"_____no_output_____"
],
[
"print(\"Accuracy: {}\".format(accuracy_score(np.argmax(y_test, axis=1), np.argmax(pred_test, axis=1))))",
"_____no_output_____"
],
[
"pb_b = pred_test[:,0] [y_test[:,0] == 1]\npc_b = pred_test[:,1] [y_test[:,0] == 1]\npl_b = pred_test[:,2] [y_test[:,0] == 1]\n \npc_c = pred_test[:,1] [y_test[:,1] == 1]\npb_c = pred_test[:,0] [y_test[:,1] == 1]\n \npl_l = pred_test[:,2] [y_test[:,2] == 1]\npb_l = pred_test[:,0] [y_test[:,2] == 1]\n\nplt.Figure()\n\nplt.hist( pb_b/(pb_b+pl_b), range=(0,1), bins=1000, histtype='step' )\nplt.hist( pb_l/(pb_l+pl_l), range=(0,1), bins=1000, histtype='step' )\n\nplt.show()\n\n\nplt.Figure()\n\nplt.hist( pb_b/(pb_b+pc_b), range=(0,1), bins=1000, histtype='step' )\nplt.hist( pb_c/(pb_c+pc_c), range=(0,1), bins=1000, histtype='step' )\n\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(9,9))\n_ = plotting.makeRoc(y_test, pred_test)",
"_____no_output_____"
],
[
"for layer in model.layers:\n print(layer.name)\n# plt.Figure()\n \n this_wgts = layer.get_weights()\n# if len(this_wgts) < 1: continue\n print(layer.get_config())\n \n for wgt in this_wgts:\n print(wgt)\n print()\n# max_wgts = np.max(this_wgts)\n# min_wgts = np.min(this_wgts)\n# plt.hist(this_wgts, bins=100, range=(min_wgts, max_wgts))\n# plt.xlabel(f'{layer.name}')\n# plt.show",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c7ed4b34781f1ea620bb592395d6fc66043934 | 441,900 | ipynb | Jupyter Notebook | DSPNP_practical2/DSPNP_notebook2_digits.ipynb | frantu08/Data_Science_Unit_20-21 | 60b5904c003d535bf917624fa2e2cfc0e21dac6b | [
"Apache-2.0"
] | null | null | null | DSPNP_practical2/DSPNP_notebook2_digits.ipynb | frantu08/Data_Science_Unit_20-21 | 60b5904c003d535bf917624fa2e2cfc0e21dac6b | [
"Apache-2.0"
] | null | null | null | DSPNP_practical2/DSPNP_notebook2_digits.ipynb | frantu08/Data_Science_Unit_20-21 | 60b5904c003d535bf917624fa2e2cfc0e21dac6b | [
"Apache-2.0"
] | null | null | null | 52.395068 | 25,880 | 0.591937 | [
[
[
"# Practical Session 2: Classification algorithms\n\n*Notebook by Ekaterina Kochmar*",
"_____no_output_____"
],
[
"## 0.1 Your task\n\nIn practical 1, you worked with the housing prices and bike sharing datasets on the tasks that required you to predict some value (e.g., price of a house) or amount (e.g., the count of rented bikes, or the number of registered users) based on a number of attributes – age of the house, number of rooms, income level of the house owners for the house price prediction (or weather conditions and time of the day for the prediction of the number of rented bikes). That is, you were predicting some continuous value.\n\nThis time, your task is to predict a particular category the instance belongs to based on its characteristics. This type of tasks is called *classification*.",
"_____no_output_____"
],
[
"# Assignment: Handwritten digits dataset\n\nThe dataset that you will use in this assignment is the [*digits* dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) which contains $1797$ images of $10$ hand-written digits. The digits have been preprocessed so that $32 \\times 32$ bitmaps are divided into non-overlapping blocks of $4 \\times 4$ and the number of on pixels are counted in each block. This generates an input matrix of $8 \\times 8$ where each element is an integer in the range of $[0, ..., 16]$. This reduces dimensionality and gives invariance to small distortions.\n\nFor further information on NIST preprocessing routines applied to this data, see M. D. Garris, J. L. Blue, G. T. Candela, D. L. Dimmick, J. Geist, P. J. Grother, S. A. Janet, and C. L. Wilson, *NIST Form-Based Handprint Recognition System*, NISTIR 5469, 1994.\n\nAs before, use the `sklearn`'s data uploading routines to load the dataset and get the data fields:",
"_____no_output_____"
]
],
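  [
    [
"To make the preprocessing description above concrete, the sketch below shows how a 32x32 binary bitmap could be reduced to the 8x8 grid of counts: each entry is the number of 'on' pixels in a non-overlapping 4x4 block, so it lies in [0, 16]. This is only an illustration of the idea; scikit-learn ships the already-preprocessed data.",
"_____no_output_____"
    ],
    [
"import numpy as np\n\nbitmap = (np.random.rand(32, 32) > 0.5).astype(int)  # stand-in for a 32x32 binary digit image\nblocks = bitmap.reshape(8, 4, 8, 4)                   # 8x8 grid of non-overlapping 4x4 blocks\ncounts = blocks.sum(axis=(1, 3))                      # count 'on' pixels per block -> values in [0, 16]\ncounts.shape                                          # (8, 8)",
"_____no_output_____"
    ]
  ],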
[
[
"from sklearn import datasets\ndigits = datasets.load_digits()\nlist(digits.keys())",
"_____no_output_____"
],
[
"digits",
"_____no_output_____"
],
[
"X, y = digits[\"data\"], digits[\"target\"]\nX.shape",
"_____no_output_____"
],
[
"y.shape",
"_____no_output_____"
]
],
[
[
"You can access the digits and visualise them using the following code (feel free to select another digit):",
"_____no_output_____"
]
],
[
[
"import matplotlib\nfrom matplotlib import pyplot as plt\n\nsome_digit = X[3]\nsome_digit_image = some_digit.reshape(8, 8)\n\nplt.imshow(some_digit_image, cmap=matplotlib.cm.binary, interpolation=\"nearest\")\nplt.axis(\"off\")\nplt.show()",
"_____no_output_____"
],
[
"y[3]",
"_____no_output_____"
]
],
[
[
"For the rest of the practical, apply the data preprocessing techniques, implement and evaluate the classification models on the digits dataset using the steps that you applied above to the iris dataset.",
"_____no_output_____"
],
[
"## Step 2: Splitting the data into training and test subsets",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import StratifiedShuffleSplit\n\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\nsplit.get_n_splits(X, y)\nprint(split) \n\nfor train_index, test_index in split.split(X, y):\n print(\"TRAIN:\", len(train_index), \"TEST:\", len(test_index))\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\nprint(X_train.shape, y_train.shape, X_test.shape, y_test.shape)",
"StratifiedShuffleSplit(n_splits=1, random_state=42, test_size=0.2,\n train_size=None)\nTRAIN: 1437 TEST: 360\n(1437, 64) (1437,) (360, 64) (360,)\n"
]
],
[
[
"Check proportions",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\n# def original_proportions(data):\n# props = {}\n# for value in set(data[\"target\"]):\n# data_value = [i for i in data[\"target\"] if i==value]\n# props[value] = len(data_value) / len(data[\"target\"])\n# return props\n\ndef subset_proportions(subset):\n props = {}\n for value in set(subset):\n data_value = [i for i in subset if i==value]\n props[value] = len(data_value) / len(subset)\n return props\n\n \ncompare_props = pd.DataFrame({\n \"Overall\": subset_proportions(digits[\"target\"]),\n \"Stratified tr\": subset_proportions(y_train),\n \"Stratified ts\": subset_proportions(y_test),\n})\ncompare_props[\"Strat. tr %error\"] = 100 * compare_props[\"Stratified tr\"] / compare_props[\"Overall\"] - 100\ncompare_props[\"Strat. ts %error\"] = 100 * compare_props[\"Stratified ts\"] / compare_props[\"Overall\"] - 100\n\ncompare_props.sort_index()",
"_____no_output_____"
]
],
[
[
"# Case 1: Binary Classification",
"_____no_output_____"
]
],
[
[
"y_train_zero = (y_train == 0) # will return True when the label is 0 (i.e., zero)\ny_test_zero = (y_test == 0)\ny_test_zero",
"_____no_output_____"
],
[
"zero_example = X_test[10]\n",
"_____no_output_____"
]
],
[
[
"## Perceptron",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import SGDClassifier\n\nsgd = SGDClassifier(max_iter=5, tol=None, random_state=42,\n loss=\"perceptron\", eta0=1, learning_rate=\"constant\", penalty=None)\nsgd.fit(X_train, y_train_zero)\nsgd.predict([zero_example])",
"_____no_output_____"
]
],
[
[
"Trying it for label 1",
"_____no_output_____"
]
],
[
[
"y_train_one = (y_train == 1) # True when the label is 1 (i.e., versicolor)\ny_test_one = (y_test == 1)\ny_test_one",
"_____no_output_____"
],
[
"one_example = X_test[40]\nprint(\"Class\", y_test[40], \"(\", digits.target_names[y_test[40]], \")\")\n\nsgd.fit(X_train, y_train_one)\nprint(sgd.predict([one_example]))",
"Class 1 ( 1 )\n[ True]\n"
]
],
[
[
"Perceptron did well",
"_____no_output_____"
],
[
"## Logistic Regression",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression\n\nlog_reg = LogisticRegression()\nlog_reg.fit(X_train, y_train_zero)\nprint(log_reg.predict([zero_example]))",
"[ True]\n"
],
[
"log_reg.fit(X_train, y_train_one)\nlog_reg.predict([one_example])",
"_____no_output_____"
]
],
[
[
"Looks like Logistic regression didn't get the 1",
"_____no_output_____"
],
[
"## Naive Bayes",
"_____no_output_____"
]
],
[
[
"from sklearn.naive_bayes import GaussianNB, MultinomialNB\n\ngnb = MultinomialNB() # or:\ngnb = GaussianNB() \ngnb.fit(X_train, y_train_zero)\ngnb.predict([zero_example])",
"_____no_output_____"
],
[
"gnb.fit(X_train, y_train_one)\n\ngnb.predict([one_example])",
"_____no_output_____"
]
],
[
[
"Naive Bayes did good",
"_____no_output_____"
],
[
"# Step 3: Evaluation\n## Performance measures\n- Acc for cross-val",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_val_score\n\nprint(cross_val_score(log_reg, X_train, y_train_zero, cv=5, scoring=\"accuracy\"))\nprint(cross_val_score(gnb, X_train, y_train_zero, cv=5, scoring=\"accuracy\"))\nprint(cross_val_score(sgd, X_train, y_train_zero, cv=5, scoring=\"accuracy\"))",
"[1. 1. 0.99651568 1. 1. ]\n[0.98958333 0.99305556 0.99651568 1. 0.9825784 ]\n[0.99652778 0.99652778 0.99651568 1. 1. ]\n"
],
[
"print(cross_val_score(log_reg, X_train, y_train_one, cv=5, scoring=\"accuracy\"))\nprint(cross_val_score(gnb, X_train, y_train_one, cv=5, scoring=\"accuracy\"))\nprint(cross_val_score(sgd, X_train, y_train_one, cv=5, scoring=\"accuracy\"))",
"[0.97916667 0.97916667 0.96515679 0.97212544 0.95470383]\n[0.61805556 0.62847222 0.61324042 0.66550523 0.51916376]\n[0.97222222 0.95486111 0.95470383 0.95470383 0.95818815]\n"
]
],
[
[
"Brute force predicting only non-ones",
"_____no_output_____"
]
],
[
[
"from sklearn.base import BaseEstimator\nimport numpy as np\nnp.random.seed(42)\n\nclass NotXClassifier(BaseEstimator):\n def fit(self, X, y=None):\n pass\n def predict(self, X):\n return np.zeros((len(X), 1), dtype=bool)\n \nnotone_clf = NotXClassifier()\ncross_val_score(notone_clf, X_train, y_train_one, cv=5, scoring=\"accuracy\")",
"_____no_output_____"
]
],
[
[
"- Confusion Matrix",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import confusion_matrix\n\ny_train_pred = cross_val_predict(log_reg, X_train, y_train_zero, cv=5)\nconfusion_matrix(y_train_zero, y_train_pred)",
"_____no_output_____"
],
[
"y_train_pred = cross_val_predict(gnb, X_train, y_train_zero, cv=5)\nconfusion_matrix(y_train_zero, y_train_pred)",
"_____no_output_____"
],
[
"y_train_pred = cross_val_predict(log_reg, X_train, y_train_one, cv=5)\nconfusion_matrix(y_train_one, y_train_pred)",
"_____no_output_____"
],
[
"y_train_pred = cross_val_predict(gnb, X_train, y_train_one, cv=5)\nconfusion_matrix(y_train_one, y_train_pred)",
"_____no_output_____"
]
],
[
[
"- precision, recall, f1",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import precision_score, recall_score, f1_score\n\ny_train_pred = cross_val_predict(gnb, X_train, y_train_one, cv=5)\nprecision = precision_score(y_train_one, y_train_pred) # == 36 / (36 + 5)\nrecall = recall_score(y_train_one, y_train_pred) # == 36 / (36 + 4)\nf1 = f1_score(y_train_one, y_train_pred)\nprint(precision, recall, f1)\n\ny_train_pred = cross_val_predict(log_reg, X_train, y_train_one, cv=5)\nprecision = precision_score(y_train_one, y_train_pred) # == 15 / (15 + 9)\nrecall = recall_score(y_train_one, y_train_pred) # == 15 / (15 + 25)\nf1 = f1_score(y_train_one, y_train_pred)\nprint(precision, recall, f1)",
"0.20454545454545456 0.9863013698630136 0.3388235294117647\n0.832258064516129 0.8835616438356164 0.8571428571428571\n"
]
],
[
[
"Oh no, poor gnb",
"_____no_output_____"
],
[
"- Precision-recall treade-off",
"_____no_output_____"
],
[
"Confidence score",
"_____no_output_____"
]
],
[
[
"log_reg.fit(X_train, y_train_one)\n\ny_scores = log_reg.decision_function([one_example])\ny_scores",
"_____no_output_____"
],
[
"threshold = 0\ny_one_pred = (y_scores > threshold)\ny_one_pred",
"_____no_output_____"
],
[
"threshold = -2\ny_one_pred = (y_scores > threshold)\ny_one_pred",
"_____no_output_____"
]
],
[
[
"Confidence scores",
"_____no_output_____"
]
],
[
[
"y_scores = cross_val_predict(log_reg, X_train, y_train_one, cv=5, method=\"decision_function\")\ny_scores",
"_____no_output_____"
]
],
[
[
"Plot precision vs recall",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import precision_recall_curve\n \nprecisions, recalls, thresholds = precision_recall_curve(y_train_one, y_scores)\n\ndef plot_pr_vs_threshold(precisions, recalls, thresholds):\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\")\n plt.plot(thresholds, recalls[:-1], \"g--\", label=\"Recall\")\n plt.xlabel(\"Threshold\")\n plt.legend(loc=\"upper right\")\n plt.ylim([0, 1])\n \nplot_pr_vs_threshold(precisions, recalls, thresholds)\nplt.show()",
"_____no_output_____"
],
[
"def plot_precision_vs_recall(precisions, recalls):\n plt.plot(recalls, precisions, \"b-\", linewidth=2)\n plt.xlabel(\"Recall\")\n plt.ylabel(\"Precision\")\n plt.axis([0, 1, 0, 1])\n\nplot_precision_vs_recall(precisions, recalls)\nplt.show()",
"_____no_output_____"
]
],
[
[
"- The Receiver Operating Characteristic (ROC)",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import roc_curve\n\nfpr, tpr, thresholds = roc_curve(y_train_one, y_scores)\n\ndef plot_roc_curve(fpr, tpr, label=None):\n plt.plot(fpr, tpr, linewidth=2, label=label)\n plt.plot([0, 1], [0, 1], \"k--\")\n plt.axis([0, 1, 0, 1.01])\n plt.xlabel(\"False positive rate (fpr)\")\n plt.ylabel(\"True positive rate (tpr)\")\n \nplot_roc_curve(fpr, tpr)\nplt.show()",
"_____no_output_____"
],
[
"# Area\nfrom sklearn.metrics import roc_auc_score\nroc_auc_score(y_train_one, y_scores)",
"_____no_output_____"
],
[
"# Now with GNB\ny_probas_gnb = cross_val_predict(gnb, X_train, y_train_one, cv=3, method=\"predict_proba\")\ny_scores_gnb = y_probas_gnb[:, 1] # score = proba of the positive class\nfpr_gnb, tpr_gnb, thresholds_gnb = roc_curve(y_train_one, y_scores_gnb)\n\nplt.plot(fpr, tpr, \"b:\", label=\"Logistic Regression\")\nplot_roc_curve(fpr_gnb, tpr_gnb, \"Gaussian Naive Bayes\")\nplt.legend(loc=\"lower right\")\nplt.show()",
"_____no_output_____"
],
[
"#Area\nroc_auc_score(y_train_one, y_scores_gnb)",
"_____no_output_____"
]
],
[
[
"Looks like Logistic Regression outperformed Gaussian Naive Bayes",
"_____no_output_____"
],
[
"# Step 4: Data transformations",
"_____no_output_____"
],
[
"## Kernel trick\n\n- with gamma = 1 we get EXTREMELY bad results\n- gamma = 0.001 solves that",
"_____no_output_____"
]
],
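  [
    [
"A rough intuition for why `gamma` matters so much here (a back-of-the-envelope sketch, not part of the assignment): the RBF kernel is $\\exp(-\\gamma\\, d^2)$, where $d^2$ is the squared Euclidean distance between two samples. With raw pixel values in $[0, 16]$ over 64 features, $d^2$ is typically in the thousands, so `gamma = 1` drives almost every kernel value to zero, while `gamma = 0.001` keeps them in a useful range.",
"_____no_output_____"
    ],
    [
"import numpy as np\n\nd2 = np.sum((X_train[0] - X_train[1]) ** 2)    # squared distance between two training digits\nprint(d2)\nprint(np.exp(-1.0 * d2), np.exp(-0.001 * d2))  # kernel value at gamma = 1 vs gamma = 0.001",
"_____no_output_____"
    ]
  ],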
[
[
"from sklearn.kernel_approximation import RBFSampler\n\nrbf_features = RBFSampler(gamma=0.001, random_state=42)\nX_train_features = rbf_features.fit_transform(X_train)\nprint(X_train.shape, \"->\", X_train_features.shape)\nsgd_rbf = SGDClassifier(max_iter=100, random_state=42, loss=\"perceptron\", \n eta0=1, learning_rate=\"constant\", penalty=None)\nsgd_rbf.fit(X_train_features, y_train_one) \n\nsgd_rbf.score(X_train_features, y_train_one)\nprint(X_train_features[0])",
"(1437, 64) -> (1437, 100)\n[-0.08948691 0.07840032 0.13012926 0.01843734 0.05243378 0.12363736\n -0.13138175 0.12487656 -0.06774296 -0.13116408 0.14131867 0.13014406\n -0.1412175 0.01781151 0.08980399 0.14045931 0.06150205 0.11652648\n 0.13544217 -0.11087697 0.13594337 -0.0800289 0.0574227 0.01216671\n 0.11133797 0.00604765 0.12907269 0.04008129 0.10124134 0.14130664\n 0.09733658 -0.14111269 0.11467299 -0.03910098 -0.05214749 -0.05723397\n -0.02252198 -0.1064269 0.00072984 -0.08188124 0.01504524 -0.1212134\n -0.0339027 0.10711778 0.01232271 -0.10386685 -0.08298496 0.13956306\n -0.03454778 0.14113989 -0.09677051 0.03187626 -0.07078854 -0.12390397\n 0.13693932 0.09349667 -0.12903172 0.0018465 -0.02683269 -0.062455\n 0.14121793 -0.01998847 0.13880371 0.13414756 -0.14132905 0.13276154\n -0.14141921 -0.05054704 0.12889829 0.13459871 -0.03282508 0.13367935\n -0.06263253 -0.11907552 0.14105804 0.13411986 0.06823374 0.08644726\n 0.09729963 0.14135676 -0.04737141 0.0218788 0.09904029 -0.12565361\n 0.1260095 0.04542973 0.08625159 -0.06465836 0.09918457 0.13192078\n 0.10236442 0.13360416 -0.081419 -0.09102759 0.13254435 -0.05242659\n 0.04783216 -0.14066595 -0.02853276 -0.11711412]\n"
]
],
[
[
"- Precision, recall and F1 : non-kernel vs kernel GNB",
"_____no_output_____"
]
],
[
[
"y_train_pred = cross_val_predict(sgd, X_train, y_train_one, cv=5)\nprecision = precision_score(y_train_one, y_train_pred)\nrecall = recall_score(y_train_one, y_train_pred)\nf1 = f1_score(y_train_one, y_train_pred)\nprint(precision, recall, f1)\n\ny_train_pred = cross_val_predict(sgd_rbf, X_train_features, y_train_one, cv=5)\nprecision = precision_score(y_train_one, y_train_pred)\nrecall = recall_score(y_train_one, y_train_pred)\nf1 = f1_score(y_train_one, y_train_pred)\nprint(precision, recall, f1)",
"0.8270676691729323 0.7534246575342466 0.7885304659498208\n0.9154929577464789 0.8904109589041096 0.9027777777777778\n"
]
],
[
[
"## Case 2: Multi-class classification",
"_____no_output_____"
]
],
[
[
"sgd.fit(X_train, y_train) # i.e., all instances, not just one class\nprint(sgd.predict([zero_example]))\nprint(sgd.predict([one_example]))",
"[0]\n[1]\n"
]
],
[
[
"half good",
"_____no_output_____"
]
],
[
[
"sgd_rbf.fit(X_train_features, y_train) # i.e., all instances, not just one class\nX_test_features = rbf_features.transform(X_test)\nzero_rbf_example = X_test_features[10] # note that you need to transform the test data in the same way, too\none_rbf_example = X_test_features[3]\n\nprint(sgd_rbf.predict([zero_rbf_example]))\nprint(sgd_rbf.predict([one_rbf_example]))",
"[0]\n[6]\n"
]
],
[
[
"half good",
"_____no_output_____"
]
],
[
[
"zero_scores = sgd_rbf.decision_function([zero_rbf_example])\nprint(zero_scores)\n\n# check which class gets the maximum score\nprediction = np.argmax(zero_scores)\nprint(prediction)\n# check which class this corresponds to in the classifier\nprint(sgd_rbf.classes_[prediction])\nprint(digits.target_names[sgd_rbf.classes_[prediction]])\n",
"[[ 1.32752637 -8.43047571 -0.56672488 -3.41607774 -2.43529497 -2.63031545\n -3.12302686 -2.30546944 -3.48919124 -6.71624512]]\n0\n0\n0\n"
]
],
[
[
"good",
"_____no_output_____"
]
],
[
[
"# with the kernel\none_scores = sgd_rbf.decision_function([one_rbf_example])\nprint(one_scores)\nprediction = np.argmax(one_scores)\nprint(prediction)\nprint(digits.target_names[sgd_rbf.classes_[prediction]])",
"[[-1.43787351 -1.87790689 -2.6351228 -3.70534368 -2.11141745 -3.57570642\n -0.83998359 -3.2773025 -2.55029636 -3.2336616 ]]\n6\n6\n"
]
],
[
[
"):",
"_____no_output_____"
]
],
[
[
"# without the kernel\none_scores = sgd.decision_function([one_example])\nprint(one_scores)\nprediction = np.argmax(one_scores)\nprint(prediction)\nprint(digits.target_names[sgd.classes_[prediction]])",
"[[-10026. -333. -5977. -2605. -5370. -6327. -7540. -2234. -1181.\n -6917.]]\n1\n1\n"
]
],
[
[
"### One VS One",
"_____no_output_____"
]
],
[
[
"from sklearn.multiclass import OneVsOneClassifier\n\novo_clf = OneVsOneClassifier(SGDClassifier(max_iter=100, random_state=42, loss=\"perceptron\", \n eta0=1, learning_rate=\"constant\", penalty=None))\novo_clf.fit(X_train_features, y_train)\novo_clf.predict([one_rbf_example])",
"_____no_output_____"
],
[
"len(ovo_clf.estimators_)",
"_____no_output_____"
]
],
[
[
"10-way Naive Bayes ",
"_____no_output_____"
]
],
[
[
"gnb.fit(X_train, y_train)\ngnb.predict([one_example])",
"_____no_output_____"
]
],
[
[
"wow\n\nIt correctly classifies the *one* example, so let's check how confident it is about this prediction (use `predict_proba` with `NaiveBayes` and `decision_function` with the `SGDClassifier`):\n",
"_____no_output_____"
]
],
[
[
"gnb.predict_proba([one_example])",
"_____no_output_____"
]
],
[
[
"Cross-validation performance",
"_____no_output_____"
]
],
[
[
"print(cross_val_score(sgd_rbf, X_train_features, y_train, cv=5, scoring=\"accuracy\"))\nprint(cross_val_score(ovo_clf, X_train_features, y_train, cv=5, scoring=\"accuracy\"))\nprint(cross_val_score(gnb, X_train, y_train, cv=5, scoring=\"accuracy\"))",
"[0.9375 0.90625 0.91637631 0.91986063 0.90592334]\n[0.93402778 0.92013889 0.92334495 0.91986063 0.91986063]\n[0.85416667 0.83333333 0.81881533 0.85365854 0.77700348]\n"
]
],
[
[
"### Scaling\nLet's apply scaling",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import StandardScaler, MinMaxScaler\n\n#scaler = StandardScaler()\nscalar = MinMaxScaler()\nX_train_scaled = scaler.fit_transform(X_train.astype(np.float64))\nX_train_features_scaled = scaler.fit_transform(X_train_features.astype(np.float64))\n\nprint(cross_val_score(sgd_rbf, X_train_features_scaled, y_train, cv=5, scoring=\"accuracy\"))\nprint(cross_val_score(ovo_clf, X_train_features_scaled, y_train, cv=5, scoring=\"accuracy\"))\nprint(cross_val_score(gnb, X_train_scaled, y_train, cv=5, scoring=\"accuracy\"))",
"[0.93402778 0.90625 0.8989547 0.87108014 0.87456446]\n[0.94444444 0.9375 0.90940767 0.92682927 0.89198606]\n[0.79166667 0.78472222 0.76655052 0.80836237 0.72473868]\n"
]
],
[
[
"- StandardScaler() only made things worse\n\n[0.93402778 0.90625 0.8989547 0.87108014 0.87456446]\n\n[0.94444444 0.9375 0.90940767 0.92682927 0.89198606]\n\n[0.79166667 0.78472222 0.76655052 0.80836237 0.72473868]\n\n- MinMaxScaler() gives exact same values\n\n\n",
"_____no_output_____"
],
[
"# Step 5: Error Analysis",
"_____no_output_____"
]
],
[
[
"y_train_pred = cross_val_predict(sgd_rbf, X_train_features_scaled, y_train, cv=3)\nconf_mx = confusion_matrix(y_train, y_train_pred)\nconf_mx",
"_____no_output_____"
],
[
"plt.imshow(conf_mx, cmap = \"jet\")\nplt.show()",
"_____no_output_____"
],
[
"row_sums = conf_mx.sum(axis=1, keepdims=True)\nnorm_conf_mx = conf_mx / row_sums\nnp.fill_diagonal(norm_conf_mx, 0)\nplt.imshow(norm_conf_mx, cmap = \"jet\")\nplt.show()",
"_____no_output_____"
],
[
"y_train_pred = cross_val_predict(sgd, X_train_features_scaled, y_train, cv=3)\nconf_mx = confusion_matrix(y_train, y_train_pred)\nconf_mx",
"_____no_output_____"
],
[
"plt.imshow(conf_mx, cmap = \"jet\")\nplt.show()",
"_____no_output_____"
],
[
"row_sums = conf_mx.sum(axis=1, keepdims=True)\nnorm_conf_mx = conf_mx / row_sums\nnp.fill_diagonal(norm_conf_mx, 0)\nplt.imshow(norm_conf_mx, cmap = \"jet\")\nplt.show()",
"_____no_output_____"
],
[
"y_train_pred = cross_val_predict(ovo_clf, X_train_features_scaled, y_train, cv=3)\nconf_mx = confusion_matrix(y_train, y_train_pred)\nconf_mx",
"_____no_output_____"
],
[
"plt.imshow(conf_mx, cmap = \"jet\")\nplt.show()",
"_____no_output_____"
],
[
"row_sums = conf_mx.sum(axis=1, keepdims=True)\nnorm_conf_mx = conf_mx / row_sums\nnp.fill_diagonal(norm_conf_mx, 0)\nplt.imshow(norm_conf_mx, cmap = \"jet\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Final step – evaluating on the test set",
"_____no_output_____"
]
],
[
[
"# non-kernel perceptron\nfrom sklearn.metrics import accuracy_score\n\ny_pred = sgd.predict(X_test)\naccuracy_score(y_test, y_pred)",
"_____no_output_____"
],
[
"precision = precision_score(y_test, y_pred, average='weighted')\nrecall = recall_score(y_test, y_pred, average='weighted')\nf1 = f1_score(y_test, y_pred, average='weighted')\nprint(precision, recall, f1)",
"0.9506739556477815 0.9472222222222222 0.9472461128819086\n"
],
[
"# kernel perceptron\nfrom sklearn.metrics import accuracy_score\n\nX_test_features_scaled = scaler.transform(X_test_features.astype(np.float64))\ny_pred = sgd_rbf.predict(X_test_features_scaled)\naccuracy_score(y_test, y_pred)",
"_____no_output_____"
],
[
"precision = precision_score(y_test, y_pred, average='weighted')\nrecall = recall_score(y_test, y_pred, average='weighted')\nf1 = f1_score(y_test, y_pred, average='weighted')\nprint(precision, recall, f1)",
"0.9330274822584538 0.9305555555555556 0.9299190345492117\n"
]
],
[
[
"The OvO SGD classifier:",
"_____no_output_____"
]
],
[
[
"# One vs one\nfrom sklearn.metrics import accuracy_score\n\nX_test_features_scaled = scaler.transform(X_test_features.astype(np.float64))\ny_pred = ovo_clf.predict(X_test_features_scaled)\naccuracy_score(y_test, y_pred)",
"_____no_output_____"
],
[
"precision = precision_score(y_test, y_pred, average='weighted')\nrecall = recall_score(y_test, y_pred, average='weighted')\nf1 = f1_score(y_test, y_pred, average='weighted')\nprint(precision, recall, f1)",
"0.9038423919737274 0.8944444444444445 0.8906734076041858\n"
]
],
[
[
"Naive Bayes",
"_____no_output_____"
]
],
[
[
"#Naive Bayes\ngnb.fit(X_train, y_train)\ny_pred = gnb.predict(X_test)\naccuracy_score(y_test, y_pred)",
"_____no_output_____"
],
[
"precision = precision_score(y_test, y_pred, average='weighted')\nrecall = recall_score(y_test, y_pred, average='weighted')\nf1 = f1_score(y_test, y_pred, average='weighted')\nprint(precision, recall, f1)",
"0.8479871939298477 0.8111111111111111 0.8150828576150382\n"
]
],
[
[
"Seems like non-kernel perceptron's doing the best",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7c80023ec9534a0e4f58ae2afa1c05ce3e5af12 | 20,432 | ipynb | Jupyter Notebook | .ipynb_checkpoints/data_collection-collisionavoidance_Jetbot_Joystick-checkpoint.ipynb | tomMEM/Jetbot-Project | d84cb09a8a51208437734280e4a5c927a8b034a1 | [
"MIT"
] | 8 | 2020-09-24T06:43:54.000Z | 2022-01-23T20:52:43.000Z | .ipynb_checkpoints/data_collection-collisionavoidance_Jetbot_Joystick-checkpoint.ipynb | tomMEM/Jetbot-Project | d84cb09a8a51208437734280e4a5c927a8b034a1 | [
"MIT"
] | 4 | 2021-02-24T22:07:02.000Z | 2021-09-09T03:24:43.000Z | data_collection-collisionavoidance_Jetbot_Joystick.ipynb | tomMEM/Jetbot-Project | d84cb09a8a51208437734280e4a5c927a8b034a1 | [
"MIT"
] | 6 | 2020-09-14T00:22:35.000Z | 2021-09-25T09:26:44.000Z | 42.127835 | 506 | 0.626321 | [
[
[
"# Collision Avoidance - Data Collection\n\nIf you ran through the basic motion notebook, hopefully you're enjoying how easy it can be to make your Jetbot move around! Thats very cool! But what's even cooler, is making JetBot move around all by itself! \n\nThis is a super hard task, that has many different approaches but the whole problem is usually broken down into easier sub-problems. It could be argued that one of the most\nimportant sub-problems to solve, is the problem of preventing the robot from entering dangerous situations! We're calling this *collision avoidance*. \n\nIn this set of notebooks, we're going to attempt to solve the problem using deep learning and a single, very versatile, sensor: the camera. You'll see how with a neural network, camera, and the NVIDIA Jetson Nano, we can teach the robot a very useful behavior!\n\nThe approach we take to avoiding collisions is to create a virtual \"safety bubble\" around the robot. Within this safety bubble, the robot is able to spin in a circle without hitting any objects (or other dangerous situations like falling off a ledge). \n\n\nOf course, the robot is limited by what's in it's field of vision, and we can't prevent objects from being placed behind the robot, etc. But we can prevent the robot from entering these scenarios itself.\n\nThe way we'll do this is super simple: \n\nFirst, we'll manually place the robot in scenarios where it's \"safety bubble\" is violated, and label these scenarios ``blocked``. We save a snapshot of what the robot sees along with this label.\n\nSecond, we'll manually place the robot in scenarios where it's safe to move forward a bit, and label these scenarios ``free``. Likewise, we save a snapshot along with this label.\n\nThat's all that we'll do in this notebook; data collection. Once we have lots of images and labels, we'll upload this data to a GPU enabled machine where we'll *train* a neural network to predict whether the robot's safety bubble is being violated based off of the image it sees. We'll use this to implement a simple collision avoidance behavior in the end :)\n\n> IMPORTANT NOTE: When JetBot spins in place, it actually spins about the center between the two wheels, not the center of the robot chassis itself. This is an important detail to remember when you're trying to estimate whether the robot's safety bubble is violated or not. But don't worry, you don't have to be exact. If in doubt it's better to lean on the cautious side (a big safety bubble). We want to make sure JetBot doesn't enter a scenario that it couldn't get out of by turning in place.",
"_____no_output_____"
],
[
"### Display live camera feed\n\nSo let's get started. First, let's initialize and display our camera like we did in the *teleoperation* notebook. \n\n> Our neural network takes a 224x224 pixel image as input. We'll set our camera to that size to minimize the filesize of our dataset (we've tested that it works for this task).\n> In some scenarios it may be better to collect data in a larger image size and downscale to the desired size later.",
"_____no_output_____"
]
],
[
[
"import traitlets\nimport ipywidgets.widgets as widgets\nfrom IPython.display import display\nfrom jetbot import Camera, bgr8_to_jpeg\n#\ncamera = Camera.instance(width=224, height=224)\nimage = widgets.Image(format='jpeg', width=224, height=224) # this width and height doesn't necessarily have to match the camera\ncamera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)\n",
"_____no_output_____"
]
],
[
[
"Awesome, next let's create a few directories where we'll store all our data. We'll create a folder ``dataset`` that will contain two sub-folders ``free`` and ``blocked``, \nwhere we'll place the images for each scenario.",
"_____no_output_____"
]
],
[
[
"import os\n\nblocked_dir = 'dataset/blocked'\nfree_dir = 'dataset/free'\n\n# we have this \"try/except\" statement because these next functions can throw an error if the directories exist already\ntry:\n os.makedirs(free_dir)\n os.makedirs(blocked_dir)\nexcept FileExistsError:\n print('Directories are not created because they already exist')",
"Directories are not created because they already exist\n"
]
],
[
[
"If you refresh the Jupyter file browser on the left, you should now see those directories appear. Next, let's create and display some buttons that we'll use to save snapshots\nfor each class label. We'll also add some text boxes that will display how many images of each category that we've collected so far. This is useful because we want to make\nsure we collect about as many ``free`` images as ``blocked`` images. It also helps to know how many images we've collected overall.",
"_____no_output_____"
]
],
[
[
"button_layout = widgets.Layout(width='128px', height='64px')\nfree_button = widgets.Button(description='add free', button_style='success', layout=button_layout)\nblocked_button = widgets.Button(description='add blocked', button_style='danger', layout=button_layout)\nfree_count = widgets.IntText(layout=button_layout, value=len(os.listdir(free_dir)))\nblocked_count = widgets.IntText(layout=button_layout, value=len(os.listdir(blocked_dir)))\n\nx=0 #from here TB\ncontroller = widgets.Controller(index=0) # replace with index of your controller\nbutton_layout = widgets.Layout(width='200px', height='64px') #TB\nfree_left = widgets.FloatText(layout=button_layout, value=x, description='forward') #TB\nfree_right = widgets.FloatText(layout=button_layout, value=x, description='turning') #TB\nmotorleft = widgets.FloatText(layout=button_layout, value=x, description='Motor Left') #TB\nmotorright = widgets.FloatText(layout=button_layout, value=x, description='Motor Right') #TB\n\nspeed_widget = widgets.FloatSlider(value=0.5, min=0.05, max=1.0, step=0.001, description='speed')\n#TB higher speed requires smaller turn_gain values: 2.5 for speed 0.22, around 2 for speed 0.4\nturn_gain_widget = widgets.FloatSlider(value=1, min=0.05, step=0.001, max=4.0, description='turn sensitivity')\n#TB value different for different forward speed, but very small differences\nmotoradjustment_widget = widgets.FloatSlider(value=0.02, min=0.00, max=0.2, step=0.0001, description='motoradjustment')\n",
"_____no_output_____"
],
[
"from jetbot import Robot\nimport traitlets\nimport math\n\nrobot = Robot()\n\n#TB to show the controller values\n\nleft_link = traitlets.dlink((controller.axes[1], 'value'), (free_left, 'value'), transform=lambda x: -x)\nright_link = traitlets.dlink((controller.axes[0], 'value'), (free_right, 'value'), transform=lambda x: -x)\n\ndef on_value_change(change):\n x= free_right.value\n y= free_left.value\n leftnew, rightnew = steering(x, y)\n motorright.value= round(float(leftnew),2) \n motorleft.value= round(float(rightnew + motoradjustment_widget.value),2) # adjust the motor that lags behind\n #motoradjustment value important to keep bot driving straight, small offset-values like 0.05\n robot.right_motor.value=motorright.value\n robot.left_motor.value=motorleft.value\n \ndef steering(x, y): \n #script from stackexchange of user Pedro Werneck \n #https://electronics.stackexchange.com/questions/19669/algorithm-for-mixing-2-axis-analog-input-to-control-a-differential-motor-drive\n # convert to polar\n r = math.hypot(x, y)\n t = math.atan2(y, x)\n\n # rotate by 45 degrees\n t += math.pi / -4.0\n\n # back to cartesian\n left = r * math.cos(t)\n right = r * math.sin(t)\n\n # rescale the new coords\n left = left * math.sqrt(2)\n right = right * math.sqrt(2)\n\n # clamp to -1/+1\n scalefactor= speed_widget.value\n left = max(scalefactor*-1.0, min(left, scalefactor))\n right = max(scalefactor*-1.0, min(right, scalefactor))\n \n #gamma correction for response sensitivity of joystick while turning : TB\n gamma=turn_gain_widget.value #using slider for joystick 1-4, for object recognition 2-40 \n if left <0 :\n left= -1* (((abs(left)/scalefactor)**(1/gamma))*scalefactor)\n else:\n left= ((abs(left)/scalefactor)**(1/gamma))*scalefactor\n \n if right <0:\n right= -1*(((abs(right)/scalefactor)**(1/gamma))*scalefactor)\n else:\n right= ((abs(right)/scalefactor)**(1/gamma))*scalefactor\n \n return left, right\n\n\nfree_left.observe(on_value_change, names='value')\nfree_right.observe(on_value_change, names='value')\n\n#left_link = traitlets.dlink((motorleft, 'value'), (robot.left_motor, 'value'))\n#right_link = traitlets.dlink((motorright, 'value'), (robot.right_motor, 'value'))",
"_____no_output_____"
],
[
"from jetbot import Heartbeat\n\n\ndef handle_heartbeat_status(change):\n if change['new'] == Heartbeat.Status.dead:\n camera_link.unlink()\n left_link.unlink()\n right_link.unlink()\n robot.stop()\n\nheartbeat = Heartbeat(period=0.5)\n\n# attach the callback function to heartbeat status\nheartbeat.observe(handle_heartbeat_status, names='status')",
"_____no_output_____"
]
],
[
[
"Right now, these buttons wont do anything. We have to attach functions to save images for each category to the buttons' ``on_click`` event. We'll save the value\nof the ``Image`` widget (rather than the camera), because it's already in compressed JPEG format!\n\nTo make sure we don't repeat any file names (even across different machines!) we'll use the ``uuid`` package in python, which defines the ``uuid1`` method to generate\na unique identifier. This unique identifier is generated from information like the current time and the machine address.",
"_____no_output_____"
]
],
[
[
"from uuid import uuid1\n\nsnapshot_image = widgets.Image(format='jpeg', width=224, height=224)\n\ndef save_snapshot(directory):\n image_path = os.path.join(directory, str(uuid1()) + '.jpg')\n with open(image_path, 'wb') as f:\n f.write(image.value)\n # display snapshot that was saved\n snapshot_image.value = image.value\n\ndef save_free(change):\n global free_dir, free_count\n if change['new']:\n save_snapshot(free_dir)\n free_count.value = len(os.listdir(free_dir))\n \ndef save_blocked(change):\n global blocked_dir, blocked_count\n if change['new']:\n save_snapshot(blocked_dir)\n blocked_count.value = len(os.listdir(blocked_dir))\n \ndef save_free_button():\n global free_dir, free_count\n save_snapshot(free_dir)\n free_count.value = len(os.listdir(free_dir))\n \ndef save_blocked_button():\n global blocked_dir, blocked_count\n save_snapshot(blocked_dir)\n blocked_count.value = len(os.listdir(blocked_dir))\n \n# attach the callbacks, we use a 'lambda' function to ignore the\n# parameter that the on_click event would provide to our function\n# because we don't need it.\n\ncontroller.buttons[5].observe(save_free, names='value') #TB gamepad button number 5\ncontroller.buttons[7].observe(save_blocked, names='value') #TB gamepad button numer 7\n\nfree_button.on_click(lambda x: save_free_button())\nblocked_button.on_click(lambda x: save_blocked_button())\n\n#display(image)\ndisplay(widgets.HBox([image, snapshot_image]))\ndisplay(controller)\n\ndisplay(widgets.VBox([\n speed_widget,\n turn_gain_widget,\n motoradjustment_widget,\n]))\n\ndisplay(widgets.HBox([free_left, free_right, motorleft, motorright]))\n\ndisplay(widgets.HBox([free_count, free_button]))\ndisplay(widgets.HBox([blocked_count, blocked_button]))",
"_____no_output_____"
],
[
"import time\n\ncamera.unobserve_all()\ntime.sleep(1.0)\nrobot.stop()",
"_____no_output_____"
]
],
[
[
"Great! Now the buttons above should save images to the ``free`` and ``blocked`` directories. You can use the Jupyter Lab file browser to view these files!\n\nNow go ahead and collect some data \n\n1. Place the robot in a scenario where it's blocked and press ``add blocked``\n2. Place the robot in a scenario where it's free and press ``add free``\n3. Repeat 1, 2\n\n> REMINDER: You can move the widgets to new windows by right clicking the cell and clicking ``Create New View for Output``. Or, you can just re-display them\n> together as we will below\n\nHere are some tips for labeling data\n\n1. Try different orientations\n2. Try different lighting\n3. Try varied object / collision types; walls, ledges, objects\n4. Try different textured floors / objects; patterned, smooth, glass, etc.\n\nUltimately, the more data we have of scenarios the robot will encounter in the real world, the better our collision avoidance behavior will be. It's important\nto get *varied* data (as described by the above tips) and not just a lot of data, but you'll probably need at least 100 images of each class (that's not a science, just a helpful tip here). But don't worry, it goes pretty fast once you get going :)",
"_____no_output_____"
],
[
"## Next\n\nOnce you've collected enough data, we'll need to copy that data to our GPU desktop or cloud machine for training. First, we can call the following *terminal* command to compress\nour dataset folder into a single *zip* file.\n\n> The ! prefix indicates that we want to run the cell as a *shell* (or *terminal*) command.\n\n> The -r flag in the zip command below indicates *recursive* so that we include all nested files, the -q flag indicates *quiet* so that the zip command doesn't print any output",
"_____no_output_____"
]
],
[
[
"!zip -r -q dataset.zip dataset",
"_____no_output_____"
]
],
[
[
"You should see a file named ``dataset.zip`` in the Jupyter Lab file browser. You should download the zip file using the Jupyter Lab file browser by right clicking and selecting ``Download``.\n\nNext, we'll need to upload this data to our GPU desktop or cloud machine (we refer to this as the *host*) to train the collision avoidance neural network. We'll assume that you've set up your training\nmachine as described in the JetBot WiKi. If you have, you can navigate to ``http://<host_ip_address>:8888`` to open up the Jupyter Lab environment running on the host. The notebook you'll need to open there is called ``collision_avoidance/train_model.ipynb``.\n\nSo head on over to your training machine and follow the instructions there! Once your model is trained, we'll return to the robot Jupyter Lab enivornment to use the model for a live demo!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7c8006e3e4d26b34415f985c6ccf8394f32019a | 28,746 | ipynb | Jupyter Notebook | notebooks/Solvers.ipynb | HighDimensionalEconLab/Laplacians.jl | 25c75811f697ff1030ded0155d0d35c1fa3223c3 | [
"MIT"
] | 167 | 2016-02-06T01:29:06.000Z | 2022-03-31T09:19:41.000Z | notebooks/Solvers.ipynb | HighDimensionalEconLab/Laplacians.jl | 25c75811f697ff1030ded0155d0d35c1fa3223c3 | [
"MIT"
] | 25 | 2016-05-15T18:21:42.000Z | 2022-02-21T23:56:02.000Z | notebooks/Solvers.ipynb | HighDimensionalEconLab/Laplacians.jl | 25c75811f697ff1030ded0155d0d35c1fa3223c3 | [
"MIT"
] | 39 | 2016-07-08T17:32:33.000Z | 2022-03-28T20:42:36.000Z | 22.670347 | 1,311 | 0.498121 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7c80e075390420d15f403cfb8c2a79c7c6f8041 | 159,404 | ipynb | Jupyter Notebook | notebooks/digit-classification-test.ipynb | MovsisyanM/digit-recognizer | 3b5b59a61a5fedec4a17c634e575dcaff1d673e4 | [
"MIT"
] | 5 | 2021-07-18T17:21:44.000Z | 2022-01-08T09:13:47.000Z | notebooks/digit-classification-test.ipynb | MovsisyanM/digit-recognizer | 3b5b59a61a5fedec4a17c634e575dcaff1d673e4 | [
"MIT"
] | null | null | null | notebooks/digit-classification-test.ipynb | MovsisyanM/digit-recognizer | 3b5b59a61a5fedec4a17c634e575dcaff1d673e4 | [
"MIT"
] | null | null | null | 117.554572 | 42,492 | 0.842231 | [
[
[
"# The Performance Of Models Trained On The MNIST Dataset On Custom-Drawn Images",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport tensorflow as tf\nimport sklearn, sklearn.linear_model, sklearn.multiclass, sklearn.naive_bayes\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"_____no_output_____"
],
[
"plt.rcParams[\"figure.figsize\"] = (10, 10)\nplt.rcParams.update({'font.size': 12})",
"_____no_output_____"
]
],
[
[
"## Defining the data",
"_____no_output_____"
]
],
[
[
"(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()",
"_____no_output_____"
]
],
[
[
"### Making 1D versions of the MNIST images for the one-vs-rest classifier",
"_____no_output_____"
]
],
[
[
"train_images_flat = train_images.reshape((train_images.shape[0], train_images.shape[1] * train_images.shape[2])) / 255.0\n\ntest_images_flat = test_images.reshape((test_images.shape[0], test_images.shape[1] * test_images.shape[2])) / 255.0",
"_____no_output_____"
]
],
[
[
"### Making a 4D dataset and categorical labels for the neural net",
"_____no_output_____"
]
],
[
[
"train_images = np.expand_dims(train_images, axis=-1) / 255.0\ntest_images = np.expand_dims(test_images, axis=-1) / 255.0\n\n#train_images = train_images.reshape(60000, 28, 28, 1)\n#test_images = test_images.reshape(10000, 28, 28, 1)\n\ntrain_labels_cat = tf.keras.utils.to_categorical(train_labels)\ntest_labels_cat = tf.keras.utils.to_categorical(test_labels)",
"_____no_output_____"
],
[
"def plot_images(images, labels, rows=5, cols=5, label='Label'): \n fig, axes = plt.subplots(rows, cols)\n fig.figsize=(15, 15) \n\n indices = np.random.choice(len(images), rows * cols)\n counter = 0\n\n for i in range(rows):\n for j in range(cols):\n axes[i, j].imshow(images[indices[counter]])\n axes[i, j].set_title(f\"{label}: {labels[indices[counter]]}\")\n axes[i, j].set_xticks([])\n axes[i, j].set_yticks([])\n counter += 1\n \n plt.tight_layout()\n plt.show()",
"_____no_output_____"
],
[
"plot_images(train_images, train_labels)",
"_____no_output_____"
]
],
[
[
"## Training",
"_____no_output_____"
],
[
"### Defining and training the one-vs-rest classifier",
"_____no_output_____"
]
],
[
[
"log_reg = sklearn.linear_model.SGDClassifier(loss='log', max_iter=1000, penalty='l2')\n\nclassifier = sklearn.multiclass.OneVsRestClassifier(log_reg)\n\nclassifier.fit(train_images_flat, train_labels)",
"_____no_output_____"
]
],
[
[
"### Defining and training the neural net",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras import Sequential\nfrom tensorflow.keras import layers",
"_____no_output_____"
],
[
"def create_model():\n model = Sequential([\n layers.Conv2D(64, 5, activation='relu', input_shape=(28, 28, 1)),\n layers.MaxPool2D(2),\n layers.Conv2D(128, 5, activation='relu'),\n layers.MaxPool2D(2),\n layers.GlobalAveragePooling2D(),\n layers.Dense(10, activation='softmax')\n ])\n \n model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])\n \n return model",
"_____no_output_____"
],
[
"model = create_model()",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 24, 24, 64) 1664 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 8, 8, 128) 204928 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 4, 4, 128) 0 \n_________________________________________________________________\nglobal_average_pooling2d (Gl (None, 128) 0 \n_________________________________________________________________\ndense (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 207,882\nTrainable params: 207,882\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"train_gen = tf.keras.preprocessing.image.ImageDataGenerator(zoom_range=0.3,\n height_shift_range=0.10,\n width_shift_range=0.10,\n rotation_range=10)\n\ntrain_datagen = train_gen.flow(train_images, train_labels_cat, batch_size=256)\n\n'''def scheduler(epoch): \n initial_lr = 0.001\n lr = initial_lr * np.exp(-0.1 * epoch)\n return lr\n\nfrom tensorflow.keras.callbacks import LearningRateScheduler\nlr_scheduler = LearningRateScheduler(scheduler, verbose=1)'''",
"_____no_output_____"
],
[
"history = model.fit(train_datagen, initial_epoch=0, epochs=30, batch_size=256,\n validation_data=(test_images, test_labels_cat))",
"Epoch 1/30\n235/235 [==============================] - 15s 55ms/step - loss: 1.0590 - accuracy: 0.6757 - val_loss: 0.2701 - val_accuracy: 0.9319\nEpoch 2/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.4189 - accuracy: 0.8813 - val_loss: 0.1617 - val_accuracy: 0.9566\nEpoch 3/30\n235/235 [==============================] - 13s 53ms/step - loss: 0.3105 - accuracy: 0.9123 - val_loss: 0.1374 - val_accuracy: 0.9592\nEpoch 4/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.2602 - accuracy: 0.9252 - val_loss: 0.0984 - val_accuracy: 0.9728\nEpoch 5/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.2215 - accuracy: 0.9368 - val_loss: 0.0929 - val_accuracy: 0.9718\nEpoch 6/30\n235/235 [==============================] - 13s 53ms/step - loss: 0.1958 - accuracy: 0.9436 - val_loss: 0.0849 - val_accuracy: 0.9737\nEpoch 7/30\n235/235 [==============================] - 13s 53ms/step - loss: 0.1724 - accuracy: 0.9498 - val_loss: 0.0593 - val_accuracy: 0.9819\nEpoch 8/30\n235/235 [==============================] - 13s 53ms/step - loss: 0.1539 - accuracy: 0.9554 - val_loss: 0.0618 - val_accuracy: 0.9821\nEpoch 9/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.1443 - accuracy: 0.9575 - val_loss: 0.0533 - val_accuracy: 0.9832\nEpoch 10/30\n235/235 [==============================] - 13s 53ms/step - loss: 0.1306 - accuracy: 0.9614 - val_loss: 0.0499 - val_accuracy: 0.9842\nEpoch 11/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.1251 - accuracy: 0.9637 - val_loss: 0.0482 - val_accuracy: 0.9856\nEpoch 12/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.1145 - accuracy: 0.9662 - val_loss: 0.0411 - val_accuracy: 0.9872\nEpoch 13/30\n235/235 [==============================] - 13s 55ms/step - loss: 0.1080 - accuracy: 0.9681 - val_loss: 0.0412 - val_accuracy: 0.9872\nEpoch 14/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.1036 - accuracy: 0.9694 - val_loss: 0.0400 - val_accuracy: 0.9877\nEpoch 15/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.0974 - accuracy: 0.9701 - val_loss: 0.0495 - val_accuracy: 0.9846\nEpoch 16/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.0967 - accuracy: 0.9710 - val_loss: 0.0386 - val_accuracy: 0.9881\nEpoch 17/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.0932 - accuracy: 0.9720 - val_loss: 0.0403 - val_accuracy: 0.9866\nEpoch 18/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.0857 - accuracy: 0.9747 - val_loss: 0.0417 - val_accuracy: 0.9866\nEpoch 19/30\n235/235 [==============================] - 13s 53ms/step - loss: 0.0843 - accuracy: 0.9755 - val_loss: 0.0332 - val_accuracy: 0.9894\nEpoch 20/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.0822 - accuracy: 0.9749 - val_loss: 0.0420 - val_accuracy: 0.9863\nEpoch 21/30\n235/235 [==============================] - 13s 56ms/step - loss: 0.0767 - accuracy: 0.9771 - val_loss: 0.0354 - val_accuracy: 0.9896\nEpoch 22/30\n235/235 [==============================] - 13s 55ms/step - loss: 0.0731 - accuracy: 0.9779 - val_loss: 0.0356 - val_accuracy: 0.9876\nEpoch 23/30\n235/235 [==============================] - 13s 55ms/step - loss: 0.0752 - accuracy: 0.9773 - val_loss: 0.0315 - val_accuracy: 0.9898\nEpoch 24/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.0749 - accuracy: 0.9776 - val_loss: 0.0280 - val_accuracy: 0.9902\nEpoch 25/30\n235/235 
[==============================] - 13s 54ms/step - loss: 0.0697 - accuracy: 0.9791 - val_loss: 0.0389 - val_accuracy: 0.9877\nEpoch 26/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.0718 - accuracy: 0.9780 - val_loss: 0.0345 - val_accuracy: 0.9891\nEpoch 27/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.0653 - accuracy: 0.9799 - val_loss: 0.0292 - val_accuracy: 0.9902\nEpoch 28/30\n235/235 [==============================] - 13s 53ms/step - loss: 0.0682 - accuracy: 0.9796 - val_loss: 0.0321 - val_accuracy: 0.9892\nEpoch 29/30\n235/235 [==============================] - 13s 54ms/step - loss: 0.0646 - accuracy: 0.9805 - val_loss: 0.0297 - val_accuracy: 0.9905\nEpoch 30/30\n235/235 [==============================] - 13s 55ms/step - loss: 0.0641 - accuracy: 0.9805 - val_loss: 0.0364 - val_accuracy: 0.9887\n"
],
[
"model.save('cnn-64-128-5-aug')\n#model.load_weights('cnn-64-128-5-aug')",
"INFO:tensorflow:Assets written to: cnn-64-128-5-aug\\assets\n"
]
],
[
[
"## Assessing model performance",
"_____no_output_____"
],
[
"### Loading drawn images",
"_____no_output_____"
]
],
[
[
"def read_images(filepaths, reverse=False):\n images = []\n images_flat = []\n \n for filepath in filepaths:\n image = tf.io.read_file(filepath)\n image = tf.image.decode_image(image, channels=1)\n image = tf.image.resize(image, (28, 28))\n \n if reverse: \n image = np.where(image == 255, 0, 255)\n else:\n image = image.numpy()\n \n image = image / 255.0 \n images.append(image)\n \n images_flat.append(image.reshape(28 * 28)) \n \n return np.array(images), np.array(images_flat)",
"_____no_output_____"
],
[
"filepaths = tf.io.gfile.glob('images/*.png')\n\nlist.sort(filepaths, key=lambda x: int(x[12:-4]))\n\nimages, images_flat = read_images(filepaths, True)",
"_____no_output_____"
],
[
"images.shape",
"_____no_output_____"
]
],
[
[
"### Creating labels for the one-vs-rest classifier and the neural net",
"_____no_output_____"
]
],
[
[
"labels = 100 * [0] + 98 * [1] + 100 * [2] + 101 * [3] + 99 * [4] + 111 * [5] + 89 * [6] + 110 * [7] + 93 * [8] + 112 * [9]\n\nlabels = np.array(labels)",
"_____no_output_____"
],
[
"labels.shape",
"_____no_output_____"
],
[
"labels_cat = tf.keras.utils.to_categorical(labels)\n\nlabels_cat.shape",
"_____no_output_____"
],
[
"labels_cat[0]",
"_____no_output_____"
]
],
[
[
"### Plotting the drawn images and their corresponding labels",
"_____no_output_____"
]
],
[
[
"plot_images(images, labels)",
"_____no_output_____"
]
],
[
[
"### Evaluating model performance",
"_____no_output_____"
],
[
"#### Neural net on MNIST test dataset",
"_____no_output_____"
]
],
[
[
"model.evaluate(test_images, test_labels_cat)",
"313/313 [==============================] - 1s 3ms/step - loss: 0.0364 - accuracy: 0.9887\n"
],
[
"from sklearn.metrics import classification_report, confusion_matrix\n\npredictions = np.argmax(model.predict(test_images), axis=-1)\n\nconf_mat = confusion_matrix(test_labels, predictions) ",
"_____no_output_____"
],
[
"conf_mat",
"_____no_output_____"
],
[
"class_report = classification_report(test_labels, predictions, output_dict=True)\n\nclass_report",
"_____no_output_____"
]
],
[
[
"#### Neural net on drawn images",
"_____no_output_____"
]
],
[
[
"model.evaluate(images, labels_cat)",
"32/32 [==============================] - 0s 4ms/step - loss: 0.1527 - accuracy: 0.9467\n"
],
[
"predictions = np.argmax(model.predict(images), axis=-1)",
"_____no_output_____"
],
[
"rows, cols = 5, 5\n\nfig, axes = plt.subplots(rows, cols)\nfig.figsize=(15, 15) \n\nindices = np.random.choice(len(images), rows * cols)\ncounter = 0\n \nfor i in range(rows):\n for j in range(cols):\n axes[i, j].imshow(images[indices[counter]])\n axes[i, j].set_title(f\"Prediction: {predictions[indices[counter]]}\\n\"\n f\"True label: {labels[indices[counter]]}\")\n axes[i, j].set_xticks([])\n axes[i, j].set_yticks([])\n counter += 1\n \nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"##### Plotting wrong predictions",
"_____no_output_____"
]
],
[
[
"wrong_predictions = list(filter(lambda x: x[1][0] != x[1][1], list(enumerate(zip(predictions, labels)))))",
"_____no_output_____"
],
[
"len(wrong_predictions)",
"_____no_output_____"
],
[
"cols, rows = 5, 5\n\nfig, axes = plt.subplots(rows, cols)\nfig.figsize=(15, 15) \n\ncounter = 0\n\nfor i in range(rows):\n for j in range(cols):\n axes[i, j].imshow(images[wrong_predictions[counter][0]])\n axes[i, j].set_title(f\"Prediction: {wrong_predictions[counter][1][0]}\\n\"\n f\"True label: {wrong_predictions[counter][1][1]}\")\n axes[i, j].set_xticks([])\n axes[i, j].set_yticks([])\n counter += 1\n \nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.metrics import classification_report, confusion_matrix\n\nconf_mat = confusion_matrix(labels, predictions) ",
"_____no_output_____"
],
[
"conf_mat",
"_____no_output_____"
],
[
"class_report = classification_report(labels, predictions, output_dict=True)\n\nclass_report",
"_____no_output_____"
]
],
[
[
"#### One-vs-rest classifier on MNIST test dataset",
"_____no_output_____"
]
],
[
[
"classifier.score(test_images_flat, test_labels)",
"_____no_output_____"
],
[
"predictions = classifier.predict(test_images_flat)\n\nconf_mat = confusion_matrix(test_labels, predictions) ",
"_____no_output_____"
],
[
"conf_mat",
"_____no_output_____"
],
[
"class_report = classification_report(test_labels, predictions, output_dict=True)",
"_____no_output_____"
],
[
"class_report",
"_____no_output_____"
]
],
[
[
"#### One-vs-rest classifier on drawn images",
"_____no_output_____"
]
],
[
[
"classifier.score(images_flat, labels)",
"_____no_output_____"
],
[
"predictions = classifier.predict(images_flat)\n\nconf_mat = confusion_matrix(labels, predictions) ",
"_____no_output_____"
],
[
"conf_mat",
"_____no_output_____"
],
[
"class_report = classification_report(labels, predictions, output_dict=True)",
"_____no_output_____"
],
[
"class_report",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7c83b776d57c601fe6be3dd9c8db5e2ef76c8b9 | 4,619 | ipynb | Jupyter Notebook | pandas/codes/Part02_Timestamp_Study.ipynb | dhb15735104415/python-DA-notes | cbbfcf49e9799e7dfb850067bb51ce28a0dbedc0 | [
"MIT"
] | null | null | null | pandas/codes/Part02_Timestamp_Study.ipynb | dhb15735104415/python-DA-notes | cbbfcf49e9799e7dfb850067bb51ce28a0dbedc0 | [
"MIT"
] | null | null | null | pandas/codes/Part02_Timestamp_Study.ipynb | dhb15735104415/python-DA-notes | cbbfcf49e9799e7dfb850067bb51ce28a0dbedc0 | [
"MIT"
] | null | null | null | 29.608974 | 325 | 0.531717 | [
[
[
"'''\n Pandas时刻数据:Timestamp 时间戳\n\n 时刻数据代表时间点,是pandas的数据类型,是将值与时间点相关联的最基本类型的时间序列数据\n\n pandas.Timestamp()\n'''\nimport datetime\nimport pandas as pd\n\ndate1 = datetime.datetime.today()\ndate2 = '2019-11-13'\nt1 = pd.Timestamp(date1)\nt2 = pd.Timestamp(date2)\nprint('t1:',t1)\nprint('type(t1):',type(t1)) # 数据类型为 pandas的Timestamp\nprint('t2:',t2)",
"t1: 2019-12-15 07:53:53.091608\ntype(t1): <class 'pandas._libs.tslibs.timestamps.Timestamp'>\nt2: 2019-11-13 00:00:00\n"
],
[
"# pd.to_datetime 多个时间数据转换时间戳索引\n\ndate1 = datetime.datetime(2019,11,13,20,22,22)\ndate2 = '2020-1-1'\n\n# pd.to_datetime():如果是单个时间数据,转换成pandas的时刻数据,数据类型为Timestamp\nt1 = pd.to_datetime(date1)\nt2 = pd.to_datetime(date2)\nprint('t1:',t1)\nprint('type(t1):',type(t1))\nprint('t1:',t1)\nprint('type(t2):',type(t2))\n\n# 多个时间数据,将会转换为pandas的DatetimeIndex\ndate_lst = ['2018-1-1','2019-11-13','2019-5-29']\nt3 = pd.to_datetime(date_lst)\nprint('时间列表t3:',t3)\nprint('type(t3):',type(t3))\n\n# 当一组时间序列中夹杂其他格式数据,可用errors参数返回\ndate_lst2 = ['2018-1-1','2019-11-13','2019-5-29','头号玩家']\nprint(date_lst2)\n\n# errors = 'ignore':不可解析时返回原始输入,这里就是直接生成一般数组\nt4 = pd.to_datetime(date_lst2,errors='ignore')\nprint('t4:',t4) \nprint('type(t4):',type(t4))\n\n# errors = 'coerce':不可扩展,缺失值返回NaT(Not a Time),结果认为DatetimeIndex\nt5 = pd.to_datetime(date_lst2, errors='coerce')\nprint('t5:',t5) # 结果中,不可解析为时间的值变为NaT\nprint('type(t5):',type(t5))\n\n'''\n 源码:\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as NaT\n - If 'ignore', then invalid parsing will return the input\n coerce:胁迫 迫使\n'''",
"t1: 2019-11-13 20:22:22\ntype(t1): <class 'pandas._libs.tslibs.timestamps.Timestamp'>\nt1: 2019-11-13 20:22:22\ntype(t2): <class 'pandas._libs.tslibs.timestamps.Timestamp'>\n时间列表t3: DatetimeIndex(['2018-01-01', '2019-11-13', '2019-05-29'], dtype='datetime64[ns]', freq=None)\ntype(t3): <class 'pandas.core.indexes.datetimes.DatetimeIndex'>\n['2018-1-1', '2019-11-13', '2019-5-29', '头号玩家']\nt4: Index(['2018-1-1', '2019-11-13', '2019-5-29', '头号玩家'], dtype='object')\ntype(t4): <class 'pandas.core.indexes.base.Index'>\nt5: DatetimeIndex(['2018-01-01', '2019-11-13', '2019-05-29', 'NaT'], dtype='datetime64[ns]', freq=None)\ntype(t5): <class 'pandas.core.indexes.datetimes.DatetimeIndex'>\n"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e7c8514bc615c5a068a8cbc01f7200f86ae98241 | 240,102 | ipynb | Jupyter Notebook | plot/plotFromRewards.ipynb | architsakhadeo/OHT | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | [
"MIT"
] | null | null | null | plot/plotFromRewards.ipynb | architsakhadeo/OHT | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | [
"MIT"
] | null | null | null | plot/plotFromRewards.ipynb | architsakhadeo/OHT | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | [
"MIT"
] | 2 | 2021-09-21T21:19:11.000Z | 2021-09-24T23:11:35.000Z | 219.271233 | 193,587 | 0.881084 | [
[
[
"# Plot Comparison Between Algorithms",
"_____no_output_____"
],
[
"Expects the input data to contain CSV files containing rewards per timestep",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\n\n%reload_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"We need to read the CSV files (from a function in another file) to get the reward at each timestep for each run of each algorithm. Only the `dataPath` directories will be loaded.\n\n`load_data` loads the CSV files containing rewards as a python list of Pandas DataFrames.\n\n`dataPath` contains the exact path of the directories containing the CSV files. This path is relative to the `data` directory. It assumes every element will be path for a different algorithm. It will overwrite if two paths are for different parameter settings of the same algorithm.\n\nExpects there to be more than 1 input CSV file.",
"_____no_output_____"
]
],
[
[
"dataPath = ['esarsa/alpha-0.015625_driftProb--1,-1,-1,-1_driftScale-1000_enable-debug-0_epsilon-0.05_gamma-0.95_lambda-0.8_sensorLife-1,1,1,1_tiles-4_tilings-32',\n 'dqn/alpha-0.015625_driftProb--1,-1,-1,-1_driftScale-100_enable-debug-0_epsilon-0.05_gamma-0.95_lambda-0.8_sensorLife-1,1,1,1_tiles-4_tilings-32/']\n\nbasePath = '../data/'\n\nalgorithms = [dataPath[i].split('/')[0] for i in range(len(dataPath))]\n\nData = {}\n\nfrom loadFromRewards import load_data\n\nfor i in range(len(dataPath)):\n if os.path.isdir(basePath + dataPath[i]) == True:\n Data[algorithms[i]] = load_data(basePath+dataPath[i])\n\nprint('Data will be stored for', ', '.join([k for k in Data.keys()]))\nprint('Loaded the rewards data from the csv files')",
"Data will be stored for esarsa\nLoaded the rewards data from the csv files\n"
]
],
[
[
"The rewards can be transformed into the following values of transformation =\n1. 'Returns'\n2. 'Failures'\n3. 'Average-Rewards'\n4. 'Rewards' (no change)\n\n----------------------------------------------------------------------------------------------\n\nThere is an additional parameter of window which can be any non-negative integer. It is used for the 'Average-Rewards' transformation to maintain a moving average over a sliding window. By default window is 0.\n\n- If window is 500 and timesteps are 10000, then the first element is the average of the performances of timesteps from 1 - 500. The second element is the average of the performances of timesteps from 2 - 501. The last element is the average of the performances of timesteps from 9501 - 10000.\n\n----------------------------------------------------------------------------------------------\n\n`transform_data` transforms the absolute failure timesteps (python list of Pandas DataFrames) into the respective `transformation` (a numpy array of numpy arrays) for plotting",
"_____no_output_____"
]
],
[
[
"plottingData = {}\n\nfrom loadFromRewards import transform_data\n\ntransformation = 'Returns'\nwindow = 2500\n\nfor alg, data in Data.items():\n plottingData[alg] = transform_data(alg, data, transformation, window)\n\nprint('Data will be plotted for', ', '.join([k for k in plottingData.keys()]))\nprint('The stored rewards are transformed to: ', transformation)",
"0 esarsa\nData will be plotted for esarsa\nThe stored rewards are transformed to: Returns\n"
]
],
[
[
"Here, we can plot the following statistics:\n\n1. Mean of all the runs\n\n2. Median run\n\n3. Run with the best performance (highest return, or equivalently least failures)\n\n4. Run with the worst performance (lowest return, or equivalently most failures)\n\n5. Mean along with the confidence interval (Currently, plots the mean along with 95% confidence interval, but should be changed to make it adaptive to any confidence interval)\n\n6. Mean along with percentile regions (Plots the mean and shades the region between the run with the lower percentile and the run with the upper percentile)\n\n----------------------------------------------------------------------------------------------\n\nDetails:\n\nplotBest, plotWorst, plotMeanAndPercentileRegions sort the performances based on their final performance\n\n ----------------------------------------------------\n\nMean, Median, MeanAndConfidenceInterval are all symmetric plots so 'Failures' does not affect their plots\n \nBest, Worst, MeanAndPercentileRegions are all asymmetric plots so 'Failures' affects their plots, and has to be treated in the following way: \n\n ----------------------------------------------------\n\n1. plotBest for Returns will plot the run with the highest return (least failures)\n plotBest for Failures will plot the run with the least failures and not the highest failures\n\n2. plotWorst for Returns will plot the run with the lowest return (most failures)\n plotWorst for Failures will plot the run with the most failures and not the least failures\n\n3. plotMeanAndPercentileRegions for Returns uses the lower variable to select the run with the 'lower' percentile and uses the upper variable to select the run with the 'upper' percentile\n plotMeanAndPercentileRegions for Failures uses the lower variable along with some calculations to select the run with 'upper' percentile and uses the upper variable along with some calculations to select the run with the 'lower' percentile \n \n----------------------------------------------------------------------------------------------\n\nCaution:\n- Jupyter notebooks (mostly) or matplotlib gives an error when displaying very dense plots. For example: plotting best and worst case for transformation of 'Rewards' for 'example' algorithm, or when trying to zoom into dense plots. Most of the plots for 'Rewards' and 'example' fail.",
"_____no_output_____"
]
],
[
[
"from stats import getMean, getMedian, getBest, getWorst, getConfidenceIntervalOfMean, getRegion\n\n# Add color, linestyles as needed\n\ndef plotMean(xAxis, data, color):\n mean = getMean(data)\n plt.plot(xAxis, mean, label=alg+'-mean', color=color)\n\ndef plotMedian(xAxis, data, color):\n median = getMedian(data)\n plt.plot(xAxis, median, label=alg+'-median', color=color)\n\ndef plotBest(xAxis, data, transformation, color):\n best = getBest(data, transformation)\n plt.plot(xAxis, best, label=alg+'-best', color=color)\n\ndef plotWorst(xAxis, data, transformation, color):\n worst = getWorst(data, transformation)\n plt.plot(xAxis, worst, label=alg+'-worst', color=color)\n\ndef plotMeanAndConfidenceInterval(xAxis, data, confidence, color):\n plotMean(xAxis, data, color=color)\n lowerBound, upperBound = getConfidenceIntervalOfMean(data, confidence)\n plt.fill_between(xAxis, lowerBound, upperBound, alpha=0.25, color=color)\n\ndef plotMeanAndPercentileRegions(xAxis, data, lower, upper, transformation, color):\n plotMean(xAxis, data, color)\n lowerRun, upperRun = getRegion(data, lower, upper, transformation)\n plt.fill_between(xAxis, lowerRun, upperRun, alpha=0.25, color=color)",
"_____no_output_____"
]
],
[
[
"Details:\n\n- X axis for 'Average-Rewards' will start from 'window' timesteps and end with the final timesteps\n\n- Need to add color (shades), linestyle as per requirements\n\n- Currently plot one at a time by commenting out the others otherwise, it displays different colors for all.\n",
"_____no_output_____"
]
],
[
[
"# For saving figures\n#%matplotlib inline\n\n# For plotting in the jupyter notebook\n%matplotlib notebook \n\nimport matplotlib.pyplot as plt\n\ncolors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\nfor alg, data in plottingData.items():\n lenRun = len(data[0])\n xAxis = np.array([i for i in range(1,lenRun+1)])\n \n if transformation == 'Average-Rewards':\n xAxis += (window-1)\n \n if alg == 'esarsa':\n color = colors[0]\n if alg == 'hand':\n color = colors[1] \n \n plotMean(xAxis, data, color=color)\n\n #plotMedian(xAxis, data, color=color)\n \n #plotBest(xAxis, data, transformation=transformation, color=color)\n \n #plotWorst(xAxis, data, transformation=transformation, color=color)\n \n #plotMeanAndConfidenceInterval(xAxis, data, confidence=0.95, color=color)\n \n #plotMeanAndPercentileRegions(xAxis, data, lower=0.025, upper=0.975, transformation=transformation, color=color)\n\n \n#plt.title('Rewards averaged with sliding window of 1000 timesteps across 100 runs', pad=25, fontsize=10)\nplt.xlabel('Timesteps', labelpad=35)\nplt.ylabel(transformation, rotation=0, labelpad=45)\nplt.rcParams['figure.figsize'] = [8, 5.33]\nplt.legend(loc=0)\nplt.yticks()\nplt.xticks()\nplt.tight_layout()\n\n#plt.savefig('../img/'+transformation+'.png',dpi=500, bbox_inches='tight')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7c8617f16d16d666fbbf8d8339951d128691124 | 54,409 | ipynb | Jupyter Notebook | mnist/resnet_mnist.ipynb | jonathanventura/nncookbook | 1ddb5e00635a65881ea762a5a68e73f7dcd87494 | [
"MIT"
] | 1 | 2018-03-24T14:28:48.000Z | 2018-03-24T14:28:48.000Z | mnist/resnet_mnist.ipynb | jonathanventura/nncookbook | 1ddb5e00635a65881ea762a5a68e73f7dcd87494 | [
"MIT"
] | null | null | null | mnist/resnet_mnist.ipynb | jonathanventura/nncookbook | 1ddb5e00635a65881ea762a5a68e73f7dcd87494 | [
"MIT"
] | null | null | null | 144.320955 | 18,878 | 0.815674 | [
[
[
"### Classification with MNIST Dataset and ResNet network\nThis script sets up a ResNet-style network to classify digits from the MNIST dataset.",
"_____no_output_____"
]
],
[
[
"import keras\nimport keras.backend as K\nfrom keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.layers import Input, Conv2D, Dense, Flatten, MaxPooling2D, Add, Activation, Dropout\nfrom keras.optimizers import SGD\nfrom matplotlib import pyplot as plt\nimport numpy as np",
"Using TensorFlow backend.\n"
]
],
[
[
"Use a Keras utility function to load the MNIST dataset. We select only zeros and ones to do binary classification.",
"_____no_output_____"
]
],
[
[
"(x_train, y_train), (x_test, y_test) = mnist.load_data()\ny_train = keras.utils.to_categorical(y_train,10)\ny_test = keras.utils.to_categorical(y_test,10)",
"_____no_output_____"
]
],
[
[
"Resize the images to vectors and convert their datatype and range.",
"_____no_output_____"
]
],
[
[
"x_train = np.expand_dims(x_train,axis=-1)\nx_test = np.expand_dims(x_test,axis=-1)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nx_train = x_train*2.-1.\nx_test = x_test*2.-1.",
"_____no_output_____"
]
],
[
[
"Build a multi-class classifier model.",
"_____no_output_____"
]
],
[
[
"def res_block(x,c,s=1):\n if K.shape(x)[3] <> c or s <> 1:\n x_save = Conv2D(c,1,strides=s,activation=None)(x)\n else:\n x_save = x\n x = Conv2D(c,3,strides=s,padding='same',activation='relu',kernel_initializer='he_normal')(x)\n x = Conv2D(c,3,padding='same',activation=None,kernel_initializer='he_normal')(x)\n x = Add()([x,x_save])\n x = Activation('relu')(x)\n return x\n\nx_in = Input((28,28,1))\n\nx = res_block(x_in,64,2)\nx = res_block(x,64)\n\nx = res_block(x,128,2)\nx = res_block(x,128)\n\nx = res_block(x,256,2)\nx = res_block(x,256)\n\nx = Flatten()(x)\nx = Dense(200,kernel_initializer='he_normal')(x)\nx = Dropout(0.5)(x)\nx = Dense(10,activation='softmax',kernel_initializer='he_normal')(x)\nmodel = Model(inputs=x_in,outputs=x)\nmodel.summary()",
"____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\ninput_1 (InputLayer) (None, 28, 28, 1) 0 \n____________________________________________________________________________________________________\nconv2d_2 (Conv2D) (None, 14, 14, 64) 640 input_1[0][0] \n____________________________________________________________________________________________________\nconv2d_3 (Conv2D) (None, 14, 14, 64) 36928 conv2d_2[0][0] \n____________________________________________________________________________________________________\nconv2d_1 (Conv2D) (None, 14, 14, 64) 128 input_1[0][0] \n____________________________________________________________________________________________________\nadd_1 (Add) (None, 14, 14, 64) 0 conv2d_3[0][0] \n conv2d_1[0][0] \n____________________________________________________________________________________________________\nactivation_1 (Activation) (None, 14, 14, 64) 0 add_1[0][0] \n____________________________________________________________________________________________________\nconv2d_5 (Conv2D) (None, 14, 14, 64) 36928 activation_1[0][0] \n____________________________________________________________________________________________________\nconv2d_6 (Conv2D) (None, 14, 14, 64) 36928 conv2d_5[0][0] \n____________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, 14, 14, 64) 4160 activation_1[0][0] \n____________________________________________________________________________________________________\nadd_2 (Add) (None, 14, 14, 64) 0 conv2d_6[0][0] \n conv2d_4[0][0] \n____________________________________________________________________________________________________\nactivation_2 (Activation) (None, 14, 14, 64) 0 add_2[0][0] \n____________________________________________________________________________________________________\nconv2d_8 (Conv2D) (None, 7, 7, 128) 73856 activation_2[0][0] \n____________________________________________________________________________________________________\nconv2d_9 (Conv2D) (None, 7, 7, 128) 147584 conv2d_8[0][0] \n____________________________________________________________________________________________________\nconv2d_7 (Conv2D) (None, 7, 7, 128) 8320 activation_2[0][0] \n____________________________________________________________________________________________________\nadd_3 (Add) (None, 7, 7, 128) 0 conv2d_9[0][0] \n conv2d_7[0][0] \n____________________________________________________________________________________________________\nactivation_3 (Activation) (None, 7, 7, 128) 0 add_3[0][0] \n____________________________________________________________________________________________________\nconv2d_11 (Conv2D) (None, 7, 7, 128) 147584 activation_3[0][0] \n____________________________________________________________________________________________________\nconv2d_12 (Conv2D) (None, 7, 7, 128) 147584 conv2d_11[0][0] \n____________________________________________________________________________________________________\nconv2d_10 (Conv2D) (None, 7, 7, 128) 16512 activation_3[0][0] \n____________________________________________________________________________________________________\nadd_4 (Add) (None, 7, 7, 128) 0 conv2d_12[0][0] \n conv2d_10[0][0] \n____________________________________________________________________________________________________\nactivation_4 (Activation) 
(None, 7, 7, 128) 0 add_4[0][0] \n____________________________________________________________________________________________________\nconv2d_14 (Conv2D) (None, 4, 4, 256) 295168 activation_4[0][0] \n____________________________________________________________________________________________________\nconv2d_15 (Conv2D) (None, 4, 4, 256) 590080 conv2d_14[0][0] \n____________________________________________________________________________________________________\nconv2d_13 (Conv2D) (None, 4, 4, 256) 33024 activation_4[0][0] \n____________________________________________________________________________________________________\nadd_5 (Add) (None, 4, 4, 256) 0 conv2d_15[0][0] \n conv2d_13[0][0] \n____________________________________________________________________________________________________\nactivation_5 (Activation) (None, 4, 4, 256) 0 add_5[0][0] \n____________________________________________________________________________________________________\nconv2d_17 (Conv2D) (None, 4, 4, 256) 590080 activation_5[0][0] \n____________________________________________________________________________________________________\nconv2d_18 (Conv2D) (None, 4, 4, 256) 590080 conv2d_17[0][0] \n____________________________________________________________________________________________________\nconv2d_16 (Conv2D) (None, 4, 4, 256) 65792 activation_5[0][0] \n____________________________________________________________________________________________________\nadd_6 (Add) (None, 4, 4, 256) 0 conv2d_18[0][0] \n conv2d_16[0][0] \n____________________________________________________________________________________________________\nactivation_6 (Activation) (None, 4, 4, 256) 0 add_6[0][0] \n____________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 4096) 0 activation_6[0][0] \n____________________________________________________________________________________________________\ndense_1 (Dense) (None, 200) 819400 flatten_1[0][0] \n____________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 200) 0 dense_1[0][0] \n____________________________________________________________________________________________________\ndense_2 (Dense) (None, 10) 2010 dropout_1[0][0] \n====================================================================================================\nTotal params: 3,642,786\nTrainable params: 3,642,786\nNon-trainable params: 0\n____________________________________________________________________________________________________\n"
]
],
[
[
"Set up the model to optimize the categorical crossentropy loss using stochastic gradient descent.",
"_____no_output_____"
]
],
[
[
"model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"Optimize the model over the training data.",
"_____no_output_____"
]
],
[
[
"history = model.fit(x_train, y_train,\n batch_size=100,\n epochs=20,\n verbose=1,\n validation_data=(x_test, y_test))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 44s - loss: 0.3635 - acc: 0.8831 - val_loss: 0.0851 - val_acc: 0.9725\nEpoch 2/20\n60000/60000 [==============================] - 42s - loss: 0.1030 - acc: 0.9686 - val_loss: 0.0563 - val_acc: 0.9805\nEpoch 3/20\n60000/60000 [==============================] - 42s - loss: 0.0728 - acc: 0.9771 - val_loss: 0.0444 - val_acc: 0.9848\nEpoch 4/20\n60000/60000 [==============================] - 42s - loss: 0.0545 - acc: 0.9833 - val_loss: 0.0408 - val_acc: 0.9858\nEpoch 5/20\n60000/60000 [==============================] - 42s - loss: 0.0436 - acc: 0.9864 - val_loss: 0.0367 - val_acc: 0.9872\nEpoch 6/20\n60000/60000 [==============================] - 42s - loss: 0.0358 - acc: 0.9888 - val_loss: 0.0359 - val_acc: 0.9881\nEpoch 7/20\n60000/60000 [==============================] - 42s - loss: 0.0291 - acc: 0.9908 - val_loss: 0.0344 - val_acc: 0.9881\nEpoch 8/20\n60000/60000 [==============================] - 42s - loss: 0.0251 - acc: 0.9926 - val_loss: 0.0291 - val_acc: 0.9894\nEpoch 9/20\n60000/60000 [==============================] - 42s - loss: 0.0210 - acc: 0.9931 - val_loss: 0.0281 - val_acc: 0.9906\nEpoch 10/20\n60000/60000 [==============================] - 42s - loss: 0.0179 - acc: 0.9943 - val_loss: 0.0304 - val_acc: 0.9893\nEpoch 11/20\n60000/60000 [==============================] - 42s - loss: 0.0147 - acc: 0.9952 - val_loss: 0.0282 - val_acc: 0.9912\nEpoch 12/20\n60000/60000 [==============================] - 42s - loss: 0.0137 - acc: 0.9953 - val_loss: 0.0259 - val_acc: 0.9914\nEpoch 13/20\n60000/60000 [==============================] - 42s - loss: 0.0121 - acc: 0.9960 - val_loss: 0.0294 - val_acc: 0.9906\nEpoch 14/20\n60000/60000 [==============================] - 42s - loss: 0.0100 - acc: 0.9968 - val_loss: 0.0317 - val_acc: 0.9895\nEpoch 15/20\n60000/60000 [==============================] - 42s - loss: 0.0088 - acc: 0.9976 - val_loss: 0.0337 - val_acc: 0.9900\nEpoch 16/20\n60000/60000 [==============================] - 42s - loss: 0.0074 - acc: 0.9976 - val_loss: 0.0275 - val_acc: 0.9918\nEpoch 17/20\n60000/60000 [==============================] - 42s - loss: 0.0063 - acc: 0.9980 - val_loss: 0.0304 - val_acc: 0.9917\nEpoch 18/20\n60000/60000 [==============================] - 42s - loss: 0.0062 - acc: 0.9981 - val_loss: 0.0282 - val_acc: 0.9916\nEpoch 19/20\n60000/60000 [==============================] - 42s - loss: 0.0057 - acc: 0.9983 - val_loss: 0.0297 - val_acc: 0.9912\nEpoch 20/20\n60000/60000 [==============================] - 42s - loss: 0.0047 - acc: 0.9986 - val_loss: 0.0291 - val_acc: 0.9914\n"
],
[
"plt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.legend(['Training Loss','Testing Loss'])\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.show()",
"_____no_output_____"
],
[
"plt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.legend(['Training Accuracy','Testing Accuracy'])\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7c89ac9cc874e5a2e5c41a5fe321ecf8054da6c | 31,125 | ipynb | Jupyter Notebook | nbs/01_fastprogress.ipynb | hsm/fastprogress | 1f1664956a6a38085f6968ceeb27042daa5921b9 | [
"Apache-2.0"
] | null | null | null | nbs/01_fastprogress.ipynb | hsm/fastprogress | 1f1664956a6a38085f6968ceeb27042daa5921b9 | [
"Apache-2.0"
] | null | null | null | nbs/01_fastprogress.ipynb | hsm/fastprogress | 1f1664956a6a38085f6968ceeb27042daa5921b9 | [
"Apache-2.0"
] | null | null | null | 32.388137 | 518 | 0.484241 | [
[
[
"# export\nimport time,os,shutil\nfrom sys import stdout\nfrom warnings import warn\nfrom fastprogress.core import *",
"_____no_output_____"
],
[
"#default_exp fastprogress",
"_____no_output_____"
]
],
[
[
"## Base class",
"_____no_output_____"
]
],
[
[
"#export\nclass ProgressBar():\n update_every,first_its = 0.2,5\n\n def __init__(self, gen, total=None, display=True, leave=True, parent=None, master=None, comment=''):\n self.gen,self.parent,self.master,self.comment = gen,parent,master,comment\n self.total = len(gen) if total is None else total\n self.last_v = 0\n if parent is None: self.leave,self.display = leave,display\n else:\n self.leave,self.display=False,False\n parent.add_child(self)\n self.last_v = None\n\n def on_iter_begin(self):\n if self.master is not None: self.master.on_iter_begin()\n \n def on_interrupt(self):\n if self.master is not None: self.master.on_interrupt()\n \n def on_iter_end(self):\n if self.master is not None: self.master.on_iter_end()\n \n def on_update(self, val, text): pass\n\n def __iter__(self):\n if self.total != 0: self.update(0)\n try:\n for i,o in enumerate(self.gen):\n if i >= self.total: break\n yield o\n self.update(i+1)\n except Exception as e:\n self.on_interrupt()\n raise e\n\n def update(self, val):\n if self.last_v is None: \n self.on_iter_begin()\n self.last_v = 0\n if val == 0:\n self.start_t = self.last_t = time.time()\n self.pred_t,self.last_v,self.wait_for = 0,0,1\n self.update_bar(0)\n elif val <= self.first_its or val >= self.last_v + self.wait_for or val >= self.total:\n cur_t = time.time()\n avg_t = (cur_t - self.start_t) / val\n self.wait_for = max(int(self.update_every / (avg_t+1e-8)),1)\n self.pred_t = avg_t * self.total\n self.last_v,self.last_t = val,cur_t\n self.update_bar(val)\n if val >= self.total: \n self.on_iter_end()\n self.last_v = None\n\n def update_bar(self, val):\n elapsed_t = self.last_t - self.start_t\n remaining_t = format_time(self.pred_t - elapsed_t)\n elapsed_t = format_time(elapsed_t)\n end = '' if len(self.comment) == 0 else f' {self.comment}'\n if self.total == 0:\n warn(\"Your generator is empty.\")\n self.on_update(0, '100% [0/0]')\n else: self.on_update(val, f'{100 * val/self.total:.2f}% [{val}/{self.total} {elapsed_t}<{remaining_t}{end}]')",
"_____no_output_____"
],
[
"class VerboseProgressBar(ProgressBar):\n def on_iter_begin(self): super().on_iter_begin(); print(\"on_iter_begin\")\n def on_interrupt(self): print(\"on_interrupt\")\n def on_iter_end(self): print(\"on_iter_end\"); super().on_iter_end()\n def on_update(self, val, text): print(f\"on_update {val}\")",
"_____no_output_____"
],
[
"from contextlib import redirect_stdout\nimport io",
"_____no_output_____"
],
[
"tst_pb = VerboseProgressBar(range(6))\ns = io.StringIO()\nwith redirect_stdout(s): \n for i in tst_pb: time.sleep(0.1)\n\nassert s.getvalue() == '\\n'.join(['on_iter_begin'] + [f'on_update {i}' for i in range(7)] + ['on_iter_end']) + '\\n'",
"_____no_output_____"
],
[
"tst_pb = VerboseProgressBar(range(6))\ns = io.StringIO()\nwith redirect_stdout(s): \n for i in range(7): \n tst_pb.update(i)\n time.sleep(0.1)\n\nassert s.getvalue() == '\\n'.join(['on_iter_begin'] + [f'on_update {i}' for i in range(7)] + ['on_iter_end']) + '\\n'",
"_____no_output_____"
],
[
"#export\nclass MasterBar(ProgressBar):\n def __init__(self, gen, cls, total=None): \n self.main_bar = cls(gen, total=total, display=False, master=self)\n \n def on_iter_begin(self): pass\n def on_interrupt(self): pass\n def on_iter_end(self): pass\n def add_child(self, child): pass\n def write(self, line): pass\n def update_graph(self, graphs, x_bounds, y_bounds): pass\n \n def __iter__(self):\n for o in self.main_bar:\n yield o\n \n def update(self, val): self.main_bar.update(val)",
"_____no_output_____"
],
[
"class VerboseMasterBar(MasterBar):\n def __init__(self, gen, total=None): super().__init__(gen, VerboseProgressBar, total=total)\n def on_iter_begin(self): print(\"master_on_iter_begin\")\n def on_interrupt(self): print(\"master_on_interrupt\")\n def on_iter_end(self): print(\"master_on_iter_end\")\n #def on_update(self, val, text): print(f\"master_on_update {val}\")",
"_____no_output_____"
],
[
"tst_mb = VerboseMasterBar(range(6))\nfor i in tst_mb: time.sleep(0.1)",
"master_on_iter_begin\non_iter_begin\non_update 0\non_update 1\non_update 2\non_update 3\non_update 4\non_update 5\non_update 6\non_iter_end\nmaster_on_iter_end\n"
],
[
"#hide\n#Test an empty progress bar doesn't crash\nfor i in ProgressBar([]): pass",
"_____no_output_____"
]
],
[
[
"## Notebook progress bars",
"_____no_output_____"
]
],
[
[
"#export\nif IN_NOTEBOOK:\n try:\n from IPython.display import clear_output, display, HTML\n import matplotlib.pyplot as plt\n import ipywidgets as widgets\n except:\n warn(\"Couldn't import ipywidgets properly, progress bar will use console behavior\")\n IN_NOTEBOOK = False",
"_____no_output_____"
],
[
"#export\nclass NBOutput():\n def __init__(self, to_display):\n self.out = widgets.Output()\n display(self.out)\n with self.out:\n display(to_display)\n\n def update(self, to_update):\n with self.out:\n clear_output(wait=True)\n display(to_update)",
"_____no_output_____"
],
[
"#export\nclass NBProgressBar(ProgressBar):\n def on_iter_begin(self):\n super().on_iter_begin()\n self.progress = html_progress_bar(0, self.total, \"\")\n if self.display:\n display(HTML(html_progress_bar_styles))\n self.out = NBOutput(HTML(self.progress))\n self.is_active=True\n\n def on_interrupt(self):\n self.on_update(0, 'Interrupted', interrupted=True)\n super().on_interrupt()\n self.on_iter_end()\n\n def on_iter_end(self):\n if not self.leave and self.display: self.out.update(HTML(''))\n self.is_active=False\n super().on_iter_end()\n \n def on_update(self, val, text, interrupted=False):\n self.progress = html_progress_bar(val, self.total, text, interrupted)\n if self.display: self.out.update(HTML(self.progress))\n elif self.parent is not None: self.parent.show()",
"_____no_output_____"
],
[
"tst = NBProgressBar(range(100))\nfor i in tst: time.sleep(0.05)",
"_____no_output_____"
],
[
"tst = NBProgressBar(range(100))\nfor i in range(50): \n time.sleep(0.05)\n tst.update(i)\ntst.on_interrupt()",
"_____no_output_____"
],
[
"#hide\nfor i in NBProgressBar([]): pass",
"_____no_output_____"
],
[
"#export\nclass NBMasterBar(MasterBar):\n names = ['train', 'valid']\n def __init__(self, gen, total=None, hide_graph=False, order=None, clean_on_interrupt=False, total_time=False):\n super().__init__(gen, NBProgressBar, total)\n if order is None: order = ['pb1', 'text', 'pb2']\n self.hide_graph,self.order = hide_graph,order\n self.report,self.clean_on_interrupt,self.total_time = [],clean_on_interrupt,total_time\n self.inner_dict = {'pb1':self.main_bar, 'text':\"\"}\n self.text,self.lines = \"\",[]\n \n def on_iter_begin(self):\n self.html_code = '\\n'.join([html_progress_bar(0, self.main_bar.total, \"\"), \"\"])\n display(HTML(html_progress_bar_styles))\n self.out = NBOutput(HTML(self.html_code))\n\n def on_interrupt(self):\n if self.clean_on_interrupt: self.out.update(HTML(''))\n\n def on_iter_end(self):\n if hasattr(self, 'imgs_fig'):\n plt.close()\n self.imgs_out.update(self.imgs_fig)\n if hasattr(self, 'graph_fig'):\n plt.close()\n self.graph_out.update(self.graph_fig)\n if self.text.endswith('<p>'): self.text = self.text[:-3]\n if self.total_time: \n total_time = format_time(time.time() - self.main_bar.start_t)\n self.text = f'Total time: {total_time} <p>' + self.text\n if hasattr(self, 'out'): self.out.update(HTML(self.text))\n\n def add_child(self, child):\n self.child = child\n self.inner_dict['pb2'] = self.child\n #self.show()\n\n def show(self):\n self.inner_dict['text'] = self.text\n to_show = [name for name in self.order if name in self.inner_dict.keys()]\n self.html_code = '\\n'.join([getattr(self.inner_dict[n], 'progress', self.inner_dict[n]) for n in to_show])\n self.out.update(HTML(self.html_code))\n\n def write(self, line, table=False):\n if not table: self.text += line + \"<p>\"\n else:\n self.lines.append(line)\n self.text = text2html_table(self.lines)\n\n def show_imgs(self, imgs, titles=None, cols=4, imgsize=4, figsize=None):\n if self.hide_graph: return\n rows = len(imgs)//cols if len(imgs)%cols == 0 else len(imgs)//cols + 1\n plt.close()\n if figsize is None: figsize = (imgsize*cols, imgsize*rows)\n self.imgs_fig, imgs_axs = plt.subplots(rows, cols, figsize=figsize)\n if titles is None: titles = [None] * len(imgs)\n for img, ax, title in zip(imgs, imgs_axs.flatten(), titles): img.show(ax=ax, title=title)\n for ax in imgs_axs.flatten()[len(imgs):]: ax.axis('off')\n if not hasattr(self, 'imgs_out'): self.imgs_out = NBOutput(self.imgs_fig)\n else: self.imgs_out.update(self.imgs_fig)\n\n def update_graph(self, graphs, x_bounds=None, y_bounds=None, figsize=(6,4)):\n if self.hide_graph: return\n if not hasattr(self, 'graph_fig'):\n self.graph_fig, self.graph_ax = plt.subplots(1, figsize=figsize)\n self.graph_out = NBOutput(self.graph_ax.figure)\n self.graph_ax.clear()\n if len(self.names) < len(graphs): self.names += [''] * (len(graphs) - len(self.names))\n for g,n in zip(graphs,self.names): self.graph_ax.plot(*g, label=n)\n self.graph_ax.legend(loc='upper right')\n if x_bounds is not None: self.graph_ax.set_xlim(*x_bounds)\n if y_bounds is not None: self.graph_ax.set_ylim(*y_bounds)\n self.graph_out.update(self.graph_ax.figure)",
"_____no_output_____"
],
[
"mb = NBMasterBar(range(5))\nfor i in mb:\n for j in NBProgressBar(range(10), parent=mb, comment=f'first bar stat'):\n time.sleep(0.01)\n #mb.child.comment = f'second bar stat'\n mb.write(f'Finished loop {i}.')",
"_____no_output_____"
],
[
"mb = NBMasterBar(range(5))\nmb.update(0) \nfor i in range(5):\n for j in NBProgressBar(range(10), parent=mb):\n time.sleep(0.01)\n #mb.child.comment = f'second bar stat'\n mb.main_bar.comment = f'first bar stat'\n mb.write(f'Finished loop {i}.')\n mb.update(i+1)",
"_____no_output_____"
]
],
[
[
"## Console progress bars",
"_____no_output_____"
]
],
[
[
"#export\nNO_BAR = False\nWRITER_FN = print\nFLUSH = True\nSAVE_PATH = None\nSAVE_APPEND = False\nMAX_COLS = 160",
"_____no_output_____"
],
[
"#export \ndef printing():\n return False if NO_BAR else (stdout.isatty() or IN_NOTEBOOK)",
"_____no_output_____"
],
[
"#export\nclass ConsoleProgressBar(ProgressBar):\n fill:str='█'\n end:str='\\r'\n\n def __init__(self, gen, total=None, display=True, leave=True, parent=None, master=None, txt_len=60):\n self.cols,_ = shutil.get_terminal_size((100, 40))\n if self.cols > MAX_COLS: self.cols=MAX_COLS\n self.length = self.cols-txt_len\n self.max_len,self.prefix = 0,''\n #In case the filling char returns an encoding error\n try: print(self.fill, end='\\r', flush=FLUSH)\n except: self.fill = 'X'\n super().__init__(gen, total, display, leave, parent, master)\n\n def on_interrupt(self):\n super().on_interrupt()\n self.on_iter_end()\n\n def on_iter_end(self):\n if not self.leave and printing():\n print(f'\\r{self.prefix}' + ' ' * (self.max_len - len(f'\\r{self.prefix}')), end='\\r', flush=FLUSH)\n super().on_iter_end()\n\n def on_update(self, val, text):\n if self.display:\n if self.length > self.cols-len(text)-len(self.prefix)-4:\n self.length = self.cols-len(text)-len(self.prefix)-4\n filled_len = int(self.length * val // self.total) if self.total else 0\n bar = self.fill * filled_len + '-' * (self.length - filled_len)\n to_write = f'\\r{self.prefix} |{bar}| {text}'\n if val >= self.total: end = '\\r'\n else: end = self.end\n if len(to_write) > self.max_len: self.max_len=len(to_write)\n if printing(): WRITER_FN(to_write, end=end, flush=FLUSH)",
"_____no_output_____"
],
[
"tst = ConsoleProgressBar(range(100))\nfor i in tst: time.sleep(0.05)",
" |█████████████████████████████████████████████████████████████████████████████████████████████| 100.00% [100/100 00:05<00:00]\r"
],
[
"tst = ConsoleProgressBar(range(100))\nfor i in range(50): \n time.sleep(0.05)\n tst.update(i)\ntst.on_interrupt()",
" |███████████████████████████████████████████--------------------------------------------------| 47.00% [47/100 00:02<00:02]\r"
],
[
"#export\ndef print_and_maybe_save(line):\n WRITER_FN(line)\n if SAVE_PATH is not None:\n attr = \"a\" if os.path.exists(SAVE_PATH) else \"w\"\n with open(SAVE_PATH, attr) as f: f.write(line + '\\n')",
"_____no_output_____"
],
[
"#export\nclass ConsoleMasterBar(MasterBar):\n def __init__(self, gen, total=None, hide_graph=False, order=None, clean_on_interrupt=False, total_time=False):\n super().__init__(gen, ConsoleProgressBar, total)\n self.total_time = total_time\n\n def add_child(self, child):\n self.child = child\n v = 0 if self.main_bar.last_v is None else self.main_bar.last_v\n self.child.prefix = f'Epoch {v+1}/{self.main_bar.total} :'\n self.child.display = True\n\n def on_iter_begin(self):\n super().on_iter_begin()\n if SAVE_PATH is not None and os.path.exists(SAVE_PATH) and not SAVE_APPEND:\n with open(SAVE_PATH, 'w') as f: f.write('')\n\n def write(self, line, table=False):\n if table:\n text = ''\n if not hasattr(self, 'names'):\n self.names = [name + ' ' * (8-len(name)) if len(name) < 8 else name for name in line]\n text = ' '.join(self.names)\n else:\n for (t,name) in zip(line,self.names): text += t + ' ' * (2 + len(name)-len(t))\n print_and_maybe_save(text)\n else: print_and_maybe_save(line)\n if self.total_time:\n total_time = format_time(time() - self.start_t)\n print_and_maybe_save(f'Total time: {total_time}')\n\n def show_imgs(*args, **kwargs): pass\n def update_graph(*args, **kwargs): pass",
"_____no_output_____"
],
[
"mb = ConsoleMasterBar(range(5))\nfor i in mb:\n for j in ConsoleProgressBar(range(10), parent=mb):\n time.sleep(0.01)\n #mb.child.comment = f'second bar stat'\n mb.main_bar.comment = f'first bar stat'\n mb.write(f'Finished loop {i}.')",
"Finished loop 0. \nFinished loop 1. \nFinished loop 2. \nFinished loop 3. \nFinished loop 4. \n"
],
[
"mb = ConsoleMasterBar(range(5))\nmb.update(0) \nfor i in range(5):\n for j in ConsoleProgressBar(range(10), parent=mb):\n time.sleep(0.01)\n #mb.child.comment = f'second bar stat'\n mb.main_bar.comment = f'first bar stat'\n mb.write(f'Finished loop {i}.')\n mb.update(i+1)\n \n # confirming a kwarg can be passed to ConsoleMasterBar instance\n mb.update_graph([[1,2],[3,4]], figsize=(10,5,))\n mb.show_imgs(figsize=(10,5,))",
"Finished loop 0. \nFinished loop 1. \nFinished loop 2. \nFinished loop 3. \nFinished loop 4. \n"
],
[
"#export\nif IN_NOTEBOOK: master_bar, progress_bar = NBMasterBar, NBProgressBar\nelse: master_bar, progress_bar = ConsoleMasterBar, ConsoleProgressBar",
"_____no_output_____"
],
[
"#export\n_all_ = ['master_bar', 'progress_bar']",
"_____no_output_____"
],
[
"#export\ndef force_console_behavior():\n \"Return the console progress bars\"\n return ConsoleMasterBar, ConsoleProgressBar",
"_____no_output_____"
],
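[
"# Usage sketch (illustrative): force the console bars even when running in a notebook,\n# e.g. if output is captured as plain text. The names on the left are arbitrary.\ncmb, cpb = force_console_behavior()\nfor i in cpb(range(3)): time.sleep(0.01)",
"_____no_output_____"
],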
[
"#export\ndef workaround_empty_console_output():\n \"Change console output behaviour to correctly show progress in consoles not recognizing \\r at the end of line\"\n ConsoleProgressBar.end = ''",
"_____no_output_____"
]
],
[
[
"## Export -",
"_____no_output_____"
]
],
[
[
"from nbdev.export import notebook2script\nnotebook2script()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7c89b57cffe4bcf5181f08a14ec4aeeee104e51 | 33,273 | ipynb | Jupyter Notebook | Prediction_diabet.ipynb | bsyllaisidk/Architecture-L | 26979e7464a1024bafb291854c9bdb5c0a8d496f | [
"MIT"
] | null | null | null | Prediction_diabet.ipynb | bsyllaisidk/Architecture-L | 26979e7464a1024bafb291854c9bdb5c0a8d496f | [
"MIT"
] | null | null | null | Prediction_diabet.ipynb | bsyllaisidk/Architecture-L | 26979e7464a1024bafb291854c9bdb5c0a8d496f | [
"MIT"
] | null | null | null | 30.414077 | 240 | 0.36486 | [
[
[
"<a href=\"https://colab.research.google.com/github/bsyllaisidk/Architecture-L/blob/main/Prediction_diabet.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Impotations de dependances\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n#import matplotlib.pyplot as plt\n#import seaborn as sns\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import svm\nfrom sklearn.metrics import accuracy_score\n#from sklearn.cluster import KMeans\n#from sklearn.model_selection import train_test_split\n#from sklearn.ensemble import RandomForestRegressor\n#from sklearn import metrics\n",
"_____no_output_____"
]
],
[
[
"\nchargement des données du fichier csv dans une trame de données pendas",
"_____no_output_____"
]
],
[
[
"\ndiabet_data = pd.read_csv('/content/diabete.csv')",
"_____no_output_____"
]
],
[
[
"lecture des donnes du tableau ",
"_____no_output_____"
]
],
[
[
"pd.read_csv\n",
"_____no_output_____"
]
],
[
[
"les 5 premiers lignes du tableau",
"_____no_output_____"
]
],
[
[
"diabet_data.head()",
"_____no_output_____"
],
[
"# le nombre de ligne et de colonnes dans notre base de donnes ",
"_____no_output_____"
],
[
"diabet_data.shape",
"_____no_output_____"
],
[
"#Pour obtenir les mesures statistique de cette donnee.",
"_____no_output_____"
],
[
"diabet_data.describe()",
"_____no_output_____"
]
],
[
[
"le nombre de diabetique et le non diabetique \n0--> non Diabetique\n1--> Diabetique",
"_____no_output_____"
]
],
[
[
"diabet_data['diabete'].value_counts()",
"_____no_output_____"
],
[
"diabet_data.groupby('diabete').mean()",
"_____no_output_____"
],
[
"#Sepearation de données et les etiquettes\nX=diabet_data.drop(columns='diabete', axis=1)\nY=diabet_data['diabete']",
"_____no_output_____"
],
[
"#lecture de X\nprint(X)",
" n_pregnant glucose tension thickness insulin bmi pedigree age\n0 6 148 72 35 0 33.6 0.627 50\n1 1 85 66 29 0 26.6 0.351 31\n2 8 183 64 0 0 23.3 0.672 32\n3 1 89 66 23 94 28.1 0.167 21\n4 0 137 40 35 168 43.1 2.288 33\n.. ... ... ... ... ... ... ... ...\n763 10 101 76 48 180 32.9 0.171 63\n764 2 122 70 27 0 36.8 0.340 27\n765 5 121 72 23 112 26.2 0.245 30\n766 1 126 60 0 0 30.1 0.349 47\n767 1 93 70 31 0 30.4 0.315 23\n\n[768 rows x 8 columns]\n"
],
[
"print(Y)",
"0 1\n1 0\n2 1\n3 0\n4 1\n ..\n763 0\n764 0\n765 0\n766 1\n767 0\nName: diabete, Length: 768, dtype: int64\n"
]
],
[
[
"Normalisation des donnees.",
"_____no_output_____"
]
],
[
[
"scaler = StandardScaler()",
"_____no_output_____"
],
[
"scaler.fit(X)",
"_____no_output_____"
],
[
"standardized_date = scaler.transform(X)\n",
"_____no_output_____"
],
[
"print(standardized_date)",
"[[ 0.63994726 0.84832379 0.14964075 ... 0.20401277 0.46849198\n 1.4259954 ]\n [-0.84488505 -1.12339636 -0.16054575 ... -0.68442195 -0.36506078\n -0.19067191]\n [ 1.23388019 1.94372388 -0.26394125 ... -1.10325546 0.60439732\n -0.10558415]\n ...\n [ 0.3429808 0.00330087 0.14964075 ... -0.73518964 -0.68519336\n -0.27575966]\n [-0.84488505 0.1597866 -0.47073225 ... -0.24020459 -0.37110101\n 1.17073215]\n [-0.84488505 -0.8730192 0.04624525 ... -0.20212881 -0.47378505\n -0.87137393]]\n"
],
[
"X = standardized_date\nY = diabet_data['diabete']",
"_____no_output_____"
],
[
"print(X)\nprint(Y)",
"[[ 0.63994726 0.84832379 0.14964075 ... 0.20401277 0.46849198\n 1.4259954 ]\n [-0.84488505 -1.12339636 -0.16054575 ... -0.68442195 -0.36506078\n -0.19067191]\n [ 1.23388019 1.94372388 -0.26394125 ... -1.10325546 0.60439732\n -0.10558415]\n ...\n [ 0.3429808 0.00330087 0.14964075 ... -0.73518964 -0.68519336\n -0.27575966]\n [-0.84488505 0.1597866 -0.47073225 ... -0.24020459 -0.37110101\n 1.17073215]\n [-0.84488505 -0.8730192 0.04624525 ... -0.20212881 -0.47378505\n -0.87137393]]\n0 1\n1 0\n2 1\n3 0\n4 1\n ..\n763 0\n764 0\n765 0\n766 1\n767 0\nName: diabete, Length: 768, dtype: int64\n"
],
[
"#Essaye de train fractionné",
"_____no_output_____"
],
[
"X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.2, stratify=Y, random_state=2)",
"_____no_output_____"
],
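[
"# Illustrative alternative (a sketch, not used in the rest of this notebook): a scikit-learn\n# Pipeline bundles the scaler and the SVM so the scaler is fitted on the training split only.\n# X_train here is already standardized, so this cell only shows the pattern.\nfrom sklearn.pipeline import make_pipeline\npipe = make_pipeline(StandardScaler(), svm.SVC(kernel='linear'))\npipe.fit(X_train, Y_train)\nprint('pipeline test accuracy :', pipe.score(X_test, Y_test))",
"_____no_output_____"
],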
[
"print(X.shape, X_train.shape, X_test.shape)",
"(768, 8) (614, 8) (154, 8)\n"
]
],
[
[
"Model de formation",
"_____no_output_____"
]
],
[
[
"classifier = svm.SVC(kernel='linear')",
"_____no_output_____"
]
],
[
[
"Formation de la machine a vecteur classée",
"_____no_output_____"
]
],
[
[
"classifier.fit(X_train, Y_train)",
"_____no_output_____"
]
],
[
[
"Evaluation de modele",
"_____no_output_____"
],
[
"Note de precision",
"_____no_output_____"
]
],
[
[
"# Score de precision sur les données de formation\nX_train_prediction = classifier.predict(X_train)\ntraining_data_accuracy = accuracy_score(X_train_prediction, Y_train)",
"_____no_output_____"
],
[
"print('Score de precision sur les données de formation : ', training_data_accuracy)",
"Score de precision sur les données de formation : 0.7866449511400652\n"
],
[
"# Score de precision sur les données de formation\nX_test_prediction = classifier.predict(X_test)\ntest_data_accuracy = accuracy_score(X_test_prediction, Y_test)",
"_____no_output_____"
],
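[
"# Optional extra diagnostic (illustrative): a confusion matrix gives more detail than\n# accuracy alone about which class is being misclassified.\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(Y_test, X_test_prediction))",
"_____no_output_____"
],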
[
"print('Score de precision sur les données de teste : ', test_data_accuracy)",
"Score de precision sur les données de teste : 0.7727272727272727\n"
]
],
[
[
"Faaier un systeme predictif",
"_____no_output_____"
]
],
[
[
"input_data = (2,197,70,45,543,30.5,0.158,53)\n\n# Changement de inptu_data en tableau nympy\ninput_data_as_numpy_array = np.asarray(input_data)\n\n# Remodeler le tableau comme nous le predisons une instance \ninput_data_reshaped = input_data_as_numpy_array.reshape(1,-1)\n\n# normaliser l’entrée de donnees\nstd_date = scaler.transform(input_data_reshaped)\nprint(std_date)\n\nprediction = classifier.predict(std_date)\nprint(prediction)\n\nif (prediction[0] == 0) :\n print('La personne n\\'est pas diabetique')\nelse:\n print('La peronne est diabetique') ",
"[[-0.54791859 2.38188392 0.04624525 1.53455054 4.02192191 -0.18943689\n -0.94794368 1.68125866]]\n[1]\nLa peronne est diabetique\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7c8ab2c51edd50ebed0dd0763bf0db654b9af24 | 4,219 | ipynb | Jupyter Notebook | 10_cte3.ipynb | mnrclab/Advanced_SQL_TimeSeries | 395c97f01bf003e5c661c36e1b81589b2341fb17 | [
"Unlicense"
] | null | null | null | 10_cte3.ipynb | mnrclab/Advanced_SQL_TimeSeries | 395c97f01bf003e5c661c36e1b81589b2341fb17 | [
"Unlicense"
] | null | null | null | 10_cte3.ipynb | mnrclab/Advanced_SQL_TimeSeries | 395c97f01bf003e5c661c36e1b81589b2341fb17 | [
"Unlicense"
] | null | null | null | 26.534591 | 67 | 0.359564 | [
[
[
"sql(\n '''\n WITH daily_avg_sales AS\n (SELECT\n date_format(order_date, '%Y-%m-%d') order_day,\n avg(sales) avg_sales\n FROM \n superstore\n GROUP BY\n order_day\n )\n SELECT order_day, avg_sales\n FROM daily_avg_sales\n ORDER BY order_day;\n '''\n)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e7c8b2a73d64a02cf978ef3f8c79606891f8590d | 31,615 | ipynb | Jupyter Notebook | notebooks/10b-anlyz_run02-synthetic_lethal_classes-feat1.ipynb | pritchardlabatpsu/cga | 0a71c672b1348cebc724560643fd908d636fc133 | [
"MIT"
] | null | null | null | notebooks/10b-anlyz_run02-synthetic_lethal_classes-feat1.ipynb | pritchardlabatpsu/cga | 0a71c672b1348cebc724560643fd908d636fc133 | [
"MIT"
] | null | null | null | notebooks/10b-anlyz_run02-synthetic_lethal_classes-feat1.ipynb | pritchardlabatpsu/cga | 0a71c672b1348cebc724560643fd908d636fc133 | [
"MIT"
] | 1 | 2022-02-08T01:06:20.000Z | 2022-02-08T01:06:20.000Z | 72.845622 | 10,276 | 0.785007 | [
[
[
"### Breakdown of lethality",
"_____no_output_____"
]
],
[
[
"import pickle\nimport pandas as pd\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom ast import literal_eval",
"_____no_output_____"
]
],
[
[
"#### Read in files",
"_____no_output_____"
]
],
[
[
"dir_in_res = '../out/20.0216 feat/reg_rf_boruta'\ndir_in_anlyz = os.path.join(dir_in_res, 'anlyz_filtered')\ndf_featSummary = pd.read_csv(os.path.join(dir_in_anlyz, 'feat_summary.csv')) #feature summary\ndf_featSummary['feat_sources'] = df_featSummary['feat_sources'].apply(literal_eval)\ndf_featSummary['feat_genes'] = df_featSummary['feat_genes'].apply(literal_eval)",
"_____no_output_____"
],
[
"feat_summary_annot_gene = pd.read_csv(os.path.join(dir_in_anlyz, 'onsamegene', 'feat_summary_annot.csv'), header=0, index_col=0)\n\ngs_name = 'paralog'\nfeat_summary_annot_paralog = pd.read_csv(os.path.join(dir_in_anlyz, f'insame{gs_name}', 'feat_summary_annot.csv'), header=0, index_col=0)\n\ngs_name = 'Panther'\nfeat_summary_annot_panther = pd.read_csv(os.path.join(dir_in_anlyz, f'insamegeneset{gs_name}', 'feat_summary_annot.csv'), header=0, index_col=0)\n",
"_____no_output_____"
]
],
[
[
"#### Breakdown - basic - top most important feature",
"_____no_output_____"
]
],
[
[
"df_counts = df_featSummary.groupby('feat_source1')['feat_source1'].count()\ndf_counts = df_counts.to_dict()\ndf_sl = pd.DataFrame([{'new_syn_lethal':df_counts['CERES'],\n 'classic_syn_lethal': sum([df_counts[k] for k in ['CN','Mut','RNA-seq']]) }])\ndf_sl = df_sl.T.squeeze()",
"_____no_output_____"
],
[
"df_sl",
"_____no_output_____"
]
],
[
[
"#### Breakdown of lethality, top most important feature",
"_____no_output_____"
]
],
[
[
"df_src1 = df_featSummary[['target','feat_source1']].set_index('target')\ndf = pd.DataFrame({'isNotCERES': df_src1.feat_source1.isin(['RNA-seq', 'CN', 'Mut']),\n 'sameGene': feat_summary_annot_gene.inSame_1,\n 'sameParalog': feat_summary_annot_paralog.inSame_1,\n 'sameGS': feat_summary_annot_panther.inSame_1,\n 'isCERES': df_src1.feat_source1 == 'CERES'\n })\n\nlethal_dict = {'sameGene': 'Same gene',\n 'sameParalog': 'Paralog',\n 'sameGS': 'Gene set',\n 'isCERES': 'Functional',\n 'isNotCERES': 'Classic synthetic'}",
"_____no_output_____"
],
[
"df_counts = pd.DataFrame({'sum':df.sum(axis=0)})\ndf_counts['lethality'] = [lethal_dict[n] for n in df_counts.index]",
"_____no_output_____"
],
[
"df_counts",
"_____no_output_____"
],
[
"plt.figure()\nax = sns.barplot(df_counts['lethality'], df_counts['sum'], color='steelblue')\nax.set(xlabel='Lethality types', ylabel='Number of genes')\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"#### Breakdown of lethality, top 10 most important feature",
"_____no_output_____"
]
],
[
[
"df_src = df_featSummary.set_index('target').feat_sources\ndf = pd.DataFrame({'hasNoCERES': df_src.apply(lambda x: any([n in x for n in ['CN','Mut','RNA-seq','Lineage']])),\n 'sameGene': feat_summary_annot_gene.inSame_top10,\n 'sameParalog': feat_summary_annot_paralog.inSame_top10,\n 'sameGS': feat_summary_annot_panther.inSame_top10,\n 'hasCERES': df_src.apply(lambda x: 'CERES' in x)\n })\n\nlethal_dict = {'sameGene': 'Same gene',\n 'sameParalog': 'Paralog',\n 'sameGS': 'Gene set',\n 'hasCERES': 'Functional',\n 'hasNoCERES': 'Classic synthetic'}",
"_____no_output_____"
],
[
"df_counts = pd.DataFrame({'sum':df.sum(axis=0)})\ndf_counts['lethality'] = [lethal_dict[n] for n in df_counts.index]",
"_____no_output_____"
],
[
"df_counts",
"_____no_output_____"
],
[
"plt.figure()\nax = sns.barplot(df_counts['lethality'], df_counts['sum'], color='steelblue')\nax.set(xlabel='Lethality types', ylabel='Number of genes', ylim=[0,500])\nplt.tight_layout()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7c8b8711e254f19f2fc11bde84cb3b7e8f2ee52 | 307,271 | ipynb | Jupyter Notebook | Module 2, Inversion-Doug.ipynb | lheagy/inversion-tutorial | 78673e800d4662992e5fe9595b4422bc2d5c50e9 | [
"MIT"
] | null | null | null | Module 2, Inversion-Doug.ipynb | lheagy/inversion-tutorial | 78673e800d4662992e5fe9595b4422bc2d5c50e9 | [
"MIT"
] | null | null | null | Module 2, Inversion-Doug.ipynb | lheagy/inversion-tutorial | 78673e800d4662992e5fe9595b4422bc2d5c50e9 | [
"MIT"
] | null | null | null | 311.633874 | 149,360 | 0.92118 | [
[
[
"# Module 2: Inversion",
"_____no_output_____"
],
[
"In the previous module we started with a continuous distribution of a physical property and discretized it into many cells, then we performed a forward simulation that created data from known model parameters. Inversion, of course, is exactly the opposite process. Imagine each model parameter that we had represents a layer in a 1D layered earth. At the surface of the earth we measure the data, and when we invert we do so for the model parameters. Our goal is to take the observed data and recover models that emulate the real Earth as closely as possible. \n\nYou may have noticed that the act of discretizing our problem created more cells than data values. In our latter example we produced 20 data points from 1000 model parameters, which is only a few data points and many model parameters. While this was not much of a problem in the forward simulation, when we want to do the inverse process, that is, obtain the model parameters from the data, it is clear that we have many more unknowns than knowns. In short, we have an underdetermined problem, and therefore infinite possible solutions. In mathematical terms, geophysical surveys represent what are called \"ill-posed\" problems. \n\nAn \"ill-posed\" problem is any problem that does not satisfy the requirements for the definition of \"well-posed\" problem. A *well-posed* problem is a problem in mathematics that must satisfy all three of the following criteria:\n\n<ol>\n<li> A solution exists.\n<li> The solution is unique.\n<li> The solution's behaviors change continuously with continuously changing initial conditions.\n</ol>\n\nAny mathematical formulation that does not satisfy all three of the above is, by definition, an ill-posed problem. Since we are dealing with an underdetermined system, I hope that it is clear that we are dealing with an ill-posed problem (i.e., we have no unique solution), and we are going to have to come up with a method (or methods) that can help us choose from the available solutions. We need to devise an algorithm that can choose the \"best\" model from the infinitely many that are available to us. \n\nIn short, we are going to have to find an optimum model. More specifically, in the context of most geophysics problems, we are going to use gradient-based optimization. This process involves building an *objective function*, which is a function that casts our inverse problem as an optimization problem. We will build an objective function consisting of two parts:\n\n$$\n\\phi = \\phi_d + \\beta \\phi_m\n$$\n\nWhere the terms on the right hand side are (1) a data misfit (denoted as $\\phi_d$) and (2) a model regularization (denoted as $\\phi_m$). These two parts will be elaborated in detail below.\n\nOnce we have formulated the objective function, we will take derivatives and obtain a recovered model. This module will flesh out the details of the model objective function, and then take first and second derivatives to derive an expression that gives us a solution for our model parameters.\n\n",
"_____no_output_____"
],
[
"## The Data Misfit, $\\phi_d$",
"_____no_output_____"
],
[
"A *misfit* describes how close synthetic data matches measurements that are made in the field. Traditionally this term refers to the difference between the measured data and the predicted data. If these two quantities are sufficiently close, then we consider the model to be a viable candidate for the solution to our problem. Because the data are inaccurate, a model that reproduces those data exactly is not feasible. A realistic goal, rather, is to find a model whose predicted data are consistent with the errors in the observations, and this requires incorporating knowledge about the noise and uncertainties. The concept of fitting the data means that some estimate of the “noise” be available. Unfortunately “noise” within the context of inversion is everything that cannot be accounted for by a compatible relationship between the model and the data. More specifically, noise refers to (1) noise from data aquisition in the field, (2) uncertainty in source and receiver locations, (3) numerical error, (4) physical assumptions about our model that do not capture all of the physics. \n\nA standard approach is to assume that each datum, $d_i$, contains errors that can be described as Gaussian with a standard deviation $\\epsilon_i$. It is important to give a significant amount of thought towards assigning standard deviations in the data, but a reasonable starting point is to assign each $\\epsilon_i$ as $\\epsilon_i= floor +\\%|d_i|$. \n\nIncorporating both the differences between predicted and measured data and a measure of the uncertainties in the data yields our misfit function, $\\phi_d$:\n\n$$\n\\phi_d (m) = \\frac{1}{2} \\sum_{i=1}^N \\left( \\frac{F[m] -d_i^{obs} }{\\epsilon_i}\\right)^2 = \\frac{1}{2} \\|W_d(F[m] - d^{obs}) \\|_2^2\n$$ \n\nNote that the right hand size of the equation is written as a matrix-vector product, with each $\\epsilon_i$ in the denominator placed as elements on a diagonal matrix $W_d$, as follows:\n\n\\begin{equation}\n\\begin{split}\nW_d = \n\\begin{bmatrix}\n \\frac{1}{\\epsilon_1} & 0 & 0 & \\cdots & 0\\\\\n 0 & \\frac{1}{\\epsilon_2} & 0 & \\cdots & 0\\\\\n 0 & 0 & \\frac{1}{\\epsilon_3} & \\cdots & \\vdots\\\\\n 0 & 0 & 0 & \\ddots & \\frac{1}{\\epsilon_M}\\\\ \n\\end{bmatrix}\n\\end{split}\n\\end{equation}\n\nIf we return to linear problem from the previous section where our forward operator was simply a matrix of kernel functions, we can substitute $F[m]$ with $G$ and obtain\n$$\n\\phi_d (m) = \\frac{1}{2} \\sum_{i=1}^N \\left( \\frac{(Gm)_i -d_i^{obs} }{\\epsilon_i}\\right)^2 = \\frac{1}{2} \\|W_d(Gm - d^{obs}) \\|_2^2\n$$ \n\nNow that we have defined a measure of misfit, the next task is to determine a tolerance value, such that if the misfit is about equal to that value, then we have an acceptable fit. Suppose that the standard deviations are known and that errors are Gaussian, then $\\phi_d$ becomes a $\\chi_N^2$ variable with $N$ degrees of freedom. This is a well-known quantity with an expected value $E[\\chi_N^2]=N$ and a standard deviation of $\\sqrt{2N}$. Basically, what this means is that computing $\\phi_d$ should give us a value that is close to the number of data, $N$.\n\n\n\n",
"_____no_output_____"
],
[
"## The Model Regularization, $\\phi_m$",
"_____no_output_____"
],
[
"There are many options for choosing a model regularization, but the goal in determining a model regularization is the same: given that we have no unique solution, we must make assumptions in order to recast the problem in such a way that a solution exists. A general function used in 1D is as follows:\n\n$$\n\\phi_m = \\alpha_s \\int (m)^2 dx + \\alpha_x \\int \\left( \\frac{dm}{dx} \\right)^2 dx\n$$\n\nEach term in the above expression is a norm that measures characteristics about our model. The first term is a representation of the square of the Euclidean length for a continuous function, and therefore measures the length of the model, while the second term uses derivative information to measure the model's smoothness. Usually the model regularization is defined with respect to a reference model. In the above, the reference model would simply be zero, but choosing a non-zero reference model $m_{ref}$, yields the following:\n$$\n\\phi_m = \\alpha_s \\int (m-m_{ref})^2 dx + \\alpha_x \\int \\left( \\frac{d}{dx} (m-m_{ref}) \\right)^2 dx\n$$\n\nAs before, we will discretize this expression. It is easiest to break up each term and treat them separately, at first.\nWe will denote each term of $\\phi_m$ as $\\phi_s$ and $\\phi_x$, respectively. Consider the first term. Translating the integral to a sum yields:\n\n$$\n\\phi_s = \\alpha_s \\int (m)^2 dx \\rightarrow \\sum_{i=1}^N \\int_{x_{i-1}}^{x_i} (m_i)^2 dx = \\sum_{i=1}^N m_i^2 (x_i - x_{i-1})\n$$\n\nEach spatial \"cell\" is $x_i - x_{i-1}$, which is the distance between nodes, as you may recall from the previous module. To simplify notation, we will use $\\Delta x_{n_i}$ to denote the *ith* distance between nodes:\n\n<img src=\"figures/1D_domain_dx.png\" width=\"40%\" height=\"40%\"> <br>\n\n\nWe can then write $\\phi_s$ as:\n\n$$\n\\phi_s = \\alpha_s \\sum_{i=1}^N m_i^2 \\Delta x_{n_i} = \\alpha_s m^T W_s^T W_s m = \\alpha_s \\|W_s m\\|_2^2\n$$\n\nwith:\n\n\\begin{equation}\n\\begin{split}\nW_s = \n\\begin{bmatrix}\n {\\sqrt{\\Delta x_{n_1}}} & 0 & 0 & \\cdots & 0\\\\\n 0 & {\\sqrt{\\Delta x_{n_2}}} & 0 & \\cdots & 0\\\\\n 0 & 0 & {\\sqrt{\\Delta x_{n_3}}} & \\cdots & \\vdots\\\\\n 0 & 0 & 0 & \\ddots & {\\sqrt{\\Delta x_{n_N}}}\\\\ \n\\end{bmatrix}\n\\end{split}\n\\end{equation}\n\n\nFor the second term, we will do a similar process. 
First, we will delineate $\\Delta x_{c_i}$ as the distance between cell centers:\n\n<img src=\"figures/1D_h_lengths_dx.png\" width=\"40%\" height=\"40%\"> <br>\n\nA discrete approximation to the integral can be made by evaluating the derivative of the model based on how much it changes between the cell-centers, that is, we will take the average gradient between the *ith* and *i+1th* cells:\n\n$$\n\\phi_x = \\alpha_x \\int \\left( \\frac{dm}{dx} \\right)^2 dx \\rightarrow \\sum_{i=1}^{N-1} \\left( \\frac{m_{i+1}-m_i}{h_k}\\right) \\Delta x_{c_i} = m^T W_x^T W_x m = \\|W_x m\\|_2^2\n$$\n\nThe matrix $W_x$ is a finite difference matrix constructed thus:\n\n\n\\begin{equation}\n\\begin{split}\nD_x = \n\\begin{bmatrix}\n -\\frac{1}{{\\Delta x_{c_1}}} & \\frac{1}{{\\Delta x_{c_1}}} & 0 & \\cdots & 0\\\\\n 0 & -\\frac{1}{{\\Delta x_{c_2}}} & \\frac{1}{{\\Delta x_{c_2}}} & \\cdots & 0\\\\\n 0 & 0 & \\ddots & \\ddots & \\vdots\\\\\n 0 & 0 & 0 & -\\frac{1}{{\\Delta x_{c_{M-1}}}} & \\frac{1}{{\\Delta x_{c_{M-1}}}}\\\\ \n\\end{bmatrix}\n\\end{split}\n\\end{equation}\n\nand then we need to account for the integration, so we multiply by a diagonal matrix $\\rm diag(\\sqrt{v})$\n\n\\begin{equation}\nW_x = D_x \\rm diag(\\sqrt{v})\n\\end{equation}\n\nSo to summarize, we have $\\phi_m = \\phi_s + \\phi_x$ with \n\n\\begin{equation}\n\\begin{split}\n \\phi_m & = \\phi_s + \\phi_x \\\\[0.4em]\n & = \\alpha_s \\|W_s (m - m_{ref})\\|_2^2 + \\alpha_x \\|W_x (m - m_{ref})\\|_2^2 \\\\[0.4em] \n\\end{split}\n\\end{equation}\n\nNext, we will write this more compactly by stacking $W_s$ and $W_x$ into a matrix $W_m$ as follows\n\n\\begin{equation}\n\\begin{split}\nW_m = \n\\begin{bmatrix}\n \\sqrt{\\alpha_s} W_s\\\\\n \\sqrt{\\alpha_x} W_x\n\\end{bmatrix}\n\\end{split}\n\\end{equation}",
"_____no_output_____"
],
[
"## Model Objective Function",
"_____no_output_____"
],
[
"If we go back and recall what was discussed in the introduction, the model objective function casts the inverse problem as an optimization problem, and as mentioned, we will be using gradient-based optimization, so we will need to take derivatives. The complete model objective function that we are dealing with will contain both the data misfit and the model regularization. This means that we can write it as $\\phi$ as the sum of the two and then differentiate:\n\n$$\n\\phi = \\phi_d + \\beta \\phi_m\n$$\nFor the linear problem we are considering\n$$\n\\phi_d = \\frac{1}{2}\\| W_d (Gm-d^{obs})\\|_2^2 = \\frac{1}{2}(Gm-d^{obs})^T W_d^T W_d (Gm-d^{obs})\n$$\nand\n$$\n\\phi_m = \\frac{1}{2} \\|W_m (m-m_{ref}) \\|^2_2 = \\frac{1}{2}(m-m_{ref})^T W_m^T W_m (m-m_{ref})\n$$\n\nTo simplify the terms and see the math a little more clearly, let's note that $W_d(Gm-d^{obs})$, and $\\beta W_m(m-m_{ref})$ are simply vectors. And since we are taking the square of the 2-norm, all that we are really doing is taking the dot product of each vector with itself. So let $z=W_d(Gm-d^{obs})$, and let $y=W_m(m-m_{ref})$ where both $z$ and $y$ vectors are functions of $m$. So then:\n\n$$\n\\phi_d = \\frac{1}{2}\\|z\\|_2^2 = \\frac{1}{2}z^T z \n$$<br>\n$$\n\\phi_m = \\frac{1}{2}\\|y\\|_2^2 =\\frac{1}{2}y^T y \n$$\n\n\nTo minimize this, we want to look at $\\nabla \\phi$. Using our compact expressions:\n$$\n\\phi = \\phi_d + \\beta \\phi_m = \\frac{1}{2}z^Tz + \\beta \\frac{1}{2}y^Ty \\\\ \n$$\n\nTaking the derivative with respect to $m$ yields:\n\\begin{equation}\n\\begin{split}\n\\frac{d \\phi}{dm}& = \\frac{1}{2} \\left(z^T \\frac{dz}{dm} + z^T \\frac{dz}{dm} + \\beta y^T \\frac{dy}{dm} + \\beta y^T \\frac{dy}{dm}\\right)\\\\\\\\[0.6em]\n& = z^T \\frac{dz}{dm} + \\beta y^T \\frac{dy}{dm}\n\\end{split}\n\\end{equation}\n\nNote that \n$$\\frac{dz}{dm} = \\frac{d}{dm}(W_d(Gm-d^{obs})) = W_d G $$ \n\nand \n\n$$ \\frac{dy}{dm} = \\frac{d}{dm}(W_m (m-m_{ref})) = W_m $$\n\nNext, let's substitute both derivatives, our expressions for $z$ and $y$, apply the transposes, and rearrange:<br>\n\n\\begin{equation}\n\\begin{split}\n\\frac{d \\phi}{dm} & = z^T \\frac{dz}{dm} + \\beta y^T \\frac{dy}{dm} \\\\[0.6em]\n & = (W_d(Gm-d^{obs}))^T W_d G + \\beta (W_m (m-m_{ref}))^T W_m\\\\[0.6em]\n & = (Gm-d^{obs})^T W_d^T W_d G + \\beta (m-m_{ref})^T W_m^T W_m \\\\[0.6em]\n & = ((Gm)^T - d^T) W_d^T W_d G + \\beta (m^T-m_{ref}^T)W_m^T W_m \\\\[0.6em]\n & = (m^T G^T - d^T) W_d^T W_d G + \\beta m^T W_m^T W_m - \\beta m_{ref}^T W_m^T W_m \\\\[0.6em]\n & = m^T G^T W_d^T W_d G - d^T W_d^T W_d G + \\beta m^T W_m^T W_m - \\beta m_{ref}^T W_m^T W_m\\\\[0.6em]\n & = m^T G^T W_d^T W_d G + \\beta m^T W_m^T W_m - d^T W_d^T W_d G - \\beta m_{ref}^T W_m^T W_m\n \\end{split}\n\\end{equation}\n\nNow we have an expression for the derivative of our equation that we can work with. Setting the gradient to zero and gathering like terms gives:<br>\n\n\\begin{equation} \n\\begin{split}\n m^T G^T W_d^T W_d G + \\beta m^T W_m^T W_m = d^T W_d^T W_d G + \\beta m_{ref}^T W_m^T W_m\\\\[0.6em]\n (G^T W_d^T W_d G + \\beta W_m^T W_m)m = G^T W_d^T W_d d + \\beta W_m^T W_m m_{ref}\\\\[0.6em]\n\\end{split}\n\\end{equation}\n\nFrom here we can do two things. 
First, we can solve for $m$, our recovered model:\n\n\\begin{equation}\n\\begin{split}\n m = (G^T W_d^T W_d G + \\beta W_m^T W_m)^{-1} (G^T W_d^T W_d d + \\beta W_m^T W_m m_{ref})\\\\[0.6em]\n\\end{split}\n\\end{equation}\n\nSecond, we can get the second derivative simply from the bracketed terms on the left hand side of the equation above:\n\\begin{equation} \n\\frac{d^2 \\phi}{dm^2} = G^T W_d^T W_d G + \\beta W_m^T W_m\n\\end{equation}\n\nIn the model problem that we are solving, second derivative information is not required to obtain a solution, however, in non-linear problems or situations when higher order information is required, it is useful to have this available when we need it. \n\n\n\n",
"_____no_output_____"
],
[
"## Solving for $m$ in Python",
"_____no_output_____"
],
[
"Before we solve for $m$, we will recreate what we had in the first module. First, install the appropriate packages:",
"_____no_output_____"
]
],
[
[
"# Import Packages\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"Here is the model that we had previously:",
"_____no_output_____"
]
],
[
[
"# Begin by creating a ficticious set of model data\n\nn_cells = 1000 # Set number of model parameters \nn_nodes = n_cells + 1\nxn = np.linspace(0, 1, n_nodes) # Define 1D domain on nodes\nxc = 0.5*(xn[1:] + xn[:-1]) # Define 1D domain on cell centers",
"_____no_output_____"
],
[
"# Define Gaussian function:\ndef gauss(x, amplitude, mean, std):\n \"\"\"Define a gaussian function\"\"\"\n return amplitude * np.exp(-((x-mean)/std)**2 / 2) \n\n# Choose parameters for Gaussian, evaluate, and store in an array, f.\nstd = 6e-2 \nmean = 0.7\namplitude_gaussian = 1 \ngaussian = gauss(xc, amplitude_gaussian, mean, std)\n\nfig, ax = plt.subplots(1, 1)\nax.plot(xc, gaussian)\nax.set_title(\"Gaussian\")",
"_____no_output_____"
],
[
"# Define a boxcar function:\nx_boxcar = np.r_[0.2, 0.35]\namplitude_boxcar = 1\nboxcar = np.zeros(n_cells) # initialize an array of all zeros\nboxcar_inds = (xc >= x_boxcar.min()) & (xc <= x_boxcar.max()) # find the indices of the boxcar\nboxcar[boxcar_inds] = amplitude_boxcar \n\n# construct the model\nmtrue = gaussian + boxcar\n \n# Plot \nfig, ax = plt.subplots(1, 1)\nax.plot(xc, mtrue)\nax.set_xlabel('x')\nax.set_ylabel('m(x)')\nax.set_title('Model, $m(x)$')",
"_____no_output_____"
]
],
[
[
"Again, we define out kernel functions and averaging and volume matrices as before:",
"_____no_output_____"
]
],
[
[
"# Make the set of kernel functions\n\ndef kernel_functions(x, j, p, q): \n return np.exp(-p*j*x) * np.cos(2*np.pi*q*j*x) \n\np = 0.01 # Set values for p, q\nq = 0.15\nn_data = 20 # specify number of output data\n\nj_min = 0\nj_max = n_data\nj_values = np.linspace(j_min, j_max, n_data)\n\nGn = np.zeros((n_nodes, n_data))\n\nfor i, j in enumerate(j_values):\n Gn[:, i] = kernel_functions(xn, j, p, q)\n \n# Plot \nfig, ax = plt.subplots(1, 1)\nax.plot(xn, Gn)\nax.set_xlabel('x')\nax.set_ylabel('g(x)')\nax.set_title('Kernel functions, $g(x)$')",
"_____no_output_____"
],
[
"# Make Averaging Matrix\nAv = np.zeros((n_cells, n_nodes)) # Create a matrix of zeros of the correct dimensions \n\n# and fill in with elements usin the loop below (note the 1/2 is included in here). \nfor i in range(n_cells):\n Av[i, i] = 0.5 \n Av[i, i+1] = 0.5 \n\nprint(Av.shape)",
"(1000, 1001)\n"
],
[
"# make the Volume, \"delta x\" array\ndelta_x = np.diff(xn) # set x-spacings\nV = np.diag(delta_x) # create diagonal matrix \n\nprint(V.shape)",
"(1000, 1000)\n"
]
],
[
[
"Last, we produce our data:",
"_____no_output_____"
]
],
[
[
"G = Gn.T @ Av.T @ V",
"_____no_output_____"
],
[
"d = G @ mtrue",
"_____no_output_____"
],
[
"# Plot\nfig, ax = plt.subplots(1, 1)\nax.plot(d, '-o')\nax.set_title('Synthetic Data $d$')",
"_____no_output_____"
]
],
[
[
"## Introducing noise to the data",
"_____no_output_____"
],
[
"This is where we stood at the end of the last module. Next, to simulate taking data in the field, we are going to add a noise to the data before we perform our inversion. We will do this by defining a lambda function that assigns a floor value and percent scaling factor. Also, we will assume that the noise is Gaussian. We then add the noise to the original data to make a simulated vector of observed data. The superposition of our noise and original data is plotted below.",
"_____no_output_____"
]
],
[
[
"# Add noise to our synthetic data\n\nadd_noise = False # set to true if you want to add noise to the data\n\nif add_noise is True:\n relative_noise = 0.04\n noise_floor = 1e-2\n noise = (\n relative_noise * np.random.randn(n_data) * np.abs(d) + # percent of data\n noise_floor * np.random.randn(n_data)\n )\n dobs = d + noise\nelse: \n dobs = d\n\nfig, ax = plt.subplots(1, 1)\nax.plot(d, '-o', label=\"d clean\")\nax.plot(dobs, '-s', label=\"dobs\")\nax.set_title(\"synthetic data\")\nax.legend()",
"_____no_output_____"
]
],
[
[
"# Setting up the inverse problem\n\nNow we will assemble the pieces for constructing an objective function to be minimized in the inversion. \n\nThroughout we use L2 norms, so the first thing we will do is define a simple function for computing a weighted L2 norm. ",
"_____no_output_____"
]
],
[
[
"def weighted_l2_norm(W, v):\n \"\"\"\n A function that returns a weighted L2 norm. The parameter W is a weighting matrix \n and v is a vector. \n \"\"\"\n Wv = W @ v\n return Wv.T @ Wv",
"_____no_output_____"
]
],
[
[
"## Calculating $\\phi_d$",
"_____no_output_____"
],
[
"We are now in a position to build up the data misfit term, $\\phi_d$. We will need a function to compute the 2-norm, so constructing a function to do this is useful. Next we will make the matrix $W_d$, which is a diagonal matrix that contains the inverses of the uncertainty in our data. Again, we will define a floor and percent error for each datum. Last, we calculate $\\phi_d$ using our 2-norm function that we created. It is insightful to see what values have been assigned to our floor and misfit, so they are printed below.",
"_____no_output_____"
]
],
[
[
"# Calculate the data misfit, phi_d\n\nnoise_floor = 1e-3\nrelative_error = 0.05\nstandard_deviation = noise_floor + relative_error * np.abs(dobs)\n\n# construct Wd\nWd = np.diag(1/standard_deviation)\n\nfig, ax = plt.subplots(1, 1)\nimg = ax.imshow(Wd, \"Greys\")\nplt.colorbar(img, ax=ax)\nax.set_title(\"Wd\")",
"_____no_output_____"
]
],
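[
[
"# Quick sanity check of the misfit machinery (an illustrative sketch): evaluate phi_d for a\n# constant trial model m = 0.5 using the weighted 2-norm helper defined above. For data with\n# well-estimated Gaussian errors we expect phi_d to be roughly the number of data, N.\nm_trial = 0.5 * np.ones(n_cells)\nphi_d_trial = 0.5 * weighted_l2_norm(Wd, G @ m_trial - dobs)\nprint('phi_d(m_trial) =', phi_d_trial, ' N =', n_data)",
"_____no_output_____"
]
],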
[
[
"## Calculating $\\phi_m$\n\nAs discussed above, we are going to first need to make our $W_m$ matrix, which is a partitioned matrix from two other matrices, $W_s$ and $W_x$, each scaled by a separate parameter $\\alpha_s$ and $\\alpha_x$. We are going to discuss the manner in which $\\alpha_s$ and $\\alpha_x$ are selected in more detail during the next module. But for the moment, we will set them as they are defined below. Once this matrix is built up, calculating $\\phi_m$ is a simple matter, given that we have made a function to compute the 2-norm already. For the sake of illustration, I compute and print $\\phi_m$ from the residual of our reference model $m_{ref}$ and our true model $m$. However, of interest to us will be the residual of the model that we recover $m_{rec}$ and our reference model.",
"_____no_output_____"
]
],
[
[
"# Start with Ws \nsqrt_vol = np.sqrt(delta_x) # in 1D - the \"Volume\" = length of each cell (delta_x)\nWs = np.diag(sqrt_vol)\n\n# and now Wx\nDx = np.zeros((n_cells-1, n_cells)) # differencing matrix\n\nfor i, dx in enumerate(delta_x[:-1]):\n Dx[i, i] = -1/dx\n Dx[i, i+1] = 1/dx\n\nWx = Dx @ np.diag(sqrt_vol)\n\nprint(Ws.shape, Wx.shape)",
"(1000, 1000) (999, 1000)\n"
],
[
"# plot both\nfig, ax = plt.subplots(1, 2, figsize=(10, 4))\nplot_up_to = 10 # plot 10 entries\n\n# plot Ws\nimg = ax[0].imshow(Ws[:plot_up_to, :plot_up_to], \"Greys\")\nplt.colorbar(img, ax=ax[0])\nax[0].set_title(\"Ws\")\n\n# plot Wx\nimg = ax[1].imshow(Wx[:plot_up_to, :plot_up_to+1], \"bwr\")\nplt.colorbar(img, ax=ax[1])\nax[1].set_title(\"Wx\")\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"### Stack Ws, Wx to make a single regularization matrix Wm",
"_____no_output_____"
]
],
[
[
"alpha_s = 1e-6\nalpha_x = 1\n\nWm = np.vstack([\n np.sqrt(alpha_s)*Ws, \n np.sqrt(alpha_x)*Wx\n])\n\nprint(Wm.shape)",
"(1999, 1000)\n"
]
],
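[
[
"# Illustrative evaluation of the regularization term (a sketch): phi_m for the true model\n# measured against a constant reference model. The value 0.5 matches the m_ref chosen for\n# the inversion below; the variable name mref_demo is only used here.\nmref_demo = 0.5 * np.ones(n_cells)\nphi_m_true = 0.5 * weighted_l2_norm(Wm, mtrue - mref_demo)\nprint('phi_m(mtrue) =', phi_m_true)",
"_____no_output_____"
]
],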
[
[
"## Inverting for our recovered model",
"_____no_output_____"
],
[
"At last we can invert to find our recovered model and see how it compares with the true model. First we will assign a value for $\\beta$. As with the $\\alpha$ parameters from before, we will assign a value, but the choice of beta will be a topic that we explore more fully in the next module. Once our $\\beta$ value is assigned, we will define yet another lambda function to obtain the recovered model, plot it against our true model, and then output our results for $\\phi_d$ and $\\phi_m$. ",
"_____no_output_____"
]
],
[
[
"beta = 1e-1 # Set beta value\nmref = 0.5 * np.ones(n_cells) # choose a reference model\n\nWdG = Wd @ G\nmrec = (\n np.linalg.inv(WdG.T @ WdG + beta * Wm.T @ Wm) @ \n (WdG.T @ Wd @ dobs + beta * Wm.T @ Wm @ mref)\n)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, 1)\n\nax.plot(xc, mtrue, label=\"true\")\nax.plot(xc, mrec, label=\"recovered\")\nax.legend()",
"_____no_output_____"
],
[
"dpred = G @ mrec\n\nfig, ax = plt.subplots(1, 1)\nax.plot(dobs, '-o', label=\"observed\")\nax.plot(dpred, '-s', label=\"predicted\")\nax.legend()",
"_____no_output_____"
]
],
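[
[
"# Report the misfit and regularization achieved by the recovered model (an illustrative\n# sketch, using the helper and matrices defined above).\nphi_d_rec = 0.5 * weighted_l2_norm(Wd, G @ mrec - dobs)\nphi_m_rec = 0.5 * weighted_l2_norm(Wm, mrec - mref)\nprint('phi_d =', phi_d_rec, ' (compare to N =', n_data, ')')\nprint('phi_m =', phi_m_rec)",
"_____no_output_____"
]
],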
[
[
"This concludes the current module. For the next module, we will examine the constraints on our choice for $\\alpha_s$ and $\\alpha_x$, and then introduce the Tikhonov curve and a method for choosing $\\beta$.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7c8c528ff23fc27db5589e08de1306e4069fabd | 9,159 | ipynb | Jupyter Notebook | coding-259-master/coding-259/py3/04-advanced-conv/vgg.ipynb | huangjeake/OpenCV | f0253f7b6adf135d256f24e7298a8b8d11755af9 | [
"MIT"
] | null | null | null | coding-259-master/coding-259/py3/04-advanced-conv/vgg.ipynb | huangjeake/OpenCV | f0253f7b6adf135d256f24e7298a8b8d11755af9 | [
"MIT"
] | null | null | null | coding-259-master/coding-259/py3/04-advanced-conv/vgg.ipynb | huangjeake/OpenCV | f0253f7b6adf135d256f24e7298a8b8d11755af9 | [
"MIT"
] | null | null | null | 37.691358 | 94 | 0.433999 | [
[
[
"import tensorflow as tf\nimport os\nimport pickle\nimport numpy as np\n\nCIFAR_DIR = \"./../../cifar-10-batches-py\"\nprint(os.listdir(CIFAR_DIR))",
"_____no_output_____"
],
[
"def load_data(filename):\n \"\"\"read data from data file.\"\"\"\n with open(filename, 'rb') as f:\n data = pickle.load(f, encoding='bytes')\n return data[b'data'], data[b'labels']\n\n# tensorflow.Dataset.\nclass CifarData:\n def __init__(self, filenames, need_shuffle):\n all_data = []\n all_labels = []\n for filename in filenames:\n data, labels = load_data(filename)\n all_data.append(data)\n all_labels.append(labels)\n self._data = np.vstack(all_data)\n self._data = self._data / 127.5 - 1\n self._labels = np.hstack(all_labels)\n print(self._data.shape)\n print(self._labels.shape)\n \n self._num_examples = self._data.shape[0]\n self._need_shuffle = need_shuffle\n self._indicator = 0\n if self._need_shuffle:\n self._shuffle_data()\n \n def _shuffle_data(self):\n # [0,1,2,3,4,5] -> [5,3,2,4,0,1]\n p = np.random.permutation(self._num_examples)\n self._data = self._data[p]\n self._labels = self._labels[p]\n \n def next_batch(self, batch_size):\n \"\"\"return batch_size examples as a batch.\"\"\"\n end_indicator = self._indicator + batch_size\n if end_indicator > self._num_examples:\n if self._need_shuffle:\n self._shuffle_data()\n self._indicator = 0\n end_indicator = batch_size\n else:\n raise Exception(\"have no more examples\")\n if end_indicator > self._num_examples:\n raise Exception(\"batch size is larger than all examples\")\n batch_data = self._data[self._indicator: end_indicator]\n batch_labels = self._labels[self._indicator: end_indicator]\n self._indicator = end_indicator\n return batch_data, batch_labels\n\ntrain_filenames = [os.path.join(CIFAR_DIR, 'data_batch_%d' % i) for i in range(1, 6)]\ntest_filenames = [os.path.join(CIFAR_DIR, 'test_batch')]\n\ntrain_data = CifarData(train_filenames, True)\ntest_data = CifarData(test_filenames, False)",
"_____no_output_____"
],
[
"x = tf.placeholder(tf.float32, [None, 3072])\ny = tf.placeholder(tf.int64, [None])\n# [None], eg: [0,5,6,3]\nx_image = tf.reshape(x, [-1, 3, 32, 32])\n# 32*32\nx_image = tf.transpose(x_image, perm=[0, 2, 3, 1])\n\n# conv1: 神经元图, feature_map, 输出图像\nconv1_1 = tf.layers.conv2d(x_image,\n 32, # output channel number\n (3,3), # kernel size\n padding = 'same',\n activation = tf.nn.relu,\n name = 'conv1_1')\nconv1_2 = tf.layers.conv2d(conv1_1,\n 32, # output channel number\n (3,3), # kernel size\n padding = 'same',\n activation = tf.nn.relu,\n name = 'conv1_2')\n\n# 16 * 16\npooling1 = tf.layers.max_pooling2d(conv1_2,\n (2, 2), # kernel size\n (2, 2), # stride\n name = 'pool1')\n\n\nconv2_1 = tf.layers.conv2d(pooling1,\n 32, # output channel number\n (3,3), # kernel size\n padding = 'same',\n activation = tf.nn.relu,\n name = 'conv2_1')\nconv2_2 = tf.layers.conv2d(conv2_1,\n 32, # output channel number\n (3,3), # kernel size\n padding = 'same',\n activation = tf.nn.relu,\n name = 'conv2_2')\n# 8 * 8\npooling2 = tf.layers.max_pooling2d(conv2_2,\n (2, 2), # kernel size\n (2, 2), # stride\n name = 'pool2')\n\nconv3_1 = tf.layers.conv2d(pooling2,\n 32, # output channel number\n (3,3), # kernel size\n padding = 'same',\n activation = tf.nn.relu,\n name = 'conv3_1')\nconv3_2 = tf.layers.conv2d(conv3_1,\n 32, # output channel number\n (3,3), # kernel size\n padding = 'same',\n activation = tf.nn.relu,\n name = 'conv3_2')\n# 4 * 4 * 32\npooling3 = tf.layers.max_pooling2d(conv3_2,\n (2, 2), # kernel size\n (2, 2), # stride\n name = 'pool3')\n# [None, 4 * 4 * 32]\nflatten = tf.layers.flatten(pooling3)\ny_ = tf.layers.dense(flatten, 10)\n\nloss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)\n# y_ -> sofmax\n# y -> one_hot\n# loss = ylogy_\n\n# indices\npredict = tf.argmax(y_, 1)\n# [1,0,1,1,1,0,0,0]\ncorrect_prediction = tf.equal(predict, y)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))\n\nwith tf.name_scope('train_op'):\n train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)",
"_____no_output_____"
],
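[
"# Optional sanity check (illustrative): count the trainable parameters of the network defined\n# above. Assumes TensorFlow 1.x graph mode, as used in this notebook.\ntotal_params = sum(int(np.prod(v.get_shape().as_list())) for v in tf.trainable_variables())\nprint('trainable parameters:', total_params)",
"_____no_output_____"
],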
[
"init = tf.global_variables_initializer()\nbatch_size = 20\ntrain_steps = 10000\ntest_steps = 100\n\n# train 10k: 73.4%\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(train_steps):\n batch_data, batch_labels = train_data.next_batch(batch_size)\n loss_val, acc_val, _ = sess.run(\n [loss, accuracy, train_op],\n feed_dict={\n x: batch_data,\n y: batch_labels})\n if (i+1) % 100 == 0:\n print('[Train] Step: %d, loss: %4.5f, acc: %4.5f' \n % (i+1, loss_val, acc_val))\n if (i+1) % 1000 == 0:\n test_data = CifarData(test_filenames, False)\n all_test_acc_val = []\n for j in range(test_steps):\n test_batch_data, test_batch_labels \\\n = test_data.next_batch(batch_size)\n test_acc_val = sess.run(\n [accuracy],\n feed_dict = {\n x: test_batch_data, \n y: test_batch_labels\n })\n all_test_acc_val.append(test_acc_val)\n test_acc = np.mean(all_test_acc_val)\n print('[Test ] Step: %d, acc: %4.5f' % (i+1, test_acc))\n \n \n ",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7c8c83a435a06456dbdd943ec16cd36aa0d093a | 86,295 | ipynb | Jupyter Notebook | 02_Traffic_info_test_2_hidden_12_pool_multi_location.ipynb | chenmingxiang110/NCA_Prediction | d999c0c10a06a7120c945dc87003b6b823e1a52d | [
"MIT"
] | 13 | 2020-12-14T09:06:19.000Z | 2021-12-15T08:13:26.000Z | 02_Traffic_info_test_2_hidden_12_pool_multi_location.ipynb | chenmingxiang110/NCA_Prediction | d999c0c10a06a7120c945dc87003b6b823e1a52d | [
"MIT"
] | null | null | null | 02_Traffic_info_test_2_hidden_12_pool_multi_location.ipynb | chenmingxiang110/NCA_Prediction | d999c0c10a06a7120c945dc87003b6b823e1a52d | [
"MIT"
] | 4 | 2020-12-14T09:10:23.000Z | 2021-10-01T19:54:25.000Z | 102.854589 | 12,312 | 0.82627 | [
[
[
"%matplotlib inline\n\"\"\"\ndata source:\nhttps://zola.planning.nyc.gov/data#12.31/40.73327/-73.92447\nhttps://www1.nyc.gov/site/planning/data-maps/open-data/dwn-pluto-mappluto.page\nhttps://sfplanninggis.org/PIM/help.html\n\"\"\"\n\nimport os\nimport time\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom IPython.display import clear_output\nfrom datetime import datetime\n\nfrom lib.utils import SamplePool, make_seed, make_circle_masks, get_rand_avail\nfrom lib.utils import get_sobel, softmax\nfrom lib.NCCAModel2 import NCCAModel2",
"_____no_output_____"
],
[
"with open('anchor_loc.pickle', 'rb') as handle:\n anchor_loc = pickle.load(handle)\n\nroot = \"_maps/\"\nfull_size = (100,100)\nmap_size = (80,80)\ncolor_map = [(0.5,0.5,0.5),\n (0.5,1.0,0.5),\n (1.0,1.0,0.5),\n (1.0,0.7,0.2),\n (1.0,0.5,0.5),\n (1.0,0.5,1.0)]\n\n################################################################\n\nd_trains = []\nd_tests = []\nalive_maps = []\n\nfor d_i, obj_name in enumerate(list(anchor_loc.keys())[:10]):\n\n filenames = []\n common_index = {}\n\n for filename in os.listdir(root):\n if filename[:len(obj_name)]==obj_name:\n filenames.append(root+filename)\n\n for filename in filenames:\n with open(filename, 'rb') as handle:\n map_dict = pickle.load(handle)\n for index in map_dict:\n try:\n tmp = int(map_dict[index]['status'])\n if index in common_index:\n common_index[index]+= 1\n else:\n common_index[index] = 1\n except (TypeError, KeyError):\n continue\n\n common_index = [x for x in common_index.keys() if common_index[x]==len(filenames)]\n\n d_train = np.zeros([64, full_size[0], full_size[1], 4])\n d_test = np.zeros([len(filenames)-d_train.shape[0], full_size[0], full_size[1], d_train.shape[-1]])\n\n for i,filename in enumerate(filenames[:d_train.shape[0]]):\n with open(filename, 'rb') as handle:\n map_dict = pickle.load(handle)\n for index in common_index:\n try:\n status = min(int(map_dict[index]['status'])-1, 3)\n d_train[i, index[0], index[1]] = np.zeros(d_train.shape[-1])\n d_train[i, index[0], index[1], status] = 1\n except (TypeError, KeyError):\n continue\n\n for i,filename in enumerate(filenames[d_train.shape[0]:]):\n with open(filename, 'rb') as handle:\n map_dict = pickle.load(handle)\n for index in common_index:\n try:\n status = min(int(map_dict[index]['status'])-1, 3)\n d_test[i, index[0], index[1]] = np.zeros(d_test.shape[-1])\n d_test[i, index[0], index[1], status] = 1\n except (TypeError, KeyError):\n continue\n\n alive_map = np.expand_dims(np.expand_dims(np.sum(d_train[0, ...], -1)>0.001, 0), -1)\n\n cut_off = ((full_size[0]-map_size[0])//2, (full_size[1]-map_size[1])//2)\n d_train = d_train[:, cut_off[0]:(cut_off[0]+map_size[0]),\n cut_off[1]:(cut_off[1]+map_size[1]), :]\n d_test = d_test[:, cut_off[0]:(cut_off[0]+map_size[0]),\n cut_off[1]:(cut_off[1]+map_size[1]), :]\n alive_map = alive_map[:, cut_off[0]:(cut_off[0]+map_size[0]),\n cut_off[1]:(cut_off[1]+map_size[1]), :]\n\n print(d_train.shape, d_test.shape, alive_map.shape)\n \n d_trains.append(d_train)\n d_tests.append(d_test)\n alive_maps.append(alive_map)",
"(64, 80, 80, 4) (28, 80, 80, 4) (1, 80, 80, 1)\n(64, 80, 80, 4) (206, 80, 80, 4) (1, 80, 80, 1)\n(64, 80, 80, 4) (30, 80, 80, 4) (1, 80, 80, 1)\n(64, 80, 80, 4) (44, 80, 80, 4) (1, 80, 80, 1)\n(64, 80, 80, 4) (32, 80, 80, 4) (1, 80, 80, 1)\n(64, 80, 80, 4) (32, 80, 80, 4) (1, 80, 80, 1)\n(64, 80, 80, 4) (45, 80, 80, 4) (1, 80, 80, 1)\n(64, 80, 80, 4) (12, 80, 80, 4) (1, 80, 80, 1)\n(64, 80, 80, 4) (39, 80, 80, 4) (1, 80, 80, 1)\n(64, 80, 80, 4) (18, 80, 80, 4) (1, 80, 80, 1)\n"
],
[
"DEVICE = torch.device(\"cuda:0\")\nmodel_path = \"models/ncca_softmax_multi_traffic.pth\"\n\nCHANNEL_N = 16\nALPHA_CHANNEL = 4\n\nlr = 1e-3\nlr_gamma = 0.99995\nbetas = (0.5, 0.5)\nn_epoch = 100000\n\nBATCH_SIZE = 10\nN_STEPS = 128\nPOOL_SIZE = 256\nCELL_FIRE_RATE = 0.5\nCALIBRATION = 1.0\neps = 1e-3\n\nUSE_PATTERN_POOL = 1\nDAMAGE_N = 3\nTRANS_N = 3",
"_____no_output_____"
],
[
"valid_masks = []\nfor alive_map in alive_maps:\n valid_masks.append(alive_map.astype(bool))\nvalid_masks = np.concatenate(valid_masks, 0)\n\npools_list = []\nfor d_i, d_train in enumerate(d_trains):\n pools = []\n for _ in range(d_train.shape[0]):\n init_coord = get_rand_avail(valid_masks[d_i:(d_i+1)])\n seed = make_seed(map_size, CHANNEL_N, np.arange(CHANNEL_N-ALPHA_CHANNEL)+ALPHA_CHANNEL, init_coord)\n pools.append(SamplePool(x=np.repeat(seed[None, ...], POOL_SIZE, 0)))\n pools_list.append(pools)\n \nmy_model = NCCAModel2(CHANNEL_N, ALPHA_CHANNEL, CELL_FIRE_RATE, DEVICE).to(DEVICE)\nmy_model.load_state_dict(torch.load(model_path))\nfor param in my_model.parameters():\n param.requires_grad = False",
"_____no_output_____"
],
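[
"# Optional check (illustrative): report the number of parameters in the loaded model.\nn_params = sum(p.numel() for p in my_model.parameters())\nprint('model parameters:', n_params)",
"_____no_output_____"
],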
[
"def test(x, target, valid_mask_t, calibration_map, steps):\n history = [x.detach().cpu().numpy(),]\n for _ in range(steps):\n x = my_model(x, valid_mask_t, 1)\n h = torch.softmax(x[..., :ALPHA_CHANNEL], -1)\n t = target[..., :ALPHA_CHANNEL]\n _delta = t*(h-1)\n delta = _delta * calibration_map * CALIBRATION\n y1 = x[..., :ALPHA_CHANNEL]-delta\n \n alpha_h = x[..., ALPHA_CHANNEL:(ALPHA_CHANNEL+1)]\n y2 = alpha_h - 2 * (alpha_h-valid_mask_t) * calibration_map * CALIBRATION\n x = torch.cat((y1,y2,x[..., (ALPHA_CHANNEL+1):]), -1)\n history.append(x.detach().cpu().numpy())\n return x, history\n \n# 重新选择target\ncalibration_map = make_circle_masks(BATCH_SIZE, map_size[0], map_size[1], rmin=0.5, rmax=0.5)[..., None]\ncalibration_map = torch.from_numpy(calibration_map.astype(np.float32)).to(DEVICE)\n\ntargets = []\n# pre_target_i = [-1]*10\npre_target_i = [1, 122, 7, 6, 19, 27, 19, 11, 22, 6]\ntarget_is = []\nfor d_i in range(10):\n if pre_target_i[d_i]<0:\n target_i = np.random.randint(d_tests[d_i].shape[0])\n else:\n target_i = pre_target_i[d_i]\n print(target_i)\n target_is.append((d_i, target_i))\n target = np.concatenate((d_tests[d_i][target_i:target_i+1], valid_masks[d_i:(d_i+1)]), -1)\n targets.append(target)\ntargets = np.concatenate(targets, 0).astype(np.float32)\ntargets[..., :-1] += eps\ntargets[..., :-1] /= np.sum(targets[..., :-1], axis=-1, keepdims=True)\n_target = torch.from_numpy(targets).to(DEVICE)\n\nx0 = np.repeat(seed[None, ...], BATCH_SIZE, 0)*0\nx0 = torch.from_numpy(x0.astype(np.float32)).to(DEVICE)\n\nvalid_mask_t = valid_masks[[tmp[0] for tmp in target_is]]\nvalid_mask_t = torch.from_numpy(valid_mask_t.astype(np.float32)).to(DEVICE)\n\nx, history = test(x0, _target, valid_mask_t, calibration_map, N_STEPS)\n# x.backward()\nhistory = np.array(history)\ncali_map_numpy = calibration_map.detach().cpu().numpy()\nprint(\"history generated\", history.shape)",
"1\n122\n7\n6\n19\n27\n19\n11\n22\n6\nhistory generated (129, 10, 80, 80, 16)\n"
],
[
"color_map = [(0.0,0.0,0.0),\n (0.5,1.0,0.5),\n (1.0,1.0,0.5),\n (1.0,0.7,0.2),\n (1.0,0.5,0.5)]\n\nfor history_i in range(10):\n history_t = history[:,history_i,...,:(ALPHA_CHANNEL+1)]\n targets_t = targets[history_i,...]\n\n map_dict = np.argmax(targets_t[..., :-1], -1)\n _map = np.zeros([map_dict.shape[0], map_dict.shape[1], 3])\n for i in range(_map.shape[0]):\n for j in range(_map.shape[1]):\n if targets_t[i,j,-1]>0.1:\n _map[i,j] = color_map[map_dict[i,j]+1]\n\n plt.figure(figsize=(14,6))\n plt.subplot(1,9,1)\n rotated_img = ndimage.rotate(_map*0.999+0.00001, 90)\n plt.imshow(rotated_img)\n if history_i==0: plt.gca().set_title(\"Target\")\n plt.axis('off')\n plt.subplot(1,9,2)\n rotated_img = ndimage.rotate(cali_map_numpy[history_i, ..., 0], 90)\n plt.imshow(rotated_img, cmap=plt.cm.gray, vmin=0, vmax=1)\n if history_i==0: plt.gca().set_title(\"Pre-explored\")\n plt.axis('off')\n \n shown_steps = [2,4,8,16,32,64,128]\n for index, i_map in enumerate(shown_steps):\n plt.subplot(1,9,index+3)\n i_map-=1\n\n map_dict = np.argmax(history_t[i_map, ..., :-1], -1)\n _map = np.zeros([map_dict.shape[0], map_dict.shape[1], 3])\n for i in range(_map.shape[0]):\n for j in range(_map.shape[1]):\n if history_t[i_map,i,j,-1]>0.1:\n _map[i,j] = color_map[map_dict[i,j]+1]\n\n rotated_img = ndimage.rotate(_map*0.999+0.00001, 90)\n plt.imshow(rotated_img)\n if history_i==0: plt.gca().set_title('Step #'+str(i_map+1))\n plt.axis('off')\n plt.show()",
"_____no_output_____"
],
[
"percentages = []\nfor _ in range(10):\n for d_i in range(10):\n targets = []\n target_is = []\n for target_i in range(d_tests[d_i].shape[0]):\n target_is.append((d_i, target_i))\n target = np.concatenate((d_tests[d_i][target_i:target_i+1], valid_masks[d_i:(d_i+1)]), -1)\n targets.append(target)\n targets = np.concatenate(targets, 0).astype(np.float32)\n _target = torch.from_numpy(targets).to(DEVICE)\n \n calibration_map = make_circle_masks(_target.size(0), map_size[0], map_size[1],\n rmin=0.5, rmax=0.5)[..., None]\n calibration_map = torch.from_numpy(calibration_map.astype(np.float32)).to(DEVICE)\n x0 = np.repeat(seed[None, ...], _target.size(0), 0)*0\n x0 = torch.from_numpy(x0.astype(np.float32)).to(DEVICE)\n\n valid_mask_t = valid_masks[[tmp[0] for tmp in target_is]]\n valid_mask_t = torch.from_numpy(valid_mask_t.astype(np.float32)).to(DEVICE)\n\n x, history = test(x0, _target, valid_mask_t, calibration_map, N_STEPS)\n \n hyp = x.detach().cpu().numpy()\n hyp = np.argmax(hyp[..., :(ALPHA_CHANNEL+1)], -1)\n y = np.argmax(targets, -1)\n percentage = np.sum((hyp==y)*alive_maps[d_i][...,0])/(np.sum(alive_maps[d_i])*hyp.shape[0])\n percentages.append(percentage)\n print(percentage)\nprint(\"---------\")\nprint(np.mean(percentages))",
"0.6557621502209131\n0.7531524208975867\n0.7594786015672091\n0.7239869964007895\n0.6608549288617886\n0.803313364624506\n0.7698341994555803\n0.8309084126095878\n0.8490995998221432\n0.8345033486645687\n0.637711708394698\n0.7593831673297827\n0.7592224231464738\n0.7202542668059909\n0.6776549796747967\n0.7906913908102767\n0.7610987379361545\n0.8126748740906548\n0.8429116644434563\n0.8495521665456306\n0.6487297496318115\n0.7521038592457541\n0.7658981314044605\n0.7331185417392314\n0.6732405995934959\n0.7840059906126482\n0.7914625092798813\n0.8107162842753217\n0.8525270490588409\n0.8260308238521746\n0.6477724594992637\n0.7522655218558868\n0.757157926461724\n0.7086845466155811\n0.6781631097560976\n0.8037302371541502\n0.7849913387775304\n0.8409811602313001\n0.8408366681488069\n0.8326474622770919\n0.6371686303387334\n0.7546500453553434\n0.776130198915009\n0.7314582607686057\n0.667603531504065\n0.7841681077075099\n0.7678173719376392\n0.8002704719268793\n0.8513598636431007\n0.8260509965302993\n0.6548969072164949\n0.746346874073808\n0.7728149487643159\n0.7198827353999768\n0.6712874745934959\n0.7975543478260869\n0.8025736203909923\n0.80325498974072\n0.8520082999851786\n0.8442669248769467\n0.6425810014727541\n0.7474201341799664\n0.7632459312839059\n0.7100371531406015\n0.6613471798780488\n0.8129245923913043\n0.7700569166048008\n0.8149132624510352\n0.8533792796798577\n0.8254458161865569\n0.6292893961708395\n0.749337632361262\n0.7579716696805304\n0.7195402298850575\n0.6765275660569106\n0.8148005187747036\n0.8039594159861421\n0.8467636634956165\n0.8622165406847487\n0.8255265068990559\n0.6346649484536082\n0.7490547227935299\n0.7816455696202531\n0.7287008011145942\n0.6670954014227642\n0.7853415266798419\n0.7579683246721108\n0.8168718522663683\n0.8548984733955832\n0.8229444040990882\n0.6347938144329897\n0.7503861940130947\n0.7637733574442436\n0.7351793800069663\n0.668286331300813\n0.7906836709486166\n0.7706632021776788\n0.8242865137101287\n0.8497480361642211\n0.8360566448801743\n---------\n0.763090055501292\n"
],
[
"percentages = []\nfor _ in range(10):\n for d_i in range(10):\n targets = []\n target_is = []\n for target_i in range(d_trains[d_i].shape[0]):\n target_is.append((d_i, target_i))\n target = np.concatenate((d_trains[d_i][target_i:target_i+1], valid_masks[d_i:(d_i+1)]), -1)\n targets.append(target)\n targets = np.concatenate(targets, 0).astype(np.float32)\n _target = torch.from_numpy(targets).to(DEVICE)\n \n calibration_map = make_circle_masks(_target.size(0), map_size[0], map_size[1],\n rmin=0.5, rmax=0.5)[..., None]\n calibration_map = torch.from_numpy(calibration_map.astype(np.float32)).to(DEVICE)\n\n x0 = np.repeat(seed[None, ...], _target.size(0), 0)*0\n x0 = torch.from_numpy(x0.astype(np.float32)).to(DEVICE)\n\n valid_mask_t = valid_masks[[tmp[0] for tmp in target_is]]\n valid_mask_t = torch.from_numpy(valid_mask_t.astype(np.float32)).to(DEVICE)\n\n x, history = test(x0, _target, valid_mask_t, calibration_map, N_STEPS)\n \n hyp = x.detach().cpu().numpy()\n hyp = np.argmax(hyp[..., :6], -1)\n y = np.argmax(targets, -1)\n percentage = np.sum((hyp==y)*alive_maps[d_i][...,0])/(np.sum(alive_maps[d_i])*hyp.shape[0])\n percentages.append(percentage)\n print(percentage)\nprint(\"---------\")\nprint(np.mean(percentages))",
"0.6239449097938145\n0.7395206984273821\n0.7449635510849909\n0.6936981162196679\n0.6618711890243902\n0.7784322504940712\n0.705517469376392\n0.7859103945159485\n0.8766821712427746\n0.7884837962962963\n0.6338635631443299\n0.7393472479185939\n0.7378503616636528\n0.6970266602809706\n0.6633161839430894\n0.7776718441205533\n0.71118109688196\n0.7701804700615557\n0.8788046423410405\n0.7903560729847494\n0.62277706185567\n0.7383137719703978\n0.7385637997287523\n0.6968590357598978\n0.6614662728658537\n0.7817788105237155\n0.7356973830734966\n0.7790203553441523\n0.8808819544797688\n0.7855108478576616\n0.6176183956185567\n0.7480559088806661\n0.7359643422242315\n0.70242656449553\n0.6624587144308943\n0.7794396924407114\n0.7270409938752784\n0.7764234750979295\n0.8741306900289018\n0.7857888525780683\n0.6394249355670103\n0.7480197733580018\n0.7491735420433996\n0.6860791826309067\n0.6590447154471545\n0.7800997406126482\n0.7274063891982183\n0.7749720201454953\n0.880915823699422\n0.7866455610021786\n0.6187822164948453\n0.739751965772433\n0.7371581148282098\n0.7043702107279693\n0.6641577743902439\n0.7760545331027668\n0.7059437639198218\n0.7816522104085059\n0.8817851336705202\n0.7867930737109659\n0.6273236146907216\n0.7434233348751156\n0.7333154385171791\n0.6878591954022989\n0.6558292047764228\n0.781940927618577\n0.7243701280623608\n0.785429490766648\n0.8803061777456648\n0.7885121641249092\n0.6314030283505154\n0.7412118408880666\n0.738938178119349\n0.6993095466155811\n0.6622205284552846\n0.7795053112648221\n0.7059089643652561\n0.7699531337437046\n0.8806110007225434\n0.7922340232389252\n0.6231636597938144\n0.740069958371878\n0.7362257007233273\n0.6951149425287356\n0.6629906631097561\n0.7738080533596838\n0.7533842566815144\n0.7854906966983771\n0.880227149566474\n0.7922737381989833\n0.630114368556701\n0.7404385407030527\n0.7395315325497287\n0.6917784163473819\n0.663427337398374\n0.7745838994565217\n0.7389598413140311\n0.7876329043088975\n0.8832189306358381\n0.7880752995642701\n---------\n0.7414718541588834\n"
]
],
[
[
"## Speed Test",
"_____no_output_____"
]
],
[
[
"times = []\nvalid_mask_t = torch.from_numpy(np.ones([1,80,80,1]).astype(np.float32)).to(DEVICE)\nfor d_i in range(10):\n _target = torch.from_numpy(d_trains[d_i].astype(np.float32)).to(DEVICE)\n\n calibration_map = make_circle_masks(_target.size(0), map_size[0], map_size[1],\n rmin=0.5, rmax=0.5)[..., None]\n calibration_map = torch.from_numpy(calibration_map.astype(np.float32)).to(DEVICE)\n\n x0 = np.repeat(seed[None, ...], _target.size(0), 0)*0\n x0 = torch.from_numpy(x0.astype(np.float32)).to(DEVICE)\n\n start_time = time.time()\n x, history = test(x0, _target, valid_mask_t, calibration_map, N_STEPS)\n times.append((time.time()-start_time)/_target.size(0))\n print(times[-1])\nprint(\"---------\")\nprint(np.mean(times))",
"0.03891327977180481\n0.04005876183509827\n0.04132132604718208\n0.04402513429522514\n0.04346586391329765\n0.04147135838866234\n0.03921307995915413\n0.038483794778585434\n0.04098214581608772\n0.044003926217556\n---------\n0.04119386710226536\n"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7c8ce76e5038eea2574fbcb422f5842ead98259 | 590,819 | ipynb | Jupyter Notebook | rulevetting/projects/csi_pecarn/notebooks/fit_models_ll.ipynb | aashen12/rule-vetting | b1a91675f838aee45eb8d989e299664151d02c11 | [
"MIT"
] | null | null | null | rulevetting/projects/csi_pecarn/notebooks/fit_models_ll.ipynb | aashen12/rule-vetting | b1a91675f838aee45eb8d989e299664151d02c11 | [
"MIT"
] | null | null | null | rulevetting/projects/csi_pecarn/notebooks/fit_models_ll.ipynb | aashen12/rule-vetting | b1a91675f838aee45eb8d989e299664151d02c11 | [
"MIT"
] | null | null | null | 652.838674 | 330,804 | 0.939276 | [
[
[
"Fit interpretable models to the training set and test on validation sets.",
"_____no_output_____"
]
],
[
[
"#%matplotlib inline\n#%load_ext autoreload\n#%autoreload 2\n\nimport os\nimport pickle as pkl\nfrom os.path import join as oj\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree\nfrom sklearn.feature_selection import RFE\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import AdaBoostClassifier\n\nimport imodels\nfrom rulevetting.api import validation\nfrom rulevetting.projects.csi_pecarn.dataset import Dataset\n\nMODELS_DIR = './models'\nos.makedirs(MODELS_DIR, exist_ok=True)\n\noutcome_def = 'outcome' # output",
"/Users/seunghoonpaik/Desktop/SH/Berkeley/Coursework/215A/Lab/final-proj/andy-github/rule-env/lib/python3.8/site-packages/redis/connection.py:72: UserWarning: redis-py works best with hiredis. Please consider installing\n warnings.warn(msg)\n"
],
[
"def var_selection(df,method=['rfe',10]): ## input: a dataframe with outcome as the last column, method: ['rfe',number of\n ## features to choose] or ['lasso',penalty] output: a dataframe containing the columns we select and the outcome column\n algo=method[0]\n param=method[1]\n X=df.drop(columns=['outcome'])\n y=df.outcome\n if algo=='rfe':\n mymodel=LogisticRegression()\n myrfe = RFE(mymodel,n_features_to_select=param)\n myfit = myrfe.fit(X, y)\n index=np.append(myfit.support_,True)\n \n elif algo=='lasso':\n mylasso = LogisticRegression(penalty='l1', solver='liblinear',C=param) ## for example C=0.1\n myfit=mylasso.fit(X, y)\n \n index=np.append(myfit.coef_[0]!=0,True)\n \n \n return index\n ",
"_____no_output_____"
],
[
"df_train, df_tune, _ = Dataset().get_data(load_csvs=True)",
"_____no_output_____"
],
[
"def predict_and_save(model, model_name='decision_tree'):\n '''Plots cv and returns cv, saves all stats\n '''\n results = {'model': model}\n for x, y, suffix in zip([X_train, X_tune],\n [y_train, y_tune],\n ['_train', '_tune']):\n \n stats, threshes = validation.all_stats_curve(y, model.predict_proba(x)[:, 1],\n plot=suffix == '_tune')\n for stat in stats.keys():\n results[stat + suffix] = stats[stat]\n results['threshes' + suffix] = threshes\n pkl.dump(results, open(oj(MODELS_DIR, model_name + '.pkl'), 'wb'))\n return stats, threshes\n\n\n\ndef model_valid(max_num=20,model_name='decision_tree'):\n '''use validation set to select # of features'''\n \n record=np.zeros(max_num)\n sensitivity=np.zeros(max_num)\n for num in range(1,max_num+1):\n index=var_selection(df_train,method=['rfe',num])\n loc_train=df_train.loc[:,index]\n loc_tune=df_tune.loc[:,index]\n loc_=_.loc[:,index]\n X_train = loc_train.drop(columns=outcome_def)\n y_train = loc_train[outcome_def].values\n X_tune = loc_tune.drop(columns=outcome_def)\n y_tune = loc_tune[outcome_def].values\n \n if model_name=='decision_tree':\n model = DecisionTreeClassifier(max_depth=4, class_weight={0: 1, 1: 1e3})\n model.fit(X_train, y_train)\n elif model_name=='logistic':\n model= LogisticRegression()\n model.fit(X_train, y_train)\n elif model_name=='adaboost':\n model= AdaBoostClassifier(n_estimators=50, learning_rate=1)\n model.fit(X_train, y_train)\n \n \n \n stats, threshes = validation.all_stats_curve(y_tune, model.predict_proba(X_tune)[:, 1],\n plot=False)\n sens=stats['sens']\n spec=stats['spec']\n if sens[0]<0.98:\n record[num-1]=0.\n sensitivity[num-1]=sens[0]\n continue\n j=0\n while sens[j]>0.98:\n #print([j, sens[j]], spec[j])\n #print(sens[j])\n cur_pec=spec[j]\n \n j+=1\n record[num-1]=cur_pec\n sensitivity[num-1]=sens[j]\n \n print(record)\n print(sensitivity)\n return np.argmax(record)+1 ## output the optimal number of features via validation",
"_____no_output_____"
],
[
"# print(model_valid(20,model_name='adaboost')) ## output zero when sens<.98, otherwise output spec (adaboost,decision_tree,logistic)",
"_____no_output_____"
],
[
"# print(model_valid(20,model_name='decision_tree')) ## output zero when sens<.98, otherwise output spec (adaboost,decision_tree,logistic)",
"_____no_output_____"
],
[
"# print(model_valid(30,model_name='logistic')) ## output zero when sens<.98, otherwise output spec (adaboost,decision_tree,logistic)",
"_____no_output_____"
],
[
"index=var_selection(df_train,method=['rfe',9])\n\nprint(df_train.columns[index])\n\ndf_train=df_train.loc[:,index]\ndf_tune=df_tune.loc[:,index]\n_=_.loc[:,index]\n\nX_train = df_train.drop(columns=outcome_def)\ny_train = df_train[outcome_def].values\nX_tune = df_tune.drop(columns=outcome_def)\ny_tune = df_tune[outcome_def].values\n\nprocessed_feats = df_train.keys().values.tolist()\nfeature_names=processed_feats",
"Index(['ArrPtIntub', 'DxCspineInjury', 'FocalNeuroFindings', 'HighriskDiving',\n 'IntervForCervicalStab', 'PtExtremityWeakness', 'PtSensoryLoss',\n 'PtTenderExt', 'SubInj_TorsoTrunk', 'outcome'],\n dtype='object')\n"
]
],
[
[
"# fit simple models",
"_____no_output_____"
],
[
"**decision tree**",
"_____no_output_____"
]
],
[
[
"# fit decision tree\ndt = DecisionTreeClassifier(max_depth=4, class_weight={0: 1, 1: 1e3})\ndt.fit(X_train, y_train)\n\nstats, threshes = predict_and_save(dt, model_name='decision_tree')\nprint(stats,threshes)\n\nplt.show()\nplt.savefig(\"tree-roc.png\", dpi='figure', format=None, metadata=None,\n bbox_inches=None, pad_inches=0,\n facecolor='auto', edgecolor='auto',\n backend=None)",
"100%|███████████████████████████████████████████████████| 14/14 [00:00<00:00, 1871.38it/s]\n100%|███████████████████████████████████████████████████| 14/14 [00:00<00:00, 2638.52it/s]"
],
[
"fig = plt.figure(figsize=(50, 40))\nplot_tree(dt, feature_names=feature_names, filled=True)\nplt.show()",
"_____no_output_____"
],
[
"# fit logitstic\ndt= LogisticRegression()\ndt.fit(X_train, y_train)\n\nstats_lr, threshes_lr = predict_and_save(dt, model_name='logistic')\nprint(stats_lr, \"\\n\")\nprint(threshes_lr)\n\nplt.show()\nfig = plt.figure(figsize=(50, 40))\nplt.show()",
"100%|███████████████████████████████████████████████████| 74/74 [00:00<00:00, 2963.78it/s]\n100%|███████████████████████████████████████████████████| 48/48 [00:00<00:00, 3448.55it/s]"
],
[
"# fit adaboost\ndt= AdaBoostClassifier(n_estimators=100, learning_rate=1)\ndt.fit(X_train, y_train)\n\nstats_ab, threshes_ab = predict_and_save(dt, model_name='adaboost')\nprint(stats_ab, \"\\n\")\nprint(threshes_ab)\n\nplt.show()\nfig = plt.figure(figsize=(50, 40))\nplt.show()",
"100%|███████████████████████████████████████████████████| 74/74 [00:00<00:00, 3015.37it/s]\n100%|███████████████████████████████████████████████████| 48/48 [00:00<00:00, 3513.92it/s]"
],
[
"(np.asarray(stats_lr[\"sens\"]) - np.asarray(stats_ab[\"sens\"])) * 1000",
"_____no_output_____"
]
],
[
[
"**bayesian rule list (this one is slow)**",
"_____no_output_____"
]
],
[
[
"np.random.seed(13)\n# train classifier (allow more iterations for better accuracy; use BigDataRuleListClassifier for large datasets)\nprint('training bayesian_rule_list...')\nbrl = imodels.BayesianRuleListClassifier(listlengthprior=2, max_iter=10000, class1label=\"IwI\", verbose=False)\nbrl.fit(X_train, y_train, feature_names=feature_names)\nstats, threshes = predict_and_save(brl, model_name='bayesian_rule_list')\nprint(brl)",
"training bayesian_rule_list...\n"
],
[
"print(brl)",
"Trained RuleListClassifier for detecting IwI\n=============================================\nIF IntervForCervicalStab > 0.5 THEN probability of IwI: 59.3% (54.8%-63.8%)\nELSE IF FocalNeuroFindings > 0.5 THEN probability of IwI: 15.8% (9.7%-23.0%)\nELSE IF DxCspineInjury > 0.5 THEN probability of IwI: 10.0% (5.1%-16.2%)\nELSE probability of IwI: 1.3% (0.8%-2.0%)\n============================================\n\n"
]
],
[
[
"**rulefit**",
"_____no_output_____"
]
],
[
[
"# fit a rulefit model\nnp.random.seed(13)\nrulefit = imodels.RuleFitRegressor(max_rules=4)\nrulefit.fit(X_train, y_train, feature_names=feature_names)\n\n# preds = rulefit.predict(X_test)\nstats, threshes = predict_and_save(rulefit, model_name='rulefit')\n'''\ndef print_best(sens, spec):\n idxs = np.array(sens) > 0.9\n print(np.array(sens)[idxs], np.array(spec)[idxs])\nprint_best(sens, spec)\n'''",
"100%|█████████████████████████████████████████████████████| 7/7 [00:00<00:00, 2259.51it/s]\n100%|█████████████████████████████████████████████████████| 7/7 [00:00<00:00, 1988.09it/s]\n"
],
[
"# pd.reset_option('display.max_colwidth')\nrulefit.visualize()",
"_____no_output_____"
]
],
[
[
"**greedy (CART) rule list**",
"_____no_output_____"
]
],
[
[
"class_weight = {0: 1, 1: 100}\nd = imodels.GreedyRuleListClassifier(max_depth=9, class_weight=class_weight, criterion='neg_corr')\nd.fit(X_train, y_train, feature_names=feature_names, verbose=False)\nstats, threshes = predict_and_save(d, model_name='grl')\n# d.print_list()\nprint(d)",
"/Users/seunghoonpaik/Desktop/SH/Berkeley/Coursework/215A/Lab/final-proj/andy-github/rule-env/lib/python3.8/site-packages/numpy/lib/function_base.py:2691: RuntimeWarning: invalid value encountered in true_divide\n c /= stddev[:, None]\n/Users/seunghoonpaik/Desktop/SH/Berkeley/Coursework/215A/Lab/final-proj/andy-github/rule-env/lib/python3.8/site-packages/numpy/lib/function_base.py:2692: RuntimeWarning: invalid value encountered in true_divide\n c /= stddev[None, :]\n100%|█████████████████████████████████████████████████████| 7/7 [00:00<00:00, 2065.43it/s]\n100%|█████████████████████████████████████████████████████| 6/6 [00:00<00:00, 1816.90it/s]"
]
],
[
[
"**rf**",
"_____no_output_____"
],
[
"# look at all the results",
"_____no_output_____"
]
],
[
[
"def plot_metrics(suffix, title=None, fs=15):\n for fname in sorted(os.listdir(MODELS_DIR)):\n if 'pkl' in fname:\n if not fname[:-4] == 'rf':\n r = pkl.load(open(oj(MODELS_DIR, fname), 'rb'))\n # print(r)\n # print(r.keys())\n\n threshes = np.array(r['threshes' + suffix])\n sens = np.array(r['sens' + suffix])\n spec = np.array(r['spec' + suffix])\n plt.plot(100 * sens, 100 * spec, 'o-', label=fname[:-4], alpha=0.6, markersize=3)\n plt.xlabel('Sensitivity (%)', fontsize=fs)\n plt.ylabel('Specificity (%)', fontsize=fs)\n s = suffix[1:]\n if title is None:\n plt.title(f'{s}\\n{data_sizes[s][0]} IAI-I / {data_sizes[s][1]}')\n else:\n plt.title(title, fontsize=fs)\n\n # print best results\n if suffix == '_test2':\n idxs = (sens > 0.95) & (spec > 0.43)\n if np.sum(idxs) > 0:\n idx_max = np.argmax(spec[idxs])\n print(fname, f'{100 * sens[idxs][idx_max]:0.2f} {100 * spec[idxs][idx_max]:0.2f}')\n\n if suffix == '_test2':\n plt.plot(96.77, 43.98, 'o', color='black', label='Original CDR', ms=4)\n else:\n plt.plot(97.0, 42.5, 'o', color='black', label='Original CDR', ms=4)\n plt.grid()\n\n\nsuffixes = ['_train', '_tune'] # _train, _test1, _test2, _cv\ntitles = ['Train (PECARN)', 'Tune (PECARN)']\nR, C = 1, len(suffixes)\nplt.figure(dpi=200, figsize=(C * 2.5, R * 3), facecolor='w')\nfs = 10\nfor i, suffix in enumerate(suffixes):\n ax = plt.subplot(R, C, i + 1)\n plot_metrics(suffix, title=titles[i], fs=fs)\n if i > 0:\n plt.ylabel('')\n plt.yticks([0, 25, 50, 75, 100], labels=[''] * 5)\n # ax.yaxis.set_visible(False)\n plt.xlim((50, 101))\n plt.ylim((0, 101))\nplt.tight_layout()\n# plt.subplot(R, C, 1)\n# plt.legend(fontsize=20)\nplt.legend(bbox_to_anchor=(1.1, 1), fontsize=fs, frameon=False)\n# plt.savefig('figs/metrics_3_splits')\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7c8d039548291327ea79bbe530efcac6b168cc8 | 6,919 | ipynb | Jupyter Notebook | exercise-underfitting-and-overfitting.ipynb | gabboraron/Intro_to_Machine_Learning-Kaggle | 8daff9ac960dfbbbac327d7dc9d75983b7adcd3f | [
"MIT"
] | null | null | null | exercise-underfitting-and-overfitting.ipynb | gabboraron/Intro_to_Machine_Learning-Kaggle | 8daff9ac960dfbbbac327d7dc9d75983b7adcd3f | [
"MIT"
] | null | null | null | exercise-underfitting-and-overfitting.ipynb | gabboraron/Intro_to_Machine_Learning-Kaggle | 8daff9ac960dfbbbac327d7dc9d75983b7adcd3f | [
"MIT"
] | null | null | null | 6,919 | 6,919 | 0.75112 | [
[
[
"**This notebook is an exercise in the [Introduction to Machine Learning](https://www.kaggle.com/learn/intro-to-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/dansbecker/underfitting-and-overfitting).**\n\n---\n",
"_____no_output_____"
],
[
"## Recap\nYou've built your first model, and now it's time to optimize the size of the tree to make better predictions. Run this cell to set up your coding environment where the previous step left off.",
"_____no_output_____"
]
],
[
[
"# Code you have previously used to load data\nimport pandas as pd\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\n\n\n# Path of the file to read\niowa_file_path = '../input/home-data-for-ml-course/train.csv'\n\nhome_data = pd.read_csv(iowa_file_path)\n# Create target object and call it y\ny = home_data.SalePrice\n# Create X\nfeatures = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']\nX = home_data[features]\n\n# Split into validation and training data\ntrain_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)\n\n# Specify Model\niowa_model = DecisionTreeRegressor(random_state=1)\n# Fit Model\niowa_model.fit(train_X, train_y)\n\n# Make validation predictions and calculate mean absolute error\nval_predictions = iowa_model.predict(val_X)\nval_mae = mean_absolute_error(val_predictions, val_y)\nprint(\"Validation MAE: {:,.0f}\".format(val_mae))\n\n# Set up code checking\nfrom learntools.core import binder\nbinder.bind(globals())\nfrom learntools.machine_learning.ex5 import *\nprint(\"\\nSetup complete\")",
"_____no_output_____"
]
],
[
[
"# Exercises\nYou could write the function `get_mae` yourself. For now, we'll supply it. This is the same function you read about in the previous lesson. Just run the cell below.",
"_____no_output_____"
]
],
[
[
"def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):\n model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)\n model.fit(train_X, train_y)\n preds_val = model.predict(val_X)\n mae = mean_absolute_error(val_y, preds_val)\n return(mae)",
"_____no_output_____"
]
],
[
[
"## Step 1: Compare Different Tree Sizes\nWrite a loop that tries the following values for *max_leaf_nodes* from a set of possible values.\n\nCall the *get_mae* function on each value of max_leaf_nodes. Store the output in some way that allows you to select the value of `max_leaf_nodes` that gives the most accurate model on your data.",
"_____no_output_____"
]
],
[
[
"candidate_max_leaf_nodes = [5, 25, 50, 100, 250, 500]\n# Write loop to find the ideal tree size from candidate_max_leaf_nodes\nresults = []\nfor max_leaf_nodes in candidate_max_leaf_nodes:\n my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)\n print(\"Max leaf nodes: %d \\t\\t Mean Absolute Error: %d\" %(max_leaf_nodes, my_mae))\n results.append(my_mae)\nbest_value = min(results) \n\n# Store the best value of max_leaf_nodes (it will be either 5, 25, 50, 100, 250 or 500)\nbest_tree_size = candidate_max_leaf_nodes[results.index(best_value)]\n\n# Check your answer\nstep_1.check()",
"_____no_output_____"
],
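[
"# Illustrative alternative (not part of the exercise): the same selection written as a dict\n# comprehension over the candidate sizes defined above; it reuses get_mae and the train/validation\n# splits from the earlier cells and should give the same best_tree_size.\nscores = {leaf_size: get_mae(leaf_size, train_X, val_X, train_y, val_y) for leaf_size in candidate_max_leaf_nodes}\nbest_tree_size = min(scores, key=scores.get)\nbest_tree_size",
"_____no_output_____"
],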
[
"# The lines below will show you a hint or the solution.\n# step_1.hint() \n# step_1.solution()",
"_____no_output_____"
]
],
[
[
"## Step 2: Fit Model Using All Data\nYou know the best tree size. If you were going to deploy this model in practice, you would make it even more accurate by using all of the data and keeping that tree size. That is, you don't need to hold out the validation data now that you've made all your modeling decisions.",
"_____no_output_____"
]
],
[
[
"# Fill in argument to make optimal size and uncomment\nfinal_model = DecisionTreeRegressor(max_leaf_nodes=best_tree_size, random_state=1)\n\n# fit the final model and uncomment the next two lines\nfinal_model.fit(X, y)\n\n# Check your answer\nstep_2.check()",
"_____no_output_____"
],
[
"step_2.hint()\nstep_2.solution()",
"_____no_output_____"
]
],
[
[
"You've tuned this model and improved your results. But we are still using Decision Tree models, which are not very sophisticated by modern machine learning standards. In the next step you will learn to use Random Forests to improve your models even more.\n\n# Keep Going\n\nYou are ready for **[Random Forests](https://www.kaggle.com/dansbecker/random-forests).**\n",
"_____no_output_____"
],
[
"---\n\n\n\n\n*Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/intro-to-machine-learning/discussion) to chat with other learners.*",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7c8dbc4972297e915b067dc6d49f82bf5f1971a | 122,610 | ipynb | Jupyter Notebook | notebooks/svgtensor.ipynb | GeorgeProjects/deepsvg | 7b5b1985d9ab4648c56f8cf4ad1592ed4f0ca9bb | [
"MIT"
] | null | null | null | notebooks/svgtensor.ipynb | GeorgeProjects/deepsvg | 7b5b1985d9ab4648c56f8cf4ad1592ed4f0ca9bb | [
"MIT"
] | null | null | null | notebooks/svgtensor.ipynb | GeorgeProjects/deepsvg | 7b5b1985d9ab4648c56f8cf4ad1592ed4f0ca9bb | [
"MIT"
] | null | null | null | 241.83432 | 62,323 | 0.866389 | [
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import os\nos.chdir(\"..\")",
"_____no_output_____"
],
[
"from deepsvg.svglib.geom import Point\nfrom deepsvg.svglib.svg import SVG\nfrom deepsvg.svglib.svg_path import SVGPath\nfrom deepsvg.svglib.utils import to_gif\n\nfrom deepsvg.difflib.tensor import SVGTensor\nfrom deepsvg.difflib.utils import *\nfrom deepsvg.difflib.loss import *",
"_____no_output_____"
],
[
"import torch.optim as optim\nimport IPython.display as ipd\nfrom moviepy.editor import ImageClip, concatenate_videoclips, ipython_display",
"_____no_output_____"
],
[
"import pickle\nimport csv",
"_____no_output_____"
]
],
[
[
"# Differentiable SVGTensor optimization",
"_____no_output_____"
],
[
"Load a target SVG and apply the standard pre-processing.",
"_____no_output_____"
]
],
[
[
"svg = SVG.load_svg(\"docs/imgs/dolphin.svg\").normalize().zoom(0.9).canonicalize().simplify_heuristic()",
"simplify\n"
]
],
[
[
"Convert the SVG to the differentiable SVGTensor data-structure.",
"_____no_output_____"
]
],
[
[
"svg_target = SVGTensor.from_data(svg.to_tensor())",
"_____no_output_____"
],
[
"p_target = svg_target.sample_points()\nplot_points(p_target, show_color=True)",
"_____no_output_____"
]
],
[
[
"Create an arbitrary SVG whose Bézier parameters will be optimized to match the target shape.",
"_____no_output_____"
]
],
[
[
"circle = SVG.unit_circle().normalize().zoom(0.9).split(8) # split: 1/2/4/8\nsvg_pred = SVGTensor.from_data(circle.to_tensor())",
"_____no_output_____"
]
],
[
[
"SVGTensor enables to sample points in a differentiable way, so that the loss that will be backpropagated down to the SVG Bézier parameters.",
"_____no_output_____"
]
],
[
[
"p_pred = svg_pred.sample_points()\nplot_points(p_pred, show_color=True)",
"_____no_output_____"
],
[
"svg_pred.control1.requires_grad_(True)\nsvg_pred.control2.requires_grad_(True)\nsvg_pred.end_pos.requires_grad_(True);",
"_____no_output_____"
],
[
"optimizer = optim.Adam([svg_pred.control1, svg_pred.control2, svg_pred.end_pos], lr=0.1)",
"_____no_output_____"
]
],
[
[
"Write a standard gradient descent algorithm and observe the step-by-step optimization!",
"_____no_output_____"
]
],
[
[
"img_list = []\n\nfor i in range(150):\n optimizer.zero_grad()\n\n p_pred = svg_pred.sample_points()\n l = svg_emd_loss(p_pred, p_target)\n l.backward()\n optimizer.step()\n \n if i % 4 == 0:\n img = svg_pred.draw(with_points=True, do_display=False, return_png=True)\n img_list.append(img)\n \nto_gif(img_list)",
"_____no_output_____"
],
[
"svg = SVG.load_svg(\"docs/imgs/dolphin.svg\")\nprint(svg)\nsvg_tensor = svg.to_tensor()\nprint(svg_tensor)",
"SVG[Bbox(0.0 0.0 294.8680114746094 294.8680114746094)](\n\tSVGPathGroup(SVGPath(M[P(0.0, 0.0), P(284.3949890136719, 115.12999725341797)] C[P(284.3949890136719, 115.12999725341797), P(280.5419921875, 119.21499633789062), P(274.864990234375, 119.21499633789062), P(272.9989929199219, 119.21499633789062)] C[P(272.9989929199219, 119.21499633789062), P(269.53900146484375, 119.21499633789062), P(265.260986328125, 118.64199829101562), P(259.1309814453125, 117.35599517822266)] C[P(259.1309814453125, 117.35599517822266), P(254.31597900390625, 116.34599304199219), P(250.16998291015625, 115.33899688720703), P(246.51397705078125, 114.45199584960938)] C[P(246.51397705078125, 114.45199584960938), P(239.3219757080078, 112.70499420166016), P(234.1239776611328, 111.4419937133789), P(229.23097229003906, 111.4419937133789)] C[P(229.23097229003906, 111.4419937133789), P(226.4729766845703, 111.4419937133789), P(221.44097900390625, 112.63699340820312), P(216.11196899414062, 113.90299224853516)] C[P(216.11196899414062, 113.90299224853516), P(207.81497192382812, 115.87399291992188), P(197.48997497558594, 118.32598876953125), P(187.5029754638672, 118.32598876953125)] C[P(187.5029754638672, 118.32598876953125), P(177.41998291015625, 118.32598876953125), P(166.80697631835938, 117.5419921875), P(160.11997985839844, 116.9369888305664)] C[P(160.11997985839844, 116.9369888305664), P(157.89898681640625, 125.26298522949219), P(152.61497497558594, 138.99298095703125), P(140.6209716796875, 148.82098388671875)] C[P(140.6209716796875, 148.82098388671875), P(124.31397247314453, 162.1809844970703), P(112.22396850585938, 168.1409912109375), P(111.71697235107422, 168.3879852294922)] C[P(111.71697235107422, 168.3879852294922), P(108.6199722290039, 169.89797973632812), P(104.8959732055664, 169.1069793701172), P(102.68296813964844, 166.4659881591797)] C[P(102.68296813964844, 166.4659881591797), P(100.469970703125, 163.82699584960938), P(100.33796691894531, 160.02098083496094), P(102.36396789550781, 157.23599243164062)] C[P(102.36396789550781, 157.23599243164062), P(102.43096923828125, 157.1409912109375), P(110.58097076416016, 145.6199951171875), P(110.2679672241211, 132.6719970703125)] C[P(110.2679672241211, 132.6719970703125), P(110.14396667480469, 127.52099609375), P(109.95697021484375, 123.48500061035156), P(109.7659683227539, 120.41099548339844)] C[P(109.7659683227539, 120.41099548339844), P(99.60897064208984, 123.40499877929688), P(82.31497192382812, 130.09799194335938), P(73.39396667480469, 142.69699096679688)] C[P(73.39396667480469, 142.69699096679688), P(59.880001068115234, 161.77999877929688), P(54.33599853515625, 191.16000366210938), P(55.82099914550781, 200.79800415039062)] C[P(55.82099914550781, 200.79800415039062), P(57.367000579833984, 210.83999633789062), P(57.551998138427734, 210.83999633789062), P(62.31699752807617, 210.83999633789062)] C[P(62.31699752807617, 210.83999633789062), P(72.6989974975586, 210.83999633789062), P(81.4209976196289, 211.26499938964844), P(91.2449951171875, 216.6129913330078)] C[P(91.2449951171875, 216.6129913330078), P(99.93499755859375, 221.3419952392578), P(104.72099304199219, 226.5159912109375), P(105.23699188232422, 227.0909881591797)] C[P(105.23699188232422, 227.0909881591797), P(107.18598937988281, 229.2559814453125), P(107.70499420166016, 232.35398864746094), P(106.5679931640625, 235.0349884033203)] C[P(106.5679931640625, 235.0349884033203), P(105.43099212646484, 237.71798706054688), P(102.843994140625, 239.49899291992188), P(99.93299102783203, 239.6029815673828)] 
C[P(99.93299102783203, 239.6029815673828), P(97.12499237060547, 239.70797729492188), P(88.62799072265625, 240.41598510742188), P(83.1669921875, 242.39797973632812)] C[P(83.1669921875, 242.39797973632812), P(80.2669906616211, 243.4509735107422), P(77.76898956298828, 244.6829833984375), P(75.35298919677734, 245.87498474121094)] C[P(75.35298919677734, 245.87498474121094), P(71.48799133300781, 247.781982421875), P(67.83699035644531, 249.58297729492188), P(63.57798767089844, 250.0859832763672)] C[P(63.57798767089844, 250.0859832763672), P(62.192989349365234, 250.24998474121094), P(60.77098846435547, 250.32998657226562), P(59.42498779296875, 250.3049774169922)] C[P(59.42498779296875, 250.3049774169922), P(57.85498809814453, 253.9519805908203), P(55.182987213134766, 258.2989807128906), P(50.688987731933594, 261.74798583984375)] C[P(50.688987731933594, 261.74798583984375), P(47.24898910522461, 264.38897705078125), P(43.74898910522461, 266.0179748535156), P(40.660987854003906, 267.4539794921875)] C[P(40.660987854003906, 267.4539794921875), P(36.868988037109375, 269.218994140625), P(33.87298583984375, 270.61297607421875), P(31.572986602783203, 273.4649658203125)] C[P(31.572986602783203, 273.4649658203125), P(25.544986724853516, 280.9399719238281), P(23.521987915039062, 289.0449523925781), P(23.502986907958984, 289.1259765625)] C[P(23.502986907958984, 289.1259765625), P(22.71498680114746, 292.36297607421875), P(19.8809871673584, 294.708984375), P(16.55298614501953, 294.8599853515625)] C[P(16.55298614501953, 294.8599853515625), P(16.437986373901367, 294.8659973144531), P(16.321985244750977, 294.86798095703125), P(16.206985473632812, 294.86798095703125)] C[P(16.206985473632812, 294.86798095703125), P(13.015985488891602, 294.86798095703125), P(10.152985572814941, 292.8559875488281), P(9.11198616027832, 289.8119812011719)] C[P(9.11198616027832, 289.8119812011719), P(8.642986297607422, 288.4449768066406), P(4.689986228942871, 275.9159851074219), P(9.9509859085083, 257.57696533203125)] C[P(9.9509859085083, 257.57696533203125), P(11.831985473632812, 251.02096557617188), P(14.745985984802246, 245.34596252441406), P(17.562986373901367, 239.85595703125)] C[P(17.562986373901367, 239.85595703125), P(22.14698600769043, 230.9269561767578), P(26.105987548828125, 223.21595764160156), P(24.44498634338379, 213.76695251464844)] C[P(24.44498634338379, 213.76695251464844), P(16.636985778808594, 169.35595703125), P(17.175987243652344, 133.24594116210938), P(26.04598617553711, 106.44395446777344)] C[P(26.04598617553711, 106.44395446777344), P(39.8380012512207, 64.76000213623047), P(73.22599792480469, 41.53499984741211), P(79.77899932861328, 37.30400085449219)] C[P(79.77899932861328, 37.30400085449219), P(83.02999877929688, 35.202999114990234), P(85.95600128173828, 33.35499954223633), P(88.4219970703125, 31.819000244140625)] C[P(88.4219970703125, 31.819000244140625), P(86.13499450683594, 29.996999740600586), P(83.22799682617188, 28.08300018310547), P(79.7719955444336, 26.61400032043457)] C[P(79.7719955444336, 26.61400032043457), P(71.90599822998047, 23.270000457763672), P(68.5999984741211, 22.356000900268555), P(67.68399810791016, 22.13800048828125)] C[P(67.68399810791016, 22.13800048828125), P(67.625, 22.14000129699707), P(67.56800079345703, 22.14000129699707), P(67.51100158691406, 22.14000129699707)] C[P(67.51100158691406, 22.14000129699707), P(64.24600219726562, 22.141000747680664), P(61.736000061035156, 19.985000610351562), P(60.781002044677734, 16.801000595092773)] C[P(60.781002044677734, 16.801000595092773), 
P(59.78900146484375, 13.495000839233398), P(61.63200378417969, 9.946001052856445), P(64.6030044555664, 8.191000938415527)] C[P(64.6030044555664, 8.191000938415527), P(65.16799926757812, 7.855999946594238), P(78.68099975585938, 0.0), P(96.48999786376953, 0.0)] C[P(96.48999786376953, 0.0), P(100.16500091552734, 0.0), P(103.81599426269531, 0.3370000123977661), P(107.34099578857422, 1.0019999742507935)] C[P(107.34099578857422, 1.0019999742507935), P(118.3239974975586, 3.0749998092651367), P(126.08599853515625, 6.171999931335449), P(133.5919952392578, 9.165999412536621)] C[P(133.5919952392578, 9.165999412536621), P(140.92098999023438, 12.089999198913574), P(147.843994140625, 14.851999282836914), P(158.17999267578125, 17.02899932861328)] C[P(158.17999267578125, 17.02899932861328), P(163.20098876953125, 18.086999893188477), P(167.8249969482422, 18.902999877929688), P(172.29598999023438, 19.6929988861084)] C[P(172.29598999023438, 19.6929988861084), P(187.67898559570312, 22.408998489379883), P(200.9639892578125, 24.7549991607666), P(220.82699584960938, 34.89799880981445)] C[P(220.82699584960938, 34.89799880981445), P(246.75099182128906, 48.13800048828125), P(261.3280029296875, 62.624000549316406), P(261.8739929199219, 75.68499755859375)] C[P(261.8739929199219, 75.68499755859375), P(261.9889831542969, 78.43799591064453), P(261.9289855957031, 80.89799499511719), P(261.6929931640625, 83.05999755859375)] C[P(261.6929931640625, 83.05999755859375), P(264.1189880371094, 84.27299499511719), P(267.0669860839844, 85.80599975585938), P(270.04498291015625, 87.5)] C[P(270.04498291015625, 87.5), P(282.16796875, 94.3949966430664), P(287.2519836425781, 99.55899810791016), P(287.5909729003906, 105.3219985961914)] C[P(287.5909729003906, 105.3219985961914), P(287.82598876953125, 109.33200073242188), P(286.7510070800781, 112.63099670410156), P(284.3949890136719, 115.12999725341797)] Z[P(284.3949890136719, 115.12999725341797), P(284.3949890136719, 115.12999725341797)]))\n)\ntensor([[ 0.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 0.0000,\n 0.0000, -1.0000, -1.0000, -1.0000, -1.0000, 284.3950, 115.1300],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 284.3950,\n 115.1300, 280.5420, 119.2150, 274.8650, 119.2150, 272.9990, 119.2150],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 272.9990,\n 119.2150, 269.5390, 119.2150, 265.2610, 118.6420, 259.1310, 117.3560],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 259.1310,\n 117.3560, 254.3160, 116.3460, 250.1700, 115.3390, 246.5140, 114.4520],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 246.5140,\n 114.4520, 239.3220, 112.7050, 234.1240, 111.4420, 229.2310, 111.4420],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 229.2310,\n 111.4420, 226.4730, 111.4420, 221.4410, 112.6370, 216.1120, 113.9030],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 216.1120,\n 113.9030, 207.8150, 115.8740, 197.4900, 118.3260, 187.5030, 118.3260],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 187.5030,\n 118.3260, 177.4200, 118.3260, 166.8070, 117.5420, 160.1200, 116.9370],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 160.1200,\n 116.9370, 157.8990, 125.2630, 152.6150, 138.9930, 140.6210, 148.8210],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 140.6210,\n 148.8210, 124.3140, 162.1810, 112.2240, 168.1410, 111.7170, 168.3880],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 111.7170,\n 168.3880, 108.6200, 169.8980, 104.8960, 169.1070, 102.6830, 166.4660],\n [ 2.0000, -1.0000, -1.0000, 
-1.0000, -1.0000, -1.0000, 102.6830,\n 166.4660, 100.4700, 163.8270, 100.3380, 160.0210, 102.3640, 157.2360],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 102.3640,\n 157.2360, 102.4310, 157.1410, 110.5810, 145.6200, 110.2680, 132.6720],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 110.2680,\n 132.6720, 110.1440, 127.5210, 109.9570, 123.4850, 109.7660, 120.4110],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 109.7660,\n 120.4110, 99.6090, 123.4050, 82.3150, 130.0980, 73.3940, 142.6970],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 73.3940,\n 142.6970, 59.8800, 161.7800, 54.3360, 191.1600, 55.8210, 200.7980],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 55.8210,\n 200.7980, 57.3670, 210.8400, 57.5520, 210.8400, 62.3170, 210.8400],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 62.3170,\n 210.8400, 72.6990, 210.8400, 81.4210, 211.2650, 91.2450, 216.6130],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 91.2450,\n 216.6130, 99.9350, 221.3420, 104.7210, 226.5160, 105.2370, 227.0910],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 105.2370,\n 227.0910, 107.1860, 229.2560, 107.7050, 232.3540, 106.5680, 235.0350],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 106.5680,\n 235.0350, 105.4310, 237.7180, 102.8440, 239.4990, 99.9330, 239.6030],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 99.9330,\n 239.6030, 97.1250, 239.7080, 88.6280, 240.4160, 83.1670, 242.3980],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 83.1670,\n 242.3980, 80.2670, 243.4510, 77.7690, 244.6830, 75.3530, 245.8750],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 75.3530,\n 245.8750, 71.4880, 247.7820, 67.8370, 249.5830, 63.5780, 250.0860],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 63.5780,\n 250.0860, 62.1930, 250.2500, 60.7710, 250.3300, 59.4250, 250.3050],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 59.4250,\n 250.3050, 57.8550, 253.9520, 55.1830, 258.2990, 50.6890, 261.7480],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 50.6890,\n 261.7480, 47.2490, 264.3890, 43.7490, 266.0180, 40.6610, 267.4540],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 40.6610,\n 267.4540, 36.8690, 269.2190, 33.8730, 270.6130, 31.5730, 273.4650],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 31.5730,\n 273.4650, 25.5450, 280.9400, 23.5220, 289.0450, 23.5030, 289.1260],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 23.5030,\n 289.1260, 22.7150, 292.3630, 19.8810, 294.7090, 16.5530, 294.8600],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 16.5530,\n 294.8600, 16.4380, 294.8660, 16.3220, 294.8680, 16.2070, 294.8680],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 16.2070,\n 294.8680, 13.0160, 294.8680, 10.1530, 292.8560, 9.1120, 289.8120],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 9.1120,\n 289.8120, 8.6430, 288.4450, 4.6900, 275.9160, 9.9510, 257.5770],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 9.9510,\n 257.5770, 11.8320, 251.0210, 14.7460, 245.3460, 17.5630, 239.8560],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 17.5630,\n 239.8560, 22.1470, 230.9270, 26.1060, 223.2160, 24.4450, 213.7670],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 24.4450,\n 213.7670, 16.6370, 169.3560, 17.1760, 133.2459, 26.0460, 106.4440],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 26.0460,\n 106.4440, 39.8380, 64.7600, 73.2260, 41.5350, 79.7790, 37.3040],\n [ 2.0000, -1.0000, -1.0000, 
-1.0000, -1.0000, -1.0000, 79.7790,\n 37.3040, 83.0300, 35.2030, 85.9560, 33.3550, 88.4220, 31.8190],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 88.4220,\n 31.8190, 86.1350, 29.9970, 83.2280, 28.0830, 79.7720, 26.6140],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 79.7720,\n 26.6140, 71.9060, 23.2700, 68.6000, 22.3560, 67.6840, 22.1380],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 67.6840,\n 22.1380, 67.6250, 22.1400, 67.5680, 22.1400, 67.5110, 22.1400],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 67.5110,\n 22.1400, 64.2460, 22.1410, 61.7360, 19.9850, 60.7810, 16.8010],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 60.7810,\n 16.8010, 59.7890, 13.4950, 61.6320, 9.9460, 64.6030, 8.1910],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 64.6030,\n 8.1910, 65.1680, 7.8560, 78.6810, 0.0000, 96.4900, 0.0000],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 96.4900,\n 0.0000, 100.1650, 0.0000, 103.8160, 0.3370, 107.3410, 1.0020],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 107.3410,\n 1.0020, 118.3240, 3.0750, 126.0860, 6.1720, 133.5920, 9.1660],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 133.5920,\n 9.1660, 140.9210, 12.0900, 147.8440, 14.8520, 158.1800, 17.0290],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 158.1800,\n 17.0290, 163.2010, 18.0870, 167.8250, 18.9030, 172.2960, 19.6930],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 172.2960,\n 19.6930, 187.6790, 22.4090, 200.9640, 24.7550, 220.8270, 34.8980],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 220.8270,\n 34.8980, 246.7510, 48.1380, 261.3280, 62.6240, 261.8740, 75.6850],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 261.8740,\n 75.6850, 261.9890, 78.4380, 261.9290, 80.8980, 261.6930, 83.0600],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 261.6930,\n 83.0600, 264.1190, 84.2730, 267.0670, 85.8060, 270.0450, 87.5000],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 270.0450,\n 87.5000, 282.1680, 94.3950, 287.2520, 99.5590, 287.5910, 105.3220],\n [ 2.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 287.5910,\n 105.3220, 287.8260, 109.3320, 286.7510, 112.6310, 284.3950, 115.1300],\n [ 6.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, 284.3950,\n 115.1300, -1.0000, -1.0000, -1.0000, -1.0000, 284.3950, 115.1300]])\n"
]
],
[
[
"# Save svg tensor in a .pkl file",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7c8e4a87111193e38f92d49f28a597e88fd829c | 17,150 | ipynb | Jupyter Notebook | notebooks/Creative Experiments.ipynb | 5agado/face-swap | ada4803d4f1d53233ba4592beec75271254e5182 | [
"Apache-2.0"
] | 9 | 2018-07-10T20:06:20.000Z | 2022-01-09T11:09:57.000Z | notebooks/Creative Experiments.ipynb | 5agado/face-swap | ada4803d4f1d53233ba4592beec75271254e5182 | [
"Apache-2.0"
] | null | null | null | notebooks/Creative Experiments.ipynb | 5agado/face-swap | ada4803d4f1d53233ba4592beec75271254e5182 | [
"Apache-2.0"
] | 3 | 2019-10-19T16:42:15.000Z | 2021-02-05T09:10:24.000Z | 33.044316 | 1,225 | 0.564723 | [
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\" style=\"margin-top: 1em;\"><ul class=\"toc-item\"><li><span><a href=\"#Intro\" data-toc-modified-id=\"Intro-1\"><span class=\"toc-item-num\">1 </span>Intro</a></span></li><li><span><a href=\"#Load-Data\" data-toc-modified-id=\"Load-Data-2\"><span class=\"toc-item-num\">2 </span>Load Data</a></span></li><li><span><a href=\"#Cyclical-Feeding\" data-toc-modified-id=\"Cyclical-Feeding-3\"><span class=\"toc-item-num\">3 </span>Cyclical Feeding</a></span><ul class=\"toc-item\"><li><span><a href=\"#TODOs\" data-toc-modified-id=\"TODOs-3.1\"><span class=\"toc-item-num\">3.1 </span>TODOs</a></span></li></ul></li><li><span><a href=\"#Image-Sharpening\" data-toc-modified-id=\"Image-Sharpening-4\"><span class=\"toc-item-num\">4 </span>Image Sharpening</a></span></li><li><span><a href=\"#Source-Data-FaceSwap-and-Upscaling\" data-toc-modified-id=\"Source-Data-FaceSwap-and-Upscaling-5\"><span class=\"toc-item-num\">5 </span>Source Data FaceSwap and Upscaling</a></span></li><li><span><a href=\"#Celeba-Test\" data-toc-modified-id=\"Celeba-Test-6\"><span class=\"toc-item-num\">6 </span>Celeba Test</a></span></li></ul></div>",
"_____no_output_____"
],
[
"# Intro\nNotebook exploring random experiments around the use of the trained Faceswap generators.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nfrom pathlib import Path\nimport sys\nimport pickle\nimport yaml\nfrom numpy.random import shuffle\nfrom ast import literal_eval\nimport tensorflow as tf\n\nimport cv2\n\nfrom tqdm import tqdm\n\n# Plotting\n%matplotlib notebook\n#%matplotlib inline\n\nsns.set_context(\"paper\")\nsns.set_style(\"dark\")\n\nsys.path.append('../face_swap')\n\nfrom utils import image_processing\nfrom utils import super_resolution\n\nfrom face_swap.deep_swap import swap_faces, Swapper\nfrom face_swap import faceswap_utils as utils\nfrom face_swap.plot_utils import stack_images\nfrom face_swap import FaceGenerator, FaceDetector\nfrom face_swap.train import get_original_data\nfrom face_swap import gan, gan_utils\nfrom face_swap import CONFIG_PATH\nfrom face_swap.Face import Face\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"data_folder = Path.home() / \"Documents/datasets/\"\nmodels_folder = Path.home() / \"Documents/models/\"",
"_____no_output_____"
]
],
[
[
"# Load Data",
"_____no_output_____"
]
],
[
[
"# Load two random celeba faces\nfrom_face_img = cv2.cvtColor(cv2.imread(str(data_folder / \"img_align_celeba\" / \n \"000{}{}{}.jpg\".format(*np.random.randint(0, 9, 3)))),\n cv2.COLOR_BGR2RGB)\nto_face_img = cv2.cvtColor(cv2.imread(str(data_folder / \"img_align_celeba\" / \n \"000{}{}{}.jpg\".format(*np.random.randint(0, 9, 3)))),\n cv2.COLOR_BGR2RGB)",
"_____no_output_____"
],
[
"plt.imshow(from_face_img)\nplt.show()\nplt.imshow(to_face_img)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Cyclical Feeding\nCycling feeding own output to generator. Can start with actual face or random noise. \n\n## TODOs\n* Try apply text on image before feeding to generator",
"_____no_output_____"
]
],
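[
[
"# Hypothetical sketch for the TODO above (an assumption, not part of the original pipeline):\n# overlay a short text string on a face image before feeding it back to the generator.\n# `face_img` is assumed to be a BGR uint8 array, e.g. the generator output `out` in the cyclic loop\n# below; the text content, position, font and color are arbitrary illustrative choices.\n# (cv2 is already imported at the top of the notebook.)\ndef overlay_text(face_img, text='feedback'):\n    img_out = face_img.copy()\n    cv2.putText(img_out, text, (5, img_out.shape[0] - 5), cv2.FONT_HERSHEY_SIMPLEX,\n                0.4, (255, 255, 255), 1, cv2.LINE_AA)\n    return img_out\n# Possible use: apply overlay_text to `out` before the random transform in the cyclic loop below.",
"_____no_output_____"
]
],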
[
[
"def crop(img, crop_factor=0.2):\n h, w = img.shape[:2]\n h_crop = int((h * crop_factor)//2)\n w_crop = int((w * crop_factor)//2)\n return img[h_crop:h-h_crop, w_crop:w-w_crop]",
"_____no_output_____"
],
[
"def zoom(img, zoom_factor=1.5):\n h, w = img.shape[:2]\n mat = cv2.getRotationMatrix2D((w//2, h//2), 0, zoom_factor)\n #mat[:, 2] -= (w//2, h//2)\n result = cv2.warpAffine(img, mat, (w, h), borderMode=cv2.BORDER_REPLICATE)\n return result",
"_____no_output_____"
],
[
"# load config\nwith open(CONFIG_PATH, 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\nmodel_cfg = cfg['masked_gan']['v1']",
"_____no_output_____"
],
[
"# load generator and related functions\ngen_a, gen_b, _, _ = gan.get_gan(model_cfg, load_discriminators=False)\n_, _, _, fun_generate_a, fun_mask_a, fun_abgr_a = gan_utils.cycle_variables_masked(gen_a)\n_, _, _, fun_generate_b, fun_mask_b, fun_abgr_b = gan_utils.cycle_variables_masked(gen_b)",
"_____no_output_____"
],
[
"gen_fun_a = lambda x: fun_abgr_a([np.expand_dims(x, 0)])[0][0]\ngen_fun_b = lambda x: fun_abgr_b([np.expand_dims(x, 0)])[0][0]",
"_____no_output_____"
],
[
"generator_a = FaceGenerator.FaceGenerator(\n lambda face_img: FaceGenerator.gan_masked_generate_face(gen_fun_a, face_img),\n input_size=(64, 64), tanh_fix=True)\ngenerator_b = FaceGenerator.FaceGenerator(\n lambda face_img: FaceGenerator.gan_masked_generate_face(gen_fun_b, face_img),\n input_size=(64, 64), tanh_fix=True)",
"_____no_output_____"
],
[
"gen_input = Face(img, img)\nuse_a = True\ngenerator = generator_a if use_a else generator_b\nfor i in range(500):\n out = get_hr_version(sr_model, generator.generate(gen_input, (64, 64))[0])\n #out = generator.generate(gen_input, (128, 128))[0]\n gen_input.face_img = FaceGenerator.random_transform(out, **cfg['random_transform'])\n #gen_input.img = zoom(out)\n res_path = str(data_folder / 'faceswap_experiments/cycle_feed/02/_{:04d}.png'.format(i))\n #cv2.imwrite(res_path, zoom(out))\n cv2.imwrite(res_path, out)\n # swap generator randomly every epoch\n #generator = generator_a if np.random.rand() > 0.5 else generator_b\n # swap generator every N epoch\n if i%50 == 0:\n use_a = not use_a\n generator = generator_a if use_a else generator_b",
"_____no_output_____"
]
],
[
[
"# Image Sharpening",
"_____no_output_____"
]
],
[
[
"# adapted from https://github.com/AdityaPokharel/Sharpen-Image\nregular_kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])\nedge_enhance_kernel = np.array([[-1,-1,-1,-1,-1],\n [-1,2,2,2,-1],\n [-1,2,8,2,-1],\n [-2,2,2,2,-1],\n [-1,-1,-1,-1,-1]])/8.0\ndef sharpen(img, kernel=regular_kernel):\n # apply kernel to input image\n res = cv2.filter2D(img, -1, kernel)\n return res\n\n# see also cv2.detailEnhance(src, sigma_s=10, sigma_r=0.15)",
"_____no_output_____"
],
[
"plt.imshow(sharpen(to_face_img))\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Source Data FaceSwap and Upscaling\nTry to cherry pick some results of face-swapping on the training data, apply upscaling to a reasonable size (e.g. 128x128) and any possible post-processing that might help in improving image quality.\n",
"_____no_output_____"
]
],
[
[
"input_path = data_folder / \"facesets\" / \"cage\"\nout_path = data_folder / \"faceswap_experiments\" / \"source_faceswap\" / \"cage_trump\"\n\nout_size = (64, 64)",
"_____no_output_____"
],
[
"# collected all image paths\nimg_paths = image_processing.get_imgs_paths(input_path, as_str=False)\n\n# iterate over all collected image paths\nfor i, img_path in enumerate(img_paths):\n img = cv2.imread(str(img_path))\n gen_input = Face(img, img)\n gen_face = generator_b.generate(gen_input)[0]\n gen_face = sharpen(gen_face)\n gen_face = cv2.resize(gen_face, out_size)\n cv2.imwrite(str(out_path / \"out_{:04d}.jpg\".format(i)),\n gen_face)",
"_____no_output_____"
]
],
[
[
"# Celeba Test\nTest Celeba training and generation of artworks",
"_____no_output_____"
]
],
[
[
"def plot_sample(images: list, predict_fun,\n tanh_fix=False, save_to: str=None, \n nb_test_imgs=14, nb_columns=3, white_border=3):\n # need number of images divisible by number of columns\n nb_rows = nb_test_imgs//nb_columns\n assert nb_test_imgs % nb_columns == 0\n images = images[0:nb_test_imgs]\n\n figure = np.stack([\n images,\n predict_fun(images),\n ], axis=1)\n # we split images on two columns\n figure = figure.reshape((nb_columns, nb_rows) + figure.shape[1:])\n figure = stack_images(figure)\n img_width = images[0].shape[1]\n img_height = images[0].shape[0]\n for i in range(1, nb_columns):\n x = img_width*2*i\n figure[:, x-white_border:x+white_border, :] = 255.0\n for i in range(1, nb_rows):\n y = img_height*i\n figure[y-white_border:y+white_border, :, :] = 255.0\n\n if save_to:\n cv2.imwrite(save_to, figure)\n else:\n figure = cv2.cvtColor(figure, cv2.COLOR_BGR2RGB)\n #plt.imshow(figure)\n #plt.show()\n display(Image.fromarray(figure))\n # crashes in notebooks\n #cv2.imshow('', figure)\n #cv2.waitKey(0)",
"_____no_output_____"
],
[
"# load config\nwith open(CONFIG_PATH, 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\nmodel_cfg = cfg['masked_gan']['v1']\nmodel_cfg['models_path'] = str(models_folder / \"face_recognition/deep_faceswap/masked_gan/cage_celeba/v4\")",
"_____no_output_____"
],
[
"#tf.reset_default_graph()\nface_detector = FaceDetector.FaceDetector(cfg)",
"_____no_output_____"
],
[
"# load generator and related functions\nnetGA, netGB, _, _ = gan.get_gan(model_cfg, load_discriminators=False)\n\n# define generation and plotting function\n# depending if using masked gan model or not\nif model_cfg['masked']:\n distorted_A, fake_A, mask_A, path_A, fun_mask_A, fun_abgr_A = gan_utils.cycle_variables_masked(netGA)\n distorted_B, fake_B, mask_B, path_B, fun_mask_B, fun_abgr_B = gan_utils.cycle_variables_masked(netGB)\n #gen_plot_a = lambda x: np.array(path_A([x])[0]) \n #gen_plot_b = lambda x: np.array(path_B([x])[0])\n gen_plot_a = lambda x: np.array(fun_abgr_A([x])[0][ :, :, :, 1:]) \n gen_plot_b = lambda x: np.array(fun_abgr_B([x])[0][ :, :, :, 1:])\n gen_plot_mask_a = lambda x: np.array(fun_mask_A([x])[0])*2-1\n gen_plot_mask_b = lambda x: np.array(fun_mask_B([x])[0])*2-1\nelse:\n gen_plot_a = lambda x: netGA.predict(x)\n gen_plot_b = lambda x: netGB.predict(x)",
"_____no_output_____"
],
[
"sr_model = super_resolution.get_SRResNet(cfg['super_resolution'])\nresize_fun = lambda img, size: FaceGenerator.super_resolution_resizing(sr_model, img, size)",
"_____no_output_____"
],
[
"gen_fun_a = lambda x: fun_abgr_A([np.expand_dims(x, 0)])[0][0]\ngen_fun_b = lambda x: fun_abgr_B([np.expand_dims(x, 0)])[0][0]\ngen_input_size = literal_eval(model_cfg['img_shape'])[:2]\nface_generator = FaceGenerator.FaceGenerator(\n lambda face_img: FaceGenerator.gan_masked_generate_face(gen_fun_a, face_img),\n input_size=gen_input_size, config=cfg['swap'], resize_fun=resize_fun)",
"_____no_output_____"
],
[
"swapper = Swapper(face_detector, face_generator, cfg['swap'], save_all=True)",
"_____no_output_____"
],
[
"def swap(img):\n face = Face(img.copy(), Face.Rectangle(0, 64, 64, 0))\n #return swap_faces(face, face_detector, cfg['swap'], face_generator)\n return face.get_face_img()\n#gen_plot_b = lambda x: [swap(img) for img in x]\ngen_plot = lambda x: [swapper.swap(img) for img in x]",
"_____no_output_____"
],
[
"img_dir_a = data_folder / 'facesets/cage'\nimg_dir_b = data_folder / 'celeba_tmp'\n#images_a, images_b = get_original_data(img_dir_a, img_dir_b, img_size=None, tanh_fix=False)\nimages = image_processing.load_data(image_processing.get_imgs_paths(img_dir_a), (128, 128))",
"_____no_output_____"
],
[
"dest_folder = str(data_folder / \"faceswap_experiments/source_faceswap/cage_celeba_masked/test_1/_{}.png\")\nswapper.config['mask_method'] = \"gen_mask\"\nface_generator.border_expand = (0.1, 0.1)\nface_generator.blur_size = 13\nface_generator.align = False\n#shuffle(images)\nfor i in range(20):\n print(i)\n images_subset = images[i*15:(i+1)*15]\n try:\n plot_sample(images_subset, gen_plot, nb_test_imgs=15, nb_columns=3, \n save_to=dest_folder.format(i), tanh_fix=False)\n except FaceDetector.FaceSwapException:\n pass",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c9024beb95e9444f79f9a244cf2da8843b4a50 | 957,460 | ipynb | Jupyter Notebook | notebooks/3_demo_dynamical_relaxation_time.ipynb | ChenSun-Phys/ULDM_x_SPARC | 39ddb3c9f4145f510d391671ff3a8f07b040edfa | [
"MIT"
] | null | null | null | notebooks/3_demo_dynamical_relaxation_time.ipynb | ChenSun-Phys/ULDM_x_SPARC | 39ddb3c9f4145f510d391671ff3a8f07b040edfa | [
"MIT"
] | null | null | null | notebooks/3_demo_dynamical_relaxation_time.ipynb | ChenSun-Phys/ULDM_x_SPARC | 39ddb3c9f4145f510d391671ff3a8f07b040edfa | [
"MIT"
] | null | null | null | 91.781058 | 117,012 | 0.736045 | [
[
[
"In this notebook, we show the dynamical relaxation time.",
"_____no_output_____"
],
[
"# Init",
"_____no_output_____"
]
],
[
[
"from __future__ import division",
"_____no_output_____"
],
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import sys,os\nsys.path.insert(1, os.path.join(sys.path[0], '..'))",
"_____no_output_____"
],
[
"from matplotlib import rcParams, rc",
"_____no_output_____"
],
[
"import spc\nimport model\nimport chi2\nimport margin\nimport tools as tl\n\nimport numpy as np\nimport matplotlib\n%matplotlib notebook\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import quad\nimport h5py\nimport glob\nimport re\n\nimport scan\nimport pickle\nimport glob\nfrom multiprocessing import Pool\nfrom contextlib import closing\n\nfrom matplotlib import cm\nfrom tqdm import tqdm",
"_____no_output_____"
],
[
"plt.rcParams.update({'font.size': 12})",
"_____no_output_____"
],
[
"path = '../data/SPARC.txt'\ndata = spc.readSPARC(path)\n\npath = '../data/SPARC_Lelli2016c.txt'\nspc.readSPARC_ext(data, path)",
"_____no_output_____"
],
[
"data2 = {}\nfor gal in data:\n data2[gal.name] = gal",
"_____no_output_____"
]
],
[
[
"# Functions",
"_____no_output_____"
],
[
"moved to the corresponding .py file",
"_____no_output_____"
]
],
[
[
"# def model.tau(f, m, v=57., rho=0.003):\n# \"\"\" relaxation time computation [Gyr]\n# :param f: fraction\n# :param m: scalar mass [eV]\n# :param v: dispersion [km/s]\n# :param rho: DM density [Msun/pc**3] \n \n# \"\"\"\n# return 0.6 * 1./f**2 * (m/(1.e-22))**3 * (v/100)**6 * (rho/0.1)**(-2)\n\n# model.tau(0.2, 1e-22, 100, 0.1) ",
"_____no_output_____"
],
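    [
      "# Added illustration (hedged sketch, not part of the original notebook or model.py):\n# a standalone version of the scaling relation quoted in the commented-out docstring above,\n# tau ~ 0.6 Gyr * f**-2 * (m / 1e-22 eV)**3 * (sigma / 100 km/s)**6 * (rho / 0.1 Msun/pc^3)**-2.\n# The real model.tau also takes the radius R and a Coulomb-log cut; this keeps only the scaling.\ndef tau_scaling(f, m, v=57., rho=0.003):\n    return 0.6 / f**2 * (m / 1.e-22)**3 * (v / 100.)**6 * (rho / 0.1)**(-2)\n\ntau_scaling(0.2, 1e-22, 100, 0.1)  # ~15 Gyr, matching the commented example call above",
      "_____no_output_____"
    ],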
[
"# def reconstruct_density(gal, flg_give_R=False):\n# \"\"\" reconstruct the local density based on the rotaion curve\n \n# \"\"\"\n# V = gal.Vobs\n# r = gal.R\n# M_unit = 232501.397985234 # Msun computed with km/s, kpc\n# M = V**2 * r * M_unit\n# r_mid = (r[1:] + r[:-1]) /2.\n# dr = r[1:] - r[:-1]\n# rho = (M[1:] - M[:-1]) / 4./np.pi/r_mid**2 / dr /1e9 #[Msun/pc**3]\n# if flg_give_R:\n# return (r_mid, rho)\n# else:\n# return rho",
"_____no_output_____"
]
],
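  [
    [
      "A clarifying note (added; not in the original notebook): the commented-out `reconstruct_density` above is the spherical rotation-curve estimate\n\n$$M(<r)=\\frac{V_{\\rm obs}^2\\,r}{G},\\qquad \\rho(r)\\simeq\\frac{\\Delta M}{4\\pi r_{\\rm mid}^2\\,\\Delta r},$$\n\nwith $r$ in kpc and $V$ in km/s, so the constant `M_unit` $\\approx 2.325\\times10^{5}$ is $1/G$ in those units, and the final division by $10^9$ converts $M_\\odot\\,\\mathrm{kpc}^{-3}$ to $M_\\odot\\,\\mathrm{pc}^{-3}$.",
      "_____no_output_____"
    ]
  ],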
[
[
"# Check the data",
"_____no_output_____"
]
],
[
[
"#gal = data2['UGC01281']\ngal = data2['UGC04325']\nprint(gal.Vobs[-1])\nmodel.reconstruct_density_DM(gal)",
"91.5\n"
],
[
"plt.subplots()\nplt.plot(gal.R, gal.Vobs, '.')\nplt.xlabel('R [kpc]')\nplt.ylabel(r'$v$ km/s')",
"_____no_output_____"
],
[
"fn, _, _ = model.reconstruct_density_DM(gal)\nplt.subplots()\nr_arr = np.logspace(gal.R[0], gal.R[-1])\nplt.plot(r_arr, fn(r_arr), '.')\nplt.xscale('log')\nplt.yscale('log')\nplt.xlabel('R [kpc]')\nplt.ylabel(r'$\\rho$ [M$_\\odot$/pc$^3$]')\nplt.tight_layout()",
"_____no_output_____"
],
[
"vf_arr = []\nrhof_arr = []\n\nfor gal in data:\n v_f = gal.Vobs[-1]\n vf_arr.append(v_f)\n fn,_,_ = model.reconstruct_density_DM(gal)\n rhof_arr.append(fn(gal.R[-1])) \n \nplt.subplots()\nplt.plot(vf_arr, 'k.')\nplt.ylim(0, 400)\nplt.xlabel('Galaxy ID')\nplt.ylabel('V [km/s]')\nplt.title('End velocity of the rotation curve')",
"_____no_output_____"
],
[
"plt.subplots()\nplt.plot(rhof_arr, 'k.')\nplt.yscale('log')\nplt.xlabel('Galaxy ID')\nplt.ylabel(r'$\\rho$ [M$_\\odot$/pc$^3$]')\nplt.title('Density at the end of the rotation curve')",
"_____no_output_____"
],
[
"plt.subplots()\nplt.title(\"Scattering of rotation velocity\")\nplt.xlabel('R [kpc]')\nplt.ylabel('V [km/s]')\n\nfor name, gal in data2.items():\n plt.plot(gal.R, gal.Vobs, lw='0.8')",
"_____no_output_____"
]
],
[
[
"# Relaxatin time at last data point",
"_____no_output_____"
]
],
[
[
"f1 = 0.85\nf2 = 0.15\nm1_arr = np.logspace(-25, -19, 100)\nm2_arr = np.logspace(-25, -19, 100)\n\nm1_mesh, m2_mesh = np.meshgrid(m1_arr, m2_arr, indexing='ij')\nm1_flat, m2_flat = m1_mesh.reshape(-1), m2_mesh.reshape(-1)",
"_____no_output_____"
],
[
"gal = data2['UGC04325']",
"_____no_output_____"
],
[
"tau1_flat = []\ntau1_self_flat = []\n\nfor i in range(len(m1_flat)):\n m1 = m1_flat[i]\n m2 = m2_flat[i]\n \n R = gal.R[-1]\n sigma = model.sigma_disp_over_vcirc(gal, gal.R[-1]) * gal.Vobs[-1]\n rho_fn, _, _ = model.reconstruct_density_DM(gal)\n rho = rho_fn(gal.R[-1])\n cut_log=True\n \n tau1 = 1./(1./model.tau(f1, m1, sigma, rho, R, cut_log=cut_log) + 1./model.tau(f2, m2, sigma, rho, R, cut_log=cut_log))\n tau1_self = model.tau(f1, m1, sigma, rho, R, cut_log=cut_log) \n \n tau1_flat.append(tau1)\n tau1_self_flat.append(tau1_self)\n \ntau1_flat = np.asarray(tau1_flat)\ntau1_self_flat = np.asarray(tau1_self_flat)\n\ntau1_mesh = tau1_flat.reshape(m1_mesh.shape)\ntau1_self_mesh = tau1_self_flat.reshape(m1_mesh.shape)",
"_____no_output_____"
],
[
"_, ax = plt.subplots()\nplt.contourf(m1_mesh, m2_mesh, tau1_mesh, levels=[10, np.inf], colors='lightblue')\nplt.contour(m1_mesh, m2_mesh, tau1_self_mesh, levels=[10], linestyles={'dashed'})\nplt.fill_betweenx(np.logspace(-25, -19), 1e-25, 2.66e-21, color='salmon', alpha=0.5, zorder=0)\n\n#label\nplt.text(4e-23, 1e-20, r\"Lyman-$\\alpha$ constraints\", color='red', fontsize=14, rotation=90)\nplt.text(1e-21, 1e-20, r\"$\\tau$ > 10 Gyr\", color='blue', fontsize=14)\nplt.text(8e-23, 1e-24, r\"Coulomb log breaks for $m_2$\", color='blue', fontsize=14)\nplt.text(1e-25, 1e-24, r\"Coulomb, $m_1$ and $m_2$\", color='blue', fontsize=14)\nplt.text(3e-25, 1e-20, r\"Coulomb, $m_1$\", color='blue', fontsize=14)\n\n\nplt.xscale('log')\nplt.yscale('log')\nplt.xlabel('$m_1$ [eV], 85% of total mass')\nplt.ylabel('$m_2$ [eV], 15% of total mass')\nplt.xlim(1e-25, 1e-19)\nplt.ylim(1e-25, 1e-19)\nplt.title(r\"UGC 1281\")\nax.set_aspect(aspect=0.618)\nplt.tight_layout()\n#plt.savefig('./sol_relaxation_contour.pdf')",
"_____no_output_____"
],
[
"# check relaxation time at the last data point",
"_____no_output_____"
],
[
"gal",
"_____no_output_____"
],
[
"#f1 = 0.85\nf1 = 1.\nm1_target_arr = []\nvf_arr = []\nrhof_arr = []\nm1_arr = np.logspace(-25, -19, 100)\n\nfor gal in data:\n fn, _, _ = model.reconstruct_density_DM(gal) # last data point is selected\n rho_f = fn(gal.R[-1])\n v_f = gal.Vobs[-1] # last data point\n vf_arr.append(v_f)\n rhof_arr.append(rho_f)\n tau1_self_arr = [] \n \n for m1 in m1_arr:\n R = gal.R[-1]\n sigma = model.sigma_disp_over_vcirc(gal, gal.R[-1]) * gal.Vobs[-1]\n cut_log=True \n \n tau1_self = model.tau(f1, m1, sigma=sigma, rho=rho_f, R=gal.R[-1], cut_log=cut_log) \n tau1_self_arr.append(tau1_self)\n tau1_self_arr = np.asarray(tau1_self_arr)\n #print(tau1_self_arr)\n mask = np.where(tau1_self_arr < 1000, True, False)\n #print(mask)\n if sum(mask) > 0:\n m1_target = np.exp(np.interp(np.log(10), np.log(tau1_self_arr[mask]), np.log(m1_arr[mask])))\n m1_target_arr.append(m1_target)",
"_____no_output_____"
]
],
[
[
"This is the result with coulomb log > 1. ",
"_____no_output_____"
]
],
[
[
"plt.subplots()\nplt.plot(m1_target_arr, 'k.')\nplt.yscale('log')\nplt.ylim(1e-25, 1e-19)\nplt.xlabel('Galaxy ID')\nplt.ylabel('m [eV]')\nplt.title('Dynamical relaxation time set to 10 Gyr')",
"_____no_output_____"
],
[
"_, ax = plt.subplots()\nplt.fill_betweenx(np.logspace(-25, -19), 1e-25, 2.66e-21, color='salmon', alpha=0.5, zorder=0)\n\nf1 = 0.85\nf2 = 0.15\nm1_arr = np.logspace(-25, -19, 50)\nm2_arr = np.logspace(-25, -19, 50)\n\nm1_mesh, m2_mesh = np.meshgrid(m1_arr, m2_arr, indexing='ij')\nm1_flat, m2_flat = m1_mesh.reshape(-1), m2_mesh.reshape(-1)\n\nfor gal in data:\n fn, _, _ = model.reconstruct_density_DM(gal) # last data point is selected\n rho_f = fn(gal.R[-1])\n v_f = gal.Vobs[-1] # last data point \n \n tau1_flat = []\n tau1_self_flat = []\n\n for i in range(len(m1_flat)):\n R = gal.R[-1]\n sigma = model.sigma_disp_over_vcirc(gal, gal.R[-1]) * gal.Vobs[-1]\n cut_log=True \n \n m1 = m1_flat[i]\n m2 = m2_flat[i]\n tau1 = 1./(1./model.tau(f1, \n m1, \n sigma=sigma,\n rho=rho_f, \n R=R, \n cut_log=cut_log) + \n 1./model.tau(f2, \n m2,\n sigma=sigma,\n rho=rho_f,\n R=R,\n cut_log=cut_log))\n \n tau1_self = model.tau(f1, \n m1,\n sigma=sigma,\n rho=rho_f,\n R=R,\n cut_log=cut_log) \n\n tau1_flat.append(tau1)\n tau1_self_flat.append(tau1_self)\n\n tau1_flat = np.asarray(tau1_flat)\n tau1_self_flat = np.asarray(tau1_self_flat)\n\n tau1_mesh = tau1_flat.reshape(m1_mesh.shape)\n tau1_self_mesh = tau1_self_flat.reshape(m1_mesh.shape) \n \n plt.contour(m1_mesh, m2_mesh, tau1_mesh, levels=[10], colors='lightblue')\n\n#label\nplt.text(1e-24, 1e-24, r\"Lyman-$\\alpha$ constraints\", color='red', fontsize=14)\nplt.text(1e-21, 1e-20, r\"$\\tau$ > 10 Gyr\", color='blue', fontsize=14)\n\n\nplt.xscale('log')\nplt.yscale('log')\nplt.xlabel('$m_1$ [eV], 85% of total mass')\nplt.ylabel('$m_2$ [eV], 15% of total mass')\nplt.xlim(8e-26, 1e-19)\nplt.ylim(8e-26, 1e-19)\nax.set_aspect(aspect=0.618)\nplt.tight_layout()\n#plt.savefig('./sol_relaxation_contour.pdf')",
"_____no_output_____"
]
],
[
[
"### change the fraction",
"_____no_output_____"
]
],
[
[
"#gal = data2['NGC0100']\ngal = data2['UGC04325']\n#gal = data2['UGC01281']\n#gal = data2['NGC3769']\n#gal = data2['NGC3877']\n#gal = data2['NGC6503']\n\nm2 = 1.e-23 # [eV]\n#f2 = 0.15\nm1_arr = np.logspace(-25.2, -18.8, 50)\nf1_arr = np.linspace(0., 1., 50)\n\nm1_mesh, f1_mesh = np.meshgrid(m1_arr, f1_arr, indexing='ij')\nm1_flat, f1_flat = m1_mesh.reshape(-1), f1_mesh.reshape(-1)\n\ntau1_flat = []\ntau1_self_flat = []\nr_over_rc = 10\n\ncut_log = True\n\nfor i in range(len(m1_flat)):\n m1 = m1_flat[i]\n f1 = f1_flat[i]\n f2 = 1.-f1\n tau1 = 1./(1./model.relaxation_at_rc(m1, gal, f1, multiplier=r_over_rc, cut_log=cut_log) \n + 1./model.relaxation_at_rc(m2, gal, f2, multiplier=r_over_rc, cut_log=cut_log))\n tau1_flat.append(tau1)\n \n tau1_self = model.relaxation_at_rc(m1, gal, f1, multiplier=r_over_rc, cut_log=cut_log) \n tau1_self_flat.append(tau1_self)\n\n \ntau1_flat = np.asarray(tau1_flat)\ntau1_self_flat = np.asarray(tau1_self_flat)\n\ntau1_mesh = tau1_flat.reshape(m1_mesh.shape)\ntau1_self_mesh = tau1_self_flat.reshape(m1_mesh.shape)\n\n\n\n_, ax = plt.subplots()\n#plt.contourf(m1_mesh, f1_mesh, tau1_mesh, levels=[10, np.inf], colors='lightblue')\nplt.contourf(m1_mesh, f1_mesh, tau1_self_mesh, levels=[10, np.inf], colors='lightblue')\nplt.fill_between([1,2], 101, 100, color='C0', label=r\"$\\tau$ > 10 Gyr\", alpha=0.2)\n\n\n#label\n#plt.text(2e-23, 1e-22, r\"Lyman-$\\alpha$\", color='red', fontsize=14)\n#plt.text(3e-21, 0.5, r\"$\\tau$ > 10 Gyr\", color='blue', fontsize=14)\n\n\nplt.xscale('log')\n#plt.yscale('log')\nplt.xlabel('$m_1$ [eV]')\n#plt.ylabel('$m_2$ [eV], 15% of total mass')\nplt.ylabel(r'$f_1$')\nplt.xlim(2e-23, 1e-19)\nplt.ylim(0.02, 1.)\n\n# overlay with Kobayashi\npath = '../data/Kobayashi2017.csv'\ndata_lym_arr = np.loadtxt(path, delimiter=',')\nx = data_lym_arr[:,0]\ny = data_lym_arr[:,1]\nx = np.insert(x, 0, 1e-25)\ny = np.insert(y, 0, y[0])\nplt.fill_between(x, y, 100, color='C1', label=r'Lyman-$\\alpha$', alpha=0.2)\nplt.legend(loc=4)\n\n\nax.set_aspect(aspect=0.618)\nplt.title('%s' %gal.name)\nplt.tight_layout()\nplt.savefig('./plots/relaxation_time_f1_m1_%s.pdf' %gal.name)",
"_____no_output_____"
],
[
"#gal = data2['NGC0100']\ngal = data2['UGC04325']\n#gal = data2['UGC01281']\n#gal = data2['NGC3769']\n#gal = data2['NGC3877']\n#gal = data2['NGC6503']\n\nm2 = 1.e-23 # [eV]\n#f2 = 0.15\nm1_arr = np.logspace(-25.2, -18.8, 50)\nf1_arr = np.linspace(0., 1., 50)\n\nm1_mesh, f1_mesh = np.meshgrid(m1_arr, f1_arr, indexing='ij')\nm1_flat, f1_flat = m1_mesh.reshape(-1), f1_mesh.reshape(-1)\n\ntau1_flat = []\ntau1_self_flat = []\nr_over_rc = 10\n\ncut_log = True\n\nfor i in range(len(m1_flat)):\n m1 = m1_flat[i]\n f1 = f1_flat[i]\n f2 = 1.-f1\n tau1 = 1./(1./model.relaxation_at_rc(m1, gal, f1, multiplier=r_over_rc, cut_log=cut_log) \n + 1./model.relaxation_at_rc(m2, gal, f2, multiplier=r_over_rc, cut_log=cut_log))\n tau1_flat.append(tau1)\n \n tau1_self = model.relaxation_at_rc(m1, gal, f1, multiplier=r_over_rc, cut_log=cut_log) \n tau1_self_flat.append(tau1_self)\n\n \ntau1_flat = np.asarray(tau1_flat)\ntau1_self_flat = np.asarray(tau1_self_flat)\n\ntau1_mesh = tau1_flat.reshape(m1_mesh.shape)\ntau1_self_mesh = tau1_self_flat.reshape(m1_mesh.shape)\n\n\n\n_, ax = plt.subplots()\nplt.contourf(m1_mesh, f1_mesh, tau1_mesh, levels=[10, np.inf], colors='lightblue')\n#plt.contourf(m1_mesh, f1_mesh, tau1_self_mesh, levels=[10, np.inf], colors='lightblue')\nplt.fill_between([1,2], 101, 100, color='C0', label=r\"$\\tau$ > 10 Gyr\", alpha=0.2)\n\n\n#label\n#plt.text(2e-23, 1e-22, r\"Lyman-$\\alpha$\", color='red', fontsize=14)\n#plt.text(3e-21, 0.5, r\"$\\tau$ > 10 Gyr\", color='blue', fontsize=14)\n\n\nplt.xscale('log')\n#plt.yscale('log')\nplt.xlabel('$m_1$ [eV]')\n#plt.ylabel('$m_2$ [eV], 15% of total mass')\nplt.ylabel(r'$f_1$')\nplt.xlim(2e-23, 1e-19)\nplt.ylim(0.02, 1.)\n\n# overlay with Kobayashi\npath = '../data/Kobayashi2017.csv'\ndata_lym_arr = np.loadtxt(path, delimiter=',')\nx = data_lym_arr[:,0]\ny = data_lym_arr[:,1]\nx = np.insert(x, 0, 1e-25)\ny = np.insert(y, 0, y[0])\nplt.fill_between(x, y, 100, color='C1', label=r'Lyman-$\\alpha$', alpha=0.2)\nplt.legend(loc=4)\n\n\nax.set_aspect(aspect=0.618)\nplt.title('%s' %gal.name)\nplt.tight_layout()\n\nplt.savefig('./plots/relaxation_time_f1_m1_two_species_%s.pdf' %gal.name)",
"_____no_output_____"
]
],
[
[
"### velocity dispersion",
"_____no_output_____"
]
],
[
[
"gal = data2['NGC0100']\n\nR = np.logspace(-1, 3)\n#y = model.sigma_disp(gal, R, get_array=False)\n\n# debug interp\n#y_npinterp = model.sigma_disp_over_vcirc(gal, R)\n\n# no interp\nratio_arr = model.sigma_disp_over_vcirc(gal, R)\n\nplt.subplots()\n#plt.plot(R, y)\n#plt.plot(R, y_npinterp)\nplt.plot(R, ratio_arr, '--')\nplt.xscale('log')\n#plt.yscale('log')\nplt.xlabel('R [kpc]')\nplt.ylabel(r'$\\sigma/V_{\\rm circ}$')",
"_____no_output_____"
]
],
[
[
"# The Comloub Log",
"_____no_output_____"
]
],
[
[
"# plot out to check",
"_____no_output_____"
],
[
"gal = spc.findGalaxyByName('UGC04325', data) \n\ninterpol_method = 'linear' #nearest\n\nf_arr = np.linspace(0.01, 1, 200)\n#m = 2e-23\n#m = 1.3e-23\nm = 1e-23\n#m = 3e-24\n#m = 1e-21\nr_supply_arr = np.array([model.supply_radius(f, m, gal) for f in f_arr])\nr_relax_arr = np.array([model.relax_radius(f, m, gal, interpol_method=interpol_method) for f in f_arr])\nr_relax_arr2 = np.array([model.relax_radius(f, m, gal, interpol_method=interpol_method, cut_log=False) for f in f_arr])\nr_core_arr = np.array([1.9 * model.rc(m, model.M_SH(m, gal)) for f in f_arr])\n\nplt.subplots()\nplt.plot(f_arr, r_supply_arr, label=r'$r_{supply}$')\nplt.plot(f_arr, r_relax_arr, label=r'$r_{relax}$')\nplt.plot(f_arr, r_relax_arr2, '--', label=r'$r_{relax}$', color='C1')\nplt.plot(f_arr, r_core_arr, label=r'$r_{core}$')\n\n#plt.xscale('log')\nplt.yscale('log')\nplt.ylabel('r [kpc]')\nplt.xlabel('f')\n#plt.title('m=%.1e eV, %s' %(m, gal.name))\nplt.title('m=%s eV, %s' %(tl.scientific(m), gal.name))\nplt.legend(loc='best')\nplt.tight_layout()\n\n#plt.savefig('./plots/r_comparison_%s.pdf' %(gal.name))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7c90dba7017c7b2b65cd52fc2b57c15c49e9888 | 17,571 | ipynb | Jupyter Notebook | Odata production environment.ipynb | ItamarGronich/hasadna-k8s | 30fd409c7b71b8a12a41f8d6d89a2e87d11f587b | [
"MIT"
] | 9 | 2018-10-28T00:28:09.000Z | 2021-11-26T23:02:05.000Z | Odata production environment.ipynb | ItamarGronich/hasadna-k8s | 30fd409c7b71b8a12a41f8d6d89a2e87d11f587b | [
"MIT"
] | 7 | 2019-01-07T18:12:04.000Z | 2021-11-09T18:04:17.000Z | Odata production environment.ipynb | ItamarGronich/hasadna-k8s | 30fd409c7b71b8a12a41f8d6d89a2e87d11f587b | [
"MIT"
] | 15 | 2018-08-13T13:38:17.000Z | 2020-10-08T17:45:32.000Z | 38.197826 | 361 | 0.576689 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7c9107a715ba2a473c9d2529dd6c55bee6ae5e5 | 7,253 | ipynb | Jupyter Notebook | tutorials/genes.ipynb | JasperSnoek/basenji | 24215ad91920a37d75b139fd5ffac6005b84b4fd | [
"Apache-2.0"
] | 1 | 2021-05-12T08:51:44.000Z | 2021-05-12T08:51:44.000Z | tutorials/genes.ipynb | JasperSnoek/basenji | 24215ad91920a37d75b139fd5ffac6005b84b4fd | [
"Apache-2.0"
] | null | null | null | tutorials/genes.ipynb | JasperSnoek/basenji | 24215ad91920a37d75b139fd5ffac6005b84b4fd | [
"Apache-2.0"
] | null | null | null | 37.580311 | 291 | 0.640287 | [
[
[
"Although Basenji is unaware of the locations of known genes in the genome, we can go in afterwards and ask what a model predicts for those locations to interpret it as a gene expression prediction.\n\nTo do this, you'll need\n * Trained model\n * Gene Transfer Format (GTF) gene annotations\n * BigWig coverage tracks\n * Gene sequences saved in my HDF5 format.\n \nFirst, make sure you have an hg19 FASTA file visible. If you have it already, put a symbolic link into the data directory. Otherwise, I have a machine learning friendly simplified version you can download in the next cell.",
"_____no_output_____"
]
],
[
[
"import os, subprocess\n\nif not os.path.isfile('data/hg19.ml.fa'):\n subprocess.call('curl -o data/hg19.ml.fa https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa', shell=True)\n subprocess.call('curl -o data/hg19.ml.fa.fai https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa.fai', shell=True) ",
"_____no_output_____"
]
],
[
[
"Next, let's grab a few CAGE datasets from FANTOM5 related to heart biology.\n\nThese data were processed by\n1. Aligning with Bowtie2 with very sensitive alignment parameters.\n2. Distributing multi-mapping reads and estimating genomic coverage with bam_cov.py",
"_____no_output_____"
]
],
[
[
"if not os.path.isfile('data/CNhs11760.bw'):\n subprocess.call('curl -o data/CNhs11760.bw https://storage.googleapis.com/basenji_tutorial_data/CNhs11760.bw', shell=True)\n subprocess.call('curl -o data/CNhs12843.bw https://storage.googleapis.com/basenji_tutorial_data/CNhs12843.bw', shell=True)\n subprocess.call('curl -o data/CNhs12856.bw https://storage.googleapis.com/basenji_tutorial_data/CNhs12856.bw', shell=True)_",
"_____no_output_____"
]
],
[
[
"Then we'll write out these BigWig files and labels to a samples table.",
"_____no_output_____"
]
],
[
[
"samples_out = open('data/heart_wigs.txt', 'w')\nprint('aorta\\tdata/CNhs11760.bw', file=samples_out)\nprint('artery\\tdata/CNhs12843.bw', file=samples_out)\nprint('pulmonic_valve\\tdata/CNhs12856.bw', file=samples_out)\nsamples_out.close()",
"_____no_output_____"
]
],
[
[
"Predictions in the portion of the genome that we trained might inflate our accuracy, so we'll focus on chr9 genes, which have formed my typical test set. Then we use [basenji_hdf5_genes.py](https://github.com/calico/basenji/blob/master/bin/basenji_hdf5_genes.py) to create the file.\n\nThe most relevant options are:\n\n| Option/Argument | Value | Note |\n|:---|:---|:---|\n| -g | data/human.hg19.genome | Genome assembly chromosome length to bound gene sequences. |\n| -l | 262144 | Sequence length. |\n| -c | 0.333 | Multiple genes per sequence are allowed, but the TSS must be in the middle 1/3 of the sequence. |\n| -p | 3 | Use 3 threads via \n| -t | data/heart_wigs.txt | Save coverage values from this table of BigWig files. |\n| -w | 128 | Bin the coverage values at 128 bp resolution. |\n| fasta_file | data/hg19.ml.fa | Genome FASTA file for extracting sequences. |\n| gtf_file | data/gencode_chr9.gtf | Gene annotations in gene transfer format. |\n| hdf5_file | data/gencode_chr9_l262k_w128.h5 | Gene sequence output HDF5 file. |",
"_____no_output_____"
]
],
[
[
"! basenji_hdf5_genes.py -g data/human.hg19.genome -l 262144 -c 0.333 -p 3 -t data/heart_wigs.txt -w 128 data/hg19.ml.fa data/gencode_chr9.gtf data/gencode_chr9_l262k_w128.h5",
"_____no_output_____"
]
],
[
[
"Now, you can either train your own model in the [Train/test tutorial](https://github.com/calico/basenji/blob/master/tutorials/train_test.ipynb) or download one that I pre-trained.",
"_____no_output_____"
]
],
[
[
"if not os.path.isfile('models/gm12878_d10.tf.meta'):\n subprocess.call('curl -o models/gm12878_d10.tf.index https://storage.googleapis.com/basenji_tutorial_data/model_gm12878_d10.tf.index', shell=True)\n subprocess.call('curl -o models/gm12878_d10.tf.meta https://storage.googleapis.com/basenji_tutorial_data/model_gm12878_d10.tf.meta', shell=True)\n subprocess.call('curl -o models/gm12878_d10.tf.data-00000-of-00001 https://storage.googleapis.com/basenji_tutorial_data/model_gm12878_d10.tf.data-00000-of-00001', shell=True)",
"_____no_output_____"
]
],
[
[
"Finally, you can offer data/gencode_chr9_l262k_w128.h5 and the model to [basenji_test_genes.py](https://github.com/calico/basenji/blob/master/bin/basenji_test_genes.py) to make gene expression predictions and benchmark them.\n\nThe most relevant options are:\n\n| Option/Argument | Value | Note |\n|:---|:---|:---|\n| -o | data/gencode_chr9_test | Output directory. |\n| --rc | | Average the forward and reverse complement to form prediction. |\n| -s | | Make scatter plots, comparing predictions to experiment values. |\n| --table | | Print gene expression table. |\n| params_file | models/params_small.txt | Table of parameters to setup the model architecture and optimization. |\n| model_file | models/gm12878_best.tf | Trained saved model prefix. |\n| genes_hdf5_file | data/gencode_chr9_l262k_w128.h5 | HDF5 file containing the gene sequences, annotations, and experiment values. |",
"_____no_output_____"
]
],
[
[
"! basenji_test_genes.py -o data/gencode_chr9_test --rc -s --table models/params_small.txt models/gm12878_best.tf data/gencode_chr9_l262k_w128.h5",
"_____no_output_____"
]
],
[
[
"Describe the output...",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7c917d5927173e1a291c197539f670db6d8a798 | 3,353 | ipynb | Jupyter Notebook | TF2_Tutorial/tf4.ipynb | KiLJ4EdeN/Tesnorflow2_Tutorial | 9e74a9df771605cf71a749dc1a16a547e158bd35 | [
"MIT"
] | 2 | 2020-04-04T11:36:11.000Z | 2020-09-29T10:16:38.000Z | TF2_Tutorial/tf4.ipynb | KiLJ4EdeN/TF_Tutorial | 9e74a9df771605cf71a749dc1a16a547e158bd35 | [
"MIT"
] | null | null | null | TF2_Tutorial/tf4.ipynb | KiLJ4EdeN/TF_Tutorial | 9e74a9df771605cf71a749dc1a16a547e158bd35 | [
"MIT"
] | 5 | 2020-04-02T09:29:31.000Z | 2021-12-13T12:15:52.000Z | 26.611111 | 131 | 0.467641 | [
[
[
"# HMM",
"_____no_output_____"
],
[
"import tensorflow_probability as tfp\nimport tensorflow as tf",
"_____no_output_____"
],
[
"tfd = tfp.distributions\n# chances are that the first day can be 80% hot and 20% cold.\ninitial_distribution = tfd.Categorical(probs=[0.8, 0.2])\n# cold day is 0 and hot is 1, so cold is encoded first.\n# first element is the change that the state changes to a cold day. which for cold day is 70% and for hot day its 20%.\ntransition_distribution = tfd.Categorical(probs=[[0.7, 0.3],\n [0.2, 0.8]])\n# loc is the average on cold and hot days respectively. Where scale is the std. (MUST BE FLOATS)\nobservation_distribution = tfd.Normal(loc=[0., 15.], scale=[5., 10.])\n",
"_____no_output_____"
],
[
"model = tfd.HiddenMarkovModel(\n initial_distribution=initial_distribution,\n transition_distribution=transition_distribution,\n observation_distribution=observation_distribution,\n num_steps=7\n)\n# steps is the number of days we want to predict the temperature for.",
"_____no_output_____"
],
[
"mean = model.mean()\n# partially computed.\n# create a session to compute.\nwith tf.compat.v1.Session() as sess:\n print(mean.numpy())",
"[2.9999998 5.9999995 7.4999995 8.25 8.625001 8.812501 8.90625 ]\n"
],
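    [
      "# Added cross-check (hedged; not in the original notebook): the same expected temperatures\n# follow from propagating P(hot) through the transition matrix by hand, since\n# E[temp] = P(hot) * 15 + P(cold) * 0 given the observation means above.\np_hot = 0.2                                    # initial distribution: 80% cold, 20% hot\nfor day in range(7):\n    print(p_hot * 15.)                         # matches model.mean() printed above\n    p_hot = (1. - p_hot) * 0.3 + p_hot * 0.8   # cold->hot 30%, hot->hot 80%",
      "_____no_output_____"
    ],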
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c91e7532fa77f9ee83f7672082c90885072e12 | 181,571 | ipynb | Jupyter Notebook | Advent Of Code 2018 mattmcd.ipynb | mattmcd/AdventOfCode2018 | 1f715eee17653795a13a319bf388d9bb8259f4c4 | [
"Apache-2.0"
] | null | null | null | Advent Of Code 2018 mattmcd.ipynb | mattmcd/AdventOfCode2018 | 1f715eee17653795a13a319bf388d9bb8259f4c4 | [
"Apache-2.0"
] | null | null | null | Advent Of Code 2018 mattmcd.ipynb | mattmcd/AdventOfCode2018 | 1f715eee17653795a13a319bf388d9bb8259f4c4 | [
"Apache-2.0"
] | null | null | null | 44.766026 | 10,548 | 0.547543 | [
[
[
"import numpy as np\nimport pandas as pd\nimport networkx as nx\nimport re\nfrom collections import defaultdict, deque\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"# Helper function to read input from file if too large to paste inline\ndef read_input(fname):\n with open(fname, 'r') as f:\n data_str = f.read()\n return data_str",
"_____no_output_____"
]
],
[
[
"# Day 1: Chronal Calibration \n\"We've detected some temporal anomalies,\" one of Santa's Elves at the Temporal Anomaly Research and Detection Instrument Station tells you. She sounded pretty worried when she called you down here. \"At 500-year intervals into the past, someone has been changing Santa's history!\"\n\n\"The good news is that the changes won't propagate to our time stream for another 25 days, and we have a device\" - she attaches something to your wrist - \"that will let you fix the changes with no such propagation delay. It's configured to send you 500 years further into the past every few days; that was the best we could do on such short notice.\"\n\n\"The bad news is that we are detecting roughly fifty anomalies throughout time; the device will indicate fixed anomalies with stars. The other bad news is that we only have one device and you're the best person for the job! Good lu--\" She taps a button on the device and you suddenly feel like you're falling. To save Christmas, you need to get all fifty stars by December 25th.\n\nCollect stars by solving puzzles. Two puzzles will be made available on each day in the advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!\n\nAfter feeling like you've been falling for a few minutes, you look at the device's tiny screen. \"Error: Device must be calibrated before first use. Frequency drift detected. Cannot maintain destination lock.\" Below the message, the device shows a sequence of changes in frequency (your puzzle input). A value like +6 means the current frequency increases by 6; a value like -3 means the current frequency decreases by 3.\n\nFor example, if the device displays frequency changes of +1, -2, +3, +1, then starting from a frequency of zero, the following changes would occur:\n\n Current frequency 0, change of +1; resulting frequency 1.\n Current frequency 1, change of -2; resulting frequency -1.\n Current frequency -1, change of +3; resulting frequency 2.\n Current frequency 2, change of +1; resulting frequency 3.\n In this example, the resulting frequency is 3.\n\nHere are other example situations:\n\n +1, +1, +1 results in 3\n +1, +1, -2 results in 0\n -1, -2, -3 results in -6\n\nStarting with a frequency of zero, what is the resulting frequency after all of the changes in frequency have been applied?\n\nYour puzzle answer was 402.",
"_____no_output_____"
]
],
[
[
"day1_input = read_input('day_01.txt')",
"_____no_output_____"
],
[
"day1_freq_changes = map(int, day1_input.split())",
"_____no_output_____"
],
[
"# Part 1 - total frequency change\nsum(day1_freq_changes)",
"_____no_output_____"
]
],
[
[
"## Day 1 Part Two \nYou notice that the device repeats the same frequency change list over and over. To calibrate the device, you need to find the first frequency it reaches twice.\n\nFor example, using the same list of changes above, the device would loop as follows:\n\n Current frequency 0, change of +1; resulting frequency 1.\n Current frequency 1, change of -2; resulting frequency -1.\n Current frequency -1, change of +3; resulting frequency 2.\n Current frequency 2, change of +1; resulting frequency 3.\n (At this point, the device continues from the start of the list.)\n Current frequency 3, change of +1; resulting frequency 4.\n Current frequency 4, change of -2; resulting frequency 2, which has already been seen.\nIn this example, the first frequency reached twice is 2. Note that your device might need to repeat its list of frequency changes many times before a duplicate frequency is found, and that duplicates might be found while in the middle of processing the list.\n\nHere are other examples:\n\n +1, -1 first reaches 0 twice.\n +3, +3, +4, -2, -4 first reaches 10 twice.\n -6, +3, +8, +5, -6 first reaches 5 twice.\n +7, +7, -2, -7, -4 first reaches 14 twice.\nWhat is the first frequency your device reaches twice?\n\nYour puzzle answer was 481.",
"_____no_output_____"
]
],
[
[
"def first_freq_seen_twice(changes):\n # Keep looping through input and tracking what how many times we've seen current frequency\n i = 0\n loop_count = 0 \n N = len(changes)\n seen_twice = None\n current_freq = 0\n freq_seen = defaultdict(int)\n freq_seen[0] = 1\n while seen_twice is None:\n if i % N == 0:\n loop_count += 1\n this_change = changes[i % N]\n current_freq += this_change\n freq_seen[current_freq] += 1\n if freq_seen[current_freq] > 1:\n seen_twice = current_freq\n i += 1\n return seen_twice, loop_count",
"_____no_output_____"
],
[
"# first frequency seen twice and number of loop iterations required\nfirst_freq_seen_twice(day1_freq_changes)",
"_____no_output_____"
]
],
[
[
"Feels like there should be a smarter way to do this e.g. use cumsum input list somehow.\nWe see from the naive solution that it takes 142 loops to see a frequency again.\n\nThe plot below shows that the frequency changes are usually small with a few large jumps, and from part 1 we know that each loop has a net offset of +402. So we're interested in number of loops required before second or third regions in plot below start to ovelap either with first region or each other.\n\nI don't have a solution for this yet, one to ponder.",
"_____no_output_____"
]
],
[
[
"_ = plt.plot(np.cumsum(day1_freq_changes))\n_ = plt.show()",
"_____no_output_____"
]
],
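  [
    [
      "# Added sketch (hedged; not the author's solution) of the 'smarter way' hinted at above.\n# After one pass the sequence of frequencies repeats shifted by a constant offset (the Part 1\n# answer, +402 here), so a frequency recurs iff two prefix sums share a residue mod the offset.\n# Assumes the net offset is non-zero; the earliest repeat minimises (extra loops k, position j).\ndef first_repeat_fast(changes):\n    S = np.concatenate(([0], np.cumsum(changes)[:-1]))  # frequency at the start of each step\n    offset = int(np.sum(changes))\n    by_residue = defaultdict(list)\n    for j, val in enumerate(S):\n        by_residue[int(val) % offset].append((int(val), j))\n    best = None  # (extra loops k, position j, repeated frequency)\n    for group in by_residue.values():\n        for val_a, _ in group:\n            for val_b, b in group:\n                diff = val_a - val_b\n                if diff > 0:\n                    cand = (diff // offset, b, val_a)\n                    if best is None or cand < best:\n                        best = cand\n    return best\n\nfirst_repeat_fast(day1_freq_changes)  # last element should match the 481 found by the naive search",
      "_____no_output_____"
    ]
  ],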
[
[
"# Day 2: Inventory Management System \nYou stop falling through time, catch your breath, and check the screen on the device. \"Destination reached. Current Year: 1518. Current Location: North Pole Utility Closet 83N10.\" You made it! Now, to find those anomalies.\n\nOutside the utility closet, you hear footsteps and a voice. \"...I'm not sure either. But now that so many people have chimneys, maybe he could sneak in that way?\" Another voice responds, \"Actually, we've been working on a new kind of suit that would let him fit through tight spaces like that. But, I heard that a few days ago, they lost the prototype fabric, the design plans, everything! Nobody on the team can even seem to remember important details of the project!\"\n\n\"Wouldn't they have had enough fabric to fill several boxes in the warehouse? They'd be stored together, so the box IDs should be similar. Too bad it would take forever to search the warehouse for two similar box IDs...\" They walk too far away to hear any more.\n\nLate at night, you sneak to the warehouse - who knows what kinds of paradoxes you could cause if you were discovered - and use your fancy wrist device to quickly scan every box and produce a list of the likely candidates (your puzzle input).\n\nTo make sure you didn't miss any, you scan the likely candidate boxes again, counting the number that have an ID containing exactly two of any letter and then separately counting those with exactly three of any letter. You can multiply those two counts together to get a rudimentary checksum and compare it to what your device predicts.\n\nFor example, if you see the following box IDs:\n\n abcdef contains no letters that appear exactly two or three times.\n bababc contains two a and three b, so it counts for both.\n abbcde contains two b, but no letter appears exactly three times.\n abcccd contains three c, but no letter appears exactly two times.\n aabcdd contains two a and two d, but it only counts once.\n abcdee contains two e.\n ababab contains three a and three b, but it only counts once.\nOf these box IDs, four of them contain a letter which appears exactly twice, and three of them contain a letter which appears exactly three times. Multiplying these together produces a checksum of 4 * 3 = 12.\n\nWhat is the checksum for your list of box IDs?\n\nYour puzzle answer was 6225.",
"_____no_output_____"
]
],
[
[
"box_ids = read_input('day_02.txt').split('\\n')",
"_____no_output_____"
],
[
"len(box_ids)",
"_____no_output_____"
],
[
"def count_letters(box_id):\n res = defaultdict(int)\n for letter in box_id:\n res[letter] += 1\n count_2 = 1 if 2 in res.values() else 0\n count_3 = 1 if 3 in res.values() else 0\n return count_2, count_3",
"_____no_output_____"
],
[
"# checksum = \nnp.prod(np.array([sum(lst) for lst in zip(*[count_letters(box_id) for box_id in box_ids])]))",
"_____no_output_____"
]
],
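  [
    [
      "# Added restatement (hedged; same result as the np.prod expression above): count IDs\n# containing some letter exactly twice, count IDs containing some letter exactly three\n# times, and multiply the two counts.\nfrom collections import Counter\n\ndef checksum(ids):\n    twos = sum(1 for box_id in ids if 2 in Counter(box_id).values())\n    threes = sum(1 for box_id in ids if 3 in Counter(box_id).values())\n    return twos * threes\n\nchecksum(box_ids)  # expected 6225",
      "_____no_output_____"
    ]
  ],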
[
[
"# Part Two\nConfident that your list of box IDs is complete, you're ready to find the boxes full of prototype fabric.\n\nThe boxes will have IDs which differ by exactly one character at the same position in both strings. For example, given the following box IDs:\n\n abcde\n fghij\n klmno\n pqrst\n fguij\n axcye\n wvxyz\nThe IDs abcde and axcye are close, but they differ by two characters (the second and fourth). However, the IDs fghij and fguij differ by exactly one character, the third (h and u). Those must be the correct boxes.\n\nWhat letters are common between the two correct box IDs? (In the example above, this is found by removing the differing character from either ID, producing fgij.)\n\nYour puzzle answer was revtaubfniyhsgxdoajwkqilp.",
"_____no_output_____"
]
],
[
[
"box_ids_ints = np.array([[ord(c) for c in box_id] for box_id in box_ids])",
"_____no_output_____"
],
[
"def find_diff_1_boxes(box_ids):\n X = np.array([[ord(c) for c in box_id] for box_id in box_ids])\n res = []\n n_boxes = len(box_ids)\n for i in range(n_boxes):\n for j in range(i, n_boxes):\n char_diff = np.not_equal(X[i, :] - X[j, :], 0)\n n_char_diff = char_diff.sum()\n if n_char_diff == 1:\n res.append((i, j, int(np.nonzero(char_diff)[0])))\n return res",
"_____no_output_____"
],
[
"[(box_ids[i], box_ids[j], box_ids[i][:ind] + box_ids[i][ind+1:]) for (i, j, ind) in find_diff_1_boxes(box_ids)]",
"_____no_output_____"
]
],
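  [
    [
      "# Added alternative (hedged; pure Python, same answer as the numpy version above): two IDs\n# are the 'correct' boxes when they differ at exactly one position; the answer is their\n# common characters with that position removed.\nfrom itertools import combinations\n\ndef common_letters(ids):\n    for a, b in combinations(ids, 2):\n        diff = [i for i, (x, y) in enumerate(zip(a, b)) if x != y]\n        if len(diff) == 1:\n            return a[:diff[0]] + a[diff[0] + 1:]\n\ncommon_letters(box_ids)  # expected 'revtaubfniyhsgxdoajwkqilp'",
      "_____no_output_____"
    ]
  ],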
[
[
"# Day 3: No Matter How You Slice It \nThe Elves managed to locate the chimney-squeeze prototype fabric for Santa's suit (thanks to someone who helpfully wrote its box IDs on the wall of the warehouse in the middle of the night). Unfortunately, anomalies are still affecting them - nobody can even agree on how to cut the fabric.\n\nThe whole piece of fabric they're working on is a very large square - at least 1000 inches on each side.\n\nEach Elf has made a claim about which area of fabric would be ideal for Santa's suit. All claims have an ID and consist of a single rectangle with edges parallel to the edges of the fabric. Each claim's rectangle is defined as follows:\n\n* The number of inches between the left edge of the fabric and the left edge of the rectangle.\n* The number of inches between the top edge of the fabric and the top edge of the rectangle.\n* The width of the rectangle in inches.\n* The height of the rectangle in inches.\n\nA claim like `#123 @ 3,2: 5x4` means that claim ID 123 specifies a rectangle 3 inches from the left edge, 2 inches from the top edge, 5 inches wide, and 4 inches tall. Visually, it claims the square inches of fabric represented by # (and ignores the square inches of fabric represented by .) in the diagram below:\n\n ...........\n ...........\n ...#####...\n ...#####...\n ...#####...\n ...#####...\n ...........\n ...........\n ...........\nThe problem is that many of the claims overlap, causing two or more claims to cover part of the same areas. For example, consider the following claims:\n\n #1 @ 1,3: 4x4\n #2 @ 3,1: 4x4\n #3 @ 5,5: 2x2\nVisually, these claim the following areas:\n\n ........\n ...2222.\n ...2222.\n .11XX22.\n .11XX22.\n .111133.\n .111133.\n ........\nThe four square inches marked with X are claimed by both 1 and 2. (Claim 3, while adjacent to the others, does not overlap either of them.)\n\nIf the Elves all proceed with their own plans, none of them will have enough fabric. How many square inches of fabric are within two or more claims?\n\nYour puzzle answer was 116920.",
"_____no_output_____"
]
],
[
[
"def parse_day_03():\n lines = [line.split() for line in read_input('day_03.txt').split('\\n')]\n def parse_rec(rec):\n id = int(rec[0].lstrip('#'))\n x0, y0 = map(int, rec[2].rstrip(':').split(','))\n w, h = map(int, rec[3].split('x'))\n return {'id': id, 'x0': x0, 'y0': y0, 'w': w, 'h': h}\n recs = map(parse_rec, lines)\n return recs",
"_____no_output_____"
],
[
"claims = parse_day_03()",
"_____no_output_____"
],
[
"def find_overlap(claims):\n X = np.zeros((1000, 1000))\n claim_ok = []\n # Part 1: label all the squares claimed\n for claim in claims:\n X[claim['y0']:claim['y0']+claim['h'], claim['x0']:claim['x0']+claim['w']] += 1\n n_overlap = (X > 1).sum()\n \n # Part 2: check whether a claim is the only one for a given region\n for claim in claims:\n all_ok = (X[claim['y0']:claim['y0']+claim['h'], claim['x0']:claim['x0']+claim['w']] == 1).all()\n if all_ok:\n claim_ok.append(claim['id'])\n \n return n_overlap, claim_ok",
"_____no_output_____"
],
[
"find_overlap(claims)",
"_____no_output_____"
]
],
[
[
"# Part Two \nAmidst the chaos, you notice that exactly one claim doesn't overlap by even a single square inch of fabric with any other claim. If you can somehow draw attention to it, maybe the Elves will be able to make Santa's suit after all!\n\nFor example, in the claims above, only claim 3 is intact after all claims are made.\n\nWhat is the ID of the only claim that doesn't overlap?\n\nYour puzzle answer was 382.",
"_____no_output_____"
],
[
"# Day 4: Repose Record \nYou've sneaked into another supply closet - this time, it's across from the prototype suit manufacturing lab. You need to sneak inside and fix the issues with the suit, but there's a guard stationed outside the lab, so this is as close as you can safely get.\n\nAs you search the closet for anything that might help, you discover that you're not the first person to want to sneak in. Covering the walls, someone has spent an hour starting every midnight for the past few months secretly observing this guard post! They've been writing down the ID of the one guard on duty that night - the Elves seem to have decided that one guard was enough for the overnight shift - as well as when they fall asleep or wake up while at their post (your puzzle input).\n\nFor example, consider the following records, which have already been organized into chronological order:\n\n [1518-11-01 00:00] Guard #10 begins shift\n [1518-11-01 00:05] falls asleep\n [1518-11-01 00:25] wakes up\n [1518-11-01 00:30] falls asleep\n [1518-11-01 00:55] wakes up\n [1518-11-01 23:58] Guard #99 begins shift\n [1518-11-02 00:40] falls asleep\n [1518-11-02 00:50] wakes up\n [1518-11-03 00:05] Guard #10 begins shift\n [1518-11-03 00:24] falls asleep\n [1518-11-03 00:29] wakes up\n [1518-11-04 00:02] Guard #99 begins shift\n [1518-11-04 00:36] falls asleep\n [1518-11-04 00:46] wakes up\n [1518-11-05 00:03] Guard #99 begins shift\n [1518-11-05 00:45] falls asleep\n [1518-11-05 00:55] wakes up\nTimestamps are written using year-month-day hour:minute format. The guard falling asleep or waking up is always the one whose shift most recently started. Because all asleep/awake times are during the midnight hour (00:00 - 00:59), only the minute portion (00 - 59) is relevant for those events.\n\nVisually, these records show that the guards are asleep at these times:\n\n Date ID Minute\n 000000000011111111112222222222333333333344444444445555555555\n 012345678901234567890123456789012345678901234567890123456789\n 11-01 #10 .....####################.....#########################.....\n 11-02 #99 ........................................##########..........\n 11-03 #10 ........................#####...............................\n 11-04 #99 ....................................##########..............\n 11-05 #99 .............................................##########.....\nThe columns are Date, which shows the month-day portion of the relevant day; ID, which shows the guard on duty that day; and Minute, which shows the minutes during which the guard was asleep within the midnight hour. (The Minute column's header shows the minute's ten's digit in the first row and the one's digit in the second row.) Awake is shown as ., and asleep is shown as #.\n\nNote that guards count as asleep on the minute they fall asleep, and they count as awake on the minute they wake up. For example, because Guard #10 wakes up at 00:25 on 1518-11-01, minute 25 is marked as awake.\n\nIf you can figure out the guard most likely to be asleep at a specific time, you might be able to trick that guard into working tonight so you can have the best chance of sneaking in. You have two strategies for choosing the best guard/minute combination.\n\n__Strategy 1__: Find the guard that has the most minutes asleep. What minute does that guard spend asleep the most?\n\nIn the example above, Guard #10 spent the most minutes asleep, a total of 50 minutes (20+25+5), while Guard #99 only slept for a total of 30 minutes (10+10+10). 
Guard #10 was asleep most during minute 24 (on two days, whereas any other minute the guard was asleep was only seen on one day).\n\nWhile this example listed the entries in chronological order, your entries are in the order you found them. You'll need to organize them before they can be analyzed.\n\nWhat is the ID of the guard you chose multiplied by the minute you chose? (In the above example, the answer would be 10 * 24 = 240.)\n\nYour puzzle answer was 146622.",
"_____no_output_____"
]
],
[
[
"events = sorted(read_input('day_04.txt').split('\\n'))",
"_____no_output_____"
],
[
"def parse_sleep_events(events):\n res = []\n rec = None\n for event in events:\n if 'Guard' in event:\n # Start new record\n if rec is not None:\n res.append(rec)\n rec = {\n 'guard_id': int(re.findall('#(\\d+)', event)[0]),\n 'sleep': [],\n 'wake': []\n }\n if 'asleep' in event:\n rec['sleep'].append(int(re.findall(' 00:(\\d{2})', event)[0]))\n if 'wakes' in event:\n rec['wake'].append(int(re.findall(' 00:(\\d{2})', event)[0]))\n guard_sleeps = defaultdict(list)\n for rec in res:\n shift = np.zeros(60, dtype=np.int32)\n for sleep in rec['sleep']:\n shift[sleep] = 1\n for wake in rec['wake']:\n shift[wake] = -1\n shift = np.cumsum(shift)\n guard_sleeps[rec['guard_id']].append(shift)\n for guard_id in guard_sleeps.keys():\n guard_sleeps[guard_id] = np.array(guard_sleeps[guard_id])\n return guard_sleeps",
"_____no_output_____"
],
[
"guard_sleeps = parse_sleep_events(events)",
"_____no_output_____"
],
[
"def find_sleepiest(guard_sleeps):\n total_sleep = {k: v.sum() for k, v in guard_sleeps.iteritems()}\n sleepiest = None\n max_sleep = 0\n for guard, sleep in total_sleep.iteritems():\n if sleep > max_sleep:\n sleepiest = guard\n max_sleep = sleep\n most_often_asleep = np.argmax(np.sum(guard_sleeps[sleepiest], axis=0))\n return sleepiest, max_sleep, most_often_asleep, sleepiest*most_often_asleep",
"_____no_output_____"
],
[
"find_sleepiest(guard_sleeps)",
"_____no_output_____"
]
],
[
[
"# Part Two \nStrategy 2: Of all guards, which guard is most frequently asleep on the same minute?\n\nIn the example above, Guard #99 spent minute 45 asleep more than any other guard or minute - three times in total. (In all other cases, any guard spent any minute asleep at most twice.)\n\nWhat is the ID of the guard you chose multiplied by the minute you chose? (In the above example, the answer would be 99 * 45 = 4455.)\n\nYour puzzle answer was 31848.",
"_____no_output_____"
]
],
[
[
"def find_most_often_asleep(guard_sleeps):\n often_asleep = {k: np.sum(v, axis=0) for k, v in guard_sleeps.iteritems()}\n most_sleeps = 0\n sleep_time = None\n sleep_guard = None\n for guard, sleep in often_asleep.iteritems():\n if np.max(sleep) > most_sleeps:\n most_sleeps = np.max(sleep)\n sleep_time = np.argmax(sleep)\n sleep_guard = guard\n return sleep_guard, most_sleeps, sleep_time, sleep_guard * sleep_time",
"_____no_output_____"
],
[
"find_most_often_asleep(guard_sleeps)",
"_____no_output_____"
]
],
[
[
"# Day 5: Alchemical Reduction \nYou've managed to sneak in to the prototype suit manufacturing lab. The Elves are making decent progress, but are still struggling with the suit's size reduction capabilities.\n\nWhile the very latest in 1518 alchemical technology might have solved their problem eventually, you can do better. You scan the chemical composition of the suit's material and discover that it is formed by extremely long polymers (one of which is available as your puzzle input).\n\nThe polymer is formed by smaller units which, when triggered, react with each other such that two adjacent units of the same type and opposite polarity are destroyed. Units' types are represented by letters; units' polarity is represented by capitalization. For instance, r and R are units with the same type but opposite polarity, whereas r and s are entirely different types and do not react.\n\nFor example:\n\n- In aA, a and A react, leaving nothing behind.\n- In abBA, bB destroys itself, leaving aA. As above, this then destroys itself, leaving nothing.\n- In abAB, no two adjacent units are of the same type, and so nothing happens.\n- In aabAAB, even though aa and AA are of the same type, their polarities match, and so nothing happens.\n\nNow, consider a larger example, dabAcCaCBAcCcaDA:\n\n dabAcCaCBAcCcaDA The first 'cC' is removed.\n dabAaCBAcCcaDA This creates 'Aa', which is removed.\n dabCBAcCcaDA Either 'cC' or 'Cc' are removed (the result is the same).\n dabCBAcaDA No further actions can be taken.\nAfter all possible reactions, the resulting polymer contains 10 units.\n\nHow many units remain after fully reacting the polymer you scanned? \n\nYour puzzle answer was 11814.",
"_____no_output_____"
]
],
[
[
"polymer = read_input('day_05.txt')",
"_____no_output_____"
],
[
"def reduce_polymer(polymer, remove_unit=None):\n lower_letters = [chr(x) for x in range(ord('a'), ord('z') + 1)]\n upper_letters = [chr(x) for x in range(ord('A'), ord('Z') + 1)]\n lower_upper = [low + upp for low, upp in zip(lower_letters, upper_letters)]\n upper_lower = [upp + low for low, upp in zip(lower_letters, upper_letters)]\n if remove_unit is not None:\n polymer = polymer.replace(remove_unit.lower(), '').replace(remove_unit.upper(), '')\n n_poly = len(polymer)\n n_poly_new = n_poly\n done = False\n while not done:\n for lu in lower_upper:\n polymer = polymer.replace(lu, '')\n for ul in upper_lower:\n polymer = polymer.replace(ul, '')\n n_poly_new = len(polymer)\n done = n_poly_new == n_poly\n n_poly = n_poly_new\n \n return polymer",
"_____no_output_____"
],
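    [
      "# Added alternative (hedged; not the author's approach): a single left-to-right pass using a\n# stack. Two adjacent units react exactly when they are the same letter in opposite case,\n# i.e. their character codes differ by 32; the result should agree with reduce_polymer above.\ndef reduce_polymer_stack(polymer, remove_unit=None):\n    if remove_unit is not None:\n        polymer = polymer.replace(remove_unit.lower(), '').replace(remove_unit.upper(), '')\n    stack = []\n    for unit in polymer:\n        if stack and abs(ord(stack[-1]) - ord(unit)) == 32:\n            stack.pop()  # this pair annihilates\n        else:\n            stack.append(unit)\n    return ''.join(stack)",
      "_____no_output_____"
    ],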
[
"len(reduce_polymer(polymer))",
"_____no_output_____"
]
],
[
[
"# Part Two \nTime to improve the polymer.\n\nOne of the unit types is causing problems; it's preventing the polymer from collapsing as much as it should. Your goal is to figure out which unit type is causing the most problems, remove all instances of it (regardless of polarity), fully react the remaining polymer, and measure its length.\n\nFor example, again using the polymer dabAcCaCBAcCcaDA from above:\n\n- Removing all A/a units produces dbcCCBcCcD. Fully reacting this polymer produces dbCBcD, which has length 6.\n- Removing all B/b units produces daAcCaCAcCcaDA. Fully reacting this polymer produces daCAcaDA, which has length 8.\n- Removing all C/c units produces dabAaBAaDA. Fully reacting this polymer produces daDA, which has length 4.\n- Removing all D/d units produces abAcCaCBAcCcaA. Fully reacting this polymer produces abCBAc, which has length 6.\nIn this example, removing all C/c units was best, producing the answer 4.\n\nWhat is the length of the shortest polymer you can produce by removing all units of exactly one type and fully reacting the result?\n\nYour puzzle answer was 4282.",
"_____no_output_____"
]
],
[
[
"min([len(reduce_polymer(polymer, chr(x))) for x in range(ord('a'), ord('z')+1)])",
"_____no_output_____"
]
],
[
[
"# Day 6: Chronal Coordinates \nThe device on your wrist beeps several times, and once again you feel like you're falling.\n\n\"Situation critical,\" the device announces. \"Destination indeterminate. Chronal interference detected. Please specify new target coordinates.\"\n\nThe device then produces a list of coordinates (your puzzle input). Are they places it thinks are safe or dangerous? It recommends you check manual page 729. The Elves did not give you a manual.\n\nIf they're dangerous, maybe you can minimize the danger by finding the coordinate that gives the largest distance from the other points.\n\nUsing only the Manhattan distance, determine the area around each coordinate by counting the number of integer X,Y locations that are closest to that coordinate (and aren't tied in distance to any other coordinate).\n\nYour goal is to find the size of the largest area that isn't infinite. For example, consider the following list of coordinates:\n\n 1, 1\n 1, 6\n 8, 3\n 3, 4\n 5, 5\n 8, 9\nIf we name these coordinates A through F, we can draw them on a grid, putting 0,0 at the top left:\n\n ..........\n .A........\n ..........\n ........C.\n ...D......\n .....E....\n .B........\n ..........\n ..........\n ........F.\nThis view is partial - the actual grid extends infinitely in all directions. Using the Manhattan distance, each location's closest coordinate can be determined, shown here in lowercase:\n\n aaaaa.cccc\n aAaaa.cccc\n aaaddecccc\n aadddeccCc\n ..dDdeeccc\n bb.deEeecc\n bBb.eeee..\n bbb.eeefff\n bbb.eeffff\n bbb.ffffFf\nLocations shown as . are equally far from two or more coordinates, and so they don't count as being closest to any.\n\nIn this example, the areas of coordinates A, B, C, and F are infinite - while not shown here, their areas extend forever outside the visible grid. However, the areas of coordinates D and E are finite: D is closest to 9 locations, and E is closest to 17 (both including the coordinate's location itself). Therefore, in this example, the size of the largest area is 17.\n\nWhat is the size of the largest area that isn't infinite?\n\nYour puzzle answer was 4342.",
"_____no_output_____"
]
],
[
[
"coords = np.array([[int(x), int(y)] for x, y in [c.split(',') for c in read_input('day_06.txt').split('\\n') ]])",
"_____no_output_____"
],
[
" coords[:10]",
"_____no_output_____"
],
[
"def label_closest(coords):\n x_max, y_max = coords.max(axis=0) + 1\n region = np.nan*np.zeros((x_max, y_max))\n for x in range(x_max):\n for y in range(y_max):\n dist = np.sum(np.abs(coords - np.array([x, y])), axis=1)\n if len(dist[dist == dist.min()]) == 1:\n region[x, y] = np.argmin(dist)\n return region",
"_____no_output_____"
],
[
"closest = label_closest(coords)",
"_____no_output_____"
],
[
"def find_largest_finite_area(closest):\n # Ignore points that go to infinity ie ones on boundary\n on_boundary = [int(x) for x in list(\n set(closest[0, :].tolist()) | set(closest[-1, :].tolist()) \n | set(closest[:, 0].tolist()) | set(closest[:, -1].tolist())) if not np.isnan(x)]\n all_indexes = [int(x) for x in np.unique(closest) if not np.isnan(x)]\n finite_region_indexes = list(set(all_indexes) - set(on_boundary))\n max_area = 0\n max_area_index = None\n for ind in finite_region_indexes:\n area = np.sum(closest == ind)\n if area > max_area:\n max_area = area\n max_area_index = ind\n return max_area_index, max_area",
"_____no_output_____"
],
[
"find_largest_finite_area(closest)",
"_____no_output_____"
]
],
[
[
"# Part Two \nOn the other hand, if the coordinates are safe, maybe the best you can do is try to find a region near as many coordinates as possible.\n\nFor example, suppose you want the sum of the Manhattan distance to all of the coordinates to be less than 32. For each location, add up the distances to all of the given coordinates; if the total of those distances is less than 32, that location is within the desired region. Using the same coordinates as above, the resulting region looks like this:\n\n ..........\n .A........\n ..........\n ...###..C.\n ..#D###...\n ..###E#...\n .B.###....\n ..........\n ..........\n ........F.\nIn particular, consider the highlighted location 4,3 located at the top middle of the region. Its calculation is as follows, where abs() is the absolute value function:\n\n- Distance to coordinate A: `abs(4-1) + abs(3-1) = 5`\n- Distance to coordinate B: `abs(4-1) + abs(3-6) = 6`\n- Distance to coordinate C: `abs(4-8) + abs(3-3) = 4`\n- Distance to coordinate D: `abs(4-3) + abs(3-4) = 2`\n- Distance to coordinate E: `abs(4-5) + abs(3-5) = 3`\n- Distance to coordinate F: `abs(4-8) + abs(3-9) = 10`\n- Total distance: `5 + 6 + 4 + 2 + 3 + 10 = 30`\n\nBecause the total distance to all coordinates (30) is less than 32, the location is __within__ the region.\n\nThis region, which also includes coordinates D and E, has a total size of __16__.\n\nYour actual region will need to be much larger than this example, though, instead including all locations with a total distance of less than 10000.\n\nWhat is the size of the region containing all locations which have a total distance to all given coordinates of less than 10000?\n\nYour puzzle answer was 42966.",
"_____no_output_____"
]
],
[
[
"def label_total_dist(coords):\n x_max, y_max = coords.max(axis=0) + 1\n region = np.nan*np.zeros((x_max, y_max))\n for x in range(x_max):\n for y in range(y_max):\n region[x, y] = np.sum(np.abs(coords - np.array([x, y])))\n return region",
"_____no_output_____"
],
[
"total_dist = label_total_dist(coords)",
"_____no_output_____"
],
[
"np.sum(total_dist < 10000)",
"_____no_output_____"
]
],
[
[
"# Day 7: The Sum of Its Parts \nYou find yourself standing on a snow-covered coastline; apparently, you landed a little off course. The region is too hilly to see the North Pole from here, but you do spot some Elves that seem to be trying to unpack something that washed ashore. It's quite cold out, so you decide to risk creating a paradox by asking them for directions.\n\n\"Oh, are you the search party?\" Somehow, you can understand whatever Elves from the year 1018 speak; you assume it's Ancient Nordic Elvish. Could the device on your wrist also be a translator? \"Those clothes don't look very warm; take this.\" They hand you a heavy coat.\n\n\"We do need to find our way back to the North Pole, but we have higher priorities at the moment. You see, believe it or not, this box contains something that will solve all of Santa's transportation problems - at least, that's what it looks like from the pictures in the instructions.\" It doesn't seem like they can read whatever language it's in, but you can: \"Sleigh kit. Some assembly required.\"\n\n\"'Sleigh'? What a wonderful name! You must help us assemble this 'sleigh' at once!\" They start excitedly pulling more parts out of the box.\n\nThe instructions specify a series of steps and requirements about which steps must be finished before others can begin (your puzzle input). Each step is designated by a single letter. For example, suppose you have the following instructions:\n\n Step C must be finished before step A can begin.\n Step C must be finished before step F can begin.\n Step A must be finished before step B can begin.\n Step A must be finished before step D can begin.\n Step B must be finished before step E can begin.\n Step D must be finished before step E can begin.\n Step F must be finished before step E can begin.\n\nVisually, these requirements look like this:\n\n\n -->A--->B--\n / \\ \\\n C -->D----->E\n \\ /\n ---->F-----\nYour first goal is to determine the order in which the steps should be completed. If more than one step is ready, choose the step which is first alphabetically. In this example, the steps would be completed as follows:\n\n- Only C is available, and so it is done first.\n- Next, both A and F are available. A is first alphabetically, so it is done next.\n- Then, even though F was available earlier, steps B and D are now also available, and B is the first alphabetically of the three.\n- After that, only D and F are available. E is not available because only some of its prerequisites are complete. Therefore, D is completed next.\n- F is the only choice, so it is done next.\n- Finally, E is completed.\n\nSo, in this example, the correct order is CABDFE.\n\nIn what order should the steps in your instructions be completed?\n\nYour puzzle answer was `SCLPAMQVUWNHODRTGYKBJEFXZI`.",
"_____no_output_____"
]
],
[
[
"step_dep = [[c[5], c[-12]] for c in read_input('day_07.txt').split('\\n')]",
"_____no_output_____"
],
[
"def parse_dep(step_dep):\n pre_cond, step = zip(*step_dep)\n all_steps = list(set(pre_cond) | set(step))\n deps = defaultdict(list)\n for d in step_dep:\n deps[d[1]].append(d[0])\n for d in list(set(all_steps) - set(deps.keys())):\n deps[d] = []\n return deps",
"_____no_output_____"
],
[
"deps = parse_dep(step_dep)",
"_____no_output_____"
],
[
"def complete_steps(deps, n_helpers=0, step_base_time=60, display=False):\n steps_taken = ''\n steps_left = deps.keys()\n workers = {n: [] for n in range(n_helpers+1)}\n steps_in_progress = ''\n time_taken = 0\n while steps_left != []:\n # List of all steps with no dependencies\n next_steps = sorted([k for k, v in deps.iteritems() if v == [] and k not in steps_taken])\n \n # Allocate all available workers to the next available steps\n for step in next_steps:\n for w, v in workers.iteritems():\n if step in steps_in_progress:\n break\n if v == []:\n workers[w] = (step, step_base_time + ord(step) - ord('A') + 1)\n steps_in_progress += step\n \n if display:\n # Show what workers are doing and time remaining\n print(workers)\n \n # Increment time to next task completion\n time_to_next_completed_step = min([v[1] for v in workers.values() if v != []])\n time_taken += time_to_next_completed_step\n \n # Update time remaining\n for w, v in workers.iteritems():\n if v != []:\n workers[w] = (workers[w][0], workers[w][1] - time_to_next_completed_step)\n \n # Record completed steps\n for w, v in workers.iteritems():\n if v != [] and v[1] == 0:\n steps_taken += v[0]\n steps_in_progress = ''.join(list(set(steps_in_progress) - set(v[0])))\n workers[w] = []\n \n # Update dependencies to remove completed steps\n deps = {k: list(set(v) - set(steps_taken)) for k, v in deps.iteritems()}\n \n # Update list of steps still to do\n steps_left = list(set(steps_left) - set(steps_taken))\n return steps_taken, time_taken",
"_____no_output_____"
],
[
"complete_steps(parse_dep(step_dep), n_helpers=4, display=True)",
"{0: ('S', 79), 1: [], 2: [], 3: [], 4: []}\n{0: ('C', 63), 1: [], 2: [], 3: [], 4: []}\n{0: ('L', 72), 1: ('P', 76), 2: [], 3: [], 4: []}\n{0: ('V', 82), 1: ('P', 4), 2: ('W', 83), 3: [], 4: []}\n{0: ('V', 78), 1: ('A', 61), 2: ('W', 79), 3: ('M', 73), 4: ('Q', 77)}\n{0: ('V', 17), 1: ('Y', 85), 2: ('W', 18), 3: ('M', 12), 4: ('Q', 16)}\n{0: ('V', 5), 1: ('Y', 73), 2: ('W', 6), 3: [], 4: ('Q', 4)}\n{0: ('V', 1), 1: ('Y', 69), 2: ('W', 2), 3: [], 4: []}\n{0: ('U', 81), 1: ('Y', 68), 2: ('W', 1), 3: [], 4: []}\n{0: ('U', 80), 1: ('Y', 67), 2: ('N', 74), 3: [], 4: []}\n{0: ('U', 13), 1: [], 2: ('N', 7), 3: [], 4: []}\n{0: ('U', 6), 1: ('H', 68), 2: [], 3: [], 4: []}\n{0: [], 1: ('H', 62), 2: [], 3: [], 4: []}\n{0: ('O', 75), 1: [], 2: [], 3: [], 4: []}\n{0: ('D', 64), 1: ('T', 80), 2: [], 3: [], 4: []}\n{0: ('R', 78), 1: ('T', 16), 2: [], 3: [], 4: []}\n{0: ('R', 62), 1: ('G', 67), 2: [], 3: [], 4: []}\n{0: [], 1: ('G', 5), 2: [], 3: [], 4: []}\n{0: ('K', 71), 1: [], 2: [], 3: [], 4: []}\n{0: ('B', 62), 1: [], 2: [], 3: [], 4: []}\n{0: ('J', 70), 1: [], 2: [], 3: [], 4: []}\n{0: ('E', 65), 1: [], 2: [], 3: [], 4: []}\n{0: ('F', 66), 1: [], 2: [], 3: [], 4: []}\n{0: ('X', 84), 1: [], 2: [], 3: [], 4: []}\n{0: ('Z', 86), 1: [], 2: [], 3: [], 4: []}\n{0: ('I', 69), 1: [], 2: [], 3: [], 4: []}\n"
]
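,
[
"# Part 1 usage (a sketch added for completeness, not the original invocation):\n# with a single worker the completion order is just the greedy alphabetical\n# topological order, so the same routine answers Part 1; step_base_time only\n# affects the elapsed time, not the order.\npart1_order, _ = complete_steps(parse_dep(step_dep), n_helpers=0)\npart1_order",
"_____no_output_____"
]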
],
[
[
"# Part Two \nAs you're about to begin construction, four of the Elves offer to help. \"The sun will set soon; it'll go faster if we work together.\" Now, you need to account for multiple people working on steps simultaneously. If multiple steps are available, workers should still begin them in alphabetical order.\n\nEach step takes 60 seconds plus an amount corresponding to its letter: A=1, B=2, C=3, and so on. So, step A takes 60+1=61 seconds, while step Z takes 60+26=86 seconds. No time is required between steps.\n\nTo simplify things for the example, however, suppose you only have help from one Elf (a total of two workers) and that each step takes 60 fewer seconds (so that step A takes 1 second and step Z takes 26 seconds). Then, using the same instructions as above, this is how each second would be spent:\n\n Second Worker 1 Worker 2 Done\n 0 C . \n 1 C . \n 2 C . \n 3 A F C\n 4 B F CA\n 5 B F CA\n 6 D F CAB\n 7 D F CAB\n 8 D F CAB\n 9 D . CABF\n 10 E . CABFD\n 11 E . CABFD\n 12 E . CABFD\n 13 E . CABFD\n 14 E . CABFD\n 15 . . CABFDE\n \nEach row represents one second of time. The Second column identifies how many seconds have passed as of the beginning of that second. Each worker column shows the step that worker is currently doing (or . if they are idle). The Done column shows completed steps.\n\nNote that the order of the steps has changed; this is because steps now take time to finish and multiple workers can begin multiple steps simultaneously.\n\nIn this example, it would take 15 seconds for two workers to complete these steps.\n\nWith 5 workers and the 60+ second step durations described above, how long will it take to complete all of the steps?\n\nYour puzzle answer was `1234`.",
"_____no_output_____"
],
[
"# Day 8: Memory Maneuver \nThe sleigh is much easier to pull than you'd expect for something its weight. Unfortunately, neither you nor the Elves know which way the North Pole is from here.\n\nYou check your wrist device for anything that might help. It seems to have some kind of navigation system! Activating the navigation system produces more bad news: \"Failed to start navigation system. Could not read software license file.\"\n\nThe navigation system's license file consists of a list of numbers (your puzzle input). The numbers define a data structure which, when processed, produces some kind of tree that can be used to calculate the license number.\n\nThe tree is made up of nodes; a single, outermost node forms the tree's root, and it contains all other nodes in the tree (or contains nodes that contain nodes, and so on).\n\nSpecifically, a node consists of:\n\n- A header, which is always exactly two numbers:\n- - The quantity of child nodes.\n- - The quantity of metadata entries.\n- Zero or more child nodes (as specified in the header).\n- One or more metadata entries (as specified in the header).\n\nEach child node is itself a node that has its own header, child nodes, and metadata. For example:\n\n 2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2\n A----------------------------------\n B----------- C-----------\n D-----\nIn this example, each node of the tree is also marked with an underline starting with a letter for easier identification. In it, there are four nodes:\n\n- A, which has 2 child nodes (B, C) and 3 metadata entries (1, 1, 2).\n- B, which has 0 child nodes and 3 metadata entries (10, 11, 12).\n- C, which has 1 child node (D) and 1 metadata entry (2).\n- D, which has 0 child nodes and 1 metadata entry (99).\nThe first check done on the license file is to simply add up all of the metadata entries. In this example, that sum is 1+1+2+10+11+12+2+99=138.\n\nWhat is the sum of all metadata entries?\n\nYour puzzle answer was 36307.",
"_____no_output_____"
]
],
[
[
"license = map(int, read_input('day_08.txt').split())",
"_____no_output_____"
],
[
"license[:10]",
"_____no_output_____"
],
[
"def parse_license(license):\n nodes = {}\n node = 0\n n_node = 0\n meta_sum = 0\n n_meta = 0\n parent = None\n mode = 'read_n_child'\n for x in license:\n if mode == 'read_n_child':\n if node not in nodes.keys():\n nodes[node] = {\n 'n_child': x, 'n_meta': 0, 'meta': [], \n 'parent': parent, 'children': [],\n 'value': 0 # Part 2\n }\n mode = 'read_n_meta'\n else:\n mode = 'read_meta'\n continue\n if mode == 'read_n_meta':\n nodes[node]['n_meta'] = x\n if nodes[node]['n_child'] == 0:\n mode = 'read_meta'\n n_meta = x\n else:\n # Create new node\n parent = n_node\n n_node += 1\n node = n_node\n mode = 'read_n_child'\n continue\n if mode == 'read_meta':\n nodes[node]['meta'].append(x)\n meta_sum += x\n n_meta -= 1\n if n_meta == 0:\n # Part 2\n if nodes[node]['n_child'] == 0:\n nodes[node]['value'] = sum(nodes[node]['meta'])\n else:\n for m in nodes[node]['meta']:\n # print(node, m, nodes[node]['children'])\n if m <= len(nodes[node]['children']):\n child_value = nodes[ nodes[node]['children'][m-1] ]['value']\n # print(node, child_value)\n nodes[node]['value'] += child_value\n \n if nodes[node]['parent'] is not None:\n nodes[nodes[node]['parent']]['children'].append(node)\n if len(nodes[nodes[node]['parent']]['children']) == nodes[nodes[node]['parent']]['n_child']:\n # Read parent metadata\n n_meta = nodes[nodes[node]['parent']]['n_meta']\n node = nodes[node]['parent']\n mode = 'read_meta'\n else:\n # Create new node\n parent = nodes[node]['parent']\n n_node = n_node + 1\n node = n_node\n mode = 'read_n_child'\n continue\n return nodes, meta_sum",
"_____no_output_____"
],
[
"parse_license(map(int, '2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2'.split()))\n# parse_license(map(int, '2 3 0 3 10 11 12 1 1 0 1 99 1 1 1 2'.split()))",
"_____no_output_____"
],
[
"license_nodes, license_meta_sum = parse_license(license)",
"_____no_output_____"
],
[
"license_meta_sum",
"_____no_output_____"
]
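,
[
"# Alternative sketch (not the approach used above): a recursive parse of the\n# license numbers handles both parts in a few lines. The argument is an\n# iterator over the flat list of numbers; returns (metadata sum, node value).\ndef parse_node(it):\n    n_child = next(it)\n    n_meta = next(it)\n    children = [parse_node(it) for _ in range(n_child)]\n    meta = [next(it) for _ in range(n_meta)]\n    meta_sum = sum(meta) + sum(c[0] for c in children)\n    if n_child == 0:\n        value = sum(meta)\n    else:\n        # Metadata entries are 1-based indexes into the children\n        value = sum(children[m-1][1] for m in meta if 1 <= m <= n_child)\n    return meta_sum, value\n\nparse_node(iter(license))",
"_____no_output_____"
]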
],
[
[
"# Part Two \nThe second check is slightly more complicated: you need to find the value of the root node (A in the example above).\n\nThe value of a node depends on whether it has child nodes.\n\nIf a node has no child nodes, its value is the sum of its metadata entries. So, the value of node B is 10+11+12=33, and the value of node D is 99.\n\nHowever, if a node does have child nodes, the metadata entries become indexes which refer to those child nodes. A metadata entry of 1 refers to the first child node, 2 to the second, 3 to the third, and so on. The value of this node is the sum of the values of the child nodes referenced by the metadata entries. If a referenced child node does not exist, that reference is skipped. A child node can be referenced multiple time and counts each time it is referenced. A metadata entry of 0 does not refer to any child node.\n\nFor example, again using the above nodes:\n\n- Node C has one metadata entry, 2. Because node C has only one child node, 2 references a child node which does not exist, and so the value of node C is 0.\n- Node A has three metadata entries: 1, 1, and 2. The 1 references node A's first child node, B, and the 2 references node A's second child node, C. Because node B has a value of 33 and node C has a value of 0, the value of node A is 33+33+0=66.\n\nSo, in this example, the value of the root node is 66.\n\nWhat is the value of the root node?\n\nYour puzzle answer was 25154.",
"_____no_output_____"
]
],
[
[
"license_nodes[0]['value']",
"_____no_output_____"
]
],
[
[
"# Day 9: Marble Mania \nYou talk to the Elves while you wait for your navigation system to initialize. To pass the time, they introduce you to their favorite marble game.\n\nThe Elves play this game by taking turns arranging the marbles in a circle according to very particular rules. The marbles are numbered starting with 0 and increasing by 1 until every marble has a number.\n\nFirst, the marble numbered 0 is placed in the circle. At this point, while it contains only a single marble, it is still a circle: the marble is both clockwise from itself and counter-clockwise from itself. This marble is designated the current marble.\n\nThen, each Elf takes a turn placing the lowest-numbered remaining marble into the circle between the marbles that are 1 and 2 marbles clockwise of the current marble. (When the circle is large enough, this means that there is one marble between the marble that was just placed and the current marble.) The marble that was just placed then becomes the current marble.\n\nHowever, if the marble that is about to be placed has a number which is a multiple of 23, something entirely different happens. First, the current player keeps the marble they would have placed, adding it to their score. In addition, the marble 7 marbles counter-clockwise from the current marble is removed from the circle and also added to the current player's score. The marble located immediately clockwise of the marble that was removed becomes the new current marble.\n\nFor example, suppose there are 9 players. After the marble with value 0 is placed in the middle, each player (shown in square brackets) takes a turn. The result of each of those turns would produce circles of marbles like this, where clockwise is to the right and the resulting current marble is in parentheses:\n\n [-] (0)\n [1] 0 (1)\n [2] 0 (2) 1 \n [3] 0 2 1 (3)\n [4] 0 (4) 2 1 3 \n [5] 0 4 2 (5) 1 3 \n [6] 0 4 2 5 1 (6) 3 \n [7] 0 4 2 5 1 6 3 (7)\n [8] 0 (8) 4 2 5 1 6 3 7 \n [9] 0 8 4 (9) 2 5 1 6 3 7 \n [1] 0 8 4 9 2(10) 5 1 6 3 7 \n [2] 0 8 4 9 2 10 5(11) 1 6 3 7 \n [3] 0 8 4 9 2 10 5 11 1(12) 6 3 7 \n [4] 0 8 4 9 2 10 5 11 1 12 6(13) 3 7 \n [5] 0 8 4 9 2 10 5 11 1 12 6 13 3(14) 7 \n [6] 0 8 4 9 2 10 5 11 1 12 6 13 3 14 7(15)\n [7] 0(16) 8 4 9 2 10 5 11 1 12 6 13 3 14 7 15 \n [8] 0 16 8(17) 4 9 2 10 5 11 1 12 6 13 3 14 7 15 \n [9] 0 16 8 17 4(18) 9 2 10 5 11 1 12 6 13 3 14 7 15 \n [1] 0 16 8 17 4 18 9(19) 2 10 5 11 1 12 6 13 3 14 7 15 \n [2] 0 16 8 17 4 18 9 19 2(20)10 5 11 1 12 6 13 3 14 7 15 \n [3] 0 16 8 17 4 18 9 19 2 20 10(21) 5 11 1 12 6 13 3 14 7 15 \n [4] 0 16 8 17 4 18 9 19 2 20 10 21 5(22)11 1 12 6 13 3 14 7 15 \n [5] 0 16 8 17 4 18(19) 2 20 10 21 5 22 11 1 12 6 13 3 14 7 15 \n [6] 0 16 8 17 4 18 19 2(24)20 10 21 5 22 11 1 12 6 13 3 14 7 15 \n [7] 0 16 8 17 4 18 19 2 24 20(25)10 21 5 22 11 1 12 6 13 3 14 7 15\nThe goal is to be the player with the highest score after the last marble is used up. 
Assuming the example above ends after the marble numbered 25, the winning score is 23+9=32 (because player 5 kept marble 23 and removed marble 9, while no other player got any points in this very short example game).\n\nHere are a few more examples:\n\n- 10 players; last marble is worth 1618 points: high score is 8317\n- 13 players; last marble is worth 7999 points: high score is 146373\n- 17 players; last marble is worth 1104 points: high score is 2764\n- 21 players; last marble is worth 6111 points: high score is 54718\n- 30 players; last marble is worth 5807 points: high score is 37305\n\nWhat is the winning Elf's score?\n\nYour puzzle answer was 390093.",
"_____no_output_____"
]
],
[
[
"n_players, n_marbles = [int(el) for i, el in enumerate(read_input('day_09.txt').split()) if i in [0, 6]]",
"_____no_output_____"
],
[
"def play_marbles(n_players, n_marbles, display=False):\n players = defaultdict(int)\n current_player = 1\n current_pos = 0\n marbles = deque()\n marbles.append(0)\n for m in range(1, n_marbles + 1):\n if display:\n print(marbles)\n if m % 23 != 0:\n# next_pos = (current_pos + 1) % len(marbles)\n# marbles = marbles[:next_pos + 1] + [m] + marbles[next_pos + 1:]\n# current_pos = next_pos + 1\n \n marbles.rotate(-1)\n marbles.append(m)\n else:\n# players[current_player] += m\n# pos_to_remove = (current_pos - 7) % len(marbles)\n# players[current_player] += marbles[pos_to_remove]\n# marbles = marbles[:pos_to_remove] + marbles[pos_to_remove + 1:]\n# current_pos = pos_to_remove\n\n marbles.rotate(7)\n players[current_player] += m + marbles.pop()\n marbles.rotate(-1)\n \n current_player = (current_player + 1) % n_players\n return max(players.values())",
"_____no_output_____"
],
[
"play_marbles(9, 25, display=True)",
"deque([0])\ndeque([0, 1])\ndeque([1, 0, 2])\ndeque([0, 2, 1, 3])\ndeque([2, 1, 3, 0, 4])\ndeque([1, 3, 0, 4, 2, 5])\ndeque([3, 0, 4, 2, 5, 1, 6])\ndeque([0, 4, 2, 5, 1, 6, 3, 7])\ndeque([4, 2, 5, 1, 6, 3, 7, 0, 8])\ndeque([2, 5, 1, 6, 3, 7, 0, 8, 4, 9])\ndeque([5, 1, 6, 3, 7, 0, 8, 4, 9, 2, 10])\ndeque([1, 6, 3, 7, 0, 8, 4, 9, 2, 10, 5, 11])\ndeque([6, 3, 7, 0, 8, 4, 9, 2, 10, 5, 11, 1, 12])\ndeque([3, 7, 0, 8, 4, 9, 2, 10, 5, 11, 1, 12, 6, 13])\ndeque([7, 0, 8, 4, 9, 2, 10, 5, 11, 1, 12, 6, 13, 3, 14])\ndeque([0, 8, 4, 9, 2, 10, 5, 11, 1, 12, 6, 13, 3, 14, 7, 15])\ndeque([8, 4, 9, 2, 10, 5, 11, 1, 12, 6, 13, 3, 14, 7, 15, 0, 16])\ndeque([4, 9, 2, 10, 5, 11, 1, 12, 6, 13, 3, 14, 7, 15, 0, 16, 8, 17])\ndeque([9, 2, 10, 5, 11, 1, 12, 6, 13, 3, 14, 7, 15, 0, 16, 8, 17, 4, 18])\ndeque([2, 10, 5, 11, 1, 12, 6, 13, 3, 14, 7, 15, 0, 16, 8, 17, 4, 18, 9, 19])\ndeque([10, 5, 11, 1, 12, 6, 13, 3, 14, 7, 15, 0, 16, 8, 17, 4, 18, 9, 19, 2, 20])\ndeque([5, 11, 1, 12, 6, 13, 3, 14, 7, 15, 0, 16, 8, 17, 4, 18, 9, 19, 2, 20, 10, 21])\ndeque([11, 1, 12, 6, 13, 3, 14, 7, 15, 0, 16, 8, 17, 4, 18, 9, 19, 2, 20, 10, 21, 5, 22])\ndeque([2, 20, 10, 21, 5, 22, 11, 1, 12, 6, 13, 3, 14, 7, 15, 0, 16, 8, 17, 4, 18, 19])\ndeque([20, 10, 21, 5, 22, 11, 1, 12, 6, 13, 3, 14, 7, 15, 0, 16, 8, 17, 4, 18, 19, 2, 24])\n"
],
[
"play_marbles(10, 1618)",
"_____no_output_____"
],
[
"play_marbles(n_players, n_marbles)",
"_____no_output_____"
]
],
[
[
"# Part Two \nAmused by the speed of your answer, the Elves are curious:\n\nWhat would the new winning Elf's score be if the number of the last marble were 100 times larger?\n\nYour puzzle answer was 3150377341.",
"_____no_output_____"
],
[
"## Confession:\nMy original solution using Python lists was obviously going to be far to slow for this (took about 20s for Part 1). After thinking for a bit I caved in and checked the Reddit solutions and found out about deque solution which is equivalent but uses a better data structure. So today I learned something :)",
"_____no_output_____"
]
],
[
[
"play_marbles(n_players, n_marbles * 100)",
"_____no_output_____"
]
],
[
[
"# Day 10: The Stars Align \nIt's no use; your navigation system simply isn't capable of providing walking directions in the arctic circle, and certainly not in 1018.\n\nThe Elves suggest an alternative. In times like these, North Pole rescue operations will arrange points of light in the sky to guide missing Elves back to base. Unfortunately, the message is easy to miss: the points move slowly enough that it takes hours to align them, but have so much momentum that they only stay aligned for a second. If you blink at the wrong time, it might be hours before another message appears.\n\nYou can see these points of light floating in the distance, and record their position in the sky and their velocity, the relative change in position per second (your puzzle input). The coordinates are all given from your perspective; given enough time, those positions and velocities will move the points into a cohesive message!\n\nRather than wait, you decide to fast-forward the process and calculate what the points will eventually spell.\n\nFor example, suppose you note the following points:\n\n position=< 9, 1> velocity=< 0, 2>\n position=< 7, 0> velocity=<-1, 0>\n position=< 3, -2> velocity=<-1, 1>\n position=< 6, 10> velocity=<-2, -1>\n position=< 2, -4> velocity=< 2, 2>\n position=<-6, 10> velocity=< 2, -2>\n position=< 1, 8> velocity=< 1, -1>\n position=< 1, 7> velocity=< 1, 0>\n position=<-3, 11> velocity=< 1, -2>\n position=< 7, 6> velocity=<-1, -1>\n position=<-2, 3> velocity=< 1, 0>\n position=<-4, 3> velocity=< 2, 0>\n position=<10, -3> velocity=<-1, 1>\n position=< 5, 11> velocity=< 1, -2>\n position=< 4, 7> velocity=< 0, -1>\n position=< 8, -2> velocity=< 0, 1>\n position=<15, 0> velocity=<-2, 0>\n position=< 1, 6> velocity=< 1, 0>\n position=< 8, 9> velocity=< 0, -1>\n position=< 3, 3> velocity=<-1, 1>\n position=< 0, 5> velocity=< 0, -1>\n position=<-2, 2> velocity=< 2, 0>\n position=< 5, -2> velocity=< 1, 2>\n position=< 1, 4> velocity=< 2, 1>\n position=<-2, 7> velocity=< 2, -2>\n position=< 3, 6> velocity=<-1, -1>\n position=< 5, 0> velocity=< 1, 0>\n position=<-6, 0> velocity=< 2, 0>\n position=< 5, 9> velocity=< 1, -2>\n position=<14, 7> velocity=<-2, 0>\n position=<-3, 6> velocity=< 2, -1>\n\nEach line represents one point. Positions are given as <X, Y> pairs: X represents how far left (negative) or right (positive) the point appears, while Y represents how far up (negative) or down (positive) the point appears.\n\nAt 0 seconds, each point has the position given. Each second, each point's velocity is added to its position. So, a point with velocity <1, -2> is moving to the right, but is moving upward twice as quickly. 
If this point's initial position were <3, 9>, after 3 seconds, its position would become <6, 3>.\n\nOver time, the points listed above would move like this:\n\nInitially:\n\n ........#.............\n ................#.....\n .........#.#..#.......\n ......................\n #..........#.#.......#\n ...............#......\n ....#.................\n ..#.#....#............\n .......#..............\n ......#...............\n ...#...#.#...#........\n ....#..#..#.........#.\n .......#..............\n ...........#..#.......\n #...........#.........\n ...#.......#..........\n\nAfter 1 second:\n\n ......................\n ......................\n ..........#....#......\n ........#.....#.......\n ..#.........#......#..\n ......................\n ......#...............\n ....##.........#......\n ......#.#.............\n .....##.##..#.........\n ........#.#...........\n ........#...#.....#...\n ..#...........#.......\n ....#.....#.#.........\n ......................\n ......................\n\nAfter 2 seconds:\n\n ......................\n ......................\n ......................\n ..............#.......\n ....#..#...####..#....\n ......................\n ........#....#........\n ......#.#.............\n .......#...#..........\n .......#..#..#.#......\n ....#....#.#..........\n .....#...#...##.#.....\n ........#.............\n ......................\n ......................\n ......................\n\nAfter 3 seconds:\n\n ......................\n ......................\n ......................\n ......................\n ......#...#..###......\n ......#...#...#.......\n ......#...#...#.......\n ......#####...#.......\n ......#...#...#.......\n ......#...#...#.......\n ......#...#...#.......\n ......#...#..###......\n ......................\n ......................\n ......................\n ......................\n\nAfter 4 seconds:\n\n ......................\n ......................\n ......................\n ............#.........\n ........##...#.#......\n ......#.....#..#......\n .....#..##.##.#.......\n .......##.#....#......\n ...........#....#.....\n ..............#.......\n ....#......#...#......\n .....#.....##.........\n ...............#......\n ...............#......\n ......................\n ......................\n \nAfter 3 seconds, the message appeared briefly: HI. Of course, your message will be much longer and will take many more seconds to appear.\n\nWhat message will eventually appear in the sky?\n\nYour puzzle answer was LXJFKAXA.",
"_____no_output_____"
]
],
[
[
"stars = np.array(map(lambda s: map(int, \n s.lstrip(\n 'position=<'\n ).replace(\n '> velocity=<', ','\n ).rstrip(\n '>'\n ).split(\n ','\n )), \n read_input('day_10.txt').split('\\n')))",
"_____no_output_____"
],
[
"def align_stars(stars, t):\n pos = stars[:, :2] + stars[:,2:]*t\n pos = pos - np.min(pos, axis=0)\n cols, rows = np.max(pos, axis=0)\n res = None\n if cols < 2000 and rows < 2000:\n res = np.zeros((rows+1, cols+1))\n for p in pos.tolist():\n res[p[1], p[0]] = 1\n return res",
"_____no_output_____"
],
[
"np.mean(stars[:, :2] / stars[:, 2:])",
"_____no_output_____"
],
[
"# Trial and error around min distsance\n_ = plt.spy(align_stars(stars, 10312))",
"_____no_output_____"
]
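,
[
"# Alternative to trial and error (a sketch; the 9000-12000 search window is an\n# assumption based on the rough estimate above): pick the time at which the\n# points' vertical spread is smallest, which should be when the message appears.\ndef bounding_height(t):\n    pos = stars[:, :2] + stars[:, 2:] * t\n    return np.max(pos[:, 1]) - np.min(pos[:, 1])\n\nt_best = min(range(9000, 12000), key=bounding_height)\nprint(t_best)\n_ = plt.spy(align_stars(stars, t_best))",
"_____no_output_____"
]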
],
[
[
"# Part Two\nGood thing you didn't have to wait, because that would have taken a long time - much longer than the 3 seconds in the example above.\n\nImpressed by your sub-hour communication capabilities, the Elves are curious: exactly how many seconds would they have needed to wait for that message to appear?\n\nYour puzzle answer was 10312.",
"_____no_output_____"
],
[
"# Day 11: Chronal Charge \nYou watch the Elves and their sleigh fade into the distance as they head toward the North Pole.\n\nActually, you're the one fading. The falling sensation returns.\n\nThe low fuel warning light is illuminated on your wrist-mounted device. Tapping it once causes it to project a hologram of the situation: a 300x300 grid of fuel cells and their current power levels, some negative. You're not sure what negative power means in the context of time travel, but it can't be good.\n\nEach fuel cell has a coordinate ranging from 1 to 300 in both the X (horizontal) and Y (vertical) direction. In X,Y notation, the top-left cell is 1,1, and the top-right cell is 300,1.\n\nThe interface lets you select any 3x3 square of fuel cells. To increase your chances of getting to your destination, you decide to choose the 3x3 square with the largest total power.\n\nThe power level in a given fuel cell can be found through the following process:\n\n- Find the fuel cell's rack ID, which is its X coordinate plus 10.\n- Begin with a power level of the rack ID times the Y coordinate.\n- Increase the power level by the value of the grid serial number (your puzzle input).\n- Set the power level to itself multiplied by the rack ID.\n- Keep only the hundreds digit of the power level (so 12345 becomes 3; numbers with no hundreds digit become 0).\n- Subtract 5 from the power level.\n\nFor example, to find the power level of the fuel cell at 3,5 in a grid with serial number 8:\n\n- The rack ID is 3 + 10 = 13.\n- The power level starts at 13 * 5 = 65.\n- Adding the serial number produces 65 + 8 = 73.\n- Multiplying by the rack ID produces 73 * 13 = 949.\n- The hundreds digit of 949 is 9.\n- Subtracting 5 produces 9 - 5 = 4.\n\nSo, the power level of this fuel cell is 4.\n\nHere are some more example power levels:\n\n- Fuel cell at 122,79, grid serial number 57: power level -5.\n- Fuel cell at 217,196, grid serial number 39: power level 0.\n- Fuel cell at 101,153, grid serial number 71: power level 4.\n\nYour goal is to find the 3x3 square which has the largest total power. The square must be entirely within the 300x300 grid. Identify this square using the X,Y coordinate of its top-left fuel cell. For example:\n\nFor grid serial number 18, the largest total 3x3 square has a top-left corner of 33,45 (with a total power of 29); these fuel cells appear in the middle of this 5x5 region:\n\n -2 -4 4 4 4\n -4 4 4 4 -5\n 4 3 3 4 -4\n 1 1 2 4 -3\n -1 0 2 -5 -2\n \nFor grid serial number 42, the largest 3x3 square's top-left is 21,61 (with a total power of 30); they are in the middle of this region:\n\n -3 4 2 2 2\n -4 4 3 3 4\n -5 3 3 4 -4\n 4 3 3 4 -3\n 3 3 3 -5 -1\n \nWhat is the X,Y coordinate of the top-left fuel cell of the 3x3 square with the largest total power?\n\nYour puzzle input is 4151.\n\nYour puzzle answer was 20,46.",
"_____no_output_____"
]
],
[
[
"def grid_power(serial):\n X, Y = np.meshgrid(np.arange(300, dtype=np.int)+1, np.arange(300, dtype=np.int)+1)\n P = (np.floor((((((X + 10) * Y) + serial) * (X + 10)) % 1000) / 100) - 5).astype(np.int)\n return P",
"_____no_output_____"
],
[
"def find_max_grid_power(power, size=3):\n p_max = 0\n x_m = 0\n y_m = 0\n y_max, x_max = power.shape\n for x in range(x_max - size) :\n for y in range(y_max - size) :\n p = np.sum(power[y:y+size, x:x+size])\n if p > p_max:\n p_max = p\n x_m = x\n y_m = y\n return p_max, x_m+1, y_m+1, size",
"_____no_output_____"
],
[
"find_max_grid_power(grid_power(4151))",
"_____no_output_____"
]
],
[
[
"# Part Two\nYou discover a dial on the side of the device; it seems to let you select a square of any size, not just 3x3. Sizes from 1x1 to 300x300 are supported.\n\nRealizing this, you now must find the square of any size with the largest total power. Identify this square by including its size as a third parameter after the top-left coordinate: a 9x9 square with a top-left corner of 3,5 is identified as 3,5,9.\n\nFor example:\n\n- For grid serial number 18, the largest total square (with a total power of 113) is 16x16 and has a top-left corner of 90,269, so its identifier is 90,269,16.\n- For grid serial number 42, the largest total square (with a total power of 119) is 12x12 and has a top-left corner of 232,251, so its identifier is 232,251,12.\n\nWhat is the X,Y,size identifier of the square with the largest total power?\n\nYour puzzle input is still 4151.\n\nYour puzzle answer was 231,65,14.",
"_____no_output_____"
]
],
[
[
"find_max_grid_power(grid_power(42), 12)",
"_____no_output_____"
],
[
"def find_max_grid_power_and_size(serial):\n power = grid_power(serial)\n p_max = 0\n x_m = 0\n y_m = 0\n s_m = 0\n for size in range(1, 301):\n p, x, y, s = find_max_grid_power(power, size)\n if p > p_max:\n p_max = p\n x_m = x\n y_m = y\n s_m = s\n return p_max, x_m, y_m, s_m",
"_____no_output_____"
],
[
"find_max_grid_power_and_size(4151)",
"_____no_output_____"
]
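,
[
"# Faster sketch using a summed-area table (2-D prefix sums), so the total power\n# of any square can be read off in O(1). Not the code used for the answer above,\n# just the standard speed-up for this kind of search.\ndef find_max_any_size(serial):\n    power = grid_power(serial)\n    # sat[i, j] holds the sum of power[:i, :j] (padded with a zero row/column)\n    sat = np.zeros((301, 301))\n    sat[1:, 1:] = np.cumsum(np.cumsum(power, axis=0), axis=1)\n    best = (-10**9, 0, 0, 0)\n    for size in range(1, 301):\n        totals = (sat[size:, size:] - sat[:-size, size:]\n                  - sat[size:, :-size] + sat[:-size, :-size])\n        y, x = np.unravel_index(np.argmax(totals), totals.shape)\n        best = max(best, (totals[y, x], x + 1, y + 1, size))\n    return best\n\nfind_max_any_size(4151)",
"_____no_output_____"
]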
],
[
[
"# Day 12: Subterranean Sustainability \nThe year 518 is significantly more underground than your history books implied. Either that, or you've arrived in a vast cavern network under the North Pole.\n\nAfter exploring a little, you discover a long tunnel that contains a row of small pots as far as you can see to your left and right. A few of them contain plants - someone is trying to grow things in these geothermally-heated caves.\n\nThe pots are numbered, with 0 in front of you. To the left, the pots are numbered -1, -2, -3, and so on; to the right, 1, 2, 3.... Your puzzle input contains a list of pots from 0 to the right and whether they do (#) or do not (.) currently contain a plant, the initial state. (No other pots currently contain plants.) For example, an initial state of `#..##....` indicates that pots 0, 3, and 4 currently contain plants.\n\nYour puzzle input also contains some notes you find on a nearby table: someone has been trying to figure out how these plants spread to nearby pots. Based on the notes, for each generation of plants, a given pot has or does not have a plant based on whether that pot (and the two pots on either side of it) had a plant in the last generation. These are written as `LLCRR =>` N, where L are pots to the left, C is the current pot being considered, R are the pots to the right, and N is whether the current pot will have a plant in the next generation. For example:\n\nA note like `..#.. => .` means that a pot that contains a plant but with no plants within two pots of it will not have a plant in it during the next generation.\nA note like `##.## => .` means that an empty pot with two plants on each side of it will remain empty in the next generation.\nA note like `.##.# => #` means that a pot has a plant in a given generation if, in the previous generation, there were plants in that pot, the one immediately to the left, and the one two pots to the right, but not in the ones immediately to the right and two to the left.\nIt's not clear what these plants are for, but you're sure it's important, so you'd like to make sure the current configuration of plants is sustainable by determining what will happen after 20 generations.\n\nFor example, given the following input:\n\n initial state: #..#.#..##......###...###\n\n ...## => #\n ..#.. => #\n .#... => #\n .#.#. => #\n .#.## => #\n .##.. => #\n .#### => #\n #.#.# => #\n #.### => #\n ##.#. => #\n ##.## => #\n ###.. => #\n ###.# => #\n ####. => #\n\nFor brevity, in this example, only the combinations which do produce a plant are listed. (Your input includes all possible combinations.) 
Then, the next 20 generations will look like this:\n\n 1 2 3 \n 0 0 0 0 \n 0: ...#..#.#..##......###...###...........\n 1: ...#...#....#.....#..#..#..#...........\n 2: ...##..##...##....#..#..#..##..........\n 3: ..#.#...#..#.#....#..#..#...#..........\n 4: ...#.#..#...#.#...#..#..##..##.........\n 5: ....#...##...#.#..#..#...#...#.........\n 6: ....##.#.#....#...#..##..##..##........\n 7: ...#..###.#...##..#...#...#...#........\n 8: ...#....##.#.#.#..##..##..##..##.......\n 9: ...##..#..#####....#...#...#...#.......\n 10: ..#.#..#...#.##....##..##..##..##......\n 11: ...#...##...#.#...#.#...#...#...#......\n 12: ...##.#.#....#.#...#.#..##..##..##.....\n 13: ..#..###.#....#.#...#....#...#...#.....\n 14: ..#....##.#....#.#..##...##..##..##....\n 15: ..##..#..#.#....#....#..#.#...#...#....\n 16: .#.#..#...#.#...##...#...#.#..##..##...\n 17: ..#...##...#.#.#.#...##...#....#...#...\n 18: ..##.#.#....#####.#.#.#...##...##..##..\n 19: .#..###.#..#.#.#######.#.#.#..#.#...#..\n 20: .#....##....#####...#######....#.#..##.\n\nThe generation is shown along the left, where 0 is the initial state. The pot numbers are shown along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not the leftmost pot used in this example.\n\nAfter one generation, only seven plants remain. The one in pot 0 matched the rule looking for `..#..`, the one in pot 4 matched the rule looking for `.#.#.`, pot 9 matched `.##..`, and so on.\n\nIn this example, after 20 generations, the pots shown as # contain plants, the furthest left of which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of plant-containing pots after the 20th generation produces 325.\n\nAfter 20 generations, what is the sum of the numbers of all pots which contain a plant?\n\nYour puzzle answer was 1430.",
"_____no_output_____"
]
],
[
[
"pots_input = read_input('day_12.txt')",
"_____no_output_____"
],
[
"def parse_pots_rule(pots_input):\n lines = pots_input.split('\\n')\n initial_state = lines[0].lstrip('initial state: ')\n update_rules = defaultdict(lambda: '.')\n for rule in lines[2:]:\n r, t = rule.split(' => ')\n update_rules[r] = t\n return initial_state, update_rules",
"_____no_output_____"
],
[
"test_pots_input = \"\"\"initial state: #..#.#..##......###...###\n\n...## => #\n..#.. => #\n.#... => #\n.#.#. => #\n.#.## => #\n.##.. => #\n.#### => #\n#.#.# => #\n#.### => #\n##.#. => #\n##.## => #\n###.. => #\n###.# => #\n####. => #\"\"\"",
"_____no_output_____"
],
[
"pots_initial_test, pots_rule_test = parse_pots_rule(test_pots_input)",
"_____no_output_____"
],
[
"def update_pots(state, rule):\n new_state = list('.' * len(state))\n for pos in range(2, len(state)-2):\n \n new_state[pos] = rule[state[pos-2:pos+3]]\n return ''.join(new_state)",
"_____no_output_____"
],
[
"def iterate_pots_update(initial_state, rule, n_gen, display=False, display_count=False):\n # Pad initial state \n state = ('.' * n_gen) + initial_state + ('.' * n_gen)\n pot_numbers = (\n range(-n_gen, 0) + range(len(initial_state) + 1) \n + range(len(initial_state)+1, len(initial_state)+1+n_gen)\n )\n # print(pot_numbers)\n for i in range(n_gen):\n pot_sum = sum(map(lambda (p, s): p if s == '#' else 0, zip(pot_numbers, state)))\n pot_count = sum(map(lambda s: 1 if s == '#' else 0, state))\n \n if display:\n print(state)\n if display_count and (n_gen - i) < 10:\n # Display last 10 generations\n print(i, pot_count, pot_sum)\n state = update_pots(state, rule)\n \n pot_sum = sum(map(lambda (p, s): p if s == '#' else 0, zip(pot_numbers, state)))\n \n return pot_sum",
"_____no_output_____"
],
[
"iterate_pots_update(pots_initial_test, pots_rule_test, 20, display=True)",
"....................#..#.#..##......###...###....................\n....................#...#....#.....#..#..#..#....................\n....................##..##...##....#..#..#..##...................\n...................#.#...#..#.#....#..#..#...#...................\n....................#.#..#...#.#...#..#..##..##..................\n.....................#...##...#.#..#..#...#...#..................\n.....................##.#.#....#...#..##..##..##.................\n....................#..###.#...##..#...#...#...#.................\n....................#....##.#.#.#..##..##..##..##................\n....................##..#..#####....#...#...#...#................\n...................#.#..#...#.##....##..##..##..##...............\n....................#...##...#.#...#.#...#...#...#...............\n....................##.#.#....#.#...#.#..##..##..##..............\n...................#..###.#....#.#...#....#...#...#..............\n...................#....##.#....#.#..##...##..##..##.............\n...................##..#..#.#....#....#..#.#...#...#.............\n..................#.#..#...#.#...##...#...#.#..##..##............\n...................#...##...#.#.#.#...##...#....#...#............\n...................##.#.#....#####.#.#.#...##...##..##...........\n..................#..###.#..#.#.#######.#.#.#..#.#...#...........\n"
],
[
"pots_initial, pots_rule = parse_pots_rule(pots_input)",
"_____no_output_____"
],
[
"iterate_pots_update(pots_initial, pots_rule, 20)",
"_____no_output_____"
]
],
[
[
"# Part Two\nYou realize that 20 generations aren't enough. After all, these plants will need to last another 1500 years to even reach your timeline, not to mention your future.\n\nAfter fifty billion (50000000000) generations, what is the sum of the numbers of all pots which contain a plant?\n\nYour puzzle answer was 1150000000457.",
"_____no_output_____"
]
],
[
[
"# See if things settle down after a while\niterate_pots_update(pots_initial, pots_rule, 120, display_count=True)",
"(111, 23, 3010)\n(112, 23, 3033)\n(113, 23, 3056)\n(114, 23, 3079)\n(115, 23, 3102)\n(116, 23, 3125)\n(117, 23, 3148)\n(118, 23, 3171)\n(119, 23, 3194)\n"
]
],
[
[
"Looks like by generation 120 the population stops changing and pot sum keeps increasing by 23 each generation.",
"_____no_output_____"
]
],
[
[
"# Final pot count after 50,000,000,000 generations\n3217 + (23 * (50000000000 - 120))",
"_____no_output_____"
]
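,
[
"# The same extrapolation done programmatically (a sketch; assumes the pot sum\n# grows by a constant amount per generation once the pattern has stabilised,\n# and that generation 200 is comfortably past that point).\nsettle = 200\ns1 = iterate_pots_update(pots_initial, pots_rule, settle)\ns2 = iterate_pots_update(pots_initial, pots_rule, settle + 1)\ns1 + (s2 - s1) * (50000000000 - settle)",
"_____no_output_____"
]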
],
[
[
"# Day 13: Mine Cart Madness \nA crop of this size requires significant logistics to transport produce, soil, fertilizer, and so on. The Elves are very busy pushing things around in carts on some kind of rudimentary system of tracks they've come up with.\n\nSeeing as how cart-and-track systems don't appear in recorded history for another 1000 years, the Elves seem to be making this up as they go along. They haven't even figured out how to avoid collisions yet.\n\nYou map out the tracks (your puzzle input) and see where you can help.\n\nTracks consist of straight paths (| and -), curves (/ and \\\\), and intersections (+). Curves connect exactly two perpendicular pieces of track; for example, this is a closed loop:\n\n /----\\\n | |\n | |\n \\----/\n\nIntersections occur when two perpendicular paths cross. At an intersection, a cart is capable of turning left, turning right, or continuing straight. Here are two loops connected by two intersections:\n\n /-----\\\n | |\n | /--+--\\\n | | | |\n \\--+--/ |\n | |\n \\-----/\n \nSeveral carts are also on the tracks. Carts always face either up (^), down (v), left (<), or right (>). (On your initial map, the track under each cart is a straight path matching the direction the cart is facing.)\n\nEach time a cart has the option to turn (by arriving at any intersection), it turns left the first time, goes straight the second time, turns right the third time, and then repeats those directions starting again with left the fourth time, straight the fifth time, and so on. This process is independent of the particular intersection at which the cart has arrived - that is, the cart has no per-intersection memory.\n\nCarts all move at the same speed; they take turns moving a single step at a time. They do this based on their current location: carts on the top row move first (acting from left to right), then carts on the second row move (again from left to right), then carts on the third row, and so on. Once each cart has moved one step, the process repeats; each of these loops is called a tick.\n\nFor example, suppose there are two carts on a straight track:\n\n | | | | |\n v | | | |\n | v v | |\n | | | v X\n | | ^ ^ |\n ^ ^ | | |\n | | | | |\n\nFirst, the top cart moves. It is facing down (v), so it moves down one square. Second, the bottom cart moves. It is facing up (^), so it moves up one square. Because all carts have moved, the first tick ends. Then, the process repeats, starting with the first cart. The first cart moves down, then the second cart moves up - right into the first cart, colliding with it! (The location of the crash is marked with an X.) 
This ends the second and last tick.\n\nHere is a longer example:\n\n /->-\\ \n | | /----\\\n | /-+--+-\\ |\n | | | | v |\n \\-+-/ \\-+--/\n \\------/ \n\n /-->\\ \n | | /----\\\n | /-+--+-\\ |\n | | | | | |\n \\-+-/ \\->--/\n \\------/ \n\n /---v \n | | /----\\\n | /-+--+-\\ |\n | | | | | |\n \\-+-/ \\-+>-/\n \\------/ \n\n /---\\ \n | v /----\\\n | /-+--+-\\ |\n | | | | | |\n \\-+-/ \\-+->/\n \\------/ \n\n /---\\ \n | | /----\\\n | /->--+-\\ |\n | | | | | |\n \\-+-/ \\-+--^\n \\------/ \n\n /---\\ \n | | /----\\\n | /-+>-+-\\ |\n | | | | | ^\n \\-+-/ \\-+--/\n \\------/ \n\n /---\\ \n | | /----\\\n | /-+->+-\\ ^\n | | | | | |\n \\-+-/ \\-+--/\n \\------/ \n\n /---\\ \n | | /----<\n | /-+-->-\\ |\n | | | | | |\n \\-+-/ \\-+--/\n \\------/ \n\n /---\\ \n | | /---<\\\n | /-+--+>\\ |\n | | | | | |\n \\-+-/ \\-+--/\n \\------/ \n\n /---\\ \n | | /--<-\\\n | /-+--+-v |\n | | | | | |\n \\-+-/ \\-+--/\n \\------/ \n\n /---\\ \n | | /-<--\\\n | /-+--+-\\ |\n | | | | v |\n \\-+-/ \\-+--/\n \\------/ \n\n /---\\ \n | | /<---\\\n | /-+--+-\\ |\n | | | | | |\n \\-+-/ \\-<--/\n \\------/ \n\n /---\\ \n | | v----\\\n | /-+--+-\\ |\n | | | | | |\n \\-+-/ \\<+--/\n \\------/ \n\n /---\\ \n | | /----\\\n | /-+--v-\\ |\n | | | | | |\n \\-+-/ ^-+--/\n \\------/ \n\n /---\\ \n | | /----\\\n | /-+--+-\\ |\n | | | X | |\n \\-+-/ \\-+--/\n \\------/ \n \nAfter following their respective paths for a while, the carts eventually crash. To help prevent crashes, you'd like to know the location of the first crash. Locations are given in X,Y coordinates, where the furthest left column is X=0 and the furthest top row is Y=0:\n\n 111\n 0123456789012\n 0/---\\ \n 1| | /----\\\n 2| /-+--+-\\ |\n 3| | | X | |\n 4\\-+-/ \\-+--/\n 5 \\------/ \n \nIn this example, the location of the first crash is 7,3.",
"_____no_output_____"
]
],
[
[
"tracks_input = read_input('day_13.txt')",
"_____no_output_____"
],
[
"tracks_test_input = r\"\"\"/->-\\ \n| | /----\\\n| /-+--+-\\ |\n| | | | v |\n\\-+-/ \\-+--/\n \\------/ \"\"\"",
"_____no_output_____"
],
[
"def parse_tracks(tracks):\n mine = np.array(map(list, tracks.split('\\n')))\n return mine",
"_____no_output_____"
],
[
"print(parse_tracks(tracks_test_input))",
"[['/' '-' '>' '-' '\\\\' ' ' ' ' ' ' ' ' ' ' ' ' ' ' ' ']\n ['|' ' ' ' ' ' ' '|' ' ' ' ' '/' '-' '-' '-' '-' '\\\\']\n ['|' ' ' '/' '-' '+' '-' '-' '+' '-' '\\\\' ' ' ' ' '|']\n ['|' ' ' '|' ' ' '|' ' ' ' ' '|' ' ' 'v' ' ' ' ' '|']\n ['\\\\' '-' '+' '-' '/' ' ' ' ' '\\\\' '-' '+' '-' '-' '/']\n [' ' ' ' '\\\\' '-' '-' '-' '-' '-' '-' '/' ' ' ' ' ' ']]\n"
],
[
"def update_tracks(mine):\n new_mine = mine.copy()\n rows, cols = mine.shape\n cart_syms = '<>^v'\n for i in range(rows):\n for j in range(cols):\n c = mine[i, j]\n if c in cart_syms:\n # Move cart\n if c == '>':\n new_mine[i, j] = '-'\n c_next = new_mine[i, j+1]\n new_mine[i, j+1] = (\n 'X' if c_next in cart_syms else (\n ''\n )\n )",
"_____no_output_____"
]
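,
[
"# The cell above is an unfinished attempt at Day 13. Below is a minimal sketch\n# (an assumed approach, only exercised on the small example): carts live in a\n# dict keyed by (row, col), each with a heading and an intersection counter\n# that cycles left -> straight -> right; ticks process carts in reading order\n# and the first square two carts share is the first crash.\nturn_left = {'>': '^', '^': '<', '<': 'v', 'v': '>'}\nturn_right = {'>': 'v', 'v': '<', '<': '^', '^': '>'}\nstep = {'>': (0, 1), '<': (0, -1), '^': (-1, 0), 'v': (1, 0)}\n\ndef first_crash(mine):\n    track = mine.copy()\n    carts = {}\n    for i in range(mine.shape[0]):\n        for j in range(mine.shape[1]):\n            if mine[i, j] in '<>^v':\n                carts[(i, j)] = (mine[i, j], 0)\n                track[i, j] = '-' if mine[i, j] in '<>' else '|'\n    while True:\n        for pos in sorted(carts):  # reading order: top-to-bottom, left-to-right\n            d, turns = carts.pop(pos)\n            di, dj = step[d]\n            new = (pos[0] + di, pos[1] + dj)\n            if new in carts:\n                return new[1], new[0]  # report the crash as x,y\n            c = track[new]\n            if c == '/':\n                d = {'>': '^', '^': '>', '<': 'v', 'v': '<'}[d]\n            elif c == '\\\\':\n                d = {'>': 'v', 'v': '>', '<': '^', '^': '<'}[d]\n            elif c == '+':\n                d = [turn_left[d], d, turn_right[d]][turns % 3]\n                turns += 1\n            carts[new] = (d, turns)\n\nfirst_crash(parse_tracks(tracks_test_input))",
"_____no_output_____"
]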
],
[
[
"# Day 14: Chocolate Charts \nYou finally have a chance to look at all of the produce moving around. Chocolate, cinnamon, mint, chili peppers, nutmeg, vanilla... the Elves must be growing these plants to make hot chocolate! As you realize this, you hear a conversation in the distance. When you go to investigate, you discover two Elves in what appears to be a makeshift underground kitchen/laboratory.\n\nThe Elves are trying to come up with the ultimate hot chocolate recipe; they're even maintaining a scoreboard which tracks the quality score (0-9) of each recipe.\n\nOnly two recipes are on the board: the first recipe got a score of 3, the second, 7. Each of the two Elves has a current recipe: the first Elf starts with the first recipe, and the second Elf starts with the second recipe.\n\nTo create new recipes, the two Elves combine their current recipes. This creates new recipes from the digits of the sum of the current recipes' scores. With the current recipes' scores of 3 and 7, their sum is 10, and so two new recipes would be created: the first with score 1 and the second with score 0. If the current recipes' scores were 2 and 3, the sum, 5, would only create one recipe (with a score of 5) with its single digit.\n\nThe new recipes are added to the end of the scoreboard in the order they are created. So, after the first round, the scoreboard is `3, 7, 1, 0`.\n\nAfter all new recipes are added to the scoreboard, each Elf picks a new current recipe. To do this, the Elf steps forward through the scoreboard a number of recipes equal to 1 plus the score of their current recipe. So, after the first round, the first Elf moves forward 1 + 3 = 4 times, while the second Elf moves forward 1 + 7 = 8 times. If they run out of recipes, they loop back around to the beginning. After the first round, both Elves happen to loop around until they land on the same recipe that they had in the beginning; in general, they will move to different recipes.\n\nDrawing the first Elf as parentheses and the second Elf as square brackets, they continue this process:\n\n (3)[7]\n (3)[7] 1 0 \n 3 7 1 [0](1) 0 \n 3 7 1 0 [1] 0 (1)\n (3) 7 1 0 1 0 [1] 2 \n 3 7 1 0 (1) 0 1 2 [4]\n 3 7 1 [0] 1 0 (1) 2 4 5 \n 3 7 1 0 [1] 0 1 2 (4) 5 1 \n 3 (7) 1 0 1 0 [1] 2 4 5 1 5 \n 3 7 1 0 1 0 1 2 [4](5) 1 5 8 \n 3 (7) 1 0 1 0 1 2 4 5 1 5 8 [9]\n 3 7 1 0 1 0 1 [2] 4 (5) 1 5 8 9 1 6 \n 3 7 1 0 1 0 1 2 4 5 [1] 5 8 9 1 (6) 7 \n 3 7 1 0 (1) 0 1 2 4 5 1 5 [8] 9 1 6 7 7 \n 3 7 [1] 0 1 0 (1) 2 4 5 1 5 8 9 1 6 7 7 9 \n 3 7 1 0 [1] 0 1 2 (4) *5 1 5 8 9 1 6 7 7 9* 2 \n\nThe Elves think their skill will improve after making a few recipes (your puzzle input). However, that could take ages; you can speed this up considerably by identifying the scores of the ten recipes after that. For example:\n\n- If the Elves think their skill will improve after making 9 recipes, the scores of the ten recipes after the first nine on the scoreboard would be 5158916779 (highlighted in the last line of the diagram).\n- After 5 recipes, the scores of the next ten would be 0124515891.\n- After 18 recipes, the scores of the next ten would be 9251071085.\n- After 2018 recipes, the scores of the next ten would be 5941429882.\n\nWhat are the scores of the ten recipes immediately after the number of recipes in your puzzle input?\n\nYour puzzle input is 260321.\n\nYour puzzle answer was 9276422810.",
"_____no_output_____"
]
],
[
[
"def make_recipes(n_recipes=0, find_recipe=None, initial='37'):\n recipes = initial\n pos_1 = 0\n pos_2 = 1\n new_recipes = []\n if not find_recipe:\n loop_cond = (lambda r: len(r) < n_recipes + 10)\n else:\n loop_cond = (lambda r: not(find_recipe in r[-len(find_recipe)-1:]))\n \n while loop_cond(recipes):\n # print(pos_1, pos_2)\n new_recipe = (str(int(recipes[pos_1]) + int(recipes[pos_2])))\n recipes += new_recipe\n pos_1 = (pos_1 + (int(recipes[pos_1]) + 1)) % len(recipes)\n pos_2 = (pos_2 + (int(recipes[pos_2]) + 1)) % len(recipes)\n\n if find_recipe:\n return len(re.sub(find_recipe + '.*', '', recipes))\n else:\n return recipes[(n_recipes):(n_recipes+10)]",
"_____no_output_____"
],
[
"make_recipes(9)",
"_____no_output_____"
],
[
"make_recipes(260321)",
"_____no_output_____"
]
],
[
[
"# Part Two\nAs it turns out, you got the Elves' plan backwards. They actually want to know how many recipes appear on the scoreboard to the left of the first recipes whose scores are the digits from your puzzle input.\n\n- 51589 first appears after 9 recipes.\n- 01245 first appears after 5 recipes.\n- 92510 first appears after 18 recipes.\n- 59414 first appears after 2018 recipes.\n\nHow many recipes appear on the scoreboard to the left of the score sequence in your puzzle input?\n\nYour puzzle input is still 260321.\n\nYour puzzle answer was 20319117.",
"_____no_output_____"
]
],
[
[
"make_recipes(None, '59414')",
"_____no_output_____"
],
[
"make_recipes(None, '260321')",
"_____no_output_____"
]
],
[
[
"# Day 15: Beverage Bandits\nHaving perfected their hot chocolate, the Elves have a new problem: the Goblins that live in these caves will do anything to steal it. Looks like they're here for a fight.\n\nYou scan the area, generating a map of the walls (#), open cavern (.), and starting position of every Goblin (G) and Elf (E) (your puzzle input).\n\nCombat proceeds in rounds; in each round, each unit that is still alive takes a turn, resolving all of its actions before the next unit's turn begins. On each unit's turn, it tries to move into range of an enemy (if it isn't already) and then attack (if it is in range).\n\nAll units are very disciplined and always follow very strict combat rules. Units never move or attack diagonally, as doing so would be dishonorable. When multiple choices are equally valid, ties are broken in reading order: top-to-bottom, then left-to-right. For instance, the order in which units take their turns within a round is the reading order of their starting positions in that round, regardless of the type of unit or whether other units have moved after the round started. For example:\n\n would take their\n These units: turns in this order:\n ####### #######\n #.G.E.# #.1.2.#\n #E.G.E# #3.4.5#\n #.G.E.# #.6.7.#\n ####### #######\n\nEach unit begins its turn by identifying all possible targets (enemy units). If no targets remain, combat ends.\n\nThen, the unit identifies all of the open squares (.) that are in range of each target; these are the squares which are adjacent (immediately up, down, left, or right) to any target and which aren't already occupied by a wall or another unit. Alternatively, the unit might already be in range of a target. If the unit is not already in range of a target, and there are no open squares which are in range of a target, the unit ends its turn.\n\nIf the unit is already in range of a target, it does not move, but continues its turn with an attack. Otherwise, since it is not in range of a target, it moves.\n\nTo move, the unit first considers the squares that are in range and determines which of those squares it could reach in the fewest steps. A step is a single movement to any adjacent (immediately up, down, left, or right) open (.) square. Units cannot move into walls or other units. The unit does this while considering the current positions of units and does not do any prediction about where units will be later. If the unit cannot reach (find an open path to) any of the squares that are in range, it ends its turn. If multiple squares are in range and tied for being reachable in the fewest steps, the square which is first in reading order is chosen. For example:\n\n Targets: In range: Reachable: Nearest: Chosen:\n ####### ####### ####### ####### #######\n #E..G.# #E.?G?# #E.@G.# #E.!G.# #E.+G.#\n #...#.# --> #.?.#?# --> #.@.#.# --> #.!.#.# --> #...#.#\n #.G.#G# #?G?#G# #@G@#G# #!G.#G# #.G.#G#\n ####### ####### ####### ####### #######\n\nIn the above scenario, the Elf has three targets (the three Goblins):\n\nEach of the Goblins has open, adjacent squares which are in range (marked with a ? on the map).\nOf those squares, four are reachable (marked @); the other two (on the right) would require moving through a wall or unit to reach.\nThree of these reachable squares are nearest, requiring the fewest steps (only 2) to reach (marked !).\nOf those, the square which is first in reading order is chosen (+).\nThe unit then takes a single step toward the chosen square along the shortest path to that square. 
If multiple steps would put the unit equally closer to its destination, the unit chooses the step which is first in reading order. (This requires knowing when there is more than one shortest path so that you can consider the first step of each such path.) For example:\n\n In range: Nearest: Chosen: Distance: Step:\n ####### ####### ####### ####### #######\n #.E...# #.E...# #.E...# #4E212# #..E..#\n #...?.# --> #...!.# --> #...+.# --> #32101# --> #.....#\n #..?G?# #..!G.# #...G.# #432G2# #...G.#\n ####### ####### ####### ####### #######\n\nThe Elf sees three squares in range of a target (?), two of which are nearest (!), and so the first in reading order is chosen (+). Under \"Distance\", each open square is marked with its distance from the destination square; the two squares to which the Elf could move on this turn (down and to the right) are both equally good moves and would leave the Elf 2 steps from being in range of the Goblin. Because the step which is first in reading order is chosen, the Elf moves right one square.\n\nHere's a larger example of movement:\n\n Initially:\n #########\n #G..G..G#\n #.......#\n #.......#\n #G..E..G#\n #.......#\n #.......#\n #G..G..G#\n #########\n\n After 1 round:\n #########\n #.G...G.#\n #...G...#\n #...E..G#\n #.G.....#\n #.......#\n #G..G..G#\n #.......#\n #########\n\n After 2 rounds:\n #########\n #..G.G..#\n #...G...#\n #.G.E.G.#\n #.......#\n #G..G..G#\n #.......#\n #.......#\n #########\n\n After 3 rounds:\n #########\n #.......#\n #..GGG..#\n #..GEG..#\n #G..G...#\n #......G#\n #.......#\n #.......#\n #########\n\nOnce the Goblins and Elf reach the positions above, they all are either in range of a target or cannot find any square in range of a target, and so none of the units can move until a unit dies.\n\nAfter moving (or if the unit began its turn in range of a target), the unit attacks.\n\nTo attack, the unit first determines all of the targets that are in range of it by being immediately adjacent to it. If there are no such targets, the unit ends its turn. Otherwise, the adjacent target with the fewest hit points is selected; in a tie, the adjacent target with the fewest hit points which is first in reading order is selected.\n\nThe unit deals damage equal to its attack power to the selected target, reducing its hit points by that amount. If this reduces its hit points to 0 or fewer, the selected target dies: its square becomes . and it takes no further turns.\n\nEach unit, either Goblin or Elf, has 3 attack power and starts with 200 hit points.\n\nFor example, suppose the only Elf is about to attack:\n\n HP: HP:\n G.... 9 G.... 9 \n ..G.. 4 ..G.. 4 \n ..EG. 2 --> ..E.. \n ..G.. 2 ..G.. 2 \n ...G. 1 ...G. 1 \n\nThe \"HP\" column shows the hit points of the Goblin to the left in the corresponding row. The Elf is in range of three targets: the Goblin above it (with 4 hit points), the Goblin to its right (with 2 hit points), and the Goblin below it (also with 2 hit points). Because three targets are in range, the ones with the lowest hit points are selected: the two Goblins with 2 hit points each (one to the right of the Elf and one below the Elf). Of those, the Goblin first in reading order (the one to the right of the Elf) is selected. The selected Goblin's hit points (2) are reduced by the Elf's attack power (3), reducing its hit points to -1, killing it.\n\nAfter attacking, the unit's turn ends. Regardless of how the unit's turn ends, the next unit in the round takes its turn. 
If all units have taken turns in this round, the round ends, and a new round begins.\n\nThe Elves look quite outnumbered. You need to determine the outcome of the battle: the number of full rounds that were completed (not counting the round in which combat ends) multiplied by the sum of the hit points of all remaining units at the moment combat ends. (Combat only ends when a unit finds no targets during its turn.)\n\nBelow is an entire sample combat. Next to each map, each row's units' hit points are listed from left to right.\n\n Initially:\n ####### \n #.G...# G(200)\n #...EG# E(200), G(200)\n #.#.#G# G(200)\n #..G#E# G(200), E(200)\n #.....# \n ####### \n\n After 1 round:\n ####### \n #..G..# G(200)\n #...EG# E(197), G(197)\n #.#G#G# G(200), G(197)\n #...#E# E(197)\n #.....# \n ####### \n\n After 2 rounds:\n ####### \n #...G.# G(200)\n #..GEG# G(200), E(188), G(194)\n #.#.#G# G(194)\n #...#E# E(194)\n #.....# \n ####### \n\n Combat ensues; eventually, the top Elf dies:\n\n After 23 rounds:\n ####### \n #...G.# G(200)\n #..G.G# G(200), G(131)\n #.#.#G# G(131)\n #...#E# E(131)\n #.....# \n ####### \n\n After 24 rounds:\n ####### \n #..G..# G(200)\n #...G.# G(131)\n #.#G#G# G(200), G(128)\n #...#E# E(128)\n #.....# \n ####### \n\n After 25 rounds:\n ####### \n #.G...# G(200)\n #..G..# G(131)\n #.#.#G# G(125)\n #..G#E# G(200), E(125)\n #.....# \n ####### \n\n After 26 rounds:\n ####### \n #G....# G(200)\n #.G...# G(131)\n #.#.#G# G(122)\n #...#E# E(122)\n #..G..# G(200)\n ####### \n\n After 27 rounds:\n ####### \n #G....# G(200)\n #.G...# G(131)\n #.#.#G# G(119)\n #...#E# E(119)\n #...G.# G(200)\n ####### \n\n After 28 rounds:\n ####### \n #G....# G(200)\n #.G...# G(131)\n #.#.#G# G(116)\n #...#E# E(113)\n #....G# G(200)\n ####### \n\n More combat ensues; eventually, the bottom Elf dies:\n\n After 47 rounds:\n ####### \n #G....# G(200)\n #.G...# G(131)\n #.#.#G# G(59)\n #...#.# \n #....G# G(200)\n ####### \n\nBefore the 48th round can finish, the top-left Goblin finds that there are no targets remaining, and so combat ends. So, the number of full rounds that were completed is 47, and the sum of the hit points of all remaining units is 200+131+59+200 = 590. 
From these, the outcome of the battle is 47 * 590 = 27730.\n\nHere are a few example summarized combats:\n\n ####### #######\n #G..#E# #...#E# E(200)\n #E#E.E# #E#...# E(197)\n #G.##.# --> #.E##.# E(185)\n #...#E# #E..#E# E(200), E(200)\n #...E.# #.....#\n ####### #######\n\n Combat ends after 37 full rounds\n Elves win with 982 total hit points left\n Outcome: 37 * 982 = 36334\n ####### ####### \n #E..EG# #.E.E.# E(164), E(197)\n #.#G.E# #.#E..# E(200)\n #E.##E# --> #E.##.# E(98)\n #G..#.# #.E.#.# E(200)\n #..E#.# #...#.# \n ####### ####### \n\n Combat ends after 46 full rounds\n Elves win with 859 total hit points left\n Outcome: 46 * 859 = 39514\n ####### ####### \n #E.G#.# #G.G#.# G(200), G(98)\n #.#G..# #.#G..# G(200)\n #G.#.G# --> #..#..# \n #G..#.# #...#G# G(95)\n #...E.# #...G.# G(200)\n ####### ####### \n\n Combat ends after 35 full rounds\n Goblins win with 793 total hit points left\n Outcome: 35 * 793 = 27755\n ####### ####### \n #.E...# #.....# \n #.#..G# #.#G..# G(200)\n #.###.# --> #.###.# \n #E#G#G# #.#.#.# \n #...#G# #G.G#G# G(98), G(38), G(200)\n ####### ####### \n\n Combat ends after 54 full rounds\n Goblins win with 536 total hit points left\n Outcome: 54 * 536 = 28944\n ######### ######### \n #G......# #.G.....# G(137)\n #.E.#...# #G.G#...# G(200), G(200)\n #..##..G# #.G##...# G(200)\n #...##..# --> #...##..# \n #...#...# #.G.#...# G(200)\n #.G...G.# #.......# \n #.....G.# #.......# \n ######### ######### \n\nCombat ends after 20 full rounds\nGoblins win with 937 total hit points left\nOutcome: 20 * 937 = 18740\nWhat is the outcome of the combat described in your puzzle input?",
"_____no_output_____"
],
[
"# Day 16: Chronal Classification \nAs you see the Elves defend their hot chocolate successfully, you go back to falling through time. This is going to become a problem.\n\nIf you're ever going to return to your own time, you need to understand how this device on your wrist works. You have a little while before you reach your next destination, and with a bit of trial and error, you manage to pull up a programming manual on the device's tiny screen.\n\nAccording to the manual, the device has four registers (numbered 0 through 3) that can be manipulated by instructions containing one of 16 opcodes. The registers start with the value 0.\n\nEvery instruction consists of four values: an opcode, two inputs (named A and B), and an output (named C), in that order. The opcode specifies the behavior of the instruction and how the inputs are interpreted. The output, C, is always treated as a register.\n\nIn the opcode descriptions below, if something says \"value A\", it means to take the number given as A literally. (This is also called an \"immediate\" value.) If something says \"register A\", it means to use the number given as A to read from (or write to) the register with that number. So, if the opcode addi adds register A and value B, storing the result in register C, and the instruction addi 0 7 3 is encountered, it would add 7 to the value contained by register 0 and store the sum in register 3, never modifying registers 0, 1, or 2 in the process.\n\nMany opcodes are similar except for how they interpret their arguments. The opcodes fall into seven general categories:\n\nAddition:\n\n- addr (add register) stores into register C the result of adding register A and register B.\n- addi (add immediate) stores into register C the result of adding register A and value B.\n\nMultiplication:\n\n- mulr (multiply register) stores into register C the result of multiplying register A and register B.\n- muli (multiply immediate) stores into register C the result of multiplying register A and value B.\n\nBitwise AND:\n\n- banr (bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.\n- bani (bitwise AND immediate) stores into register C the result of the bitwise AND of register A and value B.\n\nBitwise OR:\n\n- borr (bitwise OR register) stores into register C the result of the bitwise OR of register A and register B.\n- bori (bitwise OR immediate) stores into register C the result of the bitwise OR of register A and value B.\n\nAssignment:\n\n- setr (set register) copies the contents of register A into register C. (Input B is ignored.)\n- seti (set immediate) stores value A into register C. (Input B is ignored.)\n\nGreater-than testing:\n\n- gtir (greater-than immediate/register) sets register C to 1 if value A is greater than register B. Otherwise, register C is set to 0.\n- gtri (greater-than register/immediate) sets register C to 1 if register A is greater than value B. Otherwise, register C is set to 0.\n- gtrr (greater-than register/register) sets register C to 1 if register A is greater than register B. Otherwise, register C is set to 0.\n\nEquality testing:\n\n- eqir (equal immediate/register) sets register C to 1 if value A is equal to register B. Otherwise, register C is set to 0.\n- eqri (equal register/immediate) sets register C to 1 if register A is equal to value B. Otherwise, register C is set to 0.\n- eqrr (equal register/register) sets register C to 1 if register A is equal to register B. 
Otherwise, register C is set to 0.\n\nUnfortunately, while the manual gives the name of each opcode, it doesn't seem to indicate the number. However, you can monitor the CPU to see the contents of the registers before and after instructions are executed to try to work them out. Each opcode has a number from 0 through 15, but the manual doesn't say which is which. For example, suppose you capture the following sample:\n\n Before: [3, 2, 1, 1]\n 9 2 1 2\n After: [3, 2, 2, 1]\n\nThis sample shows the effect of the instruction 9 2 1 2 on the registers. Before the instruction is executed, register 0 has value 3, register 1 has value 2, and registers 2 and 3 have value 1. After the instruction is executed, register 2's value becomes 2.\n\nThe instruction itself, 9 2 1 2, means that opcode 9 was executed with A=2, B=1, and C=2. Opcode 9 could be any of the 16 opcodes listed above, but only three of them behave in a way that would cause the result shown in the sample:\n\n- Opcode 9 could be mulr: register 2 (which has a value of 1) times register 1 (which has a value of 2) produces 2, which matches the value stored in the output register, register 2.\n- Opcode 9 could be addi: register 2 (which has a value of 1) plus value 1 produces 2, which matches the value stored in the output register, register 2.\n- Opcode 9 could be seti: value 2 matches the value stored in the output register, register 2; the number given for B is irrelevant.\n\nNone of the other opcodes produce the result captured in the sample. Because of this, the sample above behaves like three opcodes.\n\nYou collect many of these samples (the first section of your puzzle input). The manual also includes a small test program (the second section of your puzzle input) - you can ignore it for now.\n\nIgnoring the opcode numbers, how many samples in your puzzle input behave like three or more opcodes?",
"_____no_output_____"
],
[
"# Day 17: Reservoir Research \nYou arrive in the year 18. If it weren't for the coat you got in 1018, you would be very cold: the North Pole base hasn't even been constructed.\n\nRather, it hasn't been constructed yet. The Elves are making a little progress, but there's not a lot of liquid water in this climate, so they're getting very dehydrated. Maybe there's more underground?\n\nYou scan a two-dimensional vertical slice of the ground nearby and discover that it is mostly sand with veins of clay. The scan only provides data with a granularity of square meters, but it should be good enough to determine how much water is trapped there. In the scan, x represents the distance to the right, and y represents the distance down. There is also a spring of water near the surface at x=500, y=0. The scan identifies which square meters are clay (your puzzle input).\n\nFor example, suppose your scan shows the following veins of clay:\n\n x=495, y=2..7\n y=7, x=495..501\n x=501, y=3..7\n x=498, y=2..4\n x=506, y=1..2\n x=498, y=10..13\n x=504, y=10..13\n y=13, x=498..504\n \nRendering clay as #, sand as ., and the water spring as +, and with x increasing to the right and y increasing downward, this becomes:\n\n 44444455555555\n 99999900000000\n 45678901234567\n 0 ......+.......\n 1 ............#.\n 2 .#..#.......#.\n 3 .#..#..#......\n 4 .#..#..#......\n 5 .#.....#......\n 6 .#.....#......\n 7 .#######......\n 8 ..............\n 9 ..............\n 10 ....#.....#...\n 11 ....#.....#...\n 12 ....#.....#...\n 13 ....#######...\n\nThe spring of water will produce water forever. Water can move through sand, but is blocked by clay. Water always moves down when possible, and spreads to the left and right otherwise, filling space that has clay on both sides and falling out otherwise.\n\nFor example, if five squares of water are created, they will flow downward until they reach the clay and settle there. Water that has come to rest is shown here as ~, while sand through which water has passed (but which is now dry again) is shown as |:\n\n ......+.......\n ......|.....#.\n .#..#.|.....#.\n .#..#.|#......\n .#..#.|#......\n .#....|#......\n .#~~~~~#......\n .#######......\n ..............\n ..............\n ....#.....#...\n ....#.....#...\n ....#.....#...\n ....#######...\n\nTwo squares of water can't occupy the same location. If another five squares of water are created, they will settle on the first five, filling the clay reservoir a little more:\n\n ......+.......\n ......|.....#.\n .#..#.|.....#.\n .#..#.|#......\n .#..#.|#......\n .#~~~~~#......\n .#~~~~~#......\n .#######......\n ..............\n ..............\n ....#.....#...\n ....#.....#...\n ....#.....#...\n ....#######...\n\nWater pressure does not apply in this scenario. If another four squares of water are created, they will stay on the right side of the barrier, and no water will reach the left side:\n\n ......+.......\n ......|.....#.\n .#..#.|.....#.\n .#..#~~#......\n .#..#~~#......\n .#~~~~~#......\n .#~~~~~#......\n .#######......\n ..............\n ..............\n ....#.....#...\n ....#.....#...\n ....#.....#...\n ....#######...\n\nAt this point, the top reservoir overflows. 
While water can reach the tiles above the surface of the water, it cannot settle there, and so the next five squares of water settle like this:\n\n ......+.......\n ......|.....#.\n .#..#||||...#.\n .#..#~~#|.....\n .#..#~~#|.....\n .#~~~~~#|.....\n .#~~~~~#|.....\n .#######|.....\n ........|.....\n ........|.....\n ....#...|.#...\n ....#...|.#...\n ....#~~~~~#...\n ....#######...\n\nNote especially the leftmost |: the new squares of water can reach this tile, but cannot stop there. Instead, eventually, they all fall to the right and settle in the reservoir below.\n\nAfter 10 more squares of water, the bottom reservoir is also full:\n\n ......+.......\n ......|.....#.\n .#..#||||...#.\n .#..#~~#|.....\n .#..#~~#|.....\n .#~~~~~#|.....\n .#~~~~~#|.....\n .#######|.....\n ........|.....\n ........|.....\n ....#~~~~~#...\n ....#~~~~~#...\n ....#~~~~~#...\n ....#######...\n\nFinally, while there is nowhere left for the water to settle, it can reach a few more tiles before overflowing beyond the bottom of the scanned data:\n\n ......+....... (line not counted: above minimum y value)\n ......|.....#.\n .#..#||||...#.\n .#..#~~#|.....\n .#..#~~#|.....\n .#~~~~~#|.....\n .#~~~~~#|.....\n .#######|.....\n ........|.....\n ...|||||||||..\n ...|#~~~~~#|..\n ...|#~~~~~#|..\n ...|#~~~~~#|..\n ...|#######|..\n ...|.......|.. (line not counted: below maximum y value)\n ...|.......|.. (line not counted: below maximum y value)\n ...|.......|.. (line not counted: below maximum y value)\n\nHow many tiles can be reached by the water? To prevent counting forever, ignore tiles with a y coordinate smaller than the smallest y coordinate in your scan data or larger than the largest one. Any x coordinate is valid. In this example, the lowest y coordinate given is 1, and the highest is 13, causing the water spring (in row 0) and the water falling off the bottom of the render (in rows 14 through infinity) to be ignored.\n\nSo, in the example above, counting both water at rest (~) and other sand tiles the water can hypothetically reach (|), the total number of tiles the water can reach is 57.\n\nHow many tiles can the water reach within the range of y values in your scan?",
"_____no_output_____"
],
[
"# Day 18: Settlers of The North Pole \nOn the outskirts of the North Pole base construction project, many Elves are collecting lumber.\n\nThe lumber collection area is 50 acres by 50 acres; each acre can be either open ground (.), trees (|), or a lumberyard (#). You take a scan of the area (your puzzle input).\n\nStrange magic is at work here: each minute, the landscape looks entirely different. In exactly one minute, an open acre can fill with trees, a wooded acre can be converted to a lumberyard, or a lumberyard can be cleared to open ground (the lumber having been sent to other projects).\n\nThe change to each acre is based entirely on the contents of that acre as well as the number of open, wooded, or lumberyard acres adjacent to it at the start of each minute. Here, \"adjacent\" means any of the eight acres surrounding that acre. (Acres on the edges of the lumber collection area might have fewer than eight adjacent acres; the missing acres aren't counted.)\n\nIn particular:\n\n- An open acre will become filled with trees if three or more adjacent acres contained trees. Otherwise, nothing happens.\n- An acre filled with trees will become a lumberyard if three or more adjacent acres were lumberyards. Otherwise, nothing happens.\n- An acre containing a lumberyard will remain a lumberyard if it was adjacent to at least one other lumberyard and at least one acre containing trees. Otherwise, it becomes open.\n\nThese changes happen across all acres simultaneously, each of them using the state of all acres at the beginning of the minute and changing to their new form by the end of that same minute. Changes that happen during the minute don't affect each other.\n\nFor example, suppose the lumber collection area is instead only 10 by 10 acres with this initial configuration:\n\n Initial state:\n .#.#...|#.\n .....#|##|\n .|..|...#.\n ..|#.....#\n #.#|||#|#|\n ...#.||...\n .|....|...\n ||...#|.#|\n |.||||..|.\n ...#.|..|.\n\n After 1 minute:\n .......##.\n ......|###\n .|..|...#.\n ..|#||...#\n ..##||.|#|\n ...#||||..\n ||...|||..\n |||||.||.|\n ||||||||||\n ....||..|.\n\n After 2 minutes:\n .......#..\n ......|#..\n .|.|||....\n ..##|||..#\n ..###|||#|\n ...#|||||.\n |||||||||.\n ||||||||||\n ||||||||||\n .|||||||||\n\n After 3 minutes:\n .......#..\n ....|||#..\n .|.||||...\n ..###|||.#\n ...##|||#|\n .||##|||||\n ||||||||||\n ||||||||||\n ||||||||||\n ||||||||||\n\n After 4 minutes:\n .....|.#..\n ...||||#..\n .|.#||||..\n ..###||||#\n ...###||#|\n |||##|||||\n ||||||||||\n ||||||||||\n ||||||||||\n ||||||||||\n\n After 5 minutes:\n ....|||#..\n ...||||#..\n .|.##||||.\n ..####|||#\n .|.###||#|\n |||###||||\n ||||||||||\n ||||||||||\n ||||||||||\n ||||||||||\n\n After 6 minutes:\n ...||||#..\n ...||||#..\n .|.###|||.\n ..#.##|||#\n |||#.##|#|\n |||###||||\n ||||#|||||\n ||||||||||\n ||||||||||\n ||||||||||\n\n After 7 minutes:\n ...||||#..\n ..||#|##..\n .|.####||.\n ||#..##||#\n ||##.##|#|\n |||####|||\n |||###||||\n ||||||||||\n ||||||||||\n ||||||||||\n\n After 8 minutes:\n ..||||##..\n ..|#####..\n |||#####|.\n ||#...##|#\n ||##..###|\n ||##.###||\n |||####|||\n ||||#|||||\n ||||||||||\n ||||||||||\n\n After 9 minutes:\n ..||###...\n .||#####..\n ||##...##.\n ||#....###\n |##....##|\n ||##..###|\n ||######||\n |||###||||\n ||||||||||\n ||||||||||\n\n After 10 minutes:\n .||##.....\n ||###.....\n ||##......\n |##.....##\n |##.....##\n |##....##|\n ||##.####|\n ||#####|||\n ||||#|||||\n ||||||||||\n\nAfter 10 minutes, there are 37 wooded acres and 31 lumberyards. 
Multiplying the number of wooded acres by the number of lumberyards gives the total resource value after ten minutes: 37 * 31 = 1147.\n\nWhat will the total resource value of the lumber collection area be after 10 minutes?",
"_____no_output_____"
],
[
"# Day 19: Go With The Flow \nWith the Elves well on their way constructing the North Pole base, you turn your attention back to understanding the inner workings of programming the device.\n\nYou can't help but notice that the device's opcodes don't contain any flow control like jump instructions. The device's manual goes on to explain:\n\n\"In programs where flow control is required, the instruction pointer can be bound to a register so that it can be manipulated directly. This way, setr/seti can function as absolute jumps, addr/addi can function as relative jumps, and other opcodes can cause truly fascinating effects.\"\n\nThis mechanism is achieved through a declaration like #ip 1, which would modify register 1 so that accesses to it let the program indirectly access the instruction pointer itself. To compensate for this kind of binding, there are now six registers (numbered 0 through 5); the five not bound to the instruction pointer behave as normal. Otherwise, the same rules apply as the last time you worked with this device.\n\nWhen the instruction pointer is bound to a register, its value is written to that register just before each instruction is executed, and the value of that register is written back to the instruction pointer immediately after each instruction finishes execution. Afterward, move to the next instruction by adding one to the instruction pointer, even if the value in the instruction pointer was just updated by an instruction. (Because of this, instructions must effectively set the instruction pointer to the instruction before the one they want executed next.)\n\nThe instruction pointer is 0 during the first instruction, 1 during the second, and so on. If the instruction pointer ever causes the device to attempt to load an instruction outside the instructions defined in the program, the program instead immediately halts. The instruction pointer starts at 0.\n\nIt turns out that this new information is already proving useful: the CPU in the device is not very powerful, and a background process is occupying most of its time. You dump the background process' declarations and instructions to a file (your puzzle input), making sure to use the names of the opcodes rather than the numbers.\n\nFor example, suppose you have the following program:\n\n #ip 0\n seti 5 0 1\n seti 6 0 2\n addi 0 1 0\n addr 1 2 3\n setr 1 0 0\n seti 8 0 4\n seti 9 0 5\n \nWhen executed, the following instructions are executed. Each line contains the value of the instruction pointer at the time the instruction started, the values of the six registers before executing the instructions (in square brackets), the instruction itself, and the values of the six registers after executing the instruction (also in square brackets).\n\n ip=0 [0, 0, 0, 0, 0, 0] seti 5 0 1 [0, 5, 0, 0, 0, 0]\n ip=1 [1, 5, 0, 0, 0, 0] seti 6 0 2 [1, 5, 6, 0, 0, 0]\n ip=2 [2, 5, 6, 0, 0, 0] addi 0 1 0 [3, 5, 6, 0, 0, 0]\n ip=4 [4, 5, 6, 0, 0, 0] setr 1 0 0 [5, 5, 6, 0, 0, 0]\n ip=6 [6, 5, 6, 0, 0, 0] seti 9 0 5 [6, 5, 6, 0, 0, 9]\n\nIn detail, when running this program, the following events occur:\n\n- The first line (#ip 0) indicates that the instruction pointer should be bound to register 0 in this program. This is not an instruction, and so the value of the instruction pointer does not change during the processing of this line.\n- The instruction pointer contains 0, and so the first instruction is executed (seti 5 0 1). 
It updates register 0 to the current instruction pointer value (0), sets register 1 to 5, sets the instruction pointer to the value of register 0 (which has no effect, as the instruction did not modify register 0), and then adds one to the instruction pointer.\n- The instruction pointer contains 1, and so the second instruction, seti 6 0 2, is executed. This is very similar to the instruction before it: 6 is stored in register 2, and the instruction pointer is left with the value 2.\n- The instruction pointer is 2, which points at the instruction addi 0 1 0. This is like a relative jump: the value of the instruction pointer, 2, is loaded into register 0. Then, addi finds the result of adding the value in register 0 and the value 1, storing the result, 3, back in register 0. Register 0 is then copied back to the instruction pointer, which will cause it to end up 1 larger than it would have otherwise and skip the next instruction (addr 1 2 3) entirely. Finally, 1 is added to the instruction pointer.\n- The instruction pointer is 4, so the instruction setr 1 0 0 is run. This is like an absolute jump: it copies the value contained in register 1, 5, into register 0, which causes it to end up in the instruction pointer. The instruction pointer is then incremented, leaving it at 6.\n- The instruction pointer is 6, so the instruction seti 9 0 5 stores 9 into register 5. The instruction pointer is incremented, causing it to point outside the program, and so the program ends.\n\nWhat value is left in register 0 when the background process halts?",
"_____no_output_____"
],
[
"# Day 20: A Regular Map \nWhile you were learning about instruction pointers, the Elves made considerable progress. When you look up, you discover that the North Pole base construction project has completely surrounded you.\n\nThe area you are in is made up entirely of rooms and doors. The rooms are arranged in a grid, and rooms only connect to adjacent rooms when a door is present between them.\n\nFor example, drawing rooms as ., walls as #, doors as | or -, your current position as X, and where north is up, the area you're in might look like this:\n\n #####\n #.|.#\n #-###\n #.|X#\n #####\nYou get the attention of a passing construction Elf and ask for a map. \"I don't have time to draw out a map of this place - it's huge. Instead, I can give you directions to every room in the facility!\" He writes down some directions on a piece of parchment and runs off. In the example above, the instructions might have been ^WNE$, a regular expression or \"regex\" (your puzzle input).\n\nThe regex matches routes (like WNE for \"west, north, east\") that will take you from your current room through various doors in the facility. In aggregate, the routes will take you through every door in the facility at least once; mapping out all of these routes will let you build a proper map and find your way around.\n\n^ and $ are at the beginning and end of your regex; these just mean that the regex doesn't match anything outside the routes it describes. (Specifically, ^ matches the start of the route, and $ matches the end of it.) These characters will not appear elsewhere in the regex.\n\nThe rest of the regex matches various sequences of the characters N (north), S (south), E (east), and W (west). In the example above, ^WNE$ matches only one route, WNE, which means you can move west, then north, then east from your current position. Sequences of letters like this always match that exact route in the same order.\n\nSometimes, the route can branch. A branch is given by a list of options separated by pipes (|) and wrapped in parentheses. So, ^N(E|W)N$ contains a branch: after going north, you must choose to go either east or west before finishing your route by going north again. By tracing out the possible routes after branching, you can determine where the doors are and, therefore, where the rooms are in the facility.\n\nFor example, consider this regex: ^ENWWW(NEEE|SSE(EE|N))$\n\nThis regex begins with ENWWW, which means that from your current position, all routes must begin by moving east, north, and then west three times, in that order. After this, there is a branch. Before you consider the branch, this is what you know about the map so far, with doors you aren't sure about marked with a ?:\n\n #?#?#?#?#\n ?.|.|.|.?\n #?#?#?#-#\n ?X|.?\n #?#?#\n\nAfter this point, there is (NEEE|SSE(EE|N)). This gives you exactly two options: NEEE and SSE(EE|N). By following NEEE, the map now looks like this:\n\n #?#?#?#?#\n ?.|.|.|.?\n #-#?#?#?#\n ?.|.|.|.?\n #?#?#?#-#\n ?X|.?\n #?#?#\n\nNow, only SSE(EE|N) remains. Because it is in the same parenthesized group as NEEE, it starts from the same room NEEE started in. It states that starting from that point, there exist doors which will allow you to move south twice, then east; this ends up at another branch. After that, you can either move east twice or north once. 
This information fills in the rest of the doors:\n\n #?#?#?#?#\n ?.|.|.|.?\n #-#?#?#?#\n ?.|.|.|.?\n #-#?#?#-#\n ?.?.?X|.?\n #-#-#?#?#\n ?.|.|.|.?\n #?#?#?#?#\n\nOnce you've followed all possible routes, you know the remaining unknown parts are all walls, producing a finished map of the facility:\n\n #########\n #.|.|.|.#\n #-#######\n #.|.|.|.#\n #-#####-#\n #.#.#X|.#\n #-#-#####\n #.|.|.|.#\n #########\n\nSometimes, a list of options can have an empty option, like (NEWS|WNSE|). This means that routes at this point could effectively skip the options in parentheses and move on immediately. For example, consider this regex and the corresponding map:\n\n ^ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN$\n\n ###########\n #.|.#.|.#.#\n #-###-#-#-#\n #.|.|.#.#.#\n #-#####-#-#\n #.#.#X|.#.#\n #-#-#####-#\n #.#.|.|.|.#\n #-###-###-#\n #.|.|.#.|.#\n ###########\n \nThis regex has one main route which, at three locations, can optionally include additional detours and be valid: (NEWS|), (WNSE|), and (SWEN|). Regardless of which option is taken, the route continues from the position it is left at after taking those steps. So, for example, this regex matches all of the following routes (and more that aren't listed here):\n\n- ENNWSWWSSSEENEENNN\n- ENNWSWWNEWSSSSEENEENNN\n- ENNWSWWNEWSSSSEENEESWENNNN\n- ENNWSWWSSSEENWNSEEENNN\nBy following the various routes the regex matches, a full map of all of the doors and rooms in the facility can be assembled.\n\nTo get a sense for the size of this facility, you'd like to determine which room is furthest from you: specifically, you would like to find the room for which the shortest path to that room would require passing through the most doors.\n\n- In the first example (^WNE$), this would be the north-east corner 3 doors away.\n- In the second example (^ENWWW(NEEE|SSE(EE|N))$), this would be the south-east corner 10 doors away.\n- In the third example (^ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN$), this would be the north-east corner 18 doors away.\nHere are a few more examples:\n\nRegex: ^ESSWWN(E|NNENN(EESS(WNSE|)SSS|WWWSSSSE(SW|NNNE)))$\n\n Furthest room requires passing 23 doors\n\n #############\n #.|.|.|.|.|.#\n #-#####-###-#\n #.#.|.#.#.#.#\n #-#-###-#-#-#\n #.#.#.|.#.|.#\n #-#-#-#####-#\n #.#.#.#X|.#.#\n #-#-#-###-#-#\n #.|.#.|.#.#.#\n ###-#-###-#-#\n #.|.#.|.|.#.#\n #############\n\n\nRegex: ^WSSEESWWWNW(S|NENNEEEENN(ESSSSW(NWSW|SSEN)|WSWWN(E|WWS(E|SS))))$\n\n Furthest room requires passing 31 doors\n\n ###############\n #.|.|.|.#.|.|.#\n #-###-###-#-#-#\n #.|.#.|.|.#.#.#\n #-#########-#-#\n #.#.|.|.|.|.#.#\n #-#-#########-#\n #.#.#.|X#.|.#.#\n ###-#-###-#-#-#\n #.|.#.#.|.#.|.#\n #-###-#####-###\n #.|.#.|.|.#.#.#\n #-#-#####-#-#-#\n #.#.|.|.|.#.|.#\n ###############\n\nWhat is the largest number of doors you would be required to pass through to reach a room? That is, find the room for which the shortest path from your starting location to that room would require passing through the most doors; what is the fewest doors you can pass through to reach it?",
"_____no_output_____"
],
[
"# Day 21: Chronal Conversion \nYou should have been watching where you were going, because as you wander the new North Pole base, you trip and fall into a very deep hole!\n\nJust kidding. You're falling through time again.\n\nIf you keep up your current pace, you should have resolved all of the temporal anomalies by the next time the device activates. Since you have very little interest in browsing history in 500-year increments for the rest of your life, you need to find a way to get back to your present time.\n\nAfter a little research, you discover two important facts about the behavior of the device:\n\nFirst, you discover that the device is hard-wired to always send you back in time in 500-year increments. Changing this is probably not feasible.\n\nSecond, you discover the activation system (your puzzle input) for the time travel module. Currently, it appears to run forever without halting.\n\nIf you can cause the activation system to halt at a specific moment, maybe you can make the device send you so far back in time that you cause an integer underflow in time itself and wrap around back to your current time!\n\nThe device executes the program as specified in manual section one and manual section two.\n\nYour goal is to figure out how the program works and cause it to halt. You can only control register 0; every other register begins at 0 as usual.\n\nBecause time travel is a dangerous activity, the activation system begins with a few instructions which verify that bitwise AND (via bani) does a numeric operation and not an operation as if the inputs were interpreted as strings. If the test fails, it enters an infinite loop re-running the test instead of allowing the program to execute normally. If the test passes, the program continues, and assumes that all other bitwise operations (banr, bori, and borr) also interpret their inputs as numbers. (Clearly, the Elves who wrote this system were worried that someone might introduce a bug while trying to emulate this system with a scripting language.)\n\nWhat is the lowest non-negative integer value for register 0 that causes the program to halt after executing the fewest instructions? (Executing the same instruction multiple times counts as multiple instructions executed.)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7c91ea9fbeadb6be2ec117315c8c3ce3b83213b | 24,375 | ipynb | Jupyter Notebook | 003 - Machine Learing/Logit_Regression_Iris_Decision_Boundary.ipynb | hroatti/Machine_Learning | 73a16d85c6cc3e7f89bc3765beec0170ead9fc9e | [
"MIT"
] | null | null | null | 003 - Machine Learing/Logit_Regression_Iris_Decision_Boundary.ipynb | hroatti/Machine_Learning | 73a16d85c6cc3e7f89bc3765beec0170ead9fc9e | [
"MIT"
] | null | null | null | 003 - Machine Learing/Logit_Regression_Iris_Decision_Boundary.ipynb | hroatti/Machine_Learning | 73a16d85c6cc3e7f89bc3765beec0170ead9fc9e | [
"MIT"
] | null | null | null | 203.125 | 21,652 | 0.914626 | [
[
[
"from sklearn import datasets\niris = datasets.load_iris()\n\nlist(iris.keys())",
"_____no_output_____"
],
[
"import numpy as np\nX = iris[\"data\"][:, 3:] # petal width\ny = (iris[\"target\"] == 2).astype(np.int) # 1 if Iris virginica, else 0",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression\n\nlog_reg = LogisticRegression()\nlog_reg.fit(X, y)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\n%matplotlib inline\n\nX_new = np.linspace(0, 3, 1000).reshape(-1, 1)\ny_proba = log_reg.predict_proba(X_new)\nplt.plot(X_new, y_proba[:, 1], \"g-\", label=\"Iris virginica\")\nplt.plot(X_new, y_proba[:, 0], \"b--\", label=\"Not Iris virginica\")\n# + more Matplotlib code to make the image look pretty\nplt.legend()\nplt.xlabel('Petal width (cm)')\nplt.ylabel('Probability')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7c92ec6d93081a34c7a81679d296cfa36f4a0bf | 205,073 | ipynb | Jupyter Notebook | Part II - Sentiment Analysis Classifications - Review and Comparison.ipynb | JackShen1/sentimento | 7a5cc6559aac241ca6f7baaad05cec38faa1a315 | [
"MIT"
] | 2 | 2021-03-27T11:33:12.000Z | 2021-03-27T19:39:44.000Z | Part II - Sentiment Analysis Classifications - Review and Comparison.ipynb | JackShen1/sentimento-en | 7a5cc6559aac241ca6f7baaad05cec38faa1a315 | [
"MIT"
] | null | null | null | Part II - Sentiment Analysis Classifications - Review and Comparison.ipynb | JackShen1/sentimento-en | 7a5cc6559aac241ca6f7baaad05cec38faa1a315 | [
"MIT"
] | 1 | 2021-05-05T08:31:40.000Z | 2021-05-05T08:31:40.000Z | 151.905926 | 50,824 | 0.870115 | [
[
[
"<h1 align=\"center\">PART II</h1>\n<h1 align=\"center\">Sentiment Analysis Classifications - Review and Comparison</h1>\n\nFirst, we needed to create vector words. For simplicity, we used a pre-trained model.\n\nGoogle was able to teach the Word2Vec model on a massive Google News dataset that contained over 100 billion different words! Google has created [3 million vector words](https://code.google.com/archive/p/word2vec/#Pre-trained_word_and_phrase_vectors) from this model, each with a dimension of 300.\n\nIdeally, we would use these vectors, but because the vector-word matrix is quite large (3.6 GB), we used a much more manageable matrix, which was trained using [GloVe](https://nlp.stanford.edu/projects/glove/), with a similar model of vector word generation. This matrix contains 400,000 vector words, each with a dimension of 50. You can also download model [here](https://www.kaggle.com/anindya2906/glove6b?select=glove.6B.50d.txt).\n\n#### How word2vec works:\n\nThe idea behind word2vec is that:\n\n Take a 3 layer neural network. (1 input layer + 1 hidden layer + 1 output layer)\n Feed it a word and train it to predict its neighbouring word.\n Remove the last (output layer) and keep the input and hidden layer.\n Now, input a word from within the vocabulary. The output given at the hidden layer is the ‘word embedding’ of the input word.\n \nTwo popular examples of methods of learning word embeddings from text include:\n\n Word2Vec\n GloVe\n\nTo get started, let's download the necessary libraries:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport pickle\nimport gensim, logging\nimport gensim.models.keyedvectors as word2vec\nimport matplotlib.pyplot as plt\n\n%matplotlib inline",
"_____no_output_____"
]
],
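[
[
"To make the word2vec idea described above a little more concrete, here is a small illustrative sketch (not part of the original pipeline) that trains a tiny skip-gram model on a few toy sentences with gensim. The `size` and `iter` arguments assume the gensim 3.x API used elsewhere in this notebook; in gensim 4.x they are called `vector_size` and `epochs`.",
"_____no_output_____"
]
],
[
[
"# Illustrative only: train a tiny skip-gram word2vec model on toy sentences (gensim 3.x API).\nfrom gensim.models import Word2Vec\n\ntoy_sentences = [\n    ['the', 'movie', 'was', 'great'],\n    ['the', 'movie', 'was', 'terrible'],\n    ['i', 'loved', 'this', 'book'],\n    ['i', 'hated', 'this', 'book']\n]\n\n# sg=1 selects the skip-gram architecture described above (predict neighbouring words)\ntoy_model = Word2Vec(toy_sentences, size=50, window=2, min_count=1, sg=1, iter=50)\nprint(toy_model.wv['movie'].shape)  # one 50-dimensional vector per word",
"_____no_output_____"
]
],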
[
[
"Also let's write a style for alignment in the middle of all graphs, images, etc:",
"_____no_output_____"
]
],
[
[
"from IPython.core.display import HTML\nHTML(\"\"\"\n<style>\n.output_png {\n display: table-cell;\n text-align: center;\n vertical-align: middle;\n}\n</style>\n\"\"\")",
"_____no_output_____"
]
],
[
[
"Next, we will load the sample data we processed in the previous part:",
"_____no_output_____"
]
],
[
[
"with open('documents.pql', 'rb') as f:\n docs = pickle.load(f)",
"_____no_output_____"
],
[
"print(\"Number of documents:\", len(docs))",
"Number of documents: 38544\n"
]
],
[
[
"Now we will load our glove model in word2vec format. Because the GloVe dump from Stanford's site is slightly different from the word2vec format. You can convert a GloVe file to word2vec format using the following command in your console:\n\n`python -m gensim.scripts.glove2word2vec --input model/glove.6B.50d.txt --output model/glove.6B.50d.w2vformat.txt`\n\nAfter that you can delete original GloVe model.\n\nNext operation may take some time, as the model contains 400 000 words, so we will get a 400 000 x 50 embedding matrix that contains all the values of the word vectors.",
"_____no_output_____"
]
],
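[
[
"If you prefer to stay inside Python, the same conversion can be done programmatically. This is a small sketch assuming gensim 3.x, where the `glove2word2vec` helper is available; it only needs to be run once and uses the same file paths as the console command above.",
"_____no_output_____"
]
],
[
[
"# Optional, illustrative alternative to the console command above (gensim 3.x).\n# glove2word2vec writes a word2vec-format copy of the GloVe file and returns (num_vectors, dimensions).\nfrom gensim.scripts.glove2word2vec import glove2word2vec\n\nglove2word2vec('model/glove.6B.50d.txt', 'model/glove.6B.50d.w2vformat.txt')",
"_____no_output_____"
]
],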
[
[
"model = word2vec.KeyedVectors.load_word2vec_format('model/glove.6B.50d.w2vformat.txt', binary=False)",
"_____no_output_____"
]
],
[
[
"Now let's get a list of all the words from our dictionary:",
"_____no_output_____"
]
],
[
[
"words = list(model.vocab)",
"_____no_output_____"
]
],
[
[
"Just to make sure everything is loaded correctly, we can look at the dimensions of the dictionary list and the embedding matrix:",
"_____no_output_____"
]
],
[
[
"print(words[:50], \"\\n\\nTotal words:\", len(words), \"\\n\\nWord-Vectors shape:\", model.vectors.shape)",
"['the', ',', '.', 'of', 'to', 'and', 'in', 'a', '\"', \"'s\", 'for', '-', 'that', 'on', 'is', 'was', 'said', 'with', 'he', 'as', 'it', 'by', 'at', '(', ')', 'from', 'his', \"''\", '``', 'an', 'be', 'has', 'are', 'have', 'but', 'were', 'not', 'this', 'who', 'they', 'had', 'i', 'which', 'will', 'their', ':', 'or', 'its', 'one', 'after'] \n\nTotal words: 400000 \n\nWord-Vectors shape: (400000, 50)\n"
]
],
[
[
"We can also find a word like \"football\" in our word list and then access the corresponding vector through the embedding matrix:",
"_____no_output_____"
]
],
[
[
"print(model['football'])",
"[-1.8209 0.70094 -1.1403 0.34363 -0.42266 -0.92479 -1.3942\n 0.28512 -0.78416 -0.52579 0.89627 0.35899 -0.80087 -0.34636\n 1.0854 -0.087046 0.63411 1.1429 -1.6264 0.41326 -1.1283\n -0.16645 0.17424 0.99585 -0.81838 -1.7724 0.078281 0.13382\n -0.59779 -0.45068 2.5474 1.0693 -0.27017 -0.75646 0.24757\n 1.0261 0.11329 0.17668 -0.23257 -1.1561 -0.10665 -0.25377\n -0.65102 0.32393 -0.58262 0.88137 -0.13465 0.96903 -0.076259\n -0.59909 ]\n"
]
],
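[
[
"Besides looking up a single vector, we can also ask which words are nearest to a given word in the embedding space. `most_similar` is a standard gensim `KeyedVectors` method; the call below is purely illustrative.",
"_____no_output_____"
]
],
[
[
"# Illustrative: the five words whose vectors are closest (by cosine similarity) to 'football'.\nprint(model.most_similar('football', topn=5))",
"_____no_output_____"
]
],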
[
[
"<h2 align=\"center\">Word Average Embedding Model</h2>\n\nWell, let's start analyzing our vectors. Our first approach will be the **word average embedding model**. \n\nThe essence of this naive approach is to take the average of all word vectors from a sentence to get one 50-dimensional vector that represents the tone of the whole sentence that we feed the model and try to get some quick result.\n\nWe didn't have to put a try/except, but even though I cleaned up our sample, there were a couple of words left after the processing that needed to be searched for and removed.",
"_____no_output_____"
]
],
[
[
"def sent_embed(words, docs):\n x_sent_embed, y_sent_embed = [], []\n count_words, count_non_words = 0, 0 \n \n # recover the embedding of each sentence with the average of the vector that composes it\n # sent - sentence, state - state of the sentence (pos/neg)\n for sent, state in docs:\n # average embedding of all words in a sentence\n sent_embed = []\n for word in sent:\n try:\n # if word is present in the dictionary - add its vector representation\n count_words += 1\n sent_embed.append(model[word])\n except KeyError:\n # if word is not in the dictionary - add a zero vector\n count_non_words += 1\n sent_embed.append([0] * 50)\n \n # add a sentence vector to the list\n x_sent_embed.append(np.mean(sent_embed, axis=0).tolist())\n \n # add a label to y_sent_embed\n if state == 'pos': y_sent_embed.append(1)\n elif state == 'neg': y_sent_embed.append(0)\n \n print(count_non_words, \"out of\", count_words, \"words were not found in the vocabulary.\")\n \n return x_sent_embed, y_sent_embed",
"_____no_output_____"
],
[
"x, y = sent_embed(words, docs)",
"30709 out of 1802696 words were not found in the vocabulary.\n"
]
],
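[
[
"For later experiments it can be handy to embed a single new sentence with exactly the same averaging scheme. The helper below is a small sketch that is not part of the original pipeline; it assumes the sentence has already been tokenized and cleaned the same way as in Part I.",
"_____no_output_____"
]
],
[
[
"# Sketch: average-embed one new (already tokenized) sentence, mirroring sent_embed above.\ndef embed_sentence(tokens, model, dim=50):\n    vectors = []\n    for word in tokens:\n        try:\n            vectors.append(model[word])  # known word -> its GloVe vector\n        except KeyError:\n            vectors.append([0] * dim)    # unknown word -> zero vector\n    return np.mean(vectors, axis=0)\n\nprint(embed_sentence(['love', 'this', 'book'], model).shape)",
"_____no_output_____"
]
],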
[
[
"<h2 align=\"center\">Cosine Similarity</h2>\n\nTo measure the similarity of 2 words, we need a way to measure the degree of similarity between 2 embedding vectors for these 2 words. Given 2 vectors $u$ and $v$, cosine similarity is determined as follows:\n\n$$\\text{cosine_similarity(u, v)} = \\frac {u . v} {||u||_2 ||v||_2} = cos(\\theta)$$\n\nwhere: \n\n* $u.v$ - dot product (or inner product) of two vectors;\n\n* $||u||_2$ - norm (or length) of the vector $u$;\n \n * **Note**: norm of $u$ is defined as $ ||u||_2 = \\sqrt{\\sum_{i=1}^{n} u_i^2}$)\n\n* $\\theta$ is the angle between $u$ and $v$. \n\nThis similarity depends on the angle between $u$ and $v$. If $u$ and $v$ are very similar, their cosine similarity will be close to 1; if they are dissimilar, the cosine similarity will take a smaller value. \n\n**`cosine_similarity()`** is a method that used to estimate the similarity between word vectors.",
"_____no_output_____"
]
],
[
[
"def cosine_similarity(u, v):\n \"\"\"\n Cosine similarity reflects the degree of similariy between u and v\n \n Arguments:\n u -- a word vector of shape (n,) \n v -- a word vector of shape (n,)\n\n Returns:\n cosine_similarity -- the cosine similarity between u and v defined by the formula above.\n \"\"\"\n \n distance = 0.0\n \n # compute the dot product between u and v\n dot = np.dot(u,v)\n \n # compute the L2 norm of u\n norm_u = np.sqrt(sum(u**2))\n \n # Compute the L2 norm of v\n norm_v = np.sqrt(sum(v**2))\n \n # Compute the cosine similarity defined by formula above\n cosine_similarity = dot/(norm_u*norm_v)\n \n return cosine_similarity",
"_____no_output_____"
]
],
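[
[
"As a quick sanity check (not in the original notebook), we can compare our implementation against scikit-learn's `cosine_similarity`, which expects 2-D arrays and returns a similarity matrix. Both should give the same number.",
"_____no_output_____"
]
],
[
[
"# Sanity check: our cosine_similarity vs. scikit-learn's implementation on two sentence vectors.\nfrom sklearn.metrics.pairwise import cosine_similarity as sk_cosine\n\nu, v = np.array(x[0]), np.array(x[1])\nprint(cosine_similarity(u, v))                               # our implementation\nprint(sk_cosine(u.reshape(1, -1), v.reshape(1, -1))[0, 0])   # scikit-learn, same value",
"_____no_output_____"
]
],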
[
[
"Let's check the cosine similarity on 2 negative sentences:",
"_____no_output_____"
]
],
[
[
"print(\"Sentence #5: \", docs[5], \"\\n\\nSentence #7: \", docs[7])\nprint(\"\\nSentence Embedding #5: \", x[5], \"\\n\\nSentence Embedding #7: \", x[7])",
"Sentence #5: (['eager', 'read', 'book', 'reading', 'left', 'completely', 'flat', 'rest', 'iceberg', 'hardly', 'smith', 'barely', 'give', 'could', 'reading', 'sport', 'illustrated', 'minnesota', 'sport', 'page', 'admits', 'football', 'majority', 'book', 'leisurely', 'describes', 'basic', 'part', 'football', 'career', 'book', 'touted', 'full', 'complete', 'story', 'learned', 'watching', 'stimpy', 'draft', 'type', 'meaty', 'story', 'want', 'read', 'telling', 'inside', 'detail', 'smith', 'deal', 'people', 'wanted', 'friend', 'girlfriend', 'star', 'football', 'player', 'friend', 'could', 'trust', 'need', 'trust', 'thought', 'enter', 'mind', 'look', 'hire', 'agent', 'goofing', 'missing', 'class', 'summer', 'covered', 'story', 'time', 'ohio', 'state', 'begin', 'using', 'football', 'smart', 'enough', 'study', 'break', 'coaching', 'process', 'mentally', 'challenging', 'happened', 'plan', 'medical', 'school', 'know', 'story', 'football', 'part', 'iceberg', 'already', 'documented', 'also', 'felt', 'smith', 'wrote', 'book', 'defending', 'america', 'attacking', 'feel', 'need', 'explain', 'paid', 'much', 'highly', 'paid', 'athlete', 'story', 'sorry', 'dime', 'dozen', 'compare', 'sitting', 'money', 'scenario', 'average', 'truth', 'many', 'people', 'take', 'worker', 'value', 'thing', 'money', 'happy', 'leaving', 'grammatical', 'error', 'reviewer', 'felt', 'like', 'reading', 'average', 'high', 'school', 'creative', 'writing', 'project', 'went', 'next', 'half', 'quote', 'wellspring', 'deep', 'philosophy', 'music', 'smith', 'note', 'percent', 'american', 'read', '10th', 'grade', 'level', 'well', 'book', 'certainly', 'reading', 'ability', 'many', 'people', 'lesson', 'book', 'smith', 'opportunity', 'swallow', 'pride', 'take', 'coaching', 'come', 'better', 'solution', 'book', 'another', 'time', 'hired', 'editor', 'writing', 'consultant', 'could', 'give', 'guidance', 'build', 'character', 'help', 'feel', 'story', 'instead', 'smith', 'felt', 'necessary', 'alone', 'emotion', 'sorry', 'compared', 'autobiography', 'emotional', 'book', 'sure', 'smith', 'thing', 'mike', 'gutter', 'thoughtful', 'honest', 'trustworthy', 'dependable', 'passionate', 'felt', 'best', 'story', 'would', 'demonstrate', 'characteristic', 'left', 'book', 'instead', 'still', 'looking', 'rest', 'iceberg'], 'neg') \n\nSentence #7: (['value', 'amazon', 'product', 'review', 'purchased', 'many', 'item', 'based', 'customer', 'feedback', 'purchased', 'phone', 'home', 'depot', 'returning', 'today', 'large', 'comfort', 'reception', 'letter', 'small', 'need', 'reading', 'glass', 'could'], 'neg')\n\nSentence Embedding #5: [0.01131779607385397, 0.23367555439472198, -0.056668754667043686, -0.24764131009578705, 0.430510550737381, 0.07081332057714462, -0.5177653431892395, -0.03903905302286148, -0.13728252053260803, 0.07700246572494507, -0.16849233210086823, 0.2067411094903946, -0.2801123857498169, -0.0610094778239727, 0.4939613342285156, -0.03934195637702942, 0.09831012785434723, 0.0422029048204422, -0.13356490433216095, -0.3223132789134979, -0.019953656941652298, 0.3334996700286865, 0.1518217921257019, 0.1095254123210907, 0.3594696521759033, -1.2954380512237549, -0.4831620156764984, -0.019770419225096703, 0.25212976336479187, -0.24769696593284607, 2.5406434535980225, 0.12557470798492432, -0.07102328538894653, -0.3904299736022949, 0.004364060703665018, 0.05013019219040871, -0.018428627401590347, 0.1928984671831131, 0.04191068187355995, -0.34819087386131287, -0.03821679949760437, -0.005490944255143404, -0.026860175654292107, 0.24909710884094238, 
-0.06916779279708862, 0.10717158019542694, -0.020162032917141914, 0.12701380252838135, -0.062044110149145126, 0.2513098120689392] \n\nSentence Embedding #7: [0.3527010381221771, 0.2081831991672516, 0.196530282497406, 0.01412939466536045, 0.4114040434360504, -0.11001740396022797, -0.7598518133163452, -0.23773261904716492, 0.21618972718715668, 0.04337448254227638, -0.009442123584449291, 0.12009995430707932, 0.19873353838920593, -0.2239973098039627, 0.2292730212211609, 0.16251251101493835, -0.3181024193763733, -0.0012830018531531096, -0.013101363554596901, -0.6070674061775208, 0.5082429051399231, -0.11066173762083054, -0.10543221235275269, 0.06870473176240921, -0.03560367971658707, -1.0910638570785522, -0.3817167282104492, 0.06267320364713669, 0.2765468955039978, -0.19624170660972595, 2.7380640506744385, 0.017234284430742264, 0.04990572854876518, -0.27236661314964294, 0.1264142394065857, -0.03530830889940262, 0.05308012664318085, 0.12018389999866486, -0.011113389395177364, -0.18697331845760345, 0.3190408945083618, -0.006428159307688475, -0.012993931770324707, 0.22762437164783478, -0.07230773568153381, 0.152089923620224, -0.20201432704925537, 0.20428138971328735, 0.01430835947394371, 0.13926735520362854]\n"
],
[
"print(\"cosine_similarity = \", cosine_similarity(np.array(x[5]), np.array(x[7])))",
"cosine_similarity = 0.8968743967161681\n"
]
],
[
[
"A value of 0.89 indicates that the sentences are close to each other, and so it is.\n\nLet's check on two positive sentences:",
"_____no_output_____"
]
],
[
[
"print(\"Sentence #1: \", docs[1], \"\\n\\nSentence #4: \", docs[4])\nprint(\"\\nSentence Embedding #1: \", x[1], \"\\n\\nSentence Embedding #4: \", x[4])",
"Sentence #1: (['diehard', 'graco', 'daughter', 'born', 'graco', 'snugride', 'rated', 'infant', 'seat', 'stock', 'consumer', 'report', 'infant', 'seat', 'great', 'part', 'travel', 'system', 'loved', 'stroller', 'seat', 'outgrew', 'seat', 'around', 'year', 'tried', 'eddie', 'bauer', 'convertible', 'seat', 'next', 'horrible', 'strap', 'always', 'twisted', 'daughter', 'head', 'always', 'fell', 'forward', 'onto', 'chest', 'fell', 'asleep', 'quickly', 'changed', 'graco', 'comfortsport', 'overhead', 'drop', 'loved', 'padding', 'head', 'rested', 'side', 'carseat', 'instead', 'chest', 'strap', 'never', 'twisted', 'easy', 'heavy', 'around', 'airport', 'outgrew', 'seat', 'decided', 'make', 'switch', 'britax', 'although', 'roundabout', 'rate', 'went', 'marathon', 'thing', 'like', 'infant', 'latch', 'need', 'child', '3yrs', 'higher', 'weight', 'limit', 'roundabout', 'honda', 'accord', 'door', 'husband', 'drive', 'door', 'toyota', 'echo', 'tiny', 'seat', 'perfectly', 'well', 'vehicle', 'forward', 'facing', 'position', 'toyota', 'latch', 'honda', 'move', 'partly', 'fact', 'husband', 'excellent', 'installing', 'partly', 'seat', 'weight', 'quite', 'heavy', 'pain', 'haul', 'around', 'airport', 'husband', 'usually', 'loosens', 'strap', 'wear', 'like', 'backpack', 'airport', 'daughter', 'love', 'girly', 'print', 'granite', 'plenty', 'room', 'never', 'uncomfortable', 'sleeping', 'head', 'chest', 'either', 'though', 'bright', 'year', 'never', 'able', 'seat', 'love', 'fact', '65lbs', 'five', 'point', 'restraint', 'strap', 'never', 'twist', 'britax', 'also', 'thoughtful', 'thing', 'like', 'adding', 'velcro', 'side', 'seat', 'hold', 'strap', 'place', 'putting', 'child', 'padded', 'area', 'clip', 'child', 'burned', 'plastic', 'summer', 'freezing', 'winter', 'plan', 'staying', 'britax'], 'pos') \n\nSentence #4: (['best', 'carseat', 'perego', 'britax', 'weighs', 'pound', 'break', 'easy', 'adjust', 'best', 'feature', 'base', 'designed', 'move', 'graco', 'infant', 'seat', 'completely', 'flipped', 'sideways', 'made', 'turn', 'secure', 'even', 'special', 'belt', 'clip', 'side', 'canopy', 'floppy', 'still', 'star'], 'pos')\n\nSentence Embedding #1: [0.12711267170236737, 0.09236475469575064, 0.1266332291272759, -0.3013924247956748, 0.294683316376447, 0.19624675583867915, -0.3043137320414023, -0.03493387958727003, -0.0061060525680500305, -0.11686981351259852, 0.0443252307245803, -0.011276101143878014, -0.26993070709101774, 0.03044914987060379, 0.24125360968253, 0.33569200120642373, -0.15215016011392973, 0.10218650844810745, -0.10612362727034287, -0.5607575885239032, 0.018712027263038798, 0.197580213804826, -0.03804364950329961, 0.02118312717094773, 0.11329135551025755, -1.125217992181335, -0.2051172027914006, 0.36529712330122466, 0.3430080961860981, -0.17787827458741562, 2.094838625420638, 0.2979897524642407, -0.07502967412100056, -0.025080831284708564, 0.1635532991838504, 0.06514588520732388, 0.12988997355887227, 0.18282932833936372, 0.03143368033452979, -0.2425769464789681, -0.1511472329278502, 0.12855439917235303, -0.020095951993863194, 0.09676632000568522, -0.0409723698006322, -0.05890869530416577, 0.03822630964523491, -0.3853921659247532, 0.03171381270741952, -0.07378304081607472] \n\nSentence Embedding #4: [-0.024003845115657896, 0.032413323933724314, 0.27177543845027685, -0.19323246960993856, 0.24157759512308985, 0.07526156672975048, -0.2760535052511841, -0.2720376163961191, 0.08585320168640465, 0.045967813464812934, 0.02758947404799983, 0.036348608118714765, -0.12089703255333006, 0.2962150317616761, 
0.10597237956244498, 0.26013640710152686, -0.014499877695925534, -0.014260866533732042, -0.17533631611149758, -0.5312268664129078, -0.12386704773234669, -0.007135688385460526, 0.0460448763333261, 0.014909687997715082, 0.0422693156870082, -0.8960106205195189, -0.11439847142901272, 0.3665759698487818, 0.38871053216280416, -0.17619909485802054, 2.1205024998635054, 0.12035856361035258, -0.14507543152649305, 0.09130546535016038, 0.19292692920134868, 0.1704116902546957, 0.11577803136606235, 0.2672465007635765, -0.20746869717549998, -0.485804874508176, 0.03960801559878746, 0.1643327432248043, 0.007201527449069545, 0.038974627503193915, -0.1504140937468037, -0.030792123987339437, 0.24923472222872078, -0.3328784227996948, -0.03202455530845327, 0.00434977759141475]\n"
],
[
"print(\"cosine_similarity = \", cosine_similarity(np.array(x[1]), np.array(x[4])))",
"cosine_similarity = 0.9481159093219256\n"
]
],
[
[
"These sentences are also close to each other. \n\nSo now let's check sentences with different states:",
"_____no_output_____"
]
],
[
[
"print(\"Sentence #1: \", docs[0], \"\\n\\nSentence #5: \", docs[6])",
"Sentence #1: (['love', 'crate', 'smart', 'little', 'learned', 'zipper', 'undone', 'escape', 'course', 'home', 'find', 'small', 'gift', 'left', 'behind'], 'pos') \n\nSentence #5: (['product', 'generate', 'enough', 'heat', 'provide', 'relief', 'sore', 'aching', 'muscle', 'massaging', 'action', 'actually', 'vibration', 'action', 'heat', 'switched', 'vibration', 'action', 'reduced', 'dramatically'], 'neg')\n"
],
[
"print(\"cosine_similarity = \", cosine_similarity(np.array(x[0]), np.array(x[6])))",
"cosine_similarity = 0.7410293614966914\n"
]
],
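[
[
"To put some numbers behind the observation below, here is a small illustrative check (not part of the original notebook) that compares the average cosine similarity of same-class pairs against mixed-class pairs on a random sample of sentence vectors.",
"_____no_output_____"
]
],
[
[
"# Illustrative: average cosine similarity for same-class vs. mixed-class pairs of sentence vectors.\nrng = np.random.RandomState(42)\nx_arr, y_arr = np.array(x), np.array(y)\n\ndef mean_pair_similarity(idx_a, idx_b, n_pairs=500):\n    sims = []\n    for _ in range(n_pairs):\n        a = x_arr[rng.choice(idx_a)]\n        b = x_arr[rng.choice(idx_b)]\n        sims.append(cosine_similarity(a, b))\n    return np.mean(sims)\n\npos_idx = np.where(y_arr == 1)[0]\nneg_idx = np.where(y_arr == 0)[0]\n\nprint('pos vs pos:', mean_pair_similarity(pos_idx, pos_idx))\nprint('neg vs neg:', mean_pair_similarity(neg_idx, neg_idx))\nprint('pos vs neg:', mean_pair_similarity(pos_idx, neg_idx))",
"_____no_output_____"
]
],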
[
[
"As we see, our average embedding still has some problems with separating different classes with cosine similarity.",
"_____no_output_____"
],
[
"<h2 align=\"center\">Split Corpus</h2>",
"_____no_output_____"
],
[
"Now, for further work, we will divide our corpus for training, testing and development sets:",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\n\n# train test\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)\n\n# train dev\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"print('Length of x_train:', len(x_train), '| Length of y_train:', len(y_train))\nprint('Length of x_test: ', len(x_test), '| Length of y_test: ', len(y_test))\nprint('Length of x_val: ', len(x_val), '| Length of y_val: ', len(y_val))",
"Length of x_train: 24668 | Length of y_train: 24668\nLength of x_test: 7709 | Length of y_test: 7709\nLength of x_val: 6167 | Length of y_val: 6167\n"
],
[
"print(\"Shape of x_train set:\", np.array(x_train).shape)",
"Shape of x_train set: (24668, 50)\n"
]
],
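[
[
"One more quick, purely illustrative check after a random split: the share of positive reviews should be roughly the same in all three sets.",
"_____no_output_____"
]
],
[
[
"# Quick check: proportion of positive labels in each split should be similar.\nfor name, labels in [('train', y_train), ('val', y_val), ('test', y_test)]:\n    print(name, 'positive share:', round(float(np.mean(labels)), 3))",
"_____no_output_____"
]
],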
[
[
"<h2 align=\"center\">Visualization of Classification Report</h2>\n\nWe will need these methods when we start to visualize our data, so we will write them now.\n\nThe following function takes the conclusion of the `classification_report` function as an argument and plots the results ( function is based on [this](https://stackoverflow.com/a/31689645/14467732) solution).",
"_____no_output_____"
]
],
[
[
"def plot_classification_report(classification_report, title='Classification Report', cmap='RdBu'):\n \n lines = classification_report.split('\\n')\n\n classes, plotMat, support, class_names = [], [], [], []\n \n for line in lines[2 : (len(lines) - 5)]:\n t = line.strip().split()\n if len(t) < 2: continue\n classes.append(t[0])\n v = [float(x) for x in t[1: len(t) - 1]]\n support.append(int(t[-1]))\n class_names.append(t[0])\n plotMat.append(v)\n\n xlabel = 'Metrics'\n ylabel = 'Classes'\n \n xticklabels = ['Precision', 'Recall', 'F1-score']\n yticklabels = ['{0} ({1})'.format(class_names[idx], sup) for idx, sup in enumerate(support)]\n \n figure_width = 25\n figure_height = len(class_names) + 7\n correct_orientation = False\n \n heatmap(np.array(plotMat), title, xlabel, ylabel, xticklabels, yticklabels, figure_width, figure_height, correct_orientation, cmap=cmap)",
"_____no_output_____"
]
],
[
[
"This function is designed to create a heatmap with text in each cell using the matplotlib library (code based on idea from [here](https://stackoverflow.com/a/16124677/14467732)):",
"_____no_output_____"
]
],
[
[
"def heatmap(AUC, title, xlabel, ylabel, xticklabels, yticklabels, figure_width=40, figure_height=20, correct_orientation=False, cmap='RdBu'):\n fig, ax = plt.subplots()\n c = ax.pcolor(AUC, edgecolors='k', linestyle='dashed', linewidths=0.2, cmap=cmap)\n\n # put the major ticks at the middle of each cell\n ax.set_yticks(np.arange(AUC.shape[0]) + 0.5, minor=False)\n ax.set_xticks(np.arange(AUC.shape[1]) + 0.5, minor=False)\n\n # set tick labels\n ax.set_xticklabels(xticklabels, minor=False)\n ax.set_yticklabels(yticklabels, minor=False)\n\n # set title and x/y labels\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel) \n\n # remove last blank column\n plt.xlim( (0, AUC.shape[1]) )\n\n # turn off all the ticks\n ax = plt.gca() \n for t in ax.xaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n for t in ax.yaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n\n # add color bar\n plt.colorbar(c)\n\n # add text in each cell \n show_val(c)\n\n # proper orientation (origin at the top left instead of bottom left)\n if correct_orientation:\n ax.invert_yaxis()\n ax.xaxis.tick_top() \n\n # resize \n fig = plt.gcf()\n fig.set_size_inches(cm_to_inch(figure_width, figure_height))",
"_____no_output_____"
]
],
[
[
"This function just inserts the text into the cells of the heatmap (idea is taken from [here](https://stackoverflow.com/a/25074150/14467732)):",
"_____no_output_____"
]
],
[
[
"def show_val(pc, fmt=\"%.2f\", **kw):\n pc.update_scalarmappable()\n ax = pc.axes\n for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):\n x, y = p.vertices[:-2, :].mean(0)\n if np.all(color[:3] > 0.5):\n color = (0.0, 0.0, 0.0)\n else:\n color = (1.0, 1.0, 1.0)\n ax.text(x, y, fmt % value, ha=\"center\", va=\"center\", color=color, **kw)",
"_____no_output_____"
]
],
[
[
"The last auxiliary function is intended to specify the size of the figure in centimeters in matplotlib, because by default there is only the method `set_size_inches`, therefore, we will convert inches to centimeters and use this method:",
"_____no_output_____"
]
],
[
[
"def cm_to_inch(*dim):\n inch = 2.54\n return tuple(i/inch for i in dim[0]) if type(dim[0]) == tuple else tuple(i/inch for i in dim)",
"_____no_output_____"
]
],
[
[
"**Note:** To better understand the following classifiers, I advise you to read [this article](https://towardsdatascience.com/comparative-study-on-classic-machine-learning-algorithms-24f9ff6ab222) or other similar ones that you will find on the Internet.",
"_____no_output_____"
],
[
"<h2 align=\"center\">KNN Model</h2>\n\nThe K-nearest neighbors (KNN) algorithm is a type of supervised machine learning algorithms. KNN is extremely easy to implement in its most basic form, and yet performs quite complex classification tasks. It is a lazy learning algorithm since it doesn't have a specialized training phase. Rather, it uses all of the data for training while classifying a new data point or instance. KNN is also a non-parametric learning algorithm, which means that it doesn't assume anything about the underlying data.\n\n\nKNN algorithm simply calculates the distance of a new data point to all other training data points. The distance can be of any type e.g Euclidean or Manhattan etc. It then selects the K-nearest data points, where K can be any integer. Finally it assigns the data point to the class to which the majority of the K data points belong.\n\nNow, let's build KNN classifier model.\n\nFirst, we import the `KNeighborsClassifier` module and create KNN classifier object by passing argument number of neighbors in `KNeighborsClassifier()` function. Then, fit our model on the train set using `fit()` and perform prediction on the test set using `predict()`.\n\nOne way to help find the best value of neighbors is to plot the graph of neighbor value and the corresponding error rate for the dataset. We will plot the mean error for the predicted values of test set for all the neighbor values between 1 and 25.\nTo do so, let's first calculate the mean of error for all the predicted values where neighbor ranges from 1 and 25:",
"_____no_output_____"
]
],
[
[
"from sklearn.neighbors import KNeighborsClassifier\n\nerror = []\n\n# calculating error for neighbor values between 1 and 25\nfor i in range(1, 25):\n knn = KNeighborsClassifier(n_neighbors=i)\n knn.fit(x_train, y_train)\n pred_i = knn.predict(x_test)\n error.append(np.mean(pred_i != y_test))",
"_____no_output_____"
]
],
[
[
"The next step is to plot the error values against neighbor values:",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10, 5))\nplt.plot(range(1, 25), error, color='black', linestyle='dashed', marker='o', markerfacecolor='green', markersize=10)\nplt.title('Error Rate Neighbor Value')\nplt.xlabel('Neighbor Value')\nplt.ylabel('Mean Error')",
"_____no_output_____"
]
],
[
[
"As we can see, it is best to take k=5, but still mean error a little higher than normal.",
"_____no_output_____"
]
],
[
[
"# create KNN Classifier\nknn = KNeighborsClassifier(n_neighbors=5, weights='distance')\n\n# train the classifier using the training sets\nknn.fit(x_train, y_train)\n\n# predict the response for test dataset\ny_pred = knn.predict(x_test)\n\nprint(\"Nearest Neighbors Result (k=5):\\n\" + '-' * 35)\nprint(\"Accuracy Score (k=5):\", str(round(knn.score(x_test, y_test) * 100, 2)) + '%')\nprint(\"Accuracy (x_train, y_train):\", str(round(knn.score(x_train, y_train), 4) * 100) + '%')",
"Nearest Neighbors Result (k=5):\n-----------------------------------\nAccuracy Score (k=5): 71.46%\nAccuracy (x_train, y_train): 100.0%\n"
]
],
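[
[
"# Optional illustration (uses the fitted `knn` model and `x_test` from above):\n# kneighbors() returns the distances to, and indices of, the K closest training points,\n# which is exactly the information the majority vote described earlier is based on.\ndistances, indices = knn.kneighbors(x_test[:1], n_neighbors=5)\nprint(\"Distances to the 5 nearest training samples:\", distances)\nprint(\"Indices of those training samples:\", indices)",
"_____no_output_____"
]
],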
[
[
"The accuracy of the model is good, we can work with it.\n\nNow let's explore our KNN Classification results with help of `classification_report` function from sklearn.metrics:",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import classification_report\nprint('\\nClassification KNN:\\n', classification_report(y_test, knn.predict(x_test)))",
"\nClassification KNN:\n precision recall f1-score support\n\n 0 0.67 0.65 0.66 3292\n 1 0.74 0.77 0.75 4417\n\n accuracy 0.71 7709\n macro avg 0.71 0.71 0.71 7709\nweighted avg 0.71 0.71 0.71 7709\n\n"
]
],
[
[
"Now finally let's visualize our classification report:",
"_____no_output_____"
]
],
[
[
"plot_classification_report(classification_report(y_test, knn.predict(x_test)), title='KNN Classification Report')",
"_____no_output_____"
]
],
[
[
"<h2 align=\"center\">Logistic Regression</h2>\n\nLogistic Regression is a Machine Learning classification algorithm that is used to predict the probability of a categorical dependent variable. In logistic regression, the dependent variable is a binary variable that contains data coded as 1 (yes, success, etc.) or 0 (no, failure, etc.). In other words, the logistic regression model predicts P(Y=1) as a function of X.",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression\n\nlogit = LogisticRegression(solver='liblinear', multi_class='ovr', n_jobs=1)\nlogit.fit(x_train, y_train)",
"_____no_output_____"
],
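[
"# Optional illustration: logistic regression models P(Y=1|X) directly, so we can\n# inspect the estimated probabilities (values in [0, 1]) for a few test samples.\nprint(logit.predict_proba(x_test[:5])[:, 1])",
"_____no_output_____"
],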
[
"print(\"Accuracy Score:\", str(round(logit.score(x_test, y_test) * 100, 2)) + '%')",
"Accuracy Score: 72.79%\n"
],
[
"print('\\nClassification Logistic Regression:\\n', classification_report(y_test, logit.predict(x_test)))",
"\nClassification Logistic Regression:\n precision recall f1-score support\n\n 0 0.70 0.64 0.67 3292\n 1 0.75 0.79 0.77 4417\n\n accuracy 0.73 7709\n macro avg 0.72 0.72 0.72 7709\nweighted avg 0.73 0.73 0.73 7709\n\n"
]
],
[
[
"Now let's visualize our classification report:",
"_____no_output_____"
]
],
[
[
"plot_classification_report(classification_report(y_test, logit.predict(x_test)), title='Logistic Regression Classification Report')",
"_____no_output_____"
]
],
[
[
"<h2 align=\"center\">Random Forest Classifier</h2>\n\nThe Random forest or Random Decision Forest is a supervised Machine learning algorithm used for classification, regression, and other tasks using decision trees.\n\nThe Random forest classifier creates a set of decision trees from a randomly selected subset of the training set. It is basically a set of decision trees (DT) from a randomly selected subset of the training set and then It collects the votes from different decision trees to decide the final prediction.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\n\nclf = RandomForestClassifier(n_estimators=100)\nclf.fit(x_train, y_train)\n\nprint(\"Accuracy Score:\", str(round(clf.score(x_test, y_test), 4) * 100) + '%')",
"Accuracy Score: 75.56%\n"
],
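[
"# Optional illustration: the forest's probability estimate is simply the share of\n# trees voting for each class, which makes the \"majority vote\" idea concrete.\nprint(\"Vote shares (class 0, class 1) for the first test sample:\", clf.predict_proba(x_test[:1])[0])\nprint(\"Forest prediction:\", clf.predict(x_test[:1])[0])",
"_____no_output_____"
],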
[
"print('\\nClassification Random Forest:\\n', classification_report(y_test, clf.predict(x_test)))",
"\nClassification Random Forest:\n precision recall f1-score support\n\n 0 0.75 0.64 0.69 3292\n 1 0.76 0.84 0.80 4417\n\n accuracy 0.76 7709\n macro avg 0.75 0.74 0.74 7709\nweighted avg 0.76 0.76 0.75 7709\n\n"
]
],
[
[
"Now let's visualize our classification report:",
"_____no_output_____"
]
],
[
[
"plot_classification_report(classification_report(y_test, clf.predict(x_test)), title='Random Forest Classification Report')",
"_____no_output_____"
]
],
[
[
"<h2 align=\"center\">SVM Model</h2>\n\nSupport vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.\n\nThe advantages of support vector machines are:\n\n + Effective in high dimensional spaces.\n\n + Still effective in cases where number of dimensions is greater than the number of samples.\n\n + Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient.\n\n + Versatile: different Kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels.\n\nThe disadvantages of support vector machines include:\n\n - If the number of features is much greater than the number of samples, avoid over-fitting in choosing Kernel functions and regularization term is crucial.\n\n - SVMs do not directly provide probability estimates, these are calculated using an expensive five-fold cross-validation (see Scores and probabilities, below).\n \nYou can read more about it [here](https://en.wikipedia.org/wiki/Support-vector_machine).",
"_____no_output_____"
]
],
[
[
"from sklearn import svm\n\nSVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto', probability=True)\nSVM.fit(x_train, y_train)\n\nprint(\"Accuracy Score:\", str(round(SVM.score(x_test, y_test), 4) * 100) + '%')",
"Accuracy Score: 73.11%\n"
],
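[
"# Optional illustration: only a subset of the training points -- the support vectors --\n# is kept in the decision function of the fitted SVM.\nprint(\"Number of support vectors per class:\", SVM.n_support_)",
"_____no_output_____"
],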
[
"print('\\nClassification SVM:\\n', classification_report(y_test, SVM.predict(x_test)))",
"\nClassification SVM:\n precision recall f1-score support\n\n 0 0.70 0.65 0.67 3292\n 1 0.75 0.79 0.77 4417\n\n accuracy 0.73 7709\n macro avg 0.73 0.72 0.72 7709\nweighted avg 0.73 0.73 0.73 7709\n\n"
],
[
"plot_classification_report(classification_report(y_test, SVM.predict(x_test)), title='SVM Classification Report')",
"_____no_output_____"
]
],
[
[
"<h2 align=\"center\">Comparison of Models</h2> \n\nA useful tool when predicting the probability of a binary outcome is the Receiver Operating Characteristic curve, or ROC curve.\n\nIt is a plot of the false positive rate (x-axis) versus the true positive rate (y-axis) for a number of different candidate threshold values between 0.0 and 1.0. Put another way, it plots the false alarm rate versus the hit rate.\n\nThe true positive rate is calculated as the number of true positives divided by the sum of the number of true positives and the number of false negatives. It describes how good the model is at predicting the positive class when the actual outcome is positive.\n\nThe false positive rate is calculated as the number of false positives divided by the sum of the number of false positives and the number of true negatives.\n\nIt is also called the false alarm rate as it summarizes how often a positive class is predicted when the actual outcome is negative.\n\nTo make this clear:\n* Smaller values on the x-axis of the plot indicate lower false positives and higher true negatives.\n* Larger values on the y-axis of the plot indicate higher true positives and lower false negatives.",
"_____no_output_____"
]
],
[
[
"from sklearn import metrics\nfrom sklearn.metrics import roc_curve, auc\n\nfprKNN, tprKNN, thresholdsKNN = metrics.roc_curve(y_test, knn.predict_proba(x_test)[:, 1])\nfprLR, tprLR, thresholdsLR = metrics.roc_curve(y_test, logit.predict_proba(x_test)[:, 1])\nfprCLF, tprCLF, thresholdCLF = metrics.roc_curve(y_test, clf.predict_proba(x_test)[:, 1])\nfprSVM, trpSVM, thresholdSVM = metrics.roc_curve(y_test, SVM.predict_proba(x_test)[:, 1])",
"_____no_output_____"
],
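[
"# Optional illustration: the TPR/FPR definitions above can be checked directly from a\n# confusion matrix at a single operating point (here the random forest's default prediction).\nfrom sklearn.metrics import confusion_matrix\ntn, fp, fn, tp = confusion_matrix(y_test, clf.predict(x_test)).ravel()\nprint(\"TPR (hit rate):\", tp / (tp + fn))\nprint(\"FPR (false alarm rate):\", fp / (fp + tn))",
"_____no_output_____"
],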
[
"linewidth = 2\nplt.figure(figsize=(8, 5))\n\nplt.plot(fprKNN, tprKNN, color='#db6114', lw=linewidth, label='ROC Curve KNN (AUC = %0.3f)' % auc(fprKNN, tprKNN))\nplt.plot(fprLR, tprLR, color='#1565c0', lw=linewidth, label='ROC Curve Logistic Regression (AUC = %0.3f)' % auc(fprLR, tprLR))\nplt.plot(fprCLF, tprCLF, color='#2e7d32',lw=linewidth, label='ROC Curve Random Forest (AUC = %0.3f)' % auc(fprCLF, tprCLF))\nplt.plot(fprSVM, trpSVM, color='#6557d2',lw=linewidth, label='ROC Curve SVM (AUC = %0.3f)' % auc(fprSVM, trpSVM))\nplt.plot([0, 1], [0, 1], color='#616161', lw=linewidth, linestyle='--')\n\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\n\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC Curve Plots')\nplt.legend(loc=\"lower right\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Based on these data, we can conclude that the best model so far is a **Random Forest Model** with `AUC = 83.5%` and `Accuracy Score = 75.56%`.\n\nLet's save this model:",
"_____no_output_____"
]
],
[
[
"with open('RFModel.pickle', 'wb') as m:\n pickle.dump(logit, m)",
"_____no_output_____"
]
],
[
[
"Let's check if everything is loaded correctly:",
"_____no_output_____"
]
],
[
[
"with open('RFModel.pickle', 'rb') as m:\n rf = pickle.load(m)\n\nprint(\"Random Forest Accuracy Score:\", str(round(clf.score(x_test, y_test), 4) * 100) + '%')",
"Random Forest Accuracy Score: 75.56%\n"
]
],
[
[
"In the next part we will finally try the **LSTM** neural network, which takes a sequence of words and remembers the order of words in a sentence, this approach should give us the best results.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7c9398375ffd45aff7b0d5cc50f0bc5fc77e09f | 47,082 | ipynb | Jupyter Notebook | data/pyodKNN.DummyDataset-anomaly_detection.ipynb | therobotacademy/kaggle-anomaly-detection | 2882202950320b751970e8f972a169fbfe6c2f47 | [
"MIT"
] | null | null | null | data/pyodKNN.DummyDataset-anomaly_detection.ipynb | therobotacademy/kaggle-anomaly-detection | 2882202950320b751970e8f972a169fbfe6c2f47 | [
"MIT"
] | null | null | null | data/pyodKNN.DummyDataset-anomaly_detection.ipynb | therobotacademy/kaggle-anomaly-detection | 2882202950320b751970e8f972a169fbfe6c2f47 | [
"MIT"
] | null | null | null | 169.971119 | 30,564 | 0.901321 | [
[
[
"import numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager",
"_____no_output_____"
]
],
[
[
"# Run source tutorial\nhttps://www.geeksforgeeks.org/machine-learning-for-anomaly-detection/",
"_____no_output_____"
]
],
[
[
"import matplotlib.font_manager\nfrom pyod.models.knn import KNN \nfrom pyod.utils.data import generate_data, get_outliers_inliers",
"_____no_output_____"
],
[
"# [1] CREATE SYNTHETIC DATA\nnpoints = 300\n\n# Generating a random dataset with two features\nX_train, y_train = generate_data(n_train = npoints, train_only = True,\n n_features = 2)",
"_____no_output_____"
],
[
"# Storing the outliers and inliners in different numpy arrays\nX_outliers, X_inliers = get_outliers_inliers(X_train, y_train)\nn_inliers = len(X_inliers)\nn_outliers = len(X_outliers)\nprint(\"There are\", n_inliers, \"inliers and\", n_outliers, \"outliers\")",
"There are 270 inliers and 30 outliers\n"
],
[
"# Separating the two features\nf1 = X_train[:, [0]] # .reshape(-1, 1) # This destructures the array f1[:,0]\nf2 = X_train[:, [1]] # .reshape(-1, 1)",
"_____no_output_____"
],
[
"# [2] VISUALIZE THE DATA\n# Visualising the dataset\n# create a meshgrid\nxx, yy = np.meshgrid(np.linspace(-10, 10, 200),\n np.linspace(-10, 10, 200))\n \n# scatter plot\nplt.scatter(f1, f2)\nplt.xlabel('Feature 1')\nplt.ylabel('Feature 2')",
"_____no_output_____"
],
[
"# [3] TRAIN THE MODEL AND EVALUATE\n\n# Setting the percentage of outliers\noutlier_fraction = 0.1\n\n# Training the classifier\nclf = KNN(contamination = outlier_fraction)\nclf.fit(X_train, y_train)\n \n# You can print this to see all the prediciton scores\nscores_pred = clf.decision_function(X_train)*-1\n \ny_pred = clf.predict(X_train)\nn_errors = (y_pred != y_train).sum()",
"/opt/conda/lib/python3.7/site-packages/pyod/models/base.py:349: UserWarning: y should not be presented in unsupervised learning.\n \"y should not be presented in unsupervised learning.\")\n"
],
[
"# Counting the number of errors \nprint('The number of prediction errors are', n_errors, ', equal to ', \"{:.2f}\".format(n_errors/npoints), '% out of', npoints, 'data points')",
"The number of prediction errors are 0 , equal to 0.00 % out of 300 data points\n"
],
[
"# [4] VISUALIZING THE PREDICTIONS\n\n# threshold value to consider a\n# datapoint inlier or outlier\nthreshold = stats.scoreatpercentile(scores_pred, 100 * outlier_fraction)",
"_____no_output_____"
],
[
"# decision function calculates the raw \n# anomaly score for every point\nZ = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) * -1\nZ = Z.reshape(xx.shape)",
"_____no_output_____"
],
[
"# fill blue colormap from minimum anomaly\n# score to threshold value\nsubplot = plt.subplot(1, 1, 1)\nsubplot.contourf(xx, yy, Z, levels = np.linspace(Z.min(), \n threshold, 10), cmap = plt.cm.Blues_r)\n\n# draw red contour line where anomaly \n# score is equal to threshold\na = subplot.contour(xx, yy, Z, levels =[threshold],\n linewidths = 2, colors ='red')\n\n# fill orange contour lines where range of anomaly\n# score is from threshold to maximum anomaly score\nsubplot.contourf(xx, yy, Z, levels =[threshold, Z.max()], colors ='orange')\n \n# scatter plot of inliers with white dots\nb = subplot.scatter(X_train[:-n_outliers, 0], X_train[:-n_outliers, 1],\n c ='white', s = 20, edgecolor ='k') \n \n# scatter plot of outliers with black dots\nc = subplot.scatter(X_train[-n_outliers:, 0], X_train[-n_outliers:, 1], \n c ='black', s = 20, edgecolor ='k')\nsubplot.axis('tight')\n\nsubplot.legend(\n [a.collections[0], b, c],\n ['learned decision function', 'true inliers', 'true outliers'],\n prop = matplotlib.font_manager.FontProperties(size = 10),\n loc ='lower right')\n \nsubplot.set_title('K-Nearest Neighbours')\n#subplot.set_xlim((-3.5, 4.5))\n#subplot.set_ylim((-3.5, 4.5))\nsubplot.set_xlim((-10, 10))\nsubplot.set_ylim((-10, 10))\nplt.show() ",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c9454907672e3cd1a19d519eede64eb8f6d938 | 14,758 | ipynb | Jupyter Notebook | slides/17-iterative/iterative.ipynb | ucla-biostat-257-2020spring/ucla-biostat-257.github.io | 1ac89bf659502801b9ff8764edf6687891fb1a60 | [
"MIT"
] | 1 | 2020-12-15T12:04:01.000Z | 2020-12-15T12:04:01.000Z | slides/17-iterative/iterative.ipynb | ucla-biostat-257-2020spring/ucla-biostat-257.github.io | 1ac89bf659502801b9ff8764edf6687891fb1a60 | [
"MIT"
] | null | null | null | slides/17-iterative/iterative.ipynb | ucla-biostat-257-2020spring/ucla-biostat-257.github.io | 1ac89bf659502801b9ff8764edf6687891fb1a60 | [
"MIT"
] | 2 | 2021-02-27T03:26:42.000Z | 2021-07-10T14:34:24.000Z | 27.482309 | 308 | 0.52602 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7c94c6e834abc3eb2c7d448d6b646b4ba6ec047 | 23,858 | ipynb | Jupyter Notebook | samples/end_to_end_sample/select_path.ipynb | y-okumura-isp/CARET_demos | 048fa2e7dccef78d34fc79ea2db82e85918853f2 | [
"Apache-2.0"
] | null | null | null | samples/end_to_end_sample/select_path.ipynb | y-okumura-isp/CARET_demos | 048fa2e7dccef78d34fc79ea2db82e85918853f2 | [
"Apache-2.0"
] | null | null | null | samples/end_to_end_sample/select_path.ipynb | y-okumura-isp/CARET_demos | 048fa2e7dccef78d34fc79ea2db82e85918853f2 | [
"Apache-2.0"
] | 1 | 2022-02-17T04:09:15.000Z | 2022-02-17T04:09:15.000Z | 66.272222 | 560 | 0.618074 | [
[
[
"import caret_analyze as caret\nimport caret_analyze.plot as caret_plot\n\napp = caret.Application('architecture.yaml', 'yaml', None)",
"_____no_output_____"
],
[
"start_callback_name = '/sensor_dummy_node/timer_callback_0'\nend_callback_name = '/actuator_dummy_node/subscription_callback_0'\npaths = app.search_paths(start_callback_name, end_callback_name)\nlen(paths) # 見つかったパスの数を出力",
"_____no_output_____"
],
[
"path = paths[0]\ncaret_plot.callback_graph(app, callbacks=path.callbacks)\n# caret_plot.callback_graph(app, callbacks=path.callbacks, export_path='callback_graph.svg') # 画像として保存する場合はパスを指定",
"_____no_output_____"
],
[
"app.path['target_path'] = path\napp.export_architecture('architecture.yaml')",
"_____no_output_____"
],
[
"! cat architecture.yaml",
"path_name_aliases:\n- path_name: target_path\n callbacks:\n - /sensor_dummy_node/timer_callback_0\n - /filter_node/subscription_callback_0\n - /message_driven_node/subscription_callback_0\n - /message_driven_node/subscription_callback_1\n - /timer_driven_node/subscription_callback_0\n - /timer_driven_node/timer_callback_0\n - /actuator_dummy_node/subscription_callback_0\nnodes:\n- node_name: /actuator_dummy_node\n callbacks:\n - callback_name: subscription_callback_0\n type: subscription_callback\n topic_name: /topic4\n symbol: ActuatorDummy::ActuatorDummy(std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>)::{lambda(std::unique_ptr<sensor_msgs::msg::Image>)#1}\n- node_name: /filter_node\n callbacks:\n - callback_name: subscription_callback_0\n type: subscription_callback\n topic_name: /topic1\n symbol: NoDependencyNode::NoDependencyNode(std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>)::{lambda(std::unique_ptr<sensor_msgs::msg::Image>)#1}\n publishes:\n - topic_name: /topic2\n callback_name: subscription_callback_0\n- node_name: /message_driven_node\n callbacks:\n - callback_name: subscription_callback_0\n type: subscription_callback\n topic_name: /topic2\n symbol: SubDependencyNode::SubDependencyNode(std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>)::{lambda(std::unique_ptr<sensor_msgs::msg::Image>)#1}\n - callback_name: subscription_callback_1\n type: subscription_callback\n topic_name: /drive\n symbol: SubDependencyNode::SubDependencyNode(std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>)::{lambda(std::unique_ptr<sensor_msgs::msg::Image>)#2}\n variable_passings:\n - callback_name_write: subscription_callback_0\n callback_name_read: subscription_callback_1\n publishes:\n - topic_name: /topic3\n callback_name: subscription_callback_1\n- node_name: /timer_driven_node\n callbacks:\n - callback_name: subscription_callback_0\n type: subscription_callback\n topic_name: /topic3\n symbol: TimerDependencyNode::TimerDependencyNode(std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,int)::{lambda(std::unique_ptr<sensor_msgs::msg::Image>)#1}\n - callback_name: timer_callback_0\n type: timer_callback\n period_ns: 100000000\n symbol: TimerDependencyNode::TimerDependencyNode(std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,int)::{lambda()#2}\n variable_passings:\n - callback_name_write: subscription_callback_0\n callback_name_read: timer_callback_0\n 
publishes:\n - topic_name: /topic4\n callback_name: timer_callback_0\n- node_name: /sensor_dummy_node\n callbacks:\n - callback_name: timer_callback_0\n type: timer_callback\n period_ns: 100000000\n symbol: SensorDummy::SensorDummy(std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,int)::{lambda()#1}\n publishes:\n - topic_name: /topic1\n callback_name: timer_callback_0\n- node_name: /drive_node\n callbacks:\n - callback_name: timer_callback_0\n type: timer_callback\n period_ns: 100000000\n symbol: SensorDummy::SensorDummy(std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,std::__cxx11::basic_string<char,std::char_traits<char>,std::allocator<char>>,int)::{lambda()#1}\n publishes:\n - topic_name: /drive\n callback_name: timer_callback_0\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7c9541d04bd7bcc1c8d6259ab12af3f0e42ed0c | 8,822 | ipynb | Jupyter Notebook | lesson7exercises.ipynb | jennybrown8/python-notebook-coding-intro | 9fe9de2533171c786d272e8315b3ff0d1387a8dd | [
"Apache-2.0"
] | null | null | null | lesson7exercises.ipynb | jennybrown8/python-notebook-coding-intro | 9fe9de2533171c786d272e8315b3ff0d1387a8dd | [
"Apache-2.0"
] | null | null | null | lesson7exercises.ipynb | jennybrown8/python-notebook-coding-intro | 9fe9de2533171c786d272e8315b3ff0d1387a8dd | [
"Apache-2.0"
] | null | null | null | 33.416667 | 419 | 0.567105 | [
[
[
"# Lesson 7: Pattern Challenges",
"_____no_output_____"
],
[
"In this set of exercises, we'll play with number patterns, shape patterns, and other types of repetition and variation. This is to get you thinking more flexibly about what you can do with code, so that you can better apply it to practical situations later.\n\n\n",
"_____no_output_____"
],
[
"### Fibonacci\n\nThe fibonacci sequence of numbers starts from 1 and 1, and then rest come from adding the two prior numbers and putting the answer after them, making a list. Here are the first numbers:\n\n1 1 2 3 5 8 13 21 34 55 89\n\nSo 1 + 1 = 2. 1 + 2 = 3. 2 + 3 = 5. 3 + 5 = 8. And so on.\n\nWrite a program to calculate and print the first 30 fibonacci numbers. The two 1's that start the sequence are given automatically; have the program calculate the rest.\n",
"_____no_output_____"
],
[
"### Headlines\n\nWrite a program that asks for a headline text, and then prints a bar, a centered headline, and another bar. The bar should be 80 characters wide, using the = sign as the bar. The headline text should be centered within the width of the bars.\n\n =====================\n center\n =====================\n \nYou can find out the length of the headline text by calling the len() function on the variable, like this:\n\n size = len(headline)\n ",
"_____no_output_____"
],
[
"### Arrow\n\nWrite a program that prints a text-art arrow like below (it does not need to have spaces ahead of it; that's just the way it shows up here). The program should ask the user for the width of the widest row, and then print the right size arrow to match.\n\n #\n ##\n ###\n ####\n #####\n ######\n #######\n ######\n #####\n ####\n ###\n ##\n #\n \nThe widest row can be as much as 60 characters wide, so you don't want to have to make the rows by hand. This is an ideal place for a loop that makes the row the appropriate width. You might even like to create a drawRow() function with a loop inside it.\n\nYou also need to figure out how many rows you need based on the width of the widest row. There's some arithmetic involved here. Take your time and work it out on paper, possibly gridded graph paper so you can count the boxes and think about it carefully.",
"_____no_output_____"
],
[
"### Mountain Range\n\nNow we're going to flip it sideways. (Yes this will be completely new logic.) Ask the user for two numbers. One is the height of the mountains. The other is the number of mountains. Neither number will be bigger than 8. This is an example of mountains of height 3 with a total of 8 mountains.\n\n # # # # # # # #\n ### ### ### ### ### ### ### ### \n ##########################################\n\nA single mountain looks like this.\n\n #\n ###\n #####\n \nWrite a program that asks for the number of rows high for the mountain, and asks how many mountains to print. Don't worry if it ends up too wide for your web browser when there are a lot of mountains; just focus on getting the overall logic correct.",
"_____no_output_____"
],
[
"### Hundreds Chart\n\nWhen children are learning to count to 100, there's a board of numbers often used to help them understand number relations. It's call a hundred chart. It looks like this:\n\n 0 1 2 3 4 5 6 7 8 9\n 10 11 12 13 14 15 16 17 18 19 \n 20 21 22 23 24 25 26 27 28 29 \n 30 31 32 33 34 35 36 37 38 39 \n 40 41 42 43 44 45 46 47 48 49\n ... and so on.\n \nWrite a program that uses looping and if statements to print a well-formatted hundred chart from 0 to 99. You can print the 100 in the last row without worrying about its formatting.\n\nNote that you can use \\n to indicate a newline:\n \n print \"Hello\\nJenny\"\n",
"_____no_output_____"
]
],
[
[
"print \"Hello\\nJenny\"",
"Hello\nJenny\n"
]
],
[
[
"### Fence Perimeter\n\nFarmer Brown is creating a large garden. The local wildlife are very enthusiastic about eating her lettuce and tomatoes, so she wants to put a fence around the entire field. It will include a door somewhere, but you don't need to worry about that; the door will use the same materials as the rest of the fence.\n\nShe wants you to write a program that accepts a width and length of the field (in feet) and then calculates how many fence posts and how many board lengths are needed to surround the field.\n\nFence posts are 2 feet apart (measured from center of the post to center of the next post so it's exactly 2 feet). Fields will be a width and length that are each multiples of 2 so you don't have to worry about weirdness in that math.\n\nBetween each fence post will be one board. (She's going to hang netting from the board, all the way to the ground, so one board will be enough.)\n\nHere's an example field that is 4 feet by 4 feet.\n\n x--x--x\n | |\n x x\n | |\n x--x--x\n\nNotice how an edge has 2 boards but 3 fenceposts? And yet, in total, this 4' x 4' field needs 8 boards, and 8 fence posts.\n\nThe fact that one side is 2 boards but 3 fenceposts is called the \"fencepost problem\" - it's about paying attention to what happens in reality at the end of your loop. \"Fencepost problem\" or \"off by 1 error\" is a term you'll hear frequently in coding. The \"error\" piece refers to the fact that we often make mental mistakes when coding, resulting in a count that's 1 different from what it should be.\n\n\nWrite a program that accepts a width in feet and a length in feet (they may not necessarily be the same), and figures out how many boards and how many fence posts are needed to surround the field. It does NOT need to print a picture (that one was just to demonstrate the idea), just print the numbers.\n\nDraw out the answers on paper for the following examples to test your work.\n\n W: 2 L: 2\n W: 4 L: 4\n W: 4 L: 8\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7c96221827042ea072fbd55b92a077b024fc2ee | 18,842 | ipynb | Jupyter Notebook | docs/jupyter/geometry/pointcloud.ipynb | pmokeev/Open3D | 9d64b0c46338082d3fceac352ef84cc9dd0a42e7 | [
"MIT"
] | 1 | 2022-03-01T02:23:32.000Z | 2022-03-01T02:23:32.000Z | docs/jupyter/geometry/pointcloud.ipynb | pmokeev/Open3D | 9d64b0c46338082d3fceac352ef84cc9dd0a42e7 | [
"MIT"
] | null | null | null | docs/jupyter/geometry/pointcloud.ipynb | pmokeev/Open3D | 9d64b0c46338082d3fceac352ef84cc9dd0a42e7 | [
"MIT"
] | null | null | null | 38.849485 | 704 | 0.583112 | [
[
[
"import open3d as o3d\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nimport os\nimport sys\n\n# only needed for tutorial, monkey patches visualization\nsys.path.append('..')\nimport open3d_tutorial as o3dtut\n# change to True if you want to interact with the visualization windows\no3dtut.interactive = not \"CI\" in os.environ",
"_____no_output_____"
]
],
[
[
"# Point cloud\nThis tutorial demonstrates basic usage of a point cloud.\n\n## Visualize point cloud\nThe first part of the tutorial reads a point cloud and visualizes it.",
"_____no_output_____"
]
],
[
[
"print(\"Load a ply point cloud, print it, and render it\")\nply_point_cloud = o3d.data.PLYPointCloud()\npcd = o3d.io.read_point_cloud(ply_point_cloud.path)\nprint(pcd)\nprint(np.asarray(pcd.points))\no3d.visualization.draw_geometries([pcd],\n zoom=0.3412,\n front=[0.4257, -0.2125, -0.8795],\n lookat=[2.6172, 2.0475, 1.532],\n up=[-0.0694, -0.9768, 0.2024])",
"_____no_output_____"
]
],
[
[
"`read_point_cloud` reads a point cloud from a file. It tries to decode the file based on the extension name. For a list of supported file types, refer to [File IO](file_io.ipynb).\n\n`draw_geometries` visualizes the point cloud. Use a mouse/trackpad to see the geometry from different view points.\n\nIt looks like a dense surface, but it is actually a point cloud rendered as surfels. The GUI supports various keyboard functions. For instance, the `-` key reduces the size of the points (surfels).\n\n<div class=\"alert alert-info\">\n \n**Note:** \n\nPress the `H` key to print out a complete list of keyboard instructions for the GUI. For more information of the visualization GUI, refer to [Visualization](visualization.ipynb) and [Customized visualization](../visualization/customized_visualization.rst).\n\n</div>\n\n<div class=\"alert alert-info\">\n \n**Note:** \n\nOn macOS, the GUI window may not receive keyboard events. In this case, try to launch Python with `pythonw` instead of `python`.\n\n</div>",
"_____no_output_____"
],
[
"## Voxel downsampling\nVoxel downsampling uses a regular voxel grid to create a uniformly downsampled point cloud from an input point cloud. It is often used as a pre-processing step for many point cloud processing tasks. The algorithm operates in two steps:\n\n1. Points are bucketed into voxels.\n2. Each occupied voxel generates exactly one point by averaging all points inside.",
"_____no_output_____"
]
],
[
[
"print(\"Downsample the point cloud with a voxel of 0.05\")\ndownpcd = pcd.voxel_down_sample(voxel_size=0.05)\no3d.visualization.draw_geometries([downpcd],\n zoom=0.3412,\n front=[0.4257, -0.2125, -0.8795],\n lookat=[2.6172, 2.0475, 1.532],\n up=[-0.0694, -0.9768, 0.2024])",
"_____no_output_____"
]
],
[
[
"## Vertex normal estimation\nAnother basic operation for point cloud is point normal estimation.\nPress `N` to see point normals. The keys `-` and `+` can be used to control the length of the normal.",
"_____no_output_____"
]
],
[
[
"print(\"Recompute the normal of the downsampled point cloud\")\ndownpcd.estimate_normals(\n search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))\no3d.visualization.draw_geometries([downpcd],\n zoom=0.3412,\n front=[0.4257, -0.2125, -0.8795],\n lookat=[2.6172, 2.0475, 1.532],\n up=[-0.0694, -0.9768, 0.2024],\n point_show_normal=True)",
"_____no_output_____"
]
],
[
[
"`estimate_normals` computes the normal for every point. The function finds adjacent points and calculates the principal axis of the adjacent points using covariance analysis.\n\nThe function takes an instance of `KDTreeSearchParamHybrid` class as an argument. The two key arguments `radius = 0.1` and `max_nn = 30` specifies search radius and maximum nearest neighbor. It has 10cm of search radius, and only considers up to 30 neighbors to save computation time.\n\n<div class=\"alert alert-info\">\n \n**Note:** \n\nThe covariance analysis algorithm produces two opposite directions as normal candidates. Without knowing the global structure of the geometry, both can be correct. This is known as the normal orientation problem. Open3D tries to orient the normal to align with the original normal if it exists. Otherwise, Open3D does a random guess. Further orientation functions such as `orient_normals_to_align_with_direction` and `orient_normals_towards_camera_location` need to be called if the orientation is a concern.\n\n</div>",
"_____no_output_____"
],
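[
"If the normal orientation matters for your application, one of the orientation functions mentioned above can be applied right after `estimate_normals`. A minimal sketch (check the exact argument name in your Open3D version):\n\n```python\n# flip every estimated normal so that it points roughly along +Z\ndownpcd.orient_normals_to_align_with_direction(orientation_reference=np.array([0., 0., 1.]))\n```",
"_____no_output_____"
],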
[
"## Access estimated vertex normal\nEstimated normal vectors can be retrieved from the `normals` variable of `downpcd`.",
"_____no_output_____"
]
],
[
[
"print(\"Print a normal vector of the 0th point\")\nprint(downpcd.normals[0])",
"_____no_output_____"
]
],
[
[
"To check out other variables, please use `help(downpcd)`. Normal vectors can be transformed as a numpy array using `np.asarray`.",
"_____no_output_____"
]
],
[
[
"print(\"Print the normal vectors of the first 10 points\")\nprint(np.asarray(downpcd.normals)[:10, :])",
"_____no_output_____"
]
],
[
[
"Check [Working with NumPy](working_with_numpy.ipynb) for more examples regarding numpy arrays.",
"_____no_output_____"
],
[
"## Crop point cloud",
"_____no_output_____"
]
],
[
[
"print(\"Load a polygon volume and use it to crop the original point cloud\")\ndemo_crop_data = o3d.data.DemoCropPointCloud()\npcd = o3d.io.read_point_cloud(demo_crop_data.point_cloud_path)\nvol = o3d.visualization.read_selection_polygon_volume(demo_crop_data.cropped_json_path)\nchair = vol.crop_point_cloud(pcd)\no3d.visualization.draw_geometries([chair],\n zoom=0.7,\n front=[0.5439, -0.2333, -0.8060],\n lookat=[2.4615, 2.1331, 1.338],\n up=[-0.1781, -0.9708, 0.1608])",
"_____no_output_____"
]
],
[
[
"`read_selection_polygon_volume` reads a json file that specifies polygon selection area. `vol.crop_point_cloud(pcd)` filters out points. Only the chair remains.",
"_____no_output_____"
],
[
"## Paint point cloud",
"_____no_output_____"
]
],
[
[
"print(\"Paint chair\")\nchair.paint_uniform_color([1, 0.706, 0])\no3d.visualization.draw_geometries([chair],\n zoom=0.7,\n front=[0.5439, -0.2333, -0.8060],\n lookat=[2.4615, 2.1331, 1.338],\n up=[-0.1781, -0.9708, 0.1608])",
"_____no_output_____"
]
],
[
[
"`paint_uniform_color` paints all the points to a uniform color. The color is in RGB space, [0, 1] range.",
"_____no_output_____"
],
[
"## Point cloud distance\nOpen3D provides the method `compute_point_cloud_distance` to compute the distance from a source point cloud to a target point cloud. I.e., it computes for each point in the source point cloud the distance to the closest point in the target point cloud.\n\nIn the example below we use the function to compute the difference between two point clouds. Note that this method could also be used to compute the Chamfer distance between two point clouds.",
"_____no_output_____"
]
],
[
[
"# Load data\ndemo_crop_data = o3d.data.DemoCropPointCloud()\npcd = o3d.io.read_point_cloud(demo_crop_data.point_cloud_path)\nvol = o3d.visualization.read_selection_polygon_volume(demo_crop_data.cropped_json_path)\nchair = vol.crop_point_cloud(pcd)\n\ndists = pcd.compute_point_cloud_distance(chair)\ndists = np.asarray(dists)\nind = np.where(dists > 0.01)[0]\npcd_without_chair = pcd.select_by_index(ind)\no3d.visualization.draw_geometries([pcd_without_chair],\n zoom=0.3412,\n front=[0.4257, -0.2125, -0.8795],\n lookat=[2.6172, 2.0475, 1.532],\n up=[-0.0694, -0.9768, 0.2024])",
"_____no_output_____"
]
],
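[
[
"# Optional illustration: the same distances can be combined into one common variant of\n# the Chamfer distance -- the mean closest-point distance taken in both directions.\nd_pcd_to_chair = np.asarray(pcd.compute_point_cloud_distance(chair))\nd_chair_to_pcd = np.asarray(chair.compute_point_cloud_distance(pcd))\nprint(\"Chamfer distance:\", d_pcd_to_chair.mean() + d_chair_to_pcd.mean())",
"_____no_output_____"
]
],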
[
[
"## Bounding volumes\nThe `PointCloud` geometry type has bounding volumes as all other geometry types in Open3D. Currently, Open3D implements an `AxisAlignedBoundingBox` and an `OrientedBoundingBox` that can also be used to crop the geometry.",
"_____no_output_____"
]
],
[
[
"aabb = chair.get_axis_aligned_bounding_box()\naabb.color = (1, 0, 0)\nobb = chair.get_oriented_bounding_box()\nobb.color = (0, 1, 0)\no3d.visualization.draw_geometries([chair, aabb, obb],\n zoom=0.7,\n front=[0.5439, -0.2333, -0.8060],\n lookat=[2.4615, 2.1331, 1.338],\n up=[-0.1781, -0.9708, 0.1608])",
"_____no_output_____"
]
],
[
[
"## Convex hull\nThe convex hull of a point cloud is the smallest convex set that contains all points. Open3D contains the method `compute_convex_hull` that computes the convex hull of a point cloud. The implementation is based on [Qhull](http://www.qhull.org/).\n\nIn the example code below we first sample a point cloud from a mesh and compute the convex hull that is returned as a triangle mesh. Then, we visualize the convex hull as a red `LineSet`.",
"_____no_output_____"
]
],
[
[
"bunny = o3d.data.BunnyMesh()\nmesh = o3d.io.read_triangle_mesh(bunny.path)\nmesh.compute_vertex_normals()\n\npcl = mesh.sample_points_poisson_disk(number_of_points=2000)\nhull, _ = pcl.compute_convex_hull()\nhull_ls = o3d.geometry.LineSet.create_from_triangle_mesh(hull)\nhull_ls.paint_uniform_color((1, 0, 0))\no3d.visualization.draw_geometries([pcl, hull_ls])",
"_____no_output_____"
]
],
[
[
"## DBSCAN clustering\nGiven a point cloud from e.g. a depth sensor we want to group local point cloud clusters together. For this purpose, we can use clustering algorithms. Open3D implements DBSCAN [\\[Ester1996\\]](../reference.html#Ester1996) that is a density based clustering algorithm. The algorithm is implemented in `cluster_dbscan` and requires two parameters: `eps` defines the distance to neighbors in a cluster and `min_points` defines the minimum number of points required to form a cluster. The function returns `labels`, where the label `-1` indicates noise.",
"_____no_output_____"
]
],
[
[
"ply_point_cloud = o3d.data.PLYPointCloud()\npcd = o3d.io.read_point_cloud(ply_point_cloud.path)\n\nwith o3d.utility.VerbosityContextManager(\n o3d.utility.VerbosityLevel.Debug) as cm:\n labels = np.array(\n pcd.cluster_dbscan(eps=0.02, min_points=10, print_progress=True))\n\nmax_label = labels.max()\nprint(f\"point cloud has {max_label + 1} clusters\")\ncolors = plt.get_cmap(\"tab20\")(labels / (max_label if max_label > 0 else 1))\ncolors[labels < 0] = 0\npcd.colors = o3d.utility.Vector3dVector(colors[:, :3])\no3d.visualization.draw_geometries([pcd],\n zoom=0.455,\n front=[-0.4999, -0.1659, -0.8499],\n lookat=[2.1813, 2.0619, 2.0999],\n up=[0.1204, -0.9852, 0.1215])",
"_____no_output_____"
]
],
[
[
"<div class=\"alert alert-info\">\n \n**Note:** \n\nThis algorithm precomputes all neighbors in the epsilon radius for all points. This can require a lot of memory if the chosen epsilon is too large.\n\n</div>",
"_____no_output_____"
],
[
"## Plane segmentation\nOpen3D also supports segmententation of geometric primitives from point clouds using RANSAC. To find the plane with the largest support in the point cloud, we can use `segment_plane`. The method has three arguments: `distance_threshold` defines the maximum distance a point can have to an estimated plane to be considered an inlier, `ransac_n` defines the number of points that are randomly sampled to estimate a plane, and `num_iterations` defines how often a random plane is sampled and verified. The function then returns the plane as $(a,b,c,d)$ such that for each point $(x,y,z)$ on the plane we have $ax + by + cz + d = 0$. The function further returns a list of indices of the inlier points.",
"_____no_output_____"
]
],
[
[
"pcd_point_cloud = o3d.data.PCDPointCloud()\npcd = o3d.io.read_point_cloud(pcd_point_cloud.path)\n\nplane_model, inliers = pcd.segment_plane(distance_threshold=0.01,\n ransac_n=3,\n num_iterations=1000)\n[a, b, c, d] = plane_model\nprint(f\"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0\")\n\ninlier_cloud = pcd.select_by_index(inliers)\ninlier_cloud.paint_uniform_color([1.0, 0, 0])\noutlier_cloud = pcd.select_by_index(inliers, invert=True)\no3d.visualization.draw_geometries([inlier_cloud, outlier_cloud],\n zoom=0.8,\n front=[-0.4999, -0.1659, -0.8499],\n lookat=[2.1813, 2.0619, 2.0999],\n up=[0.1204, -0.9852, 0.1215])",
"_____no_output_____"
]
],
[
[
"## Hidden point removal\nImagine you want to render a point cloud from a given view point, but points from the background leak into the foreground because they are not occluded by other points. For this purpose we can apply a hidden point removal algorithm. In Open3D the method by [\\[Katz2007\\]](../reference.html#Katz2007) is implemented that approximates the visibility of a point cloud from a given view without surface reconstruction or normal estimation.",
"_____no_output_____"
]
],
[
[
"print(\"Convert mesh to a point cloud and estimate dimensions\")\narmadillo = o3d.data.ArmadilloMesh()\nmesh = o3d.io.read_triangle_mesh(armadillo.path)\nmesh.compute_vertex_normals()\n\npcd = mesh.sample_points_poisson_disk(5000)\ndiameter = np.linalg.norm(\n np.asarray(pcd.get_max_bound()) - np.asarray(pcd.get_min_bound()))\no3d.visualization.draw_geometries([pcd])",
"_____no_output_____"
],
[
"print(\"Define parameters used for hidden_point_removal\")\ncamera = [0, 0, diameter]\nradius = diameter * 100\n\nprint(\"Get all points that are visible from given view point\")\n_, pt_map = pcd.hidden_point_removal(camera, radius)\n\nprint(\"Visualize result\")\npcd = pcd.select_by_index(pt_map)\no3d.visualization.draw_geometries([pcd])",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7c9660a7110b125527cd269ffcdf9f5182ae0cd | 100,817 | ipynb | Jupyter Notebook | Part1/HSI_CNN.ipynb | efkandurakli/Graduation-Project1 | fd2cba89929da2cef49ec67214b54c310b57ce01 | [
"MIT"
] | 1 | 2019-12-18T08:16:55.000Z | 2019-12-18T08:16:55.000Z | Part1/HSI_CNN.ipynb | efkandurakli/Graduation-Project1 | fd2cba89929da2cef49ec67214b54c310b57ce01 | [
"MIT"
] | null | null | null | Part1/HSI_CNN.ipynb | efkandurakli/Graduation-Project1 | fd2cba89929da2cef49ec67214b54c310b57ce01 | [
"MIT"
] | null | null | null | 100,817 | 100,817 | 0.743297 | [
[
[
"!apt-get install -y -qq software-properties-common python-software-properties module-init-tools\n!add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null\n!apt-get update -qq 2>&1 > /dev/null\n!apt-get -y install -qq google-drive-ocamlfuse fuse\nfrom google.colab import auth\nauth.authenticate_user()\nfrom oauth2client.client import GoogleCredentials\ncreds = GoogleCredentials.get_application_default()\nimport getpass\n!google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL\nvcode = getpass.getpass()\n!echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}\n",
"E: Package 'python-software-properties' has no installation candidate\nSelecting previously unselected package google-drive-ocamlfuse.\n(Reading database ... 130812 files and directories currently installed.)\nPreparing to unpack .../google-drive-ocamlfuse_0.7.3-0ubuntu3~ubuntu18.04.1_amd64.deb ...\nUnpacking google-drive-ocamlfuse (0.7.3-0ubuntu3~ubuntu18.04.1) ...\nSetting up google-drive-ocamlfuse (0.7.3-0ubuntu3~ubuntu18.04.1) ...\nProcessing triggers for man-db (2.8.3-2ubuntu0.1) ...\nPlease, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id=32555940559.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force\n··········\nPlease, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id=32555940559.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force\nPlease enter the verification code: Access token retrieved correctly.\n"
],
[
"!mkdir -p drive\n!google-drive-ocamlfuse drive",
"_____no_output_____"
],
[
"ls",
"\u001b[0m\u001b[01;34m144bandPng\u001b[0m/ HSI_DATA.tif s1.tif\nHSI_AFTER_PCA_TEST.csv LiDAR_DATA.tif test.txt\nHSI_AFTER_PCA_TRAIN.csv \u001b[01;34mPCA3Band\u001b[0m/ train.txt\n"
],
[
"from osgeo import gdal\nimport numpy as np\nfrom sklearn.decomposition import PCA\nimport cv2",
"_____no_output_____"
],
[
"raster = gdal.Open(\"LiDAR_DATA.tif\")\n\ndata = np.array(raster.ReadAsArray())",
"_____no_output_____"
],
[
"cv2.imwrite(\"PCA3Band/3.png\", data)",
"_____no_output_____"
],
[
"import cv2\nfor i in range(data.shape[0]):\n cv2.imwrite(\"144BandPng/\" + str(i) + \".png\", data[i])",
"_____no_output_____"
],
[
"NUM_BANDS = data.shape[0]\nHEIGHT = data.shape[1]\nWIDTH = data.shape[2]\nhsi_data_2d = data.transpose(1, 2, 0).reshape((HEIGHT * WIDTH), NUM_BANDS)\npca = PCA(n_components=3)\nprincipalComponents = pca.fit_transform(hsi_data_2d)\nprincipalComponents = np.array(principalComponents).transpose(1, 0).reshape(3, HEIGHT, WIDTH)",
"_____no_output_____"
],
[
"cv2.imwrite(\"PCA3Band/0.png\", principalComponents[0])\ncv2.imwrite(\"PCA3Band/1.png\", principalComponents[1])\ncv2.imwrite(\"PCA3Band/2.png\", principalComponents[2])",
"_____no_output_____"
],
[
"from osgeo import gdal\nraster = gdal.Open(\"s1.tif\")\n",
"_____no_output_____"
],
[
"import numpy as np\narr = np.array(raster.ReadAsArray())\nprint(arr.shape)",
"(43, 19, 16)\n"
],
[
"arr = arr[0:13]",
"_____no_output_____"
],
[
"print(arr[0].shape)",
"(19, 16)\n"
],
[
"cd drive/Undergraduate_Project/",
"/content/drive/Undergraduate_Project\n"
],
[
"!ls",
"AverageAccuracy.py deneme.ipynb HSI_CNN.py Models\nbaboon.png\t drive\t HSI_LiDAR normalization.py\nblank.png\t HoustonDataset LiDAR_CNN.py parameter.py\ncengiz.py\t HoustonDataset.py lidar.png __pycache__\ndata\t\t HSI_CNN.ipynb map.png\n"
],
[
"import numpy as np\nimport HoustonDataset\ndataset = HoustonDataset.Houston()\n\nhsi_data = dataset.HSI_PCA(n_components=3)\n",
"_____no_output_____"
],
[
"pc1 = hsi_data[0]\npc2 = hsi_data[1]\npc3 = hsi_data[2]",
"_____no_output_____"
],
[
"pc1 = np.interp(pc1, (pc1.min(), pc1.max()), (0, 255)).astype(int)\npc2 = np.interp(pc2, (pc2.min(), pc2.max()), (0, 255)).astype(int)\npc3 = np.interp(pc3, (pc3.min(), pc3.max()), (0, 255)).astype(int)",
"_____no_output_____"
],
[
"import cv2\ncv2.imwrite('hsi_pc2.png',pc3)",
"_____no_output_____"
],
[
"lidar_data = dataset.get_lidar_data()",
"_____no_output_____"
],
[
"lidar = np.interp(lidar_data, (lidar_data.min(), lidar_data.max()), (0, 255)).astype(int)\nimport cv2\ncv2.imwrite('lidar.png',lidar)",
"_____no_output_____"
],
[
"import numpy as np\nimport parameter\nimport HoustonDataset\nimport time\nfrom datetime import timedelta\nimport math\nfrom sklearn import metrics\nimport os\nimport normalization",
"_____no_output_____"
],
[
"def get_patches(data, pixels, patch_size):\n HEIGHT = data.shape[1]\n WIDTH = data.shape[2]\n offset = int(patch_size / 2)\n train_patches = []\n for i in range(HEIGHT):\n for j in range(WIDTH):\n if pixels[i][j] != 0:\n row_low = max(0, i - offset)\n row_high = min(HEIGHT - 1, i + offset)\n if row_low == 0:\n row_high = row_low + patch_size - 1\n if row_high == HEIGHT - 1:\n row_low = row_high - patch_size + 1\n\n col_low = max(0, j - offset)\n col_high = min(WIDTH - 1, j + offset)\n if col_low == 0:\n col_high = col_low + patch_size - 1\n if col_high == WIDTH - 1:\n col_low = col_high - patch_size + 1\n\n train_patches.append(data[0:, row_low:row_high + 1, col_low:col_high + 1])\n return np.array(train_patches)",
"_____no_output_____"
],
[
"HSI_PATCH_SIZE = 27\nLiDAR_PATCH_SIZE = 41\nKERNEL_SIZE = parameter.kernel_size\nCONV1 = parameter.conv1\nCONV2 = parameter.conv2\nFC1 = parameter.fc1\nFC2 = parameter.fc2\nLEARNING_RATE = parameter.learning_rate",
"_____no_output_____"
],
[
"dataset = HoustonDataset.Houston()\n\n#hsi_data = dataset.get_hsi_data()\nhsi_data = dataset.HSI_PCA(n_components=3)\n\n\n\ntrain_pixels = dataset.get_train_pixels() \ntest_pixels = dataset.get_test_pixels()\n\n\ntrain_labels = dataset.get_train_labels()\ntest_labels = dataset.get_test_labels()\n\n\nhsi_train_patches = get_patches(hsi_data, train_pixels, HSI_PATCH_SIZE)\nhsi_test_patches = get_patches(hsi_data, test_pixels, HSI_PATCH_SIZE)\n\n\n",
"_____no_output_____"
],
[
"print(hsi_data.shape)\nprint(train_pixels.shape)\nprint(test_pixels.shape)\nprint(train_labels.shape)\nprint(test_labels.shape)\nprint(hsi_train_patches.shape)\nprint(hsi_test_patches.shape)",
"(3, 349, 1905)\n(349, 1905)\n(349, 1905)\n(2832,)\n(12197,)\n(2832, 3, 27, 27)\n(12197, 3, 27, 27)\n"
],
[
"import keras\n\ntrain_one_hot = keras.utils.to_categorical(train_labels-1)\ntest_one_hot = keras.utils.to_categorical(test_labels-1)\n\nNUM_CLS = train_one_hot.shape[1]\n\nHSI_BANDS = hsi_train_patches.shape[1]\n\n\n #(2832, 144, 27, 27)\nhsi_train_img = np.array(hsi_train_patches.transpose(0,2,3,1).reshape(hsi_train_patches.shape[0], HSI_PATCH_SIZE, HSI_PATCH_SIZE, HSI_BANDS))\nhsi_test_img = np.array(hsi_test_patches.transpose(0, 2, 3, 1).reshape(hsi_test_patches.shape[0], HSI_PATCH_SIZE, HSI_PATCH_SIZE, HSI_BANDS))",
"Using TensorFlow backend.\n"
],
[
"print(hsi_train_img.shape)\nprint(hsi_test_img.shape)",
"(2832, 27, 27, 3)\n(12197, 27, 27, 3)\n"
],
[
"from tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Conv2D, Dense, Flatten\nfrom tensorflow.python.keras.layers import InputLayer\nfrom tensorflow.python.keras.layers import MaxPooling2D\nfrom tensorflow.python.keras.layers import BatchNormalization, Dropout\nfrom tensorflow.python.keras.optimizers import Adam,SGD\n\nmodel = Sequential()\nmodel.add(InputLayer(input_shape=(HSI_PATCH_SIZE, HSI_PATCH_SIZE, HSI_BANDS)))\nmodel.add(Conv2D(kernel_size=3, strides=1, filters=CONV1, padding='same', activation='relu', name='conv1'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=2, strides=2))\nmodel.add(Conv2D(kernel_size=5, strides=1, filters=CONV2, padding='same', activation='relu', name='conv2'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=2, strides=2))\n\n\nmodel.add(Flatten())\nmodel.add(Dense(FC1, activation='relu'))\nmodel.add(Dropout(0.75))\nmodel.add(Dense(FC2, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(NUM_CLS, activation='softmax'))\n\nsgd = SGD(lr=0.005, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n\n\nhistory = model.fit(hsi_train_img, train_one_hot, batch_size=25, shuffle=True, epochs=240)",
"Epoch 1/240\n2832/2832 [==============================] - 8s 3ms/step - loss: 3.6474 - acc: 0.2454\nEpoch 2/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 1.8336 - acc: 0.4121\nEpoch 3/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 1.3815 - acc: 0.5565\nEpoch 4/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 1.1069 - acc: 0.6306\nEpoch 5/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.9326 - acc: 0.6787\nEpoch 6/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.8211 - acc: 0.7179\nEpoch 7/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.6928 - acc: 0.7758\nEpoch 8/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.6493 - acc: 0.7878\nEpoch 9/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.6014 - acc: 0.8097\nEpoch 10/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.5624 - acc: 0.8206\nEpoch 11/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.5038 - acc: 0.8298\nEpoch 12/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.4952 - acc: 0.8386\nEpoch 13/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.4598 - acc: 0.8485\nEpoch 14/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.4475 - acc: 0.8591\nEpoch 15/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.3673 - acc: 0.8789\nEpoch 16/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.3720 - acc: 0.8725\nEpoch 17/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.3386 - acc: 0.8912\nEpoch 18/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.3228 - acc: 0.8905\nEpoch 19/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.3331 - acc: 0.8951\nEpoch 20/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2918 - acc: 0.9057\nEpoch 21/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.3103 - acc: 0.9025\nEpoch 22/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2898 - acc: 0.9071\nEpoch 23/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2978 - acc: 0.9008\nEpoch 24/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2655 - acc: 0.9085\nEpoch 25/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2795 - acc: 0.9153\nEpoch 26/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2516 - acc: 0.9191\nEpoch 27/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2527 - acc: 0.9170\nEpoch 28/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2743 - acc: 0.9138\nEpoch 29/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2588 - acc: 0.9174\nEpoch 30/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2549 - acc: 0.9220\nEpoch 31/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2613 - acc: 0.9184\nEpoch 32/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2670 - acc: 0.9195\nEpoch 33/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2444 - acc: 0.9230\nEpoch 34/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2188 - acc: 0.9322\nEpoch 35/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2320 - acc: 0.9255\nEpoch 36/240\n2832/2832 
[==============================] - 4s 1ms/step - loss: 0.2056 - acc: 0.9322\nEpoch 37/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1895 - acc: 0.9379\nEpoch 38/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2186 - acc: 0.9347\nEpoch 39/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2105 - acc: 0.9319\nEpoch 40/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.2273 - acc: 0.9287\nEpoch 41/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1768 - acc: 0.9414\nEpoch 42/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1818 - acc: 0.9421\nEpoch 43/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1795 - acc: 0.9421\nEpoch 44/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1676 - acc: 0.9470\nEpoch 45/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1654 - acc: 0.9463\nEpoch 46/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1826 - acc: 0.9393\nEpoch 47/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1539 - acc: 0.9488\nEpoch 48/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1769 - acc: 0.9456\nEpoch 49/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1764 - acc: 0.9410\nEpoch 50/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1643 - acc: 0.9484\nEpoch 51/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1650 - acc: 0.9477\nEpoch 52/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1848 - acc: 0.9410\nEpoch 53/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1787 - acc: 0.9414\nEpoch 54/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1865 - acc: 0.9375\nEpoch 55/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1660 - acc: 0.9477\nEpoch 56/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1693 - acc: 0.9492\nEpoch 57/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1944 - acc: 0.9403\nEpoch 58/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1526 - acc: 0.9513\nEpoch 59/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1598 - acc: 0.9509\nEpoch 60/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1575 - acc: 0.9499\nEpoch 61/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1486 - acc: 0.9523\nEpoch 62/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1364 - acc: 0.9569\nEpoch 63/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1982 - acc: 0.9389\nEpoch 64/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1461 - acc: 0.9527\nEpoch 65/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1433 - acc: 0.9506\nEpoch 66/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1409 - acc: 0.9516\nEpoch 67/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1530 - acc: 0.9488\nEpoch 68/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1549 - acc: 0.9555\nEpoch 69/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1580 - acc: 0.9506\nEpoch 70/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1485 - acc: 0.9495\nEpoch 71/240\n2832/2832 
[==============================] - 4s 1ms/step - loss: 0.1702 - acc: 0.9488\nEpoch 72/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1525 - acc: 0.9530\nEpoch 73/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1294 - acc: 0.9576\nEpoch 74/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1476 - acc: 0.9506\nEpoch 75/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1491 - acc: 0.9520\nEpoch 76/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1348 - acc: 0.9580\nEpoch 77/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1375 - acc: 0.9573\nEpoch 78/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1565 - acc: 0.9555\nEpoch 79/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1416 - acc: 0.9523\nEpoch 80/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1492 - acc: 0.9502\nEpoch 81/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1400 - acc: 0.9520\nEpoch 82/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1307 - acc: 0.9573\nEpoch 83/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1544 - acc: 0.9506\nEpoch 84/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1420 - acc: 0.9566\nEpoch 85/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1356 - acc: 0.9573\nEpoch 86/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1307 - acc: 0.9605\nEpoch 87/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1486 - acc: 0.9513\nEpoch 88/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1373 - acc: 0.9590\nEpoch 89/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1329 - acc: 0.9544\nEpoch 90/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1230 - acc: 0.9619\nEpoch 91/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1329 - acc: 0.9566\nEpoch 92/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1345 - acc: 0.9566\nEpoch 93/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1242 - acc: 0.9601\nEpoch 94/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1335 - acc: 0.9573\nEpoch 95/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1402 - acc: 0.9544\nEpoch 96/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1331 - acc: 0.9601\nEpoch 97/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1312 - acc: 0.9569\nEpoch 98/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1418 - acc: 0.9555\nEpoch 99/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1204 - acc: 0.9608\nEpoch 100/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1110 - acc: 0.9661\nEpoch 101/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1350 - acc: 0.9590\nEpoch 102/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1103 - acc: 0.9633\nEpoch 103/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1132 - acc: 0.9633\nEpoch 104/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1290 - acc: 0.9559\nEpoch 105/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1219 - acc: 0.9608\nEpoch 106/240\n2832/2832 
[==============================] - 4s 1ms/step - loss: 0.1076 - acc: 0.9636\nEpoch 107/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1317 - acc: 0.9587\nEpoch 108/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1220 - acc: 0.9601\nEpoch 109/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1251 - acc: 0.9587\nEpoch 110/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1211 - acc: 0.9622\nEpoch 111/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1037 - acc: 0.9661\nEpoch 112/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1276 - acc: 0.9580\nEpoch 113/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1135 - acc: 0.9605\nEpoch 114/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1102 - acc: 0.9619\nEpoch 115/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1054 - acc: 0.9650\nEpoch 116/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1145 - acc: 0.9622\nEpoch 117/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1217 - acc: 0.9594\nEpoch 118/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1196 - acc: 0.9626\nEpoch 119/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1069 - acc: 0.9654\nEpoch 120/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1047 - acc: 0.9665\nEpoch 121/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1121 - acc: 0.9633\nEpoch 122/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1207 - acc: 0.9601\nEpoch 123/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1012 - acc: 0.9643\nEpoch 124/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0972 - acc: 0.9686\nEpoch 125/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1124 - acc: 0.9626\nEpoch 126/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1362 - acc: 0.9583\nEpoch 127/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1134 - acc: 0.9647\nEpoch 128/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1001 - acc: 0.9654\nEpoch 129/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1061 - acc: 0.9643\nEpoch 130/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1269 - acc: 0.9583\nEpoch 131/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0949 - acc: 0.9661\nEpoch 132/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1282 - acc: 0.9590\nEpoch 133/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1189 - acc: 0.9597\nEpoch 134/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1034 - acc: 0.9633\nEpoch 135/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1161 - acc: 0.9626\nEpoch 136/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1208 - acc: 0.9601\nEpoch 137/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0959 - acc: 0.9689\nEpoch 138/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1183 - acc: 0.9619\nEpoch 139/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1152 - acc: 0.9605\nEpoch 140/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1221 - acc: 0.9594\nEpoch 
141/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1431 - acc: 0.9590\nEpoch 142/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1051 - acc: 0.9654\nEpoch 143/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1207 - acc: 0.9622\nEpoch 144/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0948 - acc: 0.9686\nEpoch 145/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0962 - acc: 0.9672\nEpoch 146/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1025 - acc: 0.9675\nEpoch 147/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1087 - acc: 0.9643\nEpoch 148/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1158 - acc: 0.9615\nEpoch 149/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1069 - acc: 0.9657\nEpoch 150/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1058 - acc: 0.9668\nEpoch 151/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1078 - acc: 0.9657\nEpoch 152/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1170 - acc: 0.9633\nEpoch 153/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1123 - acc: 0.9643\nEpoch 154/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1154 - acc: 0.9612\nEpoch 155/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1133 - acc: 0.9629\nEpoch 156/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1067 - acc: 0.9650\nEpoch 157/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1047 - acc: 0.9647\nEpoch 158/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1076 - acc: 0.9682\nEpoch 159/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1150 - acc: 0.9626\nEpoch 160/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1077 - acc: 0.9640\nEpoch 161/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1161 - acc: 0.9640\nEpoch 162/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1236 - acc: 0.9612\nEpoch 163/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0876 - acc: 0.9710\nEpoch 164/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1040 - acc: 0.9647\nEpoch 165/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0906 - acc: 0.9707\nEpoch 166/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0925 - acc: 0.9714\nEpoch 167/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1020 - acc: 0.9636\nEpoch 168/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0955 - acc: 0.9672\nEpoch 169/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1036 - acc: 0.9668\nEpoch 170/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1006 - acc: 0.9668\nEpoch 171/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0934 - acc: 0.9675\nEpoch 172/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0853 - acc: 0.9732\nEpoch 173/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1034 - acc: 0.9661\nEpoch 174/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0881 - acc: 0.9718\nEpoch 175/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0956 - 
acc: 0.9657\nEpoch 176/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1141 - acc: 0.9626\nEpoch 177/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0937 - acc: 0.9700\nEpoch 178/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1001 - acc: 0.9668\nEpoch 179/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1011 - acc: 0.9689\nEpoch 180/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1049 - acc: 0.9679\nEpoch 181/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1009 - acc: 0.9643\nEpoch 182/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0970 - acc: 0.9682\nEpoch 183/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0946 - acc: 0.9665\nEpoch 184/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0945 - acc: 0.9682\nEpoch 185/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1023 - acc: 0.9647\nEpoch 186/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1067 - acc: 0.9640\nEpoch 187/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0987 - acc: 0.9686\nEpoch 188/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1063 - acc: 0.9661\nEpoch 189/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0829 - acc: 0.9742\nEpoch 190/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1183 - acc: 0.9605\nEpoch 191/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1229 - acc: 0.9619\nEpoch 192/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1047 - acc: 0.9665\nEpoch 193/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0900 - acc: 0.9696\nEpoch 194/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0844 - acc: 0.9725\nEpoch 195/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1061 - acc: 0.9643\nEpoch 196/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1115 - acc: 0.9665\nEpoch 197/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0848 - acc: 0.9703\nEpoch 198/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1212 - acc: 0.9615\nEpoch 199/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1106 - acc: 0.9636\nEpoch 200/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0892 - acc: 0.9721\nEpoch 201/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0944 - acc: 0.9686\nEpoch 202/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1042 - acc: 0.9672\nEpoch 203/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1006 - acc: 0.9682\nEpoch 204/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0985 - acc: 0.9682\nEpoch 205/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0939 - acc: 0.9668\nEpoch 206/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0938 - acc: 0.9682\nEpoch 207/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0978 - acc: 0.9696\nEpoch 208/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1082 - acc: 0.9633\nEpoch 209/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1026 - acc: 0.9693\nEpoch 210/240\n2832/2832 [==============================] - 4s 1ms/step - 
loss: 0.1021 - acc: 0.9657\nEpoch 211/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0929 - acc: 0.9689\nEpoch 212/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1055 - acc: 0.9647\nEpoch 213/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0989 - acc: 0.9668\nEpoch 214/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0945 - acc: 0.9703\nEpoch 215/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0928 - acc: 0.9696\nEpoch 216/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1019 - acc: 0.9675\nEpoch 217/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1023 - acc: 0.9640\nEpoch 218/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0980 - acc: 0.9668\nEpoch 219/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1048 - acc: 0.9668\nEpoch 220/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1014 - acc: 0.9636\nEpoch 221/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1119 - acc: 0.9619\nEpoch 222/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1024 - acc: 0.9696\nEpoch 223/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0787 - acc: 0.9749\nEpoch 224/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1053 - acc: 0.9629\nEpoch 225/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0962 - acc: 0.9665\nEpoch 226/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0968 - acc: 0.9679\nEpoch 227/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0987 - acc: 0.9686\nEpoch 228/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1134 - acc: 0.9626\nEpoch 229/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0931 - acc: 0.9700\nEpoch 230/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0954 - acc: 0.9689\nEpoch 231/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0883 - acc: 0.9714\nEpoch 232/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0980 - acc: 0.9668\nEpoch 233/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.1070 - acc: 0.9640\nEpoch 234/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0927 - acc: 0.9679\nEpoch 235/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0733 - acc: 0.9756\nEpoch 236/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0832 - acc: 0.9707\nEpoch 237/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0946 - acc: 0.9718\nEpoch 238/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0882 - acc: 0.9732\nEpoch 239/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0972 - acc: 0.9657\nEpoch 240/240\n2832/2832 [==============================] - 4s 1ms/step - loss: 0.0950 - acc: 0.9654\n"
],
[
"train_cls = dataset.get_train_labels() - 1\nprediction = model.predict(hsi_train_img).argmax(axis=-1)\n\n\n\nfrom sklearn import metrics, preprocessing\nfrom AverageAccuracy import AA_andEachClassAccuracy\n\noverall_acc = metrics.accuracy_score(prediction, train_cls)\nkappa = metrics.cohen_kappa_score(prediction, train_cls)\nconfusion_matrix = metrics.confusion_matrix(prediction, train_cls)\neach_acc, average_acc = AA_andEachClassAccuracy(confusion_matrix) \n\n\nprint(\"Overall Accuracy of training sapmles : \",overall_acc)\nprint(\"Average Accuracy of training samples : \",average_acc)\nprint(\"Kappa statistics of training samples : \",kappa)\nprint(\"Each class accuracy of training samples : \", each_acc)\nprint(\"Confusion matrix :\", confusion_matrix)\n\n",
"Overall Accuracy of training sapmles : 0.998587570621469\nAverage Accuracy of training samples : 0.9986324786324786\nKappa statistics of training samples : 0.9984866050923707\nEach class accuracy of training samples : [1. 1. 1. 1. 1. 1.\n 1. 0.97948718 1. 1. 1. 1.\n 1. 1. 1. ]\nConfusion matrix : [[198 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 190 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 192 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 188 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 186 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 182 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 196 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 191 0 0 0 4 0 0 0]\n [ 0 0 0 0 0 0 0 0 193 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 191 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 181 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 188 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 184 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 181 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 187]]\n"
],
[
"#hsi_test_img = hsi_test_patches.transpose(0,2,3,1).reshape(hsi_test_patches.shape[0], HSI_PATCH_SIZE, HSI_PATCH_SIZE, HSI_BANDS)\n\ntest_cls = dataset.get_test_labels() - 1\nprediction = model.predict(hsi_test_img).argmax(axis=-1)\n\n\n\nfrom sklearn import metrics, preprocessing\nfrom AverageAccuracy import AA_andEachClassAccuracy\n\noverall_acc = metrics.accuracy_score(prediction, test_cls)\nkappa = metrics.cohen_kappa_score(prediction, test_cls)\nconfusion_matrix = metrics.confusion_matrix(prediction, test_cls)\neach_acc, average_acc = AA_andEachClassAccuracy(confusion_matrix) \n\n\nprint(\"Overall Accuracy of testing sapmles : \",overall_acc)\nprint(\"Average Accuracy of testing samples : \",average_acc)\nprint(\"Kappa statistics of testing samples : \",kappa)\nprint(\"Each class accuracy of testing samples : \", each_acc)\nprint(\"Confusion matrix :\", confusion_matrix)\n",
"Overall Accuracy of testing sapmles : 0.7105025826022793\nAverage Accuracy of testing samples : 0.7631304113768933\nKappa statistics of testing samples : 0.6863396388905167\nEach class accuracy of testing samples : [0.86792453 0.98391421 0.18059299 0.94016227 0.81124807 0.77922078\n 0.93515704 0.4511041 0.92921348 0.72572402 0.77941176 0.52990354\n 0.91532258 0.88979592 0.72826087]\nConfusion matrix : [[ 828 90 0 35 0 0 0 0 0 0 0 0 0 0\n 1]\n [ 11 734 0 1 0 0 0 0 0 0 0 0 0 0\n 0]\n [ 178 158 268 70 0 17 80 173 14 328 129 5 20 0\n 44]\n [ 9 9 0 927 0 0 16 0 25 0 0 0 0 0\n 0]\n [ 0 0 22 0 1053 0 0 19 1 0 0 1 0 0\n 202]\n [ 19 0 3 5 0 120 0 0 0 0 4 3 0 0\n 0]\n [ 4 12 19 8 0 0 923 11 0 0 0 0 10 0\n 0]\n [ 3 44 15 6 3 0 19 429 66 23 0 157 9 23\n 154]\n [ 0 0 0 2 0 6 34 20 827 1 0 0 0 0\n 0]\n [ 0 0 34 0 0 0 0 11 8 426 68 40 0 0\n 0]\n [ 0 8 129 0 0 0 0 9 66 0 795 7 0 6\n 0]\n [ 0 0 0 0 0 0 0 356 50 258 43 824 19 0\n 5]\n [ 0 0 0 2 0 0 0 0 0 0 15 4 227 0\n 0]\n [ 0 0 0 0 0 0 0 25 2 0 0 0 0 218\n 0]\n [ 1 9 15 0 0 0 0 0 0 0 0 0 0 0\n 67]]\n"
],
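[
"# The AA_andEachClassAccuracy helper used above lives in the local AverageAccuracy module, which is not shown in this notebook.\n# Minimal sketch of what such a helper typically computes, assuming per-class accuracy is the confusion-matrix diagonal\n# divided by the per-row totals (this reproduces the per-class values reported above); the real module may differ in detail.\nimport numpy as np\n\ndef AA_andEachClassAccuracy_sketch(confusion_matrix):\n    # diagonal = correctly classified samples per class; row sums = total samples in that row of the matrix\n    list_diag = np.diag(confusion_matrix)\n    list_raw_sum = np.sum(confusion_matrix, axis=1)\n    each_acc = np.nan_to_num(list_diag / list_raw_sum)\n    average_acc = np.mean(each_acc)\n    return each_acc, average_acc\n\n# e.g. AA_andEachClassAccuracy_sketch(confusion_matrix) should match each_acc and average_acc above",
"_____no_output_____"
],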
[
"model.save_weights('Models/hsi_model_weights.h5')\n",
"_____no_output_____"
],
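[
"# The saved weights can later be restored with load_weights; this assumes a model with the same\n# architecture has already been built (e.g., by re-running the model-definition cells above).\nmodel.load_weights('Models/hsi_model_weights.h5')",
"_____no_output_____"
],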
[
"import matplotlib.pyplot as plt\nplt.plot(history.history['acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nplt.plot(history.history['loss'])\nplt.title('Modelin Maliyet Grafiği')\nplt.ylabel('Maliyer')\nplt.xlabel('Epoch')\nplt.legend(['Eğitim'], loc='upper right')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c9691c24a9a2b2910a91c026052fde3e09d620 | 435,093 | ipynb | Jupyter Notebook | prototyping01.ipynb | xkortex/MuseEEG-Handler | 7cd2ac553d23d5c30bb3a60123d1fccc2d7ad92c | [
"Apache-2.0"
] | 1 | 2018-04-19T02:17:39.000Z | 2018-04-19T02:17:39.000Z | prototyping01.ipynb | xkortex/MuseEEG-Handler | 7cd2ac553d23d5c30bb3a60123d1fccc2d7ad92c | [
"Apache-2.0"
] | null | null | null | prototyping01.ipynb | xkortex/MuseEEG-Handler | 7cd2ac553d23d5c30bb3a60123d1fccc2d7ad92c | [
"Apache-2.0"
] | null | null | null | 861.570297 | 191,550 | 0.938376 | [
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nimport sigflux\nfrom sigflux.wave import cwtlets\nfrom scipy import signal\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import plot, imshow, scatter\nfrom matplotlib import rcParams",
"_____no_output_____"
],
[
"df = pd.read_csv('output/eeg1506721590.csv', index_col=0)\ndf = df.iloc[:-1] # shave off last because it's usually corrupted\n# df.drop([5,6,7], axis=1, inplace=True)\ndf['dt'] = pd.to_datetime(df['time'])\ndf.set_index('dt', inplace=True)\ndf.drop('time', axis=1, inplace=True)\nprint(df.dtypes)\nprint(df.isnull().sum())\nprint(len(df))\ndf.head()",
"ch0 float64\nch1 float64\nch2 float64\nch3 float64\nch4 float64\ndtype: object\nch0 0\nch1 0\nch2 0\nch3 0\nch4 0\ndtype: int64\n10359\n"
],
[
"df.tail()",
"_____no_output_____"
],
[
"df.mean()",
"_____no_output_____"
],
[
"df = df[~df.isnull()]\nprint(df.isnull().sum())\nprint(len(df))",
"ch0 0\nch1 0\nch2 0\nch3 0\nch4 0\ndtype: int64\n10359\n"
],
[
"df.index",
"_____no_output_____"
],
[
"df.plot()",
"_____no_output_____"
],
[
"df -= df.mean()",
"_____no_output_____"
],
[
"# df = df.clip(-250,250)",
"_____no_output_____"
],
[
"df.plot()",
"_____no_output_____"
],
[
"widths = np.linspace(1,16, 30)**2\nwt = sigflux.cwt(df.as_matrix()[:,4], signal.ricker, widths)",
"_____no_output_____"
],
[
"rcParams['figure.figsize'] = (16,8)\nimg = wt\nplt.imshow(img, extent=[-1, 1, widths[-1], widths[0]], cmap='seismic', aspect='auto',\n vmax=abs(img).max(), vmin=-abs(img).max())",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c96aa452dcd1fb79a12342c3b9347bab47519f | 109,507 | ipynb | Jupyter Notebook | PyCitySchools/PyCitySchools_1.ipynb | githotirado/pandas-challenge | 001d81c8fcf75907c96e144878b424f2b78cb636 | [
"Apache-2.0"
] | null | null | null | PyCitySchools/PyCitySchools_1.ipynb | githotirado/pandas-challenge | 001d81c8fcf75907c96e144878b424f2b78cb636 | [
"Apache-2.0"
] | null | null | null | PyCitySchools/PyCitySchools_1.ipynb | githotirado/pandas-challenge | 001d81c8fcf75907c96e144878b424f2b78cb636 | [
"Apache-2.0"
] | null | null | null | 35.7983 | 202 | 0.3793 | [
[
[
"### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport pandas as pd\n\n# File to Load (Remember to Change These)\nschool_data_to_load = \"Resources/schools_complete.csv\"\nstudent_data_to_load = \"Resources/students_complete.csv\"\n\n# Read School and Student Data File and store into Pandas DataFrames\nschool_data = pd.read_csv(school_data_to_load)\nstudent_data = pd.read_csv(student_data_to_load)\n\n# Combine the data into a single dataset. \nschool_data_complete = pd.merge(student_data, school_data, how=\"left\", on=[\"school_name\", \"school_name\"])\nschool_data_complete.head()",
"_____no_output_____"
]
],
[
[
"## District Summary\n\n* Calculate the total number of schools\n\n* Calculate the total number of students\n\n* Calculate the total budget\n\n* Calculate the average math score \n\n* Calculate the average reading score\n\n* Calculate the percentage of students with a passing math score (70 or greater)\n\n* Calculate the percentage of students with a passing reading score (70 or greater)\n\n* Calculate the percentage of students who passed math **and** reading (% Overall Passing)\n\n* Create a dataframe to hold the above results\n\n* Optional: give the displayed data cleaner formatting",
"_____no_output_____"
]
],
[
[
"number_of_schools = len(school_data[\"School ID\"].unique())\nnumber_of_schools",
"_____no_output_____"
],
[
"number_of_students = len(student_data[\"Student ID\"].unique())\nnumber_of_students",
"_____no_output_____"
],
[
"total_budget = school_data[\"budget\"].sum()\ntotal_budget",
"_____no_output_____"
],
[
"avg_math_score = student_data[\"math_score\"].mean()\navg_math_score",
"_____no_output_____"
],
[
"avg_read_score = student_data[\"reading_score\"].mean()\navg_read_score",
"_____no_output_____"
],
[
"passing_math = (student_data[\"math_score\"] >= 70)\nmath_passers = student_data.loc[passing_math]\nnumber_math_passers = len(math_passers)\npct_pass_math = number_math_passers * 100 / number_of_students\npct_pass_math",
"_____no_output_____"
],
[
"passing_read = (student_data[\"reading_score\"] >= 70)\nread_passers = student_data.loc[passing_read]\nnumber_read_passers = len(read_passers)\npct_pass_read = number_read_passers * 100 / number_of_students\npct_pass_read",
"_____no_output_____"
],
[
"pass_math_read = passing_math & passing_read\nmath_read_passers = student_data.loc[pass_math_read]\nnumber_math_read_passers = len(math_read_passers)\npct_pass_read_math = number_math_read_passers * 100 / number_of_students\npct_pass_read_math",
"_____no_output_____"
],
[
"district_summary_df = pd.DataFrame(\n [\n {\"number of schools\": number_of_schools,\n \"number of students\": number_of_students,\n \"total budget\": total_budget,\n \"average math score\": avg_math_score,\n \"average reading score\": avg_read_score,\n \"% passing math score\": pct_pass_math,\n \"% passing reading score\": pct_pass_read,\n \"% passing math and reading score\": pct_pass_read_math\n }\n ]\n )\ndistrict_summary_df",
"_____no_output_____"
],
[
"# Format final district summary\ndistrict_summary_df[\"number of students\"] = district_summary_df[\"number of students\"].map(\"{:,}\".format)\ndistrict_summary_df[\"total budget\"] = district_summary_df[\"total budget\"].map(\"${:,}\".format)\ndistrict_summary_df[\"average math score\"] = district_summary_df[\"average math score\"].map(\"{:.1f}\".format)\ndistrict_summary_df[\"average reading score\"] = district_summary_df[\"average reading score\"].map(\"{:.1f}\".format)\ndistrict_summary_df[\"% passing math score\"] = district_summary_df[\"% passing math score\"].map(\"{:.1f}%\".format)\ndistrict_summary_df[\"% passing reading score\"] = district_summary_df[\"% passing reading score\"].map(\"{:.1f}%\".format)\ndistrict_summary_df[\"% passing math and reading score\"] = district_summary_df[\"% passing math and reading score\"].map(\"{:.1f}%\".format)\ndistrict_summary_df",
"_____no_output_____"
]
],
[
[
"## School Summary",
"_____no_output_____"
],
[
"* Create an overview table that summarizes key metrics about each school, including:\n * School Name\n * School Type\n * Total Students\n * Total School Budget\n * Per Student Budget\n * Average Math Score\n * Average Reading Score\n * % Passing Math\n * % Passing Reading\n * % Overall Passing (The percentage of students that passed math **and** reading.)\n \n* Create a dataframe to hold the above results",
"_____no_output_____"
]
],
[
[
"# Strategy: school_data already has the first few columns. Format school_data, then calculate\n# additional series columns separately, then add each series column to the formatted dataframe\n\n# start with formatting school_data. Important to set index for future merges\nschool_summary = (school_data.set_index(\"school_name\")\n .sort_values(\"school_name\")\n .rename(columns = {\n \"type\": \"School Type\",\n \"size\": \"Total Students\",\n \"budget\": \"Total School Budget\"\n }\n )\n )\n# Calculate Per Student Budget series, append to school_summary\nschool_summary[\"Per Student Budget\"] = school_summary[\"Total School Budget\"] / school_summary[\"Total Students\"]\nschool_summary.head(5)",
"_____no_output_____"
],
[
"# Group and compute average math and reading scores from student_data\nschool_score_mean = (student_data.groupby(by=\"school_name\")\n .mean()\n )\nschool_score_mean.head(5)",
"_____no_output_____"
],
[
"# Append average math score and average reading score to school_summary\nschool_summary[\"Average Math Score\"] = school_score_mean[\"math_score\"]\nschool_summary[\"Average Reading Score\"] = school_score_mean[\"reading_score\"]\nschool_summary.head(5)",
"_____no_output_____"
],
[
"# Get number of students passing math by school. Set index.\nmath_pass_by_school = (math_passers.set_index(\"school_name\")\n .rename(columns={\"Student ID\": \"Number Students Pass Math\"})\n .groupby(by=\"school_name\")\n .count()\n )\nmath_pass_by_school.head(5)",
"_____no_output_____"
],
[
"# Get number of students passing reading by school. Set index.\nread_pass_by_school = (read_passers.set_index(\"school_name\")\n .rename(columns={\"Student ID\": \"Number Students Pass Read\"})\n .groupby(by=\"school_name\")\n .count()\n )\nread_pass_by_school.head(5)",
"_____no_output_____"
],
[
"# Get number of students passing math and reading by school. Set index.\nmath_read_pass_by_school = (math_read_passers.set_index(\"school_name\")\n .rename(columns={\"Student ID\": \"Number Students Pass Math and Read\"})\n .groupby(by=\"school_name\")\n .count()\n )\nmath_read_pass_by_school.head(5)",
"_____no_output_____"
],
[
"# Divide number of students passing by number of students per school, then append columns\n# to school_summary dataframe\nschool_summary[\"% Passing Math\"] = math_pass_by_school[\"Number Students Pass Math\"] / school_summary[\"Total Students\"] * 100\nschool_summary[\"% Passing Reading\"] = read_pass_by_school[\"Number Students Pass Read\"] / school_summary[\"Total Students\"] * 100\nschool_summary[\"% Overall Passing\"] = math_read_pass_by_school[\"Number Students Pass Math and Read\"] / school_summary[\"Total Students\"] * 100\nschool_summary.head()",
"_____no_output_____"
],
[
"# Make an unformatted copy for to use in 'Scores by School Spending' later on\nschool_summary_unformatted = school_summary.copy()\n\n# Add formatting to school_summary. This turns some float columns into strings\nschool_summary[\"Total School Budget\"] = school_summary[\"Total School Budget\"].map(\"${:,.2f}\".format)\nschool_summary[\"Per Student Budget\"] = school_summary[\"Per Student Budget\"].map(\"${:,.2f}\".format)\nschool_summary[\"Average Math Score\"] = school_summary[\"Average Math Score\"].map(\"{:.2f}\".format)\nschool_summary[\"Average Reading Score\"] = school_summary[\"Average Reading Score\"].map(\"{:.2f}\".format)\nschool_summary[\"% Passing Math\"] = school_summary[\"% Passing Math\"].map(\"{:.2f}%\".format)\nschool_summary[\"% Passing Reading\"] = school_summary[\"% Passing Reading\"].map(\"{:.2f}%\".format)\nschool_summary[\"% Overall Passing\"] = school_summary[\"% Overall Passing\"].map(\"{:.2f}%\".format)\nschool_summary",
"_____no_output_____"
]
],
[
[
"## Top Performing Schools (By % Overall Passing)",
"_____no_output_____"
],
[
"* Sort and display the top five performing schools by % overall passing.",
"_____no_output_____"
]
],
[
[
"(school_summary.sort_values(\"% Overall Passing\", ascending=False)\n .head(5)\n)",
"_____no_output_____"
]
],
[
[
"## Bottom Performing Schools (By % Overall Passing)",
"_____no_output_____"
],
[
"* Sort and display the five worst-performing schools by % overall passing.",
"_____no_output_____"
]
],
[
[
"(school_summary.sort_values(\"% Overall Passing\", ascending=True)\n .head(5)\n)",
"_____no_output_____"
]
],
[
[
"## Math Scores by Grade",
"_____no_output_____"
],
[
"* Create a table that lists the average Reading Score for students of each grade level (9th, 10th, 11th, 12th) at each school.\n\n * Create a pandas series for each grade. Hint: use a conditional statement.\n \n * Group each series by school\n \n * Combine the series into a dataframe\n \n * Optional: give the displayed data cleaner formatting",
"_____no_output_____"
]
],
[
[
"# Index student_data and get only relevant columns\nscore_by_grade = student_data[[\"school_name\", \"grade\", \"math_score\"]].set_index(\"school_name\")\n\n# Create initial math_by_school dataframe, then create additional series and append them to\n# the dataframe\nmath_by_school = (score_by_grade.loc[score_by_grade[\"grade\"] == \"9th\"]\n .groupby(by=\"school_name\")\n .mean()\n .rename(columns={\"math_score\": \"9th\"})\n )\nmath_by_school[\"10th\"] = (score_by_grade.loc[score_by_grade[\"grade\"] == \"10th\"]\n .groupby(by=\"school_name\")\n .mean()\n )\nmath_by_school[\"11th\"] = (score_by_grade.loc[score_by_grade[\"grade\"] == \"11th\"]\n .groupby(by=\"school_name\")\n .mean()\n )\nmath_by_school[\"12th\"] = (score_by_grade.loc[score_by_grade[\"grade\"] == \"12th\"]\n .groupby(by=\"school_name\")\n .mean()\n )\nmath_by_school",
"_____no_output_____"
]
],
[
[
"## Reading Score by Grade ",
"_____no_output_____"
],
[
"* Perform the same operations as above for reading scores",
"_____no_output_____"
]
],
[
[
"score_by_grade = student_data[[\"school_name\", \"grade\", \"reading_score\"]].set_index(\"school_name\")\n\n# Create initial read_by_school dataframe, then create additional series and append them to\n# the dataframe\nread_by_school = (score_by_grade.loc[score_by_grade[\"grade\"] == \"9th\"]\n .groupby(by=\"school_name\")\n .mean()\n .rename(columns={\"reading_score\": \"9th\"})\n )\nread_by_school[\"10th\"] = (score_by_grade.loc[score_by_grade[\"grade\"] == \"10th\"]\n .groupby(by=\"school_name\")\n .mean()\n )\nread_by_school[\"11th\"] = (score_by_grade.loc[score_by_grade[\"grade\"] == \"11th\"]\n .groupby(by=\"school_name\")\n .mean()\n )\nread_by_school[\"12th\"] = (score_by_grade.loc[score_by_grade[\"grade\"] == \"12th\"]\n .groupby(by=\"school_name\")\n .mean()\n )\nread_by_school",
"_____no_output_____"
]
],
[
[
"## Scores by School Spending",
"_____no_output_____"
],
[
"* Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:\n * Average Math Score\n * Average Reading Score\n * % Passing Math\n * % Passing Reading\n * Overall Passing Rate (Average of the above two)",
"_____no_output_____"
]
],
[
[
"# Use school_summary_unformatted dataframe that still has numeric columns as float\n# Define the cut parameters\nseries_to_cut = school_summary_unformatted[\"Per Student Budget\"]\nbins_to_fill = [0, 584.9, 629.9, 644.9, 675.9]\nbin_labels = [\"<$584\", \"$585-629\", \"$630-644\", \"$645-675\"]\n\n# New column with the bin definition into school_summary_unformatted\nschool_summary_unformatted[\"Spending Ranges (per student)\"] = pd.cut(x=series_to_cut, bins=bins_to_fill, labels=bin_labels)\n\n# Exclude unneeded columns, group by the bin series and take the average of the scores\nscores_by_spending = (school_summary_unformatted.groupby(by=\"Spending Ranges (per student)\")\n .mean()\n )\nscores_by_spending_final = scores_by_spending[[\"Average Math Score\",\n \"Average Reading Score\",\n \"% Passing Math\",\n \"% Passing Reading\",\n \"% Overall Passing\"]]\nscores_by_spending_final",
"_____no_output_____"
]
],
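[
[
"# Quick sanity check on the four spending bins defined above: how many schools fall into each range?\n# Uses the \"Spending Ranges (per student)\" column created in the previous cell.\nschool_summary_unformatted[\"Spending Ranges (per student)\"].value_counts().sort_index()",
"_____no_output_____"
]
],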
[
[
"## Scores by School Size",
"_____no_output_____"
],
[
"* Perform the same operations as above, based on school size.",
"_____no_output_____"
]
],
[
[
"# Use school_summary_unformatted dataframe that still has numeric columns as float\n# Define the cut parameters\nseries_to_cut = school_summary_unformatted[\"Total Students\"]\nbins_to_fill = [0, 1799.9, 2999.9, 4999.9]\nbin_labels = [\"Small (< 1800)\", \"Medium (1800-2999)\", \"Large (3000-5000)\"]\n\n# New column with the bin definition into school_summary_unformatted\nschool_summary_unformatted[\"School Size\"] = pd.cut(x=series_to_cut, bins=bins_to_fill, labels=bin_labels)\n\n# Exclude unneeded columns, group by the bin series and take the average of the scores\nscores_by_school_size = (school_summary_unformatted.groupby(by=\"School Size\")\n .mean()\n )\nscores_by_school_size_final = scores_by_school_size[[\"Average Math Score\",\n \"Average Reading Score\",\n \"% Passing Math\",\n \"% Passing Reading\",\n \"% Overall Passing\"]]\nscores_by_school_size_final",
"_____no_output_____"
]
],
[
[
"## Scores by School Type",
"_____no_output_____"
],
[
"* Perform the same operations as above, based on school type",
"_____no_output_____"
]
],
[
[
"# No cut action needed since 'School Type' is not numeric. Can be grouped as is.\n\n# Exclude unneeded columns, group by School Type and take the average of the scores\nscores_by_school_type = (school_summary_unformatted.groupby(by=\"School Type\")\n .mean()\n )\nscores_by_school_type_final = scores_by_school_type[[\"Average Math Score\",\n \"Average Reading Score\",\n \"% Passing Math\",\n \"% Passing Reading\",\n \"% Overall Passing\"]]\nscores_by_school_type_final",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
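[
"code"
],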
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7c96aae188d4188eea7bebcc078b5b84f6a2fa7 | 25,475 | ipynb | Jupyter Notebook | Home.ipynb | GooseHuang/Udacity-Data-Scientist-Nanodegree | 96980d6f7ce82a961c7f41d26c25fbe5f11ca773 | [
"MIT"
] | null | null | null | Home.ipynb | GooseHuang/Udacity-Data-Scientist-Nanodegree | 96980d6f7ce82a961c7f41d26c25fbe5f11ca773 | [
"MIT"
] | 1 | 2021-06-02T02:39:02.000Z | 2021-06-02T02:39:02.000Z | Home.ipynb | GooseHuang/Udacity-Data-Scientist-Nanodegree | 96980d6f7ce82a961c7f41d26c25fbe5f11ca773 | [
"MIT"
] | null | null | null | 49.95098 | 8,628 | 0.765182 | [
[
[
"2*2**0.5",
"_____no_output_____"
],
[
"import math\nmath.e",
"_____no_output_____"
],
[
"math.pi",
"_____no_output_____"
],
[
"4*2**0.5/(1+0.5*2**0.5)",
"_____no_output_____"
],
[
"n = 10\nfor n in range(200):\n A = 0.01* 1.01**n\n B = 0.99 * 0.99**n \n print(A/ (A+B))",
"0.01\n0.01019995960412038\n0.010403875573686895\n0.010611824492314351\n0.010823884357590845\n0.011040134604405808\n0.011260656128547476\n0.011485531310567792\n0.011714844039912214\n0.011948679739311876\n0.012187125389434922\n0.012430269553793675\n0.012678202403903889\n0.012931015744691864\n0.013188803040144916\n0.013451659439200233\n0.013719681801866647\n0.01399296872557348\n0.014271620571740144\n0.01455573949255956\n0.014845429457988129\n0.01514079628293423\n0.015441947654636929\n0.015748993160225686\n0.01606204431445151\n0.01638121458757921\n0.016706619433429864\n0.01703837631756176\n0.017376604745577617\n0.017721426291544922\n0.01807296462651556\n0.018431345547130166\n0.018796697004291773\n0.01916914913189236\n0.019548834275575386\n0.019935887021515893\n0.02033044422519963\n0.020732645040180832\n0.02114263094679791\n0.021560545780825027\n0.02198653576203646\n0.02242074952265959\n0.022863338135691254\n0.023314455143050975\n0.023774256583543395\n0.02424290102060097\n0.02472054956977685\n0.02520736592595626\n0.0257035163902537\n0.026209169896561635\n0.026724498037714987\n0.0272496750912344\n0.027784878044609523\n0.028330286620082298\n0.028886083298888216\n0.02945245334491267\n0.03002958482771686\n0.03061766864488693\n0.03121689854365776\n0.03182747114176127\n0.032449585947447276\n0.03308344537862295\n0.03372925478105552\n0.03438722244558029\n0.03505755962425467\n0.03574048054539679\n0.03643620242744509\n0.037144945491573524\n0.037866932972994906\n0.03860239113088267\n0.03935154925683952\n0.0401146396818391\n0.04089189778156486\n0.04168356198006798\n0.042489873751664256\n0.04331107762098749\n0.044147421161115\n0.044999154989678464\n0.04586653276287125\n0.04674981116726134\n0.04764924990931668\n0.048565111702547556\n0.049497662252168895\n0.05044717023718285\n0.05141390728978036\n0.05239814797195801\n0.05340016974924507\n0.05442025296143311\n0.055458680790199236\n0.05651573922351197\n0.05759171701670714\n0.05868690565011965\n0.05980159928315528\n0.060936094704685494\n0.0620906912796469\n0.0632656908917256\n0.06446139788200586\n0.06567811898346186\n0.06691616325116963\n0.06817584198811726\n0.06945746866648929\n0.07076135884430268\n0.07208783007727032\n0.07343720182576939\n0.07480979535679178\n0.07620593364075448\n0.07762594124304915\n0.07907014421021091\n0.0805388699505885\n0.08203244710939893\n0.08355120543805297\n0.08509547565763949\n0.0866655893164598\n0.0882618786415066\n0.0898846763837852\n0.09153431565737935\n0.09321112977216787\n0.09491545206010345\n0.09664761569496978\n0.09840795350554025\n0.10019679778206547\n0.10201448007602598\n0.10386133099309187\n0.10573767997923988\n0.10764385509998627\n0.10958018281270275\n0.11154698773199213\n0.11354459238811017\n0.1155733169784304\n0.11763347911196066\n0.1197253935469301\n0.1218493719214793\n0.12400572247749803\n0.12619474977766876\n0.1284167544157881\n0.13067203272045302\n0.13296087645221363\n0.13528357249431025\n0.13764040253712911\n0.14003164275652683\n0.14245756348619273\n0.14491842888423506\n0.14741449659419556\n0.14994601740071606\n0.1525132348801008\n0.15511638504603661\n0.15775569599075442\n0.1604313875219356\n0.1631436707956883\n0.16589274794593895\n0.1686788117106058\n0.1715020450549452\n0.17436262079247847\n0.17726070120393397\n0.18019643765465757\n0.1831699702109673\n0.1861814272559503\n0.18923092510522097\n0.19231856762318006\n0.19544444584033682\n0.1986086375722746\n0.20181120704086195\n0.20505220449832992\n0.2083316658548548\n0.2116496123103036\n0.21500604999081627\n0.21840096959091546\n0.22183434602184832\n0.22530613806688013\n
0.22881628804427054\n0.2323647214786762\n0.23595134678173116\n0.2395760549425671\n0.24323871922903967\n0.2469391949004338\n0.2506773189324229\n0.2544529097550587\n0.25826576700456627\n0.26211567128971797\n0.2660023839735528\n0.2699256469712015\n0.2738851825645679\n0.27788069323460507\n0.2819118615119105\n0.2859783498463473\n0.29007980049638027\n0.2942158354387928\n0.29838605629942655\n0.30259004430556036\n0.3068273602605133\n0.3110975445410278\n0.3154001171179506\n0.31973457760069873\n0.32410040530595097\n0.3284970593509709\n0.33292397877191976\n0.33738058266747284\n0.34186627036800543\n0.3463804216305643\n0.35092239685978976\n"
],
[
"data = [1,0,0,0,1,1,1,1,1,1,0,0,1]\nplt.hist(data,bins=2)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n%matplotlib inline \n\nx = [1,2,3,4,5,6,7,8,1,2,3,4,2,3,4,1,2,3,1]\nplt.hist(x, bins =3)\nplt.title(\"Histogram with 'auto' bins\")\nplt.xlabel('xlabel', fontsize=18)\nplt.ylabel('ylabel', fontsize=16)\nplt.show()",
"_____no_output_____"
],
[
"import math",
"_____no_output_____"
],
[
"math.([1,2,3])",
"_____no_output_____"
],
[
"(7-9)/7.65**0.5",
"_____no_output_____"
],
[
"(6-9)/7.65**0.5",
"_____no_output_____"
],
[
"print((7.5-9)/7.65**0.5)\nprint((6.5-9)/7.65**0.5)",
"-0.5423261445466404\n-0.903876907577734\n"
],
[
"from scipy.special import comb, perm\n\nperm(3,2) #计算排列数 6\n\ncomb(3,2) #计算组合数 3",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c97f15f4026051977a32755b084ff98a800412 | 75,287 | ipynb | Jupyter Notebook | Tutorial-BSSN_time_evolution-C_codegen_library.ipynb | stevenrbrandt/nrpytutorial | 219af363f810cc46ea8955a9d28cf075f2252582 | [
"BSD-2-Clause"
] | null | null | null | Tutorial-BSSN_time_evolution-C_codegen_library.ipynb | stevenrbrandt/nrpytutorial | 219af363f810cc46ea8955a9d28cf075f2252582 | [
"BSD-2-Clause"
] | null | null | null | Tutorial-BSSN_time_evolution-C_codegen_library.ipynb | stevenrbrandt/nrpytutorial | 219af363f810cc46ea8955a9d28cf075f2252582 | [
"BSD-2-Clause"
] | null | null | null | 56.863293 | 490 | 0.590314 | [
[
[
"<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# BSSN Time-Evolution C Code Generation Library\n\n## Author: Zach Etienne\n\n## This module implements a number of helper functions for generating C-code kernels that solve Einstein's equations in the covariant BSSN formalism [described in this NRPy+ tutorial notebook](Tutorial-BSSN_formulation.ipynb)\n\n**Notebook Status:** <font color = red><b> Not yet validated </b></font>\n\n**Validation Notes:** This module has NOT been validated to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution *after a short numerical evolution of the initial data* (see [plots at bottom](#convergence)), and all quantities have been validated against the [original SENR code](https://bitbucket.org/zach_etienne/nrpy).\n\n### NRPy+ modules that generate needed symbolic expressions:\n* [BSSN/BSSN_constraints.py](../edit/BSSN/BSSN_constraints.py); [\\[**tutorial**\\]](Tutorial-BSSN_constraints.ipynb): Hamiltonian constraint in BSSN curvilinear basis/coordinates\n* [BSSN/BSSN_RHSs.py](../edit/BSSN/BSSN_RHSs.py); [\\[**tutorial**\\]](Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb): Generates the right-hand sides for the BSSN evolution equations in singular, curvilinear coordinates\n* [BSSN/BSSN_gauge_RHSs.py](../edit/BSSN/BSSN_gauge_RHSs.py); [\\[**tutorial**\\]](Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb): Generates the right-hand sides for the BSSN gauge evolution equations in singular, curvilinear coordinates\n* [BSSN/Enforce_Detgammahat_Constraint.py](../edit/BSSN/Enforce_Detgammahat_Constraint.py); [**tutorial**](Tutorial-BSSN_enforcing_determinant_gammabar_equals_gammahat_constraint.ipynb): Generates symbolic expressions for enforcing the $\\det{\\bar{\\gamma}}=\\det{\\hat{\\gamma}}$ constraint\n\n## Introduction:\nHere we use NRPy+ to generate the C source code kernels necessary to generate C functions needed/useful for evolving forward in time the BSSN equations, including:\n1. the BSSN RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration, with arbitrary gauge choice.\n1. the BSSN constraints as a check of numerical error",
"_____no_output_____"
],
[
"<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$\n\nThis notebook is organized as follows\n\n1. [Step 1](#importmodules): Import needed Python modules\n1. [Step 2](#helperfuncs): Helper Python functions for C code generation\n1. [Step 3.a](#bssnrhs): Generate symbolic BSSN RHS expressions\n1. [Step 3.b](#bssnrhs_c_code): `rhs_eval()`: Register C function for evaluating BSSN RHS expressions\n1. [Step 3.c](#ricci): Generate symbolic expressions for 3-Ricci tensor $\\bar{R}_{ij}$\n1. [Step 3.d](#ricci_c_code): `Ricci_eval()`: Register C function for evaluating 3-Ricci tensor $\\bar{R}_{ij}$\n1. [Step 4.a](#bssnconstraints): Generate symbolic expressions for BSSN Hamiltonian & momentum constraints\n1. [Step 4.b](#bssnconstraints_c_code): `BSSN_constraints()`: Register C function for evaluating BSSN Hamiltonian & momentum constraints\n1. [Step 5](#enforce3metric): `enforce_detgammahat_constraint()`: Register C function for enforcing the conformal 3-metric $\\det{\\bar{\\gamma}_{ij}}=\\det{\\hat{\\gamma}_{ij}}$ constraint\n1. [Step 6.a](#psi4): `psi4_part_{0,1,2}()`: Register C function for evaluating Weyl scalar $\\psi_4$, in 3 parts (3 functions)\n1. [Step 6.b](#psi4_tetrad): `psi4_tetrad()`: Register C function for evaluating Weyl scalar $\\psi_4$ tetrad\n1. [Step 6.c](#swm2): `SpinWeight_minus2_SphHarmonics()`: Register C function for evaluating spin-weight $s=-2$ spherical harmonics\n1. [Step 7](#validation): Confirm above functions are bytecode-identical to those in `BSSN/BSSN_Ccodegen_library.py`\n1. [Step 8](#latex_pdf_output): Output this notebook to $\\LaTeX$-formatted PDF file",
"_____no_output_____"
],
[
"<a id='importmodules'></a>\n\n# Step 1: Import needed Python modules \\[Back to [top](#toc)\\]\n$$\\label{importmodules}$$",
"_____no_output_____"
]
],
[
[
"# RULES FOR ADDING FUNCTIONS TO THIS ROUTINE:\n# 1. The function must be runnable from a multiprocessing environment,\n# which means that the function\n# 1.a: cannot depend on previous function calls.\n# 1.b: cannot create directories (this is not multiproc friendly)\n\n\n# Step P1: Import needed NRPy+ core modules:\nfrom outputC import lhrh, add_to_Cfunction_dict # NRPy+: Core C code output module\nimport finite_difference as fin # NRPy+: Finite difference C code generation module\nimport NRPy_param_funcs as par # NRPy+: Parameter interface\nimport grid as gri # NRPy+: Functions having to do with numerical grids\nimport indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support\nimport reference_metric as rfm # NRPy+: Reference metric support\nfrom pickling import pickle_NRPy_env # NRPy+: Pickle/unpickle NRPy+ environment, for parallel codegen\nimport os, time # Standard Python modules for multiplatform OS-level functions, benchmarking\nimport BSSN.BSSN_RHSs as rhs\nimport BSSN.BSSN_gauge_RHSs as gaugerhs\nimport loop as lp",
"_____no_output_____"
]
],
[
[
"<a id='helperfuncs'></a>\n\n# Step 2: Helper Python functions for C code generation \\[Back to [top](#toc)\\]\n$$\\label{helperfuncs}$$\n\n* `print_msg_with_timing()` gives the user an idea of what's going on/taking so long. Also outputs timing info.\n* `get_loopopts()` sets up options for NRPy+'s `loop` module\n* `register_stress_energy_source_terms_return_T4UU()` registers gridfunctions for $T^{\\mu\\nu}$ if needed and not yet registered.",
"_____no_output_____"
]
],
[
[
"###############################################\n# Helper Python functions for C code generation\n# print_msg_with_timing() gives the user an idea of what's going on/taking so long. Also outputs timing info.\ndef print_msg_with_timing(desc, msg=\"Symbolic\", startstop=\"start\", starttime=0.0):\n CoordSystem = par.parval_from_str(\"reference_metric::CoordSystem\")\n elapsed = time.time()-starttime\n if msg == \"Symbolic\":\n if startstop == \"start\":\n print(\"Generating symbolic expressions for \" + desc + \" (%s coords)...\" % CoordSystem)\n return time.time()\n else:\n print(\"Finished generating symbolic expressions for \"+desc+\n \" (%s coords) in %.1f seconds. Next up: C codegen...\" % (CoordSystem, elapsed))\n elif msg == \"Ccodegen\":\n if startstop == \"start\":\n print(\"Generating C code for \"+desc+\" (%s coords)...\" % CoordSystem)\n return time.time()\n else:\n print(\"Finished generating C code for \"+desc+\" (%s coords) in %.1f seconds.\" % (CoordSystem, elapsed))\n\n\n# get_loopopts() sets up options for NRPy+'s loop module\ndef get_loopopts(points_to_update, enable_SIMD, enable_rfm_precompute, OMP_pragma_on, enable_xxs=True):\n loopopts = points_to_update + \",includebraces=False\"\n if enable_SIMD:\n loopopts += \",enable_SIMD\"\n if enable_rfm_precompute:\n loopopts += \",enable_rfm_precompute\"\n elif not enable_xxs:\n pass\n else:\n loopopts += \",Read_xxs\"\n if OMP_pragma_on != \"i2\":\n loopopts += \",pragma_on_\"+OMP_pragma_on\n return loopopts\n\n\n# register_stress_energy_source_terms_return_T4UU() registers gridfunctions\n# for T4UU if needed and not yet registered.\ndef register_stress_energy_source_terms_return_T4UU(enable_stress_energy_source_terms):\n if enable_stress_energy_source_terms:\n registered_already = False\n for i in range(len(gri.glb_gridfcs_list)):\n if gri.glb_gridfcs_list[i].name == \"T4UU00\":\n registered_already = True\n if not registered_already:\n return ixp.register_gridfunctions_for_single_rank2(\"AUXEVOL\", \"T4UU\", \"sym01\", DIM=4)\n else:\n return ixp.declarerank2(\"T4UU\", \"sym01\", DIM=4)\n return None",
"_____no_output_____"
]
],
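[
[
"# Quick check of the loop-option helper defined above: for interior points with SIMD and rfm precomputation\n# enabled, and the OpenMP pragma moved to the i1 loop, get_loopopts() assembles the comma-separated option\n# string consumed by NRPy+'s loop module.\nprint(get_loopopts(\"InteriorPoints\", enable_SIMD=True, enable_rfm_precompute=True, OMP_pragma_on=\"i1\"))\n# expected: InteriorPoints,includebraces=False,enable_SIMD,enable_rfm_precompute,pragma_on_i1",
"_____no_output_____"
]
],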
[
[
"<a id='bssnrhs'></a>\n\n# Step 3.a: Generate symbolic BSSN RHS expressions \\[Back to [top](#toc)\\]\n$$\\label{bssnrhs}$$\n\nFirst we generate the symbolic expressions. Be sure to call this function from within a `reference_metric::enable_rfm_precompute=\"True\"` environment if reference metric precomputation is desired.\n\n\n`BSSN_RHSs__generate_symbolic_expressions()` supports the following features\n\n* (`\"OnePlusLog\"` by default) Lapse gauge choice\n* (`\"GammaDriving2ndOrder_Covariant\"` by default) Shift gauge choice\n* (disabled by default) Kreiss-Oliger dissipation\n* (disabled by default) Stress-energy ($T^{\\mu\\nu}$) source terms\n* (enabled by default) \"Leave Ricci symbolic\": do not compute the 3-Ricci tensor $\\bar{R}_{ij}$ within the BSSN RHSs, which only adds to the extreme complexity of the BSSN RHS expressions. Instead leave computation of $\\bar{R}_{ij}$=`RbarDD` to a separate function. Doing this generally increases C-code performance by about 10%.\n\nTwo lists are returned by this function:\n\n1. `betaU`: the un-rescaled shift vector $\\beta^i$, which is used to perform upwinding.\n1. `BSSN_RHSs_SymbExpressions`: the BSSN RHS symbolic expressions, using the `lhrh` named-tuple to store a list of LHSs and RHSs, where each LHS and RHS is defined as follows\n 1. LHS = BSSN gridfunction whose time derivative is being computed at grid point `i0,i1,i2`, and \n 1. RHS = time derivative expression for given variable at the given point.",
"_____no_output_____"
]
],
[
[
"def BSSN_RHSs__generate_symbolic_expressions(LapseCondition=\"OnePlusLog\",\n ShiftCondition=\"GammaDriving2ndOrder_Covariant\",\n enable_KreissOliger_dissipation=True,\n enable_stress_energy_source_terms=False,\n leave_Ricci_symbolic=True):\n ######################################\n # START: GENERATE SYMBOLIC EXPRESSIONS\n starttime = print_msg_with_timing(\"BSSN_RHSs\", msg=\"Symbolic\", startstop=\"start\")\n\n # Returns None if enable_stress_energy_source_terms==False; otherwise returns symb expressions for T4UU\n T4UU = register_stress_energy_source_terms_return_T4UU(enable_stress_energy_source_terms)\n\n # Evaluate BSSN RHSs:\n import BSSN.BSSN_quantities as Bq\n par.set_parval_from_str(\"BSSN.BSSN_quantities::LeaveRicciSymbolic\", str(leave_Ricci_symbolic))\n rhs.BSSN_RHSs()\n\n if enable_stress_energy_source_terms:\n import BSSN.BSSN_stress_energy_source_terms as Bsest\n Bsest.BSSN_source_terms_for_BSSN_RHSs(T4UU)\n rhs.trK_rhs += Bsest.sourceterm_trK_rhs\n for i in range(3):\n # Needed for Gamma-driving shift RHSs:\n rhs.Lambdabar_rhsU[i] += Bsest.sourceterm_Lambdabar_rhsU[i]\n # Needed for BSSN RHSs:\n rhs.lambda_rhsU[i] += Bsest.sourceterm_lambda_rhsU[i]\n for j in range(3):\n rhs.a_rhsDD[i][j] += Bsest.sourceterm_a_rhsDD[i][j]\n\n par.set_parval_from_str(\"BSSN.BSSN_gauge_RHSs::LapseEvolutionOption\", LapseCondition)\n par.set_parval_from_str(\"BSSN.BSSN_gauge_RHSs::ShiftEvolutionOption\", ShiftCondition)\n gaugerhs.BSSN_gauge_RHSs() # Can depend on above RHSs\n # Restore BSSN.BSSN_quantities::LeaveRicciSymbolic to False\n par.set_parval_from_str(\"BSSN.BSSN_quantities::LeaveRicciSymbolic\", \"False\")\n\n # Add Kreiss-Oliger dissipation to the BSSN RHSs:\n if enable_KreissOliger_dissipation:\n thismodule = \"KO_Dissipation\"\n diss_strength = par.Cparameters(\"REAL\", thismodule, \"diss_strength\", 0.1) # *Bq.cf # *Bq.cf*Bq.cf*Bq.cf # cf**1 is found better than cf**4 over the long term.\n\n alpha_dKOD = ixp.declarerank1(\"alpha_dKOD\")\n cf_dKOD = ixp.declarerank1(\"cf_dKOD\")\n trK_dKOD = ixp.declarerank1(\"trK_dKOD\")\n betU_dKOD = ixp.declarerank2(\"betU_dKOD\", \"nosym\")\n vetU_dKOD = ixp.declarerank2(\"vetU_dKOD\", \"nosym\")\n lambdaU_dKOD = ixp.declarerank2(\"lambdaU_dKOD\", \"nosym\")\n aDD_dKOD = ixp.declarerank3(\"aDD_dKOD\", \"sym01\")\n hDD_dKOD = ixp.declarerank3(\"hDD_dKOD\", \"sym01\")\n for k in range(3):\n gaugerhs.alpha_rhs += diss_strength * alpha_dKOD[k] * rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]\n rhs.cf_rhs += diss_strength * cf_dKOD[k] * rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]\n rhs.trK_rhs += diss_strength * trK_dKOD[k] * rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]\n for i in range(3):\n if \"2ndOrder\" in ShiftCondition:\n gaugerhs.bet_rhsU[i] += diss_strength * betU_dKOD[i][k] * rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]\n gaugerhs.vet_rhsU[i] += diss_strength * vetU_dKOD[i][k] * rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]\n rhs.lambda_rhsU[i] += diss_strength * lambdaU_dKOD[i][k] * rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]\n for j in range(3):\n rhs.a_rhsDD[i][j] += diss_strength * aDD_dKOD[i][j][k] * rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]\n rhs.h_rhsDD[i][j] += diss_strength * hDD_dKOD[i][j][k] * rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]\n\n # We use betaU as our upwinding control vector:\n Bq.BSSN_basic_tensors()\n betaU = Bq.betaU\n\n # END: GENERATE SYMBOLIC EXPRESSIONS\n ######################################\n\n lhs_names = [\"alpha\", 
\"cf\", \"trK\"]\n rhs_exprs = [gaugerhs.alpha_rhs, rhs.cf_rhs, rhs.trK_rhs]\n for i in range(3):\n lhs_names.append(\"betU\" + str(i))\n rhs_exprs.append(gaugerhs.bet_rhsU[i])\n lhs_names.append(\"lambdaU\" + str(i))\n rhs_exprs.append(rhs.lambda_rhsU[i])\n lhs_names.append(\"vetU\" + str(i))\n rhs_exprs.append(gaugerhs.vet_rhsU[i])\n for j in range(i, 3):\n lhs_names.append(\"aDD\" + str(i) + str(j))\n rhs_exprs.append(rhs.a_rhsDD[i][j])\n lhs_names.append(\"hDD\" + str(i) + str(j))\n rhs_exprs.append(rhs.h_rhsDD[i][j])\n\n # Sort the lhss list alphabetically, and rhss to match.\n # This ensures the RHSs are evaluated in the same order\n # they're allocated in memory:\n lhs_names, rhs_exprs = [list(x) for x in zip(*sorted(zip(lhs_names, rhs_exprs), key=lambda pair: pair[0]))]\n\n # Declare the list of lhrh's\n BSSN_RHSs_SymbExpressions = []\n for var in range(len(lhs_names)):\n BSSN_RHSs_SymbExpressions.append(lhrh(lhs=gri.gfaccess(\"rhs_gfs\", lhs_names[var]), rhs=rhs_exprs[var]))\n\n print_msg_with_timing(\"BSSN_RHSs\", msg=\"Symbolic\", startstop=\"stop\", starttime=starttime)\n return [betaU, BSSN_RHSs_SymbExpressions]",
"_____no_output_____"
]
],
[
[
"<a id='bssnrhs_c_code'></a>\n\n# Step 3.b: `rhs_eval()`: Register C code for BSSN RHS expressions \\[Back to [top](#toc)\\]\n$$\\label{bssnrhs_c_code}$$\n\n`add_rhs_eval_to_Cfunction_dict()` supports the following features\n\n* (enabled by default) reference-metric precomputation\n* (disabled by default) \"golden kernels\", which greatly increases the C-code generation time in an attempt to reduce computational cost. Most often this results in no speed-up.\n* (enabled by default) SIMD output\n* (disabled by default) splitting of RHSs into smaller pieces (multiple loops) to improve performance. Doesn't help much.\n* (`\"OnePlusLog\"` by default) Lapse gauge choice\n* (`\"GammaDriving2ndOrder_Covariant\"` by default) Shift gauge choice\n* (disabled by default) enable Kreiss-Oliger dissipation\n* (disabled by default) add stress-energy ($T^{\\mu\\nu}$) source terms\n* (enabled by default) \"Leave Ricci symbolic\": do not compute the 3-Ricci tensor $\\bar{R}_{ij}$ within the BSSN RHSs, which only adds to the extreme complexity of the BSSN RHS expressions. Instead leave computation of $\\bar{R}_{ij}$=`RbarDD` to a separate function. Doing this generally increases C-code performance by about 10%.\n* (`\"i2\"` by default) OpenMP pragma acts on which loop (assumes `i2` is outermost and `i0` is innermost loop). For axisymmetric or near-axisymmetric calculations, `\"i1\"` may be *significantly* faster.\n\nAlso to enable parallel C-code kernel generation, the NRPy+ environment is pickled and returned.",
"_____no_output_____"
]
],
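For orientation, here is a minimal usage sketch (not part of the validated library code below): it assumes the NRPy+ reference metric, grids, and finite-differencing order have already been configured earlier in this notebook, and the keyword values shown are simply the documented defaults plus Kreiss-Oliger dissipation switched on.

```python
# Usage sketch (illustrative only; assumes the NRPy+ setup from earlier in this notebook).
pickled_env = add_rhs_eval_to_Cfunction_dict(
    enable_rfm_precompute=True,
    enable_SIMD=True,
    LapseCondition="OnePlusLog",
    ShiftCondition="GammaDriving2ndOrder_Covariant",
    enable_KreissOliger_dissipation=True,
    leave_Ricci_symbolic=True,
    OMP_pragma_on="i2")
# The returned pickled NRPy+ environment can be handed to a worker process
# when C-code kernels are generated in parallel.
```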
[
[
"def add_rhs_eval_to_Cfunction_dict(includes=None, rel_path_to_Cparams=os.path.join(\".\"),\n enable_rfm_precompute=True, enable_golden_kernels=False,\n enable_SIMD=True, enable_split_for_optimizations_doesnt_help=False,\n LapseCondition=\"OnePlusLog\", ShiftCondition=\"GammaDriving2ndOrder_Covariant\",\n enable_KreissOliger_dissipation=False, enable_stress_energy_source_terms=False,\n leave_Ricci_symbolic=True, OMP_pragma_on=\"i2\",\n func_name_suffix=\"\"):\n if includes is None:\n includes = []\n if enable_SIMD:\n includes += [os.path.join(\"SIMD\", \"SIMD_intrinsics.h\")]\n enable_FD_functions = bool(par.parval_from_str(\"finite_difference::enable_FD_functions\"))\n if enable_FD_functions:\n includes += [\"finite_difference_functions.h\"]\n\n # Set up the C function for the BSSN RHSs\n desc = \"Evaluate the BSSN RHSs\"\n name = \"rhs_eval\" + func_name_suffix\n params = \"const paramstruct *restrict params, \"\n if enable_rfm_precompute:\n params += \"const rfm_struct *restrict rfmstruct, \"\n else:\n params += \"REAL *xx[3], \"\n params += \"\"\"\n const REAL *restrict auxevol_gfs,const REAL *restrict in_gfs,REAL *restrict rhs_gfs\"\"\"\n\n betaU, BSSN_RHSs_SymbExpressions = \\\n BSSN_RHSs__generate_symbolic_expressions(LapseCondition=LapseCondition, ShiftCondition=ShiftCondition,\n enable_KreissOliger_dissipation=enable_KreissOliger_dissipation,\n enable_stress_energy_source_terms=enable_stress_energy_source_terms,\n leave_Ricci_symbolic=leave_Ricci_symbolic)\n\n # Construct body:\n preloop=\"\"\n enableCparameters=True\n # Set up preloop in case we're outputting code for the Einstein Toolkit (ETK)\n if par.parval_from_str(\"grid::GridFuncMemAccess\") == \"ETK\":\n params, preloop = set_ETK_func_params_preloop(func_name_suffix)\n enableCparameters=False\n\n FD_outCparams = \"outCverbose=False,enable_SIMD=\" + str(enable_SIMD)\n FD_outCparams += \",GoldenKernelsEnable=\" + str(enable_golden_kernels)\n\n loopopts = get_loopopts(\"InteriorPoints\", enable_SIMD, enable_rfm_precompute, OMP_pragma_on)\n FDorder = par.parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\")\n starttime = print_msg_with_timing(\"BSSN_RHSs (FD order=\"+str(FDorder)+\")\", msg=\"Ccodegen\", startstop=\"start\")\n if enable_split_for_optimizations_doesnt_help and FDorder == 6:\n loopopts += \",DisableOpenMP\"\n BSSN_RHSs_SymbExpressions_pt1 = []\n BSSN_RHSs_SymbExpressions_pt2 = []\n for lhsrhs in BSSN_RHSs_SymbExpressions:\n if \"BETU\" in lhsrhs.lhs or \"LAMBDAU\" in lhsrhs.lhs:\n BSSN_RHSs_SymbExpressions_pt1.append(lhrh(lhs=lhsrhs.lhs, rhs=lhsrhs.rhs))\n else:\n BSSN_RHSs_SymbExpressions_pt2.append(lhrh(lhs=lhsrhs.lhs, rhs=lhsrhs.rhs))\n preloop += \"\"\"#pragma omp parallel\n {\n\"\"\"\n preloopbody = fin.FD_outputC(\"returnstring\", BSSN_RHSs_SymbExpressions_pt1,\n params=FD_outCparams,\n upwindcontrolvec=betaU)\n preloop += \"\\n#pragma omp for\\n\" + lp.simple_loop(loopopts, preloopbody)\n preloop += \"\\n#pragma omp for\\n\"\n body = fin.FD_outputC(\"returnstring\", BSSN_RHSs_SymbExpressions_pt2,\n params=FD_outCparams,\n upwindcontrolvec=betaU)\n postloop = \"\\n } // END #pragma omp parallel\\n\"\n else:\n preloop += \"\"\n body = fin.FD_outputC(\"returnstring\", BSSN_RHSs_SymbExpressions,\n params=FD_outCparams,\n upwindcontrolvec=betaU)\n postloop = \"\"\n print_msg_with_timing(\"BSSN_RHSs (FD order=\"+str(FDorder)+\")\", msg=\"Ccodegen\", startstop=\"stop\", starttime=starttime)\n\n add_to_Cfunction_dict(\n includes=includes,\n desc=desc,\n name=name, params=params,\n preloop=preloop, 
body=body, loopopts=loopopts, postloop=postloop,\n rel_path_to_Cparams=rel_path_to_Cparams, enableCparameters=enableCparameters)\n return pickle_NRPy_env()",
"_____no_output_____"
]
],
[
[
"<a id='ricci'></a>\n\n# Step 3.c: Generate symbolic expressions for 3-Ricci tensor $\\bar{R}_{ij}$ \\[Back to [top](#toc)\\]\n$$\\label{ricci}$$\n\nAs described above, we find a roughly 10% speedup by computing the 3-Ricci tensor $\\bar{R}_{ij}$ separately from the BSSN RHS equations and storing the 6 independent components in memory. Here we construct the symbolic expressions for all 6 independent components of $\\bar{R}_{ij}$ (which is symmetric under interchange of indices).\n\n`Ricci__generate_symbolic_expressions()` does not support any input parameters.\n\nOne list is returned by `Ricci__generate_symbolic_expressions()`: `Ricci_SymbExpressions`, which contains a list of expressions for the six independent components of $\\bar{R}_{ij}$, using the `lhrh` named-tuple to store a list of LHSs and RHSs, where each LHS and RHS is defined as follows\n\n1. LHS = gridfunction representation of the component of $\\bar{R}_{ij}$, computed at grid point i0,i1,i2, and\n1. RHS = expression for given component of $\\bar{R}_{ij}$.",
"_____no_output_____"
]
],
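As a quick, hedged illustration of the returned data structure (assuming the reference metric and BSSN gridfunctions were registered earlier in the notebook), one could simply inspect the left-hand sides of the six stored components:

```python
# Sketch: list the gridfunction access strings for the six independent RbarDD components.
Ricci_exprs = Ricci__generate_symbolic_expressions()
for expr in Ricci_exprs:
    print(expr.lhs)   # lhrh is a named tuple, so each entry exposes .lhs and .rhs
```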
[
[
"def Ricci__generate_symbolic_expressions():\n ######################################\n # START: GENERATE SYMBOLIC EXPRESSIONS\n starttime = print_msg_with_timing(\"3-Ricci tensor\", msg=\"Symbolic\", startstop=\"start\")\n\n # Evaluate 3-Ricci tensor:\n import BSSN.BSSN_quantities as Bq\n par.set_parval_from_str(\"BSSN.BSSN_quantities::LeaveRicciSymbolic\", \"False\")\n\n # Register all BSSN gridfunctions if not registered already\n Bq.BSSN_basic_tensors()\n # Next compute Ricci tensor\n Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()\n # END: GENERATE SYMBOLIC EXPRESSIONS\n ######################################\n # Must register RbarDD as gridfunctions, as we're outputting them to gridfunctions here:\n foundit = False\n for i in range(len(gri.glb_gridfcs_list)):\n if \"RbarDD00\" in gri.glb_gridfcs_list[i].name:\n foundit = True\n if not foundit:\n ixp.register_gridfunctions_for_single_rank2(\"AUXEVOL\", \"RbarDD\", \"sym01\")\n\n Ricci_SymbExpressions = [lhrh(lhs=gri.gfaccess(\"auxevol_gfs\", \"RbarDD00\"), rhs=Bq.RbarDD[0][0]),\n lhrh(lhs=gri.gfaccess(\"auxevol_gfs\", \"RbarDD01\"), rhs=Bq.RbarDD[0][1]),\n lhrh(lhs=gri.gfaccess(\"auxevol_gfs\", \"RbarDD02\"), rhs=Bq.RbarDD[0][2]),\n lhrh(lhs=gri.gfaccess(\"auxevol_gfs\", \"RbarDD11\"), rhs=Bq.RbarDD[1][1]),\n lhrh(lhs=gri.gfaccess(\"auxevol_gfs\", \"RbarDD12\"), rhs=Bq.RbarDD[1][2]),\n lhrh(lhs=gri.gfaccess(\"auxevol_gfs\", \"RbarDD22\"), rhs=Bq.RbarDD[2][2])]\n print_msg_with_timing(\"3-Ricci tensor\", msg=\"Symbolic\", startstop=\"stop\", starttime=starttime)\n return Ricci_SymbExpressions",
"_____no_output_____"
]
],
[
[
"<a id='ricci_c_code'></a>\n\n# Step 3.d: `Ricci_eval()`: Register C function for evaluating 3-Ricci tensor $\\bar{R}_{ij}$ \\[Back to [top](#toc)\\]\n$$\\label{ricci_c_code}$$\n\n`add_Ricci_eval_to_Cfunction_dict()` supports the following features\n\n* (enabled by default) reference-metric precomputation\n* (disabled by default) \"golden kernels\", which greatly increases the C-code generation time in an attempt to reduce computational cost. Most often this results in no speed-up.\n* (enabled by default) SIMD output\n* (disabled by default) splitting of RHSs into smaller pieces (multiple loops) to improve performance. Doesn't help much.\n* (`\"i2\"` by default) OpenMP pragma acts on which loop (assumes `i2` is outermost and `i0` is innermost loop). For axisymmetric or near-axisymmetric calculations, `\"i1\"` may be *significantly* faster.\n\nAlso to enable parallel C-code kernel generation, the NRPy+ environment is pickled and returned.",
"_____no_output_____"
]
],
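A possible registration call is sketched below (illustrative values only, assuming the usual NRPy+ setup); as noted above, a nearly axisymmetric calculation might benefit from moving the OpenMP pragma to the `i1` loop.

```python
# Sketch: register Ricci_eval() with the OpenMP pragma on the i1 loop (illustrative only).
env = add_Ricci_eval_to_Cfunction_dict(
    enable_rfm_precompute=True,
    enable_SIMD=True,
    OMP_pragma_on="i1")
```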
[
[
"def add_Ricci_eval_to_Cfunction_dict(includes=None, rel_path_to_Cparams=os.path.join(\".\"),\n enable_rfm_precompute=True, enable_golden_kernels=False, enable_SIMD=True,\n enable_split_for_optimizations_doesnt_help=False, OMP_pragma_on=\"i2\",\n func_name_suffix=\"\"):\n if includes is None:\n includes = []\n if enable_SIMD:\n includes += [os.path.join(\"SIMD\", \"SIMD_intrinsics.h\")]\n enable_FD_functions = bool(par.parval_from_str(\"finite_difference::enable_FD_functions\"))\n if enable_FD_functions:\n includes += [\"finite_difference_functions.h\"]\n\n # Set up the C function for the 3-Ricci tensor\n desc = \"Evaluate the 3-Ricci tensor\"\n name = \"Ricci_eval\" + func_name_suffix\n params = \"const paramstruct *restrict params, \"\n if enable_rfm_precompute:\n params += \"const rfm_struct *restrict rfmstruct, \"\n else:\n params += \"REAL *xx[3], \"\n params += \"const REAL *restrict in_gfs, REAL *restrict auxevol_gfs\"\n\n # Construct body:\n Ricci_SymbExpressions = Ricci__generate_symbolic_expressions()\n FD_outCparams = \"outCverbose=False,enable_SIMD=\" + str(enable_SIMD)\n FD_outCparams += \",GoldenKernelsEnable=\" + str(enable_golden_kernels)\n loopopts = get_loopopts(\"InteriorPoints\", enable_SIMD, enable_rfm_precompute, OMP_pragma_on)\n\n FDorder = par.parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\")\n starttime = print_msg_with_timing(\"3-Ricci tensor (FD order=\"+str(FDorder)+\")\", msg=\"Ccodegen\", startstop=\"start\")\n\n # Construct body:\n preloop=\"\"\n enableCparameters=True\n # Set up preloop in case we're outputting code for the Einstein Toolkit (ETK)\n if par.parval_from_str(\"grid::GridFuncMemAccess\") == \"ETK\":\n params, preloop = set_ETK_func_params_preloop(func_name_suffix)\n enableCparameters=False\n\n if enable_split_for_optimizations_doesnt_help and FDorder >= 8:\n loopopts += \",DisableOpenMP\"\n Ricci_SymbExpressions_pt1 = []\n Ricci_SymbExpressions_pt2 = []\n for lhsrhs in Ricci_SymbExpressions:\n if \"RBARDD00\" in lhsrhs.lhs or \"RBARDD11\" in lhsrhs.lhs or \"RBARDD22\" in lhsrhs.lhs:\n Ricci_SymbExpressions_pt1.append(lhrh(lhs=lhsrhs.lhs, rhs=lhsrhs.rhs))\n else:\n Ricci_SymbExpressions_pt2.append(lhrh(lhs=lhsrhs.lhs, rhs=lhsrhs.rhs))\n preloop = \"\"\"#pragma omp parallel\n {\n#pragma omp for\n\"\"\"\n preloopbody = fin.FD_outputC(\"returnstring\", Ricci_SymbExpressions_pt1,\n params=FD_outCparams)\n preloop += lp.simple_loop(loopopts, preloopbody)\n preloop += \"#pragma omp for\\n\"\n body = fin.FD_outputC(\"returnstring\", Ricci_SymbExpressions_pt2,\n params=FD_outCparams)\n postloop = \"\\n } // END #pragma omp parallel\\n\"\n else:\n body = fin.FD_outputC(\"returnstring\", Ricci_SymbExpressions,\n params=FD_outCparams)\n postloop = \"\"\n print_msg_with_timing(\"3-Ricci tensor (FD order=\"+str(FDorder)+\")\", msg=\"Ccodegen\", startstop=\"stop\", starttime=starttime)\n\n add_to_Cfunction_dict(\n includes=includes,\n desc=desc,\n name=name, params=params,\n preloop=preloop, body=body, loopopts=loopopts, postloop=postloop,\n rel_path_to_Cparams=rel_path_to_Cparams, enableCparameters=enableCparameters)\n return pickle_NRPy_env()",
"_____no_output_____"
]
],
[
[
"<a id='bssnconstraints'></a>\n\n# Step 4.a: Generate symbolic expressions for BSSN Hamiltonian & momentum constraints \\[Back to [top](#toc)\\]\n$$\\label{bssnconstraints}$$\n\nNext output the C code for evaluating the BSSN Hamiltonian and momentum constraints [(**Tutorial**)](Tutorial-BSSN_constraints.ipynb). In the absence of numerical error, these constraints should evaluate to zero. However it does not due to numerical (typically truncation) error.\n\nWe will therefore measure the constraint violations to gauge the accuracy of our simulation, and, ultimately determine whether errors are dominated by numerical finite differencing (truncation) error as expected.\n\n`BSSN_constraints__generate_symbolic_expressions()` supports the following features:\n\n* (disabled by default) add stress-energy ($T^{\\mu\\nu}$) source terms\n* (disabled by default) output Hamiltonian constraint only\n\nOne list is returned by `BSSN_constraints__generate_symbolic_expressions()`: `BSSN_constraints_SymbExpressions`, which contains a list of expressions for the Hamiltonian and momentum constraints (4 elements total), using the `lhrh` named-tuple to store a list of LHSs and RHSs, where each LHS and RHS is defined as follows\n\n1. LHS = gridfunction representation of the BSSN constraint, computed at grid point i0,i1,i2, and\n1. RHS = expression for given BSSN constraint",
"_____no_output_____"
]
],
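For example, a minimal sketch (again assuming the usual NRPy+ setup) of generating only the Hamiltonian-constraint expression, which returns a one-element list instead of four:

```python
# Sketch: Hamiltonian constraint only (illustrative; the returned list holds a single lhrh entry).
H_only = BSSN_constraints__generate_symbolic_expressions(
    enable_stress_energy_source_terms=False,
    leave_Ricci_symbolic=True,
    output_H_only=True)
print(len(H_only))  # expected: 1
```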
[
[
"def BSSN_constraints__generate_symbolic_expressions(enable_stress_energy_source_terms=False, leave_Ricci_symbolic=True,\n output_H_only=False):\n ######################################\n # START: GENERATE SYMBOLIC EXPRESSIONS\n starttime = print_msg_with_timing(\"BSSN constraints\", msg=\"Symbolic\", startstop=\"start\")\n\n # Define the Hamiltonian constraint and output the optimized C code.\n par.set_parval_from_str(\"BSSN.BSSN_quantities::LeaveRicciSymbolic\", str(leave_Ricci_symbolic))\n import BSSN.BSSN_constraints as bssncon\n\n # Returns None if enable_stress_energy_source_terms==False; otherwise returns symb expressions for T4UU\n T4UU = register_stress_energy_source_terms_return_T4UU(enable_stress_energy_source_terms)\n\n bssncon.BSSN_constraints(add_T4UUmunu_source_terms=False, output_H_only=output_H_only) # We'll add them below if desired.\n if enable_stress_energy_source_terms:\n import BSSN.BSSN_stress_energy_source_terms as Bsest\n Bsest.BSSN_source_terms_for_BSSN_constraints(T4UU)\n bssncon.H += Bsest.sourceterm_H\n for i in range(3):\n bssncon.MU[i] += Bsest.sourceterm_MU[i]\n\n BSSN_constraints_SymbExpressions = [lhrh(lhs=gri.gfaccess(\"aux_gfs\", \"H\"), rhs=bssncon.H)]\n if not output_H_only:\n BSSN_constraints_SymbExpressions += [lhrh(lhs=gri.gfaccess(\"aux_gfs\", \"MU0\"), rhs=bssncon.MU[0]),\n lhrh(lhs=gri.gfaccess(\"aux_gfs\", \"MU1\"), rhs=bssncon.MU[1]),\n lhrh(lhs=gri.gfaccess(\"aux_gfs\", \"MU2\"), rhs=bssncon.MU[2])]\n par.set_parval_from_str(\"BSSN.BSSN_quantities::LeaveRicciSymbolic\", \"False\")\n print_msg_with_timing(\"BSSN constraints\", msg=\"Symbolic\", startstop=\"stop\", starttime=starttime)\n # END: GENERATE SYMBOLIC EXPRESSIONS\n ######################################\n return BSSN_constraints_SymbExpressions",
"_____no_output_____"
]
],
[
[
"<a id='bssnconstraints_c_code'></a>\n\n# Step 4.b: `BSSN_constraints()`: Register C function for evaluating BSSN Hamiltonian & momentum constraints \\[Back to [top](#toc)\\]\n$$\\label{bssnconstraints_c_code}$$\n\n`add_BSSN_constraints_to_Cfunction_dict()` supports the following features\n\n* (enabled by default) reference-metric precomputation\n* (disabled by default) \"golden kernels\", which greatly increases the C-code generation time in an attempt to reduce computational cost. Most often this results in no speed-up.\n* (enabled by default) SIMD output\n* (disabled by default) splitting of RHSs into smaller pieces (multiple loops) to improve performance. Doesn't help much.\n* (disabled by default) add stress-energy ($T^{\\mu\\nu}$) source terms\n* (disabled by default) output Hamiltonian constraint only\n* (`\"i2\"` by default) OpenMP pragma acts on which loop (assumes `i2` is outermost and `i0` is innermost loop). For axisymmetric or near-axisymmetric calculations, `\"i1\"` may be *significantly* faster.\n\nAlso to enable parallel C-code kernel generation, the NRPy+ environment is pickled and returned.",
"_____no_output_____"
]
],
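One possible registration call is sketched here with the stress-energy source terms switched on, as might be appropriate for a matter spacetime; the values are illustrative, and the $T^{\mu\nu}$ gridfunctions are assumed to be filled elsewhere.

```python
# Sketch: register BSSN_constraints() including stress-energy source terms (illustrative only).
env = add_BSSN_constraints_to_Cfunction_dict(
    enable_rfm_precompute=True,
    enable_SIMD=True,
    enable_stress_energy_source_terms=True,
    output_H_only=False)
```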
[
[
"def add_BSSN_constraints_to_Cfunction_dict(includes=None, rel_path_to_Cparams=os.path.join(\".\"),\n enable_rfm_precompute=True, enable_golden_kernels=False, enable_SIMD=True,\n enable_stress_energy_source_terms=False, leave_Ricci_symbolic=True,\n output_H_only=False, OMP_pragma_on=\"i2\", func_name_suffix=\"\"):\n if includes is None:\n includes = []\n if enable_SIMD:\n includes += [os.path.join(\"SIMD\", \"SIMD_intrinsics.h\")]\n enable_FD_functions = bool(par.parval_from_str(\"finite_difference::enable_FD_functions\"))\n if enable_FD_functions:\n includes += [\"finite_difference_functions.h\"]\n\n # Set up the C function for the BSSN constraints\n desc = \"Evaluate the BSSN constraints\"\n name = \"BSSN_constraints\" + func_name_suffix\n params = \"const paramstruct *restrict params, \"\n if enable_rfm_precompute:\n params += \"const rfm_struct *restrict rfmstruct, \"\n else:\n params += \"REAL *xx[3], \"\n params += \"\"\"\n const REAL *restrict in_gfs, const REAL *restrict auxevol_gfs, REAL *restrict aux_gfs\"\"\"\n\n # Construct body:\n\n BSSN_constraints_SymbExpressions = BSSN_constraints__generate_symbolic_expressions(enable_stress_energy_source_terms,\n leave_Ricci_symbolic=leave_Ricci_symbolic,\n output_H_only=output_H_only)\n\n preloop=\"\"\n enableCparameters=True\n # Set up preloop in case we're outputting code for the Einstein Toolkit (ETK)\n if par.parval_from_str(\"grid::GridFuncMemAccess\") == \"ETK\":\n params, preloop = set_ETK_func_params_preloop(func_name_suffix)\n enableCparameters=False\n\n FD_outCparams = \"outCverbose=False,enable_SIMD=\" + str(enable_SIMD)\n FD_outCparams += \",GoldenKernelsEnable=\" + str(enable_golden_kernels)\n FDorder = par.parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\")\n starttime = print_msg_with_timing(\"BSSN constraints (FD order=\"+str(FDorder)+\")\", msg=\"Ccodegen\", startstop=\"start\")\n body = fin.FD_outputC(\"returnstring\", BSSN_constraints_SymbExpressions,\n params=FD_outCparams)\n print_msg_with_timing(\"BSSN constraints (FD order=\"+str(FDorder)+\")\", msg=\"Ccodegen\", startstop=\"stop\", starttime=starttime)\n\n add_to_Cfunction_dict(\n includes=includes,\n desc=desc,\n name=name, params=params,\n preloop=preloop,\n body=body,\n loopopts=get_loopopts(\"InteriorPoints\", enable_SIMD, enable_rfm_precompute, OMP_pragma_on),\n rel_path_to_Cparams=rel_path_to_Cparams, enableCparameters=enableCparameters)\n return pickle_NRPy_env()",
"_____no_output_____"
]
],
[
[
"<a id='enforce3metric'></a>\n\n# Step 5: `enforce_detgammahat_constraint()`: Register C function for enforcing the conformal 3-metric $\\det{\\bar{\\gamma}_{ij}}=\\det{\\hat{\\gamma}_{ij}}$ constraint \\[Back to [top](#toc)\\]\n$$\\label{enforce3metric}$$\n\nTo ensure stability when solving the BSSN equations, we must enforce the conformal 3-metric $\\det{\\bar{\\gamma}_{ij}}=\\det{\\hat{\\gamma}_{ij}}$ constraint (Eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)), as [documented in the corresponding NRPy+ tutorial notebook](Tutorial-BSSN_enforcing_determinant_gammabar_equals_gammahat_constraint.ipynb). This function imposes the $\\det{\\bar{\\gamma}_{ij}}=\\det{\\hat{\\gamma}_{ij}}$ constraint.\n\nApplying curvilinear boundary conditions should affect the initial data at the outer boundary, and will in general cause the $\\det{\\bar{\\gamma}_{ij}}=\\det{\\hat{\\gamma}_{ij}}$ constraint to be violated there. Thus after we apply these boundary conditions, we must always call the routine for enforcing the $\\det{\\bar{\\gamma}_{ij}}=\\det{\\hat{\\gamma}_{ij}}$ constraint.\n\n`add_enforce_detgammahat_constraint_to_Cfunction_dict()` supports the following features\n\n* (enabled by default) reference-metric precomputation\n* (disabled by default) \"golden kernels\", which greatly increases the C-code generation time in an attempt to reduce computational cost. Most often this results in no speed-up.\n* (`\"i2\"` by default) OpenMP pragma acts on which loop (assumes `i2` is outermost and `i0` is innermost loop). For axisymmetric or near-axisymmetric calculations, `\"i1\"` may be *significantly* faster.\n\nAlso to enable parallel C-code kernel generation, the NRPy+ environment is pickled and returned.",
"_____no_output_____"
]
],
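A registration sketch follows (illustrative only); note that SIMD is disabled inside this function because the kernel uses `cbrt()` and `abs()`, and in the generated C driver the routine is intended to be called immediately after the curvilinear boundary conditions are applied.

```python
# Sketch: register enforce_detgammahat_constraint() (illustrative only).
env = add_enforce_detgammahat_constraint_to_Cfunction_dict(
    enable_rfm_precompute=True,
    OMP_pragma_on="i2")
```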
[
[
"def add_enforce_detgammahat_constraint_to_Cfunction_dict(includes=None, rel_path_to_Cparams=os.path.join(\".\"),\n enable_rfm_precompute=True, enable_golden_kernels=False,\n OMP_pragma_on=\"i2\", func_name_suffix=\"\"):\n # This function disables SIMD, as it includes cbrt() and abs() functions.\n if includes is None:\n includes = []\n # This function does not use finite differences!\n # enable_FD_functions = bool(par.parval_from_str(\"finite_difference::enable_FD_functions\"))\n # if enable_FD_functions:\n # includes += [\"finite_difference_functions.h\"]\n\n # Set up the C function for enforcing the det(gammabar) = det(gammahat) BSSN algebraic constraint\n desc = \"Enforce the det(gammabar) = det(gammahat) (algebraic) constraint\"\n name = \"enforce_detgammahat_constraint\" + func_name_suffix\n params = \"const paramstruct *restrict params, \"\n if enable_rfm_precompute:\n params += \"const rfm_struct *restrict rfmstruct, \"\n else:\n params += \"REAL *xx[3], \"\n params += \"REAL *restrict in_gfs\"\n\n # Construct body:\n enforce_detg_constraint_symb_expressions = EGC.Enforce_Detgammahat_Constraint_symb_expressions()\n\n preloop=\"\"\n enableCparameters=True\n # Set up preloop in case we're outputting code for the Einstein Toolkit (ETK)\n if par.parval_from_str(\"grid::GridFuncMemAccess\") == \"ETK\":\n params, preloop = set_ETK_func_params_preloop(func_name_suffix, enable_SIMD=False)\n enableCparameters=False\n\n FD_outCparams = \"outCverbose=False,enable_SIMD=False\"\n FD_outCparams += \",GoldenKernelsEnable=\" + str(enable_golden_kernels)\n starttime = print_msg_with_timing(\"Enforcing det(gammabar)=det(gammahat) constraint\", msg=\"Ccodegen\", startstop=\"start\")\n body = fin.FD_outputC(\"returnstring\", enforce_detg_constraint_symb_expressions,\n params=FD_outCparams)\n print_msg_with_timing(\"Enforcing det(gammabar)=det(gammahat) constraint\", msg=\"Ccodegen\", startstop=\"stop\", starttime=starttime)\n\n enable_SIMD = False\n add_to_Cfunction_dict(\n includes=includes,\n desc=desc,\n name=name, params=params,\n preloop=preloop,\n body=body,\n loopopts=get_loopopts(\"AllPoints\", enable_SIMD, enable_rfm_precompute, OMP_pragma_on),\n rel_path_to_Cparams=rel_path_to_Cparams, enableCparameters=enableCparameters)\n return pickle_NRPy_env()",
"_____no_output_____"
]
],
[
[
"<a id='psi4'></a>\n\n# Step 6.a: `psi4_part_{0,1,2}()`: Register C function for evaluating Weyl scalar $\\psi_4$, in 3 parts (3 functions) \\[Back to [top](#toc)\\]\n$$\\label{psi4}$$\n\n$\\psi_4$ is a complex scalar related to the gravitational wave strain via\n\n$$\n\\psi_4 = \\ddot{h}_+ - i \\ddot{h}_\\times.\n$$\n\nWe construct the symbolic expression for $\\psi_4$ as described in the [corresponding NRPy+ Jupyter notebook](Tutorial-Psi4.ipynb), in three parts. The below `add_psi4_part_to_Cfunction_dict()` function will construct any of these three parts `0`, `1,` or `2`, and output the part to a function `psi4_part0()`, `psi4_part1()`, or `psi4_part2()`, respectively.\n\n`add_psi4_part_to_Cfunction_dict()` supports the following features\n\n* (`\"0\"` by default) which part? (`0`, `1,` or `2`), as described above\n* (disabled by default) \"setPsi4tozero\", which effectively turns this into a dummy function -- for when $\\psi_4$ is not needed, and it's easier to just set `psi_4=0` instead of calculating it.\n* (`\"i2\"` by default) OpenMP pragma acts on which loop (assumes `i2` is outermost and `i0` is innermost loop). For axisymmetric or near-axisymmetric calculations, `\"i1\"` may be *significantly* faster.\n\nAlso to enable parallel C-code kernel generation, the NRPy+ environment is pickled and returned.",
"_____no_output_____"
]
],
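Since all three pieces are registered by the same routine, a compact sketch (illustrative only, assuming the usual NRPy+ setup) just loops over the part index; passing `setPsi4tozero=True` instead would register dummy functions for runs where the waveform is not needed.

```python
# Sketch: register psi4_part0(), psi4_part1(), and psi4_part2() (illustrative only).
for part in range(3):
    env = add_psi4_part_to_Cfunction_dict(whichpart=part, setPsi4tozero=False)
```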
[
[
"def add_psi4_part_to_Cfunction_dict(includes=None, rel_path_to_Cparams=os.path.join(\".\"), whichpart=0,\n setPsi4tozero=False, OMP_pragma_on=\"i2\"):\n starttime = print_msg_with_timing(\"psi4, part \" + str(whichpart), msg=\"Ccodegen\", startstop=\"start\")\n\n # Set up the C function for psi4\n if includes is None:\n includes = []\n includes += [\"NRPy_function_prototypes.h\"]\n\n desc = \"Compute psi4 at all interior gridpoints, part \" + str(whichpart)\n name = \"psi4_part\" + str(whichpart)\n params = \"\"\"const paramstruct *restrict params, const REAL *restrict in_gfs, REAL *restrict xx[3], REAL *restrict aux_gfs\"\"\"\n\n body = \"\"\n gri.register_gridfunctions(\"AUX\", [\"psi4_part\" + str(whichpart) + \"re\", \"psi4_part\" + str(whichpart) + \"im\"])\n FD_outCparams = \"outCverbose=False,enable_SIMD=False,CSE_sorting=none\"\n if not setPsi4tozero:\n # Set the body of the function\n # First compute the symbolic expressions\n psi4.Psi4(specify_tetrad=False)\n\n # We really don't want to store these \"Cparameters\" permanently; they'll be set via function call...\n # so we make a copy of the original par.glb_Cparams_list (sans tetrad vectors) and restore it below\n Cparams_list_orig = par.glb_Cparams_list.copy()\n par.Cparameters(\"REAL\", __name__, [\"mre4U0\", \"mre4U1\", \"mre4U2\", \"mre4U3\"], [0, 0, 0, 0])\n par.Cparameters(\"REAL\", __name__, [\"mim4U0\", \"mim4U1\", \"mim4U2\", \"mim4U3\"], [0, 0, 0, 0])\n par.Cparameters(\"REAL\", __name__, [\"n4U0\", \"n4U1\", \"n4U2\", \"n4U3\"], [0, 0, 0, 0])\n\n body += \"\"\"\nREAL mre4U0,mre4U1,mre4U2,mre4U3,mim4U0,mim4U1,mim4U2,mim4U3,n4U0,n4U1,n4U2,n4U3;\npsi4_tetrad(params,\n in_gfs[IDX4S(CFGF, i0,i1,i2)],\n in_gfs[IDX4S(HDD00GF, i0,i1,i2)],\n in_gfs[IDX4S(HDD01GF, i0,i1,i2)],\n in_gfs[IDX4S(HDD02GF, i0,i1,i2)],\n in_gfs[IDX4S(HDD11GF, i0,i1,i2)],\n in_gfs[IDX4S(HDD12GF, i0,i1,i2)],\n in_gfs[IDX4S(HDD22GF, i0,i1,i2)],\n &mre4U0,&mre4U1,&mre4U2,&mre4U3,&mim4U0,&mim4U1,&mim4U2,&mim4U3,&n4U0,&n4U1,&n4U2,&n4U3,\n xx, i0,i1,i2);\n\"\"\"\n body += \"REAL xCart_rel_to_globalgrid_center[3];\\n\"\n body += \"xx_to_Cart(params, xx, i0, i1, i2, xCart_rel_to_globalgrid_center);\\n\"\n body += \"int ignore_Cart_to_i0i1i2[3]; REAL xx_rel_to_globalgridorigin[3];\\n\"\n body += \"Cart_to_xx_and_nearest_i0i1i2_global_grid_center(params, xCart_rel_to_globalgrid_center,xx_rel_to_globalgridorigin,ignore_Cart_to_i0i1i2);\\n\"\n for i in range(3):\n body += \"const REAL xx\" + str(i) + \"=xx_rel_to_globalgridorigin[\" + str(i) + \"];\\n\"\n body += fin.FD_outputC(\"returnstring\",\n [lhrh(lhs=gri.gfaccess(\"in_gfs\", \"psi4_part\" + str(whichpart) + \"re\"),\n rhs=psi4.psi4_re_pt[whichpart]),\n lhrh(lhs=gri.gfaccess(\"in_gfs\", \"psi4_part\" + str(whichpart) + \"im\"),\n rhs=psi4.psi4_im_pt[whichpart])],\n params=FD_outCparams)\n par.glb_Cparams_list = Cparams_list_orig.copy()\n\n elif setPsi4tozero:\n body += fin.FD_outputC(\"returnstring\",\n [lhrh(lhs=gri.gfaccess(\"in_gfs\", \"psi4_part\" + str(whichpart) + \"re\"),\n rhs=sp.sympify(0)),\n lhrh(lhs=gri.gfaccess(\"in_gfs\", \"psi4_part\" + str(whichpart) + \"im\"),\n rhs=sp.sympify(0))],\n params=FD_outCparams)\n enable_SIMD = False\n enable_rfm_precompute = False\n\n print_msg_with_timing(\"psi4, part \" + str(whichpart), msg=\"Ccodegen\", startstop=\"stop\", starttime=starttime)\n add_to_Cfunction_dict(\n includes=includes,\n desc=desc,\n name=name, params=params,\n body=body,\n loopopts=get_loopopts(\"InteriorPoints\", enable_SIMD, enable_rfm_precompute, OMP_pragma_on,\n enable_xxs=False),\n 
rel_path_to_Cparams=rel_path_to_Cparams)\n return pickle_NRPy_env()",
"_____no_output_____"
]
],
[
[
"<a id='psi4_tetrad'></a>\n\n# Step 6.b: `psi4_tetrad()`: Register C function for evaluating Weyl scalar $\\psi_4$ tetrad \\[Back to [top](#toc)\\]\n$$\\label{psi4_tetrad}$$\n\nComputing $\\psi_4$ requires that an observer tetrad be specified. We adopt a \"quasi-Kinnersley tetrad\" as described in [the corresponding NRPy+ tutorial notebook](Tutorial-Psi4_tetrads.ipynb).\n\n`add_psi4_tetrad_to_Cfunction_dict()` supports the following features\n\n* (disabled by default) \"setPsi4tozero\", which effectively turns this into a dummy function -- for when $\\psi_4$ is not needed, and it's easier to just set `psi_4=0` instead of calculating it.\n\nAlso to enable parallel C-code kernel generation, the NRPy+ environment is pickled and returned.",
"_____no_output_____"
]
],
[
[
"def add_psi4_tetrad_to_Cfunction_dict(includes=None, rel_path_to_Cparams=os.path.join(\".\"), setPsi4tozero=False):\n starttime = print_msg_with_timing(\"psi4 tetrads\", msg=\"Ccodegen\", startstop=\"start\")\n\n # Set up the C function for BSSN basis transformations\n desc = \"Compute tetrad for psi4\"\n name = \"psi4_tetrad\"\n\n # First set up the symbolic expressions (RHSs) and their names (LHSs)\n psi4tet.Psi4_tetrads()\n list_of_varnames = []\n list_of_symbvars = []\n for i in range(4):\n list_of_varnames.append(\"*mre4U\" + str(i))\n list_of_symbvars.append(psi4tet.mre4U[i])\n for i in range(4):\n list_of_varnames.append(\"*mim4U\" + str(i))\n list_of_symbvars.append(psi4tet.mim4U[i])\n for i in range(4):\n list_of_varnames.append(\"*n4U\" + str(i))\n list_of_symbvars.append(psi4tet.n4U[i])\n\n paramsindent = \" \"\n params = \"\"\"const paramstruct *restrict params,\\n\"\"\" + paramsindent\n list_of_metricvarnames = [\"cf\"]\n for i in range(3):\n for j in range(i, 3):\n list_of_metricvarnames.append(\"hDD\" + str(i) + str(j))\n for var in list_of_metricvarnames:\n params += \"const REAL \" + var + \",\"\n params += \"\\n\" + paramsindent\n for var in list_of_varnames:\n params += \"REAL \" + var + \",\"\n params += \"\\n\" + paramsindent + \"REAL *restrict xx[3], const int i0,const int i1,const int i2\"\n\n # Set the body of the function\n body = \"\"\n outCparams = \"includebraces=False,outCverbose=False,enable_SIMD=False,preindent=1\"\n if not setPsi4tozero:\n for i in range(3):\n body += \" const REAL xx\" + str(i) + \" = xx[\" + str(i) + \"][i\" + str(i) + \"];\\n\"\n body += \" // Compute tetrads:\\n\"\n body += \" {\\n\"\n # Sort the lhss list alphabetically, and rhss to match:\n lhss, rhss = [list(x) for x in zip(*sorted(zip(list_of_varnames, list_of_symbvars), key=lambda pair: pair[0]))]\n body += outputC(rhss, lhss, filename=\"returnstring\", params=outCparams)\n body += \" }\\n\"\n\n elif setPsi4tozero:\n body += \"return;\\n\"\n loopopts = \"\"\n\n print_msg_with_timing(\"psi4 tetrads\", msg=\"Ccodegen\", startstop=\"stop\", starttime=starttime)\n add_to_Cfunction_dict(\n includes=includes,\n desc=desc,\n name=name, params=params,\n body=body,\n loopopts=loopopts,\n rel_path_to_Cparams=rel_path_to_Cparams)\n return pickle_NRPy_env()",
"_____no_output_____"
]
],
[
[
"<a id='swm2'></a>\n\n# Step 6.c: `SpinWeight_minus2_SphHarmonics()`: Register C function for evaluating spin-weight $s=-2$ spherical harmonics \\[Back to [top](#toc)\\]\n$$\\label{swm2}$$\n\nAfter evaluating $\\psi_4$ at all interior gridpoints on a numerical grid, we next decompose $\\psi_4$ into spin-weight $s=-2$ spherical harmonics, which are documented in [this NRPy+ tutorial notebook](Tutorial-SpinWeighted_Spherical_Harmonics.ipynb).\n\n`SpinWeight_minus2_SphHarmonics()` supports the following features\n\n* (`\"8\"` by default) `maximum_l`, the maximum $\\ell$ mode to output. Symbolic expressions $(\\ell,m)$ modes up to and including `maximum_l` will be output.\n\nAlso to enable parallel C-code kernel generation, the NRPy+ environment is pickled and returned.",
"_____no_output_____"
]
],
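Because every $(\ell,m)$ mode up to `maximum_l` is expanded symbolically into the generated C `switch` block, code-generation time grows quickly with the cutoff; a sketch of registering the driver with a reduced cutoff (an illustrative choice, not the default) is:

```python
# Sketch: keep only modes up to l = 4 to shorten C-code generation (illustrative only).
env = add_SpinWeight_minus2_SphHarmonics_to_Cfunction_dict(maximum_l=4)
```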
[
[
"def add_SpinWeight_minus2_SphHarmonics_to_Cfunction_dict(includes=None, rel_path_to_Cparams=os.path.join(\".\"),\n maximum_l=8):\n starttime = print_msg_with_timing(\"Spin-weight s=-2 Spherical Harmonics\", msg=\"Ccodegen\", startstop=\"start\")\n\n # Set up the C function for computing the spin-weight -2 spherical harmonic at theta,phi: Y_{s=-2, l,m}(theta,phi)\n prefunc = r\"\"\"// Compute at a single point (th,ph) the spin-weight -2 spherical harmonic Y_{s=-2, l,m}(th,ph)\n// Manual \"inline void\" of this function results in compilation error with clang.\nvoid SpinWeight_minus2_SphHarmonics(const int l, const int m, const REAL th, const REAL ph,\n REAL *reYlmswm2_l_m, REAL *imYlmswm2_l_m) {\n\"\"\"\n # Construct prefunc:\n outCparams = \"preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False\"\n prefunc += \"\"\"\n switch(l) {\n\"\"\"\n for l in range(maximum_l + 1): # Output values up to and including l=8.\n prefunc += \" case \" + str(l) + \":\\n\"\n prefunc += \" switch(m) {\\n\"\n for m in range(-l, l + 1):\n prefunc += \" case \" + str(m) + \":\\n\"\n prefunc += \" {\\n\"\n Y_m2_lm = SWm2SH.Y(-2, l, m, SWm2SH.th, SWm2SH.ph)\n prefunc += outputC([sp.re(Y_m2_lm), sp.im(Y_m2_lm)], [\"*reYlmswm2_l_m\", \"*imYlmswm2_l_m\"],\n \"returnstring\", outCparams)\n prefunc += \" }\\n\"\n prefunc += \" return;\\n\"\n prefunc += \" } // END switch(m)\\n\"\n prefunc += \" } // END switch(l)\\n\"\n\n prefunc += r\"\"\"\n fprintf(stderr, \"ERROR: SpinWeight_minus2_SphHarmonics handles only l=[0,\"\"\"+str(maximum_l)+r\"\"\"] and only m=[-l,+l] is defined.\\n\");\n fprintf(stderr, \" You chose l=%d and m=%d, which is out of these bounds.\\n\",l,m);\n exit(1);\n}\n\nvoid lowlevel_decompose_psi4_into_swm2_modes(const int Nxx_plus_2NGHOSTS1,const int Nxx_plus_2NGHOSTS2,\n const REAL dxx1, const REAL dxx2,\n const REAL curr_time, const REAL R_ext,\n const REAL *restrict th_array, const REAL *restrict sinth_array, const REAL *restrict ph_array,\n const REAL *restrict psi4r_at_R_ext, const REAL *restrict psi4i_at_R_ext) {\n for(int l=2;l<=\"\"\"+str(maximum_l)+r\"\"\";l++) { // The maximum l here is set in Python.\n for(int m=-l;m<=l;m++) {\n // Parallelize the integration loop:\n REAL psi4r_l_m = 0.0;\n REAL psi4i_l_m = 0.0;\n#pragma omp parallel for reduction(+:psi4r_l_m,psi4i_l_m)\n for(int i1=0;i1<Nxx_plus_2NGHOSTS1-2*NGHOSTS;i1++) {\n const REAL th = th_array[i1];\n const REAL sinth = sinth_array[i1];\n for(int i2=0;i2<Nxx_plus_2NGHOSTS2-2*NGHOSTS;i2++) {\n const REAL ph = ph_array[i2];\n // Construct integrand for psi4 spin-weight s=-2,l=2,m=0 spherical harmonic\n REAL ReY_sm2_l_m,ImY_sm2_l_m;\n SpinWeight_minus2_SphHarmonics(l,m, th,ph, &ReY_sm2_l_m,&ImY_sm2_l_m);\n\n const int idx2d = i1*(Nxx_plus_2NGHOSTS2-2*NGHOSTS)+i2;\n const REAL a = psi4r_at_R_ext[idx2d];\n const REAL b = psi4i_at_R_ext[idx2d];\n const REAL c = ReY_sm2_l_m;\n const REAL d = ImY_sm2_l_m;\n psi4r_l_m += (a*c + b*d) * dxx2 * sinth*dxx1;\n psi4i_l_m += (b*c - a*d) * dxx2 * sinth*dxx1;\n }\n }\n // Step 4: Output the result of the integration to file.\n char filename[100];\n sprintf(filename,\"outpsi4_l%d_m%d-r%.2f.txt\",l,m, (double)R_ext);\n // If you love \"+\"'s in filenames by all means enable this (ugh):\n //if(m>=0) sprintf(filename,\"outpsi4_l%d_m+%d-r%.2f.txt\",l,m, (double)R_ext);\n FILE *outpsi4_l_m;\n // 0 = n*dt when n=0 is exactly represented in double/long double precision,\n // so no worries about the result being ~1e-16 in double/ld precision\n if(curr_time==0) outpsi4_l_m = fopen(filename, \"w\");\n 
else outpsi4_l_m = fopen(filename, \"a\");\n fprintf(outpsi4_l_m,\"%e %.15e %.15e\\n\", (double)(curr_time),\n (double)psi4r_l_m,(double)psi4i_l_m);\n fclose(outpsi4_l_m);\n }\n }\n}\n\"\"\"\n\n desc = \"\"\n name = \"driver__spherlikegrids__psi4_spinweightm2_decomposition\"\n params = r\"\"\"const paramstruct *restrict params, REAL *restrict diagnostic_output_gfs,\n const int *restrict list_of_R_ext_idxs, const int num_of_R_ext_idxs, const REAL time,\n REAL *restrict xx[3],void xx_to_Cart(const paramstruct *restrict params, REAL *restrict xx[3],const int i0,const int i1,const int i2, REAL xCart[3])\"\"\"\n\n body = r\"\"\" // Step 1: Allocate memory for 2D arrays used to store psi4, theta, sin(theta), and phi.\n const int sizeof_2Darray = sizeof(REAL)*(Nxx_plus_2NGHOSTS1-2*NGHOSTS)*(Nxx_plus_2NGHOSTS2-2*NGHOSTS);\n REAL *restrict psi4r_at_R_ext = (REAL *restrict)malloc(sizeof_2Darray);\n REAL *restrict psi4i_at_R_ext = (REAL *restrict)malloc(sizeof_2Darray);\n // ... also store theta, sin(theta), and phi to corresponding 1D arrays.\n REAL *restrict sinth_array = (REAL *restrict)malloc(sizeof(REAL)*(Nxx_plus_2NGHOSTS1-2*NGHOSTS));\n REAL *restrict th_array = (REAL *restrict)malloc(sizeof(REAL)*(Nxx_plus_2NGHOSTS1-2*NGHOSTS));\n REAL *restrict ph_array = (REAL *restrict)malloc(sizeof(REAL)*(Nxx_plus_2NGHOSTS2-2*NGHOSTS));\n\n // Step 2: Loop over all extraction indices:\n for(int ii0=0;ii0<num_of_R_ext_idxs;ii0++) {\n // Step 2.a: Set the extraction radius R_ext based on the radial index R_ext_idx\n REAL R_ext;\n {\n REAL xCart[3]; xx_to_Cart(params,xx,list_of_R_ext_idxs[ii0],1,1,xCart); // values for itheta and iphi don't matter.\n R_ext = sqrt(xCart[0]*xCart[0] + xCart[1]*xCart[1] + xCart[2]*xCart[2]);\n }\n\n // Step 2.b: Compute psi_4 at this extraction radius and store to a local 2D array.\n const int i0=list_of_R_ext_idxs[ii0];\n#pragma omp parallel for\n for(int i1=NGHOSTS;i1<Nxx_plus_2NGHOSTS1-NGHOSTS;i1++) {\n th_array[i1-NGHOSTS] = xx[1][i1];\n sinth_array[i1-NGHOSTS] = sin(xx[1][i1]);\n for(int i2=NGHOSTS;i2<Nxx_plus_2NGHOSTS2-NGHOSTS;i2++) {\n ph_array[i2-NGHOSTS] = xx[2][i2];\n\n // Compute real & imaginary parts of psi_4, output to diagnostic_output_gfs\n const REAL psi4r = (diagnostic_output_gfs[IDX4S(PSI4_PART0REGF, i0,i1,i2)] +\n diagnostic_output_gfs[IDX4S(PSI4_PART1REGF, i0,i1,i2)] +\n diagnostic_output_gfs[IDX4S(PSI4_PART2REGF, i0,i1,i2)]);\n const REAL psi4i = (diagnostic_output_gfs[IDX4S(PSI4_PART0IMGF, i0,i1,i2)] +\n diagnostic_output_gfs[IDX4S(PSI4_PART1IMGF, i0,i1,i2)] +\n diagnostic_output_gfs[IDX4S(PSI4_PART2IMGF, i0,i1,i2)]);\n\n // Store result to \"2D\" array (actually 1D array with 2D storage):\n const int idx2d = (i1-NGHOSTS)*(Nxx_plus_2NGHOSTS2-2*NGHOSTS)+(i2-NGHOSTS);\n psi4r_at_R_ext[idx2d] = psi4r;\n psi4i_at_R_ext[idx2d] = psi4i;\n }\n }\n // Step 3: Perform integrations across all l,m modes from l=2 up to and including L_MAX (global variable):\n lowlevel_decompose_psi4_into_swm2_modes(Nxx_plus_2NGHOSTS1,Nxx_plus_2NGHOSTS2,\n dxx1,dxx2,\n time, R_ext, th_array, sinth_array, ph_array,\n psi4r_at_R_ext,psi4i_at_R_ext);\n }\n\n // Step 4: Free all allocated memory:\n free(psi4r_at_R_ext); free(psi4i_at_R_ext);\n free(sinth_array); free(th_array); free(ph_array);\n\"\"\"\n\n print_msg_with_timing(\"Spin-weight s=-2 Spherical Harmonics\", msg=\"Ccodegen\", startstop=\"stop\", starttime=starttime)\n\n add_to_Cfunction_dict(\n includes=includes,\n prefunc=prefunc,\n desc=desc,\n name=name, params=params,\n body=body,\n rel_path_to_Cparams=rel_path_to_Cparams)\n 
return pickle_NRPy_env()",
"_____no_output_____"
]
],
[
[
"<a id='validation'></a>\n\n# Step 7: Confirm above functions are bytecode-identical to those in `BSSN/BSSN_Ccodegen_library.py` \\[Back to [top](#toc)\\]\n$$\\label{validation}$$",
"_____no_output_____"
]
],
[
[
"import BSSN.BSSN_Ccodegen_library as BCL\nimport sys\n\nfunclist = [(\"print_msg_with_timing\", print_msg_with_timing, BCL.print_msg_with_timing),\n (\"get_loopopts\", get_loopopts, BCL.get_loopopts),\n (\"register_stress_energy_source_terms_return_T4UU\", register_stress_energy_source_terms_return_T4UU, BCL.register_stress_energy_source_terms_return_T4UU),\n (\"BSSN_RHSs__generate_symbolic_expressions\", BSSN_RHSs__generate_symbolic_expressions, BCL.BSSN_RHSs__generate_symbolic_expressions),\n (\"add_rhs_eval_to_Cfunction_dict\", add_rhs_eval_to_Cfunction_dict, BCL.add_rhs_eval_to_Cfunction_dict),\n (\"Ricci__generate_symbolic_expressions\", Ricci__generate_symbolic_expressions, BCL.Ricci__generate_symbolic_expressions),\n (\"add_Ricci_eval_to_Cfunction_dict\", add_Ricci_eval_to_Cfunction_dict, BCL.add_Ricci_eval_to_Cfunction_dict),\n (\"BSSN_constraints__generate_symbolic_expressions\", BSSN_constraints__generate_symbolic_expressions, BCL.BSSN_constraints__generate_symbolic_expressions),\n (\"add_BSSN_constraints_to_Cfunction_dict\", add_BSSN_constraints_to_Cfunction_dict, BCL.add_BSSN_constraints_to_Cfunction_dict),\n (\"add_enforce_detgammahat_constraint_to_Cfunction_dict\", add_enforce_detgammahat_constraint_to_Cfunction_dict, BCL.add_enforce_detgammahat_constraint_to_Cfunction_dict),\n (\"add_psi4_part_to_Cfunction_dict\", add_psi4_part_to_Cfunction_dict, BCL.add_psi4_part_to_Cfunction_dict),\n (\"add_psi4_tetrad_to_Cfunction_dict\", add_psi4_tetrad_to_Cfunction_dict, BCL.add_psi4_tetrad_to_Cfunction_dict),\n (\"add_SpinWeight_minus2_SphHarmonics_to_Cfunction_dict\", add_SpinWeight_minus2_SphHarmonics_to_Cfunction_dict, BCL.add_SpinWeight_minus2_SphHarmonics_to_Cfunction_dict)\n ]\n\nif sys.version_info.major >= 3:\n import inspect\n for func in funclist:\n # https://stackoverflow.com/questions/20059011/check-if-two-python-functions-are-equal\n if inspect.getsource(func[1]) != inspect.getsource(func[2]):\n print(\"inspect.getsource(func[1]):\")\n print(inspect.getsource(func[1]))\n print(\"inspect.getsource(func[2]):\")\n print(inspect.getsource(func[2]))\n print(\"ERROR: function \" + func[0] + \" is not the same as the Ccodegen_library version!\")\n sys.exit(1)\n\n print(\"PASS! ALL FUNCTIONS ARE IDENTICAL\")\nelse:\n print(\"SORRY CANNOT CHECK FUNCTION IDENTITY WITH PYTHON 2. PLEASE update your Python installation.\")",
"PASS! ALL FUNCTIONS ARE IDENTICAL\n"
]
],
[
[
"<a id='latex_pdf_output'></a>\n\n# Step 8: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename\n[Tutorial-BSSN_time_evolution-C_codegen_library.pdf](Tutorial-BSSN_time_evolution-C_codegen_library.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)",
"_____no_output_____"
]
],
[
[
"import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface\ncmd.output_Jupyter_notebook_to_LaTeXed_PDF(\"Tutorial-BSSN_time_evolution-C_codegen_library\")",
"Created Tutorial-BSSN_time_evolution-C_codegen_library.tex, and compiled\n LaTeX file to PDF file Tutorial-BSSN_time_evolution-\n C_codegen_library.pdf\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7c984f32885a40cd826bd564916d7bf83fddb58 | 617,471 | ipynb | Jupyter Notebook | PID_001/simple_prediction.ipynb | ThinhPham1988/AIDA-AT2019 | 8f2d63028d7bc9372e829ad8b06305551ad08610 | [
"MIT"
] | null | null | null | PID_001/simple_prediction.ipynb | ThinhPham1988/AIDA-AT2019 | 8f2d63028d7bc9372e829ad8b06305551ad08610 | [
"MIT"
] | null | null | null | PID_001/simple_prediction.ipynb | ThinhPham1988/AIDA-AT2019 | 8f2d63028d7bc9372e829ad8b06305551ad08610 | [
"MIT"
] | 1 | 2019-08-06T04:03:26.000Z | 2019-08-06T04:03:26.000Z | 1,173.89924 | 63,800 | 0.948098 | [
[
[
"import tensorflow as tf\nimport numpy as np\nimport gym\nfrom gym import wrappers\nimport tflearn\nimport argparse\nimport pprint as pp\nimport numpy\nimport pandas as pd\nimport random\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm as CM\nfrom matplotlib import path\nimport itertools\nimport scipy.misc\n\nimport copy\nfrom sklearn.ensemble import RandomForestRegressor\n\n\n%matplotlib inline\ndef converting_scenario(scenario):\n snapshot = pd.DataFrame(columns=['entry_x','entry_y','middle_x','middle_y','exit_x','exit_y','res_x','res_y'])\n flight_info = {}\n flight_info['entry_x'] = scenario.sync_point_x_1\n flight_info['entry_y'] = scenario.sync_point_y_1\n flight_info['exit_x'] = scenario.exit_x_1\n flight_info['exit_y'] = scenario.exit_y_1\n \n# flight_info['middle_x'] = (scenario.sync_point_x_1 + scenario.exit_x_1)/2\n# flight_info['middle_y'] = (scenario.sync_point_y_1 + scenario.exit_y_1)/2\n flight_info['middle_x'] = scenario.middle_x_1\n flight_info['middle_y'] = scenario.middle_y_1\n flight_info['res_x'] = scenario.res_x\n flight_info['res_y'] = scenario.res_y\n \n snapshot = snapshot.append(flight_info, ignore_index=True)\n\n flight_info = {}\n flight_info['entry_x'] = scenario.sync_point_x_0\n flight_info['entry_y'] = scenario.sync_point_y_0\n flight_info['exit_x'] = scenario.exit_x_0\n flight_info['exit_y'] = scenario.exit_y_0\n flight_info['middle_x'] = (flight_info['entry_x'] + scenario.exit_x_0)/2\n flight_info['middle_y'] = (flight_info['entry_y'] + scenario.exit_y_0)/2\n \n# flight_info['middle_x'] = scenario.middle_x_0\n# flight_info['middle_y'] = scenario.middle_y_0\n snapshot = snapshot.append(flight_info, ignore_index=True)\n \n list_flights = pd.read_json(scenario.surrounding_flight)\n for i in range(len(list_flights)):\n flight_info = {}\n flight_info['entry_x'] = list_flights.iloc[i].sync_point_x \n flight_info['entry_y'] = list_flights.iloc[i].sync_point_y\n flight_info['exit_x'] = list_flights.iloc[i].exit_x\n flight_info['exit_y'] = list_flights.iloc[i].exit_y\n flight_info['middle_x'] = (flight_info['entry_x'] + list_flights.iloc[i].exit_x)/2\n flight_info['middle_y'] = (flight_info['entry_y'] + list_flights.iloc[i].exit_y)/2\n snapshot = snapshot.append(flight_info, ignore_index=True)\n \n return snapshot\n\n# Environment for learning Demo data\nclass flightOb():\n def __init__(self, Flight, ownship):\n self.enter_x = Flight.entry_x\n self.enter_y = Flight.entry_y\n self.exit_x = Flight.exit_x\n self.exit_y = Flight.exit_y\n self.middle_x = Flight.middle_x\n self.middle_y = Flight.middle_y\n self.enter_time = 0\n self.ownship = ownship\n \nclass simEnv(): \n ##################\n def __init__(self, size):\n self.size = size[0]\n self.maxnum = size[1]\n self.flights = []\n self.action_space = np.zeros(2)\n self.resol = []\n\n# self.info = 7\n self.optimal_features = [0, 0, 0]\n self.position_features = [0,0,0,0]\n self.closure_features = np.zeros((self.maxnum-1)*3)\n# self.traffic_features = np.zeros((self.maxnum-1)*2)\n self.num_feature = len(self.closure_features) + len(self.position_features) + len(self.optimal_features)\n self.observation_space = np.ones(self.num_feature ,dtype='uint8')\n \n def loadAirSpace(self,snapshot):\n objects = []\n \n for i in range(len(snapshot)):\n objects.append(flightOb(snapshot.iloc[i],i==0))\n \n for i in range(self.maxnum - len(snapshot)):\n objects.append(flightOb(snapshot.iloc[1],i==0))\n \n self.resol = snapshot.iloc[0][['res_x','res_y']]\n return objects\n \n def render(self):\n plt.figure()\n Flight = 
self.flights[0]\n plt.plot([Flight.enter_x,Flight.middle_x,Flight.exit_x], [Flight.enter_y,Flight.middle_y,Flight.exit_y],'rx--')\n plt.scatter(Flight.enter_x,Flight.enter_y,c='r')\n \n Flight = self.flights[1]\n plt.plot([Flight.enter_x,Flight.middle_x,Flight.exit_x], [Flight.enter_y,Flight.middle_y,Flight.exit_y],'bx-.')\n plt.scatter(Flight.enter_x,Flight.enter_y,c='b')\n \n for i in range(2,len(self.flights)):\n Flight = self.flights[i]\n plt.plot([Flight.enter_x,Flight.middle_x,Flight.exit_x], [Flight.enter_y,Flight.middle_y,Flight.exit_y],'gx-.')\n plt.scatter(Flight.enter_x,Flight.enter_y,c='g')\n \n plt.scatter(self.resol[0],self.resol[1],c='b')\n \n plt.xlim(0, 800)\n plt.ylim(0, 800)\n plt.show()\n return 1\n\n def get_state(self):\n v = [self.flights[0].middle_x - self.resol[0], self.flights[0].middle_y - self.resol[1]]\n self.optimal_features = [np.linalg.norm(v),v[0],v[1]]\n position = [self.flights[0].middle_x-self.flights[0].enter_x, self.flights[0].middle_y-self.flights[0].enter_y]\n self.position_features = [np.linalg.norm(position),position[0],position[1], self.UL]\n \n state = self.closure_features + self.position_features + self.optimal_features\n return state\n \n def reset(self, snapshot, UL = 0):\n if UL == 0:\n per = 0\n elif UL == 1:\n per = 2\n elif UL == 2:\n per = 5\n else:\n per = 10\n \n self.UL = per\n self.flights = self.loadAirSpace(snapshot)\n self.check_state(self.flights[0]) \n state = self.get_state() \n return state\n \n def check_state(self,Ownship, init = False):\n# Ownship = self.flights[0]\n CFeature = []\n penalty = 0\n for i in range(1,len(self.flights)):\n Intruder = self.flights[i] \n #[True, cpaClosure, cpaPoint1, cpaPoint2,v]\n cf = self.conflict_dectector(Ownship, Intruder, 0.8888, 0.8888)\n \n CFeature.append(cf[1])\n CFeature.extend(cf[4])\n \n cpaClosure = cf[1]\n if cpaClosure <1: \n penalty = min(penalty,(np.exp((cpaClosure-1))-1)*100)\n \n# if init & (i == 1):\n# self.resol = cf[2]\n \n# for i in range(self.maxnum - len(self.flights)):\n# CFeature.extend([10,800,800])\n\n self.closure_features = CFeature \n return penalty\n \n def cpa_calculator(self,begin1x, begin1y, end1x, end1y, begin2x, begin2y, end2x, end2y, speed1, speed2):\n # Min CPA allowed\n cpaThreshold = 40\n # related points\n begin1 = np.array([begin1x, begin1y])\n end1 = np.array([end1x, end1y])\n begin2 = np.array([begin2x, begin2y])\n end2 = np.array([end2x, end2y]) \n # segments length\n d1 = np.linalg.norm(end1 - begin1)\n d2 = np.linalg.norm(end2 - begin2)\n \n default = np.linalg.norm(begin1 - begin2)\n cpaStatus = [False, default,begin1, begin2,begin1-begin2]\n\n if (d1 == 0) | (d2 == 0):\n if default < cpaThreshold:\n cpaStatus[0] = True\n cpaStatus[1] = cpaStatus[1]/cpaThreshold\n return cpaStatus\n \n # directional unit velocity vectors\n v1 = np.array(end1 - begin1) / d1\n v2 = np.array(end2 - begin2) / d2\n # initial closure vector and relative velocity vector\n w0 = np.array(begin1 - begin2)\n dv = v1 * speed1 - v2 * speed2\n time2cpa = - (np.dot(w0, dv)) / (np.linalg.norm(dv)**2)\n travelledDist1 = speed1 * time2cpa\n travelledDist2 = speed2 * time2cpa\n \n if time2cpa >= 0 and travelledDist1 <= d1 and travelledDist2 <= d2 : \n cpaPoint1 = begin1 + v1 * travelledDist1 \n cpaPoint2 = begin2 + v2 * travelledDist2\n cpaClosure = np.linalg.norm(cpaPoint1 - cpaPoint2)\n v = cpaPoint1 - cpaPoint2\n if cpaClosure < cpaThreshold :\n cpaStatus = [True, cpaClosure, cpaPoint1, cpaPoint2,v]\n else:\n cpaStatus = [False, cpaClosure, cpaPoint1, cpaPoint2,v]\n \n 
cpaStatus[1] = cpaStatus[1]/cpaThreshold\n return cpaStatus\n\n def conflict_dectector(self,ownship, intruder, speed1, speed2) :\n conflict = False\n # entry and exit positions of intruder\n intruder_entry = np.array([intruder.enter_x, intruder.enter_y])\n intruder_exit = np.array([intruder.exit_x, intruder.exit_y])\n intruder_path_length = np.linalg.norm(intruder_exit - intruder_entry) \n # find direction of intruder\n intruder_dir = (intruder_exit - intruder_entry) / intruder_path_length\n # sync_point is position of intruder at the moment ownship enters the sector\n # sync_point = enter_point + offset in the moving direction\n intruder_entry_offset = ownship.enter_time * speed2 \n sync_point = intruder_entry + intruder_dir * intruder_entry_offset \n\n # now check: ownship1 vs intruder\n # begin point of intruder must be sync_point\n\n hasCpa = self.cpa_calculator(ownship.enter_x, ownship.enter_y, ownship.middle_x, ownship.middle_y, \n sync_point[0], sync_point[1], intruder.exit_x, intruder.exit_y,\n speed1, speed2)\n hasCpa1 = [False, 1000]\n if hasCpa[0] :\n return hasCpa\n else :\n # entry, middle, and exit positions of intruder\n ownship_entry = np.array([ownship.enter_x, ownship.enter_y])\n ownship_middle = np.array([ownship.middle_x, ownship.middle_y])\n ownship_exit = np.array([ownship.exit_x, ownship.exit_y])\n ownship_first_length = np.linalg.norm(ownship_middle - ownship_entry)\n intruder_second_offset = intruder_entry_offset + ownship_first_length\n if intruder_second_offset <= intruder_path_length :\n intruder_begin = intruder_entry + intruder_dir * intruder_second_offset\n # checking ownship2 vs intruder:\n hasCpa1 = self.cpa_calculator(ownship.middle_x, ownship.middle_y, ownship.exit_x, ownship.exit_y, \n intruder_begin[0], intruder_begin[1], intruder.exit_x, intruder.exit_y,\n speed1, speed2)\n if hasCpa1[0] :\n return hasCpa1\n if hasCpa[1] > hasCpa1[1]:\n hasCpa = hasCpa1[:]\n return hasCpa\n \ndef train_agent(df_train, size, max_num):\n env = simEnv([size,max_num])\n X = []\n Y = []\n\n for index in range(0,len(df_train)):\n snapshot = converting_scenario(df_train.iloc[index])\n state = env.reset(snapshot[:max_num])\n X.append(state)\n Y.append(env.resol.values)\n \n regr = RandomForestRegressor(max_depth=10, random_state=1, n_estimators=100)\n regr.fit(X, Y) \n return regr\n \ndef predict_agent(model, scenario, size, max_num):\n env = simEnv([size,max_num])\n X = []\n snapshot = converting_scenario(scenario)\n state = env.reset(snapshot[:max_num])\n X.append(state)\n \n Y= model.predict(X)\n return Y\n\ndef score(F_Res, Y):\n list_score=[]\n for i in range(len(F_Res)):\n list_score.append(np.linalg.norm(np.array(F_Res[i])-Y))\n \n return np.array(list_score)",
"_____no_output_____"
],
[
"df = pd.read_csv('100_5.csv')\ndf_train = pd.DataFrame()\ndf_test = pd.DataFrame()\ndf_ = df.sample(frac=0.8,random_state=200)\ndf_train = df_train.append(df_)\ndf_test = df_test.append(df.drop(df_.index))",
"_____no_output_____"
],
[
"model = train_agent(df_train, 400, max_num)",
"_____no_output_____"
],
[
"for i in range(len(df_test)):\n Y = predict_agent(model, df_test.iloc[i], 400, max_num)\n F_Res = np.array(All_Map[df_test.index[i]])\n l_score = score(F_Res, Y) \n Pred_P = F_Res[np.argmin(l_score)]\n plt.scatter(F_Res[:,0],F_Res[:,1],c=l_score)\n plt.scatter(Y[0][0],Y[0][1], c='r')\n plt.scatter(Y_test[i][0],Y_test[i][1], c='b')\n plt.scatter(Pred_P[0],Pred_P[1], c='g')\n plt.title(df_test.index[i])\n plt.show()",
"_____no_output_____"
],
[
"df = pd.read_csv('map.csv',delimiter=',', header=0)\n\nAll_Map = []\nfor step in range(len(df)//200):\n F_Res = []\n\n for i in range(200):\n for j in range(step*200, (step+1)*200):\n if df[str(i)][j] == 0:\n F_Res.append([i*4,(j-step*200)*4])\n All_Map.append(F_Res)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7c99261baf986e8ccfb01a6d85d70384db94c36 | 842,098 | ipynb | Jupyter Notebook | Dan_notebooks/bisondata.ipynb | daw538/y4project | f8b7cb7c8ee0a5312a661a366e339371cf428533 | [
"MIT"
] | null | null | null | Dan_notebooks/bisondata.ipynb | daw538/y4project | f8b7cb7c8ee0a5312a661a366e339371cf428533 | [
"MIT"
] | null | null | null | Dan_notebooks/bisondata.ipynb | daw538/y4project | f8b7cb7c8ee0a5312a661a366e339371cf428533 | [
"MIT"
] | null | null | null | 973.523699 | 636,300 | 0.949375 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import rc\nrc(\"font\", family=\"serif\", size=11)",
"_____no_output_____"
]
],
[
[
"Here we shall import some data taken from HiROS and import into a pandas dataframe for analysis.",
"_____no_output_____"
]
],
[
[
"# Import required data\nbroomhall = '../data/broomhall2009.txt'\ndavies = '../data/davies2014.txt'\n\nfile = input(\"Please select file: 'broomhall' or 'davies': \")\nif file == str('broomhall'):\n file = broomhall\nelif file == str('davies'):\n file = davies\nelse:\n print('Please try again')\n\ndf = pd.read_csv(file, header=None, delim_whitespace=True, names=['n', 'l', 'nu', 'sg_nu'])\ndf.head()",
"Please select file: 'broomhall' or 'davies': broomhall\n"
]
],
[
[
"We can see from the preview above that the file contains a mix of radial modes at increasing orders. To perform any useful analysis, the induvidual modes $l$ must be considered separately. A neat way of doing this is to use a list comprehension, which avoids the need for multiple for loops and appending to arrays each time. This produces separate arrays for each value of $l$ which are contained within an overall list that can be called.",
"_____no_output_____"
]
],
[
[
"l = [df[(df.l == i)] for i in (range(max(df.l)-min(df.l)+1))]",
"_____no_output_____"
],
[
"plt.figure(1)\nplt.errorbar(l[0].n, l[0].nu, yerr=l[0].sg_nu, fmt='x')\nplt.errorbar(l[1].n, l[1].nu, yerr=l[1].sg_nu, fmt='x')\nplt.errorbar(l[2].n, l[2].nu, yerr=l[2].sg_nu, fmt='x')\nplt.errorbar(l[3].n, l[3].nu, yerr=l[3].sg_nu, fmt='x')\nplt.xlabel('Value of $n$')\nplt.ylabel('Frequency ($\\mu$Hz)')\nplt.show()\n\nprint(u\"%.5f\" % np.median(np.diff(l[0].nu)))\nprint(u\"%.5f\" % np.median(np.diff(l[1].nu)))\nprint(u\"%.5f\" % np.median(np.diff(l[2].nu)))\nprint(u\"%.5f\" % np.median(np.diff(l[3].nu)))\n\n\n# Échelle Plot for the data\ndnu = 135.2\nplt.figure(2)\n# New plotting method\nimport itertools\nmarkers = itertools.cycle(('+', '1', 'x', '*'))\n\nfor i in range(max(df.l)-min(df.l)+1):\n plt.scatter(df.loc[(df.l == i) & (df.n > 11)].nu % dnu, df.loc[(df.l == i) & (df.n > 11)].nu,\n label=r'$l=$'+str(i), marker=next(markers))\n plt.scatter(df.loc[(df.l == i) & (df.n < 12)].nu % dnu, df.loc[(df.l == i) & (df.n < 12)].nu,\n facecolors='none', edgecolors=['lightgrey'], label='')\nplt.title('Échelle Diagram for Sun')\nplt.xlabel('Modulo Frequency Spacing ('+ str(dnu) +') $\\mu$Hz')\nplt.ylabel('Frequency ($\\mu$Hz)')\nplt.legend()\nplt.savefig('seminar/solarechelle.pdf', bbox='tight_layout')\nplt.show()",
"_____no_output_____"
]
],
[
[
"The above Échelle diagrams show how the four lowest modes form broadly straight lines in modulo frequency space, though there are significant deviations that form a tail at the lower values of $n$ (visible as faint circles). We shall select only the $l=0$ modes for analysis.\n\n",
"_____no_output_____"
],
[
"#### Using Vrard Paper\n\nTo compute the local frequency separation for a mode $\\nu_{n,0}$ we use the average difference over the adjacent modes\n$$ \\Delta\\nu(n) = \\frac{\\nu_{n+1,0} - \\nu_{n-1,0}}{2}$$\nwhich cannot be appropriately calculated for modes the limits of n.\n\nThe asymptotic dependence of the large frequency separation wrt. n is given in the paper as\n$$ \\Delta\\nu_{\\textrm{up}}(n) = \\left( 1 + \\alpha\\left(n-n_\\textrm{max}\\right)\\right) \\left<\\Delta\\nu\\right>$$\nwhere $\\alpha$ is defined by the power law $\\alpha = A\\left<\\Delta\\nu\\right>^Β$. In the paper, the constants are set as $A=0.015$ and $B=-0.32$\n\nHaving calulated these extra frequencies $\\Delta\\nu_\\textrm{up}$, the difference between the theoretical and observed large frequency separation is calculated with $\\delta_\\textrm{g,obs} = \\Delta\\nu(n) - \\Delta\\nu_{\\textrm{up}}(n)$",
"_____no_output_____"
]
],
[
[
"nmax = 22\n# Modelling from Vrard Paper\n\nl0 = df.loc[(df.l == 0) & (df.n > 14)]\nl0['dnu_n'] = (l0['nu'].diff(2).shift(-1))/2 # Differences between neighbouring frequencies\n\nalpha = 0.015*np.mean(l0['dnu_n'])**(-0.32) # Equation provided in paper\n\nl0['dnu_up'] = (1 + alpha*(l0['n']-nmax)) * (np.mean(l0['dnu_n'])) # Calculating Δν_up (see equation above)\n\nl0['dg'] = l0['dnu_n']-l0['dnu_up'] # Difference between theoretical and observed large freq spacings\n\n\n# Plots to replicate results of Figure 2 in the Vrard paper\nplt.figure(10, figsize=(12,5))\nplt.subplot(1,2,1)\nplt.scatter(l0.nu, l0.dnu_n)\nplt.xlabel(r'Frequency ($\\mu m$)')\nplt.ylabel(r'$\\Delta\\nu(n)$')\nplt.subplot(1,2,2)\nplt.scatter(l0.nu, l0.dg)\nplt.xlabel(r'Frequency ($\\mu m$)')\nplt.ylabel(r'$\\delta_g$')\nplt.show()\n\n\nl0",
"/usr/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"\n/usr/lib/python3.7/site-packages/ipykernel_launcher.py:9: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n if __name__ == '__main__':\n/usr/lib/python3.7/site-packages/ipykernel_launcher.py:11: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n # This is added back by InteractiveShellApp.init_path()\n"
]
],
[
[
"In order to provide Stan with suitable starting parameters (to prevent the complete lack of convergance), we shall first attempt to manually fit a rather basic model to the data.\n\n$ \\Delta\\nu(n+\\epsilon) + k(\\frac{\\nu_\\textrm{max}}{\\Delta\\nu}+n)^2 + A\\sin(\\omega n+\\phi)e^{-(n/\\tau)}$\n\nwhere the latter terms represent the curvature and a decaying oscillatory component. $k$ is the curvature parameter, whilst $\\tau$ is a decay parameter for the glitch. We can then attempt to replicate this using Stan.",
"_____no_output_____"
]
],
[
[
"# Look at l=0 data initially only\nplt.figure(3, figsize=(7,4))\nplt.scatter(l0.nu % dnu, l0.nu, \n c=l0.n,cmap='viridis',\n label=r'$l=$'+str(0))\nplt.colorbar(label=r'Value of $n$')\n\ndef model(n, dnu, nmax, epsilon, k, A, omega, phi, tau):\n freqs = (n + epsilon) * dnu\n freqs += (nmax-n)**2 * k\n freqs += A*np.sin(omega*n + phi)*np.exp(-n/tau)\n return freqs\n\nn = np.arange(12,30,1)\ndnu = 135.2\nnmax = 22\nepsilon = 1.435\nk = 0.14\nA = 2.7\nomega = 5\nphi = 2.5\ntau = 10\n\nf = model(n, dnu, nmax, epsilon, k, A, omega, phi, tau)\n\nplt.plot(f % dnu, f, label='Model')\nplt.ylabel('Frequency ($\\mu$Hz)')\nplt.xlabel(r'Mod. Freq. Spacing ('+ str(dnu) +') $\\mu$Hz')\nplt.legend()\nplt.savefig('seminar/solarmodes1.pdf', bbox='tight_layout')\nplt.show()",
"_____no_output_____"
],
[
"code = '''\nfunctions {\n real model(real n, real dnu, real nmax, real epsilon, real k, real A, real omega, real phi, real tau){\n return (dnu*(n+epsilon) + k*(nmax - n)^2 + A*sin(omega*n + phi)*exp(-n/tau));\n }\n}\ndata {\n int N;\n real n[N];\n real freq[N];\n real freq_err[N];\n real dnu_guess;\n}\nparameters {\n real<lower = 0> dnu;\n real<lower = 0> nmax;\n real epsilon;\n real k;\n real<lower = 0> A;\n real<lower = 0> omega;\n real<lower = -2.0*pi(), upper = 2.0*pi()> phi;\n real<lower = 0> tau;\n}\nmodel {\n real mod[N];\n for (i in 1:N){\n mod[i] = model(n[i], dnu, nmax, epsilon, k, A, omega, phi, tau);\n }\n mod ~ normal(freq, freq_err);\n dnu ~ normal(dnu_guess, dnu_guess*0.001);\n nmax ~ normal(20,4);\n epsilon ~ normal(1.4, 0.1);\n k ~ lognormal(log(0.14), 0.3);\n A ~ lognormal(log(0.1), 0.3);\n omega ~ normal(0.8, 0.1);\n tau ~ normal(10,5);\n // phi ~ normal(0, 1.5);\n \n}\n'''\nimport pystan\nsm = pystan.StanModel(model_code=code)",
"INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_851e7df1114bd6e518bd6651bc2ad1dd NOW.\n"
],
[
"stan_data = {'N': len(l0['n'].values),\n 'n': l0['n'].values, \n 'freq': (l0['nu'].values),\n 'freq_err': l0['sg_nu'].values,\n 'dnu_guess': dnu\n }\nstart = {'dnu': dnu,\n 'nmax': 22,\n 'epsilon': 1.435,\n 'k': 0.14,\n 'A': 0.5,\n 'omega': 5,\n 'phi': 2.5,\n 'tau': 50\n }\nnchains = 4\n\nfit = sm.sampling(data=stan_data, iter=5000, chains=nchains, init=[start for n in range(nchains)],)\n #control=dict(max_treedepth=15))",
"WARNING:pystan:7944 of 10000 iterations saturated the maximum tree depth of 10 (79.44%)\nWARNING:pystan:Run again with max_treedepth larger than 10 to avoid saturation\n"
],
[
"print(fit)",
"Inference for Stan model: anon_model_851e7df1114bd6e518bd6651bc2ad1dd.\n4 chains, each with iter=5000; warmup=2500; thin=1; \npost-warmup draws per chain=2500, total post-warmup draws=10000.\n\n mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat\ndnu 135.2 9.2e-3 0.13 134.95 135.12 135.2 135.28 135.45 193 1.03\nnmax 22.5 0.04 0.55 21.43 22.15 22.5 22.85 23.57 194 1.03\nepsilon 1.44 1.6e-3 0.02 1.39 1.42 1.44 1.45 1.48 194 1.03\nk 0.12 2.4e-5 8.2e-4 0.12 0.12 0.12 0.12 0.12 1154 1.0\nA 2.69 0.01 0.44 1.91 2.39 2.66 2.96 3.65 1450 1.0\nomega 4.93 4.4e-4 9.4e-3 4.91 4.92 4.93 4.93 4.94 456 1.01\nphi 4.54 7.8e-3 0.17 4.22 4.43 4.54 4.65 4.87 454 1.01\ntau 9.42 0.02 0.77 8.09 8.88 9.35 9.88 11.15 1520 1.0\nlp__ -984.9 0.06 2.01 -989.7 -986.0 -984.6 -983.4 -982.0 1235 1.0\n\nSamples were drawn using NUTS at Sun Jan 20 23:21:49 2019.\nFor each parameter, n_eff is a crude measure of effective sample size,\nand Rhat is the potential scale reduction factor on split chains (at \nconvergence, Rhat=1).\n"
],
[
"fit.plot()\nplt.savefig('seminar/solarstan.pdf', bbox='tight_layout')\nplt.show()",
"WARNING:pystan:Deprecation warning. In future, use ArviZ library (`pip install arviz`)\n"
],
[
"import corner\ndata = np.vstack([fit['epsilon'], fit['k'], fit['dnu'], fit['nmax'],\n fit['A'], fit['omega'], fit['phi'], fit['tau']]).T\ncorner.corner(data, labels=[r'$\\epsilon$', r'$k$',r'$\\Delta\\nu$',r'$n_{max}$',\n r'$A$', r'$\\omega$', r'$\\phi$', r'$\\tau$'])\n #, truths=[1.436, 0.07, 0.3, 2, 0])\nplt.savefig('seminar/solarcorner.pdf', bbox='tight_layout')\nplt.show()",
"_____no_output_____"
],
[
"n = np.arange(12,30,1)\n\nplt.figure(4)\nplt.scatter(df.loc[(df.l == 0) & (df.n > 11)].nu % 135.2, df.loc[(df.l == 0) & (df.n > 11)].nu,\n c='k', marker='x', label=r'$l=$'+str(0))\n #c=df.loc[(df.l == 0) & (df.n > 11)].n,cmap='viridis')\n\nmod = 135.2\n#plt.colorbar(label=r'Value of $n$')\n#f = model(n, dnu, 3050.0, 1.436, 0.07, 0.3, 2, 0)\ng = model(n, fit['dnu'].mean(), fit['nmax'].mean(), fit['epsilon'].mean(), fit['k'].mean(),\n fit['A'].mean(), fit['omega'].mean(), fit['phi'].mean(), fit['tau'].mean())\nplt.plot(f % dnu, f, ':', label='Guess')\nplt.plot(g % fit['dnu'].mean(), g, label='Fit')\n#plt.plot(g % dnu, g, label='Fit')\nplt.errorbar(df.loc[(df.l == 0) & (df.n > 11)].nu % 135.2, df.loc[(df.l == 0) & (df.n > 11)].nu,\n xerr=df.loc[(df.l == 0) & (df.n > 11)].sg_nu, zorder=0, fmt=\"none\", label=\"none\",\n c='k', capsize=2, markersize=4, elinewidth=1)\nplt.ylabel('Frequency')\nplt.xlabel(r'Mod. Freq. Spacing ('+ str(mod) +') $\\mu$Hz')\nplt.xlim(58,68)\nplt.legend()\nplt.savefig('seminar/solarmodes2.pdf', bbox='tight_layout')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c994f3b78135d06cf329a706e89dd49e1b0ed2 | 51,453 | ipynb | Jupyter Notebook | docs/examples/example_gs_multipop.ipynb | mirochaj/ares | b3335ad30435ee0d7f17d0110aa164a35f252d78 | [
"MIT"
] | 10 | 2020-03-26T01:08:10.000Z | 2021-12-04T13:02:10.000Z | docs/examples/example_gs_multipop.ipynb | mirochaj/ares | b3335ad30435ee0d7f17d0110aa164a35f252d78 | [
"MIT"
] | 25 | 2020-06-08T14:52:28.000Z | 2022-03-08T02:30:54.000Z | docs/examples/example_gs_multipop.ipynb | mirochaj/ares | b3335ad30435ee0d7f17d0110aa164a35f252d78 | [
"MIT"
] | 8 | 2020-03-24T14:11:25.000Z | 2021-11-06T06:32:59.000Z | 130.591371 | 19,960 | 0.804462 | [
[
[
"# Models with Multiple Source Populations",
"_____no_output_____"
],
[
"*ARES* can handle an arbitrary number of source populations. To\naccess this functionality, create a dictionary representing each source\npopulation of interest. Below, we'll create a population representative of PopII stars and another representative of PopIII stars.\n\nBefore we start, it is important to note that in *ARES*, source populations are identified by their spectra over some contiguous interval in photon energy. This can be somewhat counterintuitive. For example, though UV emission from stars and X-ray emission from their compact remnants, e.g., X-ray binary systems, are both natural byproducts of star formation, we treat them as separate source populations in *ARES* even though the emission from each type of source is related to the same rate of star formation. However, because stars and XRBs have very different spectra, whose normalizations are parameterized differently, it is more convenient in the code to keep them separate. Because of this, what you might think of as a single source population (stars and their remnants) actually constitutes *two* source populations in *ARES*. \n\nLet's start with a PopII source population, and a few standard imports:",
"_____no_output_____"
]
],
[
[
"%pylab inline\nimport ares\nimport numpy as np\nimport matplotlib.pyplot as pl",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"pars = \\\n{\n 'problem_type': 100, # Blank slate global 21-cm signal\n\n\n # Setup star formation\n 'pop_Tmin{0}': 1e4, # atomic cooling halos\n 'pop_fstar{0}': 1e-1, # 10% star formation efficiency\n \n # Setup UV emission\n 'pop_sed_model{0}': True,\n 'pop_sed{0}': 'bb', # PopII stars -> 10^4 K blackbodies\n 'pop_temperature{0}': 1e4,\n 'pop_rad_yield{0}': 1e42,\n 'pop_fesc{0}': 0.2,\n 'pop_Emin{0}': 10.19, \n 'pop_Emax{0}': 24.6,\n 'pop_EminNorm{0}': 13.6,\n 'pop_EmaxNorm{0}': 24.6,\n 'pop_lya_src{0}': True,\n 'pop_ion_src_cgm{0}': True,\n 'pop_heat_src_igm{0}': False,\n \n # Setup X-ray emission\n 'pop_sed{1}': 'pl',\n 'pop_alpha{1}': -1.5, \n 'pop_rad_yield{1}': 2.6e38,\n 'pop_Emin{1}': 2e2, \n 'pop_Emax{1}': 3e4,\n 'pop_EminNorm{1}': 5e2,\n 'pop_EmaxNorm{1}': 8e3,\n \n 'pop_lya_src{1}': False,\n 'pop_ion_src_cgm{1}': False,\n 'pop_heat_src_igm{1}': True,\n \n 'pop_sfr_model{1}': 'link:sfrd:0',\n}",
"_____no_output_____"
]
],
[
[
"**NOTE:** See [problem_types](../problem_types.html) for more information about why we chose ``problem_type=100`` here. ",
"_____no_output_____"
],
[
"We might as well go ahead and run this to establish a baseline:",
"_____no_output_____"
]
],
[
[
"sim = ares.simulations.Global21cm(**pars)\nsim.run()\n \nax, zax = sim.GlobalSignature(color='k')",
"# Loaded $ARES/input/inits/inits_planck_TTTEEE_lowl_lowE_best.txt.\n\n##############################################################################################################\n#### ARES Simulation: Overview ####\n##############################################################################################################\n#### ---------------------------------------------------------------------------------------------------- ####\n#### Source Populations ####\n#### ---------------------------------------------------------------------------------------------------- ####\n#### sfrd sed radio O/IR Ly-a LW Ly-C X-ray RTE ####\n#### pop #0 : fcoll yes x x x ####\n#### pop #1 : link:sfrd:0 yes x ####\n#### ---------------------------------------------------------------------------------------------------- ####\n#### Physics ####\n#### ---------------------------------------------------------------------------------------------------- ####\n#### cgm_initial_temperature : [10000.0] ####\n#### clumping_factor : 1 ####\n#### secondary_ionization : 1 ####\n#### approx_Salpha : 1 ####\n#### include_He : False ####\n#### feedback_LW : False ####\n##############################################################################################################\n# Loaded $ARES/input/hmf/hmf_ST_planck_TTTEEE_lowl_lowE_best_logM_1400_4-18_z_1201_0-60.hdf5.\n"
]
],
[
[
"Now, let's add a PopIII-like source population. We'll assume that PopIII sources are brighter on average (in both the UV and X-ray) but live in lower mass halos. We could just copy-pase the dictionary above, change the population ID numbers and, for example, the UV and X-ray ``pop_rad_yield`` parameters. Or, we could use some built-in tricks to speed this up.\n\nFirst, let's take the PopII parameter set and make a ``ParameterBundle`` object:",
"_____no_output_____"
]
],
[
[
"popII = ares.util.ParameterBundle(**pars)",
"_____no_output_____"
]
],
[
[
"This let's us easily extract parameters according to their ID number, and assign new ones",
"_____no_output_____"
]
],
[
[
"popIII_uv = popII.pars_by_pop(0, True)\npopIII_uv.num = 2\npopIII_xr = popII.pars_by_pop(1, True)\npopIII_xr.num = 3",
"_____no_output_____"
]
],
[
[
"The second argument tells *ARES* to remove the parameter ID numbers.\n\nNow, we can simply reset the ID numbers and update a few important parameters:",
"_____no_output_____"
]
],
[
[
"popIII_uv['pop_Tmin{2}'] = 300\npopIII_uv['pop_Tmax{2}'] = 1e4\npopIII_uv['pop_temperature{2}'] = 1e5\npopIII_uv['pop_fstar{2}'] = 1e-4\n \npopIII_xr['pop_sfr_model{3}'] = 'link:sfrd:2'\npopIII_xr['pop_rad_yield{3}'] = 2.6e39",
"_____no_output_____"
]
],
[
[
"Now, let's make the final parameter dictionary and run it: ",
"_____no_output_____"
]
],
[
[
"pars.update(popIII_uv)\npars.update(popIII_xr)\n \nsim2 = ares.simulations.Global21cm(**pars)\nsim2.run()\n\nax, zax = sim.GlobalSignature(color='k')\nax, zax = sim2.GlobalSignature(color='b', ax=ax)",
"# Loaded $ARES/input/inits/inits_planck_TTTEEE_lowl_lowE_best.txt.\n\n##############################################################################################################\n#### ARES Simulation: Overview ####\n##############################################################################################################\n#### ---------------------------------------------------------------------------------------------------- ####\n#### Source Populations ####\n#### ---------------------------------------------------------------------------------------------------- ####\n#### sfrd sed radio O/IR Ly-a LW Ly-C X-ray RTE ####\n#### pop #0 : fcoll yes x x x ####\n#### pop #1 : link:sfrd:0 yes x ####\n#### pop #2 : fcoll yes x x x ####\n#### pop #3 : link:sfrd:2 yes x ####\n#### ---------------------------------------------------------------------------------------------------- ####\n#### Physics ####\n#### ---------------------------------------------------------------------------------------------------- ####\n#### cgm_initial_temperature : [10000.0] ####\n#### clumping_factor : 1 ####\n#### secondary_ionization : 1 ####\n#### approx_Salpha : 1 ####\n#### include_He : False ####\n#### feedback_LW : False ####\n##############################################################################################################\n# Loaded $ARES/input/hmf/hmf_ST_planck_TTTEEE_lowl_lowE_best_logM_1400_4-18_z_1201_0-60.hdf5.\n# Loaded $ARES/input/hmf/hmf_ST_planck_TTTEEE_lowl_lowE_best_logM_1400_4-18_z_1201_0-60.hdf5.\n"
]
],
[
[
"Note that the parameter file hangs onto the parameters of each population separately. To verify a few key changes, you could do: ",
"_____no_output_____"
]
],
[
[
"len(sim2.pf.pfs)",
"_____no_output_____"
],
[
"for key in ['pop_Tmin', 'pop_fstar', 'pop_rad_yield']:\n print(key, sim2.pf.pfs[0][key], sim2.pf.pfs[2][key])",
"pop_Tmin 10000.0 300\npop_fstar 0.1 0.0001\npop_rad_yield 1e+42 1e+42\n"
]
],
[
[
"**NOTE:** These are very simple models for PopII and PopIII stars. For more sophisticated approaches, see [More Realistic Galaxy Populations](example_pop_galaxy) and [Including Population III Stars](example_pop_popIII). \n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7c99a4111f2bf163db3144754bae80026d9b1ca | 29,422 | ipynb | Jupyter Notebook | notebooks/sandbox.ipynb | estorrs/mgitools | d29057676b4e365296d6692b9ca794bd1e7b3826 | [
"MIT"
] | null | null | null | notebooks/sandbox.ipynb | estorrs/mgitools | d29057676b4e365296d6692b9ca794bd1e7b3826 | [
"MIT"
] | null | null | null | notebooks/sandbox.ipynb | estorrs/mgitools | d29057676b4e365296d6692b9ca794bd1e7b3826 | [
"MIT"
] | null | null | null | 36.685786 | 4,856 | 0.480389 | [
[
[
"import sys\nsys.path.insert(0, '/Users/erikstorrs/Documents/ding/mgitools/mgitools/')",
"_____no_output_____"
],
[
"%load_ext autoreload",
"_____no_output_____"
],
[
"%autoreload 2",
"_____no_output_____"
],
[
"import os",
"_____no_output_____"
],
[
"import docker",
"_____no_output_____"
],
[
"command = 'python /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.py --verbose --\\\ncat-output /Users/erikstorrs/Documents/ding/mgitools/notebooks/output.txt /Users/erikstorrs/Documents/din\\\ng/mgitools/tests/data/test.txt'",
"_____no_output_____"
],
[
"commands = [command] * 50\ncommands = [c.replace('output.txt', f'output.{i}.txt') for i, c in enumerate(commands)]\nf = open('commands.txt', 'w')\nfor command in commands:\n f.write(command + '\\n')\nf.close()\n\n# commands",
"_____no_output_____"
],
[
"docker.remap_command(command)",
"_____no_output_____"
],
[
"docker.generate_docker_command(commands[-1], 'python:3.6', return_list=True)",
"_____no_output_____"
],
[
"docker.execute_commands(commands, 'python:3.6', wait_time=10)",
"2019-05-28 16:10:34,092 - container id: 4ed17931e847aad386757dd67f9f6c5a8698f630415dccf98c26de20eb9fa144 is executing: python /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.py --verbose --cat-output /Users/erikstorrs/Documents/ding/mgitools/notebooks/output.4.txt /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.txt\n2019-05-28 16:10:35,102 - container id: b54da95e5733ed9dba7c89e9fe35d356dff5f2197da600a8287ffb1a2fd2a003 is executing: python /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.py --verbose --cat-output /Users/erikstorrs/Documents/ding/mgitools/notebooks/output.3.txt /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.txt\n2019-05-28 16:10:36,072 - container id: eabc42e578fc2445cc8f2e2d04b5e6e872765e99378f7c66026078330ed7dae7 is executing: python /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.py --verbose --cat-output /Users/erikstorrs/Documents/ding/mgitools/notebooks/output.2.txt /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.txt\n2019-05-28 16:10:37,073 - container id: b6553ec7c23e0f4b8884ab4f04cec99a81e446b29ca3b6772cdac812acad3829 is executing: python /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.py --verbose --cat-output /Users/erikstorrs/Documents/ding/mgitools/notebooks/output.1.txt /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.txt\n2019-05-28 16:10:38,110 - container id: aa1eec63955af7864b6ed09b74a52388be122dacbb08e05a87e833893b8bf31e is executing: python /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.py --verbose --cat-output /Users/erikstorrs/Documents/ding/mgitools/notebooks/output.0.txt /Users/erikstorrs/Documents/ding/mgitools/tests/data/test.txt\n2019-05-28 16:10:54,866 - jobs complete\n"
],
[
"os.path.abspath(\"/Users/erikstorrs/Documents/ding/mgitools/mgitools\")",
"_____no_output_____"
],
[
"'hello.txt'.split('/')",
"_____no_output_____"
],
[
"sys.stderr\n",
"_____no_output_____"
],
[
"import json\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image, ImageDraw\n",
"_____no_output_____"
],
[
"d = json.load(open('/Users/erikstorrs/Downloads/12.png.json'))",
"_____no_output_____"
],
[
"d",
"_____no_output_____"
],
[
"for obj in d['objects']:\n pts = [(x, y) for x, y in obj['points']['exterior']]\n\n# m = np.zeros((256,256), dtype=bool)\n img = Image.new('L', (256, 256), 0)\n ImageDraw.Draw(img).polygon(pts, outline=1, fill=1)\n mask = np.array(img)\n plt.imshow(mask)\n break\n ",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7c9adfab59f550121a3e24369b0db153c46fd4d | 21,772 | ipynb | Jupyter Notebook | notebooks/05.02-Refinement-Protocol.ipynb | So-AI-love/PyRosetta.notebooks | bd1adf0bcd300db4576b1418defc7bea71dc28cc | [
"MIT"
] | 1 | 2020-12-11T15:20:41.000Z | 2020-12-11T15:20:41.000Z | notebooks/05.02-Refinement-Protocol.ipynb | Paradoxia-crypo/PyRosetta.notebooks | 200a6d5489f2108999563ae38c7e3fcdabe8f5fe | [
"MIT"
] | null | null | null | notebooks/05.02-Refinement-Protocol.ipynb | Paradoxia-crypo/PyRosetta.notebooks | 200a6d5489f2108999563ae38c7e3fcdabe8f5fe | [
"MIT"
] | null | null | null | 62.924855 | 657 | 0.70531 | [
[
[
"<!--NOTEBOOK_HEADER-->\n*This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks);\ncontent is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).*",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [High-Resolution Movers](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/05.01-High-Res-Movers.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Packing & Design](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/06.00-Introduction-to-Packing-and-Design.ipynb) ><p><a href=\"https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/05.02-Refinement-Protocol.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open in Google Colaboratory\"></a>",
"_____no_output_____"
],
[
"# Refinement Protocol\n\nThe entire standard Rosetta refinement protocol, similar to that presented in Bradley, Misura, & Baker 2005, is available as a `Mover`. Note that the protocol can require ~40 minutes for a 100-residue protein. \n\n```\nsfxn = get_fa_scorefxn()\npose = pose_from_pdb(\"1YY8.clean.pdb\")\nrelax = pyrosetta.rosetta.protocols.relax.ClassicRelax()\nrelax.set_scorefxn(sfxn)\nrelax.apply(pose)\n```\n\nNote that this protocol is DEPRECATED and has been for quite some time. You will want to FastRelax() instead. It still takes quite a while. Replace the ClassicRelax() with FastRelax() and run it now. You will see the FastRelax mover used in many tutorials from here on out. FastRelax with constraints on each atom is useful to get a crystal structure into the Rosetta energy function. FastRelax can also be used for flexible-backbone design. These will all be covered in due time. ",
"_____no_output_____"
]
],
[
[
"# Notebook setup\nimport sys\nif 'google.colab' in sys.modules:\n !pip install pyrosettacolabsetup\n import pyrosettacolabsetup\n pyrosettacolabsetup.setup()\n print (\"Notebook is set for PyRosetta use in Colab. Have fun!\")\n\nfrom pyrosetta import *\nfrom pyrosetta.teaching import *\ninit()",
"\u001b[0mcore.init: \u001b[0mChecking for fconfig files in pwd and ./rosetta/flags\n\u001b[0mcore.init: \u001b[0mRosetta version: PyRosetta4.Release.python36.mac r208 2019.04+release.fd666910a5e fd666910a5edac957383b32b3b4c9d10020f34c1 http://www.pyrosetta.org 2019-01-22T15:55:37\n\u001b[0mcore.init: \u001b[0mcommand: PyRosetta -ex1 -ex2aro -database /Users/kathyle/Computational Protein Prediction and Design/PyRosetta4.Release.python36.mac.release-208/pyrosetta/database\n\u001b[0mcore.init: \u001b[0m'RNG device' seed mode, using '/dev/urandom', seed=-1509889871 seed_offset=0 real_seed=-1509889871\n\u001b[0mcore.init.random: \u001b[0mRandomGenerator:init: Normal mode, seed=-1509889871 RG_type=mt19937\n"
]
],
[
[
"**Make sure you are in the directory with the pdb files:**\n\n`cd google_drive/My\\ Drive/student-notebooks/`",
"_____no_output_____"
]
],
[
[
"\n### BEGIN SOLUTION\nsfxn = get_score_function()\npose = pose_from_pdb(\"inputs/1YY8.clean.pdb\")\nrelax = pyrosetta.rosetta.protocols.relax.FastRelax()\nrelax.set_scorefxn(sfxn)\n\n#Skip for tests\nif not os.getenv(\"DEBUG\"):\n relax.apply(pose)\n \n### END SOLUTION",
"\u001b[0mcore.scoring.ScoreFunctionFactory: \u001b[0mSCOREFUNCTION: \u001b[32mref2015\u001b[0m\n\u001b[0mcore.scoring.etable: \u001b[0mStarting energy table calculation\n\u001b[0mcore.scoring.etable: \u001b[0msmooth_etable: changing atr/rep split to bottom of energy well\n\u001b[0mcore.scoring.etable: \u001b[0msmooth_etable: spline smoothing lj etables (maxdis = 6)\n\u001b[0mcore.scoring.etable: \u001b[0msmooth_etable: spline smoothing solvation etables (max_dis = 6)\n\u001b[0mcore.scoring.etable: \u001b[0mFinished calculating energy tables.\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/hbonds/ref2015_params/HBPoly1D.csv\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/hbonds/ref2015_params/HBFadeIntervals.csv\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/hbonds/ref2015_params/HBEval.csv\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/hbonds/ref2015_params/DonStrength.csv\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/hbonds/ref2015_params/AccStrength.csv\n\u001b[0mcore.chemical.GlobalResidueTypeSet: \u001b[0mFinished initializing fa_standard residue type set. Created 696 residue types\n\u001b[0mcore.chemical.GlobalResidueTypeSet: \u001b[0mTotal time to initialize 1.07793 seconds.\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/rama/fd/all.ramaProb\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/rama/fd/prepro.ramaProb\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/omega/omega_ppdep.all.txt\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/omega/omega_ppdep.gly.txt\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/omega/omega_ppdep.pro.txt\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/omega/omega_ppdep.valile.txt\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/P_AA_pp/P_AA\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/P_AA_pp/P_AA_n\n\u001b[0mcore.scoring.P_AA: \u001b[0mshapovalov_lib::shap_p_aa_pp_smooth_level of 1( aka low_smooth ) got activated.\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/P_AA_pp/shapovalov/10deg/kappa131/a20.prop\n\u001b[0mcore.import_pose.import_pose: \u001b[0mFile 'inputs/1YY8.clean.pdb' automatically determined to be of type PDB\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: CG on residue ARG 18\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: CD on residue ARG 18\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: NE on residue ARG 18\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: CZ on residue ARG 18\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: NH1 on residue ARG 18\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: NH2 on residue ARG 18\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: CG on residue GLN:NtermProteinFull 214\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m 
missing heavyatom: CD on residue GLN:NtermProteinFull 214\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: OE1 on residue GLN:NtermProteinFull 214\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: NE2 on residue GLN:NtermProteinFull 214\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: CG on residue ARG 452\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: CD on residue ARG 452\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: NE on residue ARG 452\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: CZ on residue ARG 452\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: NH1 on residue ARG 452\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: NH2 on residue ARG 452\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: CG on residue GLN:NtermProteinFull 648\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: CD on residue GLN:NtermProteinFull 648\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: OE1 on residue GLN:NtermProteinFull 648\n\u001b[0mcore.conformation.Conformation: \u001b[0m\u001b[1m[ WARNING ]\u001b[0m missing heavyatom: NE2 on residue GLN:NtermProteinFull 648\n\u001b[0mcore.conformation.Conformation: \u001b[0mFound disulfide between residues 23 88\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 23 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 88 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 23 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 88 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mFound disulfide between residues 134 194\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 134 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 194 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 134 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 194 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mFound disulfide between residues 235 308\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 235 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 308 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 235 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 308 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mFound disulfide between residues 359 415\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 359 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 415 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 359 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 415 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mFound disulfide between residues 457 522\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 457 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 522 CYS\n\u001b[0mcore.conformation.Conformation: 
\u001b[0mcurrent variant for 457 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 522 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mFound disulfide between residues 568 628\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 568 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 628 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 568 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 628 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mFound disulfide between residues 669 742\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 669 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 742 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 669 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 742 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mFound disulfide between residues 793 849\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 793 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 849 CYS\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 793 CYD\n\u001b[0mcore.conformation.Conformation: \u001b[0mcurrent variant for 849 CYD\n\u001b[0mcore.pack.pack_missing_sidechains: \u001b[0mpacking residue number 18 because of missing atom number 6 atom name CG\n\u001b[0mcore.pack.pack_missing_sidechains: \u001b[0mpacking residue number 214 because of missing atom number 6 atom name CG\n\u001b[0mcore.pack.pack_missing_sidechains: \u001b[0mpacking residue number 452 because of missing atom number 6 atom name CG\n\u001b[0mcore.pack.pack_missing_sidechains: \u001b[0mpacking residue number 648 because of missing atom number 6 atom name CG\n\u001b[0mcore.pack.task: \u001b[0mPacker task: initialize from command line()\n\u001b[0mcore.scoring.ScoreFunctionFactory: \u001b[0mSCOREFUNCTION: \u001b[32mref2015\u001b[0m\n\u001b[0mbasic.io.database: \u001b[0mDatabase file opened: scoring/score_functions/elec_cp_reps.dat\n\u001b[0mcore.scoring.elec.util: \u001b[0mRead 40 countpair representative atoms\n\u001b[0mcore.pack.dunbrack.RotamerLibrary: \u001b[0mshapovalov_lib_fixes_enable option is true.\n"
]
],
[
[
"## Programming Exercises\n\n\n1. Use the `Mover` constructs to create a complex folding algorithm. Create a program to do the following:\n 1. Five small moves\n 2. Minimize\n 3. Five shear moves\n 4. Minimize\n 5. Monte Carlo Metropolis criterion\n 6. Repeat a–e 100 times\n 7. Repeat a–f five times, each time decreasing the magnitude of the small and shear moves from 25° to 5° in 5° increments.\n\n\nSketch a flowchart, and submit both the flowchart and your code.",
"_____no_output_____"
],
[
"2. *Ab initio folding algorithm*. Based on the Monte Carlo energy optimization algorithm from Workshop #4, write a complete program that will fold a protein. A suggested algorithm involves preliminary low-resolution modifications by fragment insertion (first 9-mers, then 3-mers), followed by high-resolution refinement using small, shear, and minimization movers. Output both your low-resolution intermediate structure and the final refined, high-resolution decoy.\n\n Test your code by attempting to fold domain 2 of the RecA protein (the last 60 amino acid residues of PDB ID 2REB). How do your results compare with the crystal structure? (Consider both your low-resolution and high-resolution results.) If your lowest-energy conformation is different than the native structure, explain why this is so in terms of the limitations of the computational approach.\n\n *Bonus*: After using the `PyMOL_Mover` or `PyMOL_Observer` to record the trajectory, export the frames and tie them together to create an animation. Search the Internet for “PyMOL animation” for additional tools and tips. Animated GIF files are probably the best quality; MPEG and QuickTime formats are also popular and widely compatible and uploadable to YouTube.",
"_____no_output_____"
],
[
"## Thought Questions\n1. With $kT$ = 1, what is the change in propensity of the rama score component that has a 50% chance of being accepted as a small move?\n\n\n2. How would you test whether an algorithm is effective? That is, what kind of measures can you use? What can you vary within an algorithm to make it more effective?",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [High-Resolution Movers](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/05.01-High-Res-Movers.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Packing & Design](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/06.00-Introduction-to-Packing-and-Design.ipynb) ><p><a href=\"https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/05.02-Refinement-Protocol.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open in Google Colaboratory\"></a>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7c9be0427c3e6f4cbabe416161bc28e81b74bd7 | 6,079 | ipynb | Jupyter Notebook | nbs/08a_tutorial.translation.ipynb | chsafouane/adaptnlp | 34bfb8fef32d1e59f89a94799db87ed6da774965 | [
"Apache-2.0"
] | null | null | null | nbs/08a_tutorial.translation.ipynb | chsafouane/adaptnlp | 34bfb8fef32d1e59f89a94799db87ed6da774965 | [
"Apache-2.0"
] | null | null | null | nbs/08a_tutorial.translation.ipynb | chsafouane/adaptnlp | 34bfb8fef32d1e59f89a94799db87ed6da774965 | [
"Apache-2.0"
] | null | null | null | 22.682836 | 235 | 0.546965 | [
[
[
"# all_slow",
"_____no_output_____"
]
],
[
[
"# Tutorial - Translation\n> Using the Translation API in AdaptNLP",
"_____no_output_____"
],
[
"## Translation\n\nTranslation is the task of producing the input text in another language.\n\nBelow, we'll walk through how we can use AdaptNLP's `EasyTranslator` module to translate text with state-of-the-art models.",
"_____no_output_____"
],
[
"## Getting Started\n\nWe'll first import the `EasyTranslator` class from AdaptNLP:",
"_____no_output_____"
]
],
[
[
"from adaptnlp import EasyTranslator",
"_____no_output_____"
]
],
[
[
"Then we'll write some example text to use:",
"_____no_output_____"
]
],
[
[
"text = [\"Machine learning will take over the world very soon.\",\n \"Machines can speak in many languages.\",]",
"_____no_output_____"
]
],
[
[
"Followed by instantiating the `EasyTranslator` class:",
"_____no_output_____"
]
],
[
[
"translator = EasyTranslator()",
"_____no_output_____"
]
],
[
[
"Next we can translate our text. We pass in the text we wish to translate, optionally a prefix for the t5 model (only used with t5 models), a model name, and any keyword arguments from `Transformers.PreTrainedModel.generate()`.\n\nHere we'll pass in `text`, have our model translate from English to German, and use the `t5-small` model.",
"_____no_output_____"
]
],
[
[
"translations = translator.translate(text = text, t5_prefix=\"translate English to German\", model_name_or_path=\"t5-small\", mini_batch_size=1, min_length=0, max_length=100, early_stopping=True)",
"_____no_output_____"
]
],
[
[
"And we can look at the outputs:",
"_____no_output_____"
]
],
[
[
"print(\"Translations:\\n\")\nfor t in translations:\n print(t, \"\\n\")",
"Translations:\n\nDas Maschinenlernen wird die Welt in Kürze übernehmen. \n\nMaschinen können in vielen Sprachen sprechen. \n\n"
]
],
[
[
"## Finding a Model with the Model Hub",
"_____no_output_____"
],
[
"Using the `HFModelHub` we can search for any translation models in HuggingFace like so:",
"_____no_output_____"
]
],
[
[
"from adaptnlp import HFModelHub\n\nhub = HFModelHub()\nmodels = hub.search_model_by_task('translation'); models",
"_____no_output_____"
]
],
[
[
"From there we can pass in any `HFModelResult` from it. Here we'll use the `t5-small` again:",
"_____no_output_____"
]
],
[
[
"model = models[-1]",
"_____no_output_____"
],
[
"translations = translator.translate(text = text, t5_prefix=\"translate English to German\", model_name_or_path=model, mini_batch_size=1, min_length=0, max_length=100, early_stopping=True)",
"_____no_output_____"
]
],
[
[
"And see that we get similar results:",
"_____no_output_____"
]
],
[
[
"print(\"Translations:\\n\")\nfor t in translations:\n print(t, \"\\n\")",
"Translations:\n\nDas Maschinenlernen wird die Welt in Kürze übernehmen. \n\nMaschinen können in vielen Sprachen sprechen. \n\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7c9de08beb8c5084143298d0dc01829c424885d | 11,635 | ipynb | Jupyter Notebook | scripts_modules/CoLab_Random_Forest_Regression.ipynb | annapav7/NO2-tropomi_prediction_analysis | 106269c33f30574ac64bdc15155b97c9ae222ac1 | [
"BSD-3-Clause"
] | null | null | null | scripts_modules/CoLab_Random_Forest_Regression.ipynb | annapav7/NO2-tropomi_prediction_analysis | 106269c33f30574ac64bdc15155b97c9ae222ac1 | [
"BSD-3-Clause"
] | null | null | null | scripts_modules/CoLab_Random_Forest_Regression.ipynb | annapav7/NO2-tropomi_prediction_analysis | 106269c33f30574ac64bdc15155b97c9ae222ac1 | [
"BSD-3-Clause"
] | null | null | null | 29.381313 | 344 | 0.512333 | [
[
[
"# NO2 Prediction by using Machine Learning Regression Analyses in Google Earth Engine\n",
"_____no_output_____"
],
[
"## **Machine Learning can create a Model to Predict specific value base on existing data set (dependent and independent values).**",
"_____no_output_____"
],
[
"## **Introduction**\n### **Nitrogen Dioxide (NO2) air pollution**.\nThe World Health Organization estimates that air pollution kills 4.2 million people every year. \nThe main effect of breathing in raised levels of NO2 is the increased likelihood of respiratory problems. NO2 inflames the lining of the lungs, and it can reduce immunity to lung infections.\nThere are connections between respiratory deceases / also exposure to viruses and more deadly cases.\n\n##### ***Sources of NO2***:\nThe rapid population growth, \nThe fast urbanization: \n* Industrial facilities\n* Fossil fuels (coal, oil and gas)\n* Increase of transportation – 80 %.\n\n\n\nThe affect air pollution (NO2): population health, and global warming.\n",
"_____no_output_____"
],
[
"## **Objective**\nThe theme of this project is to create a Model to Predict specific value (NO2) for past years base on existing data set (Landsat and Sentinel-5P(TROPOMI) images) for 2019. These Prediction can be used for Monitoring and Statistical Analyses of developing NO2 over Time.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"## **DataSet:**\nThe Sentinel-5P satellite with TROPOspheric Monitoring Instrument (TROPOMI) instrument provides high spectral resolution (7x3.5 km2) for all spectral bands to register level of NO2. \nTROPOMI available from October 13, 2017.\nLandsat satellite launched in 1972 and images are available for more then 40 years.",
"_____no_output_____"
],
[
"## **Concept:**\nRegression: \nThe model can make generalizations about new data. The model has been learned from the training data, and can be used to predict the result of test data: here, we might be given an x-value, and the model would allow us to predict the y value. By drawing this separating line, we have learned a model which can generalize to new data.",
"_____no_output_____"
],
[
"## 1._ Install libraries",
"_____no_output_____"
]
],
[
[
"!pip install earthengine-api\n",
"_____no_output_____"
]
],
[
[
"## 2._ Establish connection",
"_____no_output_____"
]
],
[
[
"!earthengine authenticate",
"_____no_output_____"
]
],
[
[
"**`Complete End to End Python code for Random Forest Regression:`**",
"_____no_output_____"
]
],
[
[
"# Import necessary Libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport rasterio as rio\nfrom rasterio.plot import show\n\n# Import the data ( CSV formats)\ndata = pd.read_csv('name_of_file.csv')\ndata.head()",
"_____no_output_____"
],
[
"# Store the Data in form of dependent and independent variables separatly\nX = data.ilog[:, 0:1].values\ny = data.ilog[:, 1].values",
"_____no_output_____"
],
[
"# Import the Random Forest Regressor\nfrom sklearn.ensemble import RandomForestRegressor\n\n# Craete a Random Forest Regressor object from Random Forest Regressor Class\nRFReg = RandomForestRegressor(n_estimators = 100, random_state = 0)\n\n# Fit the random forest regressor with Training Data represented by X_train and y_train\nRFReg.fit(X_train, y_train)",
"_____no_output_____"
],
[
"#Predicted Height from test dataset w.r.t Random Forest Regression\ny_predict_rfr = RFReg.predict((X_test))\n\n#Model Evaluation using R-Square for Random Forest Regression\nfrom sklearn import metrics\nr_square = metrics.r2_score(y_test, y_predict_rfr)\nprint('R-Square Error associated with Random Forest Regression is:', r_square)",
"_____no_output_____"
],
[
"''' Visualise the Random Forest Regression by creating range of values from min value of X_train to max value of X_train \nhaving a difference of 0.01 between two consecutive values'''\nX_val = np.arange(min(X_train), max(X_train), 0.01) \n \n#Reshape the data into a len(X_val)*1 array in order to make a column out of the X_val values \nX_val = X_val.reshape((len(X_val), 1)) \n \n#Define a scatter plot for training data \nplt.scatter(X_train, y_train, color = 'blue') \n \n#Plot the predicted data \nplt.plot(X_val, RFReg.predict(X_val), color = 'red') \n \n#Define the title \nplt.title('NO2 prediction using Random Forest Regression') \n \n#Define X axis label \nplt.xlabel('NDVI') \n \n#Define Y axis label \nplt.ylabel('Level of NO2') \n\n#Set the size of the plot for better clarity\nplt.figure(figsize=(1,1))\n \n#Draw the plot \nplt.show()",
"_____no_output_____"
],
[
"# Predicting Height based on Age using Random Forest Regression \nno2_pred = RFReg.predict([[41]])\nprint(\"Predicted NO2t: % d\"% no2_pred)",
"_____no_output_____"
]
],
[
[
"**Model Evaluation**",
"_____no_output_____"
]
],
[
[
"#Model Evaluation using Mean Square Error (MSE)\nprint('Mean Squared Error:', metrics.mean_squared_error(y_test, y_predict))",
"_____no_output_____"
],
[
"#Model Evaluation using Root Mean Square Error (RMSE)\nprint('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_predict)))",
"_____no_output_____"
],
[
"#Model Evaluation using Mean Absolute Error (MAE)\nprint('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_predict))",
"_____no_output_____"
],
[
"#Model Evaluation using R-Square\nfrom sklearn import metrics\nr_square = metrics.r2_score(y_test, y_predict)\nprint('R-Square Error:', r_square)",
"_____no_output_____"
],
[
"#For Illustration Purpose Only. \n#Considering Multiple Linear Equation with two Variables : grade = a0 + a1*time_to_study + a2*class_participation\n#Model Evaluation using Adjusted R-Square. \n# Here n = no. of observations and p = no. of independent variables\n\nn = 50\np = 2\nAdj_r_square = 1-(1-r_square)*(n-1)/(n-p-1)\nprint('Adjusted R-Square Error:', Adj_r_square)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7c9de708c31c3b119f81280dc08f2ca5755b31b | 7,214 | ipynb | Jupyter Notebook | python/+Tkinter.ipynb | lafamila/lafamila | 1ed2463c6d6143fd5aee69820c371cb4641f0d2a | [
"MIT"
] | 2 | 2017-12-29T05:51:30.000Z | 2018-11-25T22:04:22.000Z | python/+Tkinter.ipynb | lafamila/lafamila | 1ed2463c6d6143fd5aee69820c371cb4641f0d2a | [
"MIT"
] | null | null | null | python/+Tkinter.ipynb | lafamila/lafamila | 1ed2463c6d6143fd5aee69820c371cb4641f0d2a | [
"MIT"
] | 1 | 2021-04-15T07:41:25.000Z | 2021-04-15T07:41:25.000Z | 68.056604 | 1,781 | 0.628639 | [
[
[
"from Tkinter import *\nfrom PIL import ImageTk,Image\ntop = Tk()\n\n\n\n#uploaded background picture, referenced stackoverflow\n\nbg_image = PhotoImage(file='C:\\\\Users\\\\lafamila\\\\Desktop')\nbg_image_label =Label(top, image=bg_image)\nbg = Label(top)\nbg=Label(top,\n\n text=\"BATTLESHIP\",\n fg = \"black\",\n\n font = (\"Verdana 40 bold\")\n )\nbg.pack(pady=130)\nbg_image_label.image = bg_image\nbg_image_label.place(x=0, y=0, relwidth=1, relheight=1)\n\ndef buttonClick(event):\n top.geometry(\"500x500\")\n\n # uploaded background picture, referenced stackoverflow\n\n bg_image = PhotoImage(file='C:\\\\Users\\\\lafamila\\\\Desktop')\n bg_image_label = Label(top, image=bg_image)\n bg = Label(top)\n bg = Label(top,\n\n text=\"PREPARE FOR BATTLE\",\n fg=\"black\",\n\n font=(\"Verdana 25 bold\")\n )\n bg.pack(pady=2)\n bg_image_label.image = bg_image\n bg_image_label.place(x=0, y=0, relwidth=1, relheight=1)\n\n B1 = Button(top, text=\"YOUR IP\", bd=10, bg='red', fg='black')\n B1.place(x=250, y=135)\n B1.config(font=(\"YOUR IP\", 20, 'bold'))\n B2 = Button(top, text=\"PLAYER 2\", bd=10, bg='red', fg='black')\n B2.place(x=250, y=235)\n B2.config(font=(\"PLAYER 2\", 20, 'bold'))\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e7c9e32be21089b45fd942c6f3f5c7d7a130355d | 13,280 | ipynb | Jupyter Notebook | notebooks/02.Interact/02.00-Using-Interact.ipynb | ibdafna/tutorial | 81cb9bbf0a97c4ec93f06fed5f7743fc635179a3 | [
"BSD-3-Clause"
] | null | null | null | notebooks/02.Interact/02.00-Using-Interact.ipynb | ibdafna/tutorial | 81cb9bbf0a97c4ec93f06fed5f7743fc635179a3 | [
"BSD-3-Clause"
] | null | null | null | notebooks/02.Interact/02.00-Using-Interact.ipynb | ibdafna/tutorial | 81cb9bbf0a97c4ec93f06fed5f7743fc635179a3 | [
"BSD-3-Clause"
] | null | null | null | 27.494824 | 314 | 0.573419 | [
[
[
"<!--NAVIGATION-->\n< [Overview](01.00-overview.ipynb) | [Contents](00.00-index.ipynb) | [OPTIONAL - More about interact](02.01-OPTIONAL-More-About-Interact.ipynb) >",
"_____no_output_____"
],
[
"# Widgets without writing widgets: interact",
"_____no_output_____"
],
[
"The `interact` function (`ipywidgets.interact`) automatically creates user interface (UI) controls for exploring code and data interactively. It is the easiest way to get started using IPython's widgets.",
"_____no_output_____"
]
],
[
[
"from ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets",
"_____no_output_____"
]
],
[
[
"## Basic `interact`",
"_____no_output_____"
],
[
"At the most basic level, `interact` autogenerates UI controls for function arguments, and then calls the function with those arguments when you manipulate the controls interactively. To use `interact`, you need to define a function that you want to explore. Here is a function that triples its argument, `x`.",
"_____no_output_____"
]
],
[
[
"def f(x):\n return 3 * x",
"_____no_output_____"
]
],
[
[
"When you pass this function as the first argument to `interact` along with an integer keyword argument (`x=10`), a slider is generated and bound to the function parameter.",
"_____no_output_____"
]
],
[
[
"interact(f, x=10);",
"_____no_output_____"
]
],
[
[
"When you move the slider, the function is called, and the return value is printed.\n\nIf you pass `True` or `False`, `interact` will generate a checkbox:",
"_____no_output_____"
]
],
[
[
"interact(f, x=True);",
"_____no_output_____"
]
],
[
[
"If you pass a string, `interact` will generate a `Text` field.",
"_____no_output_____"
]
],
[
[
"interact(f, x='Hi there!');",
"_____no_output_____"
]
],
[
[
"`interact` can also be used as a decorator. This allows you to define a function and interact with it in a single shot. As this example shows, `interact` also works with functions that have multiple arguments.",
"_____no_output_____"
]
],
[
[
"@widgets.interact(x=True, y=1.0)\ndef g(x, y):\n return (x, y)",
"_____no_output_____"
]
],
[
[
"## Fixing arguments using `fixed`",
"_____no_output_____"
],
[
"There are times when you may want to explore a function using `interact`, but fix one or more of its arguments to specific values. This can be accomplished by wrapping values with the `fixed` function.",
"_____no_output_____"
]
],
[
[
"def h(p, q):\n return (p, q)",
"_____no_output_____"
]
],
[
[
"When we call `interact`, we pass `fixed(20)` for q to hold it fixed at a value of `20`.",
"_____no_output_____"
]
],
[
[
"interact(h, p=5, q=fixed(20));",
"_____no_output_____"
]
],
[
[
"Notice that a slider is only produced for `p` as the value of `q` is fixed.",
"_____no_output_____"
],
[
"## Widget abbreviations",
"_____no_output_____"
],
[
"When you pass an integer-valued keyword argument of `10` (`x=10`) to `interact`, it generates an integer-valued slider control with a range of `[-10, +3*10]`. In this case, `10` is an *abbreviation* for an actual slider widget:\n\n```python\nIntSlider(min=-10, max=30, step=1, value=10)\n```\n\nIn fact, we can get the same result if we pass this `IntSlider` as the keyword argument for `x`:",
"_____no_output_____"
]
],
[
[
"interact(f, x=widgets.IntSlider(min=-10, max=30, step=1, value=10));",
"_____no_output_____"
]
],
[
[
"This examples clarifies how `interact` processes its keyword arguments:\n\n1. If the keyword argument is a `Widget` instance with a `value` attribute, that widget is used. Any widget with a `value` attribute can be used, even custom ones.\n2. Otherwise, the value is treated as a *widget abbreviation* that is converted to a widget before it is used.\n\nThe following table gives an overview of different widget abbreviations:\n\n<table class=\"table table-condensed table-bordered\">\n <tr><td><strong>Keyword argument</strong></td><td><strong>Widget</strong></td></tr> \n <tr><td>`True` or `False`</td><td>Checkbox</td></tr> \n <tr><td>`'Hi there'`</td><td>Text</td></tr>\n <tr><td>`value` or `(min,max)` or `(min,max,step)` if integers are passed</td><td>IntSlider</td></tr>\n <tr><td>`value` or `(min,max)` or `(min,max,step)` if floats are passed</td><td>FloatSlider</td></tr>\n <tr><td>`['orange','apple']` or `[('one', 1), ('two', 2)]`</td><td>Dropdown</td></tr>\n</table>\nNote that a dropdown is used if a list or a list of tuples is given (signifying discrete choices), and a slider is used if a tuple is given (signifying a range).",
"_____no_output_____"
],
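[
"For instance, any other control with a `value` attribute — such as a `FloatText` box — can be passed in exactly the same way as the slider above:\n\n```python\ninteract(f, x=widgets.FloatText(value=7.5));\n```",
"_____no_output_____"
],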
[
"You have seen how the checkbox and text widgets work above. Here, more details about the different abbreviations for sliders and dropdowns are given.\n\nIf a 2-tuple of integers is passed `(min, max)`, an integer-valued slider is produced with those minimum and maximum values (inclusively). In this case, the default step size of `1` is used.",
"_____no_output_____"
]
],
[
[
"interact(f, x=(0, 4));",
"_____no_output_____"
]
],
[
[
"A `FloatSlider` is generated if any of the values are floating point. The step size can be changed by passing a third element in the tuple.",
"_____no_output_____"
]
],
[
[
"interact(f, x=(0, 10, 0.01));",
"_____no_output_____"
]
],
[
[
"### Exercise: Reverse some text\n\nHere is a function that takes text as an input and returns the text backwards.",
"_____no_output_____"
]
],
[
[
"def reverse(x):\n return x[::-1]\n\nreverse('I am printed backwards.')",
"_____no_output_____"
]
],
[
[
"Use `interact` to make interactive controls for this function.",
"_____no_output_____"
]
],
[
[
"# %load solutions/interact-basic-list/reverse-text.py\n",
"_____no_output_____"
]
],
[
[
"For both integer and float-valued sliders, you can pick the initial value of the widget by passing a default keyword argument to the underlying Python function. Here we set the initial value of a float slider to `5.5`.",
"_____no_output_____"
]
],
[
[
"@interact(x=(0.0, 20.0, 0.5))\ndef h(x=5.5):\n return x",
"_____no_output_____"
]
],
[
[
"Dropdown menus are constructed by passing a list of strings. In this case, the strings are both used as the names in the dropdown menu UI and passed to the underlying Python function.",
"_____no_output_____"
]
],
[
[
"interact(f, x=['apples','oranges']);",
"_____no_output_____"
]
],
[
[
"If you want a dropdown menu that passes non-string values to the Python function, you can pass a list of tuples of the form `('label', value)`. The first items are the names in the dropdown menu UI and the second items are values that are the arguments passed to the underlying Python function.",
"_____no_output_____"
]
],
[
[
"interact(f, x=[('one', 10), ('two', 20)]);",
"_____no_output_____"
]
],
[
[
"## Basic interactive plot\n\nThough the examples so far in this notebook had very basic output, more interesting possibilities are straightforward. \n\nThe function below plots a straight line whose slope and intercept are given by its arguments.",
"_____no_output_____"
]
],
[
[
"%matplotlib widget\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef f(m, b):\n plt.figure(2)\n plt.clf()\n plt.grid()\n x = np.linspace(-10, 10, num=1000)\n plt.plot(x, m * x + b)\n plt.ylim(-5, 5)\n plt.show()\n",
"_____no_output_____"
]
],
[
[
"The interactive below displays a line whose slope and intercept is set by the sliders. Note that if the variable containing the widget, `interactive_plot`, is the last thing in the cell it is displayed.",
"_____no_output_____"
]
],
[
[
"interact(f, m=(-2.0, 2.0), b=(-3, 3, 0.5))",
"_____no_output_____"
]
],
[
[
"### Exercise: Make a plot\n\nHere is a python function that, given $k$ and $p$, plots $f(x) = \\sin(k x - p)$.\n",
"_____no_output_____"
]
],
[
[
"def plot_f(k, p):\n plt.figure(5)\n plt.clf()\n plt.grid()\n x = np.linspace(0, 4 * np.pi)\n y = np.sin(k*x - p)\n plt.plot(x, y)\n plt.show()",
"_____no_output_____"
]
],
[
[
"Copy the above function definition and make it interactive using `interact`, so that there are sliders for the parameters $k$ and $p$, where $0.5\\leq k \\leq 2$ and $0 \\leq p \\leq 2\\pi$ (hint: use `np.pi` for $\\pi$).",
"_____no_output_____"
]
],
[
[
"# %load solutions/interact-basic-list/plot-function.py",
"_____no_output_____"
]
],
[
[
"# For more information \n\nSee the notebook [02.01-More About interact](02.01-OPTIONAL-More-About-Interact.ipynb) for more information about other ways of generating interactive controls for functions and for details about how to control when sliders are updated.\n\nFor more extended examples of `interact` and `interactive`, see [the example in the ipywidgets source repository](https://github.com/jupyter-widgets/ipywidgets/blob/master/docs/source/examples/Index.ipynb).",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [Overview](01.00-overview.ipynb) | [Contents](00.00-index.ipynb) | [OPTIONAL - More about interact](02.01-OPTIONAL-More-About-Interact.ipynb) >",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7c9e481699dc9717813b85f3f246a849cc35ae5 | 140,383 | ipynb | Jupyter Notebook | examples/experimental_notebooks/IETF Participants.ipynb | nllz/bigbang | d4fef7eb41ae04e51f4e369de5a721c66231202b | [
"MIT"
] | 4 | 2016-05-25T16:13:44.000Z | 2017-11-06T15:16:30.000Z | examples/experimental_notebooks/IETF Participants.ipynb | nllz/bigbang | d4fef7eb41ae04e51f4e369de5a721c66231202b | [
"MIT"
] | 20 | 2016-06-13T15:28:56.000Z | 2016-07-10T17:58:17.000Z | examples/experimental_notebooks/IETF Participants.ipynb | nllz/bigbang | d4fef7eb41ae04e51f4e369de5a721c66231202b | [
"MIT"
] | 8 | 2016-05-25T11:47:32.000Z | 2016-07-06T08:29:00.000Z | 83.56131 | 22,580 | 0.786434 | [
[
[
"We want to analyze participants and patterns of participation across IETF groups. How many people participate, in which groups, how does affiliation, gender, RFC authorship or other characteristics relate to levels of participation, and a variety of other related questions. How do groups relate to one another? Which participants provide important connections between groups?",
"_____no_output_____"
],
[
"## Setup and gather data",
"_____no_output_____"
],
[
"Start by importing the necessary libraries.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport bigbang.ingress.mailman as mailman\nimport bigbang.analysis.graph as graph\nimport bigbang.analysis.process as process\nfrom bigbang.parse import get_date\nfrom bigbang.archive import Archive\nimport bigbang.utils as utils\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport pytz\nimport pickle\nimport os\nimport csv\nimport re\nimport scipy\nimport scipy.cluster.hierarchy as sch\nimport email",
"_____no_output_____"
],
[
"#pd.options.display.mpl_style = 'default' # pandas has a set of preferred graph formatting options\nplt.rcParams['axes.facecolor'] = 'white'\nimport seaborn as sns\nsns.set()\nsns.set_style(\"white\")",
"_____no_output_____"
]
],
[
[
"Let's start with a single IETF mailing list. (Later, we can expand to all current groups, or all IETF lists ever.)",
"_____no_output_____"
]
],
[
[
"list_url = '6lo' # perpass happens to be one that I subscribe to\n\nietf_archives_dir = '../archives' # relative location of the ietf-archives directory/repo\n\nlist_archive = mailman.open_list_archives(list_url, ietf_archives_dir)\nactivity = Archive(list_archive).get_activity()",
"/home/lem/Data/bigbang/bigbang/mailman.py:138: UserWarning: No mailing list name found at 6lo\n warnings.warn(\"No mailing list name found at %s\" % url)\n"
],
[
"people = None\npeople = pd.DataFrame(activity.sum(0), columns=['6lo']) # sum the message count, rather than by date",
"_____no_output_____"
],
[
"people.describe()",
"_____no_output_____"
]
],
[
[
"Now repeat, parsing the archives and collecting the activities for all the mailing lists in the corpus. To make this faster, we try to open pre-created `-activity.csv` files which contain the activity summary for the full list archive. These files are created with `bin/mail_to_activity.py` or might be included in the mailing list archive repository.",
"_____no_output_____"
]
],
[
[
"f = open('../examples/mm.ietf.org.txt', 'r')\nietf_lists = set(f.readlines()) # remove duplicates, which is a bug in list maintenance",
"_____no_output_____"
],
[
"list_activities = []\n\nfor list_url in ietf_lists:\n try:\n activity_summary = mailman.open_activity_summary(list_url, ietf_archives_dir)\n if activity_summary is not None:\n list_activities.append((list_url, activity_summary))\n except Exception as e:\n print(str(e))",
"/home/lem/Data/bigbang/bigbang/mailman.py:138: UserWarning: No mailing list name found at \n warnings.warn(\"No mailing list name found at %s\" % url)\n"
],
[
"len(list_activities)",
"_____no_output_____"
]
],
[
[
"Merge all of the activity summaries together, so that every row is a \"From\" field, with a column for every mailing list and a cell that includes the number of messages sent to that list. This will be a very sparse, 2-d table. **This operation is a little slow.** Don't repeat this operation without recreating `people` from the cells above.",
"_____no_output_____"
]
],
[
[
"list_columns = []\nfor (list_url, activity_summary) in list_activities:\n list_name = mailman.get_list_name(list_url)\n activity_summary.rename(columns={'Message Count': list_name}, inplace=True) # name the message count column for the list\n people = pd.merge(people, activity_summary, how='outer', left_index=True, right_index=True)\n list_columns.append(list_name) # keep a list of the columns that specifically represent mailing list message counts",
"_____no_output_____"
],
[
"# the original message column was duplicated during the merge process, so we remove it here\npeople = people.drop(columns=['6lo_y'])\npeople = people.rename(columns={'6lo_x':'6lo'})",
"_____no_output_____"
],
[
"people.describe()",
"_____no_output_____"
],
[
"# not sure how the index ended up with NaN values, but need to change them to strings here so additional steps will work\nnew_index = people.index.fillna('missing')\npeople.index = new_index",
"_____no_output_____"
]
],
[
[
"Split out the email address and header name from the From header we started with.",
"_____no_output_____"
]
],
[
[
"froms = pd.Series(people.index)\nemails = froms.apply(lambda x: email.utils.parseaddr(x)[1])\nemails.index = people.index\nnames = froms.apply(lambda x: email.utils.parseaddr(x)[0])\nnames.index = people.index\npeople['email'] = emails\npeople['name'] = names",
"_____no_output_____"
]
],
[
[
"Let's create some summary statistical columns.",
"_____no_output_____"
]
],
[
[
"people['Total Messages'] = people[list_columns].sum(axis=1)\npeople['Number of Groups'] = people[list_columns].count(axis=1)\npeople['Median Messages per Group'] = people[list_columns].median(axis=1)",
"_____no_output_____"
],
[
"people['Total Messages'].sum()",
"_____no_output_____"
]
],
[
[
"In this corpus, **101,510** \"people\" sent a combined total of **1.2 million messages**. Most people sent only 1 message.",
"_____no_output_____"
],
[
"## Participation patterns",
"_____no_output_____"
],
[
"The vast majority of people send only a few messages, and to only a couple of lists. (These histograms use a log axis for Y, without which you couldn't even see the columns besides the first.)",
"_____no_output_____"
]
],
[
[
"people[['Total Messages']].plot(kind='hist', bins=100, logy=True, logx=False)\npeople[['Number of Groups']].plot(kind='hist', bins=100, logy=True, logx=False)",
"_____no_output_____"
]
],
[
[
"Let's limit our analysis for now to people who have sent at least 5 messages. We will also create log base 10 versions of our summary columns for easier graphing later.",
"_____no_output_____"
]
],
[
[
"working = people[people['Total Messages'] > 5]\n\nworking['Total Messages (log)'] = np.log10(working['Total Messages'])\nworking['Number of Groups (log)'] = np.log10(working['Number of Groups'])\n",
"/home/lem/.local/lib/python2.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/home/lem/.local/lib/python2.7/site-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n after removing the cwd from sys.path.\n"
]
],
[
[
"The median number of messages that a user sends to a group is also heavily weighted towards a small number, but the curve doesn't seem to drop off in the same extreme manner. There is a non-random tendency to send some messages to a group?",
"_____no_output_____"
]
],
[
[
"working[['Median Messages per Group']].plot(kind='hist', bins=100, logy=True)",
"_____no_output_____"
]
],
[
[
"Is there a relationship between the number of groups that a user has sent messages to and the number of messages that user has sent (total, or the median number to groups)?",
"_____no_output_____"
]
],
[
[
"working.plot.scatter('Number of Groups','Total Messages', xlim=(1,300), ylim=(1,20000), logx=False, logy=True)",
"_____no_output_____"
]
],
[
[
"It appears that there are interesting outliers here. Some who send a couple messages each to a large number of groups, but then a separate group of outliers that sends lots of messages and to lots of groups. That might be an elite component worthy of separate analysis.",
"_____no_output_____"
],
[
"A density graph will show, however, that while there are people who send many messages to a small number of groups, still, most people are clustered around sending few messages, to few groups.",
"_____no_output_____"
]
],
[
[
"sns.jointplot(x='Number of Groups',y='Total Messages (log)', data=working, kind=\"kde\", xlim=(0,50), ylim=(0,3));",
"_____no_output_____"
]
],
[
[
"## Relationships between groups and participants",
"_____no_output_____"
],
[
"Can we learn implicit relationships between groups based on the messaging patterns of participants?",
"_____no_output_____"
],
[
"### PCA",
"_____no_output_____"
],
[
"We want to work with just the data of people and how many messages they sent to each group.",
"_____no_output_____"
]
],
[
[
"df = people[people['Total Messages'] > 5]\n\ndf = df.drop(columns=['email','name','Total Messages','Number of Groups','Median Messages per Group'])\ndf = df.fillna(0)",
"_____no_output_____"
]
],
[
[
"Principal Component Analysis (PCA) will seek to explain the most variance in the samples (participants) based on the features (messages sent to different lists). Let's try with two components and see what PCA sees as the most distinguishing dimensions of IETF participation.",
"_____no_output_____"
]
],
[
[
"import sklearn\nfrom sklearn.decomposition import PCA\n\nscaled = sklearn.preprocessing.maxabs_scale(df)\n\npca = PCA(n_components=2, whiten=True)\npca.fit(scaled)",
"_____no_output_____"
],
[
"components_frame = pd.DataFrame(pca.components_)\ncomponents_frame.columns = df.columns\ncomponents_frame",
"_____no_output_____"
],
[
"for i, row in components_frame.iterrows():\n print('\\nComponent %d' % i)\n r = row.sort_values(ascending=False)\n print('Most positive correlation:\\n %s' % r[:5].index.values)\n print('Most negative correlation:\\n %s' % r[-5:].index.values)",
"\nComponent 0\nMost positive correlation:\n ['93attendees' '88attendees' '77attendees' '87attendees' 'bofchairs']\nMost negative correlation:\n ['tap' 'eos' 'dmarc-report' 'web' 'spam']\n\nComponent 1\nMost positive correlation:\n ['89all' '90all' '91all' '82all' '94all']\nMost negative correlation:\n ['ippm' 'rtgwg' 'i-d-announce' 'l2vpn' 'l3vpn']\n"
]
],
[
[
"Component 0 is mostly routing (Layer 3 and Layer 2 VPNs, the routing area working group, interdomain routing. (IP Performance/Measurement seems different -- is it related?)\n\nComponent 1 is all Internet area groups, mostly related to IPv6, and specifically different groups working on mobility-related extensions to IPv6.",
"_____no_output_____"
],
[
"When data was unscaled, PCA components seemed to connect to ops and ipv6, a significantly different result.\n\nFor our two components, we can see which features are most positively correlated and which are most negatively correlated. On positive correlation, looking up these groups, it seems like there is some meaningful coherence here. On Component 0, we see groups in the \"ops\" area: groups related to the management, configuration and measurement of networks. On the other component, we see groups in the Internet and transport areas: groups related to IPv6, the transport area and PSTN transport.\n\nThat we see such different results when the data is first scaled by each feature perhaps suggests that the initial analysis was just picking up on the largest groups.",
"_____no_output_____"
]
],
[
[
"pca.explained_variance_",
"_____no_output_____"
]
],
[
[
"The explained variance by our components seems extremely tiny.",
"_____no_output_____"
],
[
"With two components (or the two most significant components), we can attempt a basic visualization as a scatter plot.",
"_____no_output_____"
]
],
[
[
"component_df = pd.DataFrame(pca.transform(df), columns=['PCA%i' % i for i in range(2)], index=df.index)\ncomponent_df.plot.scatter(x='PCA0',y='PCA1')",
"_____no_output_____"
]
],
[
[
"And with a larger number of components?",
"_____no_output_____"
]
],
[
[
"pca = PCA(n_components=10, whiten=True)\npca.fit(scaled)\ncomponents_frame = pd.DataFrame(pca.components_)\ncomponents_frame.columns = df.columns\nfor i, row in components_frame.iterrows():\n print('\\nComponent %d' % i)\n r = row.sort_values(ascending=False)\n print('Most positive correlation:\\n %s' % r[:5].index.values)\n print('Most negative correlation:\\n %s' % r[-5:].index.values)",
"\nComponent 0\nMost positive correlation:\n ['93attendees' '88attendees' '77attendees' '87attendees' 'bofchairs']\nMost negative correlation:\n ['tap' 'eos' 'dmarc-report' 'web' 'spam']\n\nComponent 1\nMost positive correlation:\n ['89all' '90all' '91all' '82all' '94all']\nMost negative correlation:\n ['ippm' 'rtgwg' 'i-d-announce' 'l2vpn' 'l3vpn']\n\nComponent 2\nMost positive correlation:\n ['l3vpn' 'l2vpn' 'adslmib' 'i-d-announce' 'psamp-text']\nMost negative correlation:\n ['100attendees' '96attendees' '88attendees' '97attendees' '93attendees']\n\nComponent 3\nMost positive correlation:\n ['88attendees' 'ngtrans' '94attendees' '96attendees' '93attendees']\nMost negative correlation:\n ['websec' 'happiana' 'art' 'http-auth' 'apps-discuss']\n\nComponent 4\nMost positive correlation:\n ['97attendees' '96attendees' 'rtgwg' '99attendees' 'rtg-yang-coord']\nMost negative correlation:\n ['monami6' '68attendees' 'mip6' '77attendees' '72attendees']\n\nComponent 5\nMost positive correlation:\n ['ianaplan' 'iasa20' 'v6ops' 'mtgvenue' 'ipv6']\nMost negative correlation:\n ['martini' '87attendees' '81attendees' 'rai' 'dispatch']\n\nComponent 6\nMost positive correlation:\n ['72attendees' 'opsawg' 'netconf' 'mib-doctors' 'supa']\nMost negative correlation:\n ['94attendees' '99attendees' '96attendees' '100attendees' '97attendees']\n\nComponent 7\nMost positive correlation:\n ['dispatch' 'rai' 'p2psip' 'martini' 'avtext']\nMost negative correlation:\n ['ietf-message-headers' 'hubmib' 'happiana' 'psamp-text' 'apps-discuss']\n\nComponent 8\nMost positive correlation:\n ['72attendees' 'idr' '81attendees' '74attendees' '75attendees']\nMost negative correlation:\n ['bofchairs' 'sipcore' 'martini' 'rai' 'dispatch']\n\nComponent 9\nMost positive correlation:\n ['tools-development' 'ietf-sow' 'agenda-tool' 'ccg' 'iola-wgcharter-tool']\nMost negative correlation:\n ['mcic' 'vpn4dc' 'wgguide' 'apps-discuss' '77attendees']\n"
]
],
[
[
"There are definitely subject domain areas in these lists (the last one, for example, on groups related to phone calls and emergency services). Also interesting is the presence of some meta-topics, like `mtgvenue` or `policy` or `iasa20` (an IETF governance topic).",
"_____no_output_____"
],
[
"_Future work: we might be able to use this sparse matrix of participation in different lists to provide recommendations of similarity. \"People who send messages to the same mix of groups you send to also like this other list\" or \"People who like this list, also often like this list\"._",
"_____no_output_____"
],
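[
"One rough sketch of that idea: cosine similarity between the list columns of the participant-by-list matrix `df` built above gives a simple list-to-list similarity measure (a minimal sketch, assuming `df` is still in memory at this point and that `ipv6` is one of its columns):\n\n```python\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n# rows of df are participants, columns are mailing lists, so compare the columns\nlist_sim = pd.DataFrame(cosine_similarity(df.T.values),\n                        index=df.columns, columns=df.columns)\n\n# the five lists whose participation profile is closest to ipv6, excluding itself\nlist_sim['ipv6'].drop('ipv6').sort_values(ascending=False)[:5]\n```",
"_____no_output_____"
],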
[
"### Betweenness, PageRank and graph visualization",
"_____no_output_____"
],
[
"Because we have people and the groups they send to, we can construct a _bipartite graph_.",
"_____no_output_____"
],
[
"We'll use just the top 5000 people, in order to make complicated calculations run faster.",
"_____no_output_____"
]
],
[
[
"df = people.sort_values(by=\"Total Messages\",ascending=False)[:5000]\ndf = df.drop(columns=['email','name','Total Messages','Number of Groups','Median Messages per Group'])\ndf = df.fillna(0)",
"_____no_output_____"
],
[
"import networkx as nx\n\nG = nx.Graph()\n\nfor group in df.columns:\n G.add_node(group,type=\"group\")\n \nfor name, data in df.iterrows():\n G.add_node(name,type=\"person\")\n \n for group, weight in data.items():\n if weight > 0:\n G.add_edge(name,group,weight=weight)",
"_____no_output_____"
],
[
"nx.is_bipartite(G)",
"_____no_output_____"
]
],
[
[
"Yep, it is bipartite! Now, we can export a graph file for use in visualization software Gephi.",
"_____no_output_____"
]
],
[
[
"nx.write_gexf(G,'ietf-participation-bipartite.gexf')",
"_____no_output_____"
],
[
"people_nodes, group_nodes = nx.algorithms.bipartite.sets(G)",
"_____no_output_____"
]
],
[
[
"We can calculate the \"PageRank\" of each person and group, using the weights (number of messages) between groups and people to distribute a kind of influence.",
"_____no_output_____"
]
],
[
[
"pr = nx.pagerank(G, weight=\"weight\")",
"_____no_output_____"
],
[
"nx.set_node_attributes(G, \"pagerank\", pr)",
"_____no_output_____"
],
[
"sorted([node for node in list(G.nodes(data=True)) \n if node[1]['type'] == 'group'], \n key=lambda x: x[1]['pagerank'], \n reverse =True)[:10]",
"_____no_output_____"
],
[
"sorted([node for node in list(G.nodes(data=True)) \n if node[1]['type'] == 'person'], \n key=lambda x: x[1]['pagerank'], \n reverse =True)[:10]",
"_____no_output_____"
]
],
[
[
"However, PageRank is probably less informative than usual here, because this is a bipartite, non-directed graph. Instead, let's calculate a normalized, closeness centrality specific to bipartite graphs.",
"_____no_output_____"
]
],
[
[
"person_nodes = [node[0] for node in G.nodes(data=True) if node[1]['type'] == 'person']",
"_____no_output_____"
]
],
[
[
"**NB: Slow operation for large graphs.**",
"_____no_output_____"
]
],
[
[
"cc = nx.algorithms.bipartite.centrality.closeness_centrality(G, person_nodes, normalized=True)",
"_____no_output_____"
],
[
"for node, value in list(cc.items()):\n if type(node) not in [str, str]:\n print(node)\n print(value)",
"5000.0\n0.0\n"
],
[
"del cc[14350.0] # remove a spurious node value",
"_____no_output_____"
],
[
"nx.set_node_attributes(G, \"closeness\", cc)",
"_____no_output_____"
],
[
"sorted([node for node in list(G.nodes(data=True)) \n if node[1]['type'] == 'person'], \n key=lambda x: x[1]['closeness'], \n reverse=True)[:25]",
"_____no_output_____"
]
],
[
[
"The people with the highest closeness centrality are the ones that have the most co-affiliation with every other person, or the shortest path to every other person. Automated accounts are, as we might expect, extremely high on this measure -- they're used to send announcements of publications and do so to basically every group. The individual people highest ranked on this measure include Stephen Farrell, Jari Arkko, Ben Campbell -- long-time participants with leadership roles. The highest ranked woman is Alissa Cooper, current Chair of the IETF.",
"_____no_output_____"
],
[
"_TODO: calculating bi-cliques (the people who all are connected to the same group) and then measuring correlation in bi-cliques (people who belong to many of the same groups) could allow for analysis of cohesive subgroups and a different network analysis/visualization._ See Borgatti, S.P. and Halgin, D. In press. “Analyzing Affiliation Networks”. In Carrington, P. and Scott, J. (eds) The Sage Handbook of Social Network Analysis. Sage Publications. http://www.steveborgatti.com/papers/bhaffiliations.pdf",
"_____no_output_____"
]
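,
[
"A minimal sketch of a starting point for that co-membership analysis, assuming `G` and `person_nodes` from the cells above: the weighted one-mode projection onto people has edge weights that count the number of groups each pair shares.\n\n```python\nfrom networkx.algorithms import bipartite\n\n# people connected by the number of mailing lists they share\npeople_proj = bipartite.weighted_projected_graph(G, person_nodes)\n\n# the strongest co-membership ties\nsorted(people_proj.edges(data=True), key=lambda e: e[2]['weight'], reverse=True)[:10]\n```",
"_____no_output_____"
]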
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7c9ef0c15918575ee738e1aee628bfbd16d4d21 | 160,399 | ipynb | Jupyter Notebook | IPython-Demo.ipynb | dloss/notebooks | 48b34e167ed20f0b4c20cc58ee895b6f0e53e798 | [
"MIT"
] | 8 | 2015-05-08T20:02:25.000Z | 2020-10-25T06:52:12.000Z | IPython-Demo.ipynb | dloss/notebooks | 48b34e167ed20f0b4c20cc58ee895b6f0e53e798 | [
"MIT"
] | null | null | null | IPython-Demo.ipynb | dloss/notebooks | 48b34e167ed20f0b4c20cc58ee895b6f0e53e798 | [
"MIT"
] | 4 | 2015-10-23T19:47:13.000Z | 2020-06-15T12:54:07.000Z | 129.249799 | 74,178 | 0.822798 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7c9f5b5e43a8887a6eef4313142992e4373dbf9 | 3,417 | ipynb | Jupyter Notebook | IV-Implicits/templates/6.2 Conversiones implicitas.ipynb | alonsoir/scalacrashcourse | e37438a23a088aa4e2835855e5786f8873b16135 | [
"Apache-2.0"
] | null | null | null | IV-Implicits/templates/6.2 Conversiones implicitas.ipynb | alonsoir/scalacrashcourse | e37438a23a088aa4e2835855e5786f8873b16135 | [
"Apache-2.0"
] | null | null | null | IV-Implicits/templates/6.2 Conversiones implicitas.ipynb | alonsoir/scalacrashcourse | e37438a23a088aa4e2835855e5786f8873b16135 | [
"Apache-2.0"
] | null | null | null | 24.234043 | 293 | 0.594088 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7c9fc3a89ef729c7b6dad86e87fc67dbe558358 | 11,314 | ipynb | Jupyter Notebook | 05_1_cross_validation_uni_class_cdc16/uni-class_traffic_assignment_MSA_function_Oct_weekend.ipynb | jingzbu/InverseVITraffic | c0d33d91bdd3c014147d58866c1a2b99fb8a9608 | [
"MIT"
] | null | null | null | 05_1_cross_validation_uni_class_cdc16/uni-class_traffic_assignment_MSA_function_Oct_weekend.ipynb | jingzbu/InverseVITraffic | c0d33d91bdd3c014147d58866c1a2b99fb8a9608 | [
"MIT"
] | null | null | null | 05_1_cross_validation_uni_class_cdc16/uni-class_traffic_assignment_MSA_function_Oct_weekend.ipynb | jingzbu/InverseVITraffic | c0d33d91bdd3c014147d58866c1a2b99fb8a9608 | [
"MIT"
] | null | null | null | 30.01061 | 105 | 0.49779 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7ca0a1988a27533e0ccee171320530b4279e983 | 593,722 | ipynb | Jupyter Notebook | NMA_project.ipynb | AvocadoChutneys/ProjectNMA | d145970665f4410f84ac8381f700e1cce6f1764b | [
"MIT"
] | 1 | 2020-08-01T00:30:02.000Z | 2020-08-01T00:30:02.000Z | NMA_project.ipynb | AvocadoChutneys/ProjectNMA | d145970665f4410f84ac8381f700e1cce6f1764b | [
"MIT"
] | null | null | null | NMA_project.ipynb | AvocadoChutneys/ProjectNMA | d145970665f4410f84ac8381f700e1cce6f1764b | [
"MIT"
] | null | null | null | 188.662854 | 114,626 | 0.83097 | [
[
[
"#@title Importing Libraries\n!pip install pycaret --quiet\nimport os, requests\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams ",
"_____no_output_____"
],
[
"#@title Plotting params\n\nrcParams['figure.figsize'] = [20, 4]\nrcParams['font.size'] =15\nrcParams['axes.spines.top'] = False\nrcParams['axes.spines.right'] = False\nrcParams['figure.autolayout'] = True",
"_____no_output_____"
],
[
"#@title Loading Spiking times\nfname1 = ['steinmetz_st.npz']\nfname1.append('steinmetz_wav.npz')\nfname1.append('steinmetz_lfp.npz')\nurl2 = [\"https://osf.io/4bjns/download\"]\nurl2.append(\"https://osf.io/ugm9v/download\")\nurl2.append(\"https://osf.io/kx3v9/download\")\n\n\nfor j in range(len(url2)):\n if not os.path.isfile(fname1[j]):\n try:\n r = requests.get(url2[j])\n except requests.ConnectionError:\n print(\"!!! Failed to download data !!!\")\n else:\n if r.status_code != requests.codes.ok:\n print(\"!!! Failed to download data !!!\")\n else:\n with open(fname1[j], \"wb\") as fid:\n fid.write(r.content)\n\n\ndat_ST = np.load('steinmetz_st.npz', allow_pickle=True)['dat']\ndat2 = dat_ST[11]\ndel dat_ST\nprint(dat2.keys())",
"dict_keys(['ss', 'ss_passive'])\n"
],
[
"#@title Load Binned Times\n\nfname = []\nfor j in range(3):\n fname.append('steinmetz_part%d.npz'%j)\nurl = [\"https://osf.io/agvxh/download\"]\nurl.append(\"https://osf.io/uv3mw/download\")\nurl.append(\"https://osf.io/ehmw2/download\")\n\nfor j in range(len(url)):\n if not os.path.isfile(fname[j]):\n try:\n r = requests.get(url[j])\n except requests.ConnectionError:\n print(\"!!! Failed to download data !!!\")\n else:\n if r.status_code != requests.codes.ok:\n print(\"!!! Failed to download data !!!\")\n else:\n with open(fname[j], \"wb\") as fid:\n fid.write(r.content)\n\n\n\nalldat = np.array([])\nfor j in range(len(fname)):\n alldat = np.hstack((alldat, np.load('steinmetz_part%d.npz'%j, allow_pickle=True)['dat']))\n\n# select just one of the recordings here. 11 is nice because it has some neurons in vis ctx. \ndat = alldat[11]\nprint(dat.keys())",
"dict_keys(['spks', 'wheel', 'pupil', 'response', 'response_time', 'bin_size', 'stim_onset', 'contrast_right', 'contrast_left', 'brain_area', 'feedback_time', 'feedback_type', 'gocue', 'mouse_name', 'date_exp', 'trough_to_peak', 'active_trials', 'contrast_left_passive', 'contrast_right_passive', 'spks_passive', 'pupil_passive', 'wheel_passive', 'prev_reward', 'ccf', 'ccf_axes', 'cellid_orig', 'reaction_time', 'face', 'face_passive', 'licks', 'licks_passive'])\n"
]
],
[
[
"# Data description\n\n`alldat` contains 39 sessions from 10 mice, data from Steinmetz et al, 2019. Time bins for all measurements are 10ms, starting 500ms before stimulus onset. The mouse had to determine which side has the highest contrast. For each `dat = alldat[k]`, you have the following fields:\n\n* `dat['mouse_name']`: mouse name\n* `dat['date_exp']`: when a session was performed\n* `dat['spks']`: neurons by trials by time bins. \n* `dat['brain_area']`: brain area for each neuron recorded. \n* `dat['contrast_right']`: contrast level for the right stimulus, which is always contralateral to the recorded brain areas.\n* `dat['contrast_left']`: contrast level for left stimulus. \n* `dat['gocue']`: when the go cue sound was played. \n* `dat['response_times']`: when the response was registered, which has to be after the go cue. The mouse can turn the wheel before the go cue (and nearly always does!), but the stimulus on the screen won't move before the go cue. \n* `dat['response']`: which side the response was (`-1`, `0`, `1`). When the right-side stimulus had higher contrast, the correct choice was `-1`. `0` is a no go response. \n* `dat['feedback_time']`: when feedback was provided. \n* `dat['feedback_type']`: if the feedback was positive (`+1`, reward) or negative (`-1`, white noise burst). \n* `dat['wheel']`: exact position of the wheel that the mice uses to make a response, binned at `10ms`. \n* `dat['pupil']`: pupil area (noisy, because pupil is very small) + pupil horizontal and vertical position. \n* `dat['lfp']`: recording of the local field potential in each brain area from this experiment, binned at `10ms`.\n* `dat['brain_area_lfp']`: brain area names for the LFP channels. \n* `dat['trough_to_peak']`: measures the width of the action potential waveform for each neuron. Widths `<=10` samples are \"putative fast spiking neurons\". \n* `dat['waveform_w']`: temporal components of spike waveforms. `w@u` reconstructs the time by channels action potential shape. \n* `dat['waveform_u]`: spatial components of spike waveforms.\n* `dat['%X%_passive']`: same as above for `X` = {`spks`, `lfp`, `pupil`, `wheel`, `contrast_left`, `contrast_right`} but for passive trials at the end of the recording when the mouse was no longer engaged and stopped making responses. ",
"_____no_output_____"
],
[
"# Hypothesis\n\n* Prestimulus activity for trial pairs [incorrect → correct , correct → correct ] better predicts current trial outcome depending on the outcome of the previous trial. \n\n\n## Spike Labeling\n\n* We were interested to classify our neurons based on its waveform properties, in order to do that we:\n\n * label them based on: [(Loren M. Frank, Emery N. Brown, and Matthew A. Wilson, 2001)](https://journals.physiology.org/doi/full/10.1152/jn.2001.86.4.2029) in which putative excitatory neurons (PE) had >0.4 trough to peak time $ms$ and <5 $Hz$ mean firing rate, in the other hand putative inhibitory neurons (FS) had <0.4 trough to peak time and >5 $Hz$ mean firing rate.\n\n",
"_____no_output_____"
]
],
[
[
"#@title Boundaries plot\n\ndt_waveforms = 1/30000 # dt of waveform\nbinsize = dat['bin_size'] # bin times spikes\nmean_firing = dat['spks'].mean(axis = (1,2)) * 1/binsize # computing mean firing rate\nt_t_peak = dat['trough_to_peak'] * dt_waveforms * 1e3 # computing trough to peak time in ms\nplt.scatter(mean_firing,t_t_peak)\nplt.axhline(y=0.4,ls = '--', alpha = 0.5, c = 'r')\nplt.axvline(x=5,ls = '--', alpha = 0.5, c = 'r')\nplt.ylabel('Trough to peak ($ms$)')\nplt.xlabel('Mean Firing Rate (Hz)');\n",
"_____no_output_____"
]
],
[
[
"Next, we create a dataframe with the related labels:",
"_____no_output_____"
]
],
[
[
"#@title Label DataFrame \n\nimport plotly.express as px\n\nlabeling_df = pd.DataFrame({\n \"Mean Firing Rate\": mean_firing,\n \"Trough to peak\": t_t_peak,\n \"Region\": dat['brain_area'],\n \"Area\":dat['brain_area']\n})\nlabeling_df.replace(\n {\n \"Area\": {\"CA1\":\"Hippocampus\",\"DG\":\"Hippocampus\",\"SUB\":\"Hippocampus\",\n \"VISp\": \"Visual Ctx\", \"VISam\":\"Visual Ctx\",\"MD\":\"Thalamus\",\"LGd\":\"Thalamus\", \"LH\":\"Thalamus\",\n \"PL\":\"Other Ctx\",\"MOs\":\"Other Ctx\",\"ACA\":\"Other Ctx\"\n }\n }, inplace = True\n)\n\n# Labeling according to conditions, other is the default condition\nlabeling_df['Cell Type'] = \"Other\"\nlabeling_df.loc[(labeling_df['Mean Firing Rate']<5)&(labeling_df['Trough to peak']>0.4),'Cell Type'] = \"Excitatory\"\nlabeling_df.loc[(labeling_df['Mean Firing Rate']>5)&(labeling_df['Trough to peak']<0.4), 'Cell Type'] = \"Inhibitory\"\n\npx.scatter(x=\"Mean Firing Rate\", y =\"Trough to peak\", color = \"Cell Type\", data_frame = labeling_df)",
"_____no_output_____"
]
],
[
[
"# Raster plot\n\n* We are now able to separate the **trials** based on *correct and incorrect* responses and separate the **neurons** based on *putative cell type*\n\n<font color='red'>* Inhibitory cells</font> <br/>\n<font color='black'>* Other cells</font> <br/>\n<font color='blue'>* Excitatory cells</font> ",
"_____no_output_____"
]
],
[
[
"#@title raster visualizer\nfrom ipywidgets import interact\nimport ipywidgets as widgets\nvis_right = dat['contrast_right'] # 0 - low - high\nvis_left = dat['contrast_left'] # 0 - low - high\nis_correct = np.sign(dat['response'])==np.sign(vis_left-vis_right)\ndef raster_visualizer(area,trial):\n spikes= dat2['ss']\n plt.figure(figsize=(9,5))\n plt.eventplot(spikes[(labeling_df['Area']==area) & (labeling_df['Cell Type']=='Excitatory')][:,trial]);\n plt.eventplot(spikes[(labeling_df['Area']==area) & (labeling_df['Cell Type']=='Other')][:,trial],color='k');\n plt.eventplot(spikes[(labeling_df['Area']==area) & (labeling_df['Cell Type']=='Inhibitory')][:,trial],colors = 'r');\n plt.yticks([]);\n plt.vlines(0.5,0,len(spikes[(labeling_df['Area']==area)])-50,'gray','--',alpha=0.5)\n plt.ylabel('Neurons');\n plt.xlabel('Time ($s$)');\n plt.title(f'Trial was correct?:{is_correct[trial]}')\ninteract(raster_visualizer, area=['Hippocampus','Visual Ctx','Thalamus','Other Ctx'], trial=(0,339));",
"_____no_output_____"
],
[
"#@title Mean firing rate based on response\n# response = dat['response'] # right - nogo - left (-1, 0, 1)\n\ndef mean_firing(area):\n Selection = (labeling_df['Area']==area) #& (labeling_df['Cell Type']=='Excitatory')\n spikes = dat['spks'][Selection].mean(axis = 0) #selecting spikes\n mean_fr_e = spikes[is_correct==True].mean(axis=(0))*1/binsize\n mean_fr_i = spikes[is_correct==False].mean(axis=(0))*1/binsize\n time = binsize * np.arange(dat['spks'].shape[-1])\n plt.plot(time, mean_fr_e,label='correct')\n plt.plot(time, mean_fr_i,label='incorrect')\n plt.axvline(x=0.5,ls = '--', alpha = 0.5, c = 'r', label='Stim')\n plt.axvline(x=np.mean(dat['response_time']),ls = '--', alpha = 0.5, c = 'k', label='Response')\n plt.ylabel('Mean Firing Rate ($Hz$)')\n plt.xlabel('Time ($ms$)')\n plt.legend()\ninteract(mean_firing, area=['Hippocampus','Visual Ctx','Thalamus','Other Ctx']);",
"_____no_output_____"
]
],
[
[
"# Modeling\n\n* first cell creates the full data frame:\n * each column is a neuron (*except the last one which is the target variable*)\n * each row is a trial\n * each cell is mean firing rate\n\nIn this example we are taking the hippocampal region",
"_____no_output_____"
]
],
[
[
"#@title DataFrame construction\n\n# selects only correct after incorrect trials and correct after correct trials\ncorrect_after_i = np.where(np.diff(is_correct.astype('float32'))==1)[0]\nidx_c_c = []\nfor i in range(len(is_correct)-1):\n if is_correct[i] == 1 & is_correct[i+1]==1:\n idx_c_c.append(i)\n correct_after_c = np.array(idx_c_c)\nidx = np.append(correct_after_i,correct_after_c)\nc_based_on_pre = np.append(np.array([0]*len(correct_after_i)),np.array([1]*len(correct_after_c)))\n\ndef get_full_X_y(area,y):\n bin_spk_hip = dat['spks'][(labeling_df['Area']== area)]\n bin_spk_hip = np.moveaxis(bin_spk_hip[:,:,:50],1,0)\n x= bin_spk_hip.mean(axis=2)\n return x,y\n\ndef get_prestim_X_y(area,y):\n bin_spk_hip = dat['spks'][(labeling_df['Area']== area)]\n bin_spk_hip = np.moveaxis(bin_spk_hip,1,0)\n x= bin_spk_hip[idx,:,:50].mean(axis=2)\n return x,y\n\nprint('Available options: Hippocampus, Visual Ctx, Thalamus, Other Ctx')\narea = input('Select the area to visualize:')\nx,y = get_prestim_X_y(area,c_based_on_pre)\n\ndef construct_df(x,y,named=False):\n if named == True:\n X = pd.DataFrame(x,columns=[f\"N{i}\" for i in range(x.shape[1])])\n else:\n X = pd.DataFrame(x)\n full_df = pd.concat([X,pd.Series(y,name='target')],axis=1)\n return full_df\n\ndf = construct_df(x,y)\n\nimport seaborn as sns\nsns.heatmap(x*1/binsize,cbar_kws={'label':'Mean Firing rate ($Hz$)'});\nplt.ylabel('Trials');\nplt.xlabel('Neuron (id)');",
"Available options: Hippocampus, Visual Ctx, Thalamus, Other Ctx\nSelect the area to visualize:Hippocampus\n"
],
[
"#@title Baseline model: Logistic Regression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_val_score\nprint('Available options: Hippocampus, Visual Ctx, Thalamus, Other Ctx')\narea = input('Select the area to fit:')\nX_full,y_full = get_full_X_y(area,is_correct)\nX_pres,y_pres = get_prestim_X_y(area,c_based_on_pre) #Getting the data\n\ndef LogReg(X,y):\n model = LogisticRegression(C=1, solver=\"saga\", max_iter=5000)\n Log_model=model.fit(X,y)\n accuracies = cross_val_score(model,X,y,cv=10)\n auc = cross_val_score(model,X,y,cv=10,scoring='roc_auc')\n return Log_model, accuracies, auc \n\nmodel_full, accuracies_full, auc_full = LogReg(X_full,y_full)\nmodel_pre, accuracies_pre, auc_pre = LogReg(X_pres,y_pres)",
"Available options: Hippocampus, Visual Ctx, Thalamus, Other Ctx\nSelect the area to fit:Hippocampus\n"
],
[
"#@title Comparing Accuracy\n\noutput_df = pd.DataFrame({\n 'Model': np.concatenate((np.array([\"Full\"]*10),np.array([\"Pre_Stim\"]*10))),\n \"Accuracy\": np.concatenate((accuracies_full,accuracies_pre))\n})\nsns.boxplot(x='Accuracy',y='Model',data=output_df);\nplt.title(f'Accuracy at: {area}');",
"_____no_output_____"
],
[
"#@title Compare AUC \noutput_df = pd.DataFrame({\n 'Model': np.concatenate((np.array([\"Full\"]*10),np.array([\"Paired\"]*10))),\n \"AUC\": np.concatenate((auc_full,auc_pre))\n})\nsns.boxplot(x='AUC',y='Model',data=output_df);\nplt.title(f'AUC at: {area}');",
"_____no_output_____"
]
],
[
[
"This results make us think that the prestimulus activity **could** carry on information related with the previous trial.\n\nWe realized that we had a imbalance problem so we proceeded to balance the classes:",
"_____no_output_____"
]
],
[
[
"!pip install imbalanced-learn --quiet",
"_____no_output_____"
],
[
"#@title Balancing function\n\ndef balancer(X,y,undersample = 0.5):\n from imblearn.pipeline import Pipeline\n from imblearn.over_sampling import SMOTE\n from imblearn.under_sampling import RandomUnderSampler\n \n print('######################################################')\n print(f\"{np.sum(y)} original number of samples in the 1 class\") # 1 class\n print(f\"{len(y)-np.sum(y)} original number of samples in the 0 class\")\n print('######################################################')\n\n #model = LogisticRegression(C=1, solver=\"saga\", max_iter=5000)\n over = SMOTE(\"minority\",random_state = 43)\n under = RandomUnderSampler(sampling_strategy=undersample,random_state = 43)\n steps = [('under', under), ('over', over)]\n #steps = [('under', under), ('over', over)]\n pipeline = Pipeline(steps=steps);\n\n # transform the dataset\n X, y = pipeline.fit_resample(X,y);\n print('ooooooooooooooooooooooooooooooooooooooooooooooooooo')\n print(f\"{np.sum(y)} resampled data in the 1 class\") # 1 class\n print(f\"{len(y)-np.sum(y)} resampled data in the 0 class\")\n print('ooooooooooooooooooooooooooooooooooooooooooooooooooo')\n return X,y",
"_____no_output_____"
],
[
"b_X_pres, b_y_pres = balancer(X_pres,y_pres)",
"######################################################\n183 original number of samples in the 1 class\n52 original number of samples in the 0 class\n######################################################\nooooooooooooooooooooooooooooooooooooooooooooooooooo\n104 resampled data in the 1 class\n104 resampled data in the 0 class\nooooooooooooooooooooooooooooooooooooooooooooooooooo\n"
],
[
"b_X_full, b_y_full = balancer(X_full,y_full,0.9)",
"######################################################\n236 original number of samples in the 1 class\n104 original number of samples in the 0 class\n######################################################\nooooooooooooooooooooooooooooooooooooooooooooooooooo\n115 resampled data in the 1 class\n115 resampled data in the 0 class\nooooooooooooooooooooooooooooooooooooooooooooooooooo\n"
],
[
"#@title Compare accuracy with balanced classes \n\nmodel_full, accuracies_full, auc_full = LogReg(b_X_full, b_y_full )\nmodel_pre, accuracies_pre, auc_pre = LogReg(b_X_pres, b_y_pres)\noutput_df = pd.DataFrame({\n 'Model': np.concatenate((np.array([\"Full\"]*10),np.array([\"Paired\"]*10))),\n \"Accuracy\": np.concatenate((accuracies_full,accuracies_pre))\n})\nsns.boxplot(x='Accuracy',y='Model',data=output_df);\nplt.title(f'Accuracy at: {area}');",
"_____no_output_____"
],
[
"#@title Compare AUC with balanced classes\n\noutput_df = pd.DataFrame({\n 'Model': np.concatenate((np.array([\"Full\"]*10),np.array([\"Paired\"]*10))),\n \"AUC\": np.concatenate((auc_full,auc_pre))\n})\nsns.boxplot(x='AUC',y='Model',data=output_df);\nplt.title(f'AUC at: {area}');",
"_____no_output_____"
]
],
[
[
"After balancing the classes we see a decrease in accuracy but a fairly increase in AUC, which might mean that with the unbalanced dataset our classifier was assinging the most frequent class to every sample.",
"_____no_output_____"
],
[
"# Selection of a better model\n\nNow that we know that pre-estimulus activity might be able to classify a correct trial based on the preceding output, we are in a position to use a better model and see if it is possible.\n\nIn order to do that, we construct the dataframe with the balanced data, and setup the enviroment of pycaret, selecting the indicator variable of outcome pairs ([incorrect → correct , correct → correct ]) as the `target`.\n\n* `numeric_features` argument spcifies the datatype of those columns (pycaret was infering them as categorical)\n\nThis procedure also split the data in train and test sets, and setups a 10 K-Fold CV process.\n\n\n\n\n\n",
"_____no_output_____"
]
],
[
[
"from pycaret.classification import * ",
"_____no_output_____"
],
[
"resampled_df = construct_df(b_X_pres, b_y_pres,named=True)",
"_____no_output_____"
],
[
"exp_clf101 = setup(data = resampled_df, target = 'target', numeric_features=['N1','N22','N32','N143','N148','N153','N183','N184','N189'], session_id=123) ",
" \nSetup Succesfully Completed!\n"
]
],
[
[
"Here we are comparing different CV classification metrics from 14 different models, **Quadratic Discriminant Analysis** had the best performance",
"_____no_output_____"
]
],
[
[
"compare_models()",
"_____no_output_____"
]
],
[
[
"# Quadratic Discriminant Analysis\n\nWe have to classes $k \\in \\{0,1\\}$ that belongs to correct preceded by incorrect trial (0) and correct preceded by correct (1).\n\nEvery class has a prior probability $P(k) = 0.5$ since is a balanced dataframe and $P(k) = \\frac{N_k}{N}$.\n\nAnd basically we are trying to find the posterior probability of being in a class given the observations:\n\n$\\rm{Pr}(K = k |X = x) = \\frac{f_k(x) P(k)}{\\sum_{l=1}^K f_l(x) P_1(k)}$\n\nThe problem is related to which class maximazes that posterior probability:\n\n$C(k) = \\arg \\max_k \\rm{Pr}(K = k | X = x) = \\arg \\max_k f_k(x) P(k)$\n\nand it is assumed that data has a gaussian likelihood:\n\n$f_k(x) = {|2 \\pi \\Sigma_k|}^{-1/2} \\exp\\left(-\\frac{1}{2}(x - \\mu_k)^T\\Sigma_k^{-1}(x - \\mu_k)\\right)$\n\nFinally deriving $C(k)$:\n\n$C(k) = \\arg \\max_k( - \\frac{1}{2} \\log |\\Sigma_k| - \\frac{1}{2}(x- \\mu_k)^T \\Sigma_k^{-1} (x - \\mu_k) + \\log P(k))$\n\nHere we instanciate the model and make a 10-Fold CV run, having a final accuracy of **0.84** and an AUC of **0.84**. ",
"_____no_output_____"
]
],
[
[
"qda = create_model('qda')",
"_____no_output_____"
]
],
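[
[
"For comparison, a minimal plain scikit-learn sketch of the same classifier (this is not the exact preprocessing pipeline pycaret runs internally; it assumes the balanced arrays `b_X_pres` and `b_y_pres` from the cells above are available):\n\n```python\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.model_selection import cross_val_score\n\n# 10-fold CV accuracy and AUC for a QDA classifier on the balanced pre-stimulus data\nqda_skl = QuadraticDiscriminantAnalysis()\nacc = cross_val_score(qda_skl, b_X_pres, b_y_pres, cv=10)\nauc = cross_val_score(qda_skl, b_X_pres, b_y_pres, cv=10, scoring='roc_auc')\nprint(acc.mean(), auc.mean())\n```",
"_____no_output_____"
]
],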
[
[
"Here we can see the ROC curve and the Precision-Recall curve of the classifier, describing that our classifier is able to discriminate between both clasess very well.\n\nResults in the test set:\n\n\n\n",
"_____no_output_____"
]
],
[
[
"plot_model(qda, plot = 'auc')",
"_____no_output_____"
],
[
"plot_model(qda, plot = 'pr')",
"_____no_output_____"
]
],
[
[
"The confusion matrix show us that it is easier to classify correctly a correct trial preceded by a correct trial from only the pre-estimulus activity.\n\nHaving 3 false positives in the test set:",
"_____no_output_____"
]
],
[
[
"plot_model(qda, plot = 'confusion_matrix')",
"_____no_output_____"
]
],
[
[
"Finally we test out the model and retrieve the metrics with unseen data (our test set):",
"_____no_output_____"
]
],
[
[
"predict_model(qda);",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7ca112b378b939bd5882ba66a7791d5066a0cc3 | 49,016 | ipynb | Jupyter Notebook | Investment Management/Course1/lab_110.ipynb | djoye21school/python | b48443d055d51a79489711e21d4c9bc1052f4893 | [
"CNRI-Python-GPL-Compatible"
] | 622 | 2018-07-17T09:05:41.000Z | 2022-03-29T02:57:02.000Z | Investment Management/Course1/lab_110.ipynb | djoye21school/python | b48443d055d51a79489711e21d4c9bc1052f4893 | [
"CNRI-Python-GPL-Compatible"
] | 21 | 2019-11-10T02:06:09.000Z | 2022-01-22T23:54:11.000Z | Investment Management/Course1/lab_110.ipynb | djoye21school/python | b48443d055d51a79489711e21d4c9bc1052f4893 | [
"CNRI-Python-GPL-Compatible"
] | 906 | 2018-07-17T09:05:43.000Z | 2022-03-31T12:55:49.000Z | 196.851406 | 15,084 | 0.906092 | [
[
[
"# Finding the Max Sharpe Ratio Portfolio\n\nWe've already seen that given a set of expected returns and a covariance matrix, we can plot the efficient frontier. In this section, we'll extend the code to locate the point on the efficient frontier that we are most interested in, which is the tangency portfolio or the Max Sharpe Ratio portfolio.\n\nLet's start by the usual imports, and load in the data.",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline\nimport edhec_risk_kit_110 as erk\n\nind = erk.get_ind_returns()\ner = erk.annualize_rets(ind[\"1996\":\"2000\"], 12)\ncov = ind[\"1996\":\"2000\"].cov()",
"_____no_output_____"
]
],
[
[
"We already know how to identify points on the curve if we are given a target rate of return. Instead of minimizing the vol based on a target return, we want to find that one point on the curve that maximizes the Sharpe Ratio, given the risk free rate.\n\n```python\ndef msr(riskfree_rate, er, cov):\n \"\"\"\n Returns the weights of the portfolio that gives you the maximum sharpe ratio\n given the riskfree rate and expected returns and a covariance matrix\n \"\"\"\n n = er.shape[0]\n init_guess = np.repeat(1/n, n)\n bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!\n # construct the constraints\n weights_sum_to_1 = {'type': 'eq',\n 'fun': lambda weights: np.sum(weights) - 1\n }\n def neg_sharpe(weights, riskfree_rate, er, cov):\n \"\"\"\n Returns the negative of the sharpe ratio\n of the given portfolio\n \"\"\"\n r = portfolio_return(weights, er)\n vol = portfolio_vol(weights, cov)\n return -(r - riskfree_rate)/vol\n \n weights = minimize(neg_sharpe, init_guess,\n args=(riskfree_rate, er, cov), method='SLSQP',\n options={'disp': False},\n constraints=(weights_sum_to_1,),\n bounds=bounds)\n return weights.x\n```\n\nLet's guess where the point might be:",
"_____no_output_____"
]
],
[
[
"ax = erk.plot_ef(20, er, cov)\nax.set_xlim(left = 0)",
"_____no_output_____"
],
[
"# plot EF\nax = erk.plot_ef(20, er, cov)\nax.set_xlim(left = 0)\n# get MSR\nrf = 0.1\nw_msr = erk.msr(rf, er, cov)\nr_msr = erk.portfolio_return(w_msr, er)\nvol_msr = erk.portfolio_vol(w_msr, cov)\n# add CML\ncml_x = [0, vol_msr]\ncml_y = [rf, r_msr]\nax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=12)",
"_____no_output_____"
],
[
"r_msr, vol_msr",
"_____no_output_____"
]
],
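[
[
"For reference, the optimization problem that the `msr` helper above solves can be written out explicitly (this just restates the code, with $\\mu$ the expected returns, $\\Sigma$ the covariance matrix and $r_f$ the riskfree rate):\n\n$$\\max_{w} \\; \\frac{w^T \\mu - r_f}{\\sqrt{w^T \\Sigma w}} \\qquad \\text{subject to} \\qquad \\sum_i w_i = 1, \\quad 0 \\le w_i \\le 1$$\n\nThe code minimizes the negative Sharpe ratio with SLSQP, which is equivalent.",
"_____no_output_____"
]
],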
[
[
"Let's put it all together by adding the CML to the `plot_ef` code.\n\nAdd the following code:\n\n```python\n if show_cml:\n ax.set_xlim(left = 0)\n # get MSR\n w_msr = msr(riskfree_rate, er, cov)\n r_msr = portfolio_return(w_msr, er)\n vol_msr = portfolio_vol(w_msr, cov)\n # add CML\n cml_x = [0, vol_msr]\n cml_y = [riskfree_rate, r_msr]\n ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=12)\n```\n",
"_____no_output_____"
]
],
[
[
"erk.plot_ef(20, er, cov, style='-', show_cml=True, riskfree_rate=0.1)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7ca11c11560217b5de6fa98e8c0766d80982cfd | 128,363 | ipynb | Jupyter Notebook | .ipynb_checkpoints/IPL 2008 - 2018 Analysis-checkpoint.ipynb | srimani-programmer/IPL-Analysis | 7442658f63cfa6170e86dd372d3e96c089016d50 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/IPL 2008 - 2018 Analysis-checkpoint.ipynb | srimani-programmer/IPL-Analysis | 7442658f63cfa6170e86dd372d3e96c089016d50 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/IPL 2008 - 2018 Analysis-checkpoint.ipynb | srimani-programmer/IPL-Analysis | 7442658f63cfa6170e86dd372d3e96c089016d50 | [
"MIT"
] | null | null | null | 94.107771 | 32,480 | 0.755288 | [
[
[
"# Importing the Libraries\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport warnings\n\n# Filtering the Warnings\nwarnings.filterwarnings(\"ignore\")\nprint('Libraries Loaded Sucessfully...!')",
"Libraries Loaded Sucessfully...!\n"
],
[
"# Loading the Dataset\nmatches_dataset = pd.read_csv('Datasets/matches.csv')\n\n# Printing the first 5 rows\n\nmatches_dataset.head()",
"_____no_output_____"
],
[
"# Describing the Dataset\n\nmatches_dataset.describe()",
"_____no_output_____"
],
[
"# Shape of the Dataset\n\nmatches_dataset.shape",
"_____no_output_____"
],
[
"matches_dataset.tail()",
"_____no_output_____"
],
[
"# Droping the some columns from the Dataset\n\nmatches_dataset.drop('umpire3', inplace=True, axis=1)\n\nprint('Column Dropped Sucessfully...!')\n\nmatches_dataset.head(8)",
"Column Dropped Sucessfully...!\n"
],
[
"player_of_match = dict()\n\nfor player in matches_dataset['player_of_match']:\n player_of_match[player] = player_of_match.get(player, 0) + 1\n \nprint(player_of_match)",
"{'Yuvraj Singh': 5, 'SPD Smith': 4, 'CA Lynn': 4, 'GJ Maxwell': 5, 'KM Jadhav': 2, 'Rashid Khan': 6, 'N Rana': 4, 'AR Patel': 4, 'SV Samson': 5, 'JJ Bumrah': 2, 'SP Narine': 9, 'KA Pollard': 9, 'AJ Tye': 2, 'RV Uthappa': 7, 'CJ Anderson': 3, 'BA Stokes': 3, 'NM Coulter-Nile': 4, 'B Kumar': 5, 'CH Gayle': 20, 'KS Williamson': 3, 'JC Buttler': 4, 'SK Raina': 14, 'MJ McClenaghan': 2, 'MS Dhoni': 14, 'HM Amla': 2, 'G Gambhir': 13, 'LH Ferguson': 1, 'KH Pandya': 4, 'Sandeep Sharma': 5, 'DA Warner': 15, 'RG Sharma': 16, 'Mohammed Shami': 1, 'RA Tripathi': 1, 'RR Pant': 3, 'JD Unadkat': 4, 'LMP Simmons': 3, 'DR Smith': 11, 'S Dhawan': 4, 'MM Sharma': 2, 'SS Iyer': 3, 'WP Saha': 3, 'KK Nair': 3, 'Mohammed Siraj': 1, 'AT Rayudu': 9, 'HV Patel': 2, 'Washington Sundar': 1, 'KV Sharma': 1, 'BB McCullum': 5, 'MEK Hussey': 12, 'MF Maharoof': 1, 'MV Boucher': 2, 'DJ Hussey': 2, 'SR Watson': 13, 'V Sehwag': 11, 'ML Hayden': 4, 'YK Pathan': 16, 'KC Sangakkara': 5, 'JDP Oram': 1, 'AC Gilchrist': 7, 'SM Katich': 1, 'ST Jayasuriya': 2, 'GD McGrath': 1, 'SE Marsh': 9, 'SA Asnodkar': 1, 'R Vinay Kumar': 3, 'IK Pathan': 2, 'SM Pollock': 2, 'Sohail Tanvir': 2, 'S Sreesanth': 1, 'A Nehra': 6, 'SC Ganguly': 5, 'CRD Fernando': 1, 'L Balaji': 3, 'Shoaib Akhtar': 1, 'A Mishra': 10, 'DPMD Jayawardene': 5, 'GC Smith': 2, 'DJ Bravo': 3, 'M Ntini': 2, 'SP Goswami': 1, 'A Kumble': 3, 'KD Karthik': 4, 'JA Morkel': 2, 'P Kumar': 2, 'Umar Gul': 1, 'SR Tendulkar': 8, 'R Dravid': 2, 'DL Vettori': 1, 'RP Singh': 1, 'M Muralitharan': 2, 'AB de Villiers': 18, 'RS Bopara': 1, 'PP Ojha': 1, 'TM Dilshan': 1, 'HH Gibbs': 1, 'DP Nannes': 1, 'JP Duminy': 4, 'SB Jakati': 1, 'JH Kallis': 10, 'A Singh': 1, 'S Badrinath': 1, 'LRPL Taylor': 3, 'Harbhajan Singh': 6, 'R Bhatia': 1, 'SK Warne': 4, 'B Lee': 2, 'BJ Hodge': 6, 'LR Shukla': 1, 'MK Pandey': 4, 'AD Mathews': 1, 'MK Tiwary': 3, 'WPUJC Vaas': 1, 'A Symonds': 3, 'AA Jhunjhunwala': 1, 'J Theron': 1, 'AC Voges': 1, 'NV Ojha': 1, 'SL Malinga': 5, 'M Vijay': 6, 'KP Pietersen': 3, 'PD Collingwood': 1, 'MJ Lumb': 1, 'TL Suman': 1, 'RJ Harris': 1, 'PP Chawla': 3, 'Harmeet Singh': 2, 'R Ashwin': 1, 'R McLaren': 1, 'M Kartik': 1, 'DE Bollinger': 2, 'S Anirudha': 1, 'SK Trivedi': 2, 'SB Wagh': 1, 'PC Valthaty': 2, 'MD Mishra': 1, 'DW Steyn': 5, 'S Sohal': 1, 'MM Patel': 2, 'V Kohli': 11, 'I Sharma': 2, 'J Botha': 1, 'Iqbal Abdulla': 2, 'P Parameswaran': 1, 'R Sharma': 2, 'MR Marsh': 1, 'BA Bhatt': 1, 'S Aravind': 1, nan: 3, 'JEC Franklin': 1, 'RE Levi': 1, 'AM Rahane': 12, 'RA Jadeja': 8, 'MN Samuels': 1, 'M Morkel': 2, 'F du Plessis': 3, 'AD Mascarenhas': 1, 'Shakib Al Hasan': 2, 'JD Ryder': 1, 'S Nadeem': 1, 'KMDN Kulasekara': 1, 'CL White': 1, 'Mandeep Singh': 3, 'P Negi': 2, 'Azhar Mahmood': 2, 'BW Hilfenhaus': 2, 'A Chandila': 1, 'UT Yadav': 8, 'MS Bisla': 1, 'M Vohra': 3, 'GH Vihari': 2, 'AJ Finch': 5, 'JP Faulkner': 5, 'MS Gony': 1, 'DA Miller': 3, 'DJG Sammy': 1, 'MG Johnson': 2, 'KK Cooper': 1, 'PA Patel': 4, 'AP Tare': 1, 'LJ Wright': 1, 'YS Chahal': 1, 'PV Tambe': 2, 'DJ Hooda': 1, 'GJ Bailey': 1, 'AD Russell': 7, 'MA Agarwal': 1, 'MA Starc': 1, 'VR Aaron': 1, 'TA Boult': 1, 'EJG Morgan': 1, 'HH Pandya': 3, 'MC Henriques': 2, 'Z Khan': 1, 'Q de Kock': 1, 'Mustafizur Rahman': 1, 'SA Yadav': 1, 'AB Dinda': 2, 'CH Morris': 2, 'CR Brathwaite': 1, 'MP Stoinis': 2, 'A Zampa': 1, 'BCJ Cutting': 1, 'KL Rahul': 2, 'SW Billings': 1, 'JJ Roy': 1, 'B Stanlake': 1, 'J Archer': 1, 'AS Rajpoot': 2, 'TG Southee': 1, 'AS Yadav': 1, 'M Ur Rahman': 1, 'Ishan Kishan': 1, 'Kuldeep Yadav': 1, 'S Gopal': 1, 
'L Ngidi': 1}\n"
],
[
"# Printing the results of Man of the Match\n\nfor player,number in player_of_match.items():\n if number == 1:\n print('{} got {} time Man of the Match Award.'.format(player,number))\n else:\n print('{} got {} times Man of the Match Award.'.format(player,number))",
"Yuvraj Singh got 5 times Man of the Match Award.\nSPD Smith got 4 times Man of the Match Award.\nCA Lynn got 4 times Man of the Match Award.\nGJ Maxwell got 5 times Man of the Match Award.\nKM Jadhav got 2 times Man of the Match Award.\nRashid Khan got 6 times Man of the Match Award.\nN Rana got 4 times Man of the Match Award.\nAR Patel got 4 times Man of the Match Award.\nSV Samson got 5 times Man of the Match Award.\nJJ Bumrah got 2 times Man of the Match Award.\nSP Narine got 9 times Man of the Match Award.\nKA Pollard got 9 times Man of the Match Award.\nAJ Tye got 2 times Man of the Match Award.\nRV Uthappa got 7 times Man of the Match Award.\nCJ Anderson got 3 times Man of the Match Award.\nBA Stokes got 3 times Man of the Match Award.\nNM Coulter-Nile got 4 times Man of the Match Award.\nB Kumar got 5 times Man of the Match Award.\nCH Gayle got 20 times Man of the Match Award.\nKS Williamson got 3 times Man of the Match Award.\nJC Buttler got 4 times Man of the Match Award.\nSK Raina got 14 times Man of the Match Award.\nMJ McClenaghan got 2 times Man of the Match Award.\nMS Dhoni got 14 times Man of the Match Award.\nHM Amla got 2 times Man of the Match Award.\nG Gambhir got 13 times Man of the Match Award.\nLH Ferguson got 1 time Man of the Match Award.\nKH Pandya got 4 times Man of the Match Award.\nSandeep Sharma got 5 times Man of the Match Award.\nDA Warner got 15 times Man of the Match Award.\nRG Sharma got 16 times Man of the Match Award.\nMohammed Shami got 1 time Man of the Match Award.\nRA Tripathi got 1 time Man of the Match Award.\nRR Pant got 3 times Man of the Match Award.\nJD Unadkat got 4 times Man of the Match Award.\nLMP Simmons got 3 times Man of the Match Award.\nDR Smith got 11 times Man of the Match Award.\nS Dhawan got 4 times Man of the Match Award.\nMM Sharma got 2 times Man of the Match Award.\nSS Iyer got 3 times Man of the Match Award.\nWP Saha got 3 times Man of the Match Award.\nKK Nair got 3 times Man of the Match Award.\nMohammed Siraj got 1 time Man of the Match Award.\nAT Rayudu got 9 times Man of the Match Award.\nHV Patel got 2 times Man of the Match Award.\nWashington Sundar got 1 time Man of the Match Award.\nKV Sharma got 1 time Man of the Match Award.\nBB McCullum got 5 times Man of the Match Award.\nMEK Hussey got 12 times Man of the Match Award.\nMF Maharoof got 1 time Man of the Match Award.\nMV Boucher got 2 times Man of the Match Award.\nDJ Hussey got 2 times Man of the Match Award.\nSR Watson got 13 times Man of the Match Award.\nV Sehwag got 11 times Man of the Match Award.\nML Hayden got 4 times Man of the Match Award.\nYK Pathan got 16 times Man of the Match Award.\nKC Sangakkara got 5 times Man of the Match Award.\nJDP Oram got 1 time Man of the Match Award.\nAC Gilchrist got 7 times Man of the Match Award.\nSM Katich got 1 time Man of the Match Award.\nST Jayasuriya got 2 times Man of the Match Award.\nGD McGrath got 1 time Man of the Match Award.\nSE Marsh got 9 times Man of the Match Award.\nSA Asnodkar got 1 time Man of the Match Award.\nR Vinay Kumar got 3 times Man of the Match Award.\nIK Pathan got 2 times Man of the Match Award.\nSM Pollock got 2 times Man of the Match Award.\nSohail Tanvir got 2 times Man of the Match Award.\nS Sreesanth got 1 time Man of the Match Award.\nA Nehra got 6 times Man of the Match Award.\nSC Ganguly got 5 times Man of the Match Award.\nCRD Fernando got 1 time Man of the Match Award.\nL Balaji got 3 times Man of the Match Award.\nShoaib Akhtar got 1 time Man of the Match Award.\nA Mishra got 10 
times Man of the Match Award.\nDPMD Jayawardene got 5 times Man of the Match Award.\nGC Smith got 2 times Man of the Match Award.\nDJ Bravo got 3 times Man of the Match Award.\nM Ntini got 2 times Man of the Match Award.\nSP Goswami got 1 time Man of the Match Award.\nA Kumble got 3 times Man of the Match Award.\nKD Karthik got 4 times Man of the Match Award.\nJA Morkel got 2 times Man of the Match Award.\nP Kumar got 2 times Man of the Match Award.\nUmar Gul got 1 time Man of the Match Award.\nSR Tendulkar got 8 times Man of the Match Award.\nR Dravid got 2 times Man of the Match Award.\nDL Vettori got 1 time Man of the Match Award.\nRP Singh got 1 time Man of the Match Award.\nM Muralitharan got 2 times Man of the Match Award.\nAB de Villiers got 18 times Man of the Match Award.\nRS Bopara got 1 time Man of the Match Award.\nPP Ojha got 1 time Man of the Match Award.\nTM Dilshan got 1 time Man of the Match Award.\nHH Gibbs got 1 time Man of the Match Award.\nDP Nannes got 1 time Man of the Match Award.\nJP Duminy got 4 times Man of the Match Award.\nSB Jakati got 1 time Man of the Match Award.\nJH Kallis got 10 times Man of the Match Award.\nA Singh got 1 time Man of the Match Award.\nS Badrinath got 1 time Man of the Match Award.\nLRPL Taylor got 3 times Man of the Match Award.\nHarbhajan Singh got 6 times Man of the Match Award.\nR Bhatia got 1 time Man of the Match Award.\nSK Warne got 4 times Man of the Match Award.\nB Lee got 2 times Man of the Match Award.\nBJ Hodge got 6 times Man of the Match Award.\nLR Shukla got 1 time Man of the Match Award.\nMK Pandey got 4 times Man of the Match Award.\nAD Mathews got 1 time Man of the Match Award.\nMK Tiwary got 3 times Man of the Match Award.\nWPUJC Vaas got 1 time Man of the Match Award.\nA Symonds got 3 times Man of the Match Award.\nAA Jhunjhunwala got 1 time Man of the Match Award.\nJ Theron got 1 time Man of the Match Award.\nAC Voges got 1 time Man of the Match Award.\nNV Ojha got 1 time Man of the Match Award.\nSL Malinga got 5 times Man of the Match Award.\nM Vijay got 6 times Man of the Match Award.\nKP Pietersen got 3 times Man of the Match Award.\nPD Collingwood got 1 time Man of the Match Award.\nMJ Lumb got 1 time Man of the Match Award.\nTL Suman got 1 time Man of the Match Award.\nRJ Harris got 1 time Man of the Match Award.\nPP Chawla got 3 times Man of the Match Award.\nHarmeet Singh got 2 times Man of the Match Award.\nR Ashwin got 1 time Man of the Match Award.\nR McLaren got 1 time Man of the Match Award.\nM Kartik got 1 time Man of the Match Award.\nDE Bollinger got 2 times Man of the Match Award.\nS Anirudha got 1 time Man of the Match Award.\nSK Trivedi got 2 times Man of the Match Award.\nSB Wagh got 1 time Man of the Match Award.\nPC Valthaty got 2 times Man of the Match Award.\nMD Mishra got 1 time Man of the Match Award.\nDW Steyn got 5 times Man of the Match Award.\nS Sohal got 1 time Man of the Match Award.\nMM Patel got 2 times Man of the Match Award.\nV Kohli got 11 times Man of the Match Award.\nI Sharma got 2 times Man of the Match Award.\nJ Botha got 1 time Man of the Match Award.\nIqbal Abdulla got 2 times Man of the Match Award.\nP Parameswaran got 1 time Man of the Match Award.\nR Sharma got 2 times Man of the Match Award.\nMR Marsh got 1 time Man of the Match Award.\nBA Bhatt got 1 time Man of the Match Award.\nS Aravind got 1 time Man of the Match Award.\nnan got 3 times Man of the Match Award.\nJEC Franklin got 1 time Man of the Match Award.\nRE Levi got 1 time Man of the Match Award.\nAM Rahane got 
12 times Man of the Match Award.\nRA Jadeja got 8 times Man of the Match Award.\nMN Samuels got 1 time Man of the Match Award.\nM Morkel got 2 times Man of the Match Award.\nF du Plessis got 3 times Man of the Match Award.\nAD Mascarenhas got 1 time Man of the Match Award.\nShakib Al Hasan got 2 times Man of the Match Award.\nJD Ryder got 1 time Man of the Match Award.\nS Nadeem got 1 time Man of the Match Award.\nKMDN Kulasekara got 1 time Man of the Match Award.\nCL White got 1 time Man of the Match Award.\nMandeep Singh got 3 times Man of the Match Award.\nP Negi got 2 times Man of the Match Award.\nAzhar Mahmood got 2 times Man of the Match Award.\nBW Hilfenhaus got 2 times Man of the Match Award.\nA Chandila got 1 time Man of the Match Award.\nUT Yadav got 8 times Man of the Match Award.\nMS Bisla got 1 time Man of the Match Award.\nM Vohra got 3 times Man of the Match Award.\nGH Vihari got 2 times Man of the Match Award.\nAJ Finch got 5 times Man of the Match Award.\nJP Faulkner got 5 times Man of the Match Award.\nMS Gony got 1 time Man of the Match Award.\nDA Miller got 3 times Man of the Match Award.\nDJG Sammy got 1 time Man of the Match Award.\nMG Johnson got 2 times Man of the Match Award.\nKK Cooper got 1 time Man of the Match Award.\nPA Patel got 4 times Man of the Match Award.\nAP Tare got 1 time Man of the Match Award.\nLJ Wright got 1 time Man of the Match Award.\nYS Chahal got 1 time Man of the Match Award.\nPV Tambe got 2 times Man of the Match Award.\nDJ Hooda got 1 time Man of the Match Award.\nGJ Bailey got 1 time Man of the Match Award.\nAD Russell got 7 times Man of the Match Award.\nMA Agarwal got 1 time Man of the Match Award.\nMA Starc got 1 time Man of the Match Award.\nVR Aaron got 1 time Man of the Match Award.\nTA Boult got 1 time Man of the Match Award.\nEJG Morgan got 1 time Man of the Match Award.\nHH Pandya got 3 times Man of the Match Award.\nMC Henriques got 2 times Man of the Match Award.\nZ Khan got 1 time Man of the Match Award.\nQ de Kock got 1 time Man of the Match Award.\nMustafizur Rahman got 1 time Man of the Match Award.\nSA Yadav got 1 time Man of the Match Award.\nAB Dinda got 2 times Man of the Match Award.\nCH Morris got 2 times Man of the Match Award.\nCR Brathwaite got 1 time Man of the Match Award.\nMP Stoinis got 2 times Man of the Match Award.\nA Zampa got 1 time Man of the Match Award.\nBCJ Cutting got 1 time Man of the Match Award.\nKL Rahul got 2 times Man of the Match Award.\nSW Billings got 1 time Man of the Match Award.\nJJ Roy got 1 time Man of the Match Award.\nB Stanlake got 1 time Man of the Match Award.\nJ Archer got 1 time Man of the Match Award.\nAS Rajpoot got 2 times Man of the Match Award.\nTG Southee got 1 time Man of the Match Award.\nAS Yadav got 1 time Man of the Match Award.\nM Ur Rahman got 1 time Man of the Match Award.\nIshan Kishan got 1 time Man of the Match Award.\nKuldeep Yadav got 1 time Man of the Match Award.\nS Gopal got 1 time Man of the Match Award.\nL Ngidi got 1 time Man of the Match Award.\n"
]
],
[
[
"### Plotting the results of Man Of the Match Award in IPL 2008 - 2018",
"_____no_output_____"
]
],
[
[
"player_names = list(player_of_match.keys())\nnumber_of_times = list(player_of_match.values())\n\n# Plotting the Graph\n\nplt.bar(range(len(player_of_match)), number_of_times)\nplt.title('Man Of the Match Award')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Number Of Wins Of Each Team",
"_____no_output_____"
]
],
[
[
"teamWinCounts = dict()\n\nfor team in matches_dataset['winner']:\n if team == None:\n continue\n else:\n teamWinCounts[team] = teamWinCounts.get(team,0) + 1\n\nfor teamName, Count in teamWinCounts.items():\n print(teamName,':',Count)",
"Sunrisers Hyderabad : 52\nRising Pune Supergiant : 10\nKolkata Knight Riders : 86\nKings XI Punjab : 76\nRoyal Challengers Bangalore : 79\nMumbai Indians : 98\nDelhi Daredevils : 67\nGujarat Lions : 13\nChennai Super Kings : 90\nRajasthan Royals : 70\nDeccan Chargers : 29\nPune Warriors : 12\nKochi Tuskers Kerala : 6\nnan : 3\nRising Pune Supergiants : 5\n"
]
],
[
[
"### Plotting the Results Of Team Winning",
"_____no_output_____"
]
],
[
[
"numberOfWins = teamWinCounts.values()\nteamName = teamWinCounts.keys()\nplt.bar(range(len(teamWinCounts)), numberOfWins)\nplt.xticks(range(len(teamWinCounts)), list(teamWinCounts.keys()), rotation='vertical')\nplt.xlabel('Team Names')\nplt.ylabel('Number Of Win Matches')\nplt.title('Analysis Of Number Of Matches win by Each Team From 2008 - 2018', color=\"Orange\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Total Matches Played by Each team From 2008 - 2018",
"_____no_output_____"
]
],
[
[
"totalMatchesCount = dict()\n\n# For Team1\nfor team in matches_dataset['team1']:\n totalMatchesCount[team] = totalMatchesCount.get(team, 0) + 1\n\n# For Team2\n\nfor team in matches_dataset['team2']:\n totalMatchesCount[team] = totalMatchesCount.get(team, 0) + 1\n\n# Printing the total matches played by each team\nfor teamName, count in totalMatchesCount.items():\n print('{} : {}'.format(teamName,count))",
"Sunrisers Hyderabad : 93\nMumbai Indians : 171\nGujarat Lions : 30\nRising Pune Supergiant : 16\nRoyal Challengers Bangalore : 166\nKolkata Knight Riders : 164\nDelhi Daredevils : 161\nKings XI Punjab : 162\nChennai Super Kings : 147\nRajasthan Royals : 133\nDeccan Chargers : 75\nKochi Tuskers Kerala : 14\nPune Warriors : 46\nRising Pune Supergiants : 14\n"
]
],
[
[
"### Plotting the Total Matches Played by Each Team",
"_____no_output_____"
]
],
[
[
"teamNames = totalMatchesCount.keys()\nteamCount = totalMatchesCount.values()\n\nplt.bar(range(len(totalMatchesCount)), teamCount)\nplt.xticks(range(len(totalMatchesCount)), list(teamNames), rotation='vertical')\nplt.xlabel('Team Names')\nplt.ylabel('Number Of Played Matches')\nplt.title('Total Number Of Matches Played By Each Team From 2008 - 2018')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7ca1461baffcfa2fe555f70e71a2b5470353a3d | 59,640 | ipynb | Jupyter Notebook | examples/plot_twiss.ipynb | carneirofc/pyaccel | e639cb6191293eec4c05aeeaf513d9066de8da2b | [
"MIT"
] | 6 | 2015-04-13T23:47:37.000Z | 2021-12-09T17:48:01.000Z | examples/plot_twiss.ipynb | carneirofc/pyaccel | e639cb6191293eec4c05aeeaf513d9066de8da2b | [
"MIT"
] | 53 | 2015-04-13T23:35:42.000Z | 2021-12-16T02:44:37.000Z | examples/plot_twiss.ipynb | carneirofc/pyaccel | e639cb6191293eec4c05aeeaf513d9066de8da2b | [
"MIT"
] | 3 | 2015-11-06T05:20:57.000Z | 2022-02-08T13:10:20.000Z | 317.234043 | 53,387 | 0.916499 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7ca18ea090c2c89062aba30dbb204b54f6175ca | 20,132 | ipynb | Jupyter Notebook | AE_VAE_CAE_CVAE/LabExercise3.ipynb | quantumiracle/Course_Code | 5dffd874b33af983fc309e064cc3eaeebbebbae4 | [
"MIT"
] | null | null | null | AE_VAE_CAE_CVAE/LabExercise3.ipynb | quantumiracle/Course_Code | 5dffd874b33af983fc309e064cc3eaeebbebbae4 | [
"MIT"
] | null | null | null | AE_VAE_CAE_CVAE/LabExercise3.ipynb | quantumiracle/Course_Code | 5dffd874b33af983fc309e064cc3eaeebbebbae4 | [
"MIT"
] | 1 | 2021-06-12T16:06:10.000Z | 2021-06-12T16:06:10.000Z | 25.387137 | 136 | 0.513859 | [
[
[
"# CO460 - Deep Learning - Lab exercise 3",
"_____no_output_____"
],
[
"## Introduction\n\nIn this exercise, you will develop and experiment with convolutional AEs (CAE) and VAEs (CVAE).\nYou will be asked to:\n\n- experiment with the architectures and compare the convolutional models to the fully connected ones. \n- investigate and implement sampling and interpolation in the latent space.",
"_____no_output_____"
]
],
[
[
"import os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image \nimport torch.nn.functional as F\nfrom utils import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom utils import denorm_for_tanh, denorm_for_sigmoid",
"_____no_output_____"
]
],
[
[
"### Device selection",
"_____no_output_____"
]
],
[
[
"GPU = True\ndevice_idx = 0\nif GPU:\n device = torch.device(\"cuda:\"+str(device_idx) if torch.cuda.is_available() else \"cpu\")\nelse:\n \n device = torch.device(\"cpu\")\nprint(device)",
"cuda:0\n"
]
],
[
[
"### Reproducibility",
"_____no_output_____"
]
],
[
[
"# We set a random seed to ensure that your results are reproducible.\nif torch.cuda.is_available():\n torch.backends.cudnn.deterministic = True\ntorch.manual_seed(0)",
"_____no_output_____"
]
],
[
[
"## Part 1 - CAE",
"_____no_output_____"
],
[
"### Normalization: \n$ x_{norm} = \\frac{x-\\mu}{\\sigma} $\n\n_Thus_ :\n$ \\min{x_{norm}} = \\frac{\\min{(x)}-\\mu}{\\sigma} = \\frac{0-0.5}{0.5} = -1 $\n\n_Similarly_:\n\n$ \\max{(x_{norm})} = ... = 1 $\n\n\n* Input $\\in [-1,1] $\n* Output should span the same interval $ \\rightarrow$ Activation function of the output layer should be chosen carfeully (Here??)",
"_____no_output_____"
]
],
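[
[
"Since the inputs are scaled to $[-1, 1]$, a `Tanh` output activation matches that range, and `denorm_for_tanh` (imported from `utils`) maps network outputs back to $[0, 1]$ before images are saved. The real helper lives in `utils.py`; a minimal sketch of what it presumably does:\n\n```python\ndef denorm_for_tanh(x):\n    # map values from [-1, 1] back to [0, 1] and clamp for safety\n    return ((x + 1) / 2).clamp(0, 1)\n```",
"_____no_output_____"
]
],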
[
[
"transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n])\n\ndenorm = denorm_for_tanh\n\ntrain_dat = datasets.MNIST(\n \"data/\", train=True, download=True, transform=transform\n)\ntest_dat = datasets.MNIST(\"data/\", train=False, transform=transform)",
"Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\nProcessing...\nDone!\n"
]
],
[
[
"### Hyper-parameter selection",
"_____no_output_____"
]
],
[
[
"if not os.path.exists('./CAE'):\n os.mkdir('./CAE')\n \nnum_epochs = 20\nbatch_size = 128\nlearning_rate = 1e-3",
"_____no_output_____"
]
],
[
[
"### Define the dataloaders",
"_____no_output_____"
]
],
[
[
"train_loader = DataLoader(train_dat, batch_size, shuffle=True)\ntest_loader = DataLoader(test_dat, batch_size, shuffle=False)\n\nit = iter(test_loader)\nsample_inputs, _ = next(it)\nfixed_input = sample_inputs[:32, :, :, :]\n\nin_dim = fixed_input.shape[-1]*fixed_input.shape[-2]\n\nsave_image(fixed_input, './CAE/image_original.png')",
"_____no_output_____"
]
],
[
[
"### Define the model - CAE\n\nComplete the `encoder` and `decoder` methods in the CAE pipeline.\n\nTo find an effective architecture, you can experiment with the following:\n- the number of convolutional layers\n- the kernels' sizes\n- the stride values\n- the size of the latent space layer",
"_____no_output_____"
]
],
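[
[
"For orientation, one possible layout for the $1 \\times 28 \\times 28$ MNIST inputs is sketched below. It is only an illustration of the kind of layers that could go into `__init__` and then be used by `encode`/`decode` - the layer counts, kernel sizes, strides and `latent_dim` are all free choices in this exercise.\n\n```python\n# illustrative layers for __init__ (not the required solution):\nself.enc_conv = nn.Sequential(\n    nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),   # -> 16 x 14 x 14\n    nn.ReLU(),\n    nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),  # -> 32 x 7 x 7\n    nn.ReLU(),\n)\nself.enc_fc = nn.Linear(32 * 7 * 7, latent_dim)\nself.dec_fc = nn.Linear(latent_dim, 32 * 7 * 7)\nself.dec_conv = nn.Sequential(\n    nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1),  # -> 16 x 14 x 14\n    nn.ReLU(),\n    nn.ConvTranspose2d(16, 1, kernel_size=3, stride=2, padding=1, output_padding=1),   # -> 1 x 28 x 28\n    nn.Tanh(),  # keeps outputs in [-1, 1], matching the normalised inputs\n)\n```",
"_____no_output_____"
]
],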
[
[
"class CAE(nn.Module):\n def __init__(self, latent_dim):\n super(CAE, self).__init__()\n \"\"\"\n TODO: Define here the layers (convolutions, relu etc.) that will be\n used in the encoder and decoder pipelines.\n \"\"\"\n \n \n def encode(self, x):\n \"\"\"\n TODO: Construct the encoder pipeline here. The encoder's\n output will be the laten space representation of x.\n \n \"\"\"\n \n return x\n \n def decode(self, z):\n \"\"\"\n TODO: Construct the decoder pipeline here. The decoder should \n generate an output tensor with equal dimenssions to the\n encoder's input tensor.\n \n \"\"\"\n \n return z\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x",
"_____no_output_____"
],
[
"# Instantiate the model\nlatent_dim = \ncv_AE = CAE(latent_dim=latent_dim)",
"_____no_output_____"
]
],
[
[
"### Define Loss function",
"_____no_output_____"
]
],
[
[
"criterion = nn.L1Loss(reduction='sum') # can we use any other loss here?\ndef loss_function_CAE(recon_x, x):\n recon_loss = criterion(recon_x, x)\n return recon_loss",
"_____no_output_____"
]
],
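[
[
"On the inline question above: any element-wise reconstruction loss can be used here. Mean squared error is the most common alternative to L1 - it penalises large per-pixel errors more heavily, which tends to give smoother (sometimes blurrier) reconstructions.\n\n```python\n# an alternative reconstruction criterion (MSE instead of L1)\ncriterion = nn.MSELoss(reduction='sum')\n```",
"_____no_output_____"
]
],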
[
[
"### Initialize Model and print number of parameters",
"_____no_output_____"
]
],
[
[
"model = cv_AE.to(device)\nparams = sum(p.numel() for p in model.parameters() if p.requires_grad)\nprint(\"Total number of parameters is: {}\".format(params)) # what would the number actually be?\nprint(model)",
"_____no_output_____"
]
],
[
[
"### Choose and initialize optimizer",
"_____no_output_____"
]
],
[
[
"optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)",
"_____no_output_____"
]
],
[
[
"### Train",
"_____no_output_____"
]
],
[
[
"model.train()\n\nfor epoch in range(num_epochs):\n train_loss = 0\n for batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n optimizer.zero_grad()\n # forward\n recon_batch = model(img)\n loss = loss_function_CAE(recon_batch, img)\n # backward\n loss.backward()\n train_loss += loss.item()\n optimizer.step()\n # print out losses and save reconstructions for every epoch\n print('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, num_epochs, train_loss / len(train_loader.dataset)))\n recon = denorm(model(fixed_input.to(device)))\n save_image(recon, './CAE/reconstructed_epoch_{}.png'.format(epoch))\n\n# save the model\ntorch.save(model.state_dict(), './CAE/model.pth')",
"_____no_output_____"
]
],
[
[
"### Test",
"_____no_output_____"
]
],
[
[
"# load the model\nmodel.load_state_dict(torch.load(\"./CAE/model.pth\"))\nmodel.eval()\ntest_loss = 0\nwith torch.no_grad():\n for i, (img, _) in enumerate(test_loader):\n img = img.to(device)\n recon_batch = model(img)\n test_loss += loss_function_CAE(recon_batch, img)\n # reconstruct and save the last batch\n recon_batch = model(recon_batch.to(device))\n img = denorm(img.cpu())\n # save the original last batch\n save_image(img, './CAE/test_original.png')\n save_image(denorm(recon_batch.cpu()), './CAE/reconstructed_test.png')\n # loss calculated over the whole test set\n test_loss /= len(test_loader.dataset)\n print('Test set loss: {:.4f}'.format(test_loss))",
"_____no_output_____"
]
],
[
[
"### Interpolations",
"_____no_output_____"
]
],
[
[
"# Define inpute tensors\nx1 = \nx2 = \n\n# Create the latent representations\nz1 = model.encode(x1)\nz2 = model.encode(x2)\n\n\"\"\"\nTODO: Find a way to create interpolated results from the CAE.\n\"\"\"\nZ = \nX_hat = model.decode(Z)",
"_____no_output_____"
]
],
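[
[
"One straightforward way to fill in the blanks above is a linear interpolation between the two latent codes (a sketch only - it assumes `x1` and `x2` are single test images already shaped `[1, 1, 28, 28]` and moved to `device`, that the encoder outputs codes of shape `[1, latent_dim]`, and the output file name is arbitrary). The same idea carries over to the CVAE in Part 2, using the posterior mean as the latent code.\n\n```python\nalphas = torch.linspace(0, 1, steps=10, device=z1.device)\n# stack 10 interpolated codes between z1 and z2 into one batch\nZ = torch.cat([(1 - a) * z1 + a * z2 for a in alphas], dim=0)\nX_hat = model.decode(Z)\nsave_image(denorm(X_hat.cpu()), './CAE/interpolation.png', nrow=10)\n```",
"_____no_output_____"
]
],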
[
[
"## Part 2 - CVAE",
"_____no_output_____"
],
[
"### Normalization",
"_____no_output_____"
]
],
[
[
"transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n])\n\ndenorm = denorm_for_tanh\n\ntrain_dat = datasets.MNIST(\n \"data/\", train=True, download=True, transform=transform\n)\ntest_dat = datasets.MNIST(\"data/\", train=False, transform=transform)",
"_____no_output_____"
]
],
[
[
"### Hyper-parameter selection",
"_____no_output_____"
]
],
[
[
"if not os.path.exists('./CVAE'):\n os.mkdir('./CVAE')\n \nnum_epochs = 20\nbatch_size = 128\nlearning_rate = 1e-3",
"_____no_output_____"
]
],
[
[
"### Define the dataloaders",
"_____no_output_____"
]
],
[
[
"train_loader = DataLoader(train_dat, batch_size, shuffle=True)\ntest_loader = DataLoader(test_dat, batch_size, shuffle=False)\n\nit = iter(test_loader)\nsample_inputs, _ = next(it)\nfixed_input = sample_inputs[:32, :, :, :]\n\nin_dim = fixed_input.shape[-1]*fixed_input.shape[-2]\n\nsave_image(fixed_input, './CVAE/image_original.png')",
"_____no_output_____"
]
],
[
[
"### Define the model - CVAE\n\nComplete the `encoder` and `decoder` methods in the CVAE pipeline.\n\nTo find an effective architecture, you can experiment with the following:\n- the number of convolutional layers\n- the kernels' sizes\n- the stride values\n- the size of the latent space layer",
"_____no_output_____"
]
],
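[
[
"Of the three TODOs, the reparameterisation step has a fairly standard form: draw `eps` from a standard normal and shift/scale it with the predicted mean and log-variance, so that gradients can flow back through `mu` and `logvar`. A minimal sketch:\n\n```python\ndef reparametrize(self, mu, logvar):\n    std = torch.exp(0.5 * logvar)  # logvar = log(sigma^2), so this recovers sigma\n    eps = torch.randn_like(std)    # eps ~ N(0, I)\n    return mu + eps * std\n```",
"_____no_output_____"
]
],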
[
[
"class CVAE(nn.Module):\n def __init__(self, latent_dim):\n super(CVAE, self).__init__()\n \"\"\"\n TODO: Define here the layers (convolutions, relu etc.) that will be\n used in the encoder and decoder pipelines.\n \"\"\"\n \n \n def encode(self, x):\n \"\"\"\n TODO: Construct the encoder pipeline here. \n \"\"\"\n\n return mu, logvar\n\n def reparametrize(self, mu, logvar):\n \"\"\"\n TODO: Implement reparameterization here.\n \"\"\"\n\n return z\n\n def decode(self, z):\n \"\"\"\n TODO: Construct the decoder pipeline here. \n \"\"\"\n\n return z\n \n def forward(self, x):\n mu, logvar = self.encode(x)\n z = self.reparametrize(mu, logvar)\n x_hat = self.decode(z)\n return x_hat, mu, logvar",
"_____no_output_____"
],
[
"# Instantiate the model\nlatent_dim = \ncv_VAE = CVAE(latent_dim =latent_dim)",
"_____no_output_____"
]
],
[
[
"### Define Loss function",
"_____no_output_____"
]
],
[
[
"# Reconstruction + KL divergence losses summed over all elements and batch\ndef loss_function_VAE(recon_x, x, mu, logvar):\n BCE = F.binary_cross_entropy(recon_x, x, size_average=False)\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n return BCE + KLD",
"_____no_output_____"
]
],
[
[
"### Initialize Model and print number of parameters",
"_____no_output_____"
]
],
[
[
"model = cv_AE.to(device)\nparams = sum(p.numel() for p in model.parameters() if p.requires_grad)\nprint(\"Total number of parameters is: {}\".format(params)) # what would the number actually be?\nprint(model)",
"_____no_output_____"
]
],
[
[
"### Choose and initialize optimizer",
"_____no_output_____"
]
],
[
[
"optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)",
"_____no_output_____"
]
],
[
[
"### Train",
"_____no_output_____"
]
],
[
[
"model.train()\n\nfor epoch in range(num_epochs):\n train_loss = 0\n for batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n optimizer.zero_grad()\n # forward\n recon_batch = model(img)\n loss = loss_function_CAE(recon_batch, img)\n # backward\n loss.backward()\n train_loss += loss.item()\n optimizer.step()\n # print out losses and save reconstructions for every epoch\n print('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, num_epochs, train_loss / len(train_loader.dataset)))\n recon = denorm(model(fixed_input.to(device)))\n save_image(recon, './CVAE/reconstructed_epoch_{}.png'.format(epoch))\n\n# save the model\ntorch.save(model.state_dict(), './CVAE/model.pth')",
"_____no_output_____"
]
],
[
[
"### Test",
"_____no_output_____"
]
],
[
[
"# load the model\nmodel.load_state_dict(torch.load(\"./CVAE/model.pth\"))\nmodel.eval()\ntest_loss = 0\nwith torch.no_grad():\n for i, (img, _) in enumerate(test_loader):\n img = img.to(device)\n recon_batch = model(img)\n test_loss += loss_function_CAE(recon_batch, img)\n # reconstruct and save the last batch\n recon_batch = model(recon_batch.to(device))\n img = denorm(img.cpu())\n # save the original last batch\n save_image(img, './CVAE/test_original.png')\n save_image(denorm(recon_batch.cpu()), './CVAE/reconstructed_test.png')\n # loss calculated over the whole test set\n test_loss /= len(test_loader.dataset)\n print('Test set loss: {:.4f}'.format(test_loss))",
"_____no_output_____"
]
],
[
[
"### Sample",
"_____no_output_____"
],
[
"Sample the latent space and use the `decoder` to generate resutls.",
"_____no_output_____"
]
],
[
[
"model.load_state_dict(torch.load(\"./CVAE/model.pth\"))\nmodel.eval()\nwith torch.no_grad():\n \"\"\"\n TODO: Investigate how to sample the latent space of the CVAE.\n \"\"\"\n z = \n sample = model.decode(z)\n save_image(denorm(sample).cpu(), './CVAE/samples_' + '.png')",
"_____no_output_____"
]
],
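[
[
"Because the KL term pushes the approximate posterior towards the standard normal prior, new digits can be generated by decoding latent vectors drawn from $N(0, I)$. A sketch of how the blank above could be filled (it assumes `latent_dim` has been set; the batch size of 64 is an arbitrary choice):\n\n```python\nz = torch.randn(64, latent_dim).to(device)\nsample = model.decode(z)\n```",
"_____no_output_____"
]
],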
[
[
"### Interpolations",
"_____no_output_____"
]
],
[
[
"# Define inpute tensors\nx1 = \nx2 = \n\n# Create the latent representations\nz1 = model.encode(x1)\nz2 = model.encode(x2)\n\n\"\"\"\nTODO: Find a way to create interpolated results from the CVAE.\n\"\"\"\nZ = \nX_hat = model.decode(Z)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7ca2047f8ee1fa93e842b01e7e479d4a77cdac0 | 20,563 | ipynb | Jupyter Notebook | notebooks/0.31-compare-sequence-models-bf/0.2-bf-FOMM-SOMM-HDBSCAN-latent-models.ipynb | xingjeffrey/avgn_paper | 412e95dabc7b7b13a434b85cc54a21c06efe4e2b | [
"MIT"
] | null | null | null | notebooks/0.31-compare-sequence-models-bf/0.2-bf-FOMM-SOMM-HDBSCAN-latent-models.ipynb | xingjeffrey/avgn_paper | 412e95dabc7b7b13a434b85cc54a21c06efe4e2b | [
"MIT"
] | null | null | null | notebooks/0.31-compare-sequence-models-bf/0.2-bf-FOMM-SOMM-HDBSCAN-latent-models.ipynb | xingjeffrey/avgn_paper | 412e95dabc7b7b13a434b85cc54a21c06efe4e2b | [
"MIT"
] | null | null | null | 32.795853 | 253 | 0.515489 | [
[
[
"%load_ext autoreload\n%autoreload 2\n%env CUDA_DEVICE_ORDER=PCI_BUS_ID\n%env CUDA_VISIBLE_DEVICES=0",
"env: CUDA_DEVICE_ORDER=PCI_BUS_ID\nenv: CUDA_VISIBLE_DEVICES=0\n"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm.autonotebook import tqdm\nimport pandas as pd\nfrom cuml.manifold.umap import UMAP as cumlUMAP\nfrom avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir\nfrom joblib import Parallel, delayed",
"/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n"
],
[
"import pomegranate\nfrom pomegranate import DiscreteDistribution, HiddenMarkovModel\npomegranate.utils.disable_gpu()\nfrom hmmlearn import hmm",
"_____no_output_____"
]
],
[
[
"### Fit models code",
"_____no_output_____"
]
],
[
[
"def AIC(log_likelihood, k):\n \"\"\" AIC given log_likelihood and # parameters (k)\n \"\"\"\n aic = 2 * k - 2 * log_likelihood\n return aic\n\n\ndef BIC(log_likelihood, n, k):\n \"\"\" BIC given log_likelihood, number of observations (n) and # parameters (k)\n \"\"\"\n bic = np.log(n) * k - 2 * log_likelihood\n return bic\n\ndef FOMM(seqs, prop_test=0.5):\n \"\"\" create a FOMM in pomegranite\n \"\"\"\n if prop_test == 0:\n seqs_train = seqs_test = seqs\n else:\n # split into train and test for cross validation\n training_mask = np.random.choice(\n np.arange(len(seqs)), size=int(len(seqs) * prop_test), replace=False\n )\n testing_mask = np.array(\n [i for i in np.arange(len(seqs)) if i not in training_mask]\n )\n\n seqs_train = np.array(seqs)[training_mask]\n seqs_test = np.array(seqs)[testing_mask]\n\n # make sure test set doesn't contain any data that train doesnt\n assert np.all(\n [\n i in np.unique(np.concatenate(seqs_train))\n for i in np.unique(np.concatenate(seqs_test))\n ]\n )\n\n # lengths of sequences\n seq_lens = [len(i) for i in seqs_train]\n\n # get states\n unique_states = np.unique(np.concatenate(seqs_train))\n\n # get start probabilities\n seq_starts = np.array([i[0] for i in seqs_train])\n start_probs = [np.sum(seq_starts == i) / len(seqs_train) for i in unique_states]\n\n end_states = [seq[-1] for seq in seqs]\n end_probs = [\n np.sum(end_states == i) / (np.sum(np.concatenate(seqs) == i) + 1)\n for i in np.arange(len(unique_states))\n ]\n\n # transition probs\n trans_mat = np.zeros((len(unique_states), len(unique_states)))\n for seq in seqs_train:\n for i, j in zip(seq[:-1], seq[1:]):\n trans_mat[i, j] += 1\n # smooth to nonzero probabilities\n trans_mat = (trans_mat.T / trans_mat.sum(axis=1)).T # np.sum(trans_mat, axis=1)\n\n # smooth emissions\n emission_prob = np.identity(len(unique_states)) + 1e-5\n emission_prob = (emission_prob.T / emission_prob.sum(axis=1)).T\n\n # number of datapoints\n test_seq_lens = [len(i) for i in seqs_test]\n n_data = np.sum(test_seq_lens)\n\n # initialize pomegranate model\n\n transmat = trans_mat\n start_probs = start_probs\n dists = emission_prob\n\n states = [\n DiscreteDistribution({vis: d[i] for i, vis in enumerate(unique_states)})\n for d in dists\n ]\n pom_model = HiddenMarkovModel.from_matrix(\n transition_probabilities=transmat,\n distributions=states,\n starts=start_probs,\n ends=end_probs, # discluding ends and merge makes models equal log prob\n merge=\"None\",\n )\n pom_model.bake()\n pom_log_probability = np.sum([pom_model.log_probability(seq) for seq in seqs_test])\n \n\n # number of params in model\n num_params = (\n pom_model.edge_count() + pom_model.node_count() + pom_model.state_count() # no hidden states in FOMM\n )\n\n # AIC and BIC\n aic = AIC(pom_log_probability, num_params)\n bic = BIC(pom_log_probability, n_data, num_params)\n return (\n pom_model,\n seqs_train,\n seqs_test,\n pom_log_probability,\n num_params,\n n_data,\n aic,\n bic,\n )\n\ndef fit_fixed_latent(seqs, latent_seqs, verbose=False):\n\n unique_latent_labels = np.unique(np.concatenate(latent_seqs))\n n_components = len(unique_latent_labels)\n\n # convert latent sequences to correct format\n label_seqs_str = [\n [\"None-start\"] + [\"s\" + str(i) for i in seq] + [\"None-end\"]\n for seq in latent_seqs\n ]\n \n pom_model = HiddenMarkovModel.from_samples(\n distribution=DiscreteDistribution,\n n_components=len(unique_latent_labels),\n X=seqs,\n labels=label_seqs_str,\n end_state=True,\n algorithm=\"labeled\",\n verbose=verbose,\n )\n\n log_prob = 
[pom_model.log_probability(seq) for seq in seqs]\n\n sum_log_prob = np.sum(log_prob)\n \n num_params = (\n pom_model.state_count() + pom_model.edge_count() + pom_model.node_count()\n )\n\n n_data = np.sum([len(i) for i in seqs])\n\n aic = AIC(sum_log_prob, num_params)\n bic = BIC(sum_log_prob, n_data, num_params)\n\n return pom_model, log_prob, sum_log_prob, n_components, num_params, n_data, aic, bic",
"_____no_output_____"
],
[
"DATASET_ID = 'koumura_bengalese_finch'\nembeddings_dfs = list(DATA_DIR.glob('bf_label_dfs/'+DATASET_ID+'/*.pickle'))\nDATASET_ID = 'bengalese_finch_sober'\nembeddings_dfs = embeddings_dfs + list(DATA_DIR.glob('bf_label_dfs/'+DATASET_ID+'/*.pickle'))",
"_____no_output_____"
],
[
"embeddings_dfs",
"_____no_output_____"
],
[
"for loc in tqdm(embeddings_dfs):\n # read dataframe\n indv_df = pd.read_pickle(loc).sort_values(by=[\"key\", \"start_time\"])\n indv = indv_df.indv.unique()[0]\n\n # Get seqs\n hand_seqs = [\n list(indv_df[indv_df.syllables_sequence_id == seqid][\"labels_num\"].values)\n for seqid in indv_df.syllables_sequence_id.unique()\n ]\n\n results_df_FOMM = pd.DataFrame(\n [FOMM(hand_seqs, prop_test=0)],\n columns=[\n \"pom_model\",\n \"seqs_train\",\n \"seqs_test\",\n \"pom_log_probability\",\n \"n_params\",\n \"n_data\",\n \"aic\",\n \"bic\",\n ],\n )\n results_df_FOMM[\"indv\"] = indv\n save_loc = DATA_DIR / \"HMM_fits\" / \"FOMM\" / (indv + \".pickle\")\n ensure_dir(save_loc)\n results_df_FOMM.to_pickle(save_loc)\n\n ### HDBSCAN as latent\n # HDBSCAN seqs\n \n for hdbscan_labels in [\"hdbscan_labels_num\", \"hdbscan_labels-0.1_num\", \"hdbscan_labels-0.25_num\"]:\n hdbscan_latent_seqs = [\n list(\n indv_df[indv_df.syllables_sequence_id == seqid][hdbscan_labels].values\n )\n for seqid in indv_df.syllables_sequence_id.unique()\n ]\n\n # make latent df\n results_df_umap_hidden = pd.DataFrame(\n [fit_fixed_latent(hand_seqs, hdbscan_latent_seqs, verbose=False)],\n columns=[\n \"pom_model\",\n \"log_prob\",\n \"sum_log_prob\",\n \"n_components\",\n \"num_params\",\n \"n_data\",\n \"aic\",\n \"bic\",\n ],\n )\n results_df_umap_hidden[\"indv\"] = indv\n save_loc = DATA_DIR / \"HMM_fits\" / hdbscan_labels / \"HDBSCAN\" / (indv + \".pickle\")\n ensure_dir(save_loc)\n results_df_umap_hidden.to_pickle(save_loc)\n\n ### second order model\n seqs_second_order_states = [\n list(\n indv_df[indv_df.syllables_sequence_id == seqid][\n \"seqs_second_order_states\"\n ].values\n )\n for seqid in indv_df.syllables_sequence_id.unique()\n ]\n\n results_df_second_order_hidden = pd.DataFrame(\n [fit_fixed_latent(hand_seqs, seqs_second_order_states, verbose=False)],\n columns=[\n \"pom_model\",\n \"log_prob\",\n \"sum_log_prob\",\n \"n_components\",\n \"num_params\",\n \"n_data\",\n \"aic\",\n \"bic\",\n ],\n )\n results_df_second_order_hidden[\"indv\"] = indv\n save_loc = DATA_DIR / \"SOMM\" / (indv + \".pickle\")\n ensure_dir(save_loc)\n results_df_second_order_hidden.to_pickle(save_loc)\n\n print(\n \"---{}---\\nAIC: \\n\\tSOMM: {}\\n\\tFOMM: {} \\n\\tHDBSCAN: {} \\nLL: \\n\\tSOMM: {}\\n\\tFOMM: {} \\n\\tHDBSCAN: {}\".format(\n indv,\n round(results_df_second_order_hidden.aic.values[0]),\n round(results_df_umap_hidden.aic.values[0]),\n round(results_df_FOMM.aic.values[0]),\n round(results_df_second_order_hidden.sum_log_prob.values[0]),\n round(results_df_umap_hidden.sum_log_prob.values[0]),\n round(results_df_FOMM.pom_log_probability.values[0]),\n )\n )",
"_____no_output_____"
],
[
"DATA_DIR",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7ca319a2031eb2dc2491ae3e4bb28a8bdb7cbe8 | 14,281 | ipynb | Jupyter Notebook | original_notebooks/L2_Non-Parametric_Tests_Part_1_Solution.ipynb | epasseto/ThirdProjectStudies | afe85efc26ce9520ee754590c7bc003832a00e9d | [
"MIT"
] | null | null | null | original_notebooks/L2_Non-Parametric_Tests_Part_1_Solution.ipynb | epasseto/ThirdProjectStudies | afe85efc26ce9520ee754590c7bc003832a00e9d | [
"MIT"
] | null | null | null | original_notebooks/L2_Non-Parametric_Tests_Part_1_Solution.ipynb | epasseto/ThirdProjectStudies | afe85efc26ce9520ee754590c7bc003832a00e9d | [
"MIT"
] | null | null | null | 53.486891 | 932 | 0.655066 | [
[
[
"# Non-Parametric Tests Part I\n\nUp until now, you've been using standard hypothesis tests on means of normal distributions to design and analyze experiments. However, it's possible that you will encounter scenarios where you can't rely on only standard tests. This might be due to uncertainty about the true variability of a metric's distribution, a lack of data to assume normality, or wanting to do inference on a statistic that lacks a standard test. It's useful to know about some **non-parametric tests** not just as a workaround for cases like this, but also as a second check on your experimental results. The main benefit of a non-parametric test is that they don't rely on many assumptions of the underlying population, and so can be used in a wider range of circumstances compared to standard tests. In this notebook, you'll cover two non-parametric approaches that use resampling of the data to make inferences about distributions and differences.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n% matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Bootstrapping\n\nBootstrapping is used to estimate sampling distributions by using the actually collected data to generate new samples that could have been hypothetically collected. In a standard bootstrap, a bootstrapped sample means drawing points from the original data _with replacement_ until we get as many points as there were in the original data. Essentially, we're treating the original data as the population: without making assumptions about the original population distribution, using the original data as a model of the population is the best that we can do.\n\nTaking a lot of bootstrapped samples allows us to estimate the sampling distribution for various statistics on our original data. For example, let's say that we wanted to create a 95% confidence interval for the 90th percentile from a dataset of 5000 data points. (Perhaps we're looking at website load times and want to reduce the worst cases.) Bootstrapping makes this easy to estimate. First of all, we take a bootstrap sample (i.e. draw 5000 points with replacement from the original data) and record the 90th percentile and repeat this a large number of times, let's say 100 000. From this bunch of bootstrapped 90th percentile estimates, we form our confidence interval by finding the values that capture the central 95% of the estimates (cutting off 2.5% on each tail). Implement this operation in the cells below, using the following steps:\n\n- Initialize some useful variables by storing the number of data points in `n_points` and setting up an empty list for the bootstrapped quantile values in `sample_qs`.\n- Create a loop for each trial where:\n - First generate a bootstrap sample by sampling from our data with replacement. ([`random.choice`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.choice.html) will be useful here.)\n - Then, compute the `q`th quantile of the sample and add it to the `sample_qs` list. If you're using numpy v0.15 or later, you can use the [`quantile`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.quantile.html) function to get the quantile directly with `q`; on v0.14 or earlier, you'll need to put `q` in terms of a percentile and use [`percentile`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.percentile.html) instead.\n- After gathering the bootstrapped quantiles, find the limits that capture the central `c` proportion of quantiles to form the estimated confidence interval.",
"_____no_output_____"
]
],
[
[
"def quantile_ci(data, q, c = .95, n_trials = 1000):\n \"\"\"\n Compute a confidence interval for a quantile of a dataset using a bootstrap\n method.\n \n Input parameters:\n data: data in form of 1-D array-like (e.g. numpy array or Pandas series)\n q: quantile to be estimated, must be between 0 and 1\n c: confidence interval width\n n_trials: number of bootstrap samples to perform\n \n Output value:\n ci: Tuple indicating lower and upper bounds of bootstrapped\n confidence interval\n \"\"\"\n \n # initialize storage of bootstrapped sample quantiles\n n_points = data.shape[0]\n sample_qs = []\n \n # For each trial...\n for _ in range(n_trials):\n # draw a random sample from the data with replacement...\n sample = np.random.choice(data, n_points, replace = True)\n \n # compute the desired quantile...\n sample_q = np.percentile(sample, 100 * q)\n \n # and add the value to the list of sampled quantiles\n sample_qs.append(sample_q)\n \n # Compute the confidence interval bounds\n lower_limit = np.percentile(sample_qs, (1 - c)/2 * 100)\n upper_limit = np.percentile(sample_qs, (1 + c)/2 * 100)\n \n return (lower_limit, upper_limit)",
"_____no_output_____"
],
[
"data = pd.read_csv('../data/bootstrapping_data.csv')\ndata.head(10)",
"_____no_output_____"
],
[
"# data visualization\nplt.hist(data['time'], bins = np.arange(0, data['time'].max()+400, 400));",
"_____no_output_____"
],
[
"lims = quantile_ci(data['time'], 0.9)\nprint(lims)",
"_____no_output_____"
]
],
[
[
"### Bootstrapping Notes\n\nConfidence intervals coming from the bootstrap procedure will be optimistic compared to the true state of the world. This is because there will be things that we don't know about the real world that we can't account for, due to not having a parametric model of the world's state. Consider the extreme case of trying to understand the distribution of the maximum value: our confidence interval would never be able to include any value greater than the largest observed value and it makes no sense to have any lower bound below the maximum observation. Intuitively, however, there's a pretty clear possibility for there to be unobserved values that are larger than the one we've observed, especially for skewed data like shown in the example.\n\nThis doesn't override the bootstrap method's advantages, however. The bootstrap procedure is fairly simple and straightforward. Since you don't make assumptions about the distribution of data, it can be applicable for any case you encounter. The results should also be fairly comparable to standard tests. But it does take computational effort, and its output does depend on the data put in. For reference, for the 95% CI on the 90th percentile example explored above, the inferred interval would only capture about 83% of 90th percentiles from the original generating distribution. But a more intricate procedure using a binomial assumption to index on the observed data only does about one percentage point better (84%). And both of these depend on the specific data generated: a different set of 5000 points will produce different intervals, with different accuracies.\n\nBinomial solution for percentile CIs reference: [1](https://www-users.york.ac.uk/~mb55/intro/cicent.htm), [2](https://stats.stackexchange.com/questions/99829/how-to-obtain-a-confidence-interval-for-a-percentile)",
"_____no_output_____"
],
[
"## Permutation Tests\n\nThe permutation test is a resampling-type test used to compare the values on an outcome variable between two or more groups. In the case of the permutation test, resampling is done on the group labels. The idea here is that, under the null hypothesis, the outcome distribution should be the same for all groups, whether control or experimental. Thus, we can emulate the null by taking all of the data values as a single large group. Applying labels randomly to the data points (while maintaining the original group membership ratios) gives us one simulated outcome from the null.\n\nThe rest follows similar to the sampling approach to a standard hypothesis test, except that we haven't specified a reference distribution to sample from – we're sampling directly from the data we've collected. After applying the labels randomly to all the data and recording the outcome statistic many times, we compare our actual, observed statistic against the simulated statistics. A p-value is obtained by seeing how many simulated statistic values are as or more extreme as the one actually observed, and a conclusion is then drawn.\n\nTry implementing a permutation test in the cells below to test if the 90th percentile of times is staistically significantly smaller for the experimental group, as compared to the control group:\n\n- Initialize an empty list to store the difference in sample quantiles as `sample_diffs`.\n- Create a loop for each trial where:\n - First generate a permutation sample by randomly shuffling the data point labels. ([`random.permutation`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.permutation.html) will be useful here.)\n - Then, compute the `q`th quantile of the data points that have been assigned to each group based on the permuted labels. Append the difference in quantiles to the `sample_diffs` list.\n- After gathering the quantile differences for permuted samples, compute the observed difference for the actual data. Then, compute a p-value from the number of permuted sample differences that are less than or greater than the observed difference, depending on the desired alternative hypothesis.",
"_____no_output_____"
]
],
[
[
"def quantile_permtest(x, y, q, alternative = 'less', n_trials = 10_000):\n \"\"\"\n Compute a confidence interval for a quantile of a dataset using a bootstrap\n method.\n \n Input parameters:\n x: 1-D array-like of data for independent / grouping feature as 0s and 1s\n y: 1-D array-like of data for dependent / output feature\n q: quantile to be estimated, must be between 0 and 1\n alternative: type of test to perform, {'less', 'greater'}\n n_trials: number of permutation trials to perform\n \n Output value:\n p: estimated p-value of test\n \"\"\"\n \n \n # initialize storage of bootstrapped sample quantiles\n sample_diffs = []\n \n # For each trial...\n for _ in range(n_trials):\n # randomly permute the grouping labels\n labels = np.random.permutation(y)\n \n # compute the difference in quantiles\n cond_q = np.percentile(x[labels == 0], 100 * q)\n exp_q = np.percentile(x[labels == 1], 100 * q)\n \n # and add the value to the list of sampled differences\n sample_diffs.append(exp_q - cond_q)\n \n # compute observed statistic\n cond_q = np.percentile(x[y == 0], 100 * q)\n exp_q = np.percentile(x[y == 1], 100 * q)\n obs_diff = exp_q - cond_q\n \n # compute a p-value\n if alternative == 'less':\n hits = (sample_diffs <= obs_diff).sum()\n elif alternative == 'greater':\n hits = (sample_diffs >= obs_diff).sum()\n \n return (hits / n_trials)",
"_____no_output_____"
],
[
"data = pd.read_csv('../data/permutation_data.csv')\ndata.head(10)",
"_____no_output_____"
],
[
"# data visualization\nbin_borders = np.arange(0, data['time'].max()+400, 400)\nplt.hist(data[data['condition'] == 0]['time'], alpha = 0.5, bins = bin_borders)\nplt.hist(data[data['condition'] == 1]['time'], alpha = 0.5, bins = bin_borders)\nplt.legend(labels = ['control', 'experiment']);",
"_____no_output_____"
],
[
"# Just how different are the two distributions' 90th percentiles?\nprint(np.percentile(data[data['condition'] == 0]['time'], 90),\n np.percentile(data[data['condition'] == 1]['time'], 90))",
"_____no_output_____"
],
[
"quantile_permtest(data['time'], data['condition'], 0.9,\n alternative = 'less')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7ca37596b41358adb1b2b442b3cfa445c200d40 | 227,573 | ipynb | Jupyter Notebook | Machine Learning/Linear-Regression/Boston DataFrame2.ipynb | wagneralbjr/Python_data_science-bootcamp_udemy | cb7b1a90512aa554c3008105167716aa63c2b0b2 | [
"MIT"
] | 1 | 2018-01-02T12:19:04.000Z | 2018-01-02T12:19:04.000Z | Machine Learning/Linear-Regression/Boston DataFrame2.ipynb | juneba191/Python_data_science-bootcamp_udemy | cb7b1a90512aa554c3008105167716aa63c2b0b2 | [
"MIT"
] | null | null | null | Machine Learning/Linear-Regression/Boston DataFrame2.ipynb | juneba191/Python_data_science-bootcamp_udemy | cb7b1a90512aa554c3008105167716aa63c2b0b2 | [
"MIT"
] | 1 | 2020-07-24T12:50:36.000Z | 2020-07-24T12:50:36.000Z | 205.948416 | 100,290 | 0.884929 | [
[
[
"from sklearn.datasets import load_boston\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plot\nimport seaborn as sns\n%matplotlib inline\nboston_aux = load_boston()",
"_____no_output_____"
],
[
"dir(boston_aux)",
"_____no_output_____"
],
[
"boston = pd.DataFrame(data=boston_aux['data'],columns=boston_aux['feature_names'])",
"_____no_output_____"
],
[
"price = pd.DataFrame(data=boston_aux['target'],columns=['Price'])",
"_____no_output_____"
],
[
"data = pd.concat([boston,price],axis=1)",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"sns.distplot(data['Price'])",
"_____no_output_____"
],
[
"data_tratada = data [data['Price'] < 45]",
"_____no_output_____"
],
[
"data_tratada.head()",
"_____no_output_____"
],
[
"sns.distplot(data_tratada['Price'])",
"_____no_output_____"
],
[
"boston.columns",
"_____no_output_____"
],
[
"X = data_tratada[['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',\n 'PTRATIO', 'B', 'LSTAT']]\ny = data_tratada[['Price']]",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=66)",
"_____no_output_____"
],
[
"X_train.head()",
"_____no_output_____"
],
[
"lm = LinearRegression()",
"_____no_output_____"
],
[
"lm.fit(X_train,y_train)",
"_____no_output_____"
],
[
"coeff_df = pd.DataFrame(lm.coef_[0],X.columns,[\"Coefficient\"])",
"_____no_output_____"
],
[
"coeff_df",
"_____no_output_____"
],
[
"predictions = lm.predict(X_test)",
"_____no_output_____"
],
[
"plot.scatter(y_test,predictions)",
"_____no_output_____"
],
[
"sns.distplot((y_test-predictions),bins=50);\n#RESIDUO, y_test - predição.",
"_____no_output_____"
],
[
"from sklearn import metrics",
"_____no_output_____"
],
[
"print('MAE:', metrics.mean_absolute_error(y_test, predictions))\nprint('MSE:', metrics.mean_squared_error(y_test, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))",
"MAE: 2.72924174506\nMSE: 13.9451394318\nRMSE: 3.73431913899\n"
]
],
[
[
" ainda tem mta diferença a predição \n \n #tentar com menos variáveis.",
"_____no_output_____"
]
],
[
[
"\nsns.heatmap(data_tratada.corr(),annot=True)",
"_____no_output_____"
],
[
"X = data_tratada[['CRIM', 'NOX', 'RM','LSTAT']]\ny = data_tratada[['Price']]",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=66)",
"_____no_output_____"
],
[
"lm = LinearRegression()",
"_____no_output_____"
],
[
"lm.fit(X_train,y_train)",
"_____no_output_____"
],
[
"predictions = lm.predict(X_test)",
"_____no_output_____"
],
[
"plot.scatter(y_test,predictions)",
"_____no_output_____"
],
[
"sns.distplot((y_test-predictions),bins=50);\n#RESIDUO, y_test - predição.",
"_____no_output_____"
],
[
"print('MAE:', metrics.mean_absolute_error(y_test, predictions))\nprint('MSE:', metrics.mean_squared_error(y_test, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))",
"MAE: 3.25779091361\nMSE: 19.0984201753\nRMSE: 4.37017392964\n"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7ca435435a8dd6d630e5b4598748bf86d09a7e1 | 200,560 | ipynb | Jupyter Notebook | data-science/HW-1-clustring.ipynb | mtjin/University_and_AndroidProjects | c0ac3394043fd10730e68b391866d55c3be2c23b | [
"MIT"
] | 1 | 2021-04-13T12:06:51.000Z | 2021-04-13T12:06:51.000Z | data-science/HW-1-clustring.ipynb | mtjin/University | c0ac3394043fd10730e68b391866d55c3be2c23b | [
"MIT"
] | 2 | 2022-01-21T23:46:50.000Z | 2022-01-21T23:48:45.000Z | data-science/HW-1-clustring.ipynb | mtjin/University | c0ac3394043fd10730e68b391866d55c3be2c23b | [
"MIT"
] | null | null | null | 35.833482 | 32,688 | 0.443189 | [
[
[
"from os.path import join\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Scaling\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\n\n# K-means Clustering\nfrom sklearn.cluster import KMeans\n\n# Hierarchical Clustering\nfrom sklearn.cluster import AgglomerativeClustering\nfrom scipy.cluster.hierarchy import dendrogram\n\n# Performance Evaluation\nfrom sklearn.metrics import silhouette_samples \nfrom sklearn.metrics import silhouette_score\n",
"_____no_output_____"
],
[
"\nabalone = pd.read_csv(join('data', 'abalone.csv')) # 데이터 불러오기\nprint(abalone.shape)\nabalone.head(10)\n",
"(4176, 9)\n"
],
[
"# Data Processing\ndatay = data[:,0] # 성별을 Y 레이블로함\ndel abalone['M'] # 카테고리컬 데이터인 M 은 input에서 삭제\n\ndata = np.array(abalone) # numpy array 로 변환\nnvar = data.shape[1] #컬럼데이터개수\ndatax = data[:, 0:nvar] # input 데이터 저장\nprint(datax[0:5,:]) \ndatax_scale = datax #오리지널 데이터는 datax는 남기기위해 datax_scale에 복사\nfor i in range(nvar):\n datax_scale[:,i] = (data[:,i]-np.mean(datax[:,i]))/np.std(datax[:,i])",
"[[ 0.35 0.265 0.09 0.2255 0.0995 0.0485 0.07 7. ]\n [ 0.53 0.42 0.135 0.677 0.2565 0.1415 0.21 9. ]\n [ 0.44 0.365 0.125 0.516 0.2155 0.114 0.155 10. ]\n [ 0.33 0.255 0.08 0.205 0.0895 0.0395 0.055 7. ]\n [ 0.425 0.3 0.095 0.3515 0.141 0.0775 0.12 8. ]]\n[[-1.44900723 -1.43989229 -1.18425209 ... -1.20532696 -1.21305408\n -0.90979684]\n [ 0.0498915 0.12201495 -0.10824748 ... -0.35684354 -0.20727719\n -0.28929846]\n [-0.69955786 -0.4322102 -0.34735962 ... -0.60773918 -0.60240383\n 0.02095074]\n ...\n [ 0.63279657 0.67624011 1.56553747 ... 0.97518418 0.49676663\n -0.28929846]\n [ 0.84097695 0.77700832 0.25042072 ... 0.73341202 0.41055718\n 0.02095074]\n [ 1.54879024 1.48238578 1.32642533 ... 1.78717368 1.84019719\n 0.64144912]]\n"
],
[
"# 출력 여러개 한번에 볼 수 있게 하기\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\n# K-means Clustering (k = 3, 5 ,7)\nkmeans_3 = KMeans(n_clusters=3 , init='random', random_state=122)\nkmeans_5 = KMeans(n_clusters=5 , init='random', random_state=122)\nkmeans_7 = KMeans(n_clusters=7 , init='random', random_state=122)\nkmeans_3.fit(datax_scale)\nkmeans_5.fit(datax_scale)\nkmeans_7.fit(datax_scale)\n\n#center vecotr\nkmeans_3.cluster_centers_\nkmeans_5.cluster_centers_\nkmeans_7.cluster_centers_\n\n#cluster label\nkmeans_3.labels_\nkmeans_5.labels_\nkmeans_7.labels_\n\n# 성능\nsilhouette_score(datax_scale, kmeans_3.labels_)\nsilhouette_score(datax_scale, kmeans_5.labels_)\nsilhouette_score(datax_scale, kmeans_7.labels_)",
"_____no_output_____"
],
[
"#datax = data[:, 0:nvar-1]\n# Hierarchical Clustering\ncomplete_clustering_3 = AgglomerativeClustering(linkage='complete', n_clusters=3).fit(datax)\ncomplete_clustering_5 = AgglomerativeClustering(linkage='complete', n_clusters=5).fit(datax)\ncomplete_clustering_7 = AgglomerativeClustering(linkage='complete', n_clusters=7).fit(datax)\n# 성능\nsilhouette_score(datax_scale, complete_clustering_3.labels_)\nsilhouette_score(datax_scale, complete_clustering_5.labels_)\nsilhouette_score(datax_scale, complete_clustering_7.labels_)\n# Visualization\nplt.figure(figsize=(10,10))\n\n# Hierarchical Clustering의 자식노드\nchildren_3 = complete_clustering_3.children_\n# 각 자식 노드간의 거리 정보를 가지고 있지 않기 때문에, 균일하게 그리도록 합니다.\ndistance_3 = np.arange(children_3.shape[0])\n\n# 각 클러스터 단계를 포함한 노드의 수 계산\nno_of_observations_3 = np.arange(2, children_3.shape[0]+2)\n\n# 덴드로그램을 그리기 위한 연결 매트릭스를 생성한다.\nlinkage_matrix_3 = np.column_stack([children_3, distance_3, no_of_observations_3]).astype(float)\n\ndendrogram(linkage_matrix_3, p = datax_scale.shape[0], labels = complete_clustering_3.labels_,\n show_contracted=True, no_labels = True)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7ca45237b8653b774778716e3fc4b54746a0ec5 | 11,270 | ipynb | Jupyter Notebook | Day18/.ipynb_checkpoints/Day18-checkpoint.ipynb | MichaelMKKang/AdventOfCode | fcbc7dccdd116c8cce62c0570bbaeaafe17b55ff | [
"MIT"
] | null | null | null | Day18/.ipynb_checkpoints/Day18-checkpoint.ipynb | MichaelMKKang/AdventOfCode | fcbc7dccdd116c8cce62c0570bbaeaafe17b55ff | [
"MIT"
] | null | null | null | Day18/.ipynb_checkpoints/Day18-checkpoint.ipynb | MichaelMKKang/AdventOfCode | fcbc7dccdd116c8cce62c0570bbaeaafe17b55ff | [
"MIT"
] | null | null | null | 33.843844 | 206 | 0.417835 | [
[
[
"import re\nwith open('Day18 input.txt') as f:\n lines = f.readlines()\nlines = [x.strip() for x in lines]\n#for l in lines:\n# print(l)",
"_____no_output_____"
],
[
"#i is the index: the start of the first number once depth hit 5\ndef explode(snail,i):\n comma_index = i\n while snail[comma_index] != ',':\n comma_index += 1\n \n end_index = comma_index\n while snail[end_index] != ']':\n end_index += 1\n left_num = int(snail[i:comma_index])\n right_num = int(snail[comma_index+1:end_index])\n\n #first try exploding both ways, if error, then ...\n #try exploding left. if it doesn't work, then its gotta explode right\n try:\n left_index = i-1\n while snail[left_index] not in '0123456789':\n left_index -= 1\n if left_index < 0:\n raise Error\n left_left_index = left_index\n while snail[left_left_index] in '0123456789':\n left_left_index -= 1\n left_target = int(snail[left_left_index+1:left_index+1])\n right_index = end_index\n while snail[right_index] not in '0123456789':\n right_index += 1\n if right_index > len(snail):\n raise Error\n right_right_index = right_index\n while snail[right_right_index] in '0123456789':\n right_right_index += 1\n right_target = int(snail[right_index:right_right_index])\n #print('test',snail[left_index+1:i-1])\n new_snail = snail[:left_left_index+1] + str(left_num+left_target) + snail[left_index+1:i-1] + '0' + snail[end_index+1:right_index] + str(right_num+right_target) + snail[right_right_index:] \n \n except:\n try:\n left_index = i-1\n while snail[left_index] not in '0123456789':\n left_index -= 1\n if left_index < 0:\n raise Error\n left_left_index = left_index\n while snail[left_left_index] in '0123456789':\n left_left_index -= 1\n left_target = int(snail[left_left_index+1:left_index+1])\n new_snail = snail[:left_left_index+1] + str(left_num+left_target) + snail[left_index+1] + '0' + snail[end_index+1:]\n print('only exploding left')\n except:\n try:\n right_index = end_index\n while snail[right_index] not in '0123456789':\n right_index += 1\n if right_index > len(snail):\n raise Error\n right_right_index = right_index\n while snail[right_right_index] in '0123456789':\n right_right_index += 1\n right_target = int(snail[right_index:right_right_index])\n new_snail = snail[:i-1] + '0' + snail[end_index+1:right_index] + str(right_num+right_target) + snail[right_right_index:]\n print('only exploding right')\n except:\n print('bigger uhoh')\n snail = new_snail\n return snail\n\n#explode: iterate left until you find num, add to int(num) and add the strings togteher\n# can't just replace value since what if explode causes > 10 value\n#5,13,11,11\n# test = '[[[[0,7],4],[7,[[8,4],9]]],[1,1]]'\n# round2 = explode(test,17)\n# print(round2)\n\nprint('--------------')\nassert step('[[[[[9,8],1],2],3],4]') == '[[[[0,9],2],3],4]','check 0'\nassert step('[7,[6,[5,[4,[3,2]]]]]') == '[7,[6,[5,[7,0]]]]','check 1'\nassert step('[[6,[5,[4,[3,2]]]],1]') == '[[6,[5,[7,0]]],3]','check 2'\nround0 = step('[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]')\nassert round0 == '[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]','check 3.0'\nassert step(round0) == '[[3,[2,[8,0]]],[9,[5,[7,0]]]]','check3.1'\n\nassert step('[[[[[1,1],[2,2]],[3,3]],[4,4]],[5,5]]') == '[[[[0,[3,2]],[3,3]],[4,4]],[5,5]]', 'check combo1.0'\nassert step(step('[[[[[1,1],[2,2]],[3,3]],[4,4]],[5,5]]')) == '[[[[3,0],[5,3]],[4,4]],[5,5]]', 'check combo1.1'\n",
"--------------\nonly exploding right\nonly exploding left\nonly exploding left\nonly exploding right\nonly exploding right\n"
],
[
"import math\n\n#i is the index: the start of the first number where value > 9\ndef split(snail,i):\n num = int(snail[i:i+2])\n left = math.floor(num/2)\n right = math.ceil(num/2)\n return snail[:i] + '[' + str(left) + ',' + str(right) + ']' + snail[i+2:]\n\n# test = '[[[[0,7],4],[15,[0,13]]],[1,1]]'\n# round2 = split(test,13)\n# print(round2)\n# print(split(round2,22))\n\nassert split('[[[[0,7],4],[15,[0,13]]],[1,1]]',13) == '[[[[0,7],4],[[7,8],[0,13]]],[1,1]]', 'check0'\nassert split('[[[[0,7],4],[[7,8],[0,13]]],[1,1]]',22) == '[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]', 'check1'",
"only exploding right\n[[[[0,3,2]],[3,3]],[4,4]],[5,5]]\n"
],
[
"def step(line):\n depth = 0\n max_depth = 4\n max_depth_index = 0\n num = ''\n exploded = False\n for i in range(len(line)):\n if depth > max_depth:\n exploded = True\n max_depth = depth\n max_depth_index = i\n if line[i] == '[':\n depth += 1\n if line[i] == ']':\n depth -= 1\n if max_depth >= 5 and exploded:\n #print('max depth is',max_depth,'at index',max_depth_index)\n line = explode(line,max_depth_index)\n return line\n for i in range(len(line)):\n if line[i] in '0123456789':\n num += line[i]\n if int(num) > 9:\n #print(i)\n line = split(line,i-1)\n elif line[i] not in '0123456789' and num != '':\n num = ''\n return line\n\n#step1: run this\n# test = '[[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]],[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]]'\n# for i in range(20):\n# z = step(test)\n# print(z)\n# test = z\n\n\n\ntest = '[[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]],[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]]'\nfor i in range(5):\n z = step(test)\n print(z)\n test = z\n\nprint('-------')\nprint('[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]')\n \n#step2: this is the issue line\n# issue = '[[[[4,0],[5,4]],[[7,7],[6,0]]],[[7,[5,5]],[[0,[[5,6],3]],[[6,3],[8,8]]]]]'\n# print(issue[47:51])\n \n#print(step('[[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]],[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]]'))\n#print(step(step('[[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]],[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]]')))",
"[[[[4,0],[5,0]],[[[4,5],[2,6]],[9,5]]],[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]]\n[[[[4,0],[5,4]],[[0,[7,6]],[9,5]]],[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]]\n[[[[4,0],[5,4]],[[7,0],[15,5]]],[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]]\n[[[[4,0],[5,4]],[[7,0],[15,5]]],[10,[[0,[11,3]],[[6,3],[8,8]]]]]\n[[[[4,0],[5,4]],[[7,0],[15,5]]],[10,[[11,0],[[9,3],[8,8]]]]]\n-------\n[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]\n"
],
[
"def snail_add(left,right):\n line = '[' + left + ',' + right + ']'\n #print(line)\n while 1:\n temp_line = step(line)\n if temp_line == line:\n break\n else:\n line = temp_line[:]\n #print(line)\n return line\n\nassert snail_add('[[[[4,3],4],4],[7,[[8,4],9]]]','[1,1]') == '[[[[0,7],4],[[7,8],[6,0]]],[8,1]]', 'check combo0'",
"only exploding right\n"
],
[
"lines = [\n '[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]',\n '[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]',\n '[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]',\n '[[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]',\n '[7,[5,[[3,8],[1,4]]]]',\n '[[2,[2,2]],[8,[8,1]]]',\n '[2,9]',\n '[1,[[[9,3],9],[[9,0],[0,7]]]]',\n '[[[5,[7,4]],7],1]',\n '[[[[4,2],2],6],[8,7]]'\n]\n\nline = lines[0]\nfor l in lines[1:]:\n line = snail_add(line,l)\n print(line)\n break\n",
"only exploding left\n[[[[4,0],[5,4]],[[7,7],[6,5]]],[[[0,6],[6,6]],[[6,7],[7,5]]]]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7ca4b6d2823fe84f65c6f4646d8e3f5d313f70e | 92,208 | ipynb | Jupyter Notebook | IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__harm_utoprim_2d.ipynb | ksible/nrpytutorial | 4ca6e9da22def2a9c9bcbcad75847fd1db159f4b | [
"BSD-2-Clause"
] | null | null | null | IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__harm_utoprim_2d.ipynb | ksible/nrpytutorial | 4ca6e9da22def2a9c9bcbcad75847fd1db159f4b | [
"BSD-2-Clause"
] | null | null | null | IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__harm_utoprim_2d.ipynb | ksible/nrpytutorial | 4ca6e9da22def2a9c9bcbcad75847fd1db159f4b | [
"BSD-2-Clause"
] | null | null | null | 39.86511 | 365 | 0.474341 | [
[
[
"<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# Tutorial-IllinoisGRMHD: harm_utoprim_2d.c\n\n## Authors: Leo Werneck & Zach Etienne\n\n<font color='red'>**This module is currently under development**</font>\n\n## In this tutorial module we explain the conservative-to-primitive algorithm used by `HARM`. This module will likely be absorbed by another one once we finish documenting the code.\n\n### Required and recommended citations:\n\n* **(Required)** Etienne, Z. B., Paschalidis, V., Haas R., Mösta P., and Shapiro, S. L. IllinoisGRMHD: an open-source, user-friendly GRMHD code for dynamical spacetimes. Class. Quantum Grav. 32 (2015) 175009. ([arxiv:1501.07276](http://arxiv.org/abs/1501.07276)).\n* **(Required)** Noble, S. C., Gammie, C. F., McKinney, J. C., Del Zanna, L. Primitive Variable Solvers for Conservative General Relativistic Magnetohydrodynamics. Astrophysical Journal, 641, 626 (2006) ([astro-ph/0512420](https://arxiv.org/abs/astro-ph/0512420)).\n* **(Recommended)** Del Zanna, L., Bucciantini N., Londrillo, P. An efficient shock-capturing central-type scheme for multidimensional relativistic flows - II. Magnetohydrodynamics. A&A 400 (2) 397-413 (2003). DOI: 10.1051/0004-6361:20021641 ([astro-ph/0210618](https://arxiv.org/abs/astro-ph/0210618)).",
"_____no_output_____"
],
[
"<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$\n\nThis module is organized as follows\n\n0. [Step 0](#src_dir): **Source directory creation**\n1. [Step 1](#introduction): **Introduction**\n1. [Step 2](#harm_utoprim_2d__c__eos_indep): **EOS independent routines**\n 1. [Step 2.a](#utoprim_2d): *The `Utoprim_2d()` function*\n 1. [Step 2.a.i](#utoprim_2d__bi_and_alpha): Setting $B^{i}_{\\rm HARM}$ and $\\alpha$\n 1. [Step 2.a.ii](#utoprim_2d__converting): Preparing the variables to be used by the `Utoprim_new_body()` function\n 1. [Step 2.b](#utoprim_new_body): *The `Utoprim_new_body()` function*\n 1. [Step 2.b.i](#utoprim_new_body__basic_quantities): Computing basic quantities\n 1. [Step 2.b.ii](#utoprim_new_body__wlast): Determining $W$ from the previous iteration, $W_{\\rm last}$\n 1. [Step 2.b.iii](#utoprim_new_body__vsqlast_and_recompute_w_and_vsq): Compute $v^{2}_{\\rm last}$, then update $v^{2}$ and $W$\n 1. [Step 2.b.iv](#utoprim_new_body__compute_prims): Computing the primitive variables\n 1. [Step 2.c](#vsq_calc): *The `vsq_calc()` function*\n 1. [Step 2.d](#x1_of_x0): *The `x1_of_x0()` function*\n 1. [Step 2.e](#validate_x): *The `validate_x()` function*\n 1. [Step 2.f](#general_newton_raphson): *The `general_newton_raphson()` function*\n 1. [Step 2.g](#func_vsq): *The `func_vsq()` function*\n1. [Step 3](#harm_utoprim_2d__c__eos_dep): **EOS dependent routines**\n 1. [Step 3.a](#pressure_w_vsq): *The `pressure_W_vsq()` function*\n 1. [Step 3.b](#dpdw_calc_vsq): *The `dpdW_calc_vsq()` function*\n 1. [Step 3.c](#dpdvsq_calc): *The `dpdvsq_calc()` function*\n 1. [Step 3.c.i](#dpdvsq_calc__basic_quantities): Setting basic quantities and computing $P_{\\rm cold}$ and $\\epsilon_{\\rm cold}$\n 1. [Step 3.c.ii](#dpdvsq_calc__dpcolddvsq): Computing $\\frac{\\partial P_{\\rm cold}}{\\partial\\left(v^{2}\\right)}$\n 1. [Step 3.c.iii](#dpdvsq_calc__depscolddvsq): Computing $\\frac{\\partial \\epsilon_{\\rm cold}}{\\partial\\left(v^{2}\\right)}$\n 1. [Step 3.c.iv](#dpdvsq_calc__dpdvsq): Computing $\\frac{\\partial p_{\\rm hybrid}}{\\partial\\left(v^{2}\\right)}$\n1. [Step 4](#code_validation): **Code validation**\n1. [Step 5](#latex_pdf_output): **Output this notebook to $\\LaTeX$-formatted PDF file**",
"_____no_output_____"
],
[
"<a id='src_dir'></a>\n\n# Step 0: Source directory creation \\[Back to [top](#toc)\\]\n$$\\label{src_dir}$$\n\nWe will now use the [cmdline_helper.py NRPy+ module](Tutorial-Tutorial-cmdline_helper.ipynb) to create the source directory within the `IllinoisGRMHD` NRPy+ directory, if it does not exist yet.",
"_____no_output_____"
]
],
[
[
"# Step 0: Creation of the IllinoisGRMHD source directory\n# Step 0a: Add NRPy's directory to the path\n# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory\nimport os,sys\nnrpy_dir_path = os.path.join(\"..\",\"..\")\nif nrpy_dir_path not in sys.path:\n sys.path.append(nrpy_dir_path)\n\n# Step 0b: Load up cmdline_helper and create the directory\nimport cmdline_helper as cmd\nIGM_src_dir_path = os.path.join(\"..\",\"src\")\ncmd.mkdir(IGM_src_dir_path)\n\n# Step 0c: Create the output file path\noutfile_path__harm_utoprim_2d__c = os.path.join(IGM_src_dir_path,\"harm_utoprim_2d.c\")",
"_____no_output_____"
]
],
[
[
"<a id='introduction'></a>\n\n# Step 1: Introduction \\[Back to [top](#toc)\\]\n$$\\label{introduction}$$\n\nComment on license: `HARM` uses GPL, while `IllinoisGRMHD` uses BSD.",
"_____no_output_____"
],
[
"<a id='harm_utoprim_2d__c__eos_indep'></a>\n\n# Step 2: EOS independent routines \\[Back to [top](#toc)\\]\n$$\\label{harm_utoprim_2d__c__eos_indep}$$\n\nLet us now start documenting the `harm_utoprim_2d.c`, which is a part of the `Harm` code. Our main reference throughout this discussion will be the required citation [Noble *et al.* (2006)](https://arxiv.org/abs/astro-ph/0512420).\n\nWe will start with the code's required preamble.",
"_____no_output_____"
]
],
[
[
"%%writefile $outfile_path__harm_utoprim_2d__c\n#ifndef __HARM_UTOPRIM_2D__C__\n#define __HARM_UTOPRIM_2D__C__\n/***********************************************************************************\n Copyright 2006 Charles F. Gammie, Jonathan C. McKinney, Scott C. Noble,\n Gabor Toth, and Luca Del Zanna\n\n HARM version 1.0 (released May 1, 2006)\n\n This file is part of HARM. HARM is a program that solves hyperbolic\n partial differential equations in conservative form using high-resolution\n shock-capturing techniques. This version of HARM has been configured to\n solve the relativistic magnetohydrodynamic equations of motion on a\n stationary black hole spacetime in Kerr-Schild coordinates to evolve\n an accretion disk model.\n\n You are morally obligated to cite the following two papers in his/her\n scientific literature that results from use of any part of HARM:\n\n [1] Gammie, C. F., McKinney, J. C., \\& Toth, G.\\ 2003,\n Astrophysical Journal, 589, 444.\n\n [2] Noble, S. C., Gammie, C. F., McKinney, J. C., \\& Del Zanna, L. \\ 2006,\n Astrophysical Journal, 641, 626.\n\n\n Further, we strongly encourage you to obtain the latest version of\n HARM directly from our distribution website:\n http://rainman.astro.uiuc.edu/codelib/\n\n\n HARM is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n\n HARM is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with HARM; if not, write to the Free Software\n Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\n***********************************************************************************/\n\n/*************************************************************************************/\n/*************************************************************************************/\n/*************************************************************************************\n\nutoprim_2d.c:\n---------------\n\n Uses the 2D method:\n -- solves for two independent variables (W,v^2) via a 2D\n Newton-Raphson method\n -- can be used (in principle) with a general equation of state.\n\n -- Currently returns with an error state (>0) if a negative rest-mass\n density or internal energy density is calculated. You may want\n to change this aspect of the code so that it still calculates the\n velocity and so that you can floor the densities. 
If you want to\n change this aspect of the code please comment out the \"return(retval)\"\n statement after \"retval = 5;\" statement in Utoprim_new_body();\n\n******************************************************************************/\n\nstatic const int NEWT_DIM=2;\n\n// Declarations:\nstatic CCTK_REAL vsq_calc(CCTK_REAL W,CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D);\nstatic int Utoprim_new_body(eos_struct eos, CCTK_REAL U[], CCTK_REAL gcov[NDIM][NDIM], CCTK_REAL gcon[NDIM][NDIM], CCTK_REAL gdet, CCTK_REAL prim[],long &n_iter);\nstatic int general_newton_raphson( eos_struct eos, CCTK_REAL x[], int n, long &n_iter, void (*funcd) (eos_struct, CCTK_REAL [], CCTK_REAL [], CCTK_REAL [], CCTK_REAL [][NEWT_DIM], CCTK_REAL *, CCTK_REAL *, int,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &),CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D);\nstatic void func_vsq( eos_struct eos, CCTK_REAL [], CCTK_REAL [], CCTK_REAL [], CCTK_REAL [][NEWT_DIM], CCTK_REAL *f, CCTK_REAL *df, int n,CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D);\nstatic CCTK_REAL x1_of_x0(CCTK_REAL x0, CCTK_REAL &Bsq, CCTK_REAL &QdotBsq, CCTK_REAL &Qtsq, CCTK_REAL &Qdotn, CCTK_REAL &D ) ;\nstatic CCTK_REAL pressure_W_vsq(eos_struct eos, CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D) ;\nstatic CCTK_REAL dpdW_calc_vsq(CCTK_REAL W, CCTK_REAL vsq);\nstatic CCTK_REAL dpdvsq_calc(eos_struct eos, CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D);\n\n/**********************************************************************/\n/******************************************************************\n\n Utoprim_2d():\n\n -- Driver for new prim. var. solver. The driver just translates\n between the two sets of definitions for U and P. The user may\n wish to alter the translation as they see fit. Note that Greek\n indices run 0,1,2,3 and Latin indices run 1,2,3 (spatial only).\n\n\n / rho u^t \\\n U = | T^t_t + rho u^t | sqrt(-det(g_{\\mu\\nu}))\n | T^t_i |\n \\ B^i /\n\n / rho \\\n P = | uu |\n | \\tilde{u}^i |\n \\ B^i /\n\n\n Arguments:\n U[NPR] = conserved variables (current values on input/output);\n gcov[NDIM][NDIM] = covariant form of the metric ;\n gcon[NDIM][NDIM] = contravariant form of the metric ;\n gdet = sqrt( - determinant of the metric) ;\n prim[NPR] = primitive variables (guess on input, calculated values on\n output if there are no problems);\n\n -- NOTE: for those using this routine for special relativistic MHD and are\n unfamiliar with metrics, merely set\n gcov = gcon = diag(-1,1,1,1) and gdet = 1. ;\n\n******************************************************************/",
"Writing ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='utoprim_2d'></a>\n\n## Step 2.a: The `Utoprim_2d()` function \\[Back to [top](#toc)\\]\n$$\\label{utoprim_2d}$$\n\nThe `Utoprim_2d()` function is the driver function of the `HARM` conservative-to-primitive algorithm. We remind you from the definitions of primitive and conservative variables used in the code:\n\n$$\n\\begin{align}\n\\boldsymbol{P}_{\\rm HARM} &= \\left\\{\\rho_{b},u,\\tilde{u}^{i},B^{i}_{\\rm HARM}\\right\\}\\ ,\\\\\n\\boldsymbol{C}_{\\rm HARM} &= \\left\\{\\sqrt{-g}\\rho_{b}u^{0},\\sqrt{-g}\\left(T^{0}_{\\ 0}+\\rho_{b}u^{0}\\right),\\sqrt{-g}T^{0}_{\\ i},\\sqrt{-g}B^{i}_{\\rm HARM}\\right\\}\\ .\n\\end{align}\n$$\n\n<a id='utoprim_2d__bi_and_alpha'></a>\n\n### Step 2.a.i: Setting $B^{i}_{\\rm HARM}$ and $\\alpha$ \\[Back to [top](#toc)\\]\n$$\\label{utoprim_2d__bi_and_alpha}$$\n\nLet\n\n$$\n\\tilde{B}^{i}_{\\rm HARM} \\equiv \\sqrt{-g}B^{i}_{\\rm HARM}\\ .\n$$\n\nThe code starts by relating\n\n$$\n\\boxed{B^{i}_{\\rm HARM} = \\frac{\\tilde{B}^{i}_{\\rm HARM}}{\\sqrt{-g}}}\\ ,\n$$\n\nand setting\n\n$$\n\\boxed{\\alpha = \\frac{1}{\\sqrt{-g^{00}}}} \\ .\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\nint Utoprim_2d(eos_struct eos, CCTK_REAL U[NPR], CCTK_REAL gcov[NDIM][NDIM], CCTK_REAL gcon[NDIM][NDIM],\n CCTK_REAL gdet, CCTK_REAL prim[NPR], long &n_iter)\n{\n\n CCTK_REAL U_tmp[NPR], prim_tmp[NPR];\n int i, ret;\n CCTK_REAL alpha;\n\n if( U[0] <= 0. ) {\n return(-100);\n }\n\n /* First update the primitive B-fields */\n for(i = BCON1; i <= BCON3; i++) prim[i] = U[i] / gdet ;\n\n /* Set the geometry variables: */\n alpha = 1.0/sqrt(-gcon[0][0]);",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
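  [
    [
      "To make the two boxed relations above concrete, here is a small NumPy sketch (not part of the generated C file) that applies them in flat space, where $\\sqrt{-g}=1$ and $g^{00}=-1$, so that $B^{i}_{\\rm HARM}=\\tilde{B}^{i}_{\\rm HARM}$ and $\\alpha=1$. The variable names `gdet`, `gcon`, and `Btilde` are illustrative stand-ins for the corresponding quantities in the C code above.\n\n```python\nimport numpy as np\n\n# Flat-space check of B^i = Btilde^i / sqrt(-g) and alpha = 1/sqrt(-g^{00})\ngcon   = np.diag([-1.0, 1.0, 1.0, 1.0])    # contravariant metric (flat space)\ngdet   = 1.0                               # sqrt(-det(g_{mu nu})) in flat space\nBtilde = np.array([1.0e-3, 2.0e-3, 0.0])   # densitized magnetic field \\tilde{B}^i\n\nB     = Btilde / gdet                      # undensitized B^i\nalpha = 1.0 / np.sqrt(-gcon[0][0])         # lapse\n\nprint(B, alpha)  # in flat space: B == Btilde and alpha == 1\n```",
      "_____no_output_____"
    ]
  ],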
[
[
"<a id='utoprim_2d__converting'></a>\n\n### Step 2.a.ii: Preparing the variables to be used by the `Utoprim_new_body()` function \\[Back to [top](#toc)\\]\n$$\\label{utoprim_2d__converting}$$\n\nThe conservative-to-primitive algorithm uses the `Utoprim_new_body()` function. However, this function assumes a *different* set of primitive/conservative variables. Thus, we must perform the proper conversion. First, let us ease on the notation a bit by defining:\n\n$$\n\\boldsymbol{C} \\equiv \\left\\{\\rho_{\\star},u_{\\star},\\tilde{S}_{i},\\tilde{B}^{i}_{\\rm HARM}\\right\\} \\equiv \\left\\{\\sqrt{-g}\\rho_{b}u^{0},\\sqrt{-g}\\left(T^{0}_{\\ 0}+\\rho_{b}u^{0}\\right),\\sqrt{-g}T^{0}_{\\ i},\\sqrt{-g}B^{i}_{\\rm HARM}\\right\\}\\ .\n$$\n\n\n\nBelow we list the main differences in the conservative variables:\n\n| `Utoprim_2d()` | `Utoprim_new_body()` |\n|------------------------------------------|---------------------------------------------------------------------------|\n| $\\color{blue}{\\textbf{Conservatives}}$ | $\\color{red}{\\textbf{Conservatives}}$ |\n| $\\color{blue}{\\rho_{\\star}}$ | $\\color{red}{\\frac{\\alpha}{\\sqrt{-g}}\\rho_{\\star}}$ |\n| $\\color{blue}{u_{\\star}}$ | $\\color{red}{\\frac{\\alpha}{\\sqrt{-g}}\\left(u_{\\star}-\\rho_{\\star}\\right)}$|\n| $\\color{blue}{\\tilde{S}_{i}}$ | $\\color{red}{\\frac{\\alpha}{\\sqrt{-g}}\\tilde{S}_{i}}$ |\n| $\\color{blue}{\\tilde{B}^{i}_{\\rm HARM}}$ | $\\color{red}{\\frac{\\alpha}{\\sqrt{-g}}\\tilde{B}^{i}_{\\rm HARM}}$ |\n\nThese are necessary conversions because while `Utoprim_2d()` assumes the set of conservatives above, `Utoprim_new_body()` assumes\n\n$$\n\\left\\{\\gamma\\rho_{b},\\alpha T^{0}_{\\ \\ 0}, \\alpha T^{0}_{\\ \\ i}, \\alpha B^{i}_{\\rm HARM}\\right\\}\\ .\n$$\n\nLet us first pause to understand the table above. From definition (15) in [Noble *et al.* (2006)](https://arxiv.org/abs/astro-ph/0512420) and the discussion just below it, we know that $\\gamma = \\alpha u^{0}$. Thus\n\n$$\n\\rho_{\\star} = \\sqrt{-g}\\rho_{b}u^{0} = \\sqrt{-g}\\left(\\frac{\\gamma}{\\alpha}\\rho_{b}\\right)\\implies\\boxed{\\gamma \\rho_{b} = \\frac{\\alpha}{\\sqrt{-g}}\\rho_{\\star}}\\ .\n$$\n\nThen we have\n\n$$\nu_{\\star} = \\sqrt{-g}\\left(T^{0}_{\\ \\ 0} + \\rho_{b}u^{0}\\right)= \\sqrt{-g}\\left(T^{0}_{\\ \\ 0} + \\frac{\\rho_{\\star}}{\\sqrt{-g}}\\right) = \\sqrt{-g}T^{0}_{\\ \\ 0} + \\rho_{\\star} \\implies \\boxed{\\alpha T^{0}_{\\ \\ 0} = \\frac{\\alpha}{\\sqrt{-g}}\\left(u_{\\star}-\\rho_{\\star}\\right)}\\ .\n$$\n\nThe other two relations are more straightforward. We have\n\n$$\n\\tilde{S}_{i} = \\sqrt{-g}T^{0}_{\\ \\ i} \\implies \\boxed{\\alpha T^{0}_{\\ \\ i} = \\frac{\\alpha}{\\sqrt{-g}}\\tilde{S}_{i}}\\ ,\n$$\n\nand\n\n$$\n\\tilde{B}^{i}_{\\rm HARM} = \\sqrt{-g}B^{i}_{\\rm HARM}\\implies \\boxed{\\alpha B^{i}_{\\rm HARM} = \\frac{\\alpha}{\\sqrt{-g}}\\tilde{B}^{i}_{\\rm HARM}}\\ .\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n /* Transform the CONSERVED variables into the new system */\n U_tmp[RHO] = alpha * U[RHO] / gdet;\n U_tmp[UU] = alpha * (U[UU] - U[RHO]) / gdet ;\n for( i = UTCON1; i <= UTCON3; i++ ) {\n U_tmp[i] = alpha * U[i] / gdet ;\n }\n for( i = BCON1; i <= BCON3; i++ ) {\n U_tmp[i] = alpha * U[i] / gdet ;\n }",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
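  [
    [
      "As a cross-check of the table above, the rescaling performed by the code cell above can be written as a short NumPy sketch. This is only an illustration of the index bookkeeping (the ordering `RHO, UU, UTCON1..UTCON3, BCON1..BCON3` is assumed to match the `harm` conventions used here); it is not part of the generated C file.\n\n```python\nimport numpy as np\n\n# Index layout assumed for the conserved-variable vector U (harm conventions)\nRHO, UU, UTCON1, UTCON2, UTCON3, BCON1, BCON2, BCON3 = range(8)\n\ndef rescale_conservatives(U, alpha, gdet):\n    # Map {rho_star, u_star, S_tilde_i, B_tilde^i} to the variables expected by\n    # Utoprim_new_body(): {gamma rho_b, alpha T^0_0, alpha T^0_i, alpha B^i}.\n    U_tmp = np.empty_like(U)\n    U_tmp[RHO] = alpha *  U[RHO]          / gdet   # gamma rho_b\n    U_tmp[UU]  = alpha * (U[UU] - U[RHO]) / gdet   # alpha T^0_0\n    U_tmp[UTCON1:BCON3+1] = alpha * U[UTCON1:BCON3+1] / gdet  # alpha T^0_i and alpha B^i\n    return U_tmp\n\n# Example: in flat space (gdet = 1, alpha = 1) only the UU slot changes, to UU - RHO\nU = np.array([1.0, 1.5, 0.1, 0.2, 0.3, 1.0e-3, 2.0e-3, 0.0])\nprint(rescale_conservatives(U, alpha=1.0, gdet=1.0))\n```",
      "_____no_output_____"
    ]
  ],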
[
[
"Below we list the necessary transformations on the primitive variables:\n\n| `Utoprim_2d()` | `Utoprim_new_body()` |\n|-------------------------------------|----------------------------------------|\n| $\\color{blue}{\\textbf{Primitives}}$ | $\\color{red}{\\textbf{Primitives}}$ |\n| $\\color{blue}{\\rho_{b}}$ | $\\color{red}{\\rho_{b}}$ |\n| $\\color{blue}{u}$ | $\\color{red}{u}$ |\n| $\\color{blue}{\\tilde{u}^{i}}$ | $\\color{red}{\\tilde{u}^{i}}$ |\n| $\\color{blue}{B^{i}_{\\rm HARM}}$ | $\\color{red}{\\alpha B^{i}_{\\rm HARM}}$ |\n\nAfter this slight modification we call the `Utoprim_new_body()` function. If it returns without errors, than the variables ${\\rm prim\\_tmp}$ will now contain the values of the primitives. We then update the ${\\rm prim}$ variables with these newly computed values.",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n /* Transform the PRIMITIVE variables into the new system */\n for( i = 0; i < BCON1; i++ ) {\n prim_tmp[i] = prim[i];\n }\n for( i = BCON1; i <= BCON3; i++ ) {\n prim_tmp[i] = alpha*prim[i];\n }\n\n ret = Utoprim_new_body(eos, U_tmp, gcov, gcon, gdet, prim_tmp,n_iter);\n\n /* Transform new primitive variables back if there was no problem : */\n if( ret == 0 || ret == 5 || ret==101 ) {\n for( i = 0; i < BCON1; i++ ) {\n prim[i] = prim_tmp[i];\n }\n }\n\n return( ret ) ;\n\n}",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='utoprim_new_body'></a>\n\n## Step 2.b: The `Utoprim_new_body()` function \\[Back to [top](#toc)\\]\n$$\\label{utoprim_new_body}$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n\n/**********************************************************************/\n/**********************************************************************************\n\n Utoprim_new_body():\n\n -- Attempt an inversion from U to prim using the initial guess prim.\n\n -- This is the main routine that calculates auxiliary quantities for the\n Newton-Raphson routine.\n\n -- assumes that\n / rho gamma \\\n U = | alpha T^t_\\mu |\n \\ alpha B^i /\n\n\n\n / rho \\\n prim = | uu |\n | \\tilde{u}^i |\n \\ alpha B^i /\n\n\nreturn: (i*100 + j) where\n i = 0 -> Newton-Raphson solver either was not called (yet or not used)\n or returned successfully;\n 1 -> Newton-Raphson solver did not converge to a solution with the\n given tolerances;\n 2 -> Newton-Raphson procedure encountered a numerical divergence\n (occurrence of \"nan\" or \"+/-inf\" ;\n\n j = 0 -> success\n 1 -> failure: some sort of failure in Newton-Raphson;\n 2 -> failure: utsq<0 w/ initial p[] guess;\n 3 -> failure: W<0 or W>W_TOO_BIG\n 4 -> failure: v^2 > 1\n 5 -> failure: rho,uu <= 0 ;\n\n**********************************************************************************/\n\nstatic int Utoprim_new_body(eos_struct eos, CCTK_REAL U[NPR], CCTK_REAL gcov[NDIM][NDIM],\n CCTK_REAL gcon[NDIM][NDIM], CCTK_REAL gdet, CCTK_REAL prim[NPR], long &n_iter)\n{\n\n CCTK_REAL x_2d[NEWT_DIM];\n CCTK_REAL QdotB,Bcon[NDIM],Bcov[NDIM],Qcov[NDIM],Qcon[NDIM],ncov[NDIM],ncon[NDIM],Qsq,Qtcon[NDIM];\n CCTK_REAL rho0,u,p,w,gammasq,gamma,gtmp,W_last,W,utsq,vsq;\n int i,j, n, retval, i_increase;\n\n n = NEWT_DIM ;\n\n // Assume ok initially:\n retval = 0;",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='utoprim_new_body__basic_quantities'></a>\n\n## Step 2.b.i: Computing basic quantities \\[Back to [top](#toc)\\]\n$$\\label{utoprim_new_body__basic_quantities}$$\n\nWe start by computing basic quantities from the input variables. Notice that this conservative-to-primitive algorithm does not need to update the magnetic field, thus\n\n$$\n\\boxed{B_{\\rm prim}^{i} = B_{\\rm conserv}^{i}}\\ .\n$$\n\nSince they are both equal, we will not distinguish between prim and conserv in what follows. We also set $B^{0} = 0$. Then we define\n\n$$\n\\boxed{Q_{\\mu} \\equiv \\alpha T^{0}_{\\ \\ \\mu}}\\ .\n$$\n\nFrom these, the following quantities are then computed:\n\n$$\n\\boxed{\n\\begin{align}\nB_{i} &= g_{i\\mu}B^{\\mu}\\\\\nQ^{\\mu} &= g^{\\mu\\nu}Q_{\\nu}\\\\\nB^{2} &= B_{i}B^{i}\\\\\nQ\\cdot B &= Q_{\\mu}B^{\\mu}\\\\\n\\left(Q\\cdot B\\right)^{2} &= \\left(Q\\cdot B\\right)\\left(Q\\cdot B\\right)\\\\\nn_{\\mu} &= \\left(-\\alpha,0,0,0\\right)\\\\\nn^{\\mu} &= g^{\\mu\\nu}n_{\\nu}\\\\\n\\left(Q\\cdot n\\right) &= Q^{\\mu}n_{\\mu}\\\\\nQ^{2} &= Q_{\\mu}Q^{\\mu}\\\\\n\\tilde{Q}^{2} &= Q^{2} + \\left(Q\\cdot n\\right)\\left(Q\\cdot n\\right)\\\\\nD &\\equiv \\gamma \\rho_{b}\n\\end{align}\n}\\ .\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n for(i = BCON1; i <= BCON3; i++) prim[i] = U[i] ;\n\n // Calculate various scalars (Q.B, Q^2, etc) from the conserved variables:\n Bcon[0] = 0. ;\n for(i=1;i<4;i++) Bcon[i] = U[BCON1+i-1] ;\n\n lower_g(Bcon,gcov,Bcov) ;\n\n for(i=0;i<4;i++) Qcov[i] = U[QCOV0+i] ;\n raise_g(Qcov,gcon,Qcon) ;\n\n\n CCTK_REAL Bsq = 0. ;\n for(i=1;i<4;i++) Bsq += Bcon[i]*Bcov[i] ;\n\n QdotB = 0. ;\n for(i=0;i<4;i++) QdotB += Qcov[i]*Bcon[i] ;\n CCTK_REAL QdotBsq = QdotB*QdotB ;\n\n ncov_calc(gcon,ncov) ;\n // FIXME: The exact form of n^{\\mu} can be found\n // in eq. (2.116) and implementing it\n // directly is a lot more efficient than\n // performing n^{\\mu} = g^{\\mu\\nu}n_{nu}\n raise_g(ncov,gcon,ncon);\n\n CCTK_REAL Qdotn = Qcon[0]*ncov[0] ;\n\n Qsq = 0. ;\n for(i=0;i<4;i++) Qsq += Qcov[i]*Qcon[i] ;\n\n CCTK_REAL Qtsq = Qsq + Qdotn*Qdotn ;\n\n CCTK_REAL D = U[RHO] ;",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
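  [
    [
      "The scalars above are simple metric contractions, so they can also be written compactly with NumPy. The sketch below is illustrative only (a flat diagonal metric and arbitrary example numbers are assumed) and mirrors the loops in the code cell above.\n\n```python\nimport numpy as np\n\n# Example inputs: flat metric, Q_mu = alpha T^0_mu, and B^mu with B^0 = 0\ngcov = np.diag([-1.0, 1.0, 1.0, 1.0])\ngcon = np.diag([-1.0, 1.0, 1.0, 1.0])\nalpha = 1.0 / np.sqrt(-gcon[0, 0])\n\nQcov = np.array([-1.2, 0.1, 0.05, 0.0])\nBcon = np.array([0.0, 1.0e-3, 2.0e-3, 0.0])\n\nBcov = gcov @ Bcon                           # lower the index on B\nQcon = gcon @ Qcov                           # raise the index on Q\n\nBsq     = Bcon[1:] @ Bcov[1:]                # B^2 = B_i B^i\nQdotB   = Qcov @ Bcon                        # Q.B\nQdotBsq = QdotB**2\nncov    = np.array([-alpha, 0.0, 0.0, 0.0])  # n_mu = (-alpha, 0, 0, 0)\nncon    = gcon @ ncov                        # n^mu\nQdotn   = Qcon @ ncov                        # Q.n\nQsq     = Qcov @ Qcon                        # Q^2\nQtsq    = Qsq + Qdotn**2                     # \\tilde{Q}^2\nD       = 1.0                                # D = gamma rho_b (taken from U[RHO])\n\nprint(Bsq, QdotB, Qdotn, Qtsq)\n```",
      "_____no_output_____"
    ]
  ],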
[
[
"<a id='utoprim_new_body__wlast'></a>\n\n## Step 2.b.ii: Determining $W$ from the previous iteration, $W_{\\rm last}$ \\[Back to [top](#toc)\\]\n$$\\label{utoprim_new_body__wlast}$$\n\nThe quantity $W$ is defined as\n\n$$\nW \\equiv w\\gamma^{2}\\ ,\n$$\n\nwhere\n\n$$\n\\begin{align}\nw &= \\rho_{b} + u + p\\ ,\\\\\n\\gamma^{2} &= 1 + g_{ij}\\tilde{u}^{i}\\tilde{u}^{j}\\ .\n\\end{align}\n$$\n\nThus the quantities $g_{ij}\\tilde{u}^{i}\\tilde{u}^{j}$ and then $\\gamma^{2}$ and $\\gamma$. Thus, by computing $\\rho_{b}$ and $p$ from the input variables, i.e. $D$, one can determine $w$ and then compute the value of $W$ from the input values (previous iteration), which we denote by $W_{\\rm last}$.\n\n**Dependecy note:** Note that this function depends on the `pressure_rho0_u()` function, which is *not* EOS independent.",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n /* calculate W from last timestep and use for guess */\n utsq = 0. ;\n for(i=1;i<4;i++)\n for(j=1;j<4;j++) utsq += gcov[i][j]*prim[UTCON1+i-1]*prim[UTCON1+j-1] ;\n\n\n if( (utsq < 0.) && (fabs(utsq) < 1.0e-13) ) {\n utsq = fabs(utsq);\n }\n if(utsq < 0. || utsq > UTSQ_TOO_BIG) {\n retval = 2;\n return(retval) ;\n }\n\n gammasq = 1. + utsq ;\n gamma = sqrt(gammasq);\n\n // Always calculate rho from D and gamma so that using D in EOS remains consistent\n // i.e. you don't get positive values for dP/d(vsq) .\n rho0 = D / gamma ;\n u = prim[UU] ;\n p = pressure_rho0_u(eos, rho0,u) ;\n w = rho0 + u + p ;\n\n W_last = w*gammasq ;\n\n\n // Make sure that W is large enough so that v^2 < 1 :\n i_increase = 0;\n while( (( W_last*W_last*W_last * ( W_last + 2.*Bsq )\n - QdotBsq*(2.*W_last + Bsq) ) <= W_last*W_last*(Qtsq-Bsq*Bsq))\n && (i_increase < 10) ) {\n W_last *= 10.;\n i_increase++;\n }",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
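  [
    [
      "For a simple $\\Gamma$-law gas, where $p=\\left(\\Gamma-1\\right)u$, the initial guess $W_{\\rm last}=\\left(\\rho_{b}+u+p\\right)\\gamma^{2}$ can be evaluated in a few lines. The sketch below is only meant to illustrate how the guess is assembled from the previous time level; `IllinoisGRMHD` itself obtains $p$ from `pressure_rho0_u()`, which implements the hybrid EOS, and all numbers here are arbitrary.\n\n```python\nimport numpy as np\n\nGamma   = 5.0/3.0                    # Gamma-law index (illustrative)\ngcov_sp = np.eye(3)                  # spatial metric g_{ij} (flat, for the example)\nut      = np.array([0.1, 0.0, 0.0])  # \\tilde{u}^i from the previous step\n\nutsq    = ut @ gcov_sp @ ut          # g_{ij} utilde^i utilde^j\ngammasq = 1.0 + utsq                 # gamma^2\ngamma   = np.sqrt(gammasq)\n\nD    = 1.0                           # conserved density, D = gamma rho_b\nrho0 = D / gamma                     # rho_b consistent with D\nu    = 0.1                           # internal energy density from the previous step\np    = (Gamma - 1.0) * u             # Gamma-law pressure\nw    = rho0 + u + p\n\nW_last = w * gammasq\nprint(W_last)\n```",
      "_____no_output_____"
    ]
  ],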
[
[
"<a id='utoprim_new_body__vsqlast_and_recompute_w_and_vsq'></a>\n\n## Step 2.b.iii: Compute $v^{2}_{\\rm last}$, then update $v^{2}$ and $W$ \\[Back to [top](#toc)\\]\n$$\\label{utoprim_new_body__vsqlast_and_recompute_w_and_vsq}$$\n\nThen we use equation (28) in [Noble *et al.* (2006)](https://arxiv.org/abs/astro-ph/0512420) to determine $v^{2}$:\n\n$$\n\\boxed{v^{2} = \\frac{\\tilde{Q}^{2}W^{2} + \\left(Q\\cdot B\\right)^{2}\\left(B^{2}+2W\\right)}{\\left(B^{2}+W\\right)^{2}W^{2}}}\\ .\n$$\n\nThis is done by calling the `x1_of_x0()` function, where $x_{0} = W$ and $x_{1} = v^{2}$, which itself calls the `vsq_calc()` function which implements the boxed equation above.\n\nAfter we have $\\left\\{W_{\\rm last},v^{2}_{\\rm last}\\right\\}$ we use them as the initial guess for the `general_newton_raphson()`, which returns the updated values $\\left\\{W,v^{2}\\right\\}$.\n\nAll functions mentioned above are documented in this tutorial notebook, so look at the [Table of Contents](#toc) for more information.",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n // Calculate W and vsq:\n x_2d[0] = fabs( W_last );\n x_2d[1] = x1_of_x0( W_last , Bsq,QdotBsq,Qtsq,Qdotn,D) ;\n retval = general_newton_raphson( eos, x_2d, n, n_iter, func_vsq, Bsq,QdotBsq,Qtsq,Qdotn,D) ;\n\n W = x_2d[0];\n vsq = x_2d[1];\n\n /* Problem with solver, so return denoting error before doing anything further */\n if( (retval != 0) || (W == FAIL_VAL) ) {\n retval = retval*100+1;\n return(retval);\n }\n else{\n if(W <= 0. || W > W_TOO_BIG) {\n retval = 3;\n return(retval) ;\n }\n }\n\n // Calculate v^2:\n if( vsq >= 1. ) {\n vsq = 1.-2.e-16;\n //retval = 4;\n //return(retval) ;\n }",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='utoprim_new_body__compute_prims'></a>\n\n## Step 2.b.iv: Computing the primitive variables \\[Back to [top](#toc)\\]\n$$\\label{utoprim_new_body__compute_prims}$$\n\nNow that we have $\\left\\{W,v^{2}\\right\\}$, we recompute the primitive variables. We start with\n\n$$\n\\left\\{\n\\begin{align}\n\\tilde{g} &\\equiv \\sqrt{1-v^{2}}\\\\\n\\gamma &= \\frac{1}{\\tilde{g}}\n\\end{align}\n\\right.\n\\implies\n\\boxed{\\rho_{b} = D\\tilde{g}}\\ .\n$$\n\nThen, we determine the pressure $p$ using the `pressure_rho0_w()` function and\n\n$$\nw = W\\left(1-v^{2}\\right)\n\\implies\n\\boxed{u = w - \\left(\\rho_{b} + p\\right)}\\ .\n$$\n\n**Dependecy note:** Note that this function depends on the `pressure_rho0_w()` function, which is *not* EOS independent.\n\nFinally, we can obtain $\\tilde{u}^{i}$ using eq. 31 in [Noble *et al.* (2006)](https://arxiv.org/abs/astro-ph/0512420)\n\n$$\n\\boxed{\n\\tilde{u}^{i} = \\frac{\\gamma}{\\left(W+B^{2}\\right)}\\left[\\tilde{Q}^{i} + \\frac{\\left(Q\\cdot B\\right)}{W}B^{i}\\right]\n}\\ ,\n$$\n\nwhere\n\n$$\n\\tilde{Q}^{i} = Q^{i} + \\left(Q\\cdot n\\right)n^{i}\\ .\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n // Recover the primitive variables from the scalars and conserved variables:\n gtmp = sqrt(1. - vsq);\n gamma = 1./gtmp ;\n rho0 = D * gtmp;\n\n w = W * (1. - vsq) ;\n p = pressure_rho0_w(eos, rho0,w) ;\n u = w - (rho0 + p) ; // u = rho0 eps, w = rho0 h\n\n if( (rho0 <= 0.) || (u <= 0.) ) {\n // User may want to handle this case differently, e.g. do NOT return upon\n // a negative rho/u, calculate v^i so that rho/u can be floored by other routine:\n\n retval = 5;\n //return(retval) ;\n }\n\n /*\n if(retval==5 && fabs(u)<1e-16) {\n u = fabs(u);\n CCTK_VInfo(CCTK_THORNSTRING,\"%e\\t%e\\t%e\",1.0-w/(rho0 + p),rho0,p);\n retval=0;\n }\n */\n\n prim[RHO] = rho0 ;\n prim[UU] = u ;\n\n for(i=1;i<4;i++) Qtcon[i] = Qcon[i] + ncon[i] * Qdotn;\n for(i=1;i<4;i++) prim[UTCON1+i-1] = gamma/(W+Bsq) * ( Qtcon[i] + QdotB*Bcon[i]/W ) ;\n\n /* set field components */\n for(i = BCON1; i <= BCON3; i++) prim[i] = U[i] ;\n\n\n /* done! */\n return(retval) ;\n\n}",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='vsq_calc'></a>\n\n## Step 2.c: The `vsq_calc()` function \\[Back to [top](#toc)\\]\n$$\\label{vsq_calc}$$\n\nThis function implements eq. (28) in [Noble *et al.* (2006)](https://arxiv.org/abs/astro-ph/0512420) to determine $v^{2}$:\n\n$$\n\\boxed{v^{2} = \\frac{\\tilde{Q}^{2}W^{2} + \\left(Q\\cdot B\\right)^{2}\\left(B^{2}+2W\\right)}{\\left(B^{2}+W\\right)^{2}W^{2}}}\\ .\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n\n/**********************************************************************/\n/****************************************************************************\n vsq_calc():\n\n -- evaluate v^2 (spatial, normalized velocity) from\n W = \\gamma^2 w\n\n****************************************************************************/\nstatic CCTK_REAL vsq_calc(CCTK_REAL W,CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D)\n{\n CCTK_REAL Wsq,Xsq;\n\n Wsq = W*W ;\n Xsq = (Bsq + W) * (Bsq + W);\n\n return( ( Wsq * Qtsq + QdotBsq * (Bsq + 2.*W)) / (Wsq*Xsq) );\n}",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
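  [
    [
      "Equation (28) is compact enough that a direct Python transcription doubles as a sanity check: in the unmagnetized limit it reduces to $v^{2}=\\tilde{Q}^{2}/W^{2}$. The sketch below is illustrative only and is not part of the generated C file.\n\n```python\ndef vsq_from_W(W, Bsq, QdotBsq, Qtsq):\n    # Eq. (28) of Noble et al. (2006): v^2 as a function of W\n    Wsq = W * W\n    Xsq = (Bsq + W)**2\n    return (Wsq * Qtsq + QdotBsq * (Bsq + 2.0 * W)) / (Wsq * Xsq)\n\n# Unmagnetized limit: v^2 -> Qtsq / W^2\nW, Qtsq = 2.0, 0.5\nassert abs(vsq_from_W(W, 0.0, 0.0, Qtsq) - Qtsq / W**2) < 1e-15\nprint(vsq_from_W(W, 0.0, 0.0, Qtsq))\n```",
      "_____no_output_____"
    ]
  ],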
[
[
"<a id='x1_of_x0'></a>\n\n## Step 2.d: The `x1_of_x0()` function \\[Back to [top](#toc)\\]\n$$\\label{x1_of_x0}$$\n\nThis function computes $v^{2}$, as described [above](#vsq_calc), then performs physical checks on $v^{2}$ (i.e. whether or not it is superluminal). This function assumes $W$ is physical.",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n\n/********************************************************************\n\n x1_of_x0():\n\n -- calculates v^2 from W with some physical bounds checking;\n -- asumes x0 is already physical\n -- makes v^2 physical if not;\n\n*********************************************************************/\n\nstatic CCTK_REAL x1_of_x0(CCTK_REAL x0, CCTK_REAL &Bsq, CCTK_REAL &QdotBsq, CCTK_REAL &Qtsq, CCTK_REAL &Qdotn, CCTK_REAL &D )\n{\n CCTK_REAL vsq;\n CCTK_REAL dv = 1.e-15;\n\n vsq = fabs(vsq_calc(x0,Bsq,QdotBsq,Qtsq,Qdotn,D)) ; // guaranteed to be positive\n\n\n return( ( vsq > 1. ) ? (1.0 - dv) : vsq );\n\n}",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='validate_x'></a>\n\n## Step 2.e: The `validate_x()` function \\[Back to [top](#toc)\\]\n$$\\label{validate_x}$$\n\nThis function performs physical tests on $\\left\\{W,v^{2}\\right\\}$ based on their definitions.",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n/********************************************************************\n\n validate_x():\n\n -- makes sure that x[0,1] have physical values, based upon\n their definitions:\n\n*********************************************************************/\n\nstatic void validate_x(CCTK_REAL x[2], CCTK_REAL x0[2] )\n{\n\n CCTK_REAL dv = 1.e-15;\n\n /* Always take the absolute value of x[0] and check to see if it's too big: */\n x[0] = fabs(x[0]);\n x[0] = (x[0] > W_TOO_BIG) ? x0[0] : x[0];\n\n\n x[1] = (x[1] < 0.) ? 0. : x[1]; /* if it's too small */\n x[1] = (x[1] > 1.) ? (1. - dv) : x[1]; /* if it's too big */\n\n return;\n\n}",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='general_newton_raphson'></a>\n\n## Step 2.f: The `general_newton_raphson()` function \\[Back to [top](#toc)\\]\n$$\\label{general_newton_raphson}$$\n\nThis function implements a [multidimensional Newton-Raphson method](https://en.wikipedia.org/wiki/Newton%27s_method#k_variables,_k_functions). We will not make the effort of explaining the algorithm exhaustively since it is pretty standard, so we will settle for a summary of the method.\n\nGiven a system of $N$ non-linear of equations and $N$ variables, $\\left\\{\\vec{F}\\!\\left(\\vec{x}\\right),\\vec{x}\\right\\}$, the Newton-Raphson method attempts to determine the root vector, $\\vec{x}_{\\star}$, iteratively through\n\n$$\n\\begin{align}\n\\vec{x}_{n+1} = \\vec{x}_{n} - J^{-1}_{F}\\!\\left(\\vec{x}_{n}\\right)\\vec{F}\\!\\left(\\vec{x}\\right)\\ ,\n\\end{align}\n$$\n\nwhere $J^{-1}_{F}$ is the Jacobian matrix\n\n$$\n\\left(J_{F}\\right)^{i}_{\\ \\ j} = \\frac{\\partial F^{i}}{\\partial x^{j}}\\ .\n$$\n\nThe index $n$ above is an *iteration* index and $\\vec{x}_{n+1}$ represents an improved approximation to $\\vec{x}_{\\star}$ when compared to $\\vec{x}_{n}$.",
"_____no_output_____"
]
],
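  [
    [
      "Before turning to the C implementation below, a minimal two-variable Newton-Raphson sketch in Python may help fix the idea. It solves a toy system (the intersection of a circle with a line) rather than the GRMHD system, and builds the step by solving $J_{F}\\,\\delta\\vec{x}=-\\vec{F}$ numerically, whereas `general_newton_raphson()` uses an analytically precomputed step for its specific two equations.\n\n```python\nimport numpy as np\n\ndef F(x):\n    # Toy system: x^2 + y^2 = 1 and y = x  (root at x = y = 1/sqrt(2))\n    return np.array([x[0]**2 + x[1]**2 - 1.0,\n                     x[1] - x[0]])\n\ndef J(x):\n    # Jacobian (J_F)^i_j = dF^i/dx^j\n    return np.array([[2.0*x[0], 2.0*x[1]],\n                     [-1.0,     1.0    ]])\n\nx = np.array([1.0, 0.0])                  # initial guess\nfor _ in range(20):\n    dx = np.linalg.solve(J(x), -F(x))     # Newton step\n    x += dx\n    if np.max(np.abs(dx)) < 1e-12:        # convergence criterion on the step size\n        break\n\nprint(x, 1.0/np.sqrt(2.0))                # both components converge to 1/sqrt(2)\n```",
      "_____no_output_____"
    ]
  ],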
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n/************************************************************\n\n general_newton_raphson():\n\n -- performs Newton-Rapshon method on an arbitrary system.\n\n -- inspired in part by Num. Rec.'s routine newt();\n\n*****************************************************************/\nstatic int general_newton_raphson( eos_struct eos, CCTK_REAL x[], int n, long &n_iter,\n void (*funcd) (eos_struct, CCTK_REAL [], CCTK_REAL [], CCTK_REAL [],\n CCTK_REAL [][NEWT_DIM], CCTK_REAL *,\n CCTK_REAL *, int,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &),CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D)\n{\n CCTK_REAL f, df, dx[NEWT_DIM], x_old[NEWT_DIM];\n CCTK_REAL resid[NEWT_DIM], jac[NEWT_DIM][NEWT_DIM];\n CCTK_REAL errx, x_orig[NEWT_DIM];\n int id, i_extra, doing_extra;\n\n int keep_iterating;\n\n\n // Initialize various parameters and variables:\n errx = 1. ;\n df = f = 1.;\n i_extra = doing_extra = 0;\n for( id = 0; id < n ; id++) x_old[id] = x_orig[id] = x[id] ;\n\n n_iter = 0;\n\n /* Start the Newton-Raphson iterations : */\n keep_iterating = 1;\n while( keep_iterating ) {\n\n (*funcd) (eos, x, dx, resid, jac, &f, &df, n, Bsq,QdotBsq,Qtsq,Qdotn,D); /* returns with new dx, f, df */\n\n\n /* Save old values before calculating the new: */\n errx = 0.;\n for( id = 0; id < n ; id++) {\n x_old[id] = x[id] ;\n }\n\n /* Make the newton step: */\n for( id = 0; id < n ; id++) {\n x[id] += dx[id] ;\n }\n\n /****************************************/\n /* Calculate the convergence criterion */\n /****************************************/\n errx = (x[0]==0.) ? fabs(dx[0]) : fabs(dx[0]/x[0]);\n\n\n /****************************************/\n /* Make sure that the new x[] is physical : */\n /****************************************/\n validate_x( x, x_old ) ;\n\n\n /*****************************************************************************/\n /* If we've reached the tolerance level, then just do a few extra iterations */\n /* before stopping */\n /*****************************************************************************/\n\n if( (fabs(errx) <= NEWT_TOL) && (doing_extra == 0) && (EXTRA_NEWT_ITER > 0) ) {\n doing_extra = 1;\n }\n\n if( doing_extra == 1 ) i_extra++ ;\n\n if( ((fabs(errx) <= NEWT_TOL)&&(doing_extra == 0))\n || (i_extra > EXTRA_NEWT_ITER) || (n_iter >= (MAX_NEWT_ITER-1)) ) {\n keep_iterating = 0;\n }\n\n n_iter++;\n\n } // END of while(keep_iterating)\n\n /* Check for bad untrapped divergences : */\n if( (finite(f)==0) || (finite(df)==0) ) {\n return(2);\n }\n\n\n if( fabs(errx) > MIN_NEWT_TOL){\n //CCTK_VInfo(CCTK_THORNSTRING,\"%d %e %e %e %e\",n_iter,f,df,errx,MIN_NEWT_TOL);\n return(1);\n }\n if( (fabs(errx) <= MIN_NEWT_TOL) && (fabs(errx) > NEWT_TOL) ){\n return(0);\n }\n if( fabs(errx) <= NEWT_TOL ){\n return(0);\n }\n\n return(0);\n\n}",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='func_vsq'></a>\n\n## Step 2.g: The `func_vsq()` function \\[Back to [top](#toc)\\]\n$$\\label{func_vsq}$$\n\nThis function is used by the `general_newton_raphson()` function to compute the residuals and stepping. We will again not describe it in great detail since the method itself is relatively straightforward.",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n\n\n/**********************************************************************/\n/*********************************************************************************\n func_vsq():\n\n -- calculates the residuals, and Newton step for general_newton_raphson();\n -- for this method, x=W,vsq here;\n\n Arguments:\n x = current value of independent var's (on input & output);\n dx = Newton-Raphson step (on output);\n resid = residuals based on x (on output);\n jac = Jacobian matrix based on x (on output);\n f = resid.resid/2 (on output)\n df = -2*f; (on output)\n n = dimension of x[];\n*********************************************************************************/\n\nstatic void func_vsq(eos_struct eos, CCTK_REAL x[], CCTK_REAL dx[], CCTK_REAL resid[],\n CCTK_REAL jac[][NEWT_DIM], CCTK_REAL *f, CCTK_REAL *df, int n,\n CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D)\n{\n\n\n CCTK_REAL W, vsq, Wsq, p_tmp, dPdvsq, dPdW;\n CCTK_REAL t11;\n CCTK_REAL t16;\n CCTK_REAL t18;\n CCTK_REAL t2;\n CCTK_REAL t21;\n CCTK_REAL t23;\n CCTK_REAL t24;\n CCTK_REAL t25;\n CCTK_REAL t3;\n CCTK_REAL t35;\n CCTK_REAL t36;\n CCTK_REAL t4;\n CCTK_REAL t40;\n CCTK_REAL t9;\n\n // vv TESTING vv\n // CCTK_REAL D,gtmp,gamma,rho0,w,p,u;\n // ^^ TESTING ^^\n\n W = x[0];\n vsq = x[1];\n\n Wsq = W*W;\n\n // vv TESTING vv\n /*\n D = U[RHO] ;\n gtmp = sqrt(1. - vsq);\n gamma = 1./gtmp ;\n rho0 = D * gtmp;\n\n w = W * (1. - vsq) ;\n p = pressure_rho0_w(eos, rho0,w) ;\n u = w - (rho0 + p) ;\n\n if(u<=0 && 1==1) {\n vsq = 0.9999999 * (1.0-(rho0+p)/W);\n\n w = W * (1. - vsq) ;\n p = pressure_rho0_w(eos, rho0,w) ;\n u = w - (rho0 + p) ;\n\n //CCTK_VInfo(CCTK_THORNSTRING,\"%e check\",u);\n }\n */\n // ^^ TESTING ^^\n\n\n p_tmp = pressure_W_vsq( eos, W, vsq , D);\n dPdW = dpdW_calc_vsq( W, vsq );\n dPdvsq = dpdvsq_calc( eos, W, vsq, D );\n\n // These expressions were calculated using Mathematica, but made into efficient\n // code using Maple. Since we know the analytic form of the equations, we can\n // explicitly calculate the Newton-Raphson step:\n\n t2 = -0.5*Bsq+dPdvsq;\n t3 = Bsq+W;\n t4 = t3*t3;\n t9 = 1/Wsq;\n t11 = Qtsq-vsq*t4+QdotBsq*(Bsq+2.0*W)*t9;\n t16 = QdotBsq*t9;\n t18 = -Qdotn-0.5*Bsq*(1.0+vsq)+0.5*t16-W+p_tmp;\n t21 = 1/t3;\n t23 = 1/W;\n t24 = t16*t23;\n t25 = -1.0+dPdW-t24;\n t35 = t25*t3+(Bsq-2.0*dPdvsq)*(QdotBsq+vsq*Wsq*W)*t9*t23;\n t36 = 1/t35;\n dx[0] = -(t2*t11+t4*t18)*t21*t36;\n t40 = (vsq+t24)*t3;\n dx[1] = -(-t25*t11-2.0*t40*t18)*t21*t36;\n //detJ = t3*t35; // <- set but not used...\n jac[0][0] = -2.0*t40;\n jac[0][1] = -t4;\n jac[1][0] = t25;\n jac[1][1] = t2;\n resid[0] = t11;\n resid[1] = t18;\n\n\n\n *df = -resid[0]*resid[0] - resid[1]*resid[1];\n\n *f = -0.5 * ( *df );\n\n}",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='harm_utoprim_2d__c__eos_dep'></a>\n\n# Step 3: EOS dependent routines \\[Back to [top](#toc)\\]\n$$\\label{harm_utoprim_2d__c__eos_dep}$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n\n/**********************************************************************\n **********************************************************************\n\n The following routines specify the equation of state. All routines\n above here should be indpendent of EOS. If the user wishes\n to use another equation of state, the below functions must be replaced\n by equivalent routines based upon the new EOS.\n\n **********************************************************************\n **********************************************************************/",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='pressure_w_vsq'></a>\n\n## Step 3.a: The `pressure_W_vsq()` function \\[Back to [top](#toc)\\]\n$$\\label{pressure_w_vsq}$$\n\nThis function computes $p\\left(W,v^{2}\\right)$. For a $\\Gamma$-law equation of state,\n\n$$\np_{\\Gamma} = \\left(\\Gamma-1\\right)u\\ ,\n$$\n\nand with the definitions\n\n$$\n\\begin{align}\n\\gamma^{2} &= \\frac{1}{1-v^{2}}\\ ,\\\\\nW &= \\gamma^{2}w\\ ,\\\\\nD &= \\gamma\\rho_{b}\\ ,\\\\\nw &= \\rho_{b} + u + p\\ ,\n\\end{align}\n$$\n\nwe have\n\n$$\n\\begin{align}\np_{\\Gamma} &= \\left(\\Gamma-1\\right)u\\\\\n &= \\left(\\Gamma-1\\right)\\left(w - \\rho_{b} - p_{\\Gamma}\\right)\\\\\n &= \\left(\\Gamma-1\\right)\\left(\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma}\\right) - \\left(\\Gamma-1\\right)p_{\\Gamma}\\\\\n\\implies\n&\\boxed{\np_{\\Gamma} = \\frac{\\left(\\Gamma-1\\right)}{\\Gamma}\\left(\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma}\\right)\n}\\ .\n\\end{align}\n$$\n\nThus, the pre-PPEOS Patch version of this function was\n\n```c\n/**********************************************************************/\n/********************************************************************** \n pressure_W_vsq(): \n \n -- Gamma-law equation of state;\n -- pressure as a function of W, vsq, and D:\n**********************************************************************/\nstatic CCTK_REAL pressure_W_vsq(CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D) \n{\n\n CCTK_REAL gtmp;\n gtmp = 1. - vsq;\n \n return( (GAMMA - 1.) * ( W * gtmp - D * sqrt(gtmp) ) / GAMMA );\n\n}\n```\n\nWe are now, however, interested in the hybrid EOS of the form\n\n$$\np_{\\rm hybrid} = P_{\\rm cold} + P_{\\rm th}\\ ,\n$$\n\nwhere $P_{\\rm cold}$ is given by a single or piecewise polytrope EOS,\n\n$$\nP_{\\rm cold} = K_{i}\\rho_{b}^{\\Gamma_{i}}\\ ,\n$$\n\n$P_{\\rm th}$ accounts for thermal effects and is given by\n\n$$\nP_{\\rm th} = \\left(\\Gamma_{\\rm th} - 1\\right)\\epsilon_{\\rm th}\\ ,\n$$\n\nand\n\n$$\n\\begin{align}\n\\epsilon \\equiv \\frac{u}{\\rho_{b}} &= \\epsilon_{\\rm th}+\\epsilon_{\\rm cold}\\ ,\\\\\n\\epsilon_{\\rm cold} &= \\int d\\rho \\frac{P_{\\rm cold}(\\rho)}{\\rho^{2}}\\ .\n\\end{align}\n$$\n\nWe then have\n\n$$\n\\begin{align}\np_{\\rm hybrid} &= P_{\\rm cold} + P_{\\rm th}\\\\\n &= P_{\\rm cold} + \\left(\\Gamma_{\\rm th}-1\\right)\\rho_{b}\\epsilon_{\\rm th}\\\\\n &= P_{\\rm cold} + \\left(\\Gamma_{\\rm th}-1\\right)\\rho_{b}\\left(\\epsilon - \\epsilon_{\\rm cold}\\right)\\\\\n &= P_{\\rm cold} + \\left(\\Gamma_{\\rm th}-1\\right)\\left(u - \\frac{D}{\\gamma}\\epsilon_{\\rm cold}\\right)\\\\\n &= P_{\\rm cold} + \\left(\\Gamma_{\\rm th}-1\\right)\\left(w - \\rho_{b} - p_{\\rm hybrid} - \\frac{D}{\\gamma}\\epsilon_{\\rm cold}\\right)\\\\\n &= P_{\\rm cold} + \\left(\\Gamma_{\\rm th}-1\\right)\\left(\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma} - \\frac{D}{\\gamma}\\epsilon_{\\rm cold}\\right)-\\left(\\Gamma_{\\rm th}-1\\right)p_{\\rm hybrid}\\\\\n &= P_{\\rm cold} + \\left(\\Gamma_{\\rm th}-1\\right)\\left[\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma}\\left(1+\\epsilon_{\\rm cold}\\right)\\right]-\\left(\\Gamma_{\\rm th}-1\\right)p_{\\rm hybrid}\\\\\n\\implies\n&\\boxed{ p_{\\rm hybrid} = \\frac{P_{\\rm cold}}{\\Gamma_{\\rm th}} + \\frac{\\left(\\Gamma_{\\rm th}-1\\right)}{\\Gamma_{\\rm th}}\\left[\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma}\\left(1+\\epsilon_{\\rm cold}\\right)\\right] }\n\\end{align}\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n/**********************************************************************/\n/**********************************************************************\n pressure_W_vsq():\n\n -- Hybrid single and piecewise polytropic equation of state;\n -- pressure as a function of P_cold, eps_cold, W, vsq, and D:\n**********************************************************************/\nstatic CCTK_REAL pressure_W_vsq(eos_struct eos, CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D)\n{\n\n#ifndef ENABLE_STANDALONE_IGM_C2P_SOLVER\n DECLARE_CCTK_PARAMETERS;\n#endif\n\n // Compute gamma^{-2} = 1 - v^{2} and gamma^{-1}\n CCTK_REAL inv_gammasq = 1.0 - vsq;\n CCTK_REAL inv_gamma = sqrt(inv_gammasq);\n\n // Compute rho_b = D / gamma\n CCTK_REAL rho_b = D*inv_gamma;\n\n // Compute P_cold and eps_cold\n CCTK_REAL P_cold, eps_cold;\n compute_P_cold__eps_cold(eos,rho_b, P_cold,eps_cold);\n\n // Compute p = P_{cold} + P_{th}\n return( ( P_cold + (Gamma_th - 1.0)*( W*inv_gammasq - D*inv_gamma*( 1.0 + eps_cold ) ) )/Gamma_th );\n\n}",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
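  [
    [
      "A quick way to sanity-check the boxed expression for $p_{\\rm hybrid}$ is to note that, for a single polytrope with $\\Gamma_{\\rm th}=\\Gamma_{\\rm poly}$, it must reduce to the $\\Gamma$-law result $p_{\\Gamma}=\\frac{\\left(\\Gamma-1\\right)}{\\Gamma}\\left(\\frac{W}{\\gamma^{2}}-\\frac{D}{\\gamma}\\right)$, because then $P_{\\rm cold}=\\left(\\Gamma-1\\right)\\rho_{b}\\epsilon_{\\rm cold}$ cancels against the $\\epsilon_{\\rm cold}$ term. The Python sketch below (illustrative only; $K_{\\rm poly}$ and all inputs are assumed values) verifies this numerically.\n\n```python\nimport numpy as np\n\nK_poly, Gamma_poly = 1.0, 2.0        # single polytrope (assumed values)\nGamma_th = Gamma_poly                # choose Gamma_th = Gamma_poly for the check\n\ndef p_hybrid(W, vsq, D):\n    inv_gammasq = 1.0 - vsq\n    inv_gamma   = np.sqrt(inv_gammasq)\n    rho_b       = D * inv_gamma\n    P_cold      = K_poly * rho_b**Gamma_poly\n    eps_cold    = K_poly * rho_b**(Gamma_poly - 1.0) / (Gamma_poly - 1.0)\n    return (P_cold + (Gamma_th - 1.0)*(W*inv_gammasq - D*inv_gamma*(1.0 + eps_cold)))/Gamma_th\n\ndef p_gamma_law(W, vsq, D, Gamma):\n    return (Gamma - 1.0)*(W*(1.0 - vsq) - D*np.sqrt(1.0 - vsq))/Gamma\n\nW, vsq, D = 2.0, 0.3, 1.0\nprint(p_hybrid(W, vsq, D), p_gamma_law(W, vsq, D, Gamma_poly))  # the two agree\n```",
      "_____no_output_____"
    ]
  ],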
[
[
"<a id='dpdw_calc_vsq'></a>\n\n## Step 3.b: The `dpdW_calc_vsq()` function \\[Back to [top](#toc)\\]\n$$\\label{dpdw_calc_vsq}$$\n\nThis function computes $\\frac{\\partial p\\left(W,v^{2}\\right)}{\\partial W}$. For a $\\Gamma$-law equation of state, remember that\n\n$$\np_{\\Gamma} = \\frac{\\left(\\Gamma-1\\right)}{\\Gamma}\\left(\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma}\\right)\\ ,\n$$\n\nwhich then implies\n\n$$\n\\boxed{\\frac{\\partial p_{\\Gamma}}{\\partial W} = \\frac{\\Gamma-1}{\\Gamma \\gamma^{2}} = \\frac{\\left(\\Gamma-1\\right)\\left(1-v^{2}\\right)}{\\Gamma}}\\ .\n$$\n\nThus, the pre-PPEOS Patch version of this function was\n\n```c\n/**********************************************************************/\n/********************************************************************** \n dpdW_calc_vsq(): \n \n -- partial derivative of pressure with respect to W;\n**********************************************************************/\nstatic CCTK_REAL dpdW_calc_vsq(CCTK_REAL W, CCTK_REAL vsq)\n{\n\n return( (GAMMA - 1.) * (1. - vsq) / GAMMA ) ;\n\n}\n```\n\nFor the case of a hybrid, single or piecewise polytropic EOS, we have\n\n$$\np_{\\rm hybrid} = \\frac{P_{\\rm cold}}{\\Gamma_{\\rm th}} + \\frac{\\left(\\Gamma_{\\rm th}-1\\right)}{\\Gamma_{\\rm th}}\\left[\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma}\\left(1+\\epsilon_{\\rm cold}\\right)\\right]\\ .\n$$\n\nIt is important to notice that the cold components of $p_{\\rm hybrid}$ are *not* functions of $W$, but instead functions of $D$: $P_{\\rm cold} = P_{\\rm cold}(\\rho_{b}) = P_{\\rm cold}(D)$ and $\\epsilon_{\\rm cold} = \\epsilon_{\\rm cold}(\\rho_{b}) = \\epsilon_{\\rm cold}(D)$. Thus\n\n$$\n\\boxed{\\frac{\\partial p_{\\rm hybrid}}{\\partial W} = \\frac{\\Gamma_{\\rm th}-1}{\\Gamma_{\\rm th} \\gamma^{2}} = \\frac{\\left(\\Gamma_{\\rm th}-1\\right)\\left(1-v^{2}\\right)}{\\Gamma_{\\rm th}}}\\ .\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n\n/**********************************************************************/\n/**********************************************************************\n dpdW_calc_vsq():\n\n -- partial derivative of pressure with respect to W;\n**********************************************************************/\nstatic CCTK_REAL dpdW_calc_vsq(CCTK_REAL W, CCTK_REAL vsq)\n{\n\n#ifndef ENABLE_STANDALONE_IGM_C2P_SOLVER\n DECLARE_CCTK_PARAMETERS;\n#endif\n\n return( (Gamma_th - 1.0) * (1.0 - vsq) / Gamma_th ) ;\n\n}",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
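[
"As another quick, *standalone* spot check (illustration only, not part of the generated source; test values are arbitrary), the $\\Gamma$-law form of the derivative just implemented, $\\partial p_{\\Gamma}/\\partial W = \\left(\\Gamma-1\\right)\\left(1-v^{2}\\right)/\\Gamma$, can be compared against a centered finite difference of $p_{\\Gamma}\\left(W,v^{2}\\right)$. Since $p_{\\Gamma}$ is linear in $W$, the two agree to roundoff:\n\n```c\n/* Standalone sanity check (illustration only): compare the analytic\n * Gamma-law derivative dp/dW = (Gamma-1)*(1-vsq)/Gamma against a centered\n * finite difference of p(W,vsq) = (Gamma-1)*( W*(1-vsq) - D*sqrt(1-vsq) )/Gamma. */\n#include <stdio.h>\n#include <math.h>\n\nstatic double pressure_W_vsq_gamma_law(const double Gamma, const double W,\n                                       const double vsq, const double D) {\n  const double gtmp = 1.0 - vsq;\n  return (Gamma - 1.0)*( W*gtmp - D*sqrt(gtmp) )/Gamma;\n}\n\nint main(void) {\n\n  const double Gamma = 5.0/3.0;\n  const double W = 2.3, vsq = 0.25, D = 1.7; /* arbitrary test values */\n  const double dW = 1.0e-6;\n\n  const double dpdW_analytic = (Gamma - 1.0)*(1.0 - vsq)/Gamma;\n  const double dpdW_numeric  = ( pressure_W_vsq_gamma_law(Gamma, W+dW, vsq, D)\n                               - pressure_W_vsq_gamma_law(Gamma, W-dW, vsq, D) )/(2.0*dW);\n\n  printf(\"dp/dW analytic = %.15e\\ndp/dW numeric  = %.15e\\n\", dpdW_analytic, dpdW_numeric);\n  return 0;\n}\n```",
"_____no_output_____"
],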
[
"<a id='dpdvsq_calc'></a>\n\n## Step 3.c: The `dpdvsq_calc()` function \\[Back to [top](#toc)\\]\n$$\\label{dpdvsq_calc}$$\n\nThis function computes $\\frac{\\partial p\\left(W,v^{2}\\right)}{\\partial W}$. For a $\\Gamma$-law equation of state, remember that\n\n$$\np_{\\Gamma} = \\frac{\\left(\\Gamma-1\\right)}{\\Gamma}\\left(\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma}\\right) = \\frac{\\left(\\Gamma-1\\right)}{\\Gamma}\\left[W\\left(1-v^{2}\\right) - D\\sqrt{1-v^{2}}\\right]\\ ,\n$$\n\nwhich then implies\n\n$$\n\\boxed{\\frac{\\partial p_{\\Gamma}}{\\partial\\left(v^{2}\\right)} = \\frac{\\Gamma-1}{\\Gamma}\\left(\\frac{D}{2\\sqrt{1-v^{2}}}-W\\right)} \\ .\n$$\n\nThus, the pre-PPEOS Patch version of this function was\n\n```c\n/**********************************************************************/\n/********************************************************************** \n dpdvsq_calc(): \n \n -- partial derivative of pressure with respect to vsq\n**********************************************************************/\nstatic CCTK_REAL dpdvsq_calc(CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D)\n{\n return( (GAMMA - 1.) * ( 0.5 * D / sqrt(1.-vsq) - W ) / GAMMA ) ;\n}\n```\n\n<a id='dpdvsq_calc__basic_quantities'></a>\n\n### Step 3.c.i: Setting basic quantities and computing $P_{\\rm cold}$ and $\\epsilon_{\\rm cold}$ \\[Back to [top](#toc)\\]\n$$\\label{dpdvsq_calc__basic_quantities}$$\n\nFor the case of a hybrid, single or piecewise polytropic EOS, we have\n\n$$\np_{\\rm hybrid} = \\frac{P_{\\rm cold}}{\\Gamma_{\\rm th}} + \\frac{\\left(\\Gamma_{\\rm th}-1\\right)}{\\Gamma_{\\rm th}}\\left[\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma}\\left(1+\\epsilon_{\\rm cold}\\right)\\right]\\ .\n$$\n\nLet us thus begin by setting the necessary parameters from the hybrid EOS.",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n/**********************************************************************/\n/**********************************************************************\n dpdvsq_calc():\n\n -- partial derivative of pressure with respect to vsq\n**********************************************************************/\nstatic CCTK_REAL dpdvsq_calc(eos_struct eos, CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D)\n{\n\n // This sets Gamma_th\n#ifndef ENABLE_STANDALONE_IGM_C2P_SOLVER\n DECLARE_CCTK_PARAMETERS;\n#endif\n\n\n // Set gamma and rho\n CCTK_REAL gamma = 1.0/sqrt(1.0 - vsq);\n CCTK_REAL rho_b = D/gamma;\n\n // Compute P_cold and eps_cold\n CCTK_REAL P_cold, eps_cold;\n compute_P_cold__eps_cold(eos,rho_b, P_cold,eps_cold);\n\n // Set basic polytropic quantities\n int polytropic_index = find_polytropic_K_and_Gamma_index(eos,rho_b);\n CCTK_REAL Gamma_ppoly_tab = eos.Gamma_ppoly_tab[polytropic_index];",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='dpdvsq_calc__dpcolddvsq'></a>\n\n### Step 3.c.ii: Computing $\\frac{\\partial P_{\\rm cold}}{\\partial\\left(v^{2}\\right)}$ \\[Back to [top](#toc)\\]\n$$\\label{dpdvsq_calc__dpcolddvsq}$$\n\nNext, remember that $P_{\\rm cold} = P_{\\rm cold}(\\rho_{b}) = P_{\\rm cold}(D,v^{2})$ and also $\\epsilon_{\\rm cold} = \\epsilon_{\\rm cold}(D,v^{2})$. Therefore, we must start by finding the derivatives of $P_{\\rm cold}$ and $\\epsilon_{\\rm cold}$ with respect to $v^{2}$.\n\nLet us first notice that\n\n$$\n\\frac{\\partial\\gamma}{\\partial\\left(v^{2}\\right)} = \\frac{\\partial}{\\partial\\left(v^{2}\\right)}\\left[\\frac{1}{\\sqrt{1-v^{2}}}\\right] = \\frac{1}{2}\\left(1-v^{2}\\right)^{-3/2} = \\frac{\\gamma^{3}}{2}\\ .\n$$\n\nThus, for a general power\n\n$$\n\\frac{\\partial\\gamma^{a}}{\\partial\\left(v^{2}\\right)} = a\\gamma^{a-1}\\frac{\\partial\\gamma}{\\partial\\left(v^{2}\\right)} = a\\gamma^{a-1}\\left(\\frac{\\gamma^{3}}{2}\\right) = \\frac{a}{2}\\gamma^{a+2}\n$$\n\nThus we have\n\n$$\n\\begin{align}\n\\frac{\\partial P_{\\rm cold}}{\\partial \\left(v^{2}\\right)}\n&= \\frac{\\partial}{\\partial\\left(v^{2}\\right)}\\left(K_{\\rm poly}\\rho_{b}^{\\Gamma_{\\rm poly}}\\right)\\\\\n&= \\frac{\\partial}{\\partial\\left(v^{2}\\right)}\\left[K_{\\rm poly}\\left(\\frac{D}{\\gamma}\\right)^{\\Gamma_{\\rm poly}}\\right]\\\\\n&= K_{\\rm poly}D^{\\Gamma_{\\rm poly}}\\frac{\\partial}{\\partial\\left(v^{2}\\right)}\\left[\\gamma^{-\\Gamma_{\\rm poly}/2}\\right]\\\\\n&=K_{\\rm poly}D^{\\Gamma_{\\rm poly}}\\left[\\frac{-\\Gamma_{\\rm poly}/2}{2}\\gamma^{-\\Gamma_{\\rm poly}/2 + 2}\\right]\\\\\n&=K_{\\rm poly}\\left(\\frac{D}{\\gamma}\\right)^{\\Gamma_{\\rm poly}}\\gamma^{-\\frac{\\Gamma_{\\rm poly}}{2} + 2 + \\Gamma_{\\rm poly}}\\\\\n\\implies &\\boxed{ \\frac{\\partial P_{\\rm cold}}{\\partial \\left(v^{2}\\right)} = \\gamma^{2+\\frac{\\Gamma_{\\rm poly}}{2}}P_{\\rm cold}}\\ .\n\\end{align}\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n /* Now we implement the derivative of P_cold with respect\n * to v^{2}, given by\n * ----------------------------------------------------\n * | dP_cold/dvsq = gamma^{2 + Gamma_{poly}/2} P_{cold} |\n * ----------------------------------------------------\n */\n CCTK_REAL dPcold_dvsq = P_cold * pow(gamma,2.0 + 0.5*Gamma_ppoly_tab);",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='dpdvsq_calc__depscolddvsq'></a>\n\n### Step 3.c.iii: Computing $\\frac{\\partial \\epsilon_{\\rm cold}}{\\partial\\left(v^{2}\\right)}$ \\[Back to [top](#toc)\\]\n$$\\label{dpdvsq_calc__depscolddvsq}$$\n\nNow, obtaining $\\epsilon_{\\rm cold}$ from $P_{\\rm cold}$ requires an integration and, therefore, generates an integration constant. Since we are interested in a *derivative* of $\\epsilon_{\\rm cold}$, however, we will simply drop the constant altogether. Remember that:\n\n$$\n\\epsilon_{\\rm cold} = K_{\\rm poly}\\int d\\rho_{b} \\rho_{b}^{\\Gamma_{\\rm poly}-2} = \\frac{K_{\\rm poly}\\rho_{b}^{\\Gamma_{\\rm poly}-1}}{\\Gamma_{\\rm poly}-1} = \\frac{P_{\\rm cold}}{\\rho_{b}\\left(\\Gamma_{\\rm poly}-1\\right)} = \\frac{\\gamma P_{\\rm cold}}{D\\left(\\Gamma_{\\rm poly}-1\\right)}\\ .\n$$\n\nThus\n\n$$\n\\begin{align}\n\\frac{\\partial \\epsilon_{\\rm cold}}{\\partial \\left(v^{2}\\right)}\n&= \\frac{1}{D\\left(\\Gamma_{\\rm poly}-1\\right)}\\left[\\gamma\\frac{\\partial P_{\\rm cold}}{\\partial \\left(v^{2}\\right)} + P_{\\rm cold}\\frac{\\partial\\gamma}{\\partial \\left(v^{2}\\right)}\\right]\\\\\n&=\\frac{1}{D\\left(\\Gamma_{\\rm poly}-1\\right)}\\left[\\gamma\\frac{\\partial P_{\\rm cold}}{\\partial \\left(v^{2}\\right)} + P_{\\rm cold}\\left(\\frac{\\gamma^{3}}{2}\\right)\\right]\\\\\n\\implies &\\boxed{\n\\frac{\\partial \\epsilon_{\\rm cold}}{\\partial \\left(v^{2}\\right)} = \\frac{\\gamma}{D\\left(\\Gamma_{\\rm poly}-1\\right)}\\left[\\frac{\\partial P_{\\rm cold}}{\\partial \\left(v^{2}\\right)} + \\frac{\\gamma^{2} P_{\\rm cold}}{2}\\right]\\ .\n}\n\\end{align}\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n\n /* Now we implement the derivative of eps_cold with respect\n * to v^{2}, given by\n * -----------------------------------------------------------------------------------\n * | deps_cold/dvsq = gamma/(D*(Gamma_ppoly_tab-1)) * (dP_cold/dvsq + gamma^{2} P_cold / 2) |\n * -----------------------------------------------------------------------------------\n */\n CCTK_REAL depscold_dvsq = ( gamma/(D*(Gamma_ppoly_tab-1.0)) ) * ( dPcold_dvsq + 0.5*gamma*gamma*P_cold );",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
[
"<a id='dpdvsq_calc__dpdvsq'></a>\n\n### Step 3.c.iv: Computing $\\frac{\\partial p_{\\rm hybrid}}{\\partial\\left(v^{2}\\right)}$ \\[Back to [top](#toc)\\]\n$$\\label{dpdvsq_calc__dpdvsq}$$\n\nFinally, remembering that\n\n$$\n\\begin{align}\np_{\\rm hybrid} &= \\frac{P_{\\rm cold}}{\\Gamma_{\\rm th}} + \\frac{\\left(\\Gamma_{\\rm th}-1\\right)}{\\Gamma_{\\rm th}}\\left[\\frac{W}{\\gamma^{2}} - \\frac{D}{\\gamma}\\left(1+\\epsilon_{\\rm cold}\\right)\\right]\\ ,\\\\\n\\frac{\\partial\\gamma^{a}}{\\partial\\left(v^{2}\\right)} &= \\frac{a}{2}\\gamma^{a+2}\\ ,\n\\end{align}\n$$\n\nwe have\n\n$$\n\\boxed{\n\\frac{\\partial p_{\\rm hybrid}}{\\partial\\left(v^{2}\\right)}\n= \\frac{1}{\\Gamma_{\\rm th}}\\left\\{\\frac{\\partial P_{\\rm cold}}{\\partial\\left(v^{2}\\right)} + \\left(\\Gamma_{\\rm th}-1\\right)\\left[-W + \\frac{D\\gamma}{2}\\left(1+\\epsilon_{\\rm cold}\\right) - \\frac{D}{\\gamma}\\frac{\\partial \\epsilon_{\\rm cold}}{\\partial\\left(v^{2}\\right)}\\right]\\right\\}\\ .\n}\n$$",
"_____no_output_____"
]
],
[
[
"%%writefile -a $outfile_path__harm_utoprim_2d__c\n\n /* Now we implement the derivative of p_hybrid with respect\n * to v^{2}, given by\n * -----------------------------------------------------------------------------\n * | dp/dvsq = Gamma_th^{-1}( dP_cold/dvsq |\n * | + (Gamma_{th}-1)*(-W |\n * | + D gamma (1 + eps_cold)/2 |\n * | - (D/gamma) * deps_cold/dvsq) ) |\n * -----------------------------------------------------------------------------\n */\n return( ( dPcold_dvsq + (Gamma_th-1.0)*( -W + D*gamma*(1+eps_cold)/2.0 - D*depscold_dvsq/gamma ) )/Gamma_th );\n}\n\n\n/******************************************************************************\n END OF UTOPRIM_2D.C\n******************************************************************************/\n#endif\n\n\n\n",
"Appending to ../src/harm_utoprim_2d.c\n"
]
],
[
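[
"To close this step, here is one final *standalone* spot check (illustration only; arbitrary test values): the $\\Gamma$-law limit of $\\partial p/\\partial\\left(v^{2}\\right)$, i.e. the boxed result shown for the pre-PPEOS version of `dpdvsq_calc()` at the beginning of Step 3.c, compared against a centered finite difference. The full hybrid derivative cannot be exercised in isolation here, since it relies on `compute_P_cold__eps_cold()` and `find_polytropic_K_and_Gamma_index()`, which are defined elsewhere in `IllinoisGRMHD`.\n\n```c\n/* Standalone sanity check (illustration only): compare the analytic\n * Gamma-law derivative dp/dvsq = (Gamma-1)*( D/(2*sqrt(1-vsq)) - W )/Gamma\n * against a centered finite difference of the Gamma-law p(W,vsq). */\n#include <stdio.h>\n#include <math.h>\n\nstatic double pressure_W_vsq_gamma_law(const double Gamma, const double W,\n                                       const double vsq, const double D) {\n  const double gtmp = 1.0 - vsq;\n  return (Gamma - 1.0)*( W*gtmp - D*sqrt(gtmp) )/Gamma;\n}\n\nint main(void) {\n\n  const double Gamma = 5.0/3.0;\n  const double W = 2.3, vsq = 0.25, D = 1.7; /* arbitrary test values */\n  const double dvsq = 1.0e-6;\n\n  const double dpdvsq_analytic = (Gamma - 1.0)*( 0.5*D/sqrt(1.0 - vsq) - W )/Gamma;\n  const double dpdvsq_numeric  = ( pressure_W_vsq_gamma_law(Gamma, W, vsq+dvsq, D)\n                                 - pressure_W_vsq_gamma_law(Gamma, W, vsq-dvsq, D) )/(2.0*dvsq);\n\n  printf(\"dp/dvsq analytic = %.15e\\ndp/dvsq numeric  = %.15e\\n\", dpdvsq_analytic, dpdvsq_numeric);\n  return 0;\n}\n```\n\nChecks of this sort are useful because `dpdvsq_calc()` and `dpdW_calc_vsq()` feed the Jacobian used by `func_vsq()` inside the 2D Newton-Raphson solve performed by `general_newton_raphson()`.",
"_____no_output_____"
],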
[
"<a id='code_validation'></a>\n\n# Step 4: Code validation \\[Back to [top](#toc)\\]\n$$\\label{code_validation}$$\n\nFirst we download the original `IllinoisGRMHD` source code and then compare it to the source code generated by this tutorial notebook.",
"_____no_output_____"
]
],
[
[
"# Verify if the code generated by this tutorial module\n# matches the original IllinoisGRMHD source code\n\n# First download the original IllinoisGRMHD source code\nimport urllib\nfrom os import path\n\noriginal_IGM_file_url = \"https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/IllinoisGRMHD/src/harm_utoprim_2d.c\"\noriginal_IGM_file_name = \"harm_utoprim_2d-original.c\"\noriginal_IGM_file_path = os.path.join(IGM_src_dir_path,original_IGM_file_name)\n\n# Then download the original IllinoisGRMHD source code\n# We try it here in a couple of ways in an attempt to keep\n# the code more portable\ntry:\n original_IGM_file_code = urllib.request.urlopen(original_IGM_file_url).read().decode(\"utf-8\")\n # Write down the file the original IllinoisGRMHD source code\n with open(original_IGM_file_path,\"w\") as file:\n file.write(original_IGM_file_code)\nexcept:\n try:\n original_IGM_file_code = urllib.urlopen(original_IGM_file_url).read().decode(\"utf-8\")\n # Write down the file the original IllinoisGRMHD source code\n with open(original_IGM_file_path,\"w\") as file:\n file.write(original_IGM_file_code)\n except:\n # If all else fails, hope wget does the job\n !wget -O $original_IGM_file_path $original_IGM_file_url\n\n# Perform validation\nValidation__harm_utoprim_2d__c = !diff $original_IGM_file_path $outfile_path__harm_utoprim_2d__c\n\nif Validation__harm_utoprim_2d__c == []:\n # If the validation passes, we do not need to store the original IGM source code file\n !rm $original_IGM_file_path\n print(\"Validation test for harm_utoprim_2d.c: PASSED!\")\nelse:\n # If the validation fails, we keep the original IGM source code file\n print(\"Validation test for harm_utoprim_2d.c: FAILED!\")\n # We also print out the difference between the code generated\n # in this tutorial module and the original IGM source code\n print(\"Diff:\")\n for diff_line in Validation__harm_utoprim_2d__c:\n print(diff_line)",
"Validation test for harm_utoprim_2d.c: FAILED!\nDiff:\n0a1,2\n> #ifndef __HARM_UTOPRIM_2D__C__\n> #define __HARM_UTOPRIM_2D__C__\n70,72c72,74\n< static int Utoprim_new_body(CCTK_REAL U[], CCTK_REAL gcov[NDIM][NDIM], CCTK_REAL gcon[NDIM][NDIM], CCTK_REAL gdet, CCTK_REAL prim[],long &n_iter);\n< static int general_newton_raphson( CCTK_REAL x[], int n, long &n_iter, void (*funcd) (CCTK_REAL [], CCTK_REAL [], CCTK_REAL [], CCTK_REAL [][NEWT_DIM], CCTK_REAL *, CCTK_REAL *, int,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &),CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D);\n< static void func_vsq( CCTK_REAL [], CCTK_REAL [], CCTK_REAL [], CCTK_REAL [][NEWT_DIM], CCTK_REAL *f, CCTK_REAL *df, int n,CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D);\n---\n> static int Utoprim_new_body(eos_struct eos, CCTK_REAL U[], CCTK_REAL gcov[NDIM][NDIM], CCTK_REAL gcon[NDIM][NDIM], CCTK_REAL gdet, CCTK_REAL prim[],long &n_iter);\n> static int general_newton_raphson( eos_struct eos, CCTK_REAL x[], int n, long &n_iter, void (*funcd) (eos_struct, CCTK_REAL [], CCTK_REAL [], CCTK_REAL [], CCTK_REAL [][NEWT_DIM], CCTK_REAL *, CCTK_REAL *, int,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &,CCTK_REAL &),CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D);\n> static void func_vsq( eos_struct eos, CCTK_REAL [], CCTK_REAL [], CCTK_REAL [], CCTK_REAL [][NEWT_DIM], CCTK_REAL *f, CCTK_REAL *df, int n,CCTK_REAL &Bsq,CCTK_REAL &QdotBsq,CCTK_REAL &Qtsq,CCTK_REAL &Qdotn,CCTK_REAL &D);\n74c76\n< static CCTK_REAL pressure_W_vsq(CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D) ;\n---\n> static CCTK_REAL pressure_W_vsq(eos_struct eos, CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D) ;\n76c78\n< static CCTK_REAL dpdvsq_calc(CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D);\n---\n> static CCTK_REAL dpdvsq_calc(eos_struct eos, CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D);\n89,97c91,99\n< / rho u^t \\\n< U = | T^t_t + rho u^t | sqrt(-det(g_{\\mu\\nu}))\n< | T^t_i |\n< \\ B^i /\n< \n< / rho \\\n< P = | uu |\n< | \\tilde{u}^i |\n< \\ B^i /\n---\n> / rho u^t \\\n> U = | T^t_t + rho u^t | sqrt(-det(g_{\\mu\\nu}))\n> | T^t_i |\n> \\ B^i /\n> \n> / rho \\\n> P = | uu |\n> | \\tilde{u}^i |\n> \\ B^i /\n101c103\n< U[NPR] = conserved variables (current values on input/output);\n---\n> U[NPR] = conserved variables (current values on input/output);\n105,106c107,108\n< prim[NPR] = primitive variables (guess on input, calculated values on \n< output if there are no problems);\n---\n> prim[NPR] = primitive variables (guess on input, calculated values on\n> output if there are no problems);\n114c116,117\n< int Utoprim_2d(CCTK_REAL U[NPR], CCTK_REAL gcov[NDIM][NDIM], CCTK_REAL gcon[NDIM][NDIM], \n---\n> \n> int Utoprim_2d(eos_struct eos, CCTK_REAL U[NPR], CCTK_REAL gcov[NDIM][NDIM], CCTK_REAL gcon[NDIM][NDIM], \n130a134\n> \n141a146\n> \n150c155\n< ret = Utoprim_new_body(U_tmp, gcov, gcon, gdet, prim_tmp,n_iter);\n---\n> ret = Utoprim_new_body(eos, U_tmp, gcov, gcon, gdet, prim_tmp,n_iter);\n163a169\n> \n175,177c181,183\n< / rho gamma \\\n< U = | alpha T^t_\\mu |\n< \\ alpha B^i /\n---\n> / rho gamma \\\n> U = | alpha T^t_\\mu |\n> \\ alpha B^i /\n181,184c187,190\n< / rho \\\n< prim = | uu |\n< | \\tilde{u}^i |\n< \\ alpha B^i /\n---\n> / rho \\\n> prim = | uu |\n> | \\tilde{u}^i |\n> \\ alpha B^i /\n198c204\n< 3 -> failure: W<0 or W>W_TOO_BIG\n---\n> 3 -> failure: W<0 or W>W_TOO_BIG\n204c210\n< static int Utoprim_new_body(CCTK_REAL U[NPR], CCTK_REAL gcov[NDIM][NDIM], 
\n---\n> static int Utoprim_new_body(eos_struct eos, CCTK_REAL U[NPR], CCTK_REAL gcov[NDIM][NDIM], \n217a224\n> \n237a245,248\n> // FIXME: The exact form of n^{\\mu} can be found\n> // in eq. (2.116) and implementing it\n> // directly is a lot more efficient than\n> // performing n^{\\mu} = g^{\\mu\\nu}n_{nu}\n248a260\n> \n270c282\n< p = pressure_rho0_u(rho0,u) ;\n---\n> p = pressure_rho0_u(eos, rho0,u) ;\n283a296\n> \n288c301\n< retval = general_newton_raphson( x_2d, n, n_iter, func_vsq, Bsq,QdotBsq,Qtsq,Qdotn,D) ; \n---\n> retval = general_newton_raphson( eos, x_2d, n, n_iter, func_vsq, Bsq,QdotBsq,Qtsq,Qdotn,D) ; \n311a325\n> \n318c332\n< p = pressure_rho0_w(rho0,w) ;\n---\n> p = pressure_rho0_w(eos, rho0,w) ;\n352a367\n> \n371a387\n> \n393a410\n> \n419a437\n> \n429,430c447,448\n< static int general_newton_raphson( CCTK_REAL x[], int n, long &n_iter,\n< void (*funcd) (CCTK_REAL [], CCTK_REAL [], CCTK_REAL [], \n---\n> static int general_newton_raphson( eos_struct eos, CCTK_REAL x[], int n, long &n_iter,\n> void (*funcd) (eos_struct, CCTK_REAL [], CCTK_REAL [], CCTK_REAL [], \n454c472\n< (*funcd) (x, dx, resid, jac, &f, &df, n, Bsq,QdotBsq,Qtsq,Qdotn,D); /* returns with new dx, f, df */\n---\n> (*funcd) (eos, x, dx, resid, jac, &f, &df, n, Bsq,QdotBsq,Qtsq,Qdotn,D); /* returns with new dx, f, df */\n522a541\n> \n540c559\n< static void func_vsq(CCTK_REAL x[], CCTK_REAL dx[], CCTK_REAL resid[], \n---\n> static void func_vsq(eos_struct eos, CCTK_REAL x[], CCTK_REAL dx[], CCTK_REAL resid[], \n579c598\n< p = pressure_rho0_w(rho0,w) ;\n---\n> p = pressure_rho0_w(eos, rho0,w) ;\n586c605\n< p = pressure_rho0_w(rho0,w) ;\n---\n> p = pressure_rho0_w(eos, rho0,w) ;\n595c614\n< p_tmp = pressure_W_vsq( W, vsq , D);\n---\n> p_tmp = pressure_W_vsq( eos, W, vsq , D);\n597c616\n< dPdvsq = dpdvsq_calc( W, vsq, D );\n---\n> dPdvsq = dpdvsq_calc( eos, W, vsq, D );\n635a655\n> \n646a667\n> \n651,652c672,673\n< -- Gamma-law equation of state;\n< -- pressure as a function of W, vsq, and D:\n---\n> -- Hybrid single and piecewise polytropic equation of state;\n> -- pressure as a function of P_cold, eps_cold, W, vsq, and D:\n654c675\n< static CCTK_REAL pressure_W_vsq(CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D) \n---\n> static CCTK_REAL pressure_W_vsq(eos_struct eos, CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D) \n655a677,678\n> \n> #ifndef ENABLE_STANDALONE_IGM_C2P_SOLVER\n656a680\n> #endif\n658,661c682,694\n< CCTK_REAL gtmp;\n< gtmp = 1. - vsq;\n< \n< return( (gamma_th /* <- Should be local polytropic Gamma factor */ - 1.) * ( W * gtmp - D * sqrt(gtmp) ) / gamma_th /* <- Should be local polytropic Gamma factor */ );\n---\n> // Compute gamma^{-2} = 1 - v^{2} and gamma^{-1}\n> CCTK_REAL inv_gammasq = 1.0 - vsq;\n> CCTK_REAL inv_gamma = sqrt(inv_gammasq);\n> \n> // Compute rho_b = D / gamma\n> CCTK_REAL rho_b = D*inv_gamma;\n> \n> // Compute P_cold and eps_cold\n> CCTK_REAL P_cold, eps_cold;\n> compute_P_cold__eps_cold(eos,rho_b, P_cold,eps_cold);\n> \n> // Compute p = P_{cold} + P_{th}\n> return( ( P_cold + (Gamma_th - 1.0)*( W*inv_gammasq - D*inv_gamma*( 1.0 + eps_cold ) ) )/Gamma_th );\n665a699\n> \n673a708,709\n> \n> #ifndef ENABLE_STANDALONE_IGM_C2P_SOLVER\n675c711,713\n< return( (gamma_th /* <- Should be local polytropic Gamma factor */ - 1.) * (1. 
- vsq) / gamma_th /* <- Should be local polytropic Gamma factor */ ) ;\n---\n> #endif\n> \n> return( (Gamma_th - 1.0) * (1.0 - vsq) / Gamma_th ) ;\n678a717\n> \n685c724\n< static CCTK_REAL dpdvsq_calc(CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D)\n---\n> static CCTK_REAL dpdvsq_calc(eos_struct eos, CCTK_REAL W, CCTK_REAL vsq, CCTK_REAL &D)\n686a726,728\n> \n> // This sets Gamma_th\n> #ifndef ENABLE_STANDALONE_IGM_C2P_SOLVER\n688c730,772\n< return( (gamma_th /* <- Should be local polytropic Gamma factor */ - 1.) * ( 0.5 * D / sqrt(1.-vsq) - W ) / gamma_th /* <- Should be local polytropic Gamma factor */ ) ;\n---\n> #endif\n> \n> \n> // Set gamma and rho\n> CCTK_REAL gamma = 1.0/sqrt(1.0 - vsq);\n> CCTK_REAL rho_b = D/gamma;\n> \n> // Compute P_cold and eps_cold\n> CCTK_REAL P_cold, eps_cold;\n> compute_P_cold__eps_cold(eos,rho_b, P_cold,eps_cold);\n> \n> // Set basic polytropic quantities\n> int polytropic_index = find_polytropic_K_and_Gamma_index(eos,rho_b);\n> CCTK_REAL Gamma_ppoly_tab = eos.Gamma_ppoly_tab[polytropic_index];\n> \n> \n> /* Now we implement the derivative of P_cold with respect\n> * to v^{2}, given by\n> * ----------------------------------------------------\n> * | dP_cold/dvsq = gamma^{2 + Gamma_{poly}/2} P_{cold} |\n> * ----------------------------------------------------\n> */\n> CCTK_REAL dPcold_dvsq = P_cold * pow(gamma,2.0 + 0.5*Gamma_ppoly_tab);\n> \n> \n> /* Now we implement the derivative of eps_cold with respect\n> * to v^{2}, given by\n> * -----------------------------------------------------------------------------------\n> * | deps_cold/dvsq = gamma/(D*(Gamma_ppoly_tab-1)) * (dP_cold/dvsq + gamma^{2} P_cold / 2) |\n> * -----------------------------------------------------------------------------------\n> */\n> CCTK_REAL depscold_dvsq = ( gamma/(D*(Gamma_ppoly_tab-1.0)) ) * ( dPcold_dvsq + 0.5*gamma*gamma*P_cold );\n> \n> /* Now we implement the derivative of p_hybrid with respect\n> * to v^{2}, given by\n> * -----------------------------------------------------------------------------\n> * | dp/dvsq = Gamma_th^{-1}( dP_cold/dvsq |\n> * | + (Gamma_{th}-1)*(-W |\n> * | + D gamma (1 + eps_cold)/2 |\n> * | - (D/gamma) * deps_cold/dvsq) ) |\n> * -----------------------------------------------------------------------------\n> */\n> return( ( dPcold_dvsq + (Gamma_th-1.0)*( -W + D*gamma*(1+eps_cold)/2.0 - D*depscold_dvsq/gamma ) )/Gamma_th );\n694a779\n> #endif\n"
]
],
[
[
"<a id='latex_pdf_output'></a>\n\n# Step 5: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename\n[Tutorial-IllinoisGRMHD__harm_utoprim_2d.pdf](Tutorial-IllinoisGRMHD__harm_utoprim_2d.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means).",
"_____no_output_____"
]
],
[
[
"latex_nrpy_style_path = os.path.join(nrpy_dir_path,\"latex_nrpy_style.tplx\")\n#!jupyter nbconvert --to latex --template $latex_nrpy_style_path --log-level='WARN' Tutorial-IllinoisGRMHD__harm_utoprim_2d.ipynb\n#!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__harm_utoprim_2d.tex\n#!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__harm_utoprim_2d.tex\n#!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__harm_utoprim_2d.tex\n!rm -f Tut*.out Tut*.aux Tut*.log",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
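"markdown",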
"markdown"
],
[
"code"
],
[
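"markdown",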
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
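"markdown",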
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |