Dataset fields (name: dtype, with value range or class count):

hexsha: stringlengths (40 to 40)
size: int64 (6 to 14.9M)
ext: stringclasses (1 value)
lang: stringclasses (1 value)
max_stars_repo_path: stringlengths (6 to 260)
max_stars_repo_name: stringlengths (6 to 119)
max_stars_repo_head_hexsha: stringlengths (40 to 41)
max_stars_repo_licenses: list
max_stars_count: int64 (1 to 191k)
max_stars_repo_stars_event_min_datetime: stringlengths (24 to 24)
max_stars_repo_stars_event_max_datetime: stringlengths (24 to 24)
max_issues_repo_path: stringlengths (6 to 260)
max_issues_repo_name: stringlengths (6 to 119)
max_issues_repo_head_hexsha: stringlengths (40 to 41)
max_issues_repo_licenses: list
max_issues_count: int64 (1 to 67k)
max_issues_repo_issues_event_min_datetime: stringlengths (24 to 24)
max_issues_repo_issues_event_max_datetime: stringlengths (24 to 24)
max_forks_repo_path: stringlengths (6 to 260)
max_forks_repo_name: stringlengths (6 to 119)
max_forks_repo_head_hexsha: stringlengths (40 to 41)
max_forks_repo_licenses: list
max_forks_count: int64 (1 to 105k)
max_forks_repo_forks_event_min_datetime: stringlengths (24 to 24)
max_forks_repo_forks_event_max_datetime: stringlengths (24 to 24)
avg_line_length: float64 (2 to 1.04M)
max_line_length: int64 (2 to 11.2M)
alphanum_fraction: float64 (0 to 1)
cells: list
cell_types: list
cell_type_groups: list
hexsha: cb3e787ce6b73b5ec6dfc688360af00ea2e52fcd
size: 3,338
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: ProjectEuler/18_Bottom_Up.ipynb
max_stars_repo_name: RobVor/Python
max_stars_repo_head_hexsha: 5cfcd9a72c3899a453c0ec8f4fadea71fe453c49
max_stars_repo_licenses: [ "FSFAP" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: ProjectEuler/18_Bottom_Up.ipynb
max_issues_repo_name: RobVor/Python
max_issues_repo_head_hexsha: 5cfcd9a72c3899a453c0ec8f4fadea71fe453c49
max_issues_repo_licenses: [ "FSFAP" ]
max_issues_count: 4
max_issues_repo_issues_event_min_datetime: 2021-06-02T03:44:24.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-12T00:52:58.000Z
max_forks_repo_path: ProjectEuler/18_Bottom_Up.ipynb
max_forks_repo_name: RobVor/Python
max_forks_repo_head_hexsha: 5cfcd9a72c3899a453c0ec8f4fadea71fe453c49
max_forks_repo_licenses: [ "FSFAP" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 34.061224
max_line_length: 397
alphanum_fraction: 0.511384
[ [ [ "\"\"\"By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.\n\n3\n7 4\n2 4 6\n8 5 9 3\n\nThat is, 3 + 7 + 4 + 9 = 23.\n\nFind the maximum total from top to bottom of the triangle below:\n\n75\n95 64\n17 47 82\n18 35 87 10\n20 04 82 47 65\n19 01 23 75 03 34\n88 02 77 73 07 63 67\n99 65 04 28 06 16 70 92\n41 41 26 56 83 40 80 70 33\n41 48 72 33 47 32 37 16 94 29\n53 71 44 65 25 43 91 52 97 51 14\n70 11 33 28 77 73 17 78 39 68 17 57\n91 71 52 38 17 14 91 43 58 50 27 29 48\n63 66 04 68 89 53 67 30 73 16 69 87 40 31\n04 62 98 27 23 09 70 98 73 93 38 53 60 04 23\n\nNOTE: As there are only 16384 routes, it is possible to solve this problem by trying every route. However, Problem 67, is the same challenge with a triangle containing one-hundred rows; it cannot be solved by brute force, and requires a clever method! ;o)\"\"\"\n\nTriag = [[75],[95,64],[17,47,82],[18,35,87,10],[20,4,82,47,65],[19,1,23,75,3,34],[88,2,77,73,7,63,67],[99,65,4,28,6,16,70,92],[41,41,26,56,83,40,80,70,33],[41,48,72,33,47,32,37,16,94,29],[53,71,44,65,25,43,91,52,97,51,14],[70,11,33,28,77,73,17,78,39,68,17,57],[91,71,52,38,17,14,91,43,58,50,27,29,48],[63,66,4,68,89,53,67,30,73,16,69,87,40,31],[4,62,98,27,23,9,70,98,73,93,38,53,60,4,23]]\n\ndef MaxValuePath(vals):\n \n for i in range(len(vals) -1, -1, -1):\n for j in range(0,i):\n vals[i-1][j] += max(vals[i][j], vals[i][j+1])\n return vals\n \nMaxValuePath(Triag)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
hexsha: cb3ebb4efa4ab23bc42f85f3a530a9b67b245533
size: 9,515
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: jupyter/quick_start.ipynb
max_stars_repo_name: iamvarol/spark-nlp-workshop
max_stars_repo_head_hexsha: 73a9064bd47d4dc0692f0297748eb43cd094aabd
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: jupyter/quick_start.ipynb
max_issues_repo_name: iamvarol/spark-nlp-workshop
max_issues_repo_head_hexsha: 73a9064bd47d4dc0692f0297748eb43cd094aabd
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: jupyter/quick_start.ipynb
max_forks_repo_name: iamvarol/spark-nlp-workshop
max_forks_repo_head_hexsha: 73a9064bd47d4dc0692f0297748eb43cd094aabd
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 22.335681
max_line_length: 194
alphanum_fraction: 0.523069
[ [ [ "![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/quick_start.ipynb)", "_____no_output_____" ], [ "# Spark NLP Quick Start\n### How to use Spark NLP pretrained pipelines", "_____no_output_____" ] ], [ [ "# This is only to setup PySpark and Spark NLP on Colab\n!wget http://setup.johnsnowlabs.com/colab.sh -O - | bash", "openjdk version \"1.8.0_252\"\nOpenJDK Runtime Environment (build 1.8.0_252-8u252-b09-1~18.04-b09)\nOpenJDK 64-Bit Server VM (build 25.252-b09, mixed mode)\n" ], [ "import sparknlp \n\nspark = sparknlp.start()\n\nprint(\"Spark NLP version: \", sparknlp.version())\nprint(\"Apache Spark version: \", spark.version)", "Spark NLP version: 2.5.0\nApache Spark version: 2.4.4\n" ], [ "from sparknlp.pretrained import PretrainedPipeline ", "_____no_output_____" ] ], [ [ "Let's use Spark NLP pre-trained pipeline for `named entity recognition`\n\n`NOTE`: if you are using `Windows` please use this pipeline instead: `recognize_entities_dl_noncontrib`", "_____no_output_____" ] ], [ [ "pipeline = PretrainedPipeline('recognize_entities_dl', 'en')", "recognize_entities_dl download started this may take some time.\nApprox size to download 159 MB\n[OK!]\n" ], [ "result = pipeline.annotate('Google has announced the release of a beta version of the popular TensorFlow machine learning library.') ", "_____no_output_____" ], [ "print(result['ner'])", "['B-ORG', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-ORG', 'O', 'O', 'O', 'O']\n" ], [ "print(result['entities'])", "['Google', 'TensorFlow']\n" ] ], [ [ "Let's use Spark NLP pre-trained pipeline for `sentiment` analysis", "_____no_output_____" ] ], [ [ "pipeline = PretrainedPipeline('analyze_sentiment', 'en') ", "analyze_sentiment download started this may take some time.\nApprox size to download 4.9 MB\n[OK!]\n" ], [ "result = pipeline.annotate('This is a very boring movie. I recommend others to awoid this movie is not good..')", "_____no_output_____" ], [ "print(result['sentiment'])", "['negative', 'negative', 'negative']\n" ], [ "print(result['checked'])", "['This', 'is', 'a', 'very', 'boring', 'movie', '.', 'I', 'recommend', 'others', 'to', 'avoid', 'this', 'movie', 'is', 'not', 'good', '.', '.']\n" ] ], [ [ "The word `awoid` has been corrected to `avoid` by spell checker insdie this pipeline", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
hexsha: cb3ec1106e94b96c7a25fc3ff6b0ad83e5e27a23
size: 4,076
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: ipynb/Netherlands.ipynb
max_stars_repo_name: RobertRosca/oscovida.github.io
max_stars_repo_head_hexsha: d609949076e3f881e38ec674ecbf0887e9a2ec25
max_stars_repo_licenses: [ "CC-BY-4.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: ipynb/Netherlands.ipynb
max_issues_repo_name: RobertRosca/oscovida.github.io
max_issues_repo_head_hexsha: d609949076e3f881e38ec674ecbf0887e9a2ec25
max_issues_repo_licenses: [ "CC-BY-4.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ipynb/Netherlands.ipynb
max_forks_repo_name: RobertRosca/oscovida.github.io
max_forks_repo_head_hexsha: d609949076e3f881e38ec674ecbf0887e9a2ec25
max_forks_repo_licenses: [ "CC-BY-4.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 28.503497
max_line_length: 165
alphanum_fraction: 0.51055
[ [ [ "# Netherlands\n\n* Homepage of project: https://oscovida.github.io\n* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Netherlands.ipynb)", "_____no_output_____" ] ], [ [ "import datetime\nimport time\n\nstart = datetime.datetime.now()\nprint(f\"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}\")", "_____no_output_____" ], [ "%config InlineBackend.figure_formats = ['svg']\nfrom oscovida import *", "_____no_output_____" ], [ "overview(\"Netherlands\");", "_____no_output_____" ], [ "# load the data\ncases, deaths, region_label = get_country_data(\"Netherlands\")\n\n# compose into one table\ntable = compose_dataframe_summary(cases, deaths)\n\n# show tables with up to 500 rows\npd.set_option(\"max_rows\", 500)\n\n# display the table\ntable", "_____no_output_____" ] ], [ [ "# Explore the data in your web browser\n\n- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Netherlands.ipynb)\n- and wait (~1 to 2 minutes)\n- Then press SHIFT+RETURN to advance code cell to code cell\n- See http://jupyter.org for more details on how to use Jupyter Notebook", "_____no_output_____" ], [ "# Acknowledgements:\n\n- Johns Hopkins University provides data for countries\n- Robert Koch Institute provides data for within Germany\n- Open source and scientific computing community for the data tools\n- Github for hosting repository and html files\n- Project Jupyter for the Notebook and binder service\n- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))\n\n--------------------", "_____no_output_____" ] ], [ [ "print(f\"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and \"\n f\"deaths at {fetch_deaths_last_execution()}.\")", "_____no_output_____" ], [ "# to force a fresh download of data, run \"clear_cache()\"", "_____no_output_____" ], [ "print(f\"Notebook execution took: {datetime.datetime.now()-start}\")\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
hexsha: cb3ec35a933301e41bf7ad42f4f9c1bf8a9a0104
size: 74,694
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Label Clustering.ipynb
max_stars_repo_name: tienyuliu/IDS-703-Final-Project
max_stars_repo_head_hexsha: cc324483951c9e3e289fb5538f23ae0571553c89
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Label Clustering.ipynb
max_issues_repo_name: tienyuliu/IDS-703-Final-Project
max_issues_repo_head_hexsha: cc324483951c9e3e289fb5538f23ae0571553c89
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Label Clustering.ipynb
max_forks_repo_name: tienyuliu/IDS-703-Final-Project
max_forks_repo_head_hexsha: cc324483951c9e3e289fb5538f23ae0571553c89
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 45.351548
max_line_length: 122
alphanum_fraction: 0.462915
[ [ [ "#### TFIDF", "_____no_output_____" ] ], [ [ "# loading libraries\nimport pandas as pd\nfrom nltk.stem.snowball import SnowballStemmer\nstemmer = SnowballStemmer(\"english\")\nimport nltk\nimport re\nfrom sklearn.cluster import SpectralClustering\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.cluster import KMeans\nfrom sklearn.model_selection import GridSearchCV\nfrom collections import Counter\nimport ast", "_____no_output_____" ], [ "# importing data\nted_main = pd.read_csv('ted_main.csv')\nted_main['tags'] = ted_main['tags'].apply(lambda x: ast.literal_eval(x))\ntranscripts = pd.read_csv('transcripts.csv')\nted_merged = pd.merge(left=transcripts,\n right=ted_main,\n left_on='url',\n right_on='url')\ntranscript = ted_merged.transcript", "_____no_output_____" ], [ "def tokenize(text):\n tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n# stems = [stemmer.stem(t) for t in filtered_tokens]\n return filtered_tokens", "_____no_output_____" ], [ "doc = transcript.tolist()\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,\n min_df=0.2, stop_words='english',\n use_idf=True, tokenizer=tokenize, ngram_range=(1,3))\n\n%time tfidf_matrix = tfidf_vectorizer.fit_transform(doc) #fit the vectorizer to synopses\n\nprint(tfidf_matrix.shape)", "Wall time: 1min 29s\n(2467, 364)\n" ] ], [ [ "#### Spectural Clustering", "_____no_output_____" ] ], [ [ "n_cluster = range(2,11)\nbest_param = []\nlist_score = []\nfor n in n_cluster:\n model = SpectralClustering(n_clusters=n)\n model.fit(tfidf_matrix)\n label = model.labels_\n list_score.append(silhouette_score(tfidf_matrix, label))\nlist_score = np.array(list_score)\nbest_param.append(n_cluster[list_score.argmax()])\nprint(best_param)", "[8]\n" ], [ "model = SpectralClustering(n_clusters=8)\nmodel.fit(tfidf_matrix)\nlabel = model.labels_\nclusters = label.tolist()\nCounter(clusters)", "_____no_output_____" ] ], [ [ "#### KMeans Clustering", "_____no_output_____" ] ], [ [ "n_cluster = list(range(2,11))\nparam_grid = {'n_clusters': n_cluster}\nkmeans = KMeans()\nkmeans_cv = GridSearchCV(kmeans, param_grid)\nkmeans_cv.fit(tfidf_matrix)\nprint(\"Tuned Kmeans Parameter: {}\".format(kmeans_cv.best_params_))", "Tuned Kmeans Parameter: {'n_clusters': 10}\n" ], [ "km_model = KMeans(n_clusters=8)\nkm_model.fit(tfidf_matrix)\nkm_label = km_model.labels_\nkm_clusters = km_label.tolist()\nCounter(km_clusters)", "_____no_output_____" ], [ "import warnings\nwarnings.filterwarnings(\"ignore\")\nted_merged['cluster'] = clusters\nted_w_cluster = ted_merged[['title','transcript','tags','cluster']]\nted_w_cluster[ted_w_cluster['cluster']==7][:50]", "_____no_output_____" ], [ "ted_w_cluster", "_____no_output_____" ], [ "c0_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 0]['tags'].tolist() for item in sub_list]\nc1_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 1]['tags'].tolist() for item in sub_list]\nc2_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 2]['tags'].tolist() for item in sub_list]\nc3_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 3]['tags'].tolist() for item in sub_list]\nc4_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 4]['tags'].tolist() for item in sub_list]\nc5_tag = [item for sub_list in 
ted_w_cluster[ted_w_cluster.cluster == 5]['tags'].tolist() for item in sub_list]\nc6_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 6]['tags'].tolist() for item in sub_list]\nc7_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 7]['tags'].tolist() for item in sub_list]\n# c8_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 8]['tags'].tolist() for item in sub_list]\n# c9_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 9]['tags'].tolist() for item in sub_list]", "_____no_output_____" ], [ "c0_tag_stat = pd.Series(Counter(c0_tag))\nc1_tag_stat = pd.Series(Counter(c1_tag))\nc2_tag_stat = pd.Series(Counter(c2_tag))\nc3_tag_stat = pd.Series(Counter(c3_tag))\nc4_tag_stat = pd.Series(Counter(c4_tag))\nc5_tag_stat = pd.Series(Counter(c5_tag))\nc6_tag_stat = pd.Series(Counter(c6_tag))\nc7_tag_stat = pd.Series(Counter(c7_tag))\n# c8_tag_stat = pd.Series(Counter(c8_tag))\n# c9_tag_stat = pd.Series(Counter(c9_tag))", "_____no_output_____" ], [ "print(c0_tag_stat.nlargest(10))\nprint (\"\")\nprint (c1_tag_stat.nlargest(10))\nprint (\"\")\nprint (c2_tag_stat.nlargest(10))\nprint (\"\")\nprint (c3_tag_stat.nlargest(10))\nprint (\"\")\nprint (c4_tag_stat.nlargest(10))\nprint (\"\")\nprint (c5_tag_stat.nlargest(10))\nprint (\"\")\nprint(c6_tag_stat.nlargest(10))\nprint (\"\")\nprint(c7_tag_stat.nlargest(10))\nprint (\"\")\n# print(c8_tag_stat.nlargest(10))\n# print (\"\")\n# print(c9_tag_stat.nlargest(10))\n# print (\"\")", "entertainment 87\nculture 75\nhumor 64\ntechnology 51\nTEDx 49\nscience 39\nperformance 39\nmusic 36\ncomedy 36\ndesign 36\ndtype: int64\n\nglobal issues 198\nbusiness 115\neconomics 91\ntechnology 75\npolitics 59\nTEDx 54\nculture 52\nsocial change 52\nhealth 49\nsociety 49\ndtype: int64\n\ndesign 106\ncities 65\narchitecture 55\ntechnology 42\nculture 32\nart 32\ncollaboration 24\ncreativity 23\nbusiness 22\ninnovation 22\ndtype: int64\n\ntechnology 53\ndata 38\nscience 25\nhealth 17\nTEDx 16\ncommunication 13\nbusiness 12\ncomputers 12\nglobal issues 12\nmedicine 12\ndtype: int64\n\nscience 86\ntechnology 57\nenvironment 50\nexploration 36\nnature 30\nTEDx 30\ndesign 28\nwater 27\nglobal issues 26\nbiology 25\ndtype: int64\n\ntechnology 355\nscience 277\ndesign 163\nculture 133\nTEDx 133\nbiology 112\ninnovation 99\nbusiness 93\nbrain 88\nglobal issues 87\ndtype: int64\n\nculture 149\nglobal issues 108\nTEDx 100\nsocial change 88\nentertainment 85\nchildren 77\ntechnology 75\nsociety 70\nhumanity 69\nstorytelling 66\ndtype: int64\n\nwomen 64\nglobal issues 24\nfeminism 21\nculture 20\nGender equality 20\nactivism 18\nTEDx 18\ninequality 17\nsocial change 16\nsociety 15\ndtype: int64\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
hexsha: cb3ed053610007e6b5d379f123b0dfdf45411ddd
size: 30,900
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: aws_marketplace/using_model_packages/amazon_augmented_ai_with_aws_marketplace_ml_models/amazon_augmented_ai_with_aws_marketplace_ml_models.ipynb
max_stars_repo_name: Amirosimani/amazon-sagemaker-examples
max_stars_repo_head_hexsha: bc35e7a9da9e2258e77f98098254c2a8e308041a
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 2,610
max_stars_repo_stars_event_min_datetime: 2020-10-01T14:14:53.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-31T18:02:31.000Z
max_issues_repo_path: aws_marketplace/using_model_packages/amazon_augmented_ai_with_aws_marketplace_ml_models/amazon_augmented_ai_with_aws_marketplace_ml_models.ipynb
max_issues_repo_name: Amirosimani/amazon-sagemaker-examples
max_issues_repo_head_hexsha: bc35e7a9da9e2258e77f98098254c2a8e308041a
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 1,959
max_issues_repo_issues_event_min_datetime: 2020-09-30T20:22:42.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T23:58:37.000Z
max_forks_repo_path: aws_marketplace/using_model_packages/amazon_augmented_ai_with_aws_marketplace_ml_models/amazon_augmented_ai_with_aws_marketplace_ml_models.ipynb
max_forks_repo_name: Amirosimani/amazon-sagemaker-examples
max_forks_repo_head_hexsha: bc35e7a9da9e2258e77f98098254c2a8e308041a
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 2,052
max_forks_repo_forks_event_min_datetime: 2020-09-30T22:11:46.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-31T23:02:51.000Z
avg_line_length: 39.312977
max_line_length: 854
alphanum_fraction: 0.633981
[ [ [ "# Amazon Augmented AI(A2I) Integrated with AWS Marketplace ML Models", "_____no_output_____" ], [ "Sometimes, for some payloads, machine learning (ML) model predictions are just not confident enough and you want more than a machine. Furthermore, training a model can be complicated, time-consuming, and expensive. This is where [AWS Marketplace](https://aws.amazon.com/marketplace/b/6297422012?page=1&filters=FulfillmentOptionType&FulfillmentOptionType=SageMaker&ref_=sa_campaign_pbrao) and [Amazon Augmented AI](https://aws.amazon.com/augmented-ai/) (Amazon A2I) come in. By combining a pretrained ML model in AWS Marketplace with Amazon Augmented AI, you can quickly reap the benefits of pretrained models with validating and augmenting the model's accuracy with human intelligence.\n\nAWS Marketplace contains over 400 pretrained ML models. Some models are general purpose. For example, the [GluonCV SSD Object Detector](https://aws.amazon.com/marketplace/pp/prodview-ggbuxlnrm2lh4?qid=1605041213915&sr=0-5&ref_=sa_campaign_pbrao) can detect objects in an image and place bounding boxes around the objects. AWS Marketplace also offers many purpose-built models such as a [Background Noise Classifier](https://aws.amazon.com/marketplace/pp/prodview-vpd6qdjm4d7u4?applicationId=AWS-Sagemaker-Console&ref_=sa_campaign_pbrao), a [Hard Hat Detector for Worker Safety](https://aws.amazon.com/marketplace/pp/prodview-jd5tj2egpxxum?applicationId=AWS-Sagemaker-Console&ref_=sa_campaign_pbrao), and a [Person in Water](https://aws.amazon.com/marketplace/pp/prodview-wlndemzv5pxhw?applicationId=AWS-Sagemaker-Console&ref_=sa_campaign_pbrao).\n\nAmazon A2I provides a human-in-loop workflow to review ML predictions. Its configurable human-review workflow solution and customizable user-review console enable you to focus on ML tasks and increase the accuracy of the predictions with human input.", "_____no_output_____" ], [ "## Overview\n\nIn this notebook, you will use a pre-trained AWS Marketplace machine learning model with Amazon A2I to detect images as well as trigger a human-in-loop workflow to review, update and add additional labeled objects to an individual image. Furthermore, you can specify configurable threshold criteria for triggering the human-in-loop workflow in Amazon A2I. For example, you can trigger a human-in-loop workflow if there are no objects that are detected with an accuracy of 90% or greater.\n\nThe following diagram shows the AWS services that are used in this notebook and the steps that you will perform. 
Here are the high level steps in this notebook:\n1.\tConfigure the human-in-loop review using Amazon A2I\n1.\tSelect, deploy, and invoke an AWS Marketplace ML model\n1.\tTrigger the human review workflow in Amazon A2I.\n1.\tThe private workforce that was created in Amazon SageMaker Ground Truth reviews and edits the objects detected in the image.\n", "_____no_output_____" ], [ "<img style=\"float: center;\" src=\"./img/a2i_diagram.png\" width=\"700\" height=\"500\">", "_____no_output_____" ], [ "## Contents\n\n* [Prerequisites](#Prerequisites)\n* [Step 1 Configure Amazon A2I service](#step1)\n * [Step 1.1 Creating human review Workteam or Workforce](#step1_1)\n * [Step 1.2 Create Human Task UI](#step1_2)\n * [Step 1.3 Create the Flow Definition](#step1_3)\n* [Step 2 Deploy and invoke AWS Marketplace model](#step2)\n * [Step 2.1 Create an endpoint](#step2_1)\n * [Step 2.2 Create input payload](#step2_2)\n * [Step 2.3 Perform real-time inference](#step2_3)\n* [Step3 Starting Human Loops](#step3)\n * [Step 3.1 View Task Results](#step3_1)\n* [Step 4 Next steps](#step4)\n * [Step 4.1 Additional resources](#step4_1)\n* [Step 5 Cleanup Resources](#step5)\n\n### Usage instructions\nYou can run this notebook one cell at a time (By using Shift+Enter for running a cell).", "_____no_output_____" ], [ "## Prerequisites <a class=\"anchor\" id=\"prerequisites\"></a>\n\nThis sample notebook requires a subscription to **[GluonCV SSD Object Detector](https://aws.amazon.com/marketplace/pp/prodview-ggbuxlnrm2lh4?ref_=sa_campaign_pbrao)**, a pre-trained machine learning model package from AWS Marketplace. \nIf your AWS account has not been subscribed to this listing, here is the process you can follow: \n1. Open the [listing](https://aws.amazon.com/marketplace/pp/prodview-ggbuxlnrm2lh4?ref_=sa_campaign_pbrao) from AWS Marketplace\n1. Read the Highlights section and then product overview section of the listing.\n1. View usage information and then additional resources.\n1. Note the supported instance types.\n1. Next, click on **Continue to subscribe.**\n1. Review End-user license agreement, support terms, as well as pricing information.\n1. The **Accept Offer** button needs to be selected if your organization agrees with EULA, pricing information as well as support terms. If the Continue to configuration button is active, it means your account already has a subscription to this listing. Once you select the **Continue to configuration** button and then choose **region**, you will see that a Product Arn will appear. This is the **model package ARN** that you need to specify in the following cell.", "_____no_output_____" ] ], [ [ "model_package_arn = \"arn:aws:sagemaker:us-east-1:865070037744:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0\" # Update as needed", "_____no_output_____" ] ], [ [ "8. This notebook requires the IAM role associated with this notebook to have *AmazonSageMakerFullAccess* IAM permission.\n\n8. Note: If you want to run this notebook on AWS SageMaker Studio - please use Classic Jupyter mode to be able correctly render visualization. Pick instance type **'ml.m4.xlarge'** or larger. 
Set kernel to **'Data Science'**.\n\n <img style=\"float: left;\" src=\"./img/classicjupyter.png\">", "_____no_output_____" ], [ "### Installing Dependencies\n\nImport the libraries that are needed for this notebook.", "_____no_output_____" ] ], [ [ "# Import necessary libraries\nimport boto3\nimport json\nimport pandas as pd\nimport pprint\nimport requests\nimport sagemaker\nimport shutil\nimport time\nimport uuid\nimport PIL.Image\nfrom IPython.display import Image\nfrom IPython.display import Markdown as md\nfrom sagemaker import get_execution_role\nfrom sagemaker import ModelPackage", "_____no_output_____" ] ], [ [ "#### Setup Variables, Bucket and Paths", "_____no_output_____" ] ], [ [ "# Setting Role to the default SageMaker Execution Role\nrole = get_execution_role()\n\n# Instantiate the SageMaker session and client that will be used throughout the notebook\nsagemaker_session = sagemaker.Session()\nsagemaker_client = sagemaker_session.sagemaker_client\n\n# Fetch the region\nregion = sagemaker_session.boto_region_name\n\n# Create S3 and A2I clients\ns3 = boto3.client(\"s3\", region)\na2i = boto3.client(\"sagemaker-a2i-runtime\", region)\n\n# Retrieve the current timestamp\ntimestamp = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime())\n\n# endpoint_name = '<ENDPOINT_NAME>'\nendpoint_name = \"gluoncv-object-detector\"\n\n# content_type='<CONTENT_TYPE>'\ncontent_type = \"image/jpeg\"\n\n# Instance size type to be used for making real-time predictions\nreal_time_inference_instance_type = \"ml.m4.xlarge\"\n\n# Task UI name - this value is unique per account and region. You can also provide your own value here.\n# task_ui_name = '<TASK_UI_NAME>'\ntask_ui_name = \"ui-aws-marketplace-gluon-model-\" + timestamp\n\n# Flow definition name - this value is unique per account and region. You can also provide your own value here.\nflow_definition_name = \"fd-aws-marketplace-gluon-model-\" + timestamp\n\n# Name of the image file that will be used in object detection\nimage_file_name = \"image.jpg\"", "_____no_output_____" ], [ "# Create the sub-directory in the default S3 bucket\n# that will store the results of the human-in-loop A2I review\nbucket = sagemaker_session.default_bucket()\nkey = \"a2i-results\"\ns3.put_object(Bucket=bucket, Key=(key + \"/\"))\noutput_path = f\"s3://{bucket}/a2i-results\"\nprint(f\"Results of A2I will be stored in {output_path}.\")", "_____no_output_____" ] ], [ [ "## Step 1 Configure Amazon A2I service<a class=\"anchor\" id=\"step1\"></a>\nIn this section, you will create 3 resources:\n1. Private workforce\n2. Human-in-loop Console UI\n3. Workflow definition", "_____no_output_____" ], [ "### Step 1.1 Creating human review Workteam or Workforce <a class=\"anchor\" id=\"step1_1\"></a>", "_____no_output_____" ], [ "If you have already created a private work team, replace <WORKTEAM_ARN> with the ARN of your work team. If you have never created a private work team, use the instructions below to create one. To learn more about using and managing private work teams, see [Use a Private Workforce](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-private.html)).\n\n1. In the Amazon SageMaker console in the left sidebar under the Ground Truth heading, open the **Labeling Workforces**.\n1. Choose **Private**, and then choose **Create private team**.\n1. If you are a new user of SageMaker workforces, it is recommended you select **Create a private work team with AWS Cognito**.\n1. For team name, enter \"MyTeam\".\n1. 
To add workers by email, select **Invite new workers by email** and paste or type a list of up to 50 email addresses, separated by commas, into the email addresses box. If you are following this notebook, specify an email account that you have access to. The system sends an invitation email, which allows users to authenticate and set up their profile for performing human-in-loop review.\n1. Enter an organization name - this will be used to customize emails sent to your workers.\n1. For contact email, enter an email address you have access to.\n1. Select **Create private team**.\n\nThis will bring you back to the Private tab under labeling workforces, where you can view and manage your private teams and workers.\n\n### **IMPORTANT: After you have created your workteam, from the Team summary section copy the value of the ARN and uncomment and replace `<WORKTEAM_ARN>` below:**", "_____no_output_____" ] ], [ [ "# workteam_arn = '<WORKTEAM_ARN>'", "_____no_output_____" ] ], [ [ "### Step 1.2 Create Human Task UI <a class=\"anchor\" id=\"step1_2\"></a>\n\nCreate a human task UI resource, giving a UI template in liquid HTML. This template will be rendered to the human workers whenever human loop is required.\n\nFor additional UI templates, check out this repository: https://github.com/aws-samples/amazon-a2i-sample-task-uis.\n\nYou will be using a slightly modified version of the [object detection UI](https://github.com/aws-samples/amazon-a2i-sample-task-uis/blob/master/images/bounding-box.liquid.html) that provides support for the `initial-value` and `labels` variables in the template.", "_____no_output_____" ] ], [ [ "# Create task UI\n\n# Read in the template from a local file\ntemplate = open(\"./src/worker-task-template.html\").read()\n\nhuman_task_ui_response = sagemaker_client.create_human_task_ui(\n HumanTaskUiName=task_ui_name, UiTemplate={\"Content\": template}\n)\n\nhuman_task_ui_arn = human_task_ui_response[\"HumanTaskUiArn\"]\nprint(human_task_ui_arn)", "_____no_output_____" ] ], [ [ "### Step 1.3 Create the Flow Definition <a class=\"anchor\" id=\"step1_3\"></a>", "_____no_output_____" ], [ "In this section, you will create a flow definition. Flow Definitions allow you to specify:\n\n* The workforce that your tasks will be sent to.\n* The instructions that your workforce will receive. 
This is called a worker task template.\n* The configuration of your worker tasks, including the number of workers that receive a task and time limits to complete tasks.\n* Where your output data will be stored.\n\nFor more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html.", "_____no_output_____" ] ], [ [ "create_workflow_definition_response = sagemaker_client.create_flow_definition(\n FlowDefinitionName=flow_definition_name,\n RoleArn=role,\n HumanLoopConfig={\n \"WorkteamArn\": workteam_arn,\n \"HumanTaskUiArn\": human_task_ui_arn,\n \"TaskCount\": 1,\n \"TaskDescription\": \"Identify and locate the object in an image.\",\n \"TaskTitle\": \"Object detection Amazon A2I demo\",\n },\n OutputConfig={\"S3OutputPath\": output_path},\n)\nflow_definition_arn = create_workflow_definition_response[\n \"FlowDefinitionArn\"\n] # let's save this ARN for future use", "_____no_output_____" ], [ "%%time\n\n# Describe flow definition - status should be active\nfor x in range(60):\n describe_flow_definition_response = sagemaker_client.describe_flow_definition(\n FlowDefinitionName=flow_definition_name\n )\n print(describe_flow_definition_response[\"FlowDefinitionStatus\"])\n if describe_flow_definition_response[\"FlowDefinitionStatus\"] == \"Active\":\n print(\"Flow Definition is active\")\n break\n time.sleep(2)", "_____no_output_____" ] ], [ [ "## Step 2 Deploy and invoke AWS Marketplace model <a class=\"anchor\" id=\"step2\"></a>", "_____no_output_____" ], [ "In this section, you will stand up an Amazon SageMaker endpoint. Each endpoint must have a unique name which you can use for performing inference. ", "_____no_output_____" ], [ "### Step 2.1 Create an Endpoint <a class=\"anchor\" id=\"step2_1\"></a>", "_____no_output_____" ] ], [ [ "%%time\n\n# Create a deployable model from the model package.\nmodel = ModelPackage(\n role=role,\n model_package_arn=model_package_arn,\n sagemaker_session=sagemaker_session,\n predictor_cls=sagemaker.predictor.Predictor,\n)\n\n# Deploy the model\npredictor = model.deploy(\n initial_instance_count=1,\n instance_type=real_time_inference_instance_type,\n endpoint_name=endpoint_name,\n)", "_____no_output_____" ] ], [ [ "It will take anywhere between 5 to 10 minutes to create the endpoint. 
Once the endpoint has been created, you would be able to perform real-time inference.", "_____no_output_____" ], [ "### Step 2.2 Create input payload <a class=\"anchor\" id=\"step2_2\"></a>", "_____no_output_____" ], [ "In this step, you will prepare a payload to perform a prediction.", "_____no_output_____" ] ], [ [ "# Download the image file\n# Open the url image, set stream to True, this will return the stream content.\nr = requests.get(\"https://images.pexels.com/photos/763398/pexels-photo-763398.jpeg\", stream=True)\n\n# Open a local file with wb ( write binary ) permission to save it locally.\nwith open(image_file_name, \"wb\") as f:\n shutil.copyfileobj(r.raw, f)", "_____no_output_____" ] ], [ [ "Resize the image and upload the file to S3 so that the image can be referenced from the worker console UI.", "_____no_output_____" ] ], [ [ "# Load the image\nimage = PIL.Image.open(image_file_name)\n# Resize the image\nresized_image = image.resize((600, 400))\n\n# Save the resized image file locally\nresized_image.save(image_file_name)\n\n# Save file to S3\ns3 = boto3.client(\"s3\")\nwith open(image_file_name, \"rb\") as f:\n s3.upload_fileobj(f, bucket, image_file_name)\n\n# Display the image\nfrom IPython.core.display import Image, display\n\nImage(filename=image_file_name, width=600, height=400)", "_____no_output_____" ] ], [ [ "### Step 2.3 Perform real-time inference <a class=\"anchor\" id=\"step2_3\"></a>", "_____no_output_____" ], [ "Submit the image file to the model and it will detect the objects in the image.", "_____no_output_____" ] ], [ [ "with open(image_file_name, \"rb\") as f:\n payload = f.read()\n\nresponse = sagemaker_session.sagemaker_runtime_client.invoke_endpoint(\n EndpointName=endpoint_name, ContentType=content_type, Accept=\"json\", Body=payload\n)\n\nresult = json.loads(response[\"Body\"].read().decode())\n\n# Convert list to JSON\njson_result = json.dumps(result)\ndf = pd.read_json(json_result)\n\n# Display confidence scores < 0.90\ndf = df[df.score < 0.90]\nprint(df.head())", "_____no_output_____" ] ], [ [ "## Step 3 Starting Human Loops <a class=\"anchor\" id=\"step3\"></a>", "_____no_output_____" ], [ "In a previous step, you have already submitted your image to the model for prediction and stored the output in JSON format in the `result` variable. You simply need to modify the X, Y coordinates of the bounding boxes. Additionally, you can filter out all predictions that are less than 90% accurate before submitting it to your human-in-loop review. 
This will insure that your model's predictions are highly accurate and any additional detections of objects will be made by a human.", "_____no_output_____" ] ], [ [ "# Helper function to update X,Y coordinates and labels for the bounding boxes\ndef fix_boundingboxes(prediction_results, threshold=0.8):\n\n bounding_boxes = []\n labels = set()\n\n for data in prediction_results:\n label = data[\"id\"]\n labels.add(label)\n\n if data[\"score\"] > threshold:\n width = data[\"right\"] - data[\"left\"]\n height = data[\"bottom\"] - data[\"top\"]\n top = data[\"top\"]\n left = data[\"left\"]\n bounding_boxes.append(\n {\"height\": height, \"width\": width, \"top\": top, \"left\": left, \"label\": label}\n )\n\n return bounding_boxes, list(labels)\n\n\nbounding_boxes, labels = fix_boundingboxes(result, threshold=0.9)", "_____no_output_____" ], [ "# Define the content that is passed into the human-in-loop workflow and console\nhuman_loop_name = str(uuid.uuid4())\ninput_content = {\n \"initialValue\": bounding_boxes, # the bounding box values that have been detected by model prediction\n \"taskObject\": f\"s3://{bucket}/\"\n + image_file_name, # the s3 object will be passed to the worker task UI to render\n \"labels\": labels, # the labels that are displayed in the legend\n}\n\n# Trigger the human-in-loop workflow\nstart_loop_response = a2i.start_human_loop(\n HumanLoopName=human_loop_name,\n FlowDefinitionArn=flow_definition_arn,\n HumanLoopInput={\"InputContent\": json.dumps(input_content)},\n)", "_____no_output_____" ] ], [ [ "Now that the human-in-loop review has been triggered, you can log into the worker console to work on the task and make edits and additions to the object detection bounding boxes from the image.", "_____no_output_____" ] ], [ [ "# Fetch the URL for the worker console UI\nworkteam_name = workteam_arn.split(\"/\")[-1]\nmy_workteam = sagemaker_session.sagemaker_client.list_workteams(NameContains=workteam_name)\nworker_console_url = \"https://\" + my_workteam[\"Workteams\"][0][\"SubDomain\"]\n\nmd(\n \"### Click on the [Worker Console]({}) to begin reviewing the object detection\".format(\n worker_console_url\n )\n)", "_____no_output_____" ] ], [ [ "The below image shows the objects that were detected for the sample image that was used in this notebook by your model and displayed in the worker console. ", "_____no_output_____" ], [ "<img src='./img/rain_biker_bb.png' align='center' height=600 width=800/>", "_____no_output_____" ], [ "You can now make edits to the image to detect other objects. For example, in the image above, the model failed to detect the bicycle in the foreground with an accuracy of 90% or greater. However, as a human reviewer, you can clearly see the bicycle and can make a bounding box around it. Once you have finished with your edits, you can submit the result.", "_____no_output_____" ], [ "### Step 3.1 View Task Results <a class=\"anchor\" id=\"step3_1\"></a>", "_____no_output_____" ], [ "Once work is completed, Amazon A2I stores results in your S3 bucket and sends a Cloudwatch event. Your results should be available in the S3 `output_path` that you specified when all work is completed. 
Note that the human answer, the label and the bounding box, is returned and saved in the JSON file.\n\n**NOTE: You must edit/submit the image in the Worker console so that its status is `Completed`.**", "_____no_output_____" ] ], [ [ "# Fetch the details about the human loop review in order to locate the JSON output on S3\nresp = a2i.describe_human_loop(HumanLoopName=human_loop_name)\n\n# Wait for the human-in-loop review to be completed\nwhile True:\n resp = a2i.describe_human_loop(HumanLoopName=human_loop_name)\n print(\"-\", sep=\"\", end=\"\", flush=True)\n if resp[\"HumanLoopStatus\"] == \"Completed\":\n print(\"!\")\n break\n time.sleep(2)", "_____no_output_____" ] ], [ [ "Once its status is `Completed`, you can execute the below cell to view the JSON output that is stored in S3. Under `annotatedResult`, any new bounding boxes will be included along with those that the model predicted, will be included. To learn more about the output data schema, please refer to the documentation about [Output Data From Custom Task Types](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-output-data.html#sms-output-data-custom).", "_____no_output_____" ] ], [ [ "# Once the image has been submitted, display the JSON output that was sent to S3\n\nbucket, key = resp[\"HumanLoopOutput\"][\"OutputS3Uri\"].replace(\"s3://\", \"\").split(\"/\", 1)\nresponse = s3.get_object(Bucket=bucket, Key=key)\n\ncontent = response[\"Body\"].read()\njson_output = json.loads(content)\n\nprint(json.dumps(json_output, indent=1))", "_____no_output_____" ] ], [ [ "## Step 4 Next Steps <a class=\"anchor\" id=\"step4\"></a>", "_____no_output_____" ], [ "### Step 4.1 Additional Resources <a class=\"anchor\" id=\"step4_1\"></a>", "_____no_output_____" ], [ "* You can explore additional machine learning models in [AWS Marketplace - Machine Learning](https://aws.amazon.com/marketplace/b/c3714653-8485-4e34-b35b-82c2203e81c1?page=1&filters=FulfillmentOptionType&FulfillmentOptionType=SageMaker&ref_=sa_campaign_pbrao). \n* Learn more about [Amazon Augmented AI](https://aws.amazon.com/augmented-ai/)\n* Other AWS blogs that may be of interest are:\n * [Using AWS Marketplace for machine learning workloads](https://aws.amazon.com/blogs/awsmarketplace/using-aws-marketplace-for-machine-learning-workloads/)\n * [Adding AI to your applications with ready-to-use models from AWS Marketplace](https://aws.amazon.com/blogs/machine-learning/adding-ai-to-your-applications-with-ready-to-use-models-from-aws-marketplace/)\n * [Building an end-to-end intelligent document processing solution using AWS](https://aws.amazon.com/blogs/machine-learning/building-an-end-to-end-intelligent-document-processing-solution-using-aws/)", "_____no_output_____" ], [ "## Step 5 Clean up resources <a class=\"anchor\" id=\"step5\"></a>\nIn order to clean up the resources from this notebook,simply execute the below cells.", "_____no_output_____" ] ], [ [ "# Delete Workflow definition\nsagemaker_client.delete_flow_definition(FlowDefinitionName=flow_definition_name)", "_____no_output_____" ], [ "# Delete Human Task UI\nsagemaker_client.delete_human_task_ui(HumanTaskUiName=task_ui_name)", "_____no_output_____" ], [ "# Delete Endpoint\npredictor.delete_endpoint()", "_____no_output_____" ], [ "# Delete Model\npredictor.delete_model()", "_____no_output_____" ] ], [ [ "#### Cancel AWS Marketplace subscription (Optional)", "_____no_output_____" ], [ "Finally, if you subscribed to AWS Marketplace model for an experiment and would like to unsubscribe, you can follow the steps below. 
Before you cancel the subscription, ensure that you do not have any [deployable model](https://console.aws.amazon.com/sagemaker/home#/models) created from the model package or using the algorithm. Note - You can find this information by looking at the container name associated with the model.\n\n**Steps to unsubscribe from the product on AWS Marketplace:**\n\nNavigate to Machine Learning tab on Your [Software subscriptions page](https://aws.amazon.com/marketplace/ai/library?productType=ml&ref_=lbr_tab_ml).\nLocate the listing that you would need to cancel, and click Cancel Subscription.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
hexsha: cb3ed6e744db97ee22bd40385587a2b02c734364
size: 25,865
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: list slicing and updating .ipynb
max_stars_repo_name: pythontrainernag/python_7
max_stars_repo_head_hexsha: eaff90ac6885f3c1e1db91d885215040b32d8f0f
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: list slicing and updating .ipynb
max_issues_repo_name: pythontrainernag/python_7
max_issues_repo_head_hexsha: eaff90ac6885f3c1e1db91d885215040b32d8f0f
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: list slicing and updating .ipynb
max_forks_repo_name: pythontrainernag/python_7
max_forks_repo_head_hexsha: eaff90ac6885f3c1e1db91d885215040b32d8f0f
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 21.53622
max_line_length: 837
alphanum_fraction: 0.473381
[ [ [ "a = [1,2,3, 10, 7, 8, 2, 4, 1, 10]", "_____no_output_____" ], [ "print a[-5:]", "[8, 2, 4, 1, 10]\n" ], [ "print a[-5:-1]", "[8, 2, 4, 1]\n" ], [ "print a[-8:-1:-2]", "[]\n" ], [ "print a\n#print a[-8:-1:2]", "[1, 2, 3, 10, 7, 8, 2, 4, 1, 10]\n" ], [ "print a[-8:-1:2]", "[3, 7, 2, 1]\n" ], [ "print a[-1:-5]", "[]\n" ], [ "print a", "[1, 2, 3, 10, 7, 8, 2, 4, 1, 10]\n" ], [ "print a[3:-1]", "[10, 7, 8, 2, 4, 1]\n" ], [ "print a[3:-1:-1]", "[]\n" ], [ "print a[-1:3:-1]", "[10, 1, 4, 2, 8, 7]\n" ], [ "#take a list a = [1,2,2,1]", "_____no_output_____" ], [ "print a", "[1, 2, 3, 10, 7, 8, 2, 4, 1, 10]\n" ], [ "print a[2]", "3\n" ], [ "a[2] = 100", "_____no_output_____" ], [ "print a", "[1, 2, 100, 10, 7, 8, 2, 4, 1, 10]\n" ], [ "a[1:3] = [3, 2]", "_____no_output_____" ], [ "print a", "[1, 3, 2, 10, 7, 8, 2, 4, 1, 10]\n" ], [ "a[1:3] = [200]", "_____no_output_____" ], [ "print a", "[1, 200, 10, 7, 8, 2, 4, 1, 10]\n" ], [ "a[1:5:2] = [100, 300]\nprint a", "[1, 100, 10, 300, 8, 2, 4, 1, 10]\n" ], [ "#list methods\n#dir ---> directory, __doc__ ----> docstring, help", "_____no_output_____" ], [ "print dir(a)", "['__add__', '__class__', '__contains__', '__delattr__', '__delitem__', '__delslice__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getslice__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__', '__setattr__', '__setitem__', '__setslice__', '__sizeof__', '__str__', '__subclasshook__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort']\n" ], [ "x = 5\nprint dir(x)", "['__abs__', '__add__', '__and__', '__class__', '__cmp__', '__coerce__', '__delattr__', '__div__', '__divmod__', '__doc__', '__float__', '__floordiv__', '__format__', '__getattribute__', '__getnewargs__', '__hash__', '__hex__', '__index__', '__init__', '__int__', '__invert__', '__long__', '__lshift__', '__mod__', '__mul__', '__neg__', '__new__', '__nonzero__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__', '__reduce__', '__reduce_ex__', '__repr__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__setattr__', '__sizeof__', '__str__', '__sub__', '__subclasshook__', '__truediv__', '__trunc__', '__xor__', 'bit_length', 'conjugate', 'denominator', 'imag', 'numerator', 'real']\n" ], [ "x.bit_length()", "_____no_output_____" ], [ "print dir(a)", "['__add__', '__class__', '__contains__', '__delattr__', '__delitem__', '__delslice__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getslice__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__', '__setattr__', '__setitem__', '__setslice__', '__sizeof__', '__str__', '__subclasshook__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort']\n" ], [ "print a.__doc__", "list() -> new empty list\nlist(iterable) -> new list initialized from iterable's items\n" ], [ "print a.append.__doc__", "L.append(object) -- append object to end\n" ], [ "print a.count.__doc__", "L.count(value) -> integer -- return number of occurrences of value\n" ], [ "#print help(a)", "_____no_output_____" ], [ "a.append?", 
"_____no_output_____" ], [ "print dir(a)", "['__add__', '__class__', '__contains__', '__delattr__', '__delitem__', '__delslice__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getslice__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__', '__setattr__', '__setitem__', '__setslice__', '__sizeof__', '__str__', '__subclasshook__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort']\n" ], [ "print a", "[1, 100, 10, 300, 8, 2, 4, 1, 10]\n" ], [ "a.append(200)\nprint a", "[1, 100, 10, 300, 8, 2, 4, 1, 10, 200]\n" ], [ "a.extend([2,100, 5])", "_____no_output_____" ], [ "print a", "[1, 100, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5]\n" ], [ "for x in [15, 25]:\n a.append(x)\nprint a", "[1, 100, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 15, 25]\n" ], [ "print a", "[1, 100, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 15, 25]\n" ], [ "a.append([30, 40])", "_____no_output_____" ], [ "print a", "[1, 100, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 15, 25, [30, 40]]\n" ], [ "a.extend(5)", "_____no_output_____" ], [ "a.insert.__doc__", "_____no_output_____" ], [ "print a", "[1, 100, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 15, 25, [30, 40]]\n" ], [ "a.insert(2, 20)\nprint a", "[1, 100, 20, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 15, 25, [30, 40]]\n" ], [ "print dir(a)", "['__add__', '__class__', '__contains__', '__delattr__', '__delitem__', '__delslice__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getslice__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__', '__setattr__', '__setitem__', '__setslice__', '__sizeof__', '__str__', '__subclasshook__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort']\n" ], [ "print a.pop.__doc__", "L.pop([index]) -> item -- remove and return item at index (default last).\nRaises IndexError if list is empty or index is out of range.\n" ], [ "print a", "[1, 100, 20, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 15, 25, [30, 40]]\n" ], [ "a.pop()", "_____no_output_____" ], [ "print a", "[1, 100, 20, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 15, 25]\n" ], [ "c = a.pop(-2)", "_____no_output_____" ], [ "print a", "[1, 100, 20, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 25]\n" ], [ "print c", "15\n" ], [ "a.pop(100)", "_____no_output_____" ], [ "a.remove.__doc__", "_____no_output_____" ], [ "print a", "[1, 100, 20, 10, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 25]\n" ], [ "a.remove(10)", "_____no_output_____" ], [ "print a", "[1, 100, 20, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 25]\n" ], [ "a.remove(1000)", "_____no_output_____" ], [ "#del\n#del is not a list method. It is a python object. 
It can be used to delete any object in python\n# similarly it can also be used to delete the element inside the list", "_____no_output_____" ], [ "c = 100\nprint c", "100\n" ], [ "del c", "_____no_output_____" ], [ "print c", "_____no_output_____" ], [ "print a", "[1, 20, 300, 8, 2, 4, 1, 10, 200, 2, 100, 5, 25]\n" ], [ "del a[2]\nprint a", "[1, 20, 8, 2, 4, 1, 10, 200, 2, 100, 5, 25]\n" ], [ "print a.index.__doc__", "L.index(value, [start, [stop]]) -> integer -- return first index of value.\nRaises ValueError if the value is not present.\n" ], [ "print a", "[1, 20, 8, 2, 4, 1, 10, 200, 2, 100, 5, 25]\n" ], [ "a.index(20)", "_____no_output_____" ], [ "print a.index(2)", "3\n" ], [ "print a", "[1, 20, 8, 2, 4, 1, 10, 200, 2, 100, 5, 25]\n" ], [ "print a.index(20, 4)", "_____no_output_____" ], [ "print a.index(2, 4, 8)", "_____no_output_____" ], [ "print a.index(2, 4, 9)", "8\n" ], [ "# a = [10, 20, 30, 10, 15, 15, 30, 50, 55, 20, 100, 35, 15, 25 15]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
hexsha: cb3ed9fe03b65f2beeb570c8c85a1b4f01849307
size: 8,815
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: hw3/Untitled.ipynb
max_stars_repo_name: erfanMhi/Deep-Reinforcement-Learning-CS285-Pytroch
max_stars_repo_head_hexsha: 6da04f367e52a451c202ae7e5477994c1d149baf
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 91
max_stars_repo_stars_event_min_datetime: 2020-06-13T16:26:42.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-31T02:49:30.000Z
max_issues_repo_path: hw3/Untitled.ipynb
max_issues_repo_name: erfanMhi/Deep-Reinforcement-Learning-CS285-Pytroch
max_issues_repo_head_hexsha: 6da04f367e52a451c202ae7e5477994c1d149baf
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 6
max_issues_repo_issues_event_min_datetime: 2020-07-26T15:44:36.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-10T02:15:10.000Z
max_forks_repo_path: hw3/Untitled.ipynb
max_forks_repo_name: erfanMhi/Deep-Reinforcement-Learning-CS285-Pytroch
max_forks_repo_head_hexsha: 6da04f367e52a451c202ae7e5477994c1d149baf
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 16
max_forks_repo_forks_event_min_datetime: 2020-08-04T01:17:45.000Z
max_forks_repo_forks_event_max_datetime: 2022-02-24T04:51:41.000Z
avg_line_length: 26.313433
max_line_length: 267
alphanum_fraction: 0.48599
[ [ [ "import cs285.infrastructure.torch_utils as tu", "_____no_output_____" ], [ "import numpy as np\ntu.torch_one_hot(([2, 5, 8, 9]), 10)", "_____no_output_____" ] ], [ [ "# ", "_____no_output_____" ] ], [ [ "(np.array([5, 6])).dtype == int", "_____no_output_____" ], [ "import torch\ntorch.tensor(([2, 3])).dtype", "_____no_output_____" ], [ "torch.mm(torch.tensor([[2, 3], [1, 4]]), torch.tensor([[2, 3], [4, 6]]))", "_____no_output_____" ], [ "torch.tensor([[2, 3], [4, 6]]).shape", "_____no_output_____" ], [ "import tensorflow as tf", "/home/oriea/anaconda3/envs/cs285_env/lib/python3.5/site-packages/google/protobuf/__init__.py:37: UserWarning: Module cs285 was already imported from None, but /home/oriea/Codes/github/Deep-Reinforcement-Learning-CS285-Pytorch/hw3 is being added to sys.path\n __import__('pkg_resources').declare_namespace(__name__)\n" ], [ "tf.enable_eager_execution()", "_____no_output_____" ], [ "a = torch.rand(10, 9)\na", "_____no_output_____" ], [ "print\nx = torch.stack([torch.arange(10), torch.argmax(a, dim=1)]).numpy().tolist()\na.numpy()[:,x[1]]\n", "_____no_output_____" ], [ "def gather_nd(params, indices):\n \"\"\"params is of \"n\" dimensions and has size [x1, x2, x3, ..., xn], indices is of 2 dimensions and has size [num_samples, m] (m <= n)\"\"\"\n assert type(indices) == torch.Tensor\n return params[indices.transpose(0,1).long().numpy().tolist()]", "_____no_output_____" ], [ "gather_nd(a, c)", "_____no_output_____" ], [ "tf.gather_nd(a.numpy(), c.numpy())", "_____no_output_____" ], [ "torch.nn.functional.smooth_l1_loss()", "_____no_output_____" ], [ "def smooth_l1_loss(input, target, delta=1.):\n # type: (Tensor, Tensor, float) -> Tensor\n t = torch.abs(input - target)\n return torch.where(t < delta, 0.5 * t ** 2, t * delta - (0.5 * delta ** 2))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
hexsha: cb3ee32d70ff2928b72a45cfdc6441c673e5317d
size: 9,754
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: .ipynb_checkpoints/Convolutional Neural Network-checkpoint.ipynb
max_stars_repo_name: sgarbirodrigo/Urban-Sound-Classification
max_stars_repo_head_hexsha: d06fadd8cb0dc8526b02b281f2e049dce7563df1
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: .ipynb_checkpoints/Convolutional Neural Network-checkpoint.ipynb
max_issues_repo_name: sgarbirodrigo/Urban-Sound-Classification
max_issues_repo_head_hexsha: d06fadd8cb0dc8526b02b281f2e049dce7563df1
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: .ipynb_checkpoints/Convolutional Neural Network-checkpoint.ipynb
max_forks_repo_name: sgarbirodrigo/Urban-Sound-Classification
max_forks_repo_head_hexsha: d06fadd8cb0dc8526b02b281f2e049dce7563df1
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 43.351111
max_line_length: 934
alphanum_fraction: 0.550646
[ [ [ "### Load necessary libraries ###\nimport glob\nimport os\nimport librosa\nimport numpy as np\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import accuracy_score\n\nimport tensorflow as tf\nfrom tensorflow import keras", "_____no_output_____" ], [ "### Define helper functions ###\ndef extract_features(parent_dir,sub_dirs,file_ext=\"*.wav\",\n bands=60,frames=41):\n def _windows(data, window_size):\n start = 0\n while start < len(data):\n yield int(start), int(start + window_size)\n start += (window_size // 2)\n \n window_size = 512 * (frames - 1)\n features, labels = [], []\n for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):\n segment_log_specgrams, segment_labels = [], []\n sound_clip,sr = librosa.load(fn)\n label = int(fn.split('/')[2].split('-')[1])\n for (start,end) in _windows(sound_clip,window_size):\n if(len(sound_clip[start:end]) == window_size):\n signal = sound_clip[start:end]\n melspec = librosa.feature.melspectrogram(signal,n_mels=bands)\n logspec = librosa.amplitude_to_db(melspec)\n logspec = logspec.T.flatten()[:, np.newaxis].T\n segment_log_specgrams.append(logspec)\n segment_labels.append(label)\n \n segment_log_specgrams = np.asarray(segment_log_specgrams).reshape(\n len(segment_log_specgrams),bands,frames,1)\n segment_features = np.concatenate((segment_log_specgrams, np.zeros(\n np.shape(segment_log_specgrams))), axis=3)\n for i in range(len(segment_features)): \n segment_features[i, :, :, 1] = librosa.feature.delta(\n segment_features[i, :, :, 0])\n \n if len(segment_features) > 0: # check for empty segments \n features.append(segment_features)\n labels.append(segment_labels)\n return features, labels", "_____no_output_____" ], [ "# Pre-process and extract feature from the data\nparent_dir = 'UrbanSounds8K/audio/'\nsave_dir = \"UrbanSounds8K/processed/\"\nfolds = sub_dirs = np.array(['fold1','fold2','fold3','fold4',\n 'fold5','fold6','fold7','fold8',\n 'fold9','fold10'])\nfor sub_dir in sub_dirs:\n features, labels = extract_features(parent_dir,sub_dir)\n np.savez(\"{0}{1}\".format(save_dir, sub_dir), \n features=features, \n labels=labels)", "_____no_output_____" ], [ "### Define convolutional network architecture ###\ndef get_network():\n num_filters = [24,32,64,128] \n pool_size = (2, 2) \n kernel_size = (3, 3) \n input_shape = (60, 41, 2)\n num_classes = 10\n keras.backend.clear_session()\n \n model = keras.models.Sequential()\n model.add(keras.layers.Conv2D(24, kernel_size,\n padding=\"same\", input_shape=input_shape))\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.Activation(\"relu\"))\n model.add(keras.layers.MaxPooling2D(pool_size=pool_size))\n\n model.add(keras.layers.Conv2D(32, kernel_size,\n padding=\"same\"))\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.Activation(\"relu\")) \n model.add(keras.layers.MaxPooling2D(pool_size=pool_size))\n \n model.add(keras.layers.Conv2D(64, kernel_size,\n padding=\"same\"))\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.Activation(\"relu\")) \n model.add(keras.layers.MaxPooling2D(pool_size=pool_size))\n \n model.add(keras.layers.Conv2D(128, kernel_size,\n padding=\"same\"))\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.Activation(\"relu\")) \n\n model.add(keras.layers.GlobalMaxPooling2D())\n model.add(keras.layers.Dense(128, activation=\"relu\"))\n model.add(keras.layers.Dense(num_classes, activation=\"softmax\"))\n\n model.compile(optimizer=keras.optimizers.Adam(1e-4), \n 
loss=keras.losses.SparseCategoricalCrossentropy(), \n metrics=[\"accuracy\"])\n return model", "_____no_output_____" ], [ "### Train and evaluate via 10-Folds cross-validation ###\naccuracies = []\nfolds = np.array(['fold1','fold2','fold3','fold4',\n 'fold5','fold6','fold7','fold8',\n 'fold9','fold10'])\nload_dir = \"UrbanSounds8K/processed/\"\nkf = KFold(n_splits=10)\nfor train_index, test_index in kf.split(folds):\n x_train, y_train = [], []\n for ind in train_index:\n # read features or segments of an audio file\n train_data = np.load(\"{0}/{1}.npz\".format(load_dir,folds[ind]), \n allow_pickle=True)\n # for training stack all the segments so that they are treated as an example/instance\n features = np.concatenate(train_data[\"features\"], axis=0) \n labels = np.concatenate(train_data[\"labels\"], axis=0)\n x_train.append(features)\n y_train.append(labels)\n # stack x,y pairs of all training folds \n x_train = np.concatenate(x_train, axis = 0).astype(np.float32)\n y_train = np.concatenate(y_train, axis = 0).astype(np.float32)\n \n # for testing we will make predictions on each segment and average them to \n # produce signle label for an entire sound clip.\n test_data = np.load(\"{0}/{1}.npz\".format(load_dir,\n folds[test_index][0]), allow_pickle=True)\n x_test = test_data[\"features\"]\n y_test = test_data[\"labels\"]\n\n model = get_network()\n model.fit(x_train, y_train, epochs = 50, batch_size = 24, verbose = 0)\n \n # evaluate on test set/fold\n y_true, y_pred = [], []\n for x, y in zip(x_test, y_test):\n # average predictions over segments of a sound clip\n avg_p = np.argmax(np.mean(model.predict(x), axis = 0))\n y_pred.append(avg_p) \n # pick single label via np.unique for a sound clip\n y_true.append(np.unique(y)[0]) \n accuracies.append(accuracy_score(y_true, y_pred)) \nprint(\"Average 10 Folds Accuracy: {0}\".format(np.mean(accuracies)))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb3ee7431bf42b98049e669efe8ee6cd1191cd70
3,782
ipynb
Jupyter Notebook
paper/figures/texp.ipynb
exowanderer/exoplanet
dfd4859525ca574f1936de7b683951c35c292586
[ "MIT" ]
2
2021-10-01T12:46:09.000Z
2022-03-24T10:25:20.000Z
paper/figures/texp.ipynb
exowanderer/exoplanet
dfd4859525ca574f1936de7b683951c35c292586
[ "MIT" ]
null
null
null
paper/figures/texp.ipynb
exowanderer/exoplanet
dfd4859525ca574f1936de7b683951c35c292586
[ "MIT" ]
null
null
null
31.781513
99
0.497885
[ [ [ "%matplotlib inline", "_____no_output_____" ], [ "%run notebook_setup", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nimport exoplanet as xo\n\n# The light curve calculation requires an orbit\norbit = xo.orbits.KeplerianOrbit(period=1)\n\n# Compute a limb-darkened light curve using starry\ntexp = 0.02\nt = np.linspace(0.0, 0.06, 1000)\nu = [0.3, 0.2]\nstar = xo.StarryLightCurve(u)\nlight_curve_instant = star.get_light_curve(\n orbit=orbit, r=0.1, t=t).eval()\nlight_curve_exact = star.get_light_curve(\n orbit=orbit, r=0.1, t=t, texp=texp, oversample=1000).eval()\n\nfig, axes = plt.subplots(4, 1, figsize=(5, 10), sharex=True)\n\nax = axes[0]\nax.plot(t, light_curve_instant * 1e3, \":k\")\nax.plot(t, light_curve_exact * 1e3, \"k\")\nax.set_ylabel(\"relative flux [ppt]\")\n\nfor n in [3, 7, 15, 51][::-1]:\n for order in range(3):\n ax = axes[order+1]\n light_curve = star.get_light_curve(order=order,\n orbit=orbit, r=0.1, t=t, texp=texp, oversample=n).eval()\n ax.plot(t, np.log10(np.abs(light_curve - light_curve_exact)),\n label=\"{0}\".format(n))\n \n# integrated = xo.light_curves.LimbDarkLightCurve(u)\n# ax = axes[-1]\n# for tol in [-5, -4, -3, -2]:\n# light_curve, num_eval = theano.function([], integrated.get_light_curve(\n# orbit=orbit, r=0.1, t=t, texp=texp, tol=10**tol, return_num_eval=True))()\n# print(tol, num_eval / len(t))\n# ax.plot(t, np.log10(np.abs(light_curve - light_curve_exact)),\n# label=\"$10^{{{0}}},\\,{1:.0f}$\".format(tol, num_eval/len(t)), zorder=-tol)\n \nfor i, ax in enumerate(axes[1:]):\n if i <= 2:\n ax.annotate(\"order = {0}\".format(i), (0, 1), xycoords=\"axes fraction\",\n ha=\"left\", va=\"top\",\n xytext=(5, -10), textcoords=\"offset points\",\n fontsize=10)\n for loc, name in [(-3, \"ppt\"), (-6, \"ppm\"), (-9, \"ppb\")]:\n ax.axhline(loc, color=\"k\", alpha=0.3, lw=1)\n ax.annotate(name, (t.max(), loc), va=\"top\", ha=\"right\",\n xytext=(-3, -2), textcoords=\"offset points\",\n fontsize=10, alpha=0.3)\n ax.set_ylim(-10.2, -2.7)\n ax.set_ylabel(\"log$_{10}$(flux error)\")\n ax.legend(fontsize=9, ncol=4, loc=3)\n\nax.set_xlabel(\"time [days]\")\nax.set_xlim(t.min(), t.max())\nfig.subplots_adjust(hspace=0.0);", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cb3ee765f84b4c13d17e8db217a738b73b7b5251
27,731
ipynb
Jupyter Notebook
code/NLP_SpaCy.ipynb
ansegura7/NLP
3bac362038cdaee810d35ff2de0009e638c4dcb0
[ "MIT" ]
78
2019-06-13T15:28:44.000Z
2022-03-11T01:26:14.000Z
code/NLP_SpaCy.ipynb
ansegura7/NLP
3bac362038cdaee810d35ff2de0009e638c4dcb0
[ "MIT" ]
null
null
null
code/NLP_SpaCy.ipynb
ansegura7/NLP
3bac362038cdaee810d35ff2de0009e638c4dcb0
[ "MIT" ]
15
2021-07-08T02:44:45.000Z
2022-03-01T16:17:25.000Z
35.147022
3,786
0.546681
[ [ [ "# NLP - Using spaCy library", "_____no_output_____" ], [ "- **Created by Andrés Segura Tinoco**\n- **Created on June 04, 2019**\n- **Updated on October 29, 2021**", "_____no_output_____" ], [ "**Natural language processing (NLP):** is a discipline where computer science, artificial intelligence and cognitive logic are intercepted, with the objective that machines can read and understand our language for decision making <a href=\"#link_one\">[1]</a>.\n\n**spaCy:** features fast statistical NER as well as an open-source named-entity visualizer <a href=\"#link_two\">[2]</a>.", "_____no_output_____" ], [ "## Example with a document in Spanish", "_____no_output_____" ] ], [ [ "# Load Python libraries\nimport io\nimport random\nfrom collections import Counter", "_____no_output_____" ], [ "# Load NLP libraries from spacy\nimport spacy", "_____no_output_____" ], [ "# Verify installed spacy version\nspacy.__version__", "_____no_output_____" ] ], [ [ "### Step 1 - Read natural text from a book", "_____no_output_____" ] ], [ [ "# Util function to read a plain text file\ndef read_text_file(file_path, encoding='ISO-8859-1'):\n text = \"\"\n with open(file_path, 'r', encoding=encoding) as f:\n text = f.read()\n \n return text", "_____no_output_____" ], [ "# Get text sample\nfile_path = \"../data/es/El Grillo del Hogar - Charles Dickens.txt\"\nbook_text = read_text_file(file_path)", "_____no_output_____" ], [ "# Show first 1000 raw characters of document\nbook_text[:1000]", "_____no_output_____" ] ], [ [ "### Step 2 - Create a NLP model", "_____no_output_____" ] ], [ [ "# Create NLP model for spanish language\nnlp = spacy.load('es_core_news_sm')\ndoc_es = nlp(book_text)", "_____no_output_____" ] ], [ [ "**- Vocabulary:** unique words of the document.", "_____no_output_____" ] ], [ [ "# Get vocabulary\nvocabulary_es = set(str(token).lower() for token in doc_es if not token.is_stop and token.is_alpha)\nlen(vocabulary_es)", "_____no_output_____" ], [ "# Show 100 random words of the vocabulary\nprint(random.sample(vocabulary_es, 100))", "['rastrojos', 'esperad', 'dicha', 'reconozco', 'ajeno', 'particularmente', 'ocasiones', 'volveré', 'reproducir', 'peatones', 'guiarme', 'imagen', 'pendía', 'delgado', 'bastantes', 'llenando', 'sinónimo', 'caballos', 'atravesaban', 'ocultó', 'cuero', 'extemporáneo', 'alta', 'infinidad', 'inconcebible', 'consoladoras', 'siglo', 'simpatía', 'consolarse', 'hogar', 'pensionado', 'punta', 'levantarse', 'vigoroso', 'agradable', 'salió', 'perfecta', 'carcajada', 'cabecera', 'apasionado', 'estrambótica', 'demuestra', 'encontrarse', 'presente', 'examinarle', 'helaría', 'abramos', 'empeño', 'acudió', 'iniciales', 'enmohecía', 'cancioncilla', 'permitieseis', 'diversión', 'envía', 'cogerle', 'consejos', 'trabajaban', 'acá', 'incidentes', 'enlazadas', 'radiante', 'objetar', 'parado', 'preparativos', 'parándose', 'seguida', 'perfeccionamiento', 'bendito', 'vasija', 'preciso', 'sospecha', 'hayáis', 'reconocerlas', 'público', 'comparación', 'calmarla', 'quince', 'siquiera', 'exceda', 'expuesta', 'acaso', 'directa', 'a', 'tratado', 'cadena', 'intencionado', 'dirección', 'retiró', 'alejé', 'interrumpió', 'herida', 'caballero', 'descubrir', 'atareado', 'desdichada', 'areópago', 'regordetillas', 'entornado', 'rechazado']\n" ] ], [ [ "**- Stopwords:** refers to the most common words in a language, which do not significantly affect the meaning of the text.", "_____no_output_____" ] ], [ [ "# Get unique stop-words\nstop_words_es = set(str(token).lower() for token in doc_es if 
token.is_stop)\nlen(stop_words_es)", "_____no_output_____" ], [ "# Show unique stop-words\nprint(stop_words_es)", "{'mi', 'su', 'ella', 'nunca', 'mayor', 'puedo', 'ésa', 'manera', 'quiere', 'pronto', 'fui', 'nosotras', 'tercera', 'como', 'sois', 'podrían', 'trata', 'ver', 'estar', 'cuándo', 'creo', 'uno', 'varias', 'así', 'primero', 'vuestra', 'algunas', 'excepto', 'días', 'entre', 'los', 'aquella', 'cuál', 'respecto', 'cosas', 'hace', 'tal', 'sé', 'de', 'usted', 'algo', 'final', 'eso', 'está', 'estaban', 'suyo', 'nos', 'algunos', 'trabajo', 'dijeron', 'haya', 'demás', 'tenía', 'tuyos', 'anterior', 'ningún', 'seis', 'conmigo', 'yo', 'según', 'al', 'alguna', 'quién', 'unos', 'pasado', 'tan', 'hicieron', 'mí', 'antes', 'señaló', 'aún', 'estoy', 'se', 'nadie', 'aquí', 'pudo', 'vuestras', 'dan', 'verdad', 'tiempo', 'quienes', 'breve', 'hacer', 'mas', 'manifestó', 'grandes', 'sola', 'tenido', 'en', 'tener', 'sino', 'estuvo', 'dicen', 'temprano', 'debe', 'cierto', 'están', 'ese', 'hemos', 'será', 'nuestras', 'dio', 'ésta', 'vuestro', 'cinco', 'toda', 'mismo', 'todos', 'quedó', 'tarde', 'cuando', 'he', 'realizar', 'encuentra', 'somos', 'que', 'dar', 'nuestra', 'suyas', 'sea', 'consigo', 'dice', 'adelante', 'te', 'pero', 'indicó', 'cuenta', 'tengo', 'todas', 'mediante', 'añadió', 'ambos', 'uso', 'ni', 'propias', 'encima', 'mejor', 'es', 'vais', 'diferente', 'verdadero', 'estamos', 'solo', 'buena', 'estas', 'esta', 'ellos', 'siendo', 'voy', 'saber', 'poner', 'gran', 'demasiado', 'llevar', 'hizo', 'siempre', 'vuestros', 'ello', 'fin', 'casi', 'por', 'último', 'debajo', 'pocas', 'tus', 'tampoco', 'cuántos', 'han', 'sabemos', 'éste', 'tu', 'ser', 'lo', 'deben', 'veces', 'incluso', 'un', 'aunque', 'nuevos', 'nuevas', 'para', 'momento', 'claro', 'ahora', 'vaya', 'hacerlo', 'había', 'mucha', 'podemos', 'éstos', 'propio', 'apenas', 'dicho', 'peor', 'le', 'dos', 'largo', 'nueva', 'sus', 'nada', 'eran', 'delante', 'va', 'otras', 'una', 'otros', 'luego', 'ningunos', 'haciendo', 'os', 'buenas', 'existe', 'explicó', 'qué', 'día', 'cual', 'pocos', 'desde', 'aquél', 'ha', 'son', 'poca', 'todo', 'mientras', 'dado', 'habrá', 'otra', 'ya', 'les', 'quiénes', 'mucho', 'cuanto', 'mismas', 'van', 'cuales', 'cuánto', 'cómo', 'buen', 'pasada', 'me', 'mal', 'donde', 'puede', 'soy', 'segundo', 'partir', 'verdadera', 'cuatro', 'medio', 'sin', 'tiene', 'menudo', 'ahí', 'detrás', 'estaba', 'ante', 'porque', 'cada', 'esa', 'algún', 'muchas', 'lejos', 'mismos', 'buenos', 'hubo', 'después', 'usar', 'parte', 'esos', 'arriba', 'bueno', 'lugar', 'misma', 'tuvo', 'pueda', 'última', 'era', 'propios', 'fuera', 'quien', 'igual', 'estará', 'sería', 'nuevo', 'cualquier', 'habla', 'aun', 'dijo', 'ninguna', 'con', 'menos', 'principalmente', 'suya', 'él', 'tenemos', 'ciertas', 'hacemos', 'existen', 'estos', 'da', 'ti', 'pesar', 'también', 'tanto', 'sido', 'hecho', 'tres', 'segunda', 'debido', 'allí', 'bastante', 'hacia', 'general', 'ejemplo', 'esas', 'pueden', 'próximos', 'más', 'nuestro', 'poco', 'ellas', 'ayer', 'trabajan', 'vez', 'podrá', 'cuantos', 'ciertos', 'expresó', 'sobre', 'valor', 'mío', 'alguno', 'siguiente', 'la', 'realizado', 'tienen', 'todavía', 'alrededor', 'poder', 'vosotros', 'propia', 'mis', 'mía', 'no', 'lado', 'primer', 'través', 'próximo', 'saben', 'decir', 'muy', 'tendrá', 'conocer', 'modo', 'aquel', 'solas', 'el', 'sigue', 'aquélla', 'cierta', 'tenga', 'últimas', 'cerca', 'podría', 'esto', 'realizó', 'muchos', 'habían', 'cuantas', 'vamos', 'contra', 'horas', 'sean', 'tras', 'sabe', 'acuerdo', 'fue', 'llegó', 
'solamente', 'dentro', 'junto', 'hoy', 'ir', 'además', 'solos', 'dieron', 'entonces', 'aquellos', 'dónde', 'bajo', 'hay', 'estado', 'trabajar', 'pues', 'raras', 'las', 'posible', 'quizá', 'bien', 'hasta', 'si', 'primera', 'dejó', 'haber', 'parece', 'fueron', 'ustedes', 'sí', 'míos', 'sólo', 'este', 'nosotros', 'unas', 'otro', 'del', 'durante'}\n" ] ], [ [ "**- Entity:** can be any word or series of words that consistently refers to the same thing.", "_____no_output_____" ] ], [ [ "# Returns a text with data quality\ndef text_quality(text):\n new_text = text.replace('\\n', '')\n return new_text.strip('\\r\\n')\n\n# Print out named first 50 entities\nfor ix in range(50):\n ent = doc_es.ents[ix]\n ent_text = text_quality(ent.text)\n \n if len(ent_text) > 3:\n print((ix + 1), '- Entity:', ent_text, ', Label:', ent.label_)", "1 - Entity: The Cricket of the Heard , Label: MISC\n2 - Entity: Charles Dickens , Label: PER\n3 - Entity: Este libro electrónico , Label: MISC\n4 - Entity: Primer grito , Label: MISC\n5 - Entity: Capítulo I , Label: PER\n6 - Entity: Capítulo II , Label: PER\n7 - Entity: Capítulo III , Label: PER\n8 - Entity: Capítulo IV , Label: PER\n9 - Entity: Capítulo V , Label: PER\n10 - Entity: Capítulo VI , Label: PER\n11 - Entity: Segundo grito , Label: MISC\n12 - Entity: Capítulo I , Label: PER\n13 - Entity: Capítulo II , Label: PER\n14 - Entity: Capítulo III , Label: PER\n15 - Entity: Capítulo IV , Label: PER\n16 - Entity: Capítulo V , Label: PER\n17 - Entity: Capítulo VI , Label: PER\n18 - Entity: Tercer , Label: MISC\n19 - Entity: Capítulo I , Label: PER\n20 - Entity: Capítulo II , Label: PER\n21 - Entity: Capítulo III , Label: PER\n22 - Entity: Capítulo IV , Label: PER\n23 - Entity: Capítulo V , Label: PER\n24 - Entity: Capítulo VI , Label: PER\n25 - Entity: Primer , Label: MISC\n27 - Entity: Empezó , Label: PER\n28 - Entity: No necesito que me contéis , Label: MISC\n29 - Entity: Peerybingle , Label: ORG\n30 - Entity: Dejad , Label: PER\n31 - Entity: Peerybingle , Label: ORG\n32 - Entity: Tengo , Label: PER\n33 - Entity: El puchero empezó , Label: MISC\n34 - Entity: A decir verdad , Label: MISC\n35 - Entity: Por nada del mundo opondría mi opinión personal , Label: MISC\n36 - Entity: Peerybingle , Label: ORG\n37 - Entity: Dejarme contar el caso tal como ocurrió , Label: MISC\n38 - Entity: ¿cómo queréis que empiece por el principio , Label: MISC\n39 - Entity: Parecía , Label: PER\n40 - Entity: Una lucha musical , Label: MISC\n41 - Entity: Vais , Label: PER\n42 - Entity: La señora Peerybingle , Label: MISC\n43 - Entity: Euclides , Label: PER\n44 - Entity: La señora Peerybingle , Label: MISC\n45 - Entity: De vuelta ya , Label: MISC\n46 - Entity: Peerybingle muy pequeña- , Label: MISC\n47 - Entity: Peerybingle , Label: ORG\n48 - Entity: Además , Label: PER\n49 - Entity: No quería dejarse acomodar sobre la barra superior de la rejilla , Label: MISC\n50 - Entity: Peerybingle , Label: ORG\n" ] ], [ [ "### Step 3 - Working with POS, NER and sentences", "_____no_output_____" ], [ "**- POS:** the parts of speech explain how a word is used in a sentence.", "_____no_output_____" ] ], [ [ "# Part of speech (POS) used in this document\nset(token.pos_ for token in doc_es)", "_____no_output_____" ] ], [ [ "**- Sentences:** a set of words that is complete in itself and typically containing a subject and predicate.", "_____no_output_____" ] ], [ [ "# How many sentences are in this text?\nsentences = [s for s in doc_es.sents]\nlen(sentences)", "_____no_output_____" ], [ "# Show first 10 
sentences\nsentences[1:11]", "_____no_output_____" ], [ "# Get the sentences in which the 'grillo' appears\npattern = 'grillo'\ncricket_sent = [sent for sent in doc_es.sents if pattern in sent.text]\nlen(cricket_sent)", "_____no_output_____" ], [ "# Show the first 10 sentences in which the 'grillo' appears\nfor sent in cricket_sent[1:11]:\n print('-', sent)", "- El puchero empezó cinco minutos antes que el grillo, según el relojito holandés de cuadrante barnizado situado en el rincón.\n- \n¡Como si el reloj no hubiese cesado de tocar! ¡Como si el segadorcido de movimientos convulsivos y bruscos que lo remata, paseando la hoz de derecha a izquierda y luego de izquierda a derecha ante la fachada de su palacio morisco, no hubiese segado medio acre de césped imaginario antes que el grillo hubiese hecho notar su presencia!\nA decir verdad, no fui nunca terco, como todo el mundo sabe.\n- Pero se trata de una cuestión de hecho, y el hecho es que el puchero empezó por lo menos cinco minutos antes que el grillo hubiese dado señal de vida.\n- Es lo que hubiera hecho desde la primera frase a no considerar que si cuento una historia debo empezar por el principio, y ¿cómo queréis que empiece por el principio si no empiezo por la vasija?\nParecía que la vasija y el grillo luchaban.\n- Aquí, precisamente en este punto, fue cuando el grillo entró en escena con un crrri, crrri, crrri, de magnífica potencia a coro con el puchero; pero con una voz tan asombradamente desproporcionada a su estatura -¡su estatura!, era casi invisible-, sobre todo comparándole con el puchero, que si por desgracia hubiese reventado como un cañón excesivamente cargado, cayendo, víctima de su celo, su cuerpecito roto en mil fragmentos, no hubiera parecido sino la consecuencia natural y perseguida con su trabajo afanoso.\n- Perseveró con ardor constante; pero el grillo se erigió en concertino y se mantuvo en su supremacía.\n- No obstante, marchaban muy bien unidos el grillo y el puchero.\n- Cuando volvió a sentarse en su sitio, el grillo y el puchero se esmeraban todavía en el canto con cierta rivalidad furiosa, siendo indudablemente el lado flaco del puchero la presunción de vencer constantemente.\n- El grillo logra una milla de delantera.\n- ¡Crrri, crrri, crrri!..., el grillo dobla la esquina.\n" ] ], [ [ "**- NER:** Named Entity Recognition.", "_____no_output_____" ] ], [ [ "# Returns the most common entities and their quantity\ndef find_entities(doc, ent_type, n):\n entities = Counter()\n \n for ent in doc.ents:\n if ent.label_ == ent_type:\n ent_name = text_quality(ent.lemma_)\n entities[ent_name] += 1\n \n return entities.most_common(n)", "_____no_output_____" ], [ "# Show entities of type PERSON\nfind_entities(doc_es, 'PER', 20)", "_____no_output_____" ], [ "# Returns persons adjectives\ndef get_person_adj(doc, person):\n adjectives = []\n \n for ent in doc.ents:\n if ent.lemma_ == person:\n for token in ent.subtree:\n if token.pos_ == 'ADJ': # Adjective\n adjectives.append(token.lemma_)\n \n for ent in doc.ents:\n if ent.lemma_ == person:\n if ent.root.dep_ == 'nsubj': # Nominal subject\n for child in ent.root.head.children:\n if child.dep_ == 'acomp': # Adjectival complement\n adjectives.append(child.lemma_)\n \n return set(adjectives)", "_____no_output_____" ], [ "# Show the adjectives used for John (most common entity)\ncurr_person = 'John'\nprint(get_person_adj(doc_es, curr_person))", "{'distraído', 'venturoso', 'cabizbajo', 'diligente', 'eficazmente', 'santo', 'turbada-', 'pensativo', 'amado', 'extrañado'}\n" ], [ "# 
Returns the people who use a certain verb\ndef verb_persons(doc, verb, n):\n verb_count = Counter()\n \n for ent in doc.ents:\n if ent.label_ == 'PER' and ent.root.head.lemma_ == verb:\n verb_count[ent.text] += 1\n \n return verb_count.most_common(n)", "_____no_output_____" ], [ "# Show the people who use a certain verb\ncurr_verb = 'hacer'\nverb_persons(doc_es, curr_verb, 10)", "_____no_output_____" ], [ "# Get ADJ type labels\nadj_tokens = set(str(token.orth_).lower() for token in doc_es if token.pos_ == 'ADJ')\nlen(adj_tokens)", "_____no_output_____" ], [ "# Show 50 random ADJ type labels\nprint(random.sample(adj_tokens, 50))", "['confiada', 'viejo', 'abierta', 'turbada-', 'privado', 'levantadas', 'patético', 'única', 'sorprendido', 'supremo', 'aires', 'sordo', 'echado', 'codicioso', 'vivísima', 'vivarachos', 'ensimismado', 'justísimo', 'alegro', 'convulsiva', 'ingenua', 'cruzadas', 'gentil', 'riguroso', 'verde', 'ocupadísima', 'holandés', 'bolso', 'tetera', 'insensible', 'franca', 'rígido', 'creciente', 'guantes', 'doméstico', 'insaciable', 'sano', 'vivo', 'verdaderos', 'indiferentes', 'útil', 'musical', 'próximos', 'generoso', 'egoísta', 'tiernas', 'hundida', 'arrebatadora', 'consabida', 'crema']\n" ], [ "# Get PROPN type labels\npropn_tokens = set(str(token.orth_).lower() for token in doc_es if token.pos_ == 'PROPN')\nlen(adj_tokens)", "_____no_output_____" ], [ "# Show 50 random PROPN type labels\nprint(random.sample(propn_tokens, 50))", "['cruel', 'madre', 'vivía', 'tackleton', 'soltarme', 'vi', '-era', '-¡el', 'padre', 'experiencia-', 'leed', '-o', '-murmuró', 'juro', 'cuán', 'verdad', 'error', '-¡berta', '-siguió', 'n.', 'vedle', 'tac', 'sepa', 'cámara', 'peerybingle', 'royal-george', 'mía', 'pobre', 'era-', 'shem', 'habríais', '-¡oh', 'crrri', 'sansón', 'regocijado-', '-(n.', 'peerybingle-', 'tic', 'fuerzas-', 'invariablemente', 'indudablemente', '-john', 'eduardo', 'dímelo', 'cuánto', 'hija', 'mecía', 'apiadaos', 'berta-', 'gruff']\n" ] ], [ [ "## Reference", "_____no_output_____" ], [ "<a name='link_one' href='https://en.wikipedia.org/wiki/Natural_language_processing' target='_blank' >[1]</a> Wikipedia - Natural language processing. \n<a name='link_two' href='https://spacy.io/' target='_blank' >[2]</a> spaCy website. ", "_____no_output_____" ], [ "<hr>\n<p><a href=\"https://ansegura7.github.io/NLP/\">« Home</a></p>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cb3eeb0e0dd7ac6885734a790da65cd631b5351e
40,883
ipynb
Jupyter Notebook
examine_morphs.ipynb
unfoldingWord-box3/alignment-ml
9b9205e3be8f89f671f790d3f0dd3bc5aa6ee6d9
[ "MIT" ]
null
null
null
examine_morphs.ipynb
unfoldingWord-box3/alignment-ml
9b9205e3be8f89f671f790d3f0dd3bc5aa6ee6d9
[ "MIT" ]
null
null
null
examine_morphs.ipynb
unfoldingWord-box3/alignment-ml
9b9205e3be8f89f671f790d3f0dd3bc5aa6ee6d9
[ "MIT" ]
1
2021-01-04T14:07:58.000Z
2021-01-04T14:07:58.000Z
47.154556
18,540
0.439816
[ [ [ "# Install a pip package in the current Jupyter kernel\n# import sys\n# !{sys.executable} -m pip install pandas", "_____no_output_____" ], [ "import pandas as pd\nimport utils.db_utils as db\nimport utils.file_utils as file\nimport utils.bible_utils as bible\nimport utils.morph_utils as mu\n\noriginal_words_table = db.original_words_table\ntarget_words_table = db.target_words_table\nalignment_table = db.alignment_table\ndbPath = './data/en_ult_NT_alignments.sqlite'\norigLangPathGreek = './data/OrigLangJson/ugnt/v0.14'\norigLangPathHebrew = './data/OrigLangJson/uhb/v2.1.15'\n\nconnection = db.initAlignmentDB(dbPath)\n\nsearchOriginal = True\nsearchTarget = False\nsearchLemma = True\ncaseInsensitive = True", "Connection to SQLite DB successful\n" ], [ "#################\n\n# get all greek words\nitems_orig_greek = db.fetchRecords(connection, original_words_table, \"morph LIKE 'Gr%'\")\nprint (f\"{len(items_orig_greek)} greek words in original_words_table\")\noriginal_df = pd.DataFrame(items_orig_greek)\nmorphs_greek = original_df['morph']\n\n# get count of times each used\nfrequency_greek = morphs_greek.value_counts()\nprint(\"Frequency of greek morph usage:\")\nprint(frequency_greek)\n\n# get alphabetized list of morphs\nmorphs_list_greek = list(dict(frequency_greek).keys())\nmorphs_list_greek.sort()\nprint(\"Alphabetized greek morph list:\")\nprint(morphs_list_greek)", "137962 greek words in original_words_table\nFrequency of greek morph usage:\nGr,CC,,,,,,,, 13124\nGr,D,,,,,,,,, 6209\nGr,P,,,,,A,,, 3883\nGr,CS,,,,,,,, 3710\nGr,N,,,,,NMS, 3442\n ... \nGr,NP,,,,NMSS 1\nGr,V,SEA1,,P, 1\nGr,V,PAM,GNP, 1\nGr,AA,,,,GNPS 1\nGr,EP,,,,NMP, 1\nName: morph, Length: 1090, dtype: int64\nAlphabetized greek morph list:\n['Gr,AA,,,,AFP,', 'Gr,AA,,,,AFPC', 'Gr,AA,,,,AFS,', 'Gr,AA,,,,AFSC', 'Gr,AA,,,,AFSS', 'Gr,AA,,,,AMP,', 'Gr,AA,,,,AMPC', 'Gr,AA,,,,AMS,', 'Gr,AA,,,,AMSC', 'Gr,AA,,,,ANP,', 'Gr,AA,,,,ANPC', 'Gr,AA,,,,ANPS', 'Gr,AA,,,,ANS,', 'Gr,AA,,,,ANSC', 'Gr,AA,,,,ANSS', 'Gr,AA,,,,DFP,', 'Gr,AA,,,,DFPC', 'Gr,AA,,,,DFS,', 'Gr,AA,,,,DFSC', 'Gr,AA,,,,DFSS', 'Gr,AA,,,,DMP,', 'Gr,AA,,,,DMPC', 'Gr,AA,,,,DMS,', 'Gr,AA,,,,DMSC', 'Gr,AA,,,,DMSS', 'Gr,AA,,,,DNP,', 'Gr,AA,,,,DNPS', 'Gr,AA,,,,DNS,', 'Gr,AA,,,,DNSC', 'Gr,AA,,,,GFP,', 'Gr,AA,,,,GFPS', 'Gr,AA,,,,GFS,', 'Gr,AA,,,,GFSC', 'Gr,AA,,,,GMP,', 'Gr,AA,,,,GMPC', 'Gr,AA,,,,GMPS', 'Gr,AA,,,,GMS,', 'Gr,AA,,,,GMSC', 'Gr,AA,,,,GNP,', 'Gr,AA,,,,GNPC', 'Gr,AA,,,,GNPS', 'Gr,AA,,,,GNS,', 'Gr,AA,,,,GNSS', 'Gr,AA,,,,NFP,', 'Gr,AA,,,,NFPS', 'Gr,AA,,,,NFS,', 'Gr,AA,,,,NMP,', 'Gr,AA,,,,NMPC', 'Gr,AA,,,,NMS,', 'Gr,AA,,,,NMSC', 'Gr,AA,,,,NMSS', 'Gr,AA,,,,NNP,', 'Gr,AA,,,,NNS,', 'Gr,AA,,,,NNSC', 'Gr,AA,,,,VFS,', 'Gr,AA,,,,VMP,', 'Gr,AA,,,,VMS,', 'Gr,AA,,,,VMSS', 'Gr,AA,,,,VNS,', 'Gr,AR,,,,AFP,', 'Gr,AR,,,,AFS,', 'Gr,AR,,,,AMS,', 'Gr,AR,,,,ANP,', 'Gr,AR,,,,ANPC', 'Gr,AR,,,,ANS,', 'Gr,AR,,,,ANSC', 'Gr,AR,,,,DFS,', 'Gr,AR,,,,DMS,', 'Gr,AR,,,,DNP,', 'Gr,AR,,,,DNS,', 'Gr,AR,,,,GFP,', 'Gr,AR,,,,GFS,', 'Gr,AR,,,,GMP,', 'Gr,AR,,,,GMS,', 'Gr,AR,,,,GNP,', 'Gr,AR,,,,GNS,', 'Gr,AR,,,,NFS,', 'Gr,AR,,,,NMP,', 'Gr,AR,,,,NMS,', 'Gr,AR,,,,NMSC', 'Gr,AR,,,,NNP,', 'Gr,AR,,,,NNS,', 'Gr,AR,,,,VFS,', 'Gr,AR,,,,VMS,', 'Gr,AR,,,,VNS,', 'Gr,CC,,,,,,,,', 'Gr,CO,,,,,,,,', 'Gr,CS,,,,,,,,', 'Gr,D,,,,,,,,,', 'Gr,D,,,,,,,,C', 'Gr,D,,,,,,,,S', 'Gr,DO,,,,,,,,', 'Gr,EA,,,,AFP,', 'Gr,EA,,,,AFS,', 'Gr,EA,,,,AMP,', 'Gr,EA,,,,AMS,', 'Gr,EA,,,,ANP,', 'Gr,EA,,,,ANS,', 'Gr,EA,,,,DFP,', 'Gr,EA,,,,DFS,', 'Gr,EA,,,,DMP,', 'Gr,EA,,,,DMS,', 'Gr,EA,,,,DNP,', 'Gr,EA,,,,DNS,', 'Gr,EA,,,,GFP,', 'Gr,EA,,,,GFS,', 
'Gr,EA,,,,GMP,', 'Gr,EA,,,,GMS,', 'Gr,EA,,,,GNP,', 'Gr,EA,,,,GNS,', 'Gr,EA,,,,NFP,', 'Gr,EA,,,,NFS,', 'Gr,EA,,,,NMP,', 'Gr,EA,,,,NMS,', 'Gr,EA,,,,NNP,', 'Gr,EA,,,,NNS,', 'Gr,EA,,,,VFP,', 'Gr,EA,,,,VFS,', 'Gr,EA,,,,VMP,', 'Gr,EA,,,,VMS,', 'Gr,EA,,,,VNP,', 'Gr,EA,,,,VNS,', 'Gr,ED,,,,AFP,', 'Gr,ED,,,,AFS,', 'Gr,ED,,,,AMP,', 'Gr,ED,,,,AMS,', 'Gr,ED,,,,ANP,', 'Gr,ED,,,,ANS,', 'Gr,ED,,,,DFP,', 'Gr,ED,,,,DFS,', 'Gr,ED,,,,DMP,', 'Gr,ED,,,,DMS,', 'Gr,ED,,,,DNP,', 'Gr,ED,,,,DNS,', 'Gr,ED,,,,GFP,', 'Gr,ED,,,,GFS,', 'Gr,ED,,,,GMP,', 'Gr,ED,,,,GMS,', 'Gr,ED,,,,GNP,', 'Gr,ED,,,,GNS,', 'Gr,ED,,,,NFP,', 'Gr,ED,,,,NFS,', 'Gr,ED,,,,NMP,', 'Gr,ED,,,,NMS,', 'Gr,ED,,,,NNP,', 'Gr,ED,,,,NNS,', 'Gr,EF,,,,AFP,', 'Gr,EF,,,,AFS,', 'Gr,EF,,,,AMP,', 'Gr,EF,,,,AMS,', 'Gr,EF,,,,ANP,', 'Gr,EF,,,,ANS,', 'Gr,EF,,,,DFP,', 'Gr,EF,,,,DFS,', 'Gr,EF,,,,DMP,', 'Gr,EF,,,,DMS,', 'Gr,EF,,,,DNP,', 'Gr,EF,,,,DNS,', 'Gr,EF,,,,GFP,', 'Gr,EF,,,,GFS,', 'Gr,EF,,,,GMP,', 'Gr,EF,,,,GMS,', 'Gr,EF,,,,GNP,', 'Gr,EF,,,,GNS,', 'Gr,EF,,,,NFS,', 'Gr,EF,,,,NMP,', 'Gr,EF,,,,NMS,', 'Gr,EF,,,,NNP,', 'Gr,EF,,,,NNS,', 'Gr,EN,,,,AFP,', 'Gr,EN,,,,AFPI', 'Gr,EN,,,,AFS,', 'Gr,EN,,,,AMP,', 'Gr,EN,,,,AMPI', 'Gr,EN,,,,AMS,', 'Gr,EN,,,,ANP,', 'Gr,EN,,,,ANPI', 'Gr,EN,,,,ANS,', 'Gr,EN,,,,DFP,', 'Gr,EN,,,,DFPI', 'Gr,EN,,,,DFS,', 'Gr,EN,,,,DMP,', 'Gr,EN,,,,DMPI', 'Gr,EN,,,,DMS,', 'Gr,EN,,,,DNP,', 'Gr,EN,,,,DNPI', 'Gr,EN,,,,DNS,', 'Gr,EN,,,,GFP,', 'Gr,EN,,,,GFPI', 'Gr,EN,,,,GFS,', 'Gr,EN,,,,GMP,', 'Gr,EN,,,,GMS,', 'Gr,EN,,,,GNP,', 'Gr,EN,,,,GNPI', 'Gr,EN,,,,GNS,', 'Gr,EN,,,,NFP,', 'Gr,EN,,,,NFPI', 'Gr,EN,,,,NFS,', 'Gr,EN,,,,NMP,', 'Gr,EN,,,,NMPI', 'Gr,EN,,,,NMS,', 'Gr,EN,,,,NNP,', 'Gr,EN,,,,NNPI', 'Gr,EN,,,,NNS,', 'Gr,EO,,,,AFP,', 'Gr,EO,,,,AFS,', 'Gr,EO,,,,AMS,', 'Gr,EO,,,,ANP,', 'Gr,EO,,,,ANS,', 'Gr,EO,,,,DFP,', 'Gr,EO,,,,DFS,', 'Gr,EO,,,,DMS,', 'Gr,EO,,,,GFP,', 'Gr,EO,,,,GFS,', 'Gr,EO,,,,GMS,', 'Gr,EO,,,,GNS,', 'Gr,EO,,,,NFS,', 'Gr,EO,,,,NMS,', 'Gr,EO,,,,NNS,', 'Gr,EP,,,,AFP,', 'Gr,EP,,,,AFS,', 'Gr,EP,,,,AMP,', 'Gr,EP,,,,AMS,', 'Gr,EP,,,,ANP,', 'Gr,EP,,,,ANS,', 'Gr,EP,,,,DFP,', 'Gr,EP,,,,DFS,', 'Gr,EP,,,,DMP,', 'Gr,EP,,,,DMS,', 'Gr,EP,,,,DNP,', 'Gr,EP,,,,DNS,', 'Gr,EP,,,,GFP,', 'Gr,EP,,,,GFS,', 'Gr,EP,,,,GMP,', 'Gr,EP,,,,GMS,', 'Gr,EP,,,,GNP,', 'Gr,EP,,,,GNS,', 'Gr,EP,,,,NFP,', 'Gr,EP,,,,NFS,', 'Gr,EP,,,,NMP,', 'Gr,EP,,,,NMS,', 'Gr,EP,,,,NNP,', 'Gr,EP,,,,NNS,', 'Gr,EP,,,1AFP,', 'Gr,EP,,,1AFS,', 'Gr,EP,,,1AMP,', 'Gr,EP,,,1AMS,', 'Gr,EP,,,1ANP,', 'Gr,EP,,,1ANS,', 'Gr,EP,,,1DFP,', 'Gr,EP,,,1DFS,', 'Gr,EP,,,1DMP,', 'Gr,EP,,,1DMS,', 'Gr,EP,,,1DNP,', 'Gr,EP,,,1DNS,', 'Gr,EP,,,1GFS,', 'Gr,EP,,,1GNP,', 'Gr,EP,,,1NFS,', 'Gr,EP,,,1NMP,', 'Gr,EP,,,1NMS,', 'Gr,EP,,,1NNP,', 'Gr,EP,,,1NNS,', 'Gr,EP,,,2AFS,', 'Gr,EP,,,2DFS,', 'Gr,EP,,,2DMS,', 'Gr,EP,,,2DNS,', 'Gr,EP,,,2GFS,', 'Gr,EP,,,2NMP,', 'Gr,EP,,,2NMS,', 'Gr,EP,,,2NNS,', 'Gr,EQ,,,,AFP,', 'Gr,EQ,,,,AFS,', 'Gr,EQ,,,,AMP,', 'Gr,EQ,,,,AMS,', 'Gr,EQ,,,,ANP,', 'Gr,EQ,,,,ANS,', 'Gr,EQ,,,,DFP,', 'Gr,EQ,,,,DFS,', 'Gr,EQ,,,,DMP,', 'Gr,EQ,,,,DMPC', 'Gr,EQ,,,,DMS,', 'Gr,EQ,,,,DNP,', 'Gr,EQ,,,,DNS,', 'Gr,EQ,,,,GFP,', 'Gr,EQ,,,,GFS,', 'Gr,EQ,,,,GMP,', 'Gr,EQ,,,,GMS,', 'Gr,EQ,,,,GNP,', 'Gr,EQ,,,,GNS,', 'Gr,EQ,,,,NFP,', 'Gr,EQ,,,,NFS,', 'Gr,EQ,,,,NMP,', 'Gr,EQ,,,,NMS,', 'Gr,EQ,,,,NNP,', 'Gr,EQ,,,,NNS,', 'Gr,EQ,,,,VMP,', 'Gr,EQ,,,,VMS,', 'Gr,EQ,,,,VNP,', 'Gr,ER,,,,AFS,', 'Gr,ER,,,,AMP,', 'Gr,ER,,,,AMS,', 'Gr,ER,,,,ANP,', 'Gr,ER,,,,ANS,', 'Gr,ER,,,,DFS,', 'Gr,ER,,,,DMS,', 'Gr,ER,,,,DNS,', 'Gr,ER,,,,GFS,', 'Gr,ER,,,,NFP,', 'Gr,ER,,,,NMP,', 'Gr,ER,,,,NNS,', 'Gr,ET,,,,AFP,', 'Gr,ET,,,,AFS,', 'Gr,ET,,,,AMP,', 'Gr,ET,,,,AMS,', 'Gr,ET,,,,ANS,', 
'Gr,ET,,,,DFS,', 'Gr,ET,,,,DMS,', 'Gr,ET,,,,DNP,', 'Gr,ET,,,,DNS,', 'Gr,ET,,,,GFP,', 'Gr,ET,,,,GFS,', 'Gr,ET,,,,GMS,', 'Gr,ET,,,,NFP,', 'Gr,ET,,,,NFS,', 'Gr,ET,,,,NMP,', 'Gr,ET,,,,NMS,', 'Gr,ET,,,,NNS,', 'Gr,IDMAA2,,P,', 'Gr,IDMAA2,,S,', 'Gr,IE,,,,,,,,', 'Gr,IE,,,,,,,I', 'Gr,IEMPA2,,P,', 'Gr,IEMPA2,,S,', 'Gr,IENPA,,,,,', 'Gr,IR,,,,,,,,', 'Gr,N,,,,,AFP,', 'Gr,N,,,,,AFPD', 'Gr,N,,,,,AFS,', 'Gr,N,,,,,AFSD', 'Gr,N,,,,,AFSI', 'Gr,N,,,,,AMP,', 'Gr,N,,,,,AMPI', 'Gr,N,,,,,AMS,', 'Gr,N,,,,,AMSD', 'Gr,N,,,,,AMSI', 'Gr,N,,,,,ANP,', 'Gr,N,,,,,ANPD', 'Gr,N,,,,,ANS,', 'Gr,N,,,,,ANSD', 'Gr,N,,,,,ANSI', 'Gr,N,,,,,DFP,', 'Gr,N,,,,,DFS,', 'Gr,N,,,,,DFSD', 'Gr,N,,,,,DFSI', 'Gr,N,,,,,DMP,', 'Gr,N,,,,,DMS,', 'Gr,N,,,,,DMSI', 'Gr,N,,,,,DNP,', 'Gr,N,,,,,DNPD', 'Gr,N,,,,,DNS,', 'Gr,N,,,,,DNSD', 'Gr,N,,,,,DNSI', 'Gr,N,,,,,GFP,', 'Gr,N,,,,,GFPD', 'Gr,N,,,,,GFS,', 'Gr,N,,,,,GFSD', 'Gr,N,,,,,GFSI', 'Gr,N,,,,,GMP,', 'Gr,N,,,,,GMPI', 'Gr,N,,,,,GMS,', 'Gr,N,,,,,GMSI', 'Gr,N,,,,,GNP,', 'Gr,N,,,,,GNPD', 'Gr,N,,,,,GNS,', 'Gr,N,,,,,GNSD', 'Gr,N,,,,,GNSI', 'Gr,N,,,,,NFP,', 'Gr,N,,,,,NFPI', 'Gr,N,,,,,NFS,', 'Gr,N,,,,,NFSD', 'Gr,N,,,,,NFSI', 'Gr,N,,,,,NMP,', 'Gr,N,,,,,NMPD', 'Gr,N,,,,,NMS,', 'Gr,N,,,,,NMSD', 'Gr,N,,,,,NMSI', 'Gr,N,,,,,NNP,', 'Gr,N,,,,,NNPD', 'Gr,N,,,,,NNPI', 'Gr,N,,,,,NNS,', 'Gr,N,,,,,NNSD', 'Gr,N,,,,,NNSI', 'Gr,N,,,,,VFP,', 'Gr,N,,,,,VFS,', 'Gr,N,,,,,VFSI', 'Gr,N,,,,,VMP,', 'Gr,N,,,,,VMPD', 'Gr,N,,,,,VMS,', 'Gr,N,,,,,VMSD', 'Gr,N,,,,,VMSI', 'Gr,N,,,,,VNP,', 'Gr,N,,,,,VNPD', 'Gr,N,,,,,VNS,', 'Gr,N,,,,,VNSD', 'Gr,NP,,,,AFP,', 'Gr,NP,,,,AFS,', 'Gr,NP,,,,AFSC', 'Gr,NP,,,,AMP,', 'Gr,NP,,,,AMS,', 'Gr,NP,,,,ANP,', 'Gr,NP,,,,ANPC', 'Gr,NP,,,,ANS,', 'Gr,NP,,,,DFP,', 'Gr,NP,,,,DFS,', 'Gr,NP,,,,DMP,', 'Gr,NP,,,,DNS,', 'Gr,NP,,,,GFP,', 'Gr,NP,,,,GFS,', 'Gr,NP,,,,GMP,', 'Gr,NP,,,,GMS,', 'Gr,NP,,,,GNP,', 'Gr,NP,,,,NFP,', 'Gr,NP,,,,NFPC', 'Gr,NP,,,,NFS,', 'Gr,NP,,,,NFSC', 'Gr,NP,,,,NFSS', 'Gr,NP,,,,NMP,', 'Gr,NP,,,,NMPC', 'Gr,NP,,,,NMPI', 'Gr,NP,,,,NMS,', 'Gr,NP,,,,NMSC', 'Gr,NP,,,,NMSS', 'Gr,NP,,,,NNP,', 'Gr,NP,,,,NNPC', 'Gr,NP,,,,NNS,', 'Gr,NP,,,,NNSC', 'Gr,NS,,,,AFP,', 'Gr,NS,,,,AFPC', 'Gr,NS,,,,AFS,', 'Gr,NS,,,,AFSC', 'Gr,NS,,,,AMP,', 'Gr,NS,,,,AMPC', 'Gr,NS,,,,AMPI', 'Gr,NS,,,,AMS,', 'Gr,NS,,,,AMSC', 'Gr,NS,,,,ANP,', 'Gr,NS,,,,ANPC', 'Gr,NS,,,,ANPI', 'Gr,NS,,,,ANS,', 'Gr,NS,,,,ANSC', 'Gr,NS,,,,ANSS', 'Gr,NS,,,,DFP,', 'Gr,NS,,,,DFS,', 'Gr,NS,,,,DMP,', 'Gr,NS,,,,DMPC', 'Gr,NS,,,,DMPI', 'Gr,NS,,,,DMS,', 'Gr,NS,,,,DMSC', 'Gr,NS,,,,DNP,', 'Gr,NS,,,,DNPI', 'Gr,NS,,,,DNPS', 'Gr,NS,,,,DNS,', 'Gr,NS,,,,DNSS', 'Gr,NS,,,,GFP,', 'Gr,NS,,,,GFS,', 'Gr,NS,,,,GFSC', 'Gr,NS,,,,GMP,', 'Gr,NS,,,,GMPC', 'Gr,NS,,,,GMPS', 'Gr,NS,,,,GMS,', 'Gr,NS,,,,GMSC', 'Gr,NS,,,,GMSS', 'Gr,NS,,,,GNP,', 'Gr,NS,,,,GNPC', 'Gr,NS,,,,GNPI', 'Gr,NS,,,,GNS,', 'Gr,NS,,,,NFP,', 'Gr,NS,,,,NFPI', 'Gr,NS,,,,NFS,', 'Gr,NS,,,,NMP,', 'Gr,NS,,,,NMPC', 'Gr,NS,,,,NMPI', 'Gr,NS,,,,NMS,', 'Gr,NS,,,,NMSC', 'Gr,NS,,,,NMSS', 'Gr,NS,,,,NNP,', 'Gr,NS,,,,NNS,', 'Gr,NS,,,,NNSC', 'Gr,NS,,,,VFS,', 'Gr,NS,,,,VMP,', 'Gr,NS,,,,VMPC', 'Gr,NS,,,,VMS,', 'Gr,P,,,,,,,,,', 'Gr,P,,,,,A,,,', 'Gr,P,,,,,D,,,', 'Gr,P,,,,,G,,,', 'Gr,PI,,,,,,,,', 'Gr,PI,,,,D,,,', 'Gr,PI,,,,G,,,', 'Gr,RC,,,,AMP,', 'Gr,RC,,,,DMP,', 'Gr,RC,,,,DNP,', 'Gr,RC,,,,GMP,', 'Gr,RC,,,,GNP,', 'Gr,RD,,,,AFP,', 'Gr,RD,,,,AFS,', 'Gr,RD,,,,AMP,', 'Gr,RD,,,,AMS,', 'Gr,RD,,,,ANP,', 'Gr,RD,,,,ANS,', 'Gr,RD,,,,DFP,', 'Gr,RD,,,,DFS,', 'Gr,RD,,,,DMP,', 'Gr,RD,,,,DMS,', 'Gr,RD,,,,DNP,', 'Gr,RD,,,,DNS,', 'Gr,RD,,,,GFP,', 'Gr,RD,,,,GFS,', 'Gr,RD,,,,GMP,', 'Gr,RD,,,,GMS,', 'Gr,RD,,,,GNP,', 'Gr,RD,,,,GNS,', 'Gr,RD,,,,NFP,', 'Gr,RD,,,,NFS,', 
'Gr,RD,,,,NMP,', 'Gr,RD,,,,NMS,', 'Gr,RD,,,,NNP,', 'Gr,RD,,,,NNS,', 'Gr,RD,,,,VFS,', 'Gr,RD,,,,VMP,', 'Gr,RD,,,,VMS,', 'Gr,RD,,,3ANP,', 'Gr,RE,,,,AFS,', 'Gr,RE,,,,ANS,', 'Gr,RE,,,1AMP,', 'Gr,RE,,,1AMS,', 'Gr,RE,,,1DMP,', 'Gr,RE,,,1DMS,', 'Gr,RE,,,1GMP,', 'Gr,RE,,,1GMS,', 'Gr,RE,,,1NMP,', 'Gr,RE,,,1NMS,', 'Gr,RE,,,2AFP,', 'Gr,RE,,,2AMP,', 'Gr,RE,,,2AMS,', 'Gr,RE,,,2ANP,', 'Gr,RE,,,2DFP,', 'Gr,RE,,,2DMP,', 'Gr,RE,,,2DMS,', 'Gr,RE,,,2GMP,', 'Gr,RE,,,2GMS,', 'Gr,RE,,,2NMP,', 'Gr,RE,,,2NMS,', 'Gr,RE,,,3AFP,', 'Gr,RE,,,3AFS,', 'Gr,RE,,,3AMP,', 'Gr,RE,,,3AMS,', 'Gr,RE,,,3ANP,', 'Gr,RE,,,3ANS,', 'Gr,RE,,,3DFS,', 'Gr,RE,,,3DMP,', 'Gr,RE,,,3DMS,', 'Gr,RE,,,3GFP,', 'Gr,RE,,,3GFS,', 'Gr,RE,,,3GMP,', 'Gr,RE,,,3GMS,', 'Gr,RE,,,3GNP,', 'Gr,RE,,,3GNS,', 'Gr,RE,,,3NFS,', 'Gr,RE,,,3NMP,', 'Gr,RE,,,3NMS,', 'Gr,RE,,,3NNS,', 'Gr,RI,,,,AFP,', 'Gr,RI,,,,AFS,', 'Gr,RI,,,,AMP,', 'Gr,RI,,,,AMS,', 'Gr,RI,,,,ANP,', 'Gr,RI,,,,ANS,', 'Gr,RI,,,,DFS,', 'Gr,RI,,,,DMP,', 'Gr,RI,,,,DMS,', 'Gr,RI,,,,DNP,', 'Gr,RI,,,,DNS,', 'Gr,RI,,,,GFP,', 'Gr,RI,,,,GFS,', 'Gr,RI,,,,GMP,', 'Gr,RI,,,,GMPC', 'Gr,RI,,,,GMS,', 'Gr,RI,,,,GNP,', 'Gr,RI,,,,GNS,', 'Gr,RI,,,,NFP,', 'Gr,RI,,,,NFS,', 'Gr,RI,,,,NMP,', 'Gr,RI,,,,NMS,', 'Gr,RI,,,,NNP,', 'Gr,RI,,,,NNS,', 'Gr,RI,,,,VMP,', 'Gr,RP,,,,AFS,', 'Gr,RP,,,,AMP,', 'Gr,RP,,,,AMS,', 'Gr,RP,,,,ANS,', 'Gr,RP,,,,DFP,', 'Gr,RP,,,,DFS,', 'Gr,RP,,,,DMP,', 'Gr,RP,,,,DMS,', 'Gr,RP,,,,GFS,', 'Gr,RP,,,,GMP,', 'Gr,RP,,,,GMS,', 'Gr,RP,,,,GNS,', 'Gr,RP,,,,NFS,', 'Gr,RP,,,,NMP,', 'Gr,RP,,,,NMS,', 'Gr,RP,,,1A,P,', 'Gr,RP,,,1A,S,', 'Gr,RP,,,1ANP,', 'Gr,RP,,,1ANS,', 'Gr,RP,,,1D,P,', 'Gr,RP,,,1D,S,', 'Gr,RP,,,1DNP,', 'Gr,RP,,,1G,P,', 'Gr,RP,,,1G,S,', 'Gr,RP,,,1GFP,', 'Gr,RP,,,1GMS,', 'Gr,RP,,,1N,P,', 'Gr,RP,,,1N,S,', 'Gr,RP,,,1NFS,', 'Gr,RP,,,1NMP,', 'Gr,RP,,,1NMS,', 'Gr,RP,,,1NNP,', 'Gr,RP,,,1NNS,', 'Gr,RP,,,2A,P,', 'Gr,RP,,,2A,S,', 'Gr,RP,,,2AMP,', 'Gr,RP,,,2AMS,', 'Gr,RP,,,2ANP,', 'Gr,RP,,,2ANS,', 'Gr,RP,,,2D,P,', 'Gr,RP,,,2D,S,', 'Gr,RP,,,2G,P,', 'Gr,RP,,,2G,S,', 'Gr,RP,,,2GFS,', 'Gr,RP,,,2N,P,', 'Gr,RP,,,2N,S,', 'Gr,RP,,,2NFP,', 'Gr,RP,,,2NMP,', 'Gr,RP,,,2NNP,', 'Gr,RP,,,2NNS,', 'Gr,RP,,,3AFP,', 'Gr,RP,,,3AFS,', 'Gr,RP,,,3AMP,', 'Gr,RP,,,3AMS,', 'Gr,RP,,,3ANP,', 'Gr,RP,,,3ANS,', 'Gr,RP,,,3DFP,', 'Gr,RP,,,3DFS,', 'Gr,RP,,,3DMP,', 'Gr,RP,,,3DMS,', 'Gr,RP,,,3DNP,', 'Gr,RP,,,3DNS,', 'Gr,RP,,,3GFP,', 'Gr,RP,,,3GFS,', 'Gr,RP,,,3GMP,', 'Gr,RP,,,3GMS,', 'Gr,RP,,,3GNP,', 'Gr,RP,,,3GNS,', 'Gr,RP,,,3NFS,', 'Gr,RP,,,3NMP,', 'Gr,RP,,,3NMS,', 'Gr,RR,,,,AFP,', 'Gr,RR,,,,AFS,', 'Gr,RR,,,,AMP,', 'Gr,RR,,,,AMS,', 'Gr,RR,,,,ANP,', 'Gr,RR,,,,ANS,', 'Gr,RR,,,,DFP,', 'Gr,RR,,,,DFS,', 'Gr,RR,,,,DMP,', 'Gr,RR,,,,DMS,', 'Gr,RR,,,,DNP,', 'Gr,RR,,,,DNS,', 'Gr,RR,,,,GFP,', 'Gr,RR,,,,GFS,', 'Gr,RR,,,,GMP,', 'Gr,RR,,,,GMS,', 'Gr,RR,,,,GNP,', 'Gr,RR,,,,GNS,', 'Gr,RR,,,,NFP,', 'Gr,RR,,,,NFS,', 'Gr,RR,,,,NMP,', 'Gr,RR,,,,NMS,', 'Gr,RR,,,,NNP,', 'Gr,RR,,,,NNS,', 'Gr,RR,,,,VMS,', 'Gr,RT,,,,AFP,', 'Gr,RT,,,,AFS,', 'Gr,RT,,,,AMP,', 'Gr,RT,,,,AMS,', 'Gr,RT,,,,ANP,', 'Gr,RT,,,,ANS,', 'Gr,RT,,,,DMP,', 'Gr,RT,,,,DMS,', 'Gr,RT,,,,DNS,', 'Gr,RT,,,,GFS,', 'Gr,RT,,,,GMP,', 'Gr,RT,,,,GMS,', 'Gr,RT,,,,GNP,', 'Gr,RT,,,,GNS,', 'Gr,RT,,,,NFS,', 'Gr,RT,,,,NMP,', 'Gr,RT,,,,NMS,', 'Gr,RT,,,,NNP,', 'Gr,RT,,,,NNS,', 'Gr,T,,,,,,,,,', 'Gr,TF,,,,,,,,', 'Gr,V,IAA1,,P,', 'Gr,V,IAA1,,S,', 'Gr,V,IAA2,,P,', 'Gr,V,IAA2,,S,', 'Gr,V,IAA3,,P,', 'Gr,V,IAA3,,S,', 'Gr,V,IAM1,,P,', 'Gr,V,IAM1,,S,', 'Gr,V,IAM2,,P,', 'Gr,V,IAM2,,S,', 'Gr,V,IAM3,,P,', 'Gr,V,IAM3,,S,', 'Gr,V,IAP1,,P,', 'Gr,V,IAP1,,S,', 'Gr,V,IAP2,,P,', 'Gr,V,IAP2,,S,', 'Gr,V,IAP3,,P,', 'Gr,V,IAP3,,S,', 'Gr,V,IEA1,,P,', 
'Gr,V,IEA1,,S,', 'Gr,V,IEA2,,P,', 'Gr,V,IEA2,,S,', 'Gr,V,IEA3,,P,', 'Gr,V,IEA3,,S,', 'Gr,V,IEM1,,P,', 'Gr,V,IEM1,,S,', 'Gr,V,IEM2,,P,', 'Gr,V,IEM2,,S,', 'Gr,V,IEM3,,P,', 'Gr,V,IEM3,,S,', 'Gr,V,IEP1,,P,', 'Gr,V,IEP1,,S,', 'Gr,V,IEP2,,P,', 'Gr,V,IEP2,,S,', 'Gr,V,IEP3,,P,', 'Gr,V,IEP3,,S,', 'Gr,V,IFA1,,P,', 'Gr,V,IFA1,,S,', 'Gr,V,IFA2,,P,', 'Gr,V,IFA2,,S,', 'Gr,V,IFA3,,P,', 'Gr,V,IFA3,,S,', 'Gr,V,IFM1,,P,', 'Gr,V,IFM1,,S,', 'Gr,V,IFM2,,P,', 'Gr,V,IFM2,,S,', 'Gr,V,IFM3,,P,', 'Gr,V,IFM3,,S,', 'Gr,V,IFP1,,P,', 'Gr,V,IFP1,,S,', 'Gr,V,IFP2,,P,', 'Gr,V,IFP2,,S,', 'Gr,V,IFP3,,P,', 'Gr,V,IFP3,,S,', 'Gr,V,IIA1,,P,', 'Gr,V,IIA1,,S,', 'Gr,V,IIA2,,P,', 'Gr,V,IIA2,,S,', 'Gr,V,IIA3,,P,', 'Gr,V,IIA3,,S,', 'Gr,V,IIM1,,P,', 'Gr,V,IIM1,,S,', 'Gr,V,IIM2,,P,', 'Gr,V,IIM2,,S,', 'Gr,V,IIM3,,P,', 'Gr,V,IIM3,,S,', 'Gr,V,IIP1,,P,', 'Gr,V,IIP1,,S,', 'Gr,V,IIP2,,P,', 'Gr,V,IIP3,,P,', 'Gr,V,IIP3,,S,', 'Gr,V,ILA1,,S,', 'Gr,V,ILA2,,P,', 'Gr,V,ILA2,,S,', 'Gr,V,ILA3,,P,', 'Gr,V,ILA3,,S,', 'Gr,V,ILM3,,P,', 'Gr,V,ILM3,,S,', 'Gr,V,ILP3,,S,', 'Gr,V,IPA1,,P,', 'Gr,V,IPA1,,S,', 'Gr,V,IPA2,,P,', 'Gr,V,IPA2,,S,', 'Gr,V,IPA3,,P,', 'Gr,V,IPA3,,S,', 'Gr,V,IPM1,,P,', 'Gr,V,IPM1,,S,', 'Gr,V,IPM2,,P,', 'Gr,V,IPM2,,S,', 'Gr,V,IPM3,,P,', 'Gr,V,IPM3,,S,', 'Gr,V,IPP1,,P,', 'Gr,V,IPP1,,S,', 'Gr,V,IPP2,,P,', 'Gr,V,IPP2,,S,', 'Gr,V,IPP3,,P,', 'Gr,V,IPP3,,S,', 'Gr,V,MAA2,,P,', 'Gr,V,MAA2,,S,', 'Gr,V,MAA3,,P,', 'Gr,V,MAA3,,S,', 'Gr,V,MAM2,,P,', 'Gr,V,MAM2,,S,', 'Gr,V,MAM3,,P,', 'Gr,V,MAM3,,S,', 'Gr,V,MAP2,,P,', 'Gr,V,MAP2,,S,', 'Gr,V,MAP3,,P,', 'Gr,V,MAP3,,S,', 'Gr,V,MEA2,,P,', 'Gr,V,MEP2,,P,', 'Gr,V,MEP2,,S,', 'Gr,V,MPA2,,P,', 'Gr,V,MPA2,,S,', 'Gr,V,MPA3,,P,', 'Gr,V,MPA3,,S,', 'Gr,V,MPM2,,P,', 'Gr,V,MPM2,,S,', 'Gr,V,MPM3,,P,', 'Gr,V,MPM3,,S,', 'Gr,V,MPP2,,P,', 'Gr,V,MPP2,,S,', 'Gr,V,MPP3,,P,', 'Gr,V,MPP3,,S,', 'Gr,V,NAA,,,,,', 'Gr,V,NAM,,,,,', 'Gr,V,NAP,,,,,', 'Gr,V,NEA,,,,,', 'Gr,V,NEM,,,,,', 'Gr,V,NEP,,,,,', 'Gr,V,NFA,,,,,', 'Gr,V,NFM,,,,,', 'Gr,V,NPA,,,,,', 'Gr,V,NPM,,,,,', 'Gr,V,NPP,,,,,', 'Gr,V,OAA3,,P,', 'Gr,V,OAA3,,S,', 'Gr,V,OAM1,,S,', 'Gr,V,OAM3,,S,', 'Gr,V,OAP3,,S,', 'Gr,V,OPA2,,P,', 'Gr,V,OPA3,,P,', 'Gr,V,OPA3,,S,', 'Gr,V,OPM1,,S,', 'Gr,V,OPM3,,P,', 'Gr,V,OPM3,,S,', 'Gr,V,PAA,AFP,', 'Gr,V,PAA,AFS,', 'Gr,V,PAA,AMP,', 'Gr,V,PAA,AMS,', 'Gr,V,PAA,ANP,', 'Gr,V,PAA,ANS,', 'Gr,V,PAA,DFP,', 'Gr,V,PAA,DFS,', 'Gr,V,PAA,DMP,', 'Gr,V,PAA,DMS,', 'Gr,V,PAA,GFS,', 'Gr,V,PAA,GMP,', 'Gr,V,PAA,GMS,', 'Gr,V,PAA,GNS,', 'Gr,V,PAA,NFP,', 'Gr,V,PAA,NFS,', 'Gr,V,PAA,NMP,', 'Gr,V,PAA,NMS,', 'Gr,V,PAA,NNP,', 'Gr,V,PAA,NNS,', 'Gr,V,PAM,AFS,', 'Gr,V,PAM,AMP,', 'Gr,V,PAM,AMS,', 'Gr,V,PAM,ANP,', 'Gr,V,PAM,ANS,', 'Gr,V,PAM,DMP,', 'Gr,V,PAM,DNP,', 'Gr,V,PAM,GFP,', 'Gr,V,PAM,GFS,', 'Gr,V,PAM,GMP,', 'Gr,V,PAM,GMS,', 'Gr,V,PAM,GNP,', 'Gr,V,PAM,GNS,', 'Gr,V,PAM,NFP,', 'Gr,V,PAM,NFS,', 'Gr,V,PAM,NMP,', 'Gr,V,PAM,NMS,', 'Gr,V,PAM,NNS,', 'Gr,V,PAP,AFS,', 'Gr,V,PAP,AMP,', 'Gr,V,PAP,AMS,', 'Gr,V,PAP,ANP,', 'Gr,V,PAP,ANS,', 'Gr,V,PAP,DFS,', 'Gr,V,PAP,DMS,', 'Gr,V,PAP,DNP,', 'Gr,V,PAP,DNS,', 'Gr,V,PAP,GFP,', 'Gr,V,PAP,GFS,', 'Gr,V,PAP,GMP,', 'Gr,V,PAP,GMS,', 'Gr,V,PAP,GNP,', 'Gr,V,PAP,GNS,', 'Gr,V,PAP,NFP,', 'Gr,V,PAP,NFS,', 'Gr,V,PAP,NMP,', 'Gr,V,PAP,NMS,', 'Gr,V,PAP,NNP,', 'Gr,V,PAP,NNS,', 'Gr,V,PEA,AFS,', 'Gr,V,PEA,AMP,', 'Gr,V,PEA,AMS,', 'Gr,V,PEA,ANP,', 'Gr,V,PEA,ANS,', 'Gr,V,PEA,DMP,', 'Gr,V,PEA,DMS,', 'Gr,V,PEA,DNS,', 'Gr,V,PEA,GFS,', 'Gr,V,PEA,GMP,', 'Gr,V,PEA,GMS,', 'Gr,V,PEA,GNP,', 'Gr,V,PEA,NFP,', 'Gr,V,PEA,NFS,', 'Gr,V,PEA,NMP,', 'Gr,V,PEA,NMS,', 'Gr,V,PEA,NNP,', 'Gr,V,PEA,NNS,', 'Gr,V,PEM,AFS,', 'Gr,V,PEM,AMP,', 'Gr,V,PEM,AMS,', 'Gr,V,PEM,DFP,', 'Gr,V,PEM,GFS,', 
'Gr,V,PEM,NMP,', 'Gr,V,PEM,NMS,', 'Gr,V,PEP,AFP,', 'Gr,V,PEP,AFS,', 'Gr,V,PEP,AMP,', 'Gr,V,PEP,AMS,', 'Gr,V,PEP,ANP,', 'Gr,V,PEP,ANS,', 'Gr,V,PEP,DFS,', 'Gr,V,PEP,DMP,', 'Gr,V,PEP,DMS,', 'Gr,V,PEP,DNP,', 'Gr,V,PEP,DNS,', 'Gr,V,PEP,GFP,', 'Gr,V,PEP,GFS,', 'Gr,V,PEP,GMP,', 'Gr,V,PEP,GMS,', 'Gr,V,PEP,GNP,', 'Gr,V,PEP,GNS,', 'Gr,V,PEP,NFP,', 'Gr,V,PEP,NFS,', 'Gr,V,PEP,NMP,', 'Gr,V,PEP,NMS,', 'Gr,V,PEP,NNP,', 'Gr,V,PEP,NNS,', 'Gr,V,PEP,VFS,', 'Gr,V,PEP,VMP,', 'Gr,V,PEP,VMS,', 'Gr,V,PFA,ANP,', 'Gr,V,PFA,NMP,', 'Gr,V,PFA,NMS,', 'Gr,V,PFM,ANS,', 'Gr,V,PFP,GNP,', 'Gr,V,PIA3,,P,', 'Gr,V,PPA,AFP,', 'Gr,V,PPA,AFS,', 'Gr,V,PPA,AMP,', 'Gr,V,PPA,AMS,', 'Gr,V,PPA,ANP,', 'Gr,V,PPA,ANS,', 'Gr,V,PPA,DFP,', 'Gr,V,PPA,DFS,', 'Gr,V,PPA,DMP,', 'Gr,V,PPA,DMS,', 'Gr,V,PPA,DNP,', 'Gr,V,PPA,DNS,', 'Gr,V,PPA,GFP,', 'Gr,V,PPA,GFS,', 'Gr,V,PPA,GMP,', 'Gr,V,PPA,GMS,', 'Gr,V,PPA,GNP,', 'Gr,V,PPA,GNS,', 'Gr,V,PPA,NFP,', 'Gr,V,PPA,NFS,', 'Gr,V,PPA,NMP,', 'Gr,V,PPA,NMS,', 'Gr,V,PPA,NNP,', 'Gr,V,PPA,NNS,', 'Gr,V,PPA,VFS,', 'Gr,V,PPA,VMP,', 'Gr,V,PPA,VMS,', 'Gr,V,PPM,AFP,', 'Gr,V,PPM,AFS,', 'Gr,V,PPM,AMP,', 'Gr,V,PPM,AMS,', 'Gr,V,PPM,ANP,', 'Gr,V,PPM,ANS,', 'Gr,V,PPM,DFP,', 'Gr,V,PPM,DFS,', 'Gr,V,PPM,DMP,', 'Gr,V,PPM,DMS,', 'Gr,V,PPM,DNP,', 'Gr,V,PPM,DNS,', 'Gr,V,PPM,GFP,', 'Gr,V,PPM,GFS,', 'Gr,V,PPM,GMP,', 'Gr,V,PPM,GMS,', 'Gr,V,PPM,GNP,', 'Gr,V,PPM,GNS,', 'Gr,V,PPM,NFP,', 'Gr,V,PPM,NFS,', 'Gr,V,PPM,NMP,', 'Gr,V,PPM,NMS,', 'Gr,V,PPM,NNP,', 'Gr,V,PPM,NNS,', 'Gr,V,PPM,VMP,', 'Gr,V,PPM,VMS,', 'Gr,V,PPP,AFP,', 'Gr,V,PPP,AFS,', 'Gr,V,PPP,AMP,', 'Gr,V,PPP,AMS,', 'Gr,V,PPP,ANP,', 'Gr,V,PPP,ANS,', 'Gr,V,PPP,DFS,', 'Gr,V,PPP,DMP,', 'Gr,V,PPP,DMS,', 'Gr,V,PPP,DNP,', 'Gr,V,PPP,DNS,', 'Gr,V,PPP,GFS,', 'Gr,V,PPP,GMP,', 'Gr,V,PPP,GMS,', 'Gr,V,PPP,GNP,', 'Gr,V,PPP,GNS,', 'Gr,V,PPP,NFP,', 'Gr,V,PPP,NFS,', 'Gr,V,PPP,NMP,', 'Gr,V,PPP,NMS,', 'Gr,V,PPP,NNP,', 'Gr,V,PPP,NNS,', 'Gr,V,SAA1,,P,', 'Gr,V,SAA1,,S,', 'Gr,V,SAA2,,P,', 'Gr,V,SAA2,,S,', 'Gr,V,SAA3,,P,', 'Gr,V,SAA3,,S,', 'Gr,V,SAM1,,P,', 'Gr,V,SAM1,,S,', 'Gr,V,SAM2,,P,', 'Gr,V,SAM2,,S,', 'Gr,V,SAM3,,P,', 'Gr,V,SAM3,,S,', 'Gr,V,SAP1,,P,', 'Gr,V,SAP1,,S,', 'Gr,V,SAP2,,P,', 'Gr,V,SAP2,,S,', 'Gr,V,SAP3,,P,', 'Gr,V,SAP3,,S,', 'Gr,V,SEA1,,P,', 'Gr,V,SEA1,,S,', 'Gr,V,SEA2,,P,', 'Gr,V,SEA2,,S,', 'Gr,V,SPA1,,P,', 'Gr,V,SPA1,,S,', 'Gr,V,SPA2,,P,', 'Gr,V,SPA2,,S,', 'Gr,V,SPA3,,P,', 'Gr,V,SPA3,,S,', 'Gr,V,SPM1,,P,', 'Gr,V,SPM1,,S,', 'Gr,V,SPM2,,P,', 'Gr,V,SPM2,,S,', 'Gr,V,SPM3,,P,', 'Gr,V,SPM3,,S,', 'Gr,V,SPP1,,P,', 'Gr,V,SPP1,,S,', 'Gr,V,SPP2,,P,', 'Gr,V,SPP3,,P,', 'Gr,V,SPP3,,S,']\n" ], [ "#################\n\nunique_morph_list = list(frequency_greek.keys())\nmorph = unique_morph_list[0]\nmorph", "_____no_output_____" ], [ "# roles = mu.getGreekRoles()\n# roles", "_____no_output_____" ], [ "verbs = list(mu.filterSyntacticalRole(unique_morph_list, 'V'))\n\nverb_list_ = list(map(mu.morphToDict, verbs))\nprint(f\"Number of verb types: {len(verb_list_)}\")\n\nverbs_frame = pd.DataFrame(verb_list_)\n\nfor field in mu.morphFields:\n field_key = field + '_key'\n field_frequency = verbs_frame[field_key].value_counts()\n print(f\"\\nFrequency of {field}:\")\n print(field_frequency)", "Number of verb types: 376\n\nFrequency of role:\nV 376\nName: role_key, dtype: int64\n\nFrequency of type:\n, 376\nName: type_key, dtype: int64\n\nFrequency of mood:\nP 191\nI 97\nS 39\nM 27\nN 11\nO 11\nName: mood_key, dtype: int64\n\nFrequency of tense:\nP 131\nA 115\nE 79\nF 25\nI 18\nL 8\nName: tense_key, dtype: int64\n\nFrequency of voice:\nA 138\nP 125\nM 113\nName: voice_key, dtype: int64\n\nFrequency of person:\n, 
201\n3 68\n2 60\n1 47\nName: person_key, dtype: int64\n\nFrequency of case:\n, 186\nN 51\nA 50\nG 45\nD 36\nV 8\nName: case_key, dtype: int64\n\nFrequency of gender:\n, 186\nM 74\nN 60\nF 56\nName: gender_key, dtype: int64\n\nFrequency of number:\nS 189\nP 176\n, 11\nName: number_key, dtype: int64\n" ], [ "role = 'N'\nfield_data = mu.findFieldsForRole(unique_morph_list, role)", "\nFor role: 'N'\n\nFor 'N' - instances of 'role':\n['N']\n\nFor 'N' - instances of 'type':\n[',', 'S', 'P']\n\nFor 'N' - instances of 'mood':\n[',']\n\nFor 'N' - instances of 'tense':\n[',']\n\nFor 'N' - instances of 'voice':\n[',']\n\nFor 'N' - instances of 'person':\n[',']\n\nFor 'N' - instances of 'case':\n['N', 'A', 'G', 'D', 'V']\n\nFor 'N' - instances of 'gender':\n['M', 'N', 'F']\n\nFor 'N' - instances of 'number':\n['S', 'P']\n" ], [ "roles = mu.getGreekRoles()\nprint(f\"roles: {roles}\")\n\nfor role_ in roles:\n field_freq_data = mu.findFieldsFrequencyForRole(unique_morph_list, role_)\n", "roles: ['N', 'A', 'E', 'R', 'V', 'I', 'P', 'D', 'C', 'T']\n\nFor role: 'N'\n\nFor 'noun (N)' - frequency of 'role':\nN 157\nName: role_key, dtype: int64\n\nFor 'noun (N)' - frequency of 'type':\n, 69\nS 56\nP 32\nName: type_key, dtype: int64\n\nFor 'noun (N)' - frequency of 'mood':\n, 157\nName: mood_key, dtype: int64\n\nFor 'noun (N)' - frequency of 'tense':\n, 157\nName: tense_key, dtype: int64\n\nFor 'noun (N)' - frequency of 'voice':\n, 157\nName: voice_key, dtype: int64\n\nFor 'noun (N)' - frequency of 'person':\n, 157\nName: person_key, dtype: int64\n\nFor 'noun (N)' - frequency of 'case':\nN 43\nA 38\nG 32\nD 28\nV 16\nName: case_key, dtype: int64\n\nFor 'noun (N)' - frequency of 'gender':\nM 58\nN 52\nF 47\nName: gender_key, dtype: int64\n\nFor 'noun (N)' - frequency of 'number':\nS 83\nP 74\nName: number_key, dtype: int64\n\nFor role: 'A'\n\nFor 'adjective (A)' - frequency of 'role':\nA 85\nName: role_key, dtype: int64\n\nFor 'adjective (A)' - frequency of 'type':\nA 59\nR 26\nName: type_key, dtype: int64\n\nFor 'adjective (A)' - frequency of 'mood':\n, 85\nName: mood_key, dtype: int64\n\nFor 'adjective (A)' - frequency of 'tense':\n, 85\nName: tense_key, dtype: int64\n\nFor 'adjective (A)' - frequency of 'voice':\n, 85\nName: voice_key, dtype: int64\n\nFor 'adjective (A)' - frequency of 'person':\n, 85\nName: person_key, dtype: int64\n\nFor 'adjective (A)' - frequency of 'case':\nA 22\nG 20\nD 18\nN 17\nV 8\nName: case_key, dtype: int64\n\nFor 'adjective (A)' - frequency of 'gender':\nN 30\nM 30\nF 25\nName: gender_key, dtype: int64\n\nFor 'adjective (A)' - frequency of 'number':\nS 49\nP 36\nName: number_key, dtype: int64\n\nFor role: 'E'\n\nFor 'determiner (E)' - frequency of 'role':\nE 235\nName: role_key, dtype: int64\n\nFor 'determiner (E)' - frequency of 'type':\nP 51\nN 35\nA 30\nQ 28\nD 24\nF 23\nT 17\nO 15\nR 12\nName: type_key, dtype: int64\n\nFor 'determiner (E)' - frequency of 'mood':\n, 235\nName: mood_key, dtype: int64\n\nFor 'determiner (E)' - frequency of 'tense':\n, 235\nName: tense_key, dtype: int64\n\nFor 'determiner (E)' - frequency of 'voice':\n, 235\nName: voice_key, dtype: int64\n\nFor 'determiner (E)' - frequency of 'person':\n, 208\n1 19\n2 8\nName: person_key, dtype: int64\n\nFor 'determiner (E)' - frequency of 'case':\nA 61\nD 59\nN 57\nG 49\nV 9\nName: case_key, dtype: int64\n\nFor 'determiner (E)' - frequency of 'gender':\nF 80\nM 79\nN 76\nName: gender_key, dtype: int64\n\nFor 'determiner (E)' - frequency of 'number':\nS 123\nP 112\nName: number_key, dtype: int64\n\nFor role: 
'R'\n\nFor 'pronoun (R)' - frequency of 'role':\nR 213\nName: role_key, dtype: int64\n\nFor 'pronoun (R)' - frequency of 'type':\nP 71\nE 40\nD 28\nI 25\nR 25\nT 19\nC 5\nName: type_key, dtype: int64\n\nFor 'pronoun (R)' - frequency of 'mood':\n, 213\nName: mood_key, dtype: int64\n\nFor 'pronoun (R)' - frequency of 'tense':\n, 213\nName: tense_key, dtype: int64\n\nFor 'pronoun (R)' - frequency of 'voice':\n, 213\nName: voice_key, dtype: int64\n\nFor 'pronoun (R)' - frequency of 'person':\n, 118\n3 41\n2 28\n1 26\nName: person_key, dtype: int64\n\nFor 'pronoun (R)' - frequency of 'case':\nA 60\nG 53\nN 50\nD 45\nV 5\nName: case_key, dtype: int64\n\nFor 'pronoun (R)' - frequency of 'gender':\nM 86\nN 58\nF 53\n, 16\nName: gender_key, dtype: int64\n\nFor 'pronoun (R)' - frequency of 'number':\nS 109\nP 104\nName: number_key, dtype: int64\n\nFor role: 'V'\n\nFor 'verb (V)' - frequency of 'role':\nV 376\nName: role_key, dtype: int64\n\nFor 'verb (V)' - frequency of 'type':\n, 376\nName: type_key, dtype: int64\n\nFor 'verb (V)' - frequency of 'mood':\nP 191\nI 97\nS 39\nM 27\nN 11\nO 11\nName: mood_key, dtype: int64\n\nFor 'verb (V)' - frequency of 'tense':\nP 131\nA 115\nE 79\nF 25\nI 18\nL 8\nName: tense_key, dtype: int64\n\nFor 'verb (V)' - frequency of 'voice':\nA 138\nP 125\nM 113\nName: voice_key, dtype: int64\n\nFor 'verb (V)' - frequency of 'person':\n, 201\n3 68\n2 60\n1 47\nName: person_key, dtype: int64\n\nFor 'verb (V)' - frequency of 'case':\n, 186\nN 51\nA 50\nG 45\nD 36\nV 8\nName: case_key, dtype: int64\n\nFor 'verb (V)' - frequency of 'gender':\n, 186\nM 74\nN 60\nF 56\nName: gender_key, dtype: int64\n\nFor 'verb (V)' - frequency of 'number':\nS 189\nP 176\n, 11\nName: number_key, dtype: int64\n\nFor role: 'I'\n\nFor 'interjection (I)' - frequency of 'role':\nI 8\nName: role_key, dtype: int64\n\nFor 'interjection (I)' - frequency of 'type':\nE 5\nD 2\nR 1\nName: type_key, dtype: int64\n\nFor 'interjection (I)' - frequency of 'mood':\nM 4\n, 3\nN 1\nName: mood_key, dtype: int64\n\nFor 'interjection (I)' - frequency of 'tense':\n, 3\nP 3\nA 2\nName: tense_key, dtype: int64\n\nFor 'interjection (I)' - frequency of 'voice':\nA 5\n, 3\nName: voice_key, dtype: int64\n\nFor 'interjection (I)' - frequency of 'person':\n, 4\n2 4\nName: person_key, dtype: int64\n\nFor 'interjection (I)' - frequency of 'case':\n, 8\nName: case_key, dtype: int64\n\nFor 'interjection (I)' - frequency of 'gender':\n, 8\nName: gender_key, dtype: int64\n\nFor 'interjection (I)' - frequency of 'number':\n, 4\nP 2\nS 2\nName: number_key, dtype: int64\n\nFor role: 'P'\n\nFor 'preposition (P)' - frequency of 'role':\nP 7\nName: role_key, dtype: int64\n\nFor 'preposition (P)' - frequency of 'type':\n, 4\nI 3\nName: type_key, dtype: int64\n\nFor 'preposition (P)' - frequency of 'mood':\n, 7\nName: mood_key, dtype: int64\n\nFor 'preposition (P)' - frequency of 'tense':\n, 7\nName: tense_key, dtype: int64\n\nFor 'preposition (P)' - frequency of 'voice':\n, 7\nName: voice_key, dtype: int64\n\nFor 'preposition (P)' - frequency of 'person':\n, 7\nName: person_key, dtype: int64\n\nFor 'preposition (P)' - frequency of 'case':\n, 2\nG 2\nD 2\nA 1\nName: case_key, dtype: int64\n\nFor 'preposition (P)' - frequency of 'gender':\n, 7\nName: gender_key, dtype: int64\n\nFor 'preposition (P)' - frequency of 'number':\n, 7\nName: number_key, dtype: int64\n\nFor role: 'D'\n\nFor 'adverb (D)' - frequency of 'role':\nD 4\nName: role_key, dtype: int64\n\nFor 'adverb (D)' - frequency of 'type':\n, 3\nO 1\nName: type_key, dtype: 
int64\n\nFor 'adverb (D)' - frequency of 'mood':\n, 4\nName: mood_key, dtype: int64\n\nFor 'adverb (D)' - frequency of 'tense':\n, 4\nName: tense_key, dtype: int64\n\nFor 'adverb (D)' - frequency of 'voice':\n, 4\nName: voice_key, dtype: int64\n\nFor 'adverb (D)' - frequency of 'person':\n, 4\nName: person_key, dtype: int64\n\nFor 'adverb (D)' - frequency of 'case':\n, 4\nName: case_key, dtype: int64\n\nFor 'adverb (D)' - frequency of 'gender':\n, 4\nName: gender_key, dtype: int64\n\nFor 'adverb (D)' - frequency of 'number':\n, 4\nName: number_key, dtype: int64\n\nFor role: 'C'\n\nFor 'conjunction (C)' - frequency of 'role':\nC 3\nName: role_key, dtype: int64\n\nFor 'conjunction (C)' - frequency of 'type':\nC 1\nO 1\nS 1\nName: type_key, dtype: int64\n\nFor 'conjunction (C)' - frequency of 'mood':\n, 3\nName: mood_key, dtype: int64\n\nFor 'conjunction (C)' - frequency of 'tense':\n, 3\nName: tense_key, dtype: int64\n\nFor 'conjunction (C)' - frequency of 'voice':\n, 3\nName: voice_key, dtype: int64\n\nFor 'conjunction (C)' - frequency of 'person':\n, 3\nName: person_key, dtype: int64\n\nFor 'conjunction (C)' - frequency of 'case':\n, 3\nName: case_key, dtype: int64\n\nFor 'conjunction (C)' - frequency of 'gender':\n, 3\nName: gender_key, dtype: int64\n\nFor 'conjunction (C)' - frequency of 'number':\n, 3\nName: number_key, dtype: int64\n\nFor role: 'T'\n\nFor 'particle (T)' - frequency of 'role':\nT 2\nName: role_key, dtype: int64\n\nFor 'particle (T)' - frequency of 'type':\n, 1\nF 1\nName: type_key, dtype: int64\n\nFor 'particle (T)' - frequency of 'mood':\n, 2\nName: mood_key, dtype: int64\n\nFor 'particle (T)' - frequency of 'tense':\n, 2\nName: tense_key, dtype: int64\n\nFor 'particle (T)' - frequency of 'voice':\n, 2\nName: voice_key, dtype: int64\n\nFor 'particle (T)' - frequency of 'person':\n, 2\nName: person_key, dtype: int64\n\nFor 'particle (T)' - frequency of 'case':\n, 2\nName: case_key, dtype: int64\n\nFor 'particle (T)' - frequency of 'gender':\n, 2\nName: gender_key, dtype: int64\n\nFor 'particle (T)' - frequency of 'number':\n, 2\nName: number_key, dtype: int64\n" ], [ "char = ':'\nindex = mu.getIndexForChar(char)\nprint(f\"index for '{char}': {index}\")", "getIndexForChar - unexpected character ':'\nindex for ':': -1\n" ], [ "mu.findRoleNameForCharGreek('V')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3f090c6d84eaa9e49e7555c5f88f1f2cfa95c8
38,866
ipynb
Jupyter Notebook
3-1-LinearRegression.ipynb
jb2020-super/dive-into-deep-learning
691c8864efcdcfbfc4fbd5a5991046a2fec01608
[ "MIT" ]
null
null
null
3-1-LinearRegression.ipynb
jb2020-super/dive-into-deep-learning
691c8864efcdcfbfc4fbd5a5991046a2fec01608
[ "MIT" ]
null
null
null
3-1-LinearRegression.ipynb
jb2020-super/dive-into-deep-learning
691c8864efcdcfbfc4fbd5a5991046a2fec01608
[ "MIT" ]
null
null
null
213.549451
35,007
0.606391
[ [ [ "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport torch\nfrom d2l import torch as d2l", "_____no_output_____" ], [ "n = 10000\na = torch.ones(n)\nb = torch.ones(n)", "_____no_output_____" ], [ "class Timer: #@save\n \"\"\"记录多次运行时间\"\"\"\n def __init__(self):\n self.times = []\n self.start()\n\n def start(self):\n \"\"\"启动计时器\"\"\"\n self.tik = time.time()\n\n def stop(self):\n \"\"\"停止计时器并将时间记录在列表中\"\"\"\n self.times.append(time.time() - self.tik)\n return self.times[-1]\n\n def avg(self):\n \"\"\"返回平均时间\"\"\"\n return sum(self.times) / len(self.times)\n\n def sum(self):\n \"\"\"返回时间总和\"\"\"\n return sum(self.times)\n\n def cumsum(self):\n \"\"\"返回累计时间\"\"\"\n return np.array(self.times).cumsum().tolist()", "_____no_output_____" ], [ "c = torch.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nf'{timer.stop():.5f} sec'", "_____no_output_____" ], [ "timer.start()\nd = a + b\nf'{timer.stop():.5f} sec'", "_____no_output_____" ], [ "def normal(x, mu, sigma):\n p = 1 / math.sqrt(2 * math.pi * sigma**2)\n return p * np.exp(-0.5 / sigma**2 * (x - mu)**2)", "_____no_output_____" ], [ "# 再次使用numpy进行可视化\nx = np.arange(-7, 7, 0.01)\n\n# 均值和标准差对\nparams = [(0, 1), (0, 2), (3, 1), (0, 0.5)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5),\n legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb3f1be74fbfffca232b74a302797fc2a2aa1b43
3,909
ipynb
Jupyter Notebook
dataPrepare.ipynb
MShooshtari/unet
d8d0abf60a60b5e17b4c33c67e0c450153190011
[ "MIT" ]
null
null
null
dataPrepare.ipynb
MShooshtari/unet
d8d0abf60a60b5e17b4c33c67e0c450153190011
[ "MIT" ]
null
null
null
dataPrepare.ipynb
MShooshtari/unet
d8d0abf60a60b5e17b4c33c67e0c450153190011
[ "MIT" ]
null
null
null
29.171642
271
0.582246
[ [ [ "from data import *", "C:\\SoftWare\\Anaconda2\\envs\\python3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ] ], [ [ "# data augmentation \n\nIn deep learning tasks, a lot of data is need to train DNN model, when the dataset is not big enough, data augmentation should be applied.\n\nkeras.preprocessing.image.ImageDataGenerator is a data generator, which can feed the DNN with data like : (data,label), it can also do data augmentation at the same time.\n\nIt is very convenient for us to use keras.preprocessing.image.ImageDataGenerator to do data augmentation by implement image rotation, shift, rescale and so on... see [keras documentation](https://keras.io/preprocessing/image/) for detail.\n\nFor image segmentation tasks, the image and mask must be transformed **together!!**", "_____no_output_____" ], [ "## define your data generator\n\nIf you want to visualize your data augmentation result, set save_to_dir = your path", "_____no_output_____" ] ], [ [ "#if you don't want to do data augmentation, set data_gen_args as an empty dict.\n#data_gen_args = dict()\n\ndata_gen_args = dict(rotation_range=0.2,\n width_shift_range=0.05,\n height_shift_range=0.05,\n shear_range=0.05,\n zoom_range=0.05,\n horizontal_flip=True,\n fill_mode='nearest')\nmyGenerator = trainGenerator(20,'data/membrane/train','image','label',data_gen_args,save_to_dir = \"data/membrane/train/aug\")", "_____no_output_____" ] ], [ [ "## visualize your data augmentation result", "_____no_output_____" ] ], [ [ "#you will see 60 transformed images and their masks in data/membrane/train/aug\nnum_batch = 3\nfor i,batch in enumerate(myGenerator):\n if(i >= num_batch):\n break", "_____no_output_____" ] ], [ [ "## create .npy data\n\nIf your computer has enough memory, you can create npy files containing all your images and masks, and feed your DNN with them.", "_____no_output_____" ] ], [ [ "image_arr,mask_arr = geneTrainNpy(\"data/membrane/train/aug/\",\"data/membrane/train/aug/\")\n#np.save(\"data/image_arr.npy\",image_arr)\n#np.save(\"data/mask_arr.npy\",mask_arr)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb3f1ea77bdaa7b6afcbd6c57ecc5d9f0781c4b6
7,791
ipynb
Jupyter Notebook
demos/02_data_acquisition_twitter.ipynb
boudabou/Network-tour-in-data-science
88e0276f27fab626761a58f45ff6e1609fc27d79
[ "MIT" ]
null
null
null
demos/02_data_acquisition_twitter.ipynb
boudabou/Network-tour-in-data-science
88e0276f27fab626761a58f45ff6e1609fc27d79
[ "MIT" ]
null
null
null
demos/02_data_acquisition_twitter.ipynb
boudabou/Network-tour-in-data-science
88e0276f27fab626761a58f45ff6e1609fc27d79
[ "MIT" ]
null
null
null
29.850575
341
0.605571
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb3f2b711af6d896710edddd264e56006cc88ede
81,623
ipynb
Jupyter Notebook
2_base_of_probability/problems.ipynb
MasahiroOgawa/LNPR_BOOK_CODES
112e9ce1b1312d77651c5958d44dbcd2ba225c19
[ "MIT" ]
null
null
null
2_base_of_probability/problems.ipynb
MasahiroOgawa/LNPR_BOOK_CODES
112e9ce1b1312d77651c5958d44dbcd2ba225c19
[ "MIT" ]
null
null
null
2_base_of_probability/problems.ipynb
MasahiroOgawa/LNPR_BOOK_CODES
112e9ce1b1312d77651c5958d44dbcd2ba225c19
[ "MIT" ]
null
null
null
170.402923
28,166
0.66643
[ [ [ "# problem 2.1", "_____no_output_____" ], [ "read data", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "data = pd.read_csv('../sensor_data/sensor_data_200.txt', delimiter=' ', names=['date','time','ir','lidar'])\ndata", "_____no_output_____" ], [ "data['lidar'].hist()", "_____no_output_____" ] ], [ [ "## make function for extracting n samples ", "_____no_output_____" ] ], [ [ "def extract(data: pd.DataFrame, n: int) -> pd.DataFrame:\n return data.sample(n=n)", "_____no_output_____" ] ], [ [ "## a. extract n=3, and compute its sample variance", "_____no_output_____" ] ], [ [ "sample3 = extract(data['lidar'], n=3)\nsample3", "_____no_output_____" ], [ "sample3.var(ddof=0) #ddof: Delta Degrees of Freedom. The divisor used in calculations is N - ddof", "_____no_output_____" ] ], [ [ "## b. run a 10000 times, and compute mean of the sample variance.", "_____no_output_____" ] ], [ [ "vars = [ extract(data['lidar'], n=3).var(ddof=0) for i in range(10000)]", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "np.mean(vars)", "_____no_output_____" ] ], [ [ "## c. compute unbiased variance for n=3 data", "_____no_output_____" ] ], [ [ "extract(data['lidar'], n=3).var()", "_____no_output_____" ] ], [ [ "## d. run c 10000 times, and compute the mean of unbiased variance", "_____no_output_____" ] ], [ [ "vars2 = [extract(data['lidar'], n=3).var() for i in range(10000)]", "_____no_output_____" ], [ "np.mean(vars2)", "_____no_output_____" ] ], [ [ "# problem 2.2 simulate a dice.\n 1. shoot a dice 10 times\n 2. add 200 to the sum.\n 3. iterate 1,2 10000 times. draw histogram. ", "_____no_output_____" ] ], [ [ "\nimport random\nimport matplotlib.pyplot as plt\n", "_____no_output_____" ], [ "vals = []\n\nfor i in range(10000): \n samples = [random.choice([1,2,3,4,5,6]) for j in range(10)]\n vals.append(sum(samples) + 200)\n\ndata = pd.DataFrame(vals)\ndata.hist(bins=max(vals)-min(vals), align='left')", "_____no_output_____" ], [ "data.mean()", "_____no_output_____" ], [ "data.var()", "_____no_output_____" ], [ "onedice = [random.choice([1,2,3,4,5,6]) for i in range(10000)]", "_____no_output_____" ], [ "np.mean(onedice)", "_____no_output_____" ], [ "np.var(onedice, ddof=1)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb3f46e9a9dd56fd2a3e45af437b548362fae4ac
5,843
ipynb
Jupyter Notebook
prog1/implementacoes/tutoriais/entradaESaida.ipynb
arthurstevam98/Tamburetei
0b1198560ae43bc56465ad2022cf1ebe3c7cf4d6
[ "MIT" ]
null
null
null
prog1/implementacoes/tutoriais/entradaESaida.ipynb
arthurstevam98/Tamburetei
0b1198560ae43bc56465ad2022cf1ebe3c7cf4d6
[ "MIT" ]
null
null
null
prog1/implementacoes/tutoriais/entradaESaida.ipynb
arthurstevam98/Tamburetei
0b1198560ae43bc56465ad2022cf1ebe3c7cf4d6
[ "MIT" ]
null
null
null
26.31982
349
0.571795
[ [ [ "# Entrada e Saída de Dados \n\n\nEste tutorial tem o objetivo de mostrar brevemente as funções nativas da linguagem Python, utilizadas para entrada e saída padrão.\n\n1. print\n2. raw_input\n3. casting (int, float, str)", "_____no_output_____" ], [ "## Comando print\n\nNa versão 2 da linguagem Python, o comando responsável pela saída padrão de dados é o __print__.", "_____no_output_____" ] ], [ [ "print \"Imprimindo na saida padrao\"", "Imprimindo na saida padrao\n" ], [ "print \"O Tamburetei é top!\"", "O Tamburetei é top!\n" ] ], [ [ "## Função raw_input()\n\nA função __raw_input()__ é a responsável por coletar dados diretamente do teclado, que é a entrada padrão. Os dados coletados pela função vão ser transformadas em string.", "_____no_output_____" ] ], [ [ "# Espera por uma entrada e salva na variável mensagem\nmensagem = raw_input()\n# Imprime o que está em mensagem\nprint mensagem", "O Tamburetei é o melhor projeto do OpenDevUFCG\nO Tamburetei é o melhor projeto do OpenDevUFCG\n" ] ], [ [ "Observe que ao executar o trecho de código acima, o sistema espera que o usuário digite algo para então continuar a execução da próxima instrução.\n\nAinda com a função __raw_input()__ é possível imprimir um texto na saída padrão com o objetivo de auxiliar o usuário no que ele deve digitar. Esse texto deve vir como parâmetro da função.\n\n```python\nvariavel_que_guarda_entrada = raw_input(\"Texto de Ajuda\")\n```", "_____no_output_____" ] ], [ [ "# Imprime a string que está como parâmetro em raw_input()\n# em seguida espera a entrada do teclado e guarda na variável\nprimeiro_nome = raw_input(\"Digite seu primeiro nome: \")\n# Imprime o que está salvo em primeiro_nome\nprint primeiro_nome", "Digite seu primeiro nome: Matheus\nMatheus\n" ] ], [ [ "## Funções de Casting\n\nComo foi mencionado anteriormente, a função raw_input (que serve como entrada padrão de dados), transforma tudo que é recebido pelo teclado em string. Considerando essa informação, como salvar um numero inteiro ou de ponto flutuante? ou imprimir na saída padrão uma string e em seguida um número? Para isso são usadas as funções de casting.\n\nOperação de casting é o nome que se dá para a realização de uma conversão de tipos. Do tipo string para o tipo inteiro, por exemplo. Nesse sentido, as funções mais utilizadas são:\n\nNome da Função | O que faz\n-------------- | ----------\nstr() | Realiza a conversão de um tipo em string\nint() | Realiza a conversão de um tipo em inteiro\nfloat() | Realiza a conversão de um tipo em ponto flutuante\n", "_____no_output_____" ], [ "Realizando a conversão da string recebida da função raw_input() em inteiro:\n\n```python\nconverte_string_para_inteiro = int(raw_input(\"Digite um Número Inteiro: \"))\n```\n\nRealizando a conversão da string recebida da função raw_input() em ponto flutuante:\n\n```python\nconverte_string_para_float = float(raw_input(\"Digite um Número em Ponto Flutuante: \"))\n```", "_____no_output_____" ] ], [ [ "# Guarda um inteiro na variável idade com valor 20\nidade = 20\n# Converte a idade de inteiro para string, concatena com a frase e imprime\nprint \"A idade de Lucas: \" + str(idade)", "A idade de Lucas: 20\n" ], [ "# Guarda um ponto flutuante na variável preco com valor 200.00\npreco = 200.00\n# Converte o preco de float para string, concatena com a frase e imprime \nprint \"A mesa custou \" + str(preco)", "A mesa custou 200.0\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
cb3f4b0c771988923ddaba3287d0b76d1940f55a
27,464
ipynb
Jupyter Notebook
examples/tutorials/11_Learning_Unsupervised_Embeddings_for_Molecules.ipynb
zealseeker/deepchem
a44decc033c727e2da681b1461c3d57fdd53aca0
[ "MIT" ]
1
2020-05-17T10:26:52.000Z
2020-05-17T10:26:52.000Z
examples/tutorials/11_Learning_Unsupervised_Embeddings_for_Molecules.ipynb
zealseeker/deepchem
a44decc033c727e2da681b1461c3d57fdd53aca0
[ "MIT" ]
null
null
null
examples/tutorials/11_Learning_Unsupervised_Embeddings_for_Molecules.ipynb
zealseeker/deepchem
a44decc033c727e2da681b1461c3d57fdd53aca0
[ "MIT" ]
null
null
null
52.412214
682
0.657588
[ [ [ "# Tutorial Part 11: Learning Unsupervised Embeddings for Molecules\n\n\nIn this example, we will use a `SeqToSeq` model to generate fingerprints for classifying molecules. This is based on the following paper, although some of the implementation details are different: Xu et al., \"Seq2seq Fingerprint: An Unsupervised Deep Molecular Embedding for Drug Discovery\" (https://doi.org/10.1145/3107411.3107424).\n\nMany types of models require their inputs to have a fixed shape. Since molecules can vary widely in the numbers of atoms and bonds they contain, this makes it hard to apply those models to them. We need a way of generating a fixed length \"fingerprint\" for each molecule. Various ways of doing this have been designed, such as Extended-Connectivity Fingerprints (ECFPs). But in this example, instead of designing a fingerprint by hand, we will let a `SeqToSeq` model learn its own method of creating fingerprints.\n\nA `SeqToSeq` model performs sequence to sequence translation. For example, they are often used to translate text from one language to another. It consists of two parts called the \"encoder\" and \"decoder\". The encoder is a stack of recurrent layers. The input sequence is fed into it, one token at a time, and it generates a fixed length vector called the \"embedding vector\". The decoder is another stack of recurrent layers that performs the inverse operation: it takes the embedding vector as input, and generates the output sequence. By training it on appropriately chosen input/output pairs, you can create a model that performs many sorts of transformations.\n\nIn this case, we will use SMILES strings describing molecules as the input sequences. We will train the model as an autoencoder, so it tries to make the output sequences identical to the input sequences. For that to work, the encoder must create embedding vectors that contain all information from the original sequence. That's exactly what we want in a fingerprint, so perhaps those embedding vectors will then be useful as a way to represent molecules in other models!\n\n\n## Colab\n\nThis tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/11_Learning_Unsupervised_Embeddings_for_Molecules.ipynb)\n\n## Setup\n\nTo run DeepChem within Colab, you'll need to run the following cell of installation commands. This will take about 5 minutes to run to completion and install your environment. This notebook will take a few hours to run on a GPU machine, so we encourage you to run it on Google colab unless you have a good GPU machine available.", "_____no_output_____" ] ], [ [ "!wget -c https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh\n!chmod +x Anaconda3-2019.10-Linux-x86_64.sh\n!bash ./Anaconda3-2019.10-Linux-x86_64.sh -b -f -p /usr/local\n!conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.3.0\nimport sys\nsys.path.append('/usr/local/lib/python3.7/site-packages/')\nimport deepchem as dc", "_____no_output_____" ] ], [ [ "Let's start by loading the data. We will use the MUV dataset. 
It includes 74,501 molecules in the training set, and 9313 molecules in the validation set, so it gives us plenty of SMILES strings to work with.", "_____no_output_____" ] ], [ [ "import deepchem as dc\ntasks, datasets, transformers = dc.molnet.load_muv()\ntrain_dataset, valid_dataset, test_dataset = datasets\ntrain_smiles = train_dataset.ids\nvalid_smiles = valid_dataset.ids", "/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/sklearn/externals/joblib/__init__.py:15: FutureWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.\n warnings.warn(msg, category=FutureWarning)\nRDKit WARNING: [15:40:18] Enabling RDKit 2019.09.3 jupyter extensions\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = 
np.dtype([(\"quint8\", np.uint8, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" ] ], [ [ "We need to define the \"alphabet\" for our `SeqToSeq` model, the list of all tokens that can appear in sequences. (It's also possible for input and output sequences to have different alphabets, but since we're training it as an autoencoder, they're identical in this case.) Make a list of every character that appears in any training sequence.", "_____no_output_____" ] ], [ [ "tokens = set()\nfor s in train_smiles:\n tokens = tokens.union(set(c for c in s))\ntokens = sorted(list(tokens))", "_____no_output_____" ] ], [ [ "Create the model and define the optimization method to use. In this case, learning works much better if we gradually decrease the learning rate. We use an `ExponentialDecay` to multiply the learning rate by 0.9 after each epoch.", "_____no_output_____" ] ], [ [ "from deepchem.models.optimizers import Adam, ExponentialDecay\nmax_length = max(len(s) for s in train_smiles)\nbatch_size = 100\nbatches_per_epoch = len(train_smiles)/batch_size\nmodel = dc.models.SeqToSeq(tokens,\n tokens,\n max_length,\n encoder_layers=2,\n decoder_layers=2,\n embedding_dimension=256,\n model_dir='fingerprint',\n batch_size=batch_size,\n learning_rate=ExponentialDecay(0.004, 0.9, batches_per_epoch))", "WARNING:tensorflow:From /Users/bharath/opt/anaconda3/envs/deepchem/lib/python3.6/site-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\nInstructions for updating:\nCall initializer instance with the dtype argument instead of passing it to the constructor\nWARNING:tensorflow:Entity <bound method Stack.call of <deepchem.models.layers.Stack object at 0x1a3cc7b0f0>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. 
Cause: converting <bound method Stack.call of <deepchem.models.layers.Stack object at 0x1a3cc7b0f0>>: AssertionError: Bad argument number for Name: 3, expecting 4\nWARNING: Entity <bound method Stack.call of <deepchem.models.layers.Stack object at 0x1a3cc7b0f0>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method Stack.call of <deepchem.models.layers.Stack object at 0x1a3cc7b0f0>>: AssertionError: Bad argument number for Name: 3, expecting 4\nWARNING:tensorflow:Entity <bound method Stack.call of <deepchem.models.layers.Stack object at 0x1a3cc7b0f0>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method Stack.call of <deepchem.models.layers.Stack object at 0x1a3cc7b0f0>>: AssertionError: Bad argument number for Name: 3, expecting 4\nWARNING: Entity <bound method Stack.call of <deepchem.models.layers.Stack object at 0x1a3cc7b0f0>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method Stack.call of <deepchem.models.layers.Stack object at 0x1a3cc7b0f0>>: AssertionError: Bad argument number for Name: 3, expecting 4\n" ] ], [ [ "Let's train it! The input to `fit_sequences()` is a generator that produces input/output pairs. On a good GPU, this should take a few hours or less.", "_____no_output_____" ] ], [ [ "def generate_sequences(epochs):\n for i in range(epochs):\n for s in train_smiles:\n yield (s, s)\n\nmodel.fit_sequences(generate_sequences(40))", "Ending global_step 999: Average loss 72.0029\nEnding global_step 1999: Average loss 40.7221\nEnding global_step 2999: Average loss 31.5364\nEnding global_step 3999: Average loss 26.4576\nEnding global_step 4999: Average loss 22.814\nEnding global_step 5999: Average loss 19.5248\nEnding global_step 6999: Average loss 16.4594\nEnding global_step 7999: Average loss 18.8898\nEnding global_step 8999: Average loss 13.476\nEnding global_step 9999: Average loss 11.5528\nEnding global_step 10999: Average loss 10.1594\nEnding global_step 11999: Average loss 10.6434\nEnding global_step 12999: Average loss 6.57057\nEnding global_step 13999: Average loss 6.46177\nEnding global_step 14999: Average loss 7.53559\nEnding global_step 15999: Average loss 4.95809\nEnding global_step 16999: Average loss 4.35039\nEnding global_step 17999: Average loss 3.39137\nEnding global_step 18999: Average loss 3.5216\nEnding global_step 19999: Average loss 3.08579\nEnding global_step 20999: Average loss 2.80738\nEnding global_step 21999: Average loss 2.92217\nEnding global_step 22999: Average loss 2.51032\nEnding global_step 23999: Average loss 1.86265\nEnding global_step 24999: Average loss 1.67088\nEnding global_step 25999: Average loss 1.87016\nEnding global_step 26999: Average loss 1.61166\nEnding global_step 27999: Average loss 1.40708\nEnding global_step 28999: Average loss 1.4488\nEnding global_step 29801: Average loss 1.33917\nTIMING: model fitting took 5619.924 s\n" ] ], [ [ "Let's see how well it works as an autoencoder. 
We'll run the first 500 molecules from the validation set through it, and see how many of them are exactly reproduced.", "_____no_output_____" ] ], [ [ "predicted = model.predict_from_sequences(valid_smiles[:500])\ncount = 0\nfor s,p in zip(valid_smiles[:500], predicted):\n if ''.join(p) == s:\n count += 1\nprint('reproduced', count, 'of 500 validation SMILES strings')", "reproduced 363 of 500 validation SMILES strings\n" ] ], [ [ "Now we'll trying using the encoder as a way to generate molecular fingerprints. We compute the embedding vectors for all molecules in the training and validation datasets, and create new datasets that have those as their feature vectors. The amount of data is small enough that we can just store everything in memory.", "_____no_output_____" ] ], [ [ "train_embeddings = model.predict_embeddings(train_smiles)\ntrain_embeddings_dataset = dc.data.NumpyDataset(train_embeddings,\n train_dataset.y,\n train_dataset.w,\n train_dataset.ids)\n\nvalid_embeddings = model.predict_embeddings(valid_smiles)\nvalid_embeddings_dataset = dc.data.NumpyDataset(valid_embeddings,\n valid_dataset.y,\n valid_dataset.w,\n valid_dataset.ids)", "_____no_output_____" ] ], [ [ "For classification, we'll use a simple fully connected network with one hidden layer.", "_____no_output_____" ] ], [ [ "classifier = dc.models.MultitaskClassifier(n_tasks=len(tasks),\n n_features=256,\n layer_sizes=[512])\nclassifier.fit(train_embeddings_dataset, nb_epoch=10)", "Ending global_step 999: Average loss 829.805\nEnding global_step 1999: Average loss 450.42\nEnding global_step 2999: Average loss 326.079\nEnding global_step 3999: Average loss 265.199\nEnding global_step 4999: Average loss 246.724\nEnding global_step 5999: Average loss 224.64\nEnding global_step 6999: Average loss 202.624\nEnding global_step 7460: Average loss 213.885\nTIMING: model fitting took 19.780 s\n" ] ], [ [ "Find out how well it worked. Compute the ROC AUC for the training and validation datasets.", "_____no_output_____" ] ], [ [ "import numpy as np\nmetric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode=\"classification\")\ntrain_score = classifier.evaluate(train_embeddings_dataset, [metric], transformers)\nvalid_score = classifier.evaluate(valid_embeddings_dataset, [metric], transformers)\nprint('Training set ROC AUC:', train_score)\nprint('Validation set ROC AUC:', valid_score)", "computed_metrics: [0.97828427249789751, 0.98705973960125326, 0.966007068438685, 0.9874401066031584, 0.97794394675150698, 0.98021719680962449, 0.95318452689781941, 0.97185747562764213, 0.96389538770053473, 0.96798988621997473, 0.9690779239145807, 0.98544402211472004, 0.97762497271338133, 0.96843239633294886, 0.97753648081489997, 0.96504683675485614, 0.93547151958366914]\ncomputed_metrics: [0.90790686952512678, 0.79891461649782913, 0.61900937081659968, 0.75241212956581671, 0.58678903240426017, 0.72765072765072758, 0.34929006085192693, 0.83986814712005553, 0.82379943502824859, 0.61844636844636847, 0.863620199146515, 0.68106930272108857, 0.98020477815699669, 0.85073580939032944, 0.781015678254942, 0.75399733510992673, nan]\nTraining set ROC AUC: {'mean-roc_auc_score': 0.97132433878689139}\nValidation set ROC AUC: {'mean-roc_auc_score': 0.74592061629292239}\n" ] ], [ [ "# Congratulations! Time to join the Community!\n\nCongratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. 
You can also help the DeepChem community in the following ways:\n\n## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem)\nThis helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build.\n\n## Join the DeepChem Gitter\nThe DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb3f5e28499f0e4f57c61a4e3ff0ad2e9ee27a33
7,836
ipynb
Jupyter Notebook
PyC-Chapter2 Variable.ipynb
GraceKoo/python-note
9baac1080376fc37ff20565efe093105ef0fa6f0
[ "Apache-2.0" ]
null
null
null
PyC-Chapter2 Variable.ipynb
GraceKoo/python-note
9baac1080376fc37ff20565efe093105ef0fa6f0
[ "Apache-2.0" ]
null
null
null
PyC-Chapter2 Variable.ipynb
GraceKoo/python-note
9baac1080376fc37ff20565efe093105ef0fa6f0
[ "Apache-2.0" ]
null
null
null
20.195876
335
0.496299
[ [ [ "message = 'Hello Python world!'\nprint(message)", "Hello Python world!\n" ], [ "name = \"ada love alce\"\nprint(name.title())", "Ada Love Alce\n" ], [ "name = \"ada love alce\"\nprint(name.upper())", "ADA LOVE ALCE\n" ], [ "name = \"ada love alce\"\nprint(name.lower())", "ada love alce\n" ], [ "first_name = \"ada\"\nlast_name = \"alace\"\nfull_name = first_name + \" \" + last_name\n\nmessage = \"Hello,\" + full_name.title() + \"!\"\nprint(message)", "Hello,Ada Alace!\n" ], [ "print(\"language:\\n\\tPython\\n\\tC\\n\\tJavaScript\")", "language:\n\tPython\n\tC\n\tJavaScript\n" ], [ "favourite_language = \" python \"\nfavourite_language.rstrip()", "_____no_output_____" ], [ "favourite_language.lstrip()", "_____no_output_____" ], [ "favourite_language.strip()", "_____no_output_____" ], [ "favourite_language", "_____no_output_____" ], [ "favourite_language =\" python \"\nfavourite_language = favourite_language.strip()\nfavourite_language", "_____no_output_____" ] ], [ [ "message = 'One of Python's strengths is its diverse community'\nmessage", "_____no_output_____" ] ], [ [ "message = 'One of Python's strengths is its diverse community' message\nmessage", "_____no_output_____" ], [ "message = \"One of Python's strengths is its diverse community\" \nmessage", "_____no_output_____" ], [ "name = 'eric w'\nprint(\"Hello \" + name.title() + \",would you like to learn some Python today?\")", "Hello Eric W,would you like to learn some Python today?\n" ], [ "3 ** 3", "_____no_output_____" ], [ "age = 23 \nmessage = \"Happy \" + str(age) +\"rd Birthday!\"\nmessage", "_____no_output_____" ], [ "import this", "The Zen of Python, by Tim Peters\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb3f6425df7be22e6045e5ce2fa3304909907f24
13,206
ipynb
Jupyter Notebook
Linear_Classifier.ipynb
zemo89/Linear_Classifier
1a37e0e1692c7fe7bcaf5eb917f5e6064206c602
[ "Apache-2.0" ]
null
null
null
Linear_Classifier.ipynb
zemo89/Linear_Classifier
1a37e0e1692c7fe7bcaf5eb917f5e6064206c602
[ "Apache-2.0" ]
null
null
null
Linear_Classifier.ipynb
zemo89/Linear_Classifier
1a37e0e1692c7fe7bcaf5eb917f5e6064206c602
[ "Apache-2.0" ]
null
null
null
48.373626
592
0.573982
[ [ [ "import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\n\nclass Titanic:\n\n def __init__(self):\n self.train = pd.read_csv('train.csv',usecols = ['survived','sex'\t,'age','class','fare','class'])\n self.test = pd.read_csv('eval.csv',usecols = ['survived','sex'\t,'age','class','fare','class'])\n\n \n def dependent_features(self):\n return self.train.pop('survived') , self.test.pop('survived')\n\n\n def independent_features(self):\n return self.train , self.test\n\nT = Titanic()\ntrain_y , test_y = T.dependent_features()\ntrain_titanic , test_titanic = T.independent_features()", "_____no_output_____" ], [ "le = LabelEncoder()\nclass Modify_Features:\n\n def modify_train_data(self,train,x,y):\n return le.fit_transform(train[x]) , le.fit_transform(train[y])\n\n def modify_test_data(self,test,x,y):\n return le.fit_transform(test[x]) , le.fit_transform(test[y])\n\n def feature(self):\n features_col = []\n for key in train_titanic.keys():\n features_col.append(tf.feature_column.numeric_column(key = key))\n return features_col\n\n\nm = Modify_Features()\ntrain_titanic['sex'] , train_titanic['class'] = m.modify_train_data(train_titanic,'sex','class')\ntest_titanic['sex'] , test_titanic['class'] = m.modify_test_data(test_titanic,'sex','class')\ninput_features = m.feature()", "_____no_output_____" ], [ "def make_input(train , test , shuffle = True , batch_size = 128):\n def input():\n ds = tf.data.Dataset.from_tensor_slices((dict(train),test))\n if shuffle:\n ds = ds.shuffle(1000)\n ds = ds.batch(batch_size).repeat(500)\n return ds\n return input\n\ntitanic_train_ip = make_input(train_titanic , train_y)\ntitanic_test_ip = make_input(test_titanic , test_y , shuffle = False)", "_____no_output_____" ], [ "class Neural_network:\n\n def __init__(self):\n self.model = tf.estimator.LinearClassifier(feature_columns = input_features)\n \n def model_training(self):\n self.model.train(titanic_train_ip)\n\n def model_evaluation(self):\n self.model_training()\n result = self.model.evaluate(titanic_test_ip)\n print('accuracy for evaluation is : ',result['accuracy'])\n\n def prediction_on_model(self):\n self.model_evaluation()\n prediction = list(self.model.predict(titanic_test_ip))\n print(test_titanic.loc[0])\n print(prediction[0]['probabilities'])\n\n\nn = Neural_network()\nn.prediction_on_model()", "INFO:tensorflow:Using default config.\nWARNING:tensorflow:Using temporary folder as model directory: /tmp/tmp6duzobxz\nINFO:tensorflow:Using config: {'_model_dir': '/tmp/tmp6duzobxz', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true\ngraph_options {\n rewrite_options {\n meta_optimizer_iterations: ONE\n }\n}\n, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\nWARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/training/training_util.py:236: 
Variable.initialized_value (from tensorflow.python.ops.variables) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse Variable.read_value. Variables in 2.X are initialized automatically both in eager and graph (inside tf.defun) contexts.\nINFO:tensorflow:Calling model_fn.\n" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb3f7a4b20fa7bdfb14d95d8a22554539e1a0a94
93,920
ipynb
Jupyter Notebook
Intro_Data_Visualization_Pluralsight/Data Aggregation.ipynb
vishalvb/practice
4c38f863408c91aa072bd20510f098043fecd043
[ "MIT" ]
null
null
null
Intro_Data_Visualization_Pluralsight/Data Aggregation.ipynb
vishalvb/practice
4c38f863408c91aa072bd20510f098043fecd043
[ "MIT" ]
null
null
null
Intro_Data_Visualization_Pluralsight/Data Aggregation.ipynb
vishalvb/practice
4c38f863408c91aa072bd20510f098043fecd043
[ "MIT" ]
null
null
null
384.918033
38,732
0.920273
[ [ [ "import pandas as pd\nfrom matplotlib import pyplot as plt", "_____no_output_____" ], [ "data = pd.read_csv('obama.csv',parse_dates=['year_month'])#pandas will interpret year_month column as date/timestamps\ndata.head()", "_____no_output_____" ], [ "plt.plot(data.year_month, data.approve_percent, 'o',markersize = 2, alpha = 0.3)#markersize is to set size of dots, \n #alpha is to set transparency of dots\nplt.show()", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "data_mean = data.groupby('year_month').mean()\ndata_median = data.groupby('year_month').median()", "_____no_output_____" ], [ "plt.plot(data_mean.index, data_mean.approve_percent,'red')\nplt.plot(data_median.index, data_median.approve_percent,'green')\nplt.legend(['Mean','Median'])\nplt.plot(data.year_month, data.approve_percent, 'o',markersize = 2, alpha = 0.3)\nplt.show()", "_____no_output_____" ], [ "data_25 = data.groupby('year_month').quantile(0.25)#this finds 25% of approval ratings for each month\ndata_75 = data.groupby('year_month').quantile(0.27)#this finds 25% of approval ratings for each month\n\n\nplt.plot(data_75.index, data_75.approve_percent,'green')\nplt.plot(data_median.index, data_median.approve_percent,'red')\nplt.plot(data_25.index, data_mean.approve_percent,'blue')\n#plt.plot(data_median.index, data_median.approve_percent,'green')\nplt.legend(['75th','Median','25th'])\nplt.plot(data.year_month, data.approve_percent, 'o',markersize = 2, alpha = 0.3)\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb3f7faa21924d2ce730a7a3f49e1630376a930d
5,260
ipynb
Jupyter Notebook
E012/E012 - Diziler.ipynb
alperkonuralp/AlperIlePython
64e4940648a74306951dbfd97b593cfbcd94b7f6
[ "Apache-2.0" ]
1
2021-01-30T16:50:40.000Z
2021-01-30T16:50:40.000Z
E012/.ipynb_checkpoints/E012 - Diziler-checkpoint.ipynb
alperkonuralp/AlperIlePython
64e4940648a74306951dbfd97b593cfbcd94b7f6
[ "Apache-2.0" ]
null
null
null
E012/.ipynb_checkpoints/E012 - Diziler-checkpoint.ipynb
alperkonuralp/AlperIlePython
64e4940648a74306951dbfd97b593cfbcd94b7f6
[ "Apache-2.0" ]
null
null
null
18.853047
296
0.463308
[ [ [ "ogrenci1 = \"Alper\"\nogrenci2 = \"Burcu\"\nogrenci3 = \"Yağmur\"", "_____no_output_____" ], [ "ogrenci1, ogrenci2, ogrenci3", "_____no_output_____" ], [ "ogrenci4 = \"Veli\"", "_____no_output_____" ], [ "ogrenci1, ogrenci2, ogrenci3, ogrenci4", "_____no_output_____" ], [ "ogrenciler = []\nogrenciler", "_____no_output_____" ], [ "ogrenciler = ['Alper', 'Burcu', 'Yağmur']\nogrenciler", "_____no_output_____" ], [ "ogrenciler.append('Ali')\nogrenciler", "_____no_output_____" ], [ "del ogrenciler[3]\nogrenciler", "_____no_output_____" ], [ "ogrenciler[2]", "_____no_output_____" ], [ "ogrenciler[0], ogrenciler[1], ogrenciler[2]", "_____no_output_____" ], [ "ogrenciler[3]", "_____no_output_____" ], [ "len(ogrenciler)", "_____no_output_____" ], [ "kisi = [ 'Alper', 'Konuralp', 1234, '0 Rh+', 1978, 1.78, 72.0 ]\nkisi", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3f7fdceeeff7b42733397ba45edadf26293497
415,838
ipynb
Jupyter Notebook
Predavanje 1.ipynb
pevlaic/MS
83c271d68a0cac63d145a98d0dd0cb5413a938fa
[ "Unlicense" ]
null
null
null
Predavanje 1.ipynb
pevlaic/MS
83c271d68a0cac63d145a98d0dd0cb5413a938fa
[ "Unlicense" ]
null
null
null
Predavanje 1.ipynb
pevlaic/MS
83c271d68a0cac63d145a98d0dd0cb5413a938fa
[ "Unlicense" ]
null
null
null
147.147205
141,988
0.891838
[ [ [ "# Matematički softver - prvo predavanje", "_____no_output_____" ], [ "## Metapodaci", "_____no_output_____" ], [ "### Materijali", "_____no_output_____" ], [ "Materijali će se nalaziti na Githubu, u repozitoriju kolegija (https://github.com/vedgar/ms). Za potrebe kolegija, svi morate imati _neku_ online bazu vlastitog koda, u kojoj napravite repozitorij 'Matematički softver'. Preporučujem Github.", "_____no_output_____" ], [ "### Potreban softver", "_____no_output_____" ], [ "Na računalo na kojem ćete pratiti predavanja, pisati zadaće i slično, instalirajte `Anaconda` distribuciju (full (**ne** \nMiniconda!), Python 3.6, 64bit ako imate 64bitno računalo, ne trebate instalirati Visual Studio Code). Instalacija i pokretanje traje dosta dugo. Pokrenite Anaconda Navigator (iz Start izbornika ili iz komandne linije) i odaberite Jupyter Notebook (launch). Odaberite folder gdje ćete držati datoteke vezane uz kolegij, i napravite novu bilježnicu (u browseru, `New`... `Python 3`). Odaberite `Help`... `User Interfacte Tour` da biste se upoznali sa sučeljem.", "_____no_output_____" ], [ "Na računalima u Pr1 (pod Linuxom!) već je instalirano sve potrebno, samo da biste pokrenuli Anaconda Navigator, morate prvo u terminalu izvršiti naredbu\n```bash\nexport PATH=/opt/anaconda3/bin:$PATH\nanaconda-navigator\n```", "_____no_output_____" ], [ "### Sadržaj kolegija", "_____no_output_____" ], [ "* IPython / Jupyter\n* Scipy stack: Numpy, Sympy, Pandas, Matplotlib, Scikit\n* (_možda_) Sage\n* (_vrlo možda_) Julia\n* Markdown\n* LaTeX", "_____no_output_____" ], [ "### Cilj kolegija", "_____no_output_____" ], [ "* Razviti sposobnost korištenja Pythona kao moćnog alata za znanstvenike\n* Osposobiti vas za produkciju visokokvalitetnih publikacija", "_____no_output_____" ], [ "### Polaganje", "_____no_output_____" ], [ "Ocjena se formira iz dva kolokvija i domaćih zadaća. Kolokviji su _open-book_ tipa i nose 80 bodova. Domaće zadaće su u obliku eseja/projekta sa zadanim elementima.\n\nPredaja svih domaćih zadaća nužan je uvjet za polaganje kolegija. Domaća zadaća je uspješno predana samo ako ima _sve_ tražene elemente. Domaće zadaće nose 20 bodova, koji se dodjeljuju za _dodatne_ elemente.\n\nZbog kontinuiranog praćenja, popravni ispit ne postoji. 
U slučaju opravdane spriječenosti prisustvovanja kolokviju, javite se što prije da dogovorimo alternativne metode ocjenjivanja.", "_____no_output_____" ], [ "### Literatura", "_____no_output_____" ], [ "* [Lectures on scientific computing with Python](https://github.com/jrjohansson/scientific-python-lectures) - kolegij po uzoru na koji je ovaj nastao (davno)\n\n* [Astro 599 Course](http://nbviewer.jupyter.org/github/jakevdp/2013_fall_ASTR599/tree/master/notebooks/) - još jedan sličan kolegij, nažalost u Pythonu 2 ali s puno korisnih informacija\n\n* [Reproducible data analysis in Jupyter](http://jakevdp.github.io/blog/2017/03/03/reproducible-data-analysis-in-jupyter/) - serija videâ koji detaljno objašnjavaju _workflow_ potreban za \"idealnu\" prvu zadaću\n\n* [xkcd 1313: Regex Golf](http://nbviewer.jupyter.org/url/norvig.com/ipython/xkcd1313.ipynb) - duhovit ali i poučan primjer netrivijalnog rezultata dobivenog pomoću Jupytera\n\n* [A gallery of interesting Jupyter Notebooks](https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks) - ogromni repozitorij raznovrsnih bilježnica, dobro za upoznavanje s mogućnostima", "_____no_output_____" ], [ "## Primjeri", "_____no_output_____" ], [ "### Slaganje bilježnica", "_____no_output_____" ], [ "Možemo uključivati slike, zvukove, videe, matematičke zapise, JSON objekte,... Za detalje pogledati [dokumentaciju](http://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html).", "_____no_output_____" ] ], [ [ "from IPython.display import Image, YouTubeVideo", "_____no_output_____" ], [ "Image('http://python.org/images/python-logo.gif')", "_____no_output_____" ], [ "YouTubeVideo('k7WXVWej-NY')", "_____no_output_____" ] ], [ [ "Možemo i izvršavati sistemske naredbe (počinju uskličnikom), ali time naša bilježnica postaje izvršiva samo na određenom OSu. Zato je dobro to izbjegavati.", "_____no_output_____" ] ], [ [ "!dir", "Predavanje\\ 1.ipynb Untitled.ipynb\r\n" ] ], [ [ "### Magične naredbe", "_____no_output_____" ], [ "\"Magične\" naredbe počinju znakom `%`. 
Linijske (počinju jednim znakom `%`) primaju argumente do kraja linije, ćelijske (počinju s `%%`) primaju argumente do kraja linije.", "_____no_output_____" ] ], [ [ "%lsmagic", "_____no_output_____" ], [ "%who", "Image\t YouTubeVideo\t \n" ], [ "%matplotlib inline", "_____no_output_____" ], [ "%xmode plain", "Exception reporting mode: Plain\n" ], [ "a = 0\n1 / a", "_____no_output_____" ], [ "%%HTML\n<h5>Neki naslov</h5>\n<p>Jedan jednostavan <em>primjer</em>.</p>\n<p>Od dva odlomka teksta.</p>", "_____no_output_____" ], [ "%%time\nzbroj = 0\nfor broj in range(1_000_000):\n zbroj += broj\nprint(zbroj)", "499999500000\nWall time: 512 ms\n" ], [ "# %%js\n# alert('Bu!')", "_____no_output_____" ] ], [ [ "### Traženje pomoći", "_____no_output_____" ], [ "Dodavanjem `?` (ili `??` za više detalja) na praktički bilo koje ime možemo zatražiti pomoć u vezi objekta imenovanog tim imenom.", "_____no_output_____" ] ], [ [ "YouTubeVideo?", "_____no_output_____" ], [ "help(chr)", "Help on built-in function chr in module builtins:\n\nchr(i, /)\n Return a Unicode string of one character with ordinal i; 0 <= i <= 0x10ffff.\n\n" ], [ "dir(list)", "_____no_output_____" ] ], [ [ "### Kompleksniji primjer", "_____no_output_____" ] ], [ [ "from skimage import data\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "coins = data.coins()\nplt.imshow(coins, cmap='gray')", "_____no_output_____" ], [ "coins", "_____no_output_____" ], [ "coins.shape", "_____no_output_____" ], [ "coins_zoom = coins[10:80, 300:370]", "_____no_output_____" ], [ "from skimage import restoration", "_____no_output_____" ], [ "tv_coins = restoration.denoise_tv_chambolle(coins_zoom, weight=.05)", "_____no_output_____" ], [ "plt.figure()\nplt.subplot(1, 2, 1)\nplt.imshow(coins_zoom)\nplt.subplot(1, 2, 2)\nplt.imshow(tv_coins)", "_____no_output_____" ] ], [ [ "## Osnove Pythona", "_____no_output_____" ], [ "### Objekti i njihova imena", "_____no_output_____" ], [ "Osnovna razlika Pythona od jezika poput C-a je: U C-u, memorijske lokacije su primarni objekti kojima baratamo, vrijednosti zapisane u njima su sekundarne. Varijable su zato uvijek imena za \"kutije\": nizove od nekoliko (fiksno i konačno mnogo) uzastopnih memorijskih lokacija, unutar kojih se nalazi vrijednost objekta. Čak i kod dinamički alocirane memorije, deklarirane varijable uvijek imaju fiksnu i ograničenu veličinu (`sizeof`), dok o našoj interpretaciji njihovog sadržaja kao pointera ovisi naša sposobnost da spremimo potencijalno više podataka u memoriju nego što smo statički alocirali.\n\nU Pythonu, pogled na svijet je bitno drugačiji: primarne \"vrijednosti\" kojima baratamo su upravo objekti, memorijske lokacije su potpuno irelevantne, a da bismo došli do objekata odnosno da bismo ih mogli spomenuti u kodu, dajemo im _imena_. `x = 382` ne znači (kao u C-u) \"stvori kutiju imena `x` i u nju stavi bit-pattern `382`\", već \"stvori objekt `382` (tipa `int`), i daj mu ime `x`\".\n\nDirektna posljedica: tip više nije svojstvo varijable (imena), nego objekta. `x = 3; x = 'tri'` je sasvim legalno. Naravno, hoće li `x.islower()` vratiti `True` ili dignuti `AttributeError`, ovisi o konkretnoj vrijednosti od `x` u trenutku poziva -- baš kao da smo napisali `3 .islower()` odnosno `'tri'.islower()`. Još jedna posljedica: isti objekt može imati više imena. `x = y` jednostavno uzme objekt na koji referira ime `y`, i dade mu još jedno ime `x`. 
_Ništa se nikamo ne kopira._ Na primjer kod poziva funkcije, objekti koje smo naveli kao funkcijske argumente jednostavno dobiju još neka _lokalna_ imena, koja zovemo parametri. Opet, ništa se nikamo ne kopira.", "_____no_output_____" ], [ "Da bismo ustanovili referiraju li imena `x` i `y` na isti objekt, možemo koristiti `x is y`. Napomena: to nema veze s jednakošću, koja se testira sa `x == y`. Implementacija jednakosti, kao i mnogih drugih operatora/metoda, ovisi o tipu od `x` (i možda od `y`): npr. razlomci bitno drugačije definiraju jednakost nego IP adrese. Čak i da dva objekta imaju potpuno istu reprezentaciju u memoriji (tada će vjerojatno biti `x == y`), to i dalje mogu biti dva objekta (`x is not y`), i promjena jednog neće utjecati na drugi objekt. Druga implikacija (`x is y` povlači `x == y`, odnosno zapravo `x == x`) vrijedi puno češće, i jedini standardni kontraprimjer je NaN (`math.nan`).", "_____no_output_____" ], [ "### Prostori imena (_namespaces_)", "_____no_output_____" ], [ "Kako su imena očito vrlo važna i zapravo jedini način da u kodu govorimo o objektima, Python posvećuje posebnu pažnju njihovom razvrstavanju. Svako ime postoji u točno jednom prostoru, te se prostori imena dijele na implicitne (čija imena se obično ne navode) i eksplicitne (čija imena se moraju navesti).\n\nImplicitnih prostora imena ima četiri vrste, i uvijek su ugniježđeni na isti način. Vanjski je `builtins`, u kojem žive preddefinirana imena za Pythonove ugrađene funkcije i ostale objekte (npr. `print`). Obično se ne mijenja, iako Python dopušta i njegovu promjenu.\n\nSljedeći prema unutra je `globals`, u kojem žive ili varijable koje smo definirali unutar interaktivnog rada (npr. pisanja u Jupyterovoj bilježnici), ili pak \"globalne\" varijable pojedinog _modula_ (ugrubo, datoteke s nastavkom `.py`) koje nisu ni u kojem unutrašnjem (klasnom ili lokalnom) prostoru. U njega možemo slobodno dodavati imena, a možemo i mijenjati njihove vrijednosti ali to se ne smatra dobrom praksom iz svih uobičajenih razloga protiv globalnih varijabli (globalne _konstante_, imena čije se vrijednosti ne mijenjaju -- npr. funkcije i klase koje modul definira -- su sasvim u redu).\n\nUnutar `globals` postoje dvije vrste prostora imena, koji se oba zovu `locals` ali ne vide jedan drugog bez obzira na sintaksnu ugniježđenost: klasni i funkcijski. U klasnom prostoru žive atributi (uključujući metode i svojstva) klase unutar koje se nalazimo (ako se uopće nalazimo unutar `class ...:` bloka). Točna priroda ovog bloka, pa tako i mogućnost mijenjanja, ovisi o tipu trenutne klase (tzv. \"metaklasi\"), no često se u njemu nalaze samo metode.\n\nU funkcijskom lokalnom prostoru žive \"prave\" lokalne varijable (uključivo parametri) funkcije unutar koje se nalazimo (ako se uopće nalazimo unutar `def ...:` bloka). U ovom prostoru imena su fiksna (samo ona koja se sintaksno pojavljuju u kodu funkcije) i nije moguće dodavati nova dinamički, iako je naravno moguće mijenjati njihove vrijednosti. Ovaj prostor imena iznova se stvara svakim pozivom funkcije, i prestaje postojati povratkom iz funkcije; zato je posebno optimiziran. Funkcijski lokalni prostori mogu biti i međusobno ugniježđeni, ako imamo jednu funkciju definiranu unutar druge.\n\nZa implicitne prostore imena vrijedi jednostavno pravilo: _čitanje_ vrijednosti imena (npr. korištenje imena u nekom izrazu) obavlja se prvo u trenutnom prostoru, te ako ga tamo nema, u prvom iznad, te ako ga ni tamo nema, u sljedećem iznad njega... i tako dalje u hijerarhiji. 
Ako se ime ne nađe ni u jednom implicitnom prostoru imena (čak niti u `builtins`), prijavljuje se greška `NameError`. S druge strane, _postavljanje_ vrijednosti imena (npr. pridruživanje tog imena nekom objektu), kao i _brisanje_ vrijednosti imena (npr. naredbom `del`), _uvijek_ se obavlja u trenutnom prostoru imena (osim ako smo to promijenili `global` ili `nonlocal` direktivom).", "_____no_output_____" ], [ "Eksplicitni prostori imena su puno jednostavniji, i ima ih dva tipa: atributni (oni čija imena se navode kao `prostor.ime`) i spremnički (oni čija imena se navode kao `prostor[ime]`). Atributni su vrlo slični implicitnima, samo su vezani uz _objekte_ (instance) pojedinih klasa. Gotovo svaki objekt u Pythonu ima svoj atributni prostor imena. Važan specijalni slučaj: `import nekimodul` stvara objekt imena `nekimodul`, čiji atributni prostor je upravo globalni prostor modula `nekimodul.py`. Na taj način možemo jednostavno koristiti globalna imena iz jednog modula u drugom. Atributni prostori su često dinamički (možemo dodavati i brisati imena), iako pomoću `__slots__` definicije možemo fiksirati skup imena, vrlo slično funkcijskom lokalnom prostoru.\n\nSpremnički prostor imena imaju samo posebni tipovi _spremnici_, kao što su npr. liste i rječnici. Njihova posebnost je u tome da \"imena\" u njima mogu biti proizvoljni objekti (ipak, najčešće se zahtijeva da budu nepromjenjivi) -- u svim ostalim prostorima imena su morala biti validni identifikatori: nizovi znakova koji sadrže slova, znamenke i znak `_`, te ne počinju znamenkom. U slučaju listi (i raznih drugih sekvenci kao što su slogovi, stringovi, polja,...), validna imena su cijeli brojevi i zovu se _indeksi_. U slučaju rječnika, validna imena su bilo kakvi _hashabilni_ objekti i zovu se _ključevi_.", "_____no_output_____" ] ], [ [ "def f(x): return x + 5", "_____no_output_____" ], [ "f(1)", "_____no_output_____" ] ], [ [ "### Primjena: uvoz modula", "_____no_output_____" ], [ "Recimo da imamo modul `m.py`, unutar kojeg se nalazi kod\n```py\nx = y = 2\nz = 3\n```\n\nNaredbom `import m` dobit ćemo (u trenutnom prostoru imena) ime `m` koje će se odnositi na objekt tipa `module`, čiji atributni prostor imena će imati tri imena: `m.x` i `m.y` će biti imena za jedan te isti broj `2`, dok će `m.z` biti ime za broj `3`. Naredbom `import m as n` dobit ćemo ime `n` koje će se odnositi na isti objekt opisan gore. Tako će `n.x` i `n.y` biti imena za `2`, dok će `n.z` biti ime za `3`. Ovo najčešće koristimo kad želimo skratiti ime modula, posebno kad je modul u paketu te je time njegovo ime zapravo cijela \"staza\": `import matplotlib.pyplot as plt`.\n\nNaredbom `from m import x` dobit ćemo (u trenutnom prostoru imena) ime `x` koje će se odnositi na broj `2`. Naredbom `from m import x as z` dobit ćemo ime `z` koje će se odnositi na broj `2`. Naredbom `from m import x, z` dobit ćemo imena `x` i `z` koja će se odnositi redom na `2` i `3`. Ovo koristimo kad imamo module koji sadrže sve i svašta, i treba nam samo uzak dio njihovih funkcionalnosti: `from functools import partial`. Također je korisno kad je glavnina funkcionalnosti modula u jednoj funkciji/klasi koja se zove jednako kao modul: `from datetime import datetime`.\n\nNaredbom `from m import *` dobit ćemo imena `x`, `y` i `z`, takva da će se prva dva odnositi na broj `2`, a treće na broj `3`. 
Primijetite da se ovakav oblik naredbe ne može naći unutar funkcijske definicije, jer se time imena `x`, `y` i `z` ne bi nalazila sintaksno u tekstu funkcijske definicije, te Python ne bi mogao konstruirati lokalni funkcijski prostor imena (ne zna koja će se imena pojaviti u lokalnom prostoru sve do trenutka poziva funkcije). Također, loša strana ovog je teže razumijevanje koda: posebno pri _debuggiranju_, izuzetno je važno za svako ime znati iz kojeg prostora dolazi. Ako imamo\n\n```py\nfrom a import *\nfrom b import *\nfrom c import *\n...\n... z ...\n```\n\nnemamo nikakav način da saznamo iz kojeg modula je `z`, te najčešće moramo istraživati jedan po jedan. Ipak, u interaktivnom _quick&dirty_ kodiranju često je vrlo praktično koristiti ovaj oblik.", "_____no_output_____" ], [ "### Primjena: \"prijenos\" argumenata u funkciju", "_____no_output_____" ] ], [ [ "def f(x):\n x = 8\n print(x)\nx = 3\nf(x)\nprint(x)", "8\n3\n" ] ], [ [ "Ime `x` u prve tri linije različito je od imena `x` u druge tri linije: prvo živi u lokalnom funkcijskom prostoru funkcije `f`, dok drugo živi u globalnom prostoru ove Jupyter bilježnice.\n\nPrvo se definira funkcija `f`, s jednim lokalnim imenom `x`, koje je (jednom kad se funkcija pozove) u početku ime za argument funkcije, nakon toga ime za broj `8`, i nakon toga se njegova vrijednost (dakle `8`) prenese u funkciju `print` (precizno, funkciju čije ime u `builtins` prostoru je `print`).\n\nNakon toga stvorimo broj `3`, damo mu _globalno_ ime `x`, te s njime pozovemo funkciju `f` (precizno, funkciju čije ime u `globals` prostoru je `f`). Tada se stvori lokalni funkcijski prostor imena, u njemu ime `x` za broj `3`, zatim se to isto (lokalno) ime pridruži broju `8`, te se ispiše `8`. Time funkcija dolazi do kraja, njen lokalni prostor imena nestaje, te se izvršava sljedeća naredba nakon funkcijskog poziva, a to je ovaj `print(x)` u zadnjoj liniji. U tom trenutku, postoji samo jedan `x`, globalno ime `x` za objekt `3`, te se ispisuje `3`.", "_____no_output_____" ] ], [ [ "def g(x):\n x[2] = 8\n print(x)\nx = [1, 5, 3]\ng(x)\nprint(x)", "[1, 5, 8]\n[1, 5, 8]\n" ] ], [ [ "Velika razlika od prethodnog primjera: također postoje dva imena, lokalni i globalni `x`, no ovaj put se oba cijelo vrijeme odnose na isti objekt: jednu jedinu listu u gornjem kodu. Naredba pridruživanja `x[2] = 8` ne mijenja lokalni funkcijski prostor funkcije `g` (u kojem živi `x`), već `x`ov atributni prostor imena. Zapravo, to nije pridruživanje ničega imenu `x`, već je to pridruživanje (broja `8`) \"imenu\" `2` unutar prostora imena koji odgovara objektu -- spremniku `x`. 
To se najbolje može vidjeti po tome da u ovoj drugoj naredbi \"x\" može biti izraz: `(x if 2 < 3 else y)[2] = 8`, dok je naravno \"pridruživanje\" `(x if 2 < 3 else y) = 8` besmisleno.", "_____no_output_____" ], [ "### Za detaljniji prikaz ...", "_____no_output_____" ], [ "Izuzetno koristan alat za vizualizaciju Pythonove memorije: http://pythontutor.com/visualize.html#mode=edit.", "_____no_output_____" ] ], [ [ "%%HTML\n<iframe width=\"800\" height=\"500\" frameborder=\"0\" src=\"http://pythontutor.com/iframe-embed.html#code=def%20f%28x%29%3A%0A%20%20%20%20x%20%3D%208%0A%20%20%20%20print%28x%29%0Ax%20%3D%203%0Af%28x%29%0Aprint%28x%29%0A%0Adef%20g%28x%29%3A%0A%20%20%20%20x%5B2%5D%20%3D%208%0A%20%20%20%20print%28x%29%0Ax%20%3D%20%5B1,%205,%203%5D%0Ag%28x%29%0Aprint%28x%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=true&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false\"> </iframe>", "_____no_output_____" ] ], [ [ "## Matematičke operacije", "_____no_output_____" ] ], [ [ "18 + 7, 18 - 7, 18 * 7, 18 / 7", "_____no_output_____" ], [ "18 // 7, 18 % 7, divmod(18, 7) ", "_____no_output_____" ], [ "18 ** 7, pow(18, 7), pow(18, 7, 1000)", "_____no_output_____" ], [ "from fractions import Fraction as F\nfrom decimal import Decimal as D, getcontext\nimport cmath", "_____no_output_____" ], [ "F(18, 7)**2 % 1", "_____no_output_____" ], [ "str(_), _.numerator, _.denominator", "_____no_output_____" ], [ "getcontext().prec = 30\nD(18/7), D(18)/D(7)", "_____no_output_____" ], [ "getcontext().prec = 200\nD(2).sqrt()", "_____no_output_____" ], [ "_ ** 2 - 2", "_____no_output_____" ], [ "2 ** 3 ** 4, (2 ** 3) ** 4, 2 ** (3 ** 4)", "_____no_output_____" ], [ "5 ** 3**-1, 7 ** .5, (-1) ** .5", "_____no_output_____" ], [ "1j.real, 1j.imag, cmath.isclose(_[2], 1j)", "_____no_output_____" ], [ "(2 + 3j) ** 5, (1 + 1j) ** 8", "_____no_output_____" ], [ "cmath.isclose(cmath.e ** (1j * cmath.pi) + 1, 0)", "_____no_output_____" ], [ "help(cmath.isclose)", "Help on built-in function isclose in module cmath:\n\nisclose(a, b, *, rel_tol=1e-09, abs_tol=0.0)\n Determine whether two complex numbers are close in value.\n \n rel_tol\n maximum difference for being considered \"close\", relative to the\n magnitude of the input values\n abs_tol\n maximum difference for being considered \"close\", regardless of the\n magnitude of the input values\n \n Return True if a is close in value to b, and False otherwise.\n \n For the values to be considered close, the difference between them must be\n smaller than at least one of the tolerances.\n \n -inf, inf and NaN behave similarly to the IEEE 754 Standard. That is, NaN is\n not close to anything, even itself. 
inf and -inf are only close to themselves.\n\n" ], [ "cmath.isclose(cmath.e ** (1j * cmath.pi) + 1, 0, abs_tol=1e-9)", "_____no_output_____" ], [ "2.3.imag, 7 .denominator", "_____no_output_____" ], [ "cmath.inf * 0", "_____no_output_____" ], [ "_ == _", "_____no_output_____" ], [ "cmath.isclose(cmath.atan(cmath.inf), cmath.pi / 2)", "_____no_output_____" ] ], [ [ "## Spremnici", "_____no_output_____" ] ], [ [ "lista = [3, -2, 5.8, 2j, 'bla', [3, 5], {8}, print, ZeroDivisionError]\nlista", "_____no_output_____" ], [ "len(lista), lista[3], lista[-2] is lista[len(lista)-2], lista[~2]", "_____no_output_____" ], [ "lista[5][0], lista[4][2][0][0][0]", "_____no_output_____" ], [ "lista[:3], lista[3:], lista[-3:], lista[:-3]", "_____no_output_____" ], [ "lista[2:6], lista[1:-1], lista[7:7], lista[3:99]", "_____no_output_____" ], [ "lista, lista[::3], lista[1::3]", "_____no_output_____" ], [ "list('MatSoft')", "_____no_output_____" ], [ "set(_)", "_____no_output_____" ], [ "mat = set('matematički')", "_____no_output_____" ], [ "samoglasnici = set('aeiou')", "_____no_output_____" ], [ "mat & samoglasnici, mat - samoglasnici", "_____no_output_____" ], [ "mat | samoglasnici, mat ^ samoglasnici", "_____no_output_____" ], [ "len(samoglasnici) + len(mat) == len(mat | samoglasnici) + len(mat & samoglasnici) # FUI", "_____no_output_____" ], [ "samoglasnici <= mat, samoglasnici & mat < mat", "_____no_output_____" ], [ "lista.append(23)\nlista", "_____no_output_____" ], [ "lista.remove({8})\nlista.remove(2j)\nlista", "_____no_output_____" ], [ "del lista[3]\ndel lista[2:]\nlista", "_____no_output_____" ], [ "-2 in lista, 3 not in lista, 17 in lista, 117 not in lista", "_____no_output_____" ], [ "mat", "_____no_output_____" ], [ "mat |= set('01234')\nmat", "_____no_output_____" ], [ "mat.add('z')\nmat.remove('č')\nmat.discard('ž')\nmat", "_____no_output_____" ], [ "mat.isdisjoint(samoglasnici)", "_____no_output_____" ] ], [ [ "### Rječnici", "_____no_output_____" ] ], [ [ "boje = {'jabuka': 'crveno', 'kruška': 'žuto', 'limun': 'žuto'}\nboje['grožđe'] = 'plavo'\nlen(boje)", "_____no_output_____" ], [ "for voće, boja in boje.items(): print(f'{voće} je boje: {boja}')", "jabuka je boje: crveno\nkruška je boje: žuto\nlimun je boje: žuto\ngrožđe je boje: plavo\n" ], [ "del boje['limun']", "_____no_output_____" ], [ "for voće in boje:\n if voće != 'grožđe':\n boje[voće] = 'zeleno'", "_____no_output_____" ], [ "boje", "_____no_output_____" ], [ "{voće for voće in boje if boje[voće] == 'zeleno'}", "_____no_output_____" ] ], [ [ "## Funkcije", "_____no_output_____" ] ], [ [ "def potencije(x): return x ** 2, x ** 3, x ** 4\nkvadrat, kub, četvrta = potencije(3)\nkvadrat", "_____no_output_____" ], [ "from skimage import io, transform\ndef thumbnail(slika, širina=100, ime='thumb.png'):\n \"\"\"Proizvodi thumbnail za sliku, zadanog imena i širine (visina se određuje proporcionalno).\"\"\"\n izvorna_širina, izvorna_visina, *_ = slika.shape\n visina = izvorna_visina * širina // izvorna_širina\n io.imsave(ime, transform.resize(slika, (širina, visina), mode='constant'))", "_____no_output_____" ], [ "astro = data.astronaut()\nastro.shape", "_____no_output_____" ], [ "plt.imshow(astro)", "_____no_output_____" ], [ "io.imsave('astro.png', astro)", "_____no_output_____" ], [ "astro_s_diska = io.imread('astro.png')\nthumbnail(astro_s_diska)", "C:\\Users\\Veky\\Anaconda3\\lib\\site-packages\\skimage\\util\\dtype.py:122: UserWarning: Possible precision loss when converting from float64 to uint8\n .format(dtypeobj_in, 
dtypeobj_out))\n" ], [ "Image('thumb.png')", "_____no_output_____" ] ], [ [ "### Funkcije višeg reda", "_____no_output_____" ] ], [ [ "def linearna(a, b):\n def funkcija(x): return a*x + b\n return funkcija", "_____no_output_____" ], [ "f = linearna(a=1/2, b=3)", "_____no_output_____" ], [ "f, f.__code__.co_freevars, f.__code__.co_varnames, [c.cell_contents for c in f.__closure__]", "_____no_output_____" ], [ "f(20)", "_____no_output_____" ], [ "def komponiraj(*funkcije):\n def kompozicija(x):\n for f in reversed(funkcije):\n x = f(x)\n return x\n return kompozicija", "_____no_output_____" ], [ "f = komponiraj(lambda x: x+1, lambda x: x*2)", "_____no_output_____" ], [ "f(5)", "_____no_output_____" ], [ "from functools import partial", "_____no_output_____" ], [ "dvana = partial(pow, 2)", "_____no_output_____" ], [ "dvana(12)", "_____no_output_____" ], [ "komponiraj(dvana, dvana, f)", "_____no_output_____" ], [ "_(1)", "_____no_output_____" ] ], [ [ "## Grananja i petlje", "_____no_output_____" ] ], [ [ "if 2 < 1: print('Nešto je čudno')\nelif 2 == 1: print('Još čudnije')\nelse: print('Sve ok')", "Sve ok\n" ], [ "x = 1\nif 2 <= x < 5: print(f'{x} je između 2 i 5')\nelse: print(f'{x} nije između 2 i 5')", "1 nije između 2 i 5\n" ], [ "for i in range(23, 99, 7): print(i)", "23\n30\n37\n44\n51\n58\n65\n72\n79\n86\n93\n" ], [ "for riječ in 'Znanstvenici', 'vole', 'koristiti', 'Python': print(riječ)", "Znanstvenici\nvole\nkoristiti\nPython\n" ], [ "params = dict(p1=1, p2=2.879, p31=38)\nfor parametar, vrijednost in params.items():\n print(f'{parametar:>3} = {vrijednost:5.2f}')", " p1 = 1.00\n p2 = 2.88\np31 = 38.00\n" ], [ "{x**2: x for x in range(4, -3, -1)}", "_____no_output_____" ], [ "{slovo for slovo in 'Matematički softver'.lower()}", "_____no_output_____" ], [ "listalisti = [[], [8, 3, 5], [2, 1], [3]]\n[element for lista in listalisti for element in lista]", "_____no_output_____" ], [ "rezultat = []\nfor lista in listalisti:\n for element in lista:\n rezultat.append(element)\nrezultat", "_____no_output_____" ], [ "broj = 27\nwhile broj > 1:\n if broj % 2: broj = broj*3 + 1\n else: broj //= 2\n print(broj, end='\\t')", "82\t41\t124\t62\t31\t94\t47\t142\t71\t214\t107\t322\t161\t484\t242\t121\t364\t182\t91\t274\t137\t412\t206\t103\t310\t155\t466\t233\t700\t350\t175\t526\t263\t790\t395\t1186\t593\t1780\t890\t445\t1336\t668\t334\t167\t502\t251\t754\t377\t1132\t566\t283\t850\t425\t1276\t638\t319\t958\t479\t1438\t719\t2158\t1079\t3238\t1619\t4858\t2429\t7288\t3644\t1822\t911\t2734\t1367\t4102\t2051\t6154\t3077\t9232\t4616\t2308\t1154\t577\t1732\t866\t433\t1300\t650\t325\t976\t488\t244\t122\t61\t184\t92\t46\t23\t70\t35\t106\t53\t160\t80\t40\t20\t10\t5\t16\t8\t4\t2\t1\t" ] ], [ [ "## Interaktivnost", "_____no_output_____" ] ], [ [ "from ipywidgets import interact\nimport networkx as nx\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "def plot_random_graph(n, m, p, generator):\n nx.draw(generator(n, m, p))\n plt.show()", "_____no_output_____" ], [ "interact(plot_random_graph, n=(2, 30), m=(1, 10), p=(0, 1, 1e-3), generator={\n 'lobster': lambda n, m, p: nx.random_lobster (n, p, p / m),\n 'power law': lambda n, m, p: nx.powerlaw_cluster_graph (n, m, p),\n 'Newman-Watts-Strogatz': lambda n, m, p: nx.newman_watts_strogatz_graph(n, m, p),\n 'Erdős-Rényi': lambda n, m, p: nx.erdos_renyi_graph (n, p),\n});", "_____no_output_____" ], [ "nx.erdos_renyi_graph?", "_____no_output_____" ] ], [ [ "## Domaća zadaća (do idućeg predavanja)", "_____no_output_____" ], [ "* Otvoriti 
Github account i napraviti repozitorij \"Matematički softver\"\n* Skinuti i instalirati Anacondu (upute na početku ovog dokumenta)", "_____no_output_____" ], [ "### Zadaci za vježbu", "_____no_output_____" ], [ "1. Napišite funkciju koja prima $n$, a vraća listu neparnih brojeva od $1$ do (uključivo) $n$.\n2. Napišite funkciju koja rješava kvadratnu jednadžbu.\n3. Napišite funkciju `trapezint(f, n, a, b)` koja numerički računa integral funkcije $f$ na intervalu $[a,b]$, koristeći trapeznu formulu $$\\int_a^b f(x)\\,dx\\approx\\frac{h}{2}\\sum_{i=1}^n{\\bigl(f(x_{i-1})+f(x_i)\\bigr)}.$$\n4. Napišite funkciju za numeričko deriviranje oblika `diff(f, x, h=1e-6)`.", "_____no_output_____" ] ], [ [ "def neparni_do(n):\n return list(range(1, n+1, 2))", "_____no_output_____" ], [ "neparni_do(7)", "_____no_output_____" ], [ "def rq(a, b, c):\n D = b**2 - 4*a*c\n return (-b + D**.5) / (2*a), (-b - D**.5) / (2*a)", "_____no_output_____" ], [ "rq(1,1,1)", "_____no_output_____" ], [ "def trapezint(f, n, a, b):\n h = (b - a) / n\n xi = [a + i*h for i in range(n + 1)]\n s = sum(f(xi[i-1]) + f(xi[i]) for i in range(1, n+1))\n return h / 2 * s", "_____no_output_____" ], [ "trapezint(lambda x: x**3, 100, 0, 2)", "_____no_output_____" ], [ "def diff(f, x, h=1e-6):\n return (f(x+h)- f(x)) / h", "_____no_output_____" ], [ "from math import cos, radians", "_____no_output_____" ], [ "diff(cos, radians(45))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3f8903966a7d793aba9957c546aa4a55ccd8dd
28,117
ipynb
Jupyter Notebook
notebooks/profiling/window_utils.ipynb
mufernando/scikit-allel
79980679b6a8676a678b72f85bfc99afd71b82d5
[ "MIT" ]
229
2015-03-18T10:30:33.000Z
2022-03-16T00:04:43.000Z
notebooks/profiling/window_utils.ipynb
mufernando/scikit-allel
79980679b6a8676a678b72f85bfc99afd71b82d5
[ "MIT" ]
327
2015-02-02T16:22:09.000Z
2022-03-10T19:00:40.000Z
notebooks/profiling/window_utils.ipynb
mufernando/scikit-allel
79980679b6a8676a678b72f85bfc99afd71b82d5
[ "MIT" ]
51
2015-11-03T13:32:55.000Z
2022-03-07T11:57:48.000Z
52.851504
16,579
0.744674
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb3f8d3cdb4949b537a196c6086a34c1ef7b506b
9,049
ipynb
Jupyter Notebook
problem_5.ipynb
johangenis/problems_vs_algorithms
9925d7319de849fd7814cf87050232c22d8c2a96
[ "MIT" ]
null
null
null
problem_5.ipynb
johangenis/problems_vs_algorithms
9925d7319de849fd7814cf87050232c22d8c2a96
[ "MIT" ]
null
null
null
problem_5.ipynb
johangenis/problems_vs_algorithms
9925d7319de849fd7814cf87050232c22d8c2a96
[ "MIT" ]
null
null
null
35.073643
498
0.547243
[ [ [ "# Building a Trie in Python\n\nBefore we start let us reiterate the key components of a Trie or Prefix Tree. A trie is a tree-like data structure that stores a dynamic set of strings. Tries are commonly used to facilitate operations like predictive text or autocomplete features on mobile phones or web search.\n\nBefore we move into the autocomplete function we need to create a working trie for storing strings. We will create two classes:\n* A `Trie` class that contains the root node (empty string)\n* A `TrieNode` class that exposes the general functionality of the Trie, like inserting a word or finding the node which represents a prefix.\n\nGive it a try by implementing the `TrieNode` and `Trie` classes below!", "_____no_output_____" ] ], [ [ "## Represents a single node in the Trie\nclass TrieNode:\n def __init__(self, end_of_word=False):\n # Initialize this node in the Trie\n\n # Indicates whether the string ends here is a valid word\n self.end_of_word = end_of_word\n\n # A dictionary to store the possible characters in this node\n # Dictionary key: character (e.g. a)\n # Dictionary value: pointer to child node\n self.char_dict = dict()\n\n def insert(self, char):\n # Add a child node in this Trie\n sub_char_node = TrieNode()\n self.char_dict[char] = sub_char_node\n\n return sub_char_node\n \n## The Trie itself containing the root node and insert/find functions\nclass Trie:\n def __init__(self):\n # Initialize this Trie (add a root node)\n root_node = TrieNode()\n self.root = root_node\n\n def insert(self, word):\n # Add a word to the Trie\n\n # Split the word into a seq of chars and build the corresponding TrieNodes\n cur_node = self.root\n\n for char in word:\n if char in cur_node.char_dict:\n cur_node = cur_node.char_dict[char]\n else:\n new_child_node = cur_node.insert(char)\n cur_node = new_child_node\n\n # End of word, set end_of_word property to True\n cur_node.end_of_word = True\n\n def find(self, prefix):\n # Find the Trie node that represents this prefix\n\n cur_node = self.root\n # Traverse the Trie tree base on the character sequence in the prefix\n for char in prefix:\n if char in cur_node.char_dict:\n cur_node = cur_node.char_dict[char]\n else:\n return None\n\n return cur_node\n", "_____no_output_____" ] ], [ [ "# Finding Suffixes\n\nNow that we have a functioning Trie, we need to add the ability to list suffixes to implement our autocomplete feature. To do that, we need to implement a new function on the `TrieNode` object that will return all complete word suffixes that exist below it in the trie. For example, if our Trie contains the words `[\"fun\", \"function\", \"factory\"]` and we ask for suffixes from the `f` node, we would expect to receive `[\"un\", \"unction\", \"actory\"]` back from `node.suffixes()`.\n\nUsing the code you wrote for the `TrieNode` above, try to add the suffixes function below. (Hint: recurse down the trie, collecting suffixes as you go.)", "_____no_output_____" ] ], [ [ "class TrieNode:\n def __init__(self, end_of_word=False):\n # Initialize this node in the Trie\n\n # Indicates whether the string ends here is a valid word\n self.end_of_word = end_of_word\n\n # A dictionary to store the possible characters in this node\n # Dictionary key: character (e.g. 
a)\n # Dictionary value: pointer to child node\n self.char_dict = dict()\n\n def insert(self, char):\n # Add a child node in this Trie\n sub_char_node = TrieNode()\n self.char_dict[char] = sub_char_node\n\n return sub_char_node\n\n def suffixes(self, suffix=''):\n # Recursive function that collects the suffix for\n # all complete words below this point\n output_str_list = list()\n\n def find_suffix(node, output_str):\n\n # If end_of_word at this node is true, then add the suffix to result list\n if node.end_of_word:\n output_str_list.append(output_str)\n\n for char in node.char_dict:\n temp_output_str = output_str + char\n find_suffix(node.char_dict[char], temp_output_str)\n\n find_suffix(self, \"\")\n\n return output_str_list\n", "_____no_output_____" ] ], [ [ "# Testing it all out\n\nRun the following code to add some words to your trie and then use the interactive search box to see what your code returns.", "_____no_output_____" ] ], [ [ "MyTrie = Trie()\nwordList = [\n \"ant\", \"anthology\", \"antagonist\", \"antonym\", \n \"fun\", \"function\", \"factory\", \n \"trie\", \"trigger\", \"trigonometry\", \"tripod\"\n]\nfor word in wordList:\n MyTrie.insert(word)\n \nnode = MyTrie.find('t')\nprint(node.suffixes())", "['rie', 'rigger', 'rigonometry', 'ripod']\n" ], [ "from ipywidgets import widgets\nfrom IPython.display import display\nfrom ipywidgets import interact\ndef f(prefix):\n if prefix != '':\n prefixNode = MyTrie.find(prefix)\n if prefixNode:\n print('\\n'.join(prefixNode.suffixes()))\n else:\n print(prefix + \" not found\")\n else:\n print('')\ninteract(f,prefix='');", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb3fb07790d001105671a93e389fb736a7a40878
67,658
ipynb
Jupyter Notebook
single-cell/covid-19-atlases/Covid-19-Atlas-E2E-2.ipynb
mzager/dv-pipelines
3356753cc56a5298bb075f12681f9282d8f08658
[ "MIT" ]
3
2020-02-24T21:08:11.000Z
2020-05-19T18:26:01.000Z
single-cell/covid-19-atlases/Covid-19-Atlas-E2E-2.ipynb
mzager/dv-pipelines
3356753cc56a5298bb075f12681f9282d8f08658
[ "MIT" ]
null
null
null
single-cell/covid-19-atlases/Covid-19-Atlas-E2E-2.ipynb
mzager/dv-pipelines
3356753cc56a5298bb075f12681f9282d8f08658
[ "MIT" ]
2
2020-01-04T00:23:07.000Z
2020-02-26T17:54:34.000Z
37.629588
20,497
0.654113
[ [ [ "import argparse\nimport logging\nfrom operator import mul\nimport time\nimport os\n\nimport pubweb.singlecell # import AnnDataSparse\nfrom pubweb.hdf5 import Hdf5\nfrom pubweb.commands.convert.singlecell.anndata import ImportAnndata\nfrom pubweb.commands.convert.singlecell.cellranger import ImportCellRanger\nfrom pubweb.commands.validate.dimensions import ValidateDimensions\nfrom pubweb.commands.annotate.geneid import AnnotateGeneId\nfrom pubweb.commands.annotate.geneset import AnnotateGeneset\nfrom pubweb.commands.export.lists import ExportLists\nfrom pubweb.commands.export.attributes import ExportAttributes\nfrom pubweb.commands.export.tables import ExportTables\nfrom pubweb.commands.export.projections import ExportProjections\nfrom pubweb.commands.export.spatial import ExportSpatial\nfrom pubweb.commands.export.matrix_sparse import ExportMatrixSparse\nfrom pubweb.commands.export.matrix_dense import ExportMatrixDense\nfrom pubweb.commands.summarize.genes import SummarizeGenes\nfrom pubweb.commands.summarize.genemap import SummarizeGeneMap\nfrom pubweb.commands.summarize.colors import SummarizeColors\nfrom pubweb.commands.summarize.manifest import SummerizeManifest\n", "_____no_output_____" ], [ "import importlib\n\nimportlib.reload(pubweb.singlecell)\nimportlib.reload(pubweb.hdf5)\nimportlib.reload(pubweb.commands.convert.singlecell.anndata)\nimportlib.reload(pubweb.commands.convert.singlecell.cellranger)\nimportlib.reload(pubweb.commands.validate.dimensions)\nimportlib.reload(pubweb.commands.annotate.geneid)\nimportlib.reload(pubweb.commands.annotate.geneset)\nimportlib.reload(pubweb.commands.export)\nimportlib.reload(pubweb.commands.export.lists)\nimportlib.reload(pubweb.commands.export.attributes)\nimportlib.reload(pubweb.commands.export.tables)\nimportlib.reload(pubweb.commands.export.projections)\nimportlib.reload(pubweb.commands.export.spatial)\nimportlib.reload(pubweb.commands.export.matrix_sparse)\nimportlib.reload(pubweb.commands.export.matrix_dense)\nimportlib.reload(pubweb.commands.summarize.genes)\nimportlib.reload(pubweb.commands.summarize.genemap)\nimportlib.reload(pubweb.commands.summarize.colors)\nimportlib.reload(pubweb.commands.summarize.manifest)\n", "_____no_output_____" ], [ "logging.basicConfig(level='DEBUG')", "_____no_output_____" ], [ "datasetName='lung-upper-airway-h1299'\ninputFile = '/data/notebooks/input/convert.hdf5'\noutputFolder = '/data/notebooks/pubweb'\nspecies = 'human'\noverwriteHdf5 = True\npython_wd = '/opt/pubweb'\n", "_____no_output_____" ], [ "#dir(pubweb.singlecell)", "_____no_output_____" ] ], [ [ "pubweb.singlecell.AnnDataSparse(\n inputFile=inputFile,\n outputFolder=outputFolder,\n datasetName=datasetName,\n species=species)", "_____no_output_____" ] ], [ [ "# anndatasparse\noutputFile = f'{outputFolder}/pubweb.hdf5'\nif os.path.exists(outputFile) and overwriteHdf5:\n os.remove(outputFile)\nhdf5 = Hdf5.load(outputFile, \"a\")", "_____no_output_____" ], [ "hdf5.uri", "_____no_output_____" ], [ "%time hdf5 | ImportAnndata(inputFile, datasetName)\n# 345", "CPU times: user 464 ms, sys: 6.55 s, total: 7.01 s\nWall time: 6.97 s\n" ], [ "hdf5.getDatasets()", "_____no_output_____" ], [ "hdf5.h5py['pubweb/lung-upper-airway-h1299/matrix']", "_____no_output_____" ], [ "%time hdf5 | AnnotateGeneId(species=species)\n# 1min28s", "INFO:root:AnnotateGeneId: pubweb/lung-upper-airway-h1299/features/gene\n" ], [ "# save hdf5_geneid\nprint(type(hdf5))", "<class 'pubweb.hdf5.LocalHdf5'>\n" ], [ "hdf5.getDatasetsWithPath('pubweb/lung-upper-airway-h1299')", 
"_____no_output_____" ], [ "hdf5.getDatasets()", "_____no_output_____" ], [ "%time hdf5 | ExportMatrixDense(outputFolder)\n# 14.1s", "Export Matrix\nWriting cols 0 to 100\nWriting cols 100 to 200\nWriting cols 200 to 300\nWriting cols 300 to 400\nWriting cols 400 to 500\nWriting cols 500 to 600\nWriting cols 600 to 700\nWriting cols 700 to 800\nWriting cols 800 to 900\nWriting cols 900 to 1000\nWriting cols 1000 to 1100\nWriting cols 1100 to 1200\nWriting cols 1200 to 1300\nWriting cols 1300 to 1400\nWriting cols 1400 to 1500\nWriting cols 1500 to 1600\nWriting cols 1600 to 1700\nWriting cols 1700 to 1800\nWriting cols 1800 to 1900\nWriting cols 1900 to 2000\nWriting cols 2000 to 2100\nWriting cols 2100 to 2200\nWriting cols 2200 to 2300\nWriting cols 2300 to 2400\nWriting cols 2400 to 2500\nWriting cols 2500 to 2600\nWriting cols 2600 to 2700\nWriting cols 2700 to 2800\nWriting cols 2800 to 2900\nWriting cols 2900 to 3000\nWriting cols 3000 to 3100\nWriting cols 3100 to 3200\nWriting cols 3200 to 3300\nWriting cols 3300 to 3400\nWriting cols 3400 to 3500\nWriting cols 3500 to 3600\nWriting cols 3600 to 3700\nWriting cols 3700 to 3800\nWriting cols 3800 to 3900\nWriting cols 3900 to 4000\nWriting cols 4000 to 4100\nWriting cols 4100 to 4200\nWriting cols 4200 to 4300\nWriting cols 4300 to 4400\nWriting cols 4400 to 4500\nWriting cols 4500 to 4600\nWriting cols 4600 to 4700\nWriting cols 4700 to 4800\nWriting cols 4800 to 4900\nWriting cols 4900 to 5000\nWriting cols 5000 to 5100\nWriting cols 5100 to 5200\nWriting cols 5200 to 5300\nWriting cols 5300 to 5400\nWriting cols 5400 to 5500\nWriting cols 5500 to 5600\nWriting cols 5600 to 5700\nWriting cols 5700 to 5800\nWriting cols 5800 to 5900\nWriting cols 5900 to 6000\nWriting cols 6000 to 6100\nWriting cols 6100 to 6200\nWriting cols 6200 to 6300\nWriting cols 6300 to 6400\nWriting cols 6400 to 6500\nWriting cols 6500 to 6600\nWriting cols 6600 to 6700\nWriting cols 6700 to 6800\nWriting cols 6800 to 6900\nWriting cols 6900 to 7000\nWriting cols 7000 to 7100\nWriting cols 7100 to 7200\nWriting cols 7200 to 7300\nWriting cols 7300 to 7400\nWriting cols 7400 to 7500\nWriting cols 7500 to 7600\nWriting cols 7600 to 7700\nWriting cols 7700 to 7800\nWriting cols 7800 to 7900\nWriting cols 7900 to 8000\nWriting cols 8000 to 8100\nWriting cols 8100 to 8200\nWriting cols 8200 to 8300\nWriting cols 8300 to 8400\nWriting cols 8400 to 8500\nWriting cols 8500 to 8600\nWriting cols 8600 to 8700\nWriting cols 8700 to 8800\nWriting cols 8800 to 8900\nWriting cols 8900 to 9000\nWriting cols 9000 to 9100\nWriting cols 9100 to 9200\nWriting cols 9200 to 9300\nWriting cols 9300 to 9400\nWriting cols 9400 to 9500\nWriting cols 9500 to 9600\nWriting cols 9600 to 9700\nWriting cols 9700 to 9800\nWriting cols 9800 to 9900\nWriting cols 9900 to 10000\nWriting cols 10000 to 10100\nWriting cols 10100 to 10200\nWriting cols 10200 to 10300\nWriting cols 10300 to 10400\nWriting cols 10400 to 10500\nWriting cols 10500 to 10600\nWriting cols 10600 to 10700\nWriting cols 10700 to 10800\nWriting cols 10800 to 10900\nWriting cols 10900 to 11000\nWriting cols 11000 to 11100\nWriting cols 11100 to 11200\nWriting cols 11200 to 11300\nWriting cols 11300 to 11400\nWriting cols 11400 to 11500\nWriting cols 11500 to 11600\nWriting cols 11600 to 11700\nWriting cols 11700 to 11800\nWriting cols 11800 to 11900\nWriting cols 11900 to 12000\nWriting cols 12000 to 12100\nWriting cols 12100 to 12200\nWriting cols 12200 to 12300\nWriting cols 12300 to 12400\nWriting cols 12400 to 
12500\nWriting cols 12500 to 12600\nWriting cols 12600 to 12700\nWriting cols 12700 to 12800\nWriting cols 12800 to 12900\nWriting cols 12900 to 13000\nWriting cols 13000 to 13100\nWriting cols 13100 to 13200\nWriting cols 13200 to 13300\nWriting cols 13300 to 13400\nWriting cols 13400 to 13500\nWriting cols 13500 to 13600\nWriting cols 13600 to 13700\nWriting cols 13700 to 13800\nWriting cols 13800 to 13900\nWriting cols 13900 to 14000\nWriting cols 14000 to 14100\nWriting cols 14100 to 14200\nWriting cols 14200 to 14300\nWriting cols 14300 to 14400\nWriting cols 14400 to 14500\nWriting cols 14500 to 14600\nWriting cols 14600 to 14700\nWriting cols 14700 to 14800\nWriting cols 14800 to 14900\nWriting cols 14900 to 15000\nWriting cols 15000 to 15100\nWriting cols 15100 to 15200\nWriting cols 15200 to 15300\nWriting cols 15300 to 15400\nWriting cols 15400 to 15500\nWriting cols 15500 to 15600\nWriting cols 15600 to 15700\nWriting cols 15700 to 15800\nWriting cols 15800 to 15900\nWriting cols 15900 to 16000\nWriting cols 16000 to 16100\nWriting cols 16100 to 16200\nWriting cols 16200 to 16300\nWriting cols 16300 to 16400\nWriting cols 16400 to 16500\nWriting cols 16500 to 16600\nWriting cols 16600 to 16700\nWriting cols 16700 to 16800\nWriting cols 16800 to 16900\nWriting cols 16900 to 17000\nWriting cols 17000 to 17100\nWriting cols 17100 to 17200\nWriting cols 17200 to 17300\nWriting cols 17300 to 17400\nWriting cols 17400 to 17500\nWriting cols 17500 to 17600\nWriting cols 17600 to 17700\nWriting cols 17700 to 17800\nWriting cols 17800 to 17900\nWriting cols 17900 to 18000\nWriting cols 18000 to 18100\nWriting cols 18100 to 18200\nWriting cols 18200 to 18300\nWriting cols 18300 to 18400\nWriting cols 18400 to 18500\nWriting cols 18500 to 18600\nWriting cols 18600 to 18700\nWriting cols 18700 to 18800\nWriting cols 18800 to 18900\nWriting cols 18900 to 19000\nWriting cols 19000 to 19100\nWriting cols 19100 to 19200\nWriting cols 19200 to 19300\nWriting cols 19300 to 19400\nWriting cols 19400 to 19500\nWriting cols 19500 to 19600\nWriting cols 19600 to 19700\nWriting cols 19700 to 19800\nWriting cols 19800 to 19900\nWriting cols 19900 to 20000\nWriting cols 20000 to 20100\nWriting cols 20100 to 20200\nWriting cols 20200 to 20300\nWriting cols 20300 to 20400\nWriting cols 20400 to 20500\nWriting cols 20500 to 20600\nWriting cols 20600 to 20700\nWriting cols 20700 to 20800\nWriting cols 20800 to 20900\nWriting cols 20900 to 21000\nWriting cols 21000 to 21100\nWriting cols 21100 to 21200\nWriting cols 21200 to 21300\nWriting cols 21300 to 21400\nWriting cols 21400 to 21500\nWriting cols 21500 to 21600\nWriting cols 21600 to 21700\nWriting cols 21700 to 21800\nWriting cols 21800 to 21900\nWriting cols 21900 to 22000\nWriting cols 22000 to 22100\nWriting cols 22100 to 22200\nWriting cols 22200 to 22300\nWriting cols 22300 to 22400\nWriting cols 22400 to 22500\nWriting cols 22500 to 22600\nWriting cols 22600 to 22700\nWriting cols 22700 to 22800\nWriting cols 22800 to 22900\nWriting cols 22900 to 23000\nWriting cols 23000 to 23100\nWriting cols 23100 to 23200\nWriting cols 23200 to 23300\nWriting cols 23300 to 23400\nWriting cols 23400 to 23500\nWriting cols 23500 to 23600\nWriting cols 23600 to 23700\nWriting cols 23700 to 23800\nWriting cols 23800 to 23900\nWriting cols 23900 to 24000\nWriting cols 24000 to 24100\nWriting cols 24100 to 24200\nWriting cols 24200 to 24300\nWriting cols 24300 to 24400\nWriting cols 24400 to 24500\nWriting cols 24500 to 24600\nWriting cols 24600 to 24700\nWriting 
cols 24700 to 24800\nWriting cols 24800 to 24900\nWriting cols 24900 to 25000\nWriting cols 25000 to 25100\nWriting cols 25100 to 25200\nWriting cols 25200 to 25300\nWriting cols 25300 to 25400\nWriting cols 25400 to 25500\nWriting cols 25500 to 25600\nWriting cols 25600 to 25700\nWriting cols 25700 to 25800\nWriting cols 25800 to 25900\nWriting cols 25900 to 26000\nWriting cols 26000 to 26100\nWriting cols 26100 to 26200\nWriting cols 26200 to 26300\nWriting cols 26300 to 26400\nWriting cols 26400 to 26500\nWriting cols 26500 to 26600\nWriting cols 26600 to 26700\nWriting cols 26700 to 26800\nWriting cols 26800 to 26900\nWriting cols 26900 to 27000\nWriting cols 27000 to 27072\nCPU times: user 837 ms, sys: 4.18 s, total: 5.02 s\nWall time: 14.1 s\n" ], [ "%time hdf5 | ExportProjections(outputFolder)\n# 3min3s", "Export Dataset Projections\nCPU times: user 164 µs, sys: 282 µs, total: 446 µs\nWall time: 429 µs\n" ], [ "%time hdf5 | ExportTables(outputFolder)\n# 426us", "Export Dataset Tables\nCPU times: user 424 µs, sys: 0 ns, total: 424 µs\nWall time: 408 µs\n" ], [ "%time hdf5 | ExportLists(outputFolder)\n#480us", "Export Dataset Lists\nCPU times: user 154 µs, sys: 264 µs, total: 418 µs\nWall time: 401 µs\n" ], [ "%time hdf5 | ExportAttributes(outputFolder)\n# 2min 7 s", "DEBUG:root:data has shape (81736,)\nDEBUG:root:data has shape (81736,)\n" ], [ "%time hdf5 | SummarizeColors(outputFolder)\n# 59.4ms", "INFO:root:Reading from /data/notebooks/pubweb/features/pw_symbol/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/pw_ensembl/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/gene/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/vst_variance_expected/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/vst_mean/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/pw_hcid/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/vst_variable/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/Selected/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/vst_variance/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/vst_variance_standardized/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/features/pw_entrez/metadata.json for /data/notebooks/pubweb/summary/color/features\nINFO:root:Reading from /data/notebooks/pubweb/observations/infect/metadata.json for /data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/id/metadata.json for /data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/nFeature_RNA/metadata.json for /data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/sample_name/metadata.json for /data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/method/metadata.json for 
/data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/nCount_Unspliced/metadata.json for /data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/nCount_RNA/metadata.json for /data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/strain/metadata.json for /data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/orig_ident/metadata.json for /data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/nFeature_Unspliced/metadata.json for /data/notebooks/pubweb/summary/color/observations\nINFO:root:Reading from /data/notebooks/pubweb/observations/sample_id/metadata.json for /data/notebooks/pubweb/summary/color/observations\n" ], [ "%time hdf5 | SummerizeManifest(outputFolder)\n# 4.2ms", "matrix: /data/notebooks/pubweb/matrix\nplaceholder\nCPU times: user 79 µs, sys: 2.94 ms, total: 3.02 ms\nWall time: 2.37 ms\n" ] ] ]
[ "code", "raw", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "raw" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb3fb47409c54f6604d312797e1993d597615f2d
7,246
ipynb
Jupyter Notebook
1-multiple-cogs.ipynb
pangeo-data/cog-best-practices
3549a54e2c2d1072af758774ed7af530a7a20d61
[ "BSD-3-Clause" ]
55
2020-11-05T22:00:46.000Z
2022-03-31T11:19:43.000Z
1-multiple-cogs.ipynb
pangeo-data/cog-best-practices
3549a54e2c2d1072af758774ed7af530a7a20d61
[ "BSD-3-Clause" ]
11
2020-11-05T02:33:03.000Z
2021-06-18T16:09:54.000Z
1-multiple-cogs.ipynb
pangeo-data/cog-best-practices
3549a54e2c2d1072af758774ed7af530a7a20d61
[ "BSD-3-Clause" ]
8
2020-11-05T22:07:21.000Z
2021-06-18T14:05:21.000Z
29.942149
333
0.589567
[ [ [ "# Best-practices for Cloud-Optimized Geotiffs\n\n**Part 2. Multiple COGs**\n\nThis notebook goes over ways to construct a multidimensional xarray DataArray from many 2D COGS", "_____no_output_____" ] ], [ [ "import dask\nimport s3fs\nimport intake\nimport os\nimport xarray as xr\nimport pandas as pd", "_____no_output_____" ], [ "# use the same GDAL environment settings as we did for the single COG case\nenv = dict(GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR', \n AWS_NO_SIGN_REQUEST='YES',\n GDAL_MAX_RAW_BLOCK_CACHE_SIZE='200000000',\n GDAL_SWATH_SIZE='200000000',\n VSI_CURL_CACHE_SIZE='200000000')\nos.environ.update(env)", "_____no_output_____" ], [ "# set up a connection with credentials and other settings\ns3 = s3fs.S3FileSystem(anon=True)\nobjects = s3.ls('sentinel-s1-rtc-indigo/tiles/RTC/1/IW/10/T/ET/2020/')\nimages = ['s3://' + obj + '/Gamma0_VV.tif' for obj in objects]\nprint(len(images))\nimages[:6] #january 2020 scenes", "_____no_output_____" ] ], [ [ "## GDAL VRT\n\nA GDAL VRT file is an XML format that can group together many separate files into separate bands. It's common to create such a file with a the GDAL command line tool `gdalbuildvrt`, illustrated below:", "_____no_output_____" ] ], [ [ "#step 1) write a file list that points to the data. GDAL requires special prefixes for this /vsis3/ or /vsicurl/\nwith open('files.txt', 'w') as f:\n lines = [x.replace('s3://', '/vsis3/') + '\\n' for x in images[:6]]\n f.writelines(lines)", "_____no_output_____" ], [ "%%time\n# step 2) create a VRT file\n!gdalbuildvrt stack.vrt -separate -input_file_list files.txt ", "_____no_output_____" ], [ "%%time\n# step 4) open with xarray\nchunks=dict(band=1, x=2745, y=2745)\nda = xr.open_rasterio('stack.vrt', chunks=chunks)\nda", "_____no_output_____" ], [ "# step 5) optionally modify coordinates (e.g. time dimension extracted from file name)\nda = da.rename({'band':'time'})\nda['time'] = [pd.to_datetime(x[60:68]) for x in images[:6]]", "_____no_output_____" ] ], [ [ "#### Recap\n\n1. `xr.open_rasterio(stack.vrt)` stores band coordinates as sequential integers (we lose file name and metadata from each individual COG, so it's common to alter the coordinates after opening the dataset)\n2. data is tied to a reference to a local file ('stack.vrt'), which can cause problems with distributed computing if you don't have access to the local filesystem", "_____no_output_____" ], [ "## intake-xarray\n\n[intake-xarray](https://github.com/intake/intake-xarray) is a plugin for the intake library. It uses fsspec/s3fs under the hood to facilitate loading data into python objects. the function `intake.open_rasterio()` accepts a list of paths. it returns an intake object with a `to_dask()` function that returns an xarray DataArray", "_____no_output_____" ] ], [ [ "%%time\n# ~ 1s for 6 files\n\n# this loads the image ID into xarray's band coordinates. \n\npattern = 's3://sentinel-s1-rtc-indigo/tiles/RTC/1/IW/10/T/ET/2020/{band}/Gamma0_VV.tif'\nchunks=dict(band=1, x=2745, y=2745)\nsources = intake.open_rasterio(images[:6], chunks=chunks, path_as_pattern=pattern, concat_dim='band')\nda = sources.to_dask() \nda", "_____no_output_____" ] ], [ [ "#### recap:\n\n* This is a convient way to avoid constructing a VRT and load a bunch of COGs. It works well as long as the COG urls follow a distinct pattern. Metadata is also lost (we have attributes from the first COG, not others)", "_____no_output_____" ], [ "## Custom\n\nYou can also just use xarray and dask to construct a larger datacube from many COGS. 
", "_____no_output_____" ] ], [ [ "%%time\n\n# 4 - 8 s\n# Load all the images\n\nchunks=dict(band=1, x=2745, y=2745)\ndataArrays = [xr.open_rasterio(url, chunks=chunks) for url in images]\n\n# note use of join='override' b/c we know these COGS have the same coordinates\nda = xr.concat(dataArrays, dim='band', join='override', combine_attrs='drop')\nda = da.rename({'band':'time'})\nda['time'] = [pd.to_datetime(x[60:68]) for x in images]\nda", "_____no_output_____" ] ], [ [ "#### recap:\n\n* The cell above is essentially a for-loop that iterates over each COG in sequence. 50ms-200ms * 80 ~ 4-16 seconds. The next notebook will look at using Dask to speed things up by opening the files in parallel.", "_____no_output_____" ], [ "## Visualize\n\nHere is an example of interactive visualization again using hvplot. Since we're using full resolution arrays it's key to set the `rasterize=True` keyword argument. That uses the datashader library to pre-render images before sending them to the browser.\n\nThis is extremely powerful because, resolution updates as you zoom in, and you can scrub through the data cube with an interactive slider widget", "_____no_output_____" ] ], [ [ "import hvplot.xarray\nda.hvplot.image(rasterize=True, aspect='equal', cmap='gray', clim=(0,0.4))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb3fc348fd4a870670dbecfd21e88d77cde6bbfb
20,602
ipynb
Jupyter Notebook
B_Submissions_Kopuru_competition/2021-05-12_submit/batch_zzDummyFlow/workerbee03_NEST.ipynb
IEwaspbusters/KopuruVespaCompetitionIE
1630d5c74fa1c901669b62a0f3b6870bc1f3caf4
[ "MIT" ]
1
2021-09-22T15:47:01.000Z
2021-09-22T15:47:01.000Z
B_Submissions_Kopuru_competition/2021-05-12_submit/batch_zzDummyFlow/workerbee03_NEST.ipynb
IEwaspbusters/KopuruVespaCompetitionIE
1630d5c74fa1c901669b62a0f3b6870bc1f3caf4
[ "MIT" ]
null
null
null
B_Submissions_Kopuru_competition/2021-05-12_submit/batch_zzDummyFlow/workerbee03_NEST.ipynb
IEwaspbusters/KopuruVespaCompetitionIE
1630d5c74fa1c901669b62a0f3b6870bc1f3caf4
[ "MIT" ]
null
null
null
32.701587
99
0.37176
[ [ [ "## import modules", "_____no_output_____" ] ], [ [ "# Data manipulation and scientific computing -----------------------------------\nimport pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "## get the data", "_____no_output_____" ] ], [ [ "QUEEN = pd.read_csv('../../../Input_open_data/ds02_datos-nidos-avispa-asiatica.csv', sep=',')", "_____no_output_____" ], [ "QUEEN.head()", "_____no_output_____" ], [ "GEO = pd.read_csv('./WBds01_GEO.csv', sep=',')\nGEO.head()", "_____no_output_____" ], [ "METEO = pd.read_csv('./WBds02_METEO.csv', sep=',')\nMETEO.head()", "_____no_output_____" ] ], [ [ "## do the python magic", "_____no_output_____" ] ], [ [ "QUEENtrain = QUEEN.loc[:,['HELBIDEA/DIRECCION']]", "_____no_output_____" ], [ "QUEENpredict = QUEEN .loc[:,['ESPEZIE/ESPECIE']]", "_____no_output_____" ] ], [ [ "## Export the data", "_____no_output_____" ] ], [ [ "QUEENtrain.to_csv('WBds03_QUEENtrain.csv')", "_____no_output_____" ], [ "QUEENpredict.to_csv('WBds03_QUEENpredict.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb3fc6ce3a35be666d74d2e697325e80570e7929
399,534
ipynb
Jupyter Notebook
tutorials/kats_201_forecasting.ipynb
goncaloperes/Kats
8a1de1aa4461ced8116b9307895e4e385f26b72a
[ "MIT" ]
null
null
null
tutorials/kats_201_forecasting.ipynb
goncaloperes/Kats
8a1de1aa4461ced8116b9307895e4e385f26b72a
[ "MIT" ]
null
null
null
tutorials/kats_201_forecasting.ipynb
goncaloperes/Kats
8a1de1aa4461ced8116b9307895e4e385f26b72a
[ "MIT" ]
null
null
null
416.615224
125,264
0.933012
[ [ [ "# Kats 201 - Forecasting with Kats\n\n\nThis tutorial will introduce time series modeling and forecasting with Kats. We will show you how to build forecasts with different Kats models and how to do parameter tuning and backtesting using Kats. The complete table of contents for Kats 201 is as follows:\n\n1. Forecasting with Kats Base Models \n 1.1 SARIMA \n 1.2 Prophet \n 1.3 Holt-Winters \n2. Forecasting with Kats Ensemble Model\n3. Multivariate Model Forecasting\n4. Hyperparameter Tuning\n5. Backtesting", "_____no_output_____" ], [ "**Note:** We provide two types of tutorial notebooks\n- **Kats 101**, basic data structure and functionalities in Kats \n- **Kats 20x**, advanced topics, including advanced forecasting techniques, advanced detection algorithms, `TsFeatures`, meta-learning, etc. ", "_____no_output_____" ], [ "# 1. Forecasting with Kats Base Models\n\n\n\nIn this part, we will demonstrate the forecasting workflow with the following models with `air_passengers` data set:\n1. SARIMA\n2. Prophet,\n3. Holt-Winters\n", "_____no_output_____" ], [ "We begin by loading the `air_passengers` data set into a `TimeSeriesData` object. This code is essentially the same as the code in our introduction to the `TimeSeriesData` object in the Kats 101 Tutorial.", "_____no_output_____" ] ], [ [ "%%capture\n# For Google Colab:\n!pip install kats\n!wget https://raw.githubusercontent.com/facebookresearch/Kats/main/kats/data/air_passengers.csv\n!wget https://raw.githubusercontent.com/facebookresearch/Kats/main/kats/data/multi_ts.csv", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport warnings\n\nwarnings.simplefilter(action='ignore')\nsys.path.append(\"../\")\n\nfrom kats.consts import TimeSeriesData\n\ntry: # If running on Jupyter\n air_passengers_df = pd.read_csv(\"../kats/data/air_passengers.csv\")\nexcept FileNotFoundError: # If running on colab\n air_passengers_df = pd.read_csv(\"air_passengers.csv\")\n\n# Note: If the column holding the time values is not called time, you will want to specify the name of this column.\nair_passengers_df.columns = [\"time\", \"value\"]\nair_passengers_ts = TimeSeriesData(air_passengers_df)", "_____no_output_____" ] ], [ [ "Because each of our time series models follow the `sklearn` model API pattern, the code for each of the next three examples is quite similar. We initialize the model with its parameters and then call the `fit` and `predict` methods. The only difference between each of these examples are the model-specific parameters. We can then use the `plot` method to visualize our forecast in each case.\n\nThe values we choose for each of our paremeters in these examples are basically arbitrary. 
Later in this tutorial, we will show you how to pick the right parameters for a model in Kats using hyperparameter tuning.", "_____no_output_____" ], [ "## 1.1 SARIMA", "_____no_output_____" ] ], [ [ "from kats.models.sarima import SARIMAModel, SARIMAParams\nwarnings.simplefilter(action='ignore')\n\n# create SARIMA param class\nparams = SARIMAParams(\n p = 2, \n d=1, \n q=1, \n trend = 'ct', \n seasonal_order=(1,0,1,12)\n )\n\n# initiate SARIMA model\nm = SARIMAModel(data=air_passengers_ts, params=params)\n\n# fit SARIMA model\nm.fit()\n\n# generate forecast values\nfcst = m.predict(\n steps=30, \n freq=\"MS\"\n )\n\n# make plot to visualize\nm.plot()", "_____no_output_____" ] ], [ [ "## 1.2 Prophet\nNote: This example requires `fbprophet` be installed, for example with `pip install kats[prophet]` or `pip install kats[all]`\n", "_____no_output_____" ] ], [ [ "# import the param and model classes for Prophet model\nfrom kats.models.prophet import ProphetModel, ProphetParams\n\n# create a model param instance\nparams = ProphetParams(seasonality_mode='multiplicative') # additive mode gives worse results\n\n# create a prophet model instance\nm = ProphetModel(air_passengers_ts, params)\n\n# fit model simply by calling m.fit()\nm.fit()\n\n# make prediction for next 30 month\nfcst = m.predict(steps=30, freq=\"MS\")\n\n# plot to visualize\nm.plot()", "INFO:fbprophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\nINFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n" ] ], [ [ "## 1.3 Holt-Winters", "_____no_output_____" ] ], [ [ "from kats.models.holtwinters import HoltWintersParams, HoltWintersModel\nwarnings.simplefilter(action='ignore')\n\n\nparams = HoltWintersParams(\n trend=\"add\",\n #damped=False,\n seasonal=\"mul\",\n seasonal_periods=12,\n )\nm = HoltWintersModel(\n data=air_passengers_ts, \n params=params)\n\nm.fit()\nfcst = m.predict(steps=30, alpha = 0.1)\nm.plot()", "_____no_output_____" ] ], [ [ "# 2. Forecasting with Ensemble model\n\n`KatsEnsemble` is an ensemble forecasting model, which means it allows you to combine several different forecasting models when building a forecast. When creating an ensemble, you specify the list of models (with parameters) that you wish to include in the ensemble, and then you choose whether to aggregate these forecasts using the median or the weighted average. Prior to building any forecasts, the model checks for seasonality and if seasonality is detected, it performs an STL decomposition (using either additive or multiplicative decomposition, as specified by the user). Each of the forecasting models specified to the ensemble model are only applied to the the de-seasonalized components, and after these forecasts are aggregated the result is reseasonalized.\n\nWhen we initialize `KatsEnsemble`, we include a dictionary with the following components: \n\n* **models:** `EnsembleParams`, contains a list of parameters for each of the individual model parameters \n* **aggregation:** 'str', either 'median' or 'weightedavg', how to aggregate the individual forecasts to build an ensemble \n* **seasonality_length:** int, the length of the seasonality of the time series \n* **decomposition_method** str, either 'multiplicative' or 'additive', the type of decomposition of the initial time series", "_____no_output_____" ], [ "In the example below, we use the `air_passengers` data set to build a median ensemble forecast that combines 6 different forecasting models. 
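To make the aggregation choice concrete: with median aggregation, each point of the final forecast is simply the median of the individual model forecasts at that step, which conceptually amounts to the toy calculation below (illustrative numbers only, not Kats internals):\n\n```python\nimport numpy as np\n\n# three hypothetical model forecasts for the same three future months\narima_fcst = np.array([450.0, 460.0, 470.0])\nprophet_fcst = np.array([455.0, 452.0, 480.0])\ntheta_fcst = np.array([440.0, 465.0, 475.0])\n\nensemble_fcst = np.median(np.vstack([arima_fcst, prophet_fcst, theta_fcst]), axis=0)\n# -> array([450., 460., 475.])\n```\n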
We use the `EnsembleParams` object to define the parameters for each of these models. Then generating a forecast for this ensemble is straightforward.", "_____no_output_____" ] ], [ [ "from kats.models.ensemble.ensemble import EnsembleParams, BaseModelParams\nfrom kats.models.ensemble.kats_ensemble import KatsEnsemble\nfrom kats.models import (\n arima,\n holtwinters,\n linear_model,\n prophet, # requires fbprophet be installed\n quadratic_model,\n sarima,\n theta,\n)\n\n# we need to define params for each individual forecasting model in the `EnsembleParams` class\n# here we include 6 different models\nmodel_params = EnsembleParams(\n [\n BaseModelParams(\"arima\", arima.ARIMAParams(p=1, d=1, q=1)),\n BaseModelParams(\n \"sarima\",\n sarima.SARIMAParams(\n p=2,\n d=1,\n q=1,\n trend=\"ct\",\n seasonal_order=(1, 0, 1, 12),\n enforce_invertibility=False,\n enforce_stationarity=False,\n ),\n ),\n BaseModelParams(\"prophet\", prophet.ProphetParams()), # requires fbprophet be installed\n BaseModelParams(\"linear\", linear_model.LinearModelParams()),\n BaseModelParams(\"quadratic\", quadratic_model.QuadraticModelParams()),\n BaseModelParams(\"theta\", theta.ThetaParams(m=12)),\n ]\n )\n\n# create `KatsEnsembleParam` with detailed configurations \nKatsEnsembleParam = {\n \"models\": model_params,\n \"aggregation\": \"median\",\n \"seasonality_length\": 12,\n \"decomposition_method\": \"multiplicative\",\n}\n\n# create `KatsEnsemble` model\nm = KatsEnsemble(\n data=air_passengers_ts, \n params=KatsEnsembleParam\n )\n\n# fit and predict\nm.fit()\n\n# predict for the next 30 steps\nfcst = m.predict(steps=30)\n\n# aggregate individual model results\nm.aggregate()\n\n# plot to visualize\nm.plot()", "_____no_output_____" ] ], [ [ "# 3. Multivariate Model Forecasting", "Vector autoregression (VAR) is a multivariate forecasting algorithm that is supported in Kats. Here, we show an example of how to use the `VARModel` with the `multi_ts` data set. We begin by loading the data set into a `TimeSeriesData` and previewing it." ], [ "try: # If running on Jupyter\n multi_df = pd.read_csv(\"../kats/data/multi_ts.csv\")\nexcept FileNotFoundError: # If running on colab\n multi_df = pd.read_csv(\"multi_ts.csv\")\nmulti_ts = TimeSeriesData(multi_df)", "_____no_output_____" ], [ "multi_df.groupby('time').sum()[['V1', 'V2']].plot(figsize=(10, 6))\nplt.show()", "_____no_output_____" ] ], [ [ "Now it is straightforward to build this forecast using `VARModel` and plot the results as follows.", "_____no_output_____" ] ], [ [ "# Use VAR model to forecast this multivariate time series\nfrom kats.models.var import VARModel, VARParams\n\nparams = VARParams()\nm = VARModel(multi_ts, params)\nm.fit()\nfcst = m.predict(steps=90)\n\nm.plot()\nplt.show()", "_____no_output_____" ] ], [ [ "# 4. Hyperparameter tuning\n\nTo identify which hyperparameters to use for a specified forecasting model, we have classes in Kats that allow you to efficiently identify the best hyperparameters. Here we will provide an example of how to do hyperparameter tuning for an ARIMA model using the `air_passengers` data set.\nNOTE: This example requires ax-platform be installed. 
For example, `pip install ax-platform` or `pip install kats[all]`.", "_____no_output_____" ] ], [ [ "import kats.utils.time_series_parameter_tuning as tpt\nfrom kats.consts import ModelEnum, SearchMethodEnum, TimeSeriesData\nfrom kats.models.arima import ARIMAParams, ARIMAModel\n\nfrom ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType\nfrom ax.models.random.sobol import SobolGenerator\nfrom ax.models.random.uniform import UniformGenerator\nwarnings.simplefilter(action='ignore')", "_____no_output_____" ] ], [ [ "The method we use for hyperparameter tuning is a static method called `create_search_method`. To call this method, we need to specify the type of search we are doing and the search space for the parameters. We specify the search space for the parameters by defining a dictionary for each parameter and combining these dictionaries into a list. Here we are specifying that we want to look at all ARIMA(p,d,q) models where the values p, d, and q are either 1 or 2.", "_____no_output_____" ] ], [ [ "parameters_grid_search = [\n{\n \"name\": \"p\",\n \"type\": \"choice\",\n \"values\": list(range(1, 3)),\n \"value_type\": \"int\",\n \"is_ordered\": True,\n},\n{\n \"name\": \"d\",\n \"type\": \"choice\",\n \"values\": list(range(1, 3)),\n \"value_type\": \"int\",\n \"is_ordered\": True,\n},\n{\n \"name\": \"q\",\n \"type\": \"choice\",\n \"values\": list(range(1, 3)),\n \"value_type\": \"int\",\n \"is_ordered\": True,\n},\n]", "_____no_output_____" ] ], [ [ "Now, we are going to create a grid search with these parameters. The full list of arguments of `create_search_method` is as follows:\n* **parameters:** List[Dict], this is a list of dictionaries, where each dictionary gives the search space for a parameter \n* **selected_search_method:** SearchMethodEnum, the type of search method used to do parameter tuning\n* **objective_name:** str, the name of the objective function used for the search (this is arbitrary)", "_____no_output_____" ] ], [ [ "parameter_tuner_grid = tpt.SearchMethodFactory.create_search_method(\n objective_name=\"evaluation_metric\",\n parameters=parameters_grid_search,\n selected_search_method=SearchMethodEnum.GRID_SEARCH,\n)", "_____no_output_____" ] ], [ [ "Now that we have defined our search grid, we need to define the metric we are calculating at each point on the grid. 
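Before writing the real one, it helps to note the only contract the tuner needs for the purposes of this tutorial: the evaluation function takes a dictionary of parameter values, such as `{'p': 1, 'd': 2, 'q': 1}`, and returns a single number to be minimized. A toy stand-in would be:\n\n```python\ndef toy_evaluation_function(params):\n    # pretend the best model is ARIMA(2, 1, 1); smaller return value = better\n    return abs(params['p'] - 2) + abs(params['d'] - 1) + abs(params['q'] - 1)\n```\n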
Given a set of parameters p, q, and d, we define our evaluation function to be mean absolute error (MAE) of the forecast for the test data set (using an 80/20 training-test split) using these respective parameters.", "_____no_output_____" ] ], [ [ "# Divide into an 80/20 training-test split\nsplit = int(0.8*len(air_passengers_df))\n\ntrain_ts = air_passengers_ts[0:split]\ntest_ts = air_passengers_ts[split:]\n\n# Fit an ARIMA model and calculate the MAE for the test data\ndef evaluation_function(params):\n arima_params = ARIMAParams(\n p = params['p'],\n d = params['d'],\n q = params['q']\n )\n model = ARIMAModel(train_ts, arima_params)\n model.fit()\n model_pred = model.predict(steps=len(test_ts))\n error = np.mean(np.abs(model_pred['fcst'].values - test_ts.value.values))\n return error", "_____no_output_____" ] ], [ [ "Now that we have our grid and our evaluation functions defined, we can display our evaluation metric for each point on the grid using the following function calls.", "_____no_output_____" ] ], [ [ "parameter_tuner_grid.generate_evaluate_new_parameter_values(\n evaluation_function=evaluation_function\n)\n\n# Retrieve parameter tuning results\n\nparameter_tuning_results_grid = (\n parameter_tuner_grid.list_parameter_value_scores()\n)\n\nparameter_tuning_results_grid", "_____no_output_____" ] ], [ [ "From the calculations in the table above, we can conclude that ARIMA(2,1,1) has the minimal error of 52.02.", "# 5. Backtesting\n\nKats provides a backtesting module that makes it easy to compare and evaluate different forecasting models. While our hyperparameter tuning module allows you to compare different sets of parameters for a single base forecasting model, backtesting allows you to compare different types of base models (with pre-specified parameters). \n\nOur backtesting module allows you to look at multiple error metrics in a single function call. Here are the error metrics that are currently supported:\n* Mean Absolute Error (MAE)\n* Mean Absolute Percentage Error (MAPE)\n* Symmetric Mean Absolute Percentage Error (SMAPE)\n* Mean Squared Error (MSE)\n* Mean Absolute Scaled Error (MASE)\n* Root Mean Squared Error (RMSE)\n\nOur example below shows how you can use the `BackTesterSimple` class to compare errors between an ARIMA model and a Prophet model using the `air_passengers` data set.", "_____no_output_____" ] ], [ [ "from kats.utils.backtesters import BackTesterSimple\nfrom kats.models.arima import ARIMAModel, ARIMAParams\n\nbacktester_errors = {}", "_____no_output_____" ] ], [ [ "Here, we define a backtester to look at each of the supported error metrics for an ARIMA(2,1,1) model. 
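(As a reference point, these metrics follow their textbook definitions; for example, MAPE could be computed by hand as in the sketch below, where `actual` and `forecast` are NumPy arrays of the held-out values and the corresponding predictions.)\n\n```python\nimport numpy as np\n\ndef mape(actual, forecast):\n    # mean absolute percentage error, in percent; assumes no zeros in `actual`\n    return np.mean(np.abs((actual - forecast) / actual)) * 100\n```\n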
We specify in the `BackTesterSimple` definition that we are using a 75/25 training-test split to train and evaluate the metrics for this model", "_____no_output_____" ] ], [ [ "params = ARIMAParams(p=2, d=1, q=1)\nALL_ERRORS = ['mape', 'smape', 'mae', 'mase', 'mse', 'rmse']\n\nbacktester_arima = BackTesterSimple(\n error_methods=ALL_ERRORS,\n data=air_passengers_ts,\n params=params,\n train_percentage=75,\n test_percentage=25, \n model_class=ARIMAModel)\n\nbacktester_arima.run_backtest()", "_____no_output_____" ] ], [ [ "After we run the backtester, the `errors` attribute will be a dictionary mapping each error type name to its corresponding value", "_____no_output_____" ] ], [ [ "backtester_errors['arima'] = {}\nfor error, value in backtester_arima.errors.items():\n backtester_errors['arima'][error] = value", "_____no_output_____" ] ], [ [ "Now we run another backtester to calculate the same error metrics for a Prophet model.", "_____no_output_____" ] ], [ [ "params_prophet = ProphetParams(seasonality_mode='multiplicative') # additive mode gives worse results\n\nbacktester_prophet = BackTesterSimple(\n error_methods=ALL_ERRORS,\n data=air_passengers_ts,\n params=params_prophet,\n train_percentage=75,\n test_percentage=25, \n model_class=ProphetModel)\n\nbacktester_prophet.run_backtest()\n\nbacktester_errors['prophet'] = {}\nfor error, value in backtester_prophet.errors.items():\n backtester_errors['prophet'][error] = value", "INFO:fbprophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\nINFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n" ] ], [ [ "Here we can compare the error metrics for the two models.", "_____no_output_____" ] ], [ [ "pd.DataFrame.from_dict(backtester_errors)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb3fd96b12e378cde17ce205829de0b1f0ebf56b
89,112
ipynb
Jupyter Notebook
Scripts/Delete_Cassandra.ipynb
danielherranzsegundo/TFM_2022
5bf885a1535cf9402ef84d3f2f5b1db39f581195
[ "Apache-2.0" ]
1
2022-03-31T14:13:25.000Z
2022-03-31T14:13:25.000Z
Scripts/Delete_Cassandra.ipynb
danielherranzsegundo/TFM_2022
5bf885a1535cf9402ef84d3f2f5b1db39f581195
[ "Apache-2.0" ]
null
null
null
Scripts/Delete_Cassandra.ipynb
danielherranzsegundo/TFM_2022
5bf885a1535cf9402ef84d3f2f5b1db39f581195
[ "Apache-2.0" ]
null
null
null
85.849711
210
0.749113
[ [ [ "Script delete Cassandra en cluster multidomain", "_____no_output_____" ] ], [ [ "!pip install mysql-connector==2.1.7\n!pip install pandas\n!pip install sqlalchemy\n#requiere instalación adicional, consultar https://github.com/PyMySQL/mysqlclient\n!pip install mysqlclient\n!pip install numpy\n!pip install pymysql", "Requirement already satisfied: mysql-connector==2.1.7 in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (2.1.7)\nRequirement already satisfied: pandas in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (1.0.1)\nRequirement already satisfied: numpy>=1.13.3 in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (from pandas) (1.18.1)\nRequirement already satisfied: pytz>=2017.2 in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (from pandas) (2019.3)\nRequirement already satisfied: python-dateutil>=2.6.1 in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (from pandas) (2.8.1)\nRequirement already satisfied: six>=1.5 in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (from python-dateutil>=2.6.1->pandas) (1.14.0)\nRequirement already satisfied: sqlalchemy in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (1.3.13)\nRequirement already satisfied: mysqlclient in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (2.1.0)\nRequirement already satisfied: numpy in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (1.18.1)\nRequirement already satisfied: pymysql in /home/danielherranzsegundo/anaconda3/lib/python3.7/site-packages (1.0.2)\n" ], [ "import pandas as pd\nimport numpy as np\nimport os\nimport json\nimport random\nfrom cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\nimport time\nfrom pprint import pprint\nimport psutil\nimport uuid\nfrom cassandra.query import tuple_factory\nfrom cassandra.query import dict_factory\nfrom cassandra.query import BatchStatement, SimpleStatement\nfrom cassandra.policies import RetryPolicy", "_____no_output_____" ], [ "#Los resultados de medidas de tiempo en carga por dominios se almacenan en estos objetos.\n#Se itera durante 100 iteraciones para sacar medias\n#repeticiones\nrepeats = 100", "_____no_output_____" ], [ "#Ficheros de salida\nresultados_etl_delete = '../Results/Cassandra/CassandraDelete_test_{}.csv'", "_____no_output_____" ], [ "def save_results_to_csv(results,file):\n #Guardamos los resultados en csv\n from datetime import datetime\n \n csv_df = pd.DataFrame(results, columns=['Registros', 'Tiempo', 'CPU','Memoria'])\n dia = datetime.now().strftime(\"%d%m%Y_%H_%M_%S\")\n print(file.format(str(dia)))\n csv_df.to_csv(file.format(str(dia)))", "_____no_output_____" ], [ "from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT\nfrom cassandra.policies import WhiteListRoundRobinPolicy, DowngradingConsistencyRetryPolicy\nfrom cassandra.query import tuple_factory\nfrom cassandra import ConsistencyLevel\n\nprofile = ExecutionProfile(\n load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']),\n retry_policy=DowngradingConsistencyRetryPolicy(),\n consistency_level=ConsistencyLevel.ALL,\n serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL,\n request_timeout=3600,\n row_factory=tuple_factory\n)\ncluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile})\nsession = cluster.connect()\nprint(session.execute(\"SELECT release_version FROM system.local\").one())\n\nsession.execute('USE currentaccountkeyspace')\n", "('4.0.3',)\n" 
], [ "partyid_list = []\npartyid_list_deleted = []\nresult = session.execute(\"SELECT partyid FROM customerprofilekeyspace.customerprofile LIMIT 10000;\")\nfor partyid in result:\n partyid_list.append(partyid[0])\nprint(partyid_list[10:15])", "['bc397d66-bfc8-11ec-b0d9-ff5473b2f02a', 'bc47bd22-bfc8-11ec-b0d9-ff5473b2f02a', 'bc24c5c4-bfc8-11ec-b0d9-ff5473b2f02a', 'bc30dabc-bfc8-11ec-b0d9-ff5473b2f02a', 'bc1e8286-bfc8-11ec-b0d9-ff5473b2f02a']\n" ] ], [ [ "# Select test multidomain", "_____no_output_____" ] ], [ [ "from cassandra import ConsistencyLevel\nfrom cassandra.query import SimpleStatement\nfrom cassandra import concurrent\nfrom statistics import mean\n\nregisters = []\nbatch = BatchStatement(consistency_level=ConsistencyLevel.ALL)\naccount_id_list = []\n\n#Cargas Masiva con Many\ndef deletePartyOnCascade(): \n \n SELECT_CURRENT_ACCOUNT_STMT = \"SELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = '{}';\"\n DELETE_CURRENT_ACCOUNT_STMT = \"DELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '{}';\" \n DELETE_CUSTOMER_PROFILE_STMT = \"DELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = '{}';\"\n DELETE_CUSTOMER_PROFILE_ADDRESS_STMT = \"DELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = '{}';\"\n DELETE_CURRENT_BY_SCHEME_ACCOUNT_STMT = \"DELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '{}';\"\n DELETE_POSITIONKEEPING_SMT = \"DELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '{}';\"\n \n session = cluster.connect('customerprofilekeyspace')\n iter = 0;\n i = 1\n \n for i in range(0,repeats): \n \n time_inicial = time.time() \n \n accounts_to_delete = []\n partyid = random.choice(partyid_list) \n print(SELECT_CURRENT_ACCOUNT_STMT.format(partyid))\n result_ca = session.execute(SELECT_CURRENT_ACCOUNT_STMT.format(partyid))\n for accountid in result_ca:\n accounts_to_delete.append(accountid[0])\n #print(\"accounts_to_delete:\", accountid[0])\n #Borrado customerprofile\n print(DELETE_CUSTOMER_PROFILE_STMT.format(partyid))\n batch.add(DELETE_CUSTOMER_PROFILE_STMT.format(partyid))\n #Borrado customerprofileaddress\n print(DELETE_CUSTOMER_PROFILE_ADDRESS_STMT.format(partyid))\n batch.add(DELETE_CUSTOMER_PROFILE_ADDRESS_STMT.format(partyid))\n \n #Borrado account info\n for accountid in accounts_to_delete:\n #Borrado account\n print(DELETE_CURRENT_ACCOUNT_STMT.format(accountid))\n batch.add(DELETE_CURRENT_ACCOUNT_STMT.format(accountid))\n print(DELETE_CURRENT_BY_SCHEME_ACCOUNT_STMT.format(accountid))\n batch.add(DELETE_CURRENT_BY_SCHEME_ACCOUNT_STMT.format(accountid))\n print(DELETE_POSITIONKEEPING_SMT.format(accountid))\n batch.add(DELETE_POSITIONKEEPING_SMT.format(accountid))\n \n #Borrado en cascada\n session.execute(batch)\n batch.clear()\n partyid_list.remove(partyid)\n \n time_final = time.time() \n data_time_collection = round(time_final - time_inicial,3)\n used_cpu = psutil.cpu_percent()\n mem_used = psutil.virtual_memory().percent\n registers.append((iter,data_time_collection,used_cpu,mem_used))\n print((iter,data_time_collection,used_cpu,mem_used))\n iter += 1;\n time_inicial = time.time()\n i = i + 1\n \n return registers", "_____no_output_____" ], [ "registers = deletePartyOnCascade()", "SELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc284834-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc284834-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM 
customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc284834-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '47472be6-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '47472be6-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '47472be6-bfcf-11ec-b0d9-ff5473b2f02a';\n(0, 0.007, 31.5, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc3add0a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc3add0a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc3add0a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4759a050-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4759a050-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4759a050-bfcf-11ec-b0d9-ff5473b2f02a';\n(1, 0.004, 50.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc2afc8c-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc2afc8c-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc2afc8c-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4749e48a-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4749e48a-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4749e48a-bfcf-11ec-b0d9-ff5473b2f02a';\n(2, 0.003, 0.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc2adbd0-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc2adbd0-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc2adbd0-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4749c392-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4749c392-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4749c392-bfcf-11ec-b0d9-ff5473b2f02a';\n(3, 0.004, 50.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc4984ae-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc4984ae-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc4984ae-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '47685a6e-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '47685a6e-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '47685a6e-bfcf-11ec-b0d9-ff5473b2f02a';\n(4, 0.004, 66.7, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc3bd778-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc3bd778-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM 
customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc3bd778-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '475a9762-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '475a9762-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '475a9762-bfcf-11ec-b0d9-ff5473b2f02a';\n(5, 0.004, 66.7, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc25c29e-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc25c29e-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc25c29e-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4744a04c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4744a04c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4744a04c-bfcf-11ec-b0d9-ff5473b2f02a';\n(6, 0.007, 33.3, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc374c3a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc374c3a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc374c3a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '47562768-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '47562768-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '47562768-bfcf-11ec-b0d9-ff5473b2f02a';\n(7, 0.005, 75.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc48d810-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc48d810-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc48d810-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4767aefc-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4767aefc-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4767aefc-bfcf-11ec-b0d9-ff5473b2f02a';\n(8, 0.004, 0.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc406aae-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc406aae-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc406aae-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '475f2c78-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '475f2c78-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '475f2c78-bfcf-11ec-b0d9-ff5473b2f02a';\n(9, 0.004, 66.7, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc3ff452-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc3ff452-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM 
customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc3ff452-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '475eb126-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '475eb126-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '475eb126-bfcf-11ec-b0d9-ff5473b2f02a';\n(10, 0.004, 0.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc33e2f2-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc33e2f2-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc33e2f2-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4752cf00-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4752cf00-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4752cf00-bfcf-11ec-b0d9-ff5473b2f02a';\n(11, 0.004, 50.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc39e3f0-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc39e3f0-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc39e3f0-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4758adb2-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4758adb2-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4758adb2-bfcf-11ec-b0d9-ff5473b2f02a';\n(12, 0.004, 66.7, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc1d4c72-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc1d4c72-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc1d4c72-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '473b0334-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '473b0334-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '473b0334-bfcf-11ec-b0d9-ff5473b2f02a';\n(13, 0.004, 75.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc4a1f9a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc4a1f9a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc4a1f9a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4768f4a6-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4768f4a6-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4768f4a6-bfcf-11ec-b0d9-ff5473b2f02a';\n(14, 0.008, 40.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc411d78-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc411d78-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM 
customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc411d78-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '475fec9e-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '475fec9e-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '475fec9e-bfcf-11ec-b0d9-ff5473b2f02a';\n(15, 0.004, 50.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc284604-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc284604-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc284604-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '474729ac-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '474729ac-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '474729ac-bfcf-11ec-b0d9-ff5473b2f02a';\n(16, 0.003, 66.7, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc285c20-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc285c20-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc285c20-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '47473f78-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '47473f78-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '47473f78-bfcf-11ec-b0d9-ff5473b2f02a';\n(17, 0.004, 66.7, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc401194-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc401194-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc401194-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '475ecf1c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '475ecf1c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '475ecf1c-bfcf-11ec-b0d9-ff5473b2f02a';\n(18, 0.004, 0.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc3614a0-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc3614a0-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc3614a0-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4754f848-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4754f848-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4754f848-bfcf-11ec-b0d9-ff5473b2f02a';\n(19, 0.004, 50.0, 92.4)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc2a67d6-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc2a67d6-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM 
customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc2a67d6-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '47494e94-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '47494e94-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '47494e94-bfcf-11ec-b0d9-ff5473b2f02a';\n(20, 0.011, 62.5, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc295f30-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc295f30-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc295f30-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4748438c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4748438c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4748438c-bfcf-11ec-b0d9-ff5473b2f02a';\n(21, 0.004, 50.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc459fce-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc459fce-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc459fce-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '47647890-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '47647890-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '47647890-bfcf-11ec-b0d9-ff5473b2f02a';\n(22, 0.004, 50.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc413a2e-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc413a2e-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc413a2e-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '476008d2-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '476008d2-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '476008d2-bfcf-11ec-b0d9-ff5473b2f02a';\n(23, 0.004, 100.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc33df96-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc33df96-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc33df96-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4752cbc2-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4752cbc2-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4752cbc2-bfcf-11ec-b0d9-ff5473b2f02a';\n(24, 0.004, 50.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc340bba-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc340bba-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM 
customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc340bba-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4752f624-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4752f624-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4752f624-bfcf-11ec-b0d9-ff5473b2f02a';\n(25, 0.003, 75.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc36e8a8-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc36e8a8-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc36e8a8-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4755c764-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4755c764-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4755c764-bfcf-11ec-b0d9-ff5473b2f02a';\n(26, 0.007, 50.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc349d64-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc349d64-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc349d64-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4753844a-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4753844a-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4753844a-bfcf-11ec-b0d9-ff5473b2f02a';\n(27, 0.004, 50.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc1e2b6a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc1e2b6a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc1e2b6a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '473c638c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '473c638c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '473c638c-bfcf-11ec-b0d9-ff5473b2f02a';\n(28, 0.004, 0.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc22eef2-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc22eef2-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc22eef2-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '4741c71e-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '4741c71e-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '4741c71e-bfcf-11ec-b0d9-ff5473b2f02a';\n(29, 0.007, 60.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc445740-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc445740-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM 
customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc445740-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '47632d96-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '47632d96-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '47632d96-bfcf-11ec-b0d9-ff5473b2f02a';\n(30, 0.004, 50.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc29aa3a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc29aa3a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc29aa3a-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '47488fa4-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '47488fa4-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '47488fa4-bfcf-11ec-b0d9-ff5473b2f02a';\n(31, 0.01, 40.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc1fc344-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc1fc344-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc1fc344-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '473ea82c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '473ea82c-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '473ea82c-bfcf-11ec-b0d9-ff5473b2f02a';\n(32, 0.004, 66.7, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc28235e-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc28235e-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc28235e-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '474706a2-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '474706a2-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '474706a2-bfcf-11ec-b0d9-ff5473b2f02a';\n(33, 0.009, 60.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc36abfe-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = 'bc36abfe-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = 'bc36abfe-bfc8-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '47558a38-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '47558a38-bfcf-11ec-b0d9-ff5473b2f02a';\nDELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '47558a38-bfcf-11ec-b0d9-ff5473b2f02a';\n(34, 0.004, 50.0, 92.3)\nSELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = 'bc3d323a-bfc8-11ec-b0d9-ff5473b2f02a';\n" ], [ "#Guardamos los resultados Customer Profile\nsave_results_to_csv(registers,resultados_etl_delete)", 
"../Results/Cassandra/CassandraDelete_test_19042022_17_51_07.csv\n" ], [ "cluster.shutdown()\nprint('Conexion cerrada')", "Conexion cerrada\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb3fe610669bfdd62a319b8e5aa4395930f8961c
26,723
ipynb
Jupyter Notebook
Day17/.ipynb_checkpoints/Day17-checkpoint.ipynb
MichaelMKKang/AdventOfCode
fcbc7dccdd116c8cce62c0570bbaeaafe17b55ff
[ "MIT" ]
null
null
null
Day17/.ipynb_checkpoints/Day17-checkpoint.ipynb
MichaelMKKang/AdventOfCode
fcbc7dccdd116c8cce62c0570bbaeaafe17b55ff
[ "MIT" ]
null
null
null
Day17/.ipynb_checkpoints/Day17-checkpoint.ipynb
MichaelMKKang/AdventOfCode
fcbc7dccdd116c8cce62c0570bbaeaafe17b55ff
[ "MIT" ]
null
null
null
36.308424
120
0.498934
[ [ [ "import re\nwith open('Day17 input.txt') as f:\n lines = f.readlines()\nlines = [x.strip() for x in lines]\nline = lines[0][13:]\nprint(line)\nx_range = re.search('(?<=x=).*(?=, )',line)[0]\nx_range = [int(re.search('.*(?=\\.\\.)',x_range)[0]), int(re.search('(?<=\\.\\.).*',x_range)[0])]\nprint(x_range)\ny_range = re.search('(?<=, y=).*',line)[0]\ny_range = [int(re.search('.*(?=\\.\\.)',y_range)[0]), int(re.search('(?<=\\.\\.).*',y_range)[0])]\nprint(y_range)", "x=175..227, y=-134..-79\n[175, 227]\n[-134, -79]\n" ], [ "#pos is (x,y), vel is (x_vel,y_vel)\n# outputs new step,new vel\ndef step(x,y,x_vel,y_vel):\n new_x_pos = x + x_vel\n new_y_pos = y + y_vel\n y_vel -= 1\n if x_vel > 0:\n x_vel -= 1\n elif x_vel < 0:\n x_vel += 1\n else:\n pass\n return new_x_pos,new_y_pos,x_vel,y_vel\n", "_____no_output_____" ], [ "#returns 0 if winning shot, -1 if under, 1 if over, 2 if goes through\ndef try_shot(x,y,x_vel,y_vel):\n initial_y_vel = y_vel\n max_height = 0\n while x_vel != 0 or y >= y_range[0]:\n #step\n x,y,x_vel,y_vel = step(x,y,x_vel,y_vel)\n if y > max_height:\n max_height = y\n #print(x,y,x_vel,y_vel)\n #check for win\n if (x_range[0] <= x <= x_range[1]) and (y_range[0] <= y <= y_range[1]):\n print('y velocity ',initial_y_vel, ' has a max height of: ',max_height)\n return 0\n #every shot eventually gets to x_vel==0, but mercykill\n #if y_vel < 0 and y < y_range[0]:\n # return -1\n #now x_vel == 0 and y_pos is under the box\n if x < x_range[0]:\n return -1\n elif x > x_range[1]:\n return 1\n else:\n return 2", "_____no_output_____" ], [ "#Part 1\nmax_height = 0\nwinning_y_vels = []\nx,y=0,0\nstart_x_vel = 0\nfor start_y_vel in range(-5,400): #for each value of y_vel\n x_vel,y_vel = start_x_vel,start_y_vel\n\n success = False\n max_height = 0\n attempted_x_vels = []\n while 1:\n attempted_x_vels.append(x_vel)\n x_result = try_shot(x,y,x_vel,y_vel)\n if x_result == 0:\n #print(start_y_vel,': ',x_vel,y_vel,'successfully hits')\n success = True\n break\n #no situation where it doesn't go through for starting y_vel for any x\n elif x_result == 2:\n print(start_y_vel,x_vel,': ',y_vel,'goes through')\n break\n else:\n x_vel += -x_result\n if x_vel in attempted_x_vels:\n #print(start_y_vel,': ',y_vel,'no solution')\n break\n \n if success:\n #print(start_y_vel,'successfully hits')\n winning_y_vels.append(start_y_vel)\n else:\n #if len(winning_y_vels)>0:\n # break\n #print(start_y_vel, 'doesn\\'t hit')\n pass\n\nprint(max(winning_y_vels))", "-5 19 : -5 goes through\n-4 19 : -4 goes through\ny velocity -3 has a max height of: 0\ny velocity -2 has a max height of: 0\ny velocity -1 has a max height of: 0\ny velocity 0 has a max height of: 0\ny velocity 1 has a max height of: 1\ny velocity 2 has a max height of: 3\ny velocity 3 has a max height of: 6\ny velocity 4 has a max height of: 10\ny velocity 5 has a max height of: 15\ny velocity 6 has a max height of: 21\ny velocity 7 has a max height of: 28\ny velocity 8 has a max height of: 36\ny velocity 9 has a max height of: 45\ny velocity 10 has a max height of: 55\ny velocity 11 has a max height of: 66\ny velocity 12 has a max height of: 78\ny velocity 13 has a max height of: 91\ny velocity 14 has a max height of: 105\ny velocity 15 has a max height of: 120\ny velocity 16 has a max height of: 136\ny velocity 17 has a max height of: 153\ny velocity 18 has a max height of: 171\ny velocity 19 has a max height of: 190\ny velocity 20 has a max height of: 210\ny velocity 21 has a max height of: 231\ny velocity 22 has a max height of: 253\ny velocity 23 
has a max height of: 276\ny velocity 24 has a max height of: 300\ny velocity 25 has a max height of: 325\ny velocity 26 has a max height of: 351\ny velocity 27 has a max height of: 378\ny velocity 28 has a max height of: 406\ny velocity 29 has a max height of: 435\ny velocity 30 has a max height of: 465\ny velocity 31 has a max height of: 496\ny velocity 32 has a max height of: 528\ny velocity 33 has a max height of: 561\ny velocity 34 has a max height of: 595\ny velocity 35 has a max height of: 630\ny velocity 36 has a max height of: 666\ny velocity 37 has a max height of: 703\ny velocity 38 has a max height of: 741\ny velocity 39 has a max height of: 780\ny velocity 40 has a max height of: 820\ny velocity 41 has a max height of: 861\ny velocity 42 has a max height of: 903\ny velocity 43 has a max height of: 946\ny velocity 44 has a max height of: 990\ny velocity 45 has a max height of: 1035\ny velocity 46 has a max height of: 1081\ny velocity 47 has a max height of: 1128\ny velocity 48 has a max height of: 1176\ny velocity 49 has a max height of: 1225\ny velocity 50 has a max height of: 1275\ny velocity 51 has a max height of: 1326\ny velocity 52 has a max height of: 1378\ny velocity 53 has a max height of: 1431\ny velocity 54 has a max height of: 1485\ny velocity 55 has a max height of: 1540\ny velocity 56 has a max height of: 1596\ny velocity 57 has a max height of: 1653\ny velocity 58 has a max height of: 1711\ny velocity 59 has a max height of: 1770\ny velocity 60 has a max height of: 1830\ny velocity 61 has a max height of: 1891\ny velocity 62 has a max height of: 1953\ny velocity 63 has a max height of: 2016\ny velocity 64 has a max height of: 2080\ny velocity 65 has a max height of: 2145\n66 19 : 66 goes through\n67 19 : 67 goes through\n68 19 : 68 goes through\n69 19 : 69 goes through\n70 19 : 70 goes through\n71 19 : 71 goes through\n72 19 : 72 goes through\n73 19 : 73 goes through\n74 19 : 74 goes through\n75 19 : 75 goes through\n76 19 : 76 goes through\n77 19 : 77 goes through\ny velocity 78 has a max height of: 3081\ny velocity 79 has a max height of: 3160\ny velocity 80 has a max height of: 3240\ny velocity 81 has a max height of: 3321\ny velocity 82 has a max height of: 3403\ny velocity 83 has a max height of: 3486\ny velocity 84 has a max height of: 3570\ny velocity 85 has a max height of: 3655\ny velocity 86 has a max height of: 3741\ny velocity 87 has a max height of: 3828\ny velocity 88 has a max height of: 3916\ny velocity 89 has a max height of: 4005\ny velocity 90 has a max height of: 4095\ny velocity 91 has a max height of: 4186\ny velocity 92 has a max height of: 4278\ny velocity 93 has a max height of: 4371\ny velocity 94 has a max height of: 4465\ny velocity 95 has a max height of: 4560\ny velocity 96 has a max height of: 4656\ny velocity 97 has a max height of: 4753\ny velocity 98 has a max height of: 4851\ny velocity 99 has a max height of: 4950\ny velocity 100 has a max height of: 5050\ny velocity 101 has a max height of: 5151\ny velocity 102 has a max height of: 5253\ny velocity 103 has a max height of: 5356\ny velocity 104 has a max height of: 5460\ny velocity 105 has a max height of: 5565\ny velocity 106 has a max height of: 5671\ny velocity 107 has a max height of: 5778\ny velocity 108 has a max height of: 5886\ny velocity 109 has a max height of: 5995\ny velocity 110 has a max height of: 6105\ny velocity 111 has a max height of: 6216\ny velocity 112 has a max height of: 6328\ny velocity 113 has a max height of: 6441\ny velocity 114 has a max height of: 
6555\ny velocity 115 has a max height of: 6670\ny velocity 116 has a max height of: 6786\ny velocity 117 has a max height of: 6903\ny velocity 118 has a max height of: 7021\ny velocity 119 has a max height of: 7140\ny velocity 120 has a max height of: 7260\ny velocity 121 has a max height of: 7381\ny velocity 122 has a max height of: 7503\ny velocity 123 has a max height of: 7626\ny velocity 124 has a max height of: 7750\ny velocity 125 has a max height of: 7875\ny velocity 126 has a max height of: 8001\ny velocity 127 has a max height of: 8128\ny velocity 128 has a max height of: 8256\ny velocity 129 has a max height of: 8385\ny velocity 130 has a max height of: 8515\ny velocity 131 has a max height of: 8646\ny velocity 132 has a max height of: 8778\ny velocity 133 has a max height of: 8911\n134 19 : 134 goes through\n135 19 : 135 goes through\n136 19 : 136 goes through\n137 19 : 137 goes through\n138 19 : 138 goes through\n139 19 : 139 goes through\n140 19 : 140 goes through\n141 19 : 141 goes through\n142 19 : 142 goes through\n143 19 : 143 goes through\n144 19 : 144 goes through\n145 19 : 145 goes through\n146 19 : 146 goes through\n147 19 : 147 goes through\n148 19 : 148 goes through\n149 19 : 149 goes through\n150 19 : 150 goes through\n151 19 : 151 goes through\n152 19 : 152 goes through\n153 19 : 153 goes through\n154 19 : 154 goes through\n155 19 : 155 goes through\n156 19 : 156 goes through\n157 19 : 157 goes through\n158 19 : 158 goes through\n159 19 : 159 goes through\n160 19 : 160 goes through\n161 19 : 161 goes through\n162 19 : 162 goes through\n163 19 : 163 goes through\n164 19 : 164 goes through\n165 19 : 165 goes through\n166 19 : 166 goes through\n167 19 : 167 goes through\n168 19 : 168 goes through\n169 19 : 169 goes through\n170 19 : 170 goes through\n171 19 : 171 goes through\n172 19 : 172 goes through\n173 19 : 173 goes through\n174 19 : 174 goes through\n175 19 : 175 goes through\n176 19 : 176 goes through\n177 19 : 177 goes through\n178 19 : 178 goes through\n179 19 : 179 goes through\n180 19 : 180 goes through\n181 19 : 181 goes through\n182 19 : 182 goes through\n183 19 : 183 goes through\n184 19 : 184 goes through\n185 19 : 185 goes through\n186 19 : 186 goes through\n187 19 : 187 goes through\n188 19 : 188 goes through\n189 19 : 189 goes through\n190 19 : 190 goes through\n191 19 : 191 goes through\n192 19 : 192 goes through\n193 19 : 193 goes through\n194 19 : 194 goes through\n195 19 : 195 goes through\n196 19 : 196 goes through\n197 19 : 197 goes through\n198 19 : 198 goes through\n199 19 : 199 goes through\n200 19 : 200 goes through\n201 19 : 201 goes through\n202 19 : 202 goes through\n203 19 : 203 goes through\n204 19 : 204 goes through\n205 19 : 205 goes through\n206 19 : 206 goes through\n207 19 : 207 goes through\n208 19 : 208 goes through\n209 19 : 209 goes through\n210 19 : 210 goes through\n211 19 : 211 goes through\n212 19 : 212 goes through\n213 19 : 213 goes through\n214 19 : 214 goes through\n215 19 : 215 goes through\n216 19 : 216 goes through\n217 19 : 217 goes through\n218 19 : 218 goes through\n219 19 : 219 goes through\n220 19 : 220 goes through\n221 19 : 221 goes through\n222 19 : 222 goes through\n223 19 : 223 goes through\n224 19 : 224 goes through\n225 19 : 225 goes through\n226 19 : 226 goes through\n227 19 : 227 goes through\n228 19 : 228 goes through\n229 19 : 229 goes through\n230 19 : 230 goes through\n" ], [ "#returns 0 if winning shot, -1 if under, 1 if over, 2 if goes through\ndef try_shot_test(x,y,x_vel,y_vel):\n while x_vel != 
0 or y >= y_range[0]:\n #step\n x,y,x_vel,y_vel = step(x,y,x_vel,y_vel)\n print(x,y,x_vel,y_vel)\n #check for win\n if (x_range[0] <= x <= x_range[1]) and (y_range[0] <= y <= y_range[1]):\n return 0\n #every shot eventually gets to x_vel==0, but mercykill\n #if y_vel < 0 and y < y_range[0]:\n # return -1\n #now x_vel == 0 and y_pos is under the box\n if x < x_range[0]:\n return -1\n elif x > x_range[1]:\n return 1\n else:\n return 2\n\n# sample testing\nx_range = [20, 30]\ny_range = [-10, -5]\n\ntry_shot_test(0,0,6,10)", "6 10 5 9\n11 19 4 8\n15 27 3 7\n18 34 2 6\n20 40 1 5\n21 45 0 4\n21 49 0 3\n21 52 0 2\n21 54 0 1\n21 55 0 0\n21 55 0 -1\n21 54 0 -2\n21 52 0 -3\n21 49 0 -4\n21 45 0 -5\n21 40 0 -6\n21 34 0 -7\n21 27 0 -8\n21 19 0 -9\n21 10 0 -10\n21 0 0 -11\n21 -11 0 -12\n" ], [ "sample = '''target area: x=20..30, y=-10..-5'''\n\nmy = '''target area: x=241..273, y=-97..-63'''\n\ndef parse_data(data):\n #target area: x=241..273, y=-97..-63\n coords = { \"x\": [], \"y\": []}\n data = data.split()\n for line in (line for line in data if \"=\" in line):\n for coord in (coord for coord in line.strip(\",\")[2:].split('..')):\n coords[line[0]].append(int(coord))\n \n return coords\n\ndef launch_probe(velocity,target):\n p_x,p_y = [0,0]\n v_x,v_y = velocity\n t_x = sorted(target[\"x\"])\n t_y = sorted(target[\"y\"])\n max_y = p_y\n while (p_x < max(t_x)+1 and not (v_x == 0 and p_x < min(t_x))) and not (p_x > min(t_x) and p_y < min(t_y)):\n p_x += v_x\n p_y += v_y\n if v_x > 0:\n v_x -= 1\n elif v_x < 0:\n v_x += 1\n v_y -= 1\n if p_y > max_y:\n max_y = p_y\n if (p_x in range(min(t_x),max(t_x)+1)) and (p_y in range(min(t_y),max(t_y)+1)):\n \n return True,velocity,max_y\n \n return False,velocity,max_y\n\ndef main():\n target = parse_data(sample)\n max_y = 0\n optimal = []\n count = 0\n for x in range(1,max(target[\"x\"])*2):\n for y in range(min(target[\"y\"]),max(target[\"x\"])):\n r,velocity,this_max_y = launch_probe([x,y],target)\n if r == True:\n count += 1\n if this_max_y > max_y:\n max_y = this_max_y\n optimal = velocity\n \n print(optimal)\n print(max_y)\n print(count)\n \nmain()", "[6, 9]\n45\n112\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cb3feff7cc6805c493119b8473389828487abf6e
37,331
ipynb
Jupyter Notebook
code/Pandas_dateFrames.ipynb
xing710/ModSimPy
87f0f481926c40855223e2843bd728edb235c516
[ "MIT" ]
null
null
null
code/Pandas_dateFrames.ipynb
xing710/ModSimPy
87f0f481926c40855223e2843bd728edb235c516
[ "MIT" ]
null
null
null
code/Pandas_dateFrames.ipynb
xing710/ModSimPy
87f0f481926c40855223e2843bd728edb235c516
[ "MIT" ]
null
null
null
100.08311
27,900
0.831507
[ [ [ "import pandas as pd\nimport numpy as np\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n#set font size of labels on matplotlib plots\nplt.rc('font', size=16)\n\n#set style of plots\nsns.set_style('white')\n\n#define a custom palette\ncustomPalette = ['#630C3A', '#39C8C6', '#D3500C', '#FFB139']\nsns.set_palette(customPalette)\n\n#number of points per group\nn = 150\n\n#define group labels and their centers\ngroups = {'A': (2,2),\n 'B': (4,4)}\n\n#create labeled x and y data\ndata = pd.DataFrame(index=range(n*len(groups)), columns=['x','y','label'])\nfor i, group in enumerate(groups.keys()):\n #randomly select n datapoints from a gaussian distrbution\n data.loc[i*n:((i+1)*n)-1,['x','y']] = np.random.normal(groups[group], \n [0.5,0.5], \n [n,2])\n #add group labels\n data.loc[i*n:((i+1)*n)-1,['label']] = group\n#plot data with seaborn\nfacet = sns.lmplot(data=data, x='x', y='y', hue='label', \n fit_reg=False, legend=True, legend_out=True)", "_____no_output_____" ], [ "type(data)", "_____no_output_____" ], [ "data[:3]", "_____no_output_____" ], [ "data.shape[0]\ndf=data\ndf.head()", "_____no_output_____" ], [ "print(df.loc[1]['label'])", "A\n" ], [ "print(df.at[0,'x'])", "1.1090041361357015\n" ], [ "print(df.iloc[0][1])", "1.2636391389936057\n" ] ], [ [ "Before you can get to the solution, it’s first a good idea to grasp the concept of loc and how it differs from other indexing attributes such as .iloc[] and .ix[]:\n\n.loc[] works on labels of your index. This means that if you give in loc[2], you look for the values of your DataFrame that have an index labeled 2.\n.iloc[] works on the positions in your index. This means that if you give in iloc[2], you look for the values of your DataFrame that are at index ’2`.\n.ix[] is a more complex case: when the index is integer-based, you pass a label to .ix[]. ix[2] then means that you’re looking in your DataFrame for values that have an index labeled 2. This is just like .loc[]! However, if your index is not solely integer-based, ix will work with positions, just like .iloc[].", "_____no_output_____" ] ], [ [ "print(df.loc[2])\n# Pass `2` to `loc`\nprint(df.loc[2])\n\n# Pass `2` to `iloc`\nprint(df.iloc[2])\n\n# Pass `2` to `ix`\nprint(df.ix[2])", "x 1.40948\ny 1.74879\nlabel A\nName: 2, dtype: object\nx 1.40948\ny 1.74879\nlabel A\nName: 2, dtype: object\nx 1.40948\ny 1.74879\nlabel A\nName: 2, dtype: object\nx 1.40948\ny 1.74879\nlabel A\nName: 2, dtype: object\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb3ff071b8b8104ed1a7bf555a28e981baf3581a
76,751
ipynb
Jupyter Notebook
Dual-Polarized base on rotation.ipynb
ABDELHAMED2017/hybrid_beamforming_notebook
645652cbbe44eb383cf8797914de3837a9a35d7b
[ "MIT" ]
5
2019-07-03T02:32:46.000Z
2021-09-23T02:41:11.000Z
Dual-Polarized base on rotation.ipynb
dienhoa/hybrid_beamforming_notebook
645652cbbe44eb383cf8797914de3837a9a35d7b
[ "MIT" ]
null
null
null
Dual-Polarized base on rotation.ipynb
dienhoa/hybrid_beamforming_notebook
645652cbbe44eb383cf8797914de3837a9a35d7b
[ "MIT" ]
2
2018-02-21T21:43:15.000Z
2019-04-21T03:19:10.000Z
138.041367
28,772
0.864301
[ [ [ "%matplotlib inline\nfrom mpl_toolkits.mplot3d import Axes3D\nimport scipy.io as io\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import ceil\nfrom scipy.optimize import curve_fit\nrealization = 1000\nimport seaborn as sns\nfrom matplotlib import cm\nfrom array_response import *\nimport itertools \n\nmat = io.loadmat('boundary.mat')\nbound1_para = mat['bound1_para'][0,:]\nbound2_para = mat['bound2_para'][0,:]\nbound3_para = mat['bound3_para'][0,:]\nbound4_1para = mat['bound4_1para'][0,:]\nbound4_2para = mat['bound4_2para'][0,:]\n\nbound4_3para = mat['bound4_3para'][0,:]\nxlim_4_1 = mat['xlim_4_1'][0,0]\nxlim_4_2 = mat['xlim_4_2'][0,:]\nxlim_4_3 = mat['xlim_4_3'][0,0]\n", "_____no_output_____" ], [ "azi_rot = np.linspace(0,2*np.pi,50)", "_____no_output_____" ], [ "def func_sin(x, c, d):\n return np.sin(2*np.pi*x*0.312 + c)*0.23 + d", "_____no_output_____" ], [ "test_1 = func_sin(azi_rot, *bound1_para)\ntest_2 = func_sin(azi_rot, *bound2_para)\nbound3 = np.poly1d(bound3_para)\nboud4_13 = np.poly1d(bound4_1para)\nbound4_2 = np.poly1d(bound4_2para)", "_____no_output_____" ], [ "plt.plot(azi_rot,test_1)\nplt.plot(azi_rot,test_2)\nplt.plot(azi_rot,bound3(azi_rot))\nplt.plot(azi_rot,boud4_13(azi_rot))\nplt.plot(azi_rot,bound4_2(azi_rot))\nplt.ylim(0,3.14)", "_____no_output_____" ], [ "def check_cate(_azi,_ele):\n _index = \"\"\n if ((_ele - bound3(_azi)) > 0):\n if (((_azi<xlim_4_1) and ((_ele - boud4_13(_azi))<0)) or ((_azi>xlim_4_2[0]) and (_azi<xlim_4_2[1]) and ((_ele - bound4_2(_azi))<0)) or ((_azi>xlim_4_3) and ((_ele - boud4_13(_azi))<0))):\n _index = \"samecluster\"\n else:\n _index = \"diffclus_samepol\"\n else:\n if ((_ele - func_sin(_azi, *bound2_para)) > 0):\n _index = \"diffclus_crosspol\"\n else:\n if ((_ele - func_sin(_azi, *bound1_para)) > 0):\n _index = \"samecluster\"\n else:\n _index = \"diffclus_samepol\"\n \n return _index", "_____no_output_____" ] ], [ [ "### Parameters declaration\n\nDeclare parameters needed for channel realization", "_____no_output_____" ] ], [ [ "Ns = 1 # number of streams\n\nNc = 6 # number of cluster\nNray = 1 # number of rays in each cluster\n\nNt = 64 # number of transmit antennas\nNr = 16 # number of receive antennas\n\nangle_sigma = 10/180*np.pi # standard deviation of the angles in azimuth and elevation both of Rx and Tx\n\ngamma = np.sqrt((Nt*Nr)/(Nc*Nray))\nrealization = 1000 # equivalent to number of taking sample\ncount = 0\n\neps = 0.1 # 20dB isolation\nsigma = np.sqrt(8/(1+eps**2))*1.37/1.14 # according to the normalization condition of H", "_____no_output_____" ] ], [ [ "### Channel Realization\n\nRealize channel H for Dual-Polarized antenna array", "_____no_output_____" ] ], [ [ "H_pol = np.zeros((2*Nr,2*Nt,realization),dtype=complex)\nAt = np.zeros((Nt,Nc*Nray,realization),dtype=complex)\nAr = np.zeros((Nr,Nc*Nray,realization),dtype=complex)\nalpha_hh = np.zeros((Nc*Nray,realization),dtype=complex)\nalpha_hv = np.zeros((Nc*Nray,realization),dtype=complex)\nalpha_vh = np.zeros((Nc*Nray,realization),dtype=complex)\nalpha_vv = np.zeros((Nc*Nray,realization),dtype=complex)\n\nAoD = np.zeros((2,Nc*Nray),dtype=complex)\nAoA = np.zeros((2,Nc*Nray),dtype=complex)\n\nH = np.zeros((2*Nr,2*Nt,realization),dtype=complex)\nazi_rot = np.random.normal(1.7,0.3,realization)\nele_rot = np.random.normal(2.3,0.3,realization) # Why PI/2 ??\n# azi_rot = np.random.uniform(0,2*np.pi,realization)\n# ele_rot = np.random.uniform(0,np.pi,realization) # Why PI/2 ??\nR = 
np.array([[np.cos(ele_rot)*np.cos(azi_rot),np.sin(ele_rot)],[-np.sin(ele_rot)*np.cos(azi_rot),np.cos(ele_rot)]]) # rotation matrix\n\nfor reali in range(realization):\n for c in range(1,Nc+1):\n AoD_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Departure _ azimuth\n AoD_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Departure _ elevation\n AoA_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Arrival_ azimuth\n AoA_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Arrival_ elevation\n AoD[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_azi_m, angle_sigma, (1,Nray))\n AoD[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_ele_m, angle_sigma, (1,Nray))\n AoA[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_azi_m, angle_sigma, (1,Nray))\n AoA[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_ele_m, angle_sigma, (1,Nray))\n for j in range(Nc*Nray):\n At[:,j,reali] = array_response(AoD[0,j],AoD[1,j],Nt)/np.sqrt(2) # UPA array response\n Ar[:,j,reali] = array_response(AoA[0,j],AoA[1,j],Nr)/np.sqrt(2)\n var_hh = ((sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[0,j])**2)).real\n var_hv = ((eps**2)*(sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[0,j])**2)).real\n var_vh = ((eps**2)*(sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[1,j])**2)).real\n var_vv = ((sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[1,j])**2)).real\n alpha_hh[j,reali] = np.random.normal(0, np.sqrt(var_hh/2)) + 1j*np.random.normal(0, np.sqrt(var_hh/2))\n alpha_hv[j,reali] = np.random.normal(0, np.sqrt(var_hv/2)) + 1j*np.random.normal(0, np.sqrt(var_hv/2))\n alpha_vh[j,reali] = np.random.normal(0, np.sqrt(var_vh/2)) + 1j*np.random.normal(0, np.sqrt(var_vh/2))\n alpha_vv[j,reali] = np.random.normal(0, np.sqrt(var_vv/2)) + 1j*np.random.normal(0, np.sqrt(var_vv/2))\n alpha = np.vstack((np.hstack((alpha_hh[j,reali],alpha_hv[j,reali])),np.hstack((alpha_vh[j,reali],alpha_vv[j,reali]))))\n H_pol[:,:,reali] = H_pol[:,:,reali] + np.kron(alpha,Ar[:,[j],reali]@At[:,[j],reali].conj().T)\n H_pol[:,:,reali] = 2*gamma* H_pol[:,:,reali]\n H[:,:,reali] = (np.kron(R[:,:,reali],np.eye(Nr)))@H_pol[:,:,reali]\n H[:,:,reali] = np.sqrt(4/3)* H[:,:,reali]", "_____no_output_____" ] ], [ [ "### Check normalized condition", "_____no_output_____" ] ], [ [ "channel_fro_2 = np.zeros(realization)\nfor reali in range(realization):\n channel_fro_2[reali] = np.linalg.norm(H[:,:,reali],'fro')\nprint(\"4*Nt*Nr =\", 4*Nt*Nr , \" Frobenius norm =\", np.mean(channel_fro_2**2))", "4*Nt*Nr = 4096 Frobenius norm = 4273.500312510811\n" ], [ "cluster = np.arange(Nc)\nprint(cluster)\nc = list(itertools.combinations(cluster, 2))\nnum_path = (2*Nc-1)*Nc\npath_combi = np.zeros((num_path,4),dtype=int)\n\nprint(path_combi.shape)\npath_combi[0:Nc,:]=np.arange(Nc).reshape(Nc,1).repeat(4,axis=1)\n\ncount = 0\nfor i in range(int(Nc*(Nc-1)/2)):\n path_combi[Nc+4*i,:] = np.array([c[count][0],c[count][0],c[count][1],c[count][1]])\n path_combi[Nc+4*i+1,:] = np.array([c[count][1],c[count][1],c[count][0],c[count][0]])\n path_combi[Nc+4*i+2,:] = np.array([c[count][0],c[count][1],c[count][1],c[count][0]])\n path_combi[Nc+4*i+3,:] = np.array([c[count][1],c[count][0],c[count][0],c[count][1]])\n count = count+1", "[0 1 2 3 4 5]\n(66, 4)\n" ], [ "cross_index = []\nsamepolar_index = []\ncount = Nc-1\nwhile (count<num_path-4):\n cross_index.extend([count+3,count+4])\n samepolar_index.extend([count+1,count+2])\n count = count + 4\ncross_index = np.array(cross_index)\nsamepolar_index = np.array(samepolar_index)\nsameclus_index = 
np.arange(0,Nc)\nprint(cross_index)\nprint(samepolar_index)\nprint(sameclus_index)\n# print(path_combi)", "[ 8 9 12 13 16 17 20 21 24 25 28 29 32 33 36 37 40 41 44 45 48 49 52 53\n 56 57 60 61 64 65]\n[ 6 7 10 11 14 15 18 19 22 23 26 27 30 31 34 35 38 39 42 43 46 47 50 51\n 54 55 58 59 62 63]\n[0 1 2 3 4 5]\n" ], [ "path_gain = np.zeros((num_path,realization)) # 2 to save the position and maximum value\nfor reali in range(realization):\n for combi in range(num_path):\n path_gain[combi,reali] =\\\n (np.abs\\\n ((np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[path_combi[combi,0],reali]+np.sin(ele_rot[reali])*alpha_vh[path_combi[combi,0],reali])*(path_combi[combi,0]==path_combi[combi,1])+\\\n (np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[path_combi[combi,2],reali]+np.sin(ele_rot[reali])*alpha_vv[path_combi[combi,2],reali])*(path_combi[combi,2]==path_combi[combi,1])+\\\n (-np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[path_combi[combi,0],reali]+np.cos(ele_rot[reali])*alpha_vh[path_combi[combi,0],reali])*(path_combi[combi,0]==path_combi[combi,3])+\\\n (-np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[path_combi[combi,2],reali]+np.cos(ele_rot[reali])*alpha_vv[path_combi[combi,2],reali])*(path_combi[combi,2]==path_combi[combi,3])\n ))**2\n", "_____no_output_____" ], [ "print(np.max(path_gain[0:Nc,2]))\nprint(path_gain[0:Nc,2])\n\nprint(path_gain[samepolar_index,2])\nprint(np.max(path_gain[samepolar_index,2]))", "1.975791431919476\n[0.18981628 1.85364276 0.05053548 0.01342732 0.22306832 1.97579143]\n[ 0.42621905 0.43475534 0.0209367 0.01124158 0.12170725 0.05435331\n 6.20860522 0.23115072 0.05219855 0.21463785 0.56655361 0.33562231\n 0.40002124 0.09474818 10.0327937 0.38169218 0.99948011 0.72424171\n 0.06127947 0.04748041 6.72855617 0.15124797 0.07332543 0.30238716\n 7.95799585 0.26569779 0.09586032 0.27099597 0.01857951 8.03719116]\n10.032793700190904\n" ] ], [ [ "__Check maximum gain from combination of path in each realization__", "_____no_output_____" ] ], [ [ "index = np.zeros(realization,dtype=int)\nfor reali in range(realization):\n index[reali] = np.argmax(path_gain[:,reali])", "_____no_output_____" ] ], [ [ "__Same Cluster__", "_____no_output_____" ] ], [ [ "index_sameclus = np.zeros(realization,dtype=int)\nfor reali in range(realization):\n index_sameclus[reali] = np.argmax(path_gain[0:Nc,reali])", "_____no_output_____" ], [ "gain_sameclus = np.zeros(realization,dtype=float)\nfor reali in range(realization):\n gain_sameclus[reali] = path_gain[index_sameclus[reali],reali]", "_____no_output_____" ] ], [ [ "__Chosen Category before check__", "_____no_output_____" ] ], [ [ "choosen_cate = [\"\" for x in range(realization)]\n\nindex_checkcate = np.zeros(realization,dtype=int)\ncate = \"\"\ntemp = 0\nfor reali in range(realization):\n cate = check_cate(azi_rot[reali],ele_rot[reali])\n if (cate == \"samecluster\"):\n index_checkcate[reali] = np.argmax(path_gain[0:Nc,reali])\n if (cate == \"diffclus_samepol\"):\n temp = np.argmax(path_gain[samepolar_index,reali])\n index_checkcate[reali] = int(temp+(np.floor(temp/2))*2+Nc)\n# index_checkcate[reali] = np.argmax(path_gain[samepolar_index,reali])\n if (cate == \"diffclus_crosspol\"):\n# index_checkcate[reali] = np.argmax(path_gain[cross_index,reali])\n temp = np.argmax(path_gain[cross_index,reali])\n index_checkcate[reali] = int(temp+(np.floor(temp/2)+1)*2+Nc) \n choosen_cate[reali] = cate\n temp = 0\n", "_____no_output_____" ] ], [ [ "### Plot Spectral Efficiency", "_____no_output_____" ] ], [ [ "SNR_dB = 
np.arange(-35,10,5)\nSNR = 10**(SNR_dB/10)\nsmax = SNR.shape[0]\nR_cross = np.zeros([smax, realization],dtype=complex)\n# R_steer = np.zeros([smax, realization],dtype=complex)\nR_samecl = np.zeros([smax, realization],dtype=complex)\nR_checkcate = np.zeros([smax, realization],dtype=complex)\n\nfor reali in range(realization):\n _chosen_combi_path = path_combi[index[reali]]\n _chosen_checkcate_path = path_combi[index_checkcate[reali]]\n# _chosen_checkcate_path = path_combi[:,reali]\n _chosen_sameclus_path = path_combi[index_sameclus[reali]]\n \n W_cross = np.vstack((Ar[:,[_chosen_combi_path[1]],reali],Ar[:,[_chosen_combi_path[3]],reali]))\n F_cross = np.vstack((At[:,[_chosen_combi_path[0]],reali],At[:,[_chosen_combi_path[2]],reali]))\n \n W_checkcate = np.vstack((Ar[:,[_chosen_checkcate_path[1]],reali],Ar[:,[_chosen_checkcate_path[3]],reali]))\n F_checkcate = np.vstack((At[:,[_chosen_checkcate_path[0]],reali],At[:,[_chosen_checkcate_path[2]],reali]))\n# W_steer = np.vstack((Ar[:,[_chosen_steer_path[0]],reali],Ar[:,[_chosen_steer_path[1]],reali]))\n# F_steer = np.vstack((At[:,[_chosen_steer_path[0]],reali],At[:,[_chosen_steer_path[1]],reali]))\n \n W_samecl = np.vstack((Ar[:,[_chosen_sameclus_path[1]],reali],Ar[:,[_chosen_sameclus_path[3]],reali]))\n F_samecl = np.vstack((At[:,[_chosen_sameclus_path[0]],reali],At[:,[_chosen_sameclus_path[2]],reali]))\n for s in range(smax):\n R_cross[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_cross)@H[:,:,reali]@F_cross@F_cross.conj().T@H[:,:,reali].conj().T@W_cross))\n R_checkcate[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_checkcate)@H[:,:,reali]@F_checkcate@F_checkcate.conj().T@H[:,:,reali].conj().T@W_checkcate))\n R_samecl[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_samecl)@H[:,:,reali]@F_samecl@F_samecl.conj().T@H[:,:,reali].conj().T@W_samecl))", "_____no_output_____" ], [ "x = np.linalg.norm(F_cross,'fro')\nprint(\"Ns\", Ns , \" Frobenius norm FRF*FBB=\", x**2)", "Ns 1 Frobenius norm FRF*FBB= 1.0\n" ], [ "plt.plot(SNR_dB, (np.sum(R_cross,axis=1).real)/realization, label='joint polarization beam steering')\nplt.plot(SNR_dB, (np.sum(R_checkcate,axis=1).real)/realization, label='one category beam steering')\nplt.plot(SNR_dB, (np.sum(R_samecl,axis=1).real)/realization, label='same ray beam steering')\n\nplt.legend(loc='upper left',prop={'size': 9})\nplt.xlabel('SNR(dB)',fontsize=11)\nplt.ylabel('Spectral Efficiency (bits/s/Hz)',fontsize=11)\nplt.tick_params(axis='both', which='major', labelsize=9)\nplt.ylim(0,11)\nplt.grid()\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb3ff07693e32369a59c89b2284ea3037cc7c28c
18,464
ipynb
Jupyter Notebook
Python/60_Registration_Introduction.ipynb
seungkyoon/SimpleITK-Notebooks
b8e828d5ae855fb49186cd4adfd2cd12b7b96553
[ "Apache-2.0" ]
1
2018-01-12T08:03:53.000Z
2018-01-12T08:03:53.000Z
Python/60_Registration_Introduction.ipynb
seungkyoon/SimpleITK-Notebooks
b8e828d5ae855fb49186cd4adfd2cd12b7b96553
[ "Apache-2.0" ]
null
null
null
Python/60_Registration_Introduction.ipynb
seungkyoon/SimpleITK-Notebooks
b8e828d5ae855fb49186cd4adfd2cd12b7b96553
[ "Apache-2.0" ]
2
2018-12-24T06:43:52.000Z
2020-01-13T08:31:19.000Z
41.214286
392
0.636265
[ [ [ "<h1 align=\"center\">Introduction to SimpleITKv4 Registration</h1>\n\n\n<table width=\"100%\">\n<tr style=\"background-color: red;\"><td><font color=\"white\">SimpleITK conventions:</font></td></tr>\n<tr><td>\n<ul>\n<li>Dimensionality and pixel type of registered images is required to be the same (2D/2D or 3D/3D).</li>\n<li>Supported pixel types are sitkFloat32 and sitkFloat64 (use the SimpleITK <a href=\"http://www.itk.org/SimpleITKDoxygen/html/namespaceitk_1_1simple.html#af8c9d7cc96a299a05890e9c3db911885\">Cast()</a> function if your image's pixel type is something else).\n</ul>\n</td></tr>\n</table>\n\n\n## Registration Components \n\n<img src=\"ITKv4RegistrationComponentsDiagram.svg\" style=\"width:700px\"/><br><br>\n\nThere are many options for creating an instance of the registration framework, all of which are configured in SimpleITK via methods of the <a href=\"http://www.itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1ImageRegistrationMethod.html\">ImageRegistrationMethod</a> class. This class encapsulates many of the components available in ITK for constructing a registration instance.\n\nCurrently, the available choices from the following groups of ITK components are:\n\n### Optimizers\n\nThe SimpleITK registration framework supports several optimizer types via the SetOptimizerAsX() methods, these include:\n\n<ul>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1ExhaustiveOptimizerv4.html\">Exhaustive</a>\n </li>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1AmoebaOptimizerv4.html\">Nelder-Mead downhill simplex</a>, a.k.a. Amoeba.\n </li>\n <li>\n <a href=\"https://itk.org/Doxygen/html/classitk_1_1PowellOptimizerv4.html\">Powell optimizer</a>.\n </li>\n <li>\n <a href=\"https://itk.org/Doxygen/html/classitk_1_1OnePlusOneEvolutionaryOptimizerv4.html\">1+1 evolutionary optimizer</a>.\n </li>\n <li>\n Variations on gradient descent:\n <ul>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1GradientDescentOptimizerv4Template.html\">GradientDescent</a>\n </li>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1GradientDescentLineSearchOptimizerv4Template.html\">GradientDescentLineSearch</a>\n </li>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1RegularStepGradientDescentOptimizerv4.html\">RegularStepGradientDescent</a>\n </li>\n </ul>\n </li>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1ConjugateGradientLineSearchOptimizerv4Template.html\">ConjugateGradientLineSearch</a> \n </li>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1LBFGSBOptimizerv4.html\">L-BFGS-B</a> (Limited memory Broyden, Fletcher,Goldfarb,Shannon-Bound Constrained) - supports the use of simple constraints ($l\\leq x \\leq u$) \n </li>\n</ul>\n\n \n### Similarity metrics\n\nThe SimpleITK registration framework supports several metric types via the SetMetricAsX() methods, these include:\n\n<ul>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1MeanSquaresImageToImageMetricv4.html\">MeanSquares</a>\n </li>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1DemonsImageToImageMetricv4.html\">Demons</a>\n </li>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1CorrelationImageToImageMetricv4.html\">Correlation</a>\n </li>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1ANTSNeighborhoodCorrelationImageToImageMetricv4.html\">ANTSNeighborhoodCorrelation</a>\n </li>\n <li>\n <a 
href=\"http://www.itk.org/Doxygen/html/classitk_1_1JointHistogramMutualInformationImageToImageMetricv4.html\">JointHistogramMutualInformation</a>\n </li>\n <li>\n <a href=\"http://www.itk.org/Doxygen/html/classitk_1_1MattesMutualInformationImageToImageMetricv4.html\">MattesMutualInformation</a>\n </li>\n</ul>\n\n\n### Interpolators\n\nThe SimpleITK registration framework supports several interpolators via the SetInterpolator() method, which receives one of\nthe <a href=\"http://www.itk.org/SimpleITKDoxygen/html/namespaceitk_1_1simple.html#a7cb1ef8bd02c669c02ea2f9f5aa374e5\">following enumerations</a>:\n<ul>\n<li> sitkNearestNeighbor </li>\n<li> sitkLinear </li>\n<li> sitkBSpline </li>\n<li> sitkGaussian </li>\n<li> sitkHammingWindowedSinc </li>\n<li> sitkCosineWindowedSinc </li>\n<li> sitkWelchWindowedSinc </li>\n<li> sitkLanczosWindowedSinc </li>\n<li> sitkBlackmanWindowedSinc </li>\n</ul>\n\n## Data - Retrospective Image Registration Evaluation\n\nWe will be using part of the training data from the Retrospective Image Registration Evaluation (<a href=\"http://www.insight-journal.org/rire/\">RIRE</a>) project.", "_____no_output_____" ] ], [ [ "import SimpleITK as sitk\n\n# Utility method that either downloads data from the MIDAS repository or\n# if already downloaded returns the file name for reading from disk (cached data).\n%run update_path_to_download_script\nfrom downloaddata import fetch_data as fdata\n\n# Always write output to a separate directory, we don't want to pollute the source directory. \nimport os\nOUTPUT_DIR = 'Output'", "_____no_output_____" ] ], [ [ "## Utility functions\nA number of utility callback functions for image display and for plotting the similarity metric during registration.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom ipywidgets import interact, fixed\nfrom IPython.display import clear_output\n\n# Callback invoked by the interact IPython method for scrolling through the image stacks of\n# the two images (moving and fixed).\ndef display_images(fixed_image_z, moving_image_z, fixed_npa, moving_npa):\n # Create a figure with two subplots and the specified size.\n plt.subplots(1,2,figsize=(10,8))\n \n # Draw the fixed image in the first subplot.\n plt.subplot(1,2,1)\n plt.imshow(fixed_npa[fixed_image_z,:,:],cmap=plt.cm.Greys_r);\n plt.title('fixed image')\n plt.axis('off')\n \n # Draw the moving image in the second subplot.\n plt.subplot(1,2,2)\n plt.imshow(moving_npa[moving_image_z,:,:],cmap=plt.cm.Greys_r);\n plt.title('moving image')\n plt.axis('off')\n \n plt.show()\n\n# Callback invoked by the IPython interact method for scrolling and modifying the alpha blending\n# of an image stack of two images that occupy the same physical space. 
\ndef display_images_with_alpha(image_z, alpha, fixed, moving):\n img = (1.0 - alpha)*fixed[:,:,image_z] + alpha*moving[:,:,image_z] \n plt.imshow(sitk.GetArrayViewFromImage(img),cmap=plt.cm.Greys_r);\n plt.axis('off')\n plt.show()\n \n# Callback invoked when the StartEvent happens, sets up our new data.\ndef start_plot():\n global metric_values, multires_iterations\n \n metric_values = []\n multires_iterations = []\n\n# Callback invoked when the EndEvent happens, do cleanup of data and figure.\ndef end_plot():\n global metric_values, multires_iterations\n \n del metric_values\n del multires_iterations\n # Close figure, we don't want to get a duplicate of the plot latter on.\n plt.close()\n\n# Callback invoked when the IterationEvent happens, update our data and display new figure. \ndef plot_values(registration_method):\n global metric_values, multires_iterations\n \n metric_values.append(registration_method.GetMetricValue()) \n # Clear the output area (wait=True, to reduce flickering), and plot current data\n clear_output(wait=True)\n # Plot the similarity metric values\n plt.plot(metric_values, 'r')\n plt.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*')\n plt.xlabel('Iteration Number',fontsize=12)\n plt.ylabel('Metric Value',fontsize=12)\n plt.show()\n \n# Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the \n# metric_values list. \ndef update_multires_iterations():\n global metric_values, multires_iterations\n multires_iterations.append(len(metric_values)) ", "_____no_output_____" ] ], [ [ "## Read images\n\nWe first read the images, casting the pixel type to that required for registration (Float32 or Float64) and look at them.", "_____no_output_____" ] ], [ [ "fixed_image = sitk.ReadImage(fdata(\"training_001_ct.mha\"), sitk.sitkFloat32)\nmoving_image = sitk.ReadImage(fdata(\"training_001_mr_T1.mha\"), sitk.sitkFloat32) \n\ninteract(display_images, fixed_image_z=(0,fixed_image.GetSize()[2]-1), moving_image_z=(0,moving_image.GetSize()[2]-1), fixed_npa = fixed(sitk.GetArrayViewFromImage(fixed_image)), moving_npa=fixed(sitk.GetArrayViewFromImage(moving_image)));", "_____no_output_____" ] ], [ [ "## Initial Alignment\n\nUse the CenteredTransformInitializer to align the centers of the two volumes and set the center of rotation to the center of the fixed image.", "_____no_output_____" ] ], [ [ "initial_transform = sitk.CenteredTransformInitializer(fixed_image, \n moving_image, \n sitk.Euler3DTransform(), \n sitk.CenteredTransformInitializerFilter.GEOMETRY)\n\nmoving_resampled = sitk.Resample(moving_image, fixed_image, initial_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())\n\ninteract(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));", "_____no_output_____" ] ], [ [ "## Registration\n\nThe specific registration task at hand estimates a 3D rigid transformation between images of different modalities. There are multiple components from each group (optimizers, similarity metrics, interpolators) that are appropriate for the task. Note that each component selection requires setting some parameter values. 
We have made the following choices:\n\n<ul>\n<li>Similarity metric, mutual information (Mattes MI):\n<ul>\n <li>Number of histogram bins, 50.</li>\n <li>Sampling strategy, random.</li>\n <li>Sampling percentage, 1%.</li>\n</ul>\n</li>\n<li>Interpolator, sitkLinear.</li>\n<li>Optimizer, gradient descent: \n<ul>\n <li>Learning rate, step size along traversal direction in parameter space, 1.0 .</li>\n <li>Number of iterations, maximal number of iterations, 100.</li>\n <li>Convergence minimum value, value used for convergence checking in conjunction with the energy profile of the similarity metric that is estimated in the given window size, 1e-6.</li>\n <li>Convergence window size, number of values of the similarity metric which are used to estimate the energy profile of the similarity metric, 10.</li>\n</ul>\n</li>\n</ul>\n\n\nPerform registration using the settings given above, and take advantage of the built in multi-resolution framework, use a three tier pyramid. \n\nIn this example we plot the similarity metric's value during registration. Note that the change of scales in the multi-resolution framework is readily visible.", "_____no_output_____" ] ], [ [ "registration_method = sitk.ImageRegistrationMethod()\n\n# Similarity metric settings.\nregistration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)\nregistration_method.SetMetricSamplingStrategy(registration_method.RANDOM)\nregistration_method.SetMetricSamplingPercentage(0.01)\n\nregistration_method.SetInterpolator(sitk.sitkLinear)\n\n# Optimizer settings.\nregistration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100, convergenceMinimumValue=1e-6, convergenceWindowSize=10)\nregistration_method.SetOptimizerScalesFromPhysicalShift()\n\n# Setup for the multi-resolution framework. \nregistration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])\nregistration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])\nregistration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()\n\n# Don't optimize in-place, we would possibly like to run this cell multiple times.\nregistration_method.SetInitialTransform(initial_transform, inPlace=False)\n\n# Connect all of the observers so that we can perform plotting during registration.\nregistration_method.AddCommand(sitk.sitkStartEvent, start_plot)\nregistration_method.AddCommand(sitk.sitkEndEvent, end_plot)\nregistration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations) \nregistration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method))\n\nfinal_transform = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32), \n sitk.Cast(moving_image, sitk.sitkFloat32))", "_____no_output_____" ] ], [ [ "## Post registration analysis", "_____no_output_____" ], [ "Query the registration method to see the metric value and the reason the optimization terminated. \n\nThe metric value allows us to compare multiple registration runs as there is a probabilistic aspect to our registration, we are using random sampling to estimate the similarity metric.\n\nAlways remember to query why the optimizer terminated. 
This will help you understand whether termination is too early, either due to thresholds being too tight, early termination due to small number of iterations - numberOfIterations, or too loose, early termination due to large value for minimal change in similarity measure - convergenceMinimumValue) ", "_____no_output_____" ] ], [ [ "print('Final metric value: {0}'.format(registration_method.GetMetricValue()))\nprint('Optimizer\\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))", "_____no_output_____" ] ], [ [ "Now visually inspect the results.", "_____no_output_____" ] ], [ [ "moving_resampled = sitk.Resample(moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())\n\ninteract(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));", "_____no_output_____" ] ], [ [ "If we are satisfied with the results, save them to file.", "_____no_output_____" ] ], [ [ "sitk.WriteImage(moving_resampled, os.path.join(OUTPUT_DIR, 'RIRE_training_001_mr_T1_resampled.mha'))\nsitk.WriteTransform(final_transform, os.path.join(OUTPUT_DIR, 'RIRE_training_001_CT_2_mr_T1.tfm'))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb3fff5d5543a6de146a5595be8be107da9e6bb5
222,795
ipynb
Jupyter Notebook
MultiObjectiveModel_YMPNet_Pavan/planercnn_inference1.ipynb
eva5covergence/EVA5_AI_Projects
7052373c52b6b9901cd0bc05a4758dd4b63f7480
[ "MIT" ]
null
null
null
MultiObjectiveModel_YMPNet_Pavan/planercnn_inference1.ipynb
eva5covergence/EVA5_AI_Projects
7052373c52b6b9901cd0bc05a4758dd4b63f7480
[ "MIT" ]
null
null
null
MultiObjectiveModel_YMPNet_Pavan/planercnn_inference1.ipynb
eva5covergence/EVA5_AI_Projects
7052373c52b6b9901cd0bc05a4758dd4b63f7480
[ "MIT" ]
null
null
null
39.756424
891
0.472618
[ [ [ "# Colab notebook or tutorial\n### [How to run PyTorch with GPU and CUDA 8.0 support on Google Colab](https://www.dlology.com/blog/how-to-run-pytorch-with-gpu-and-cuda-92-support-on-google-colab/)", "_____no_output_____" ], [ "# New Section", "_____no_output_____" ] ], [ [ "!nvidia-smi", "Sat Oct 31 11:17:30 2020 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 455.32.00 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 57C P8 10W / 70W | 0MiB / 15079MiB | 0% Default |\n| | | ERR! |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ], [ "!cat /etc/*-release", "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=18.04\nDISTRIB_CODENAME=bionic\nDISTRIB_DESCRIPTION=\"Ubuntu 18.04.5 LTS\"\nNAME=\"Ubuntu\"\nVERSION=\"18.04.5 LTS (Bionic Beaver)\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 18.04.5 LTS\"\nVERSION_ID=\"18.04\"\nHOME_URL=\"https://www.ubuntu.com/\"\nSUPPORT_URL=\"https://help.ubuntu.com/\"\nBUG_REPORT_URL=\"https://bugs.launchpad.net/ubuntu/\"\nPRIVACY_POLICY_URL=\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"\nVERSION_CODENAME=bionic\nUBUNTU_CODENAME=bionic\n" ] ], [ [ "## Install [Cuda 8.0\n](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&target_distro=Ubuntu&target_version=1710&target_type=deblocal)", "_____no_output_____" ] ], [ [ "!wget https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb", "--2020-10-31 11:17:35-- https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb\nResolving developer.nvidia.com (developer.nvidia.com)... 152.199.0.24\nConnecting to developer.nvidia.com (developer.nvidia.com)|152.199.0.24|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://developer.download.nvidia.com/compute/cuda/8.0/secure/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64.deb?fCVRgkrsiwmpUva2l9hiUDzKMv6bInDxpI3Qg82smennbiEaGa_0k3vazanNBPXnwwAlYXddlOvnTvNpwD7mBmevEMSJnsFNgWBLLv8QSYuGgOG5G5DKtXq7XdIUfk_7XuvPXakD9SnfUG6bgmMRXf9jzdL4-bS2_GIgVrpxhr4giomowiuC35Ycc_AuftTiiGVHk4Sh1-lwNAMNSMBl4Apzow [following]\n--2020-10-31 11:17:35-- https://developer.download.nvidia.com/compute/cuda/8.0/secure/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64.deb?fCVRgkrsiwmpUva2l9hiUDzKMv6bInDxpI3Qg82smennbiEaGa_0k3vazanNBPXnwwAlYXddlOvnTvNpwD7mBmevEMSJnsFNgWBLLv8QSYuGgOG5G5DKtXq7XdIUfk_7XuvPXakD9SnfUG6bgmMRXf9jzdL4-bS2_GIgVrpxhr4giomowiuC35Ycc_AuftTiiGVHk4Sh1-lwNAMNSMBl4Apzow\nResolving developer.download.nvidia.com (developer.download.nvidia.com)... 
152.195.19.142\nConnecting to developer.download.nvidia.com (developer.download.nvidia.com)|152.195.19.142|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 1913589814 (1.8G) [application/x-deb]\nSaving to: ‘cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb’\n\ncuda-repo-ubuntu160 100%[===================>] 1.78G 41.0MB/s in 14s \n\n2020-10-31 11:17:49 (129 MB/s) - ‘cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb’ saved [1913589814/1913589814]\n\n" ], [ "# cd ../\n!ls", "cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb sample_data\nplanercnn_with_adf\n" ], [ "!dpkg -i cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb", "(Reading database ... 153720 files and directories currently installed.)\nPreparing to unpack cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb ...\nUnpacking cuda-repo-ubuntu1604-8-0-local-ga2 (8.0.61-1) over (8.0.61-1) ...\nSetting up cuda-repo-ubuntu1604-8-0-local-ga2 (8.0.61-1) ...\nWarning: The postinst maintainerscript of the package cuda-repo-ubuntu1604-8-0-local-ga2\nWarning: seems to use apt-key (provided by apt) without depending on gnupg or gnupg2.\nWarning: This will BREAK in the future and should be fixed by the package maintainer(s).\nNote: Check first if apt-key functionality is needed at all - it probably isn't!\nWarning: apt-key should not be used in scripts (called from postinst maintainerscript of the package cuda-repo-ubuntu1604-8-0-local-ga2)\nOK\n" ], [ "!ls /var/", "backups colab\t\t\t lib\t lock mail run tmp\ncache\t cuda-repo-8-0-local-ga2 local log opt spool\n" ], [ "!ls /var/cuda-repo-8-0-local-ga2 | grep .pub", "7fa2af80.pub\n" ], [ "!apt-key add /var/cuda-repo-8-0-local-ga2/7fa2af80.pub", "OK\n" ], [ "!apt-get update", "\r0% [Working]\r \rGet:1 file:/var/cuda-repo-8-0-local-ga2 InRelease\n\r0% [Connecting to security.ubuntu.com] [Connecting to cloud.r-project.org] [1 I\r \rIgn:1 file:/var/cuda-repo-8-0-local-ga2 InRelease\n\r0% [Connecting to security.ubuntu.com] [Connecting to cloud.r-project.org] [Con\r \rGet:2 file:/var/cuda-repo-8-0-local-ga2 Release [574 B]\n\r0% [Connecting to security.ubuntu.com] [Connecting to cloud.r-project.org] [Con\r \rGet:2 file:/var/cuda-repo-8-0-local-ga2 Release [574 B]\n\r0% [2 Release 0 B/574 B 0%] [Connecting to archive.ubuntu.com] [Connecting to s\r0% [Connecting to archive.ubuntu.com] [Connecting to security.ubuntu.com] [Conn\r0% [Release.gpg gpgv 574 B] [Connecting to archive.ubuntu.com] [Connecting to s\r \rHit:3 http://security.ubuntu.com/ubuntu bionic-security InRelease\n\r0% [Release.gpg gpgv 574 B] [Connecting to archive.ubuntu.com (91.189.88.142)] \r \rHit:4 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease\n\r0% [Release.gpg gpgv 574 B] [Connecting to archive.ubuntu.com (91.189.88.142)] \r \rIgn:5 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\n\r0% [Release.gpg gpgv 574 B] [Waiting for headers] [Waiting for headers] [Waitin\r \rHit:6 http://archive.ubuntu.com/ubuntu bionic InRelease\n\r \rHit:7 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease\n\r0% [Release.gpg gpgv 574 B] [Waiting for headers] [Connecting to ppa.launchpad.\r0% [Waiting for headers] [Connecting to ppa.launchpad.net (91.189.95.83)] [Wait\r0% [3 InRelease gpgv 88.7 kB] [Waiting for headers] [Connecting to ppa.launchpa\r \rIgn:9 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\n\r0% [3 InRelease gpgv 88.7 kB] [Waiting for headers] [Connecting to ppa.launchpa\r 
\rHit:10 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release\nHit:11 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nHit:12 http://archive.ubuntu.com/ubuntu bionic-updates InRelease\nHit:13 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\nHit:14 http://archive.ubuntu.com/ubuntu bionic-backports InRelease\nReading package lists... Done\n" ], [ "!apt-get install cuda-8-0", "Reading package lists... Done\nBuilding dependency tree \nReading state information... Done\ncuda-8-0 is already the newest version (8.0.61-1).\n0 upgraded, 0 newly installed, 0 to remove and 13 not upgraded.\n" ] ], [ [ "## Install [PyTorch](https://pytorch.org/) with Cuda 8.0 support", "_____no_output_____" ] ], [ [ "!nvcc --version", "nvcc: NVIDIA (R) Cuda compiler driver\nCopyright (c) 2005-2016 NVIDIA Corporation\nBuilt on Tue_Jan_10_13:22:03_CST_2017\nCuda compilation tools, release 8.0, V8.0.61\n" ], [ "!pip install torch==0.4.0\n#!pip install torchvision", "Collecting torch==0.4.0\n Using cached https://files.pythonhosted.org/packages/69/43/380514bd9663f1bf708abeb359b8b48d3fabb1c8e95bb3427a980a064c57/torch-0.4.0-cp36-cp36m-manylinux1_x86_64.whl\n\u001b[31mERROR: torchvision 0.7.0+cu101 has requirement torch==1.6.0, but you'll have torch 0.4.0 which is incompatible.\u001b[0m\n\u001b[31mERROR: fastai 1.0.61 has requirement torch>=1.0.0, but you'll have torch 0.4.0 which is incompatible.\u001b[0m\nInstalling collected packages: torch\n Found existing installation: torch 0.4.1\n Uninstalling torch-0.4.1:\n Successfully uninstalled torch-0.4.1\nSuccessfully installed torch-0.4.0\n" ], [ "!git clone https://github.com/UTS-CAS/planercnn_with_adf.git", "fatal: destination path 'planercnn_with_adf' already exists and is not an empty directory.\n" ], [ "cd planercnn_with_adf", "/content/planercnn_with_adf\n" ], [ "!pip install -r requirements.txt", "Requirement already satisfied: cffi==1.11.5 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 1)) (1.11.5)\nRequirement already satisfied: numpy==1.15.4 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 2)) (1.15.4)\nRequirement already satisfied: opencv-python==3.4.4.19 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 3)) (3.4.4.19)\nRequirement already satisfied: scikit-image==0.14.1 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 4)) (0.14.1)\nCollecting torch==0.4.1\n Using cached https://files.pythonhosted.org/packages/49/0e/e382bcf1a6ae8225f50b99cc26effa2d4cc6d66975ccf3fa9590efcbedce/torch-0.4.1-cp36-cp36m-manylinux1_x86_64.whl\nRequirement already satisfied: tqdm==4.28.1 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 6)) (4.28.1)\nRequirement already satisfied: pycparser in /usr/local/lib/python3.6/dist-packages (from cffi==1.11.5->-r requirements.txt (line 1)) (2.20)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.14.1->-r requirements.txt (line 4)) (1.15.0)\nRequirement already satisfied: pillow>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.14.1->-r requirements.txt (line 4)) (7.0.0)\nRequirement already satisfied: networkx>=1.8 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.14.1->-r requirements.txt (line 4)) (2.5)\nRequirement already satisfied: cloudpickle>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.14.1->-r 
requirements.txt (line 4)) (1.3.0)\nRequirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.14.1->-r requirements.txt (line 4)) (1.1.1)\nRequirement already satisfied: dask[array]>=0.9.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.14.1->-r requirements.txt (line 4)) (2.12.0)\nRequirement already satisfied: matplotlib>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.14.1->-r requirements.txt (line 4)) (3.2.2)\nRequirement already satisfied: scipy>=0.17.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.14.1->-r requirements.txt (line 4)) (1.4.1)\nRequirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx>=1.8->scikit-image==0.14.1->-r requirements.txt (line 4)) (4.4.2)\nRequirement already satisfied: toolz>=0.7.3; extra == \"array\" in /usr/local/lib/python3.6/dist-packages (from dask[array]>=0.9.0->scikit-image==0.14.1->-r requirements.txt (line 4)) (0.11.1)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.0.0->scikit-image==0.14.1->-r requirements.txt (line 4)) (1.2.0)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.0.0->scikit-image==0.14.1->-r requirements.txt (line 4)) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.0.0->scikit-image==0.14.1->-r requirements.txt (line 4)) (2.4.7)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.0.0->scikit-image==0.14.1->-r requirements.txt (line 4)) (2.8.1)\n\u001b[31mERROR: torchvision 0.7.0+cu101 has requirement torch==1.6.0, but you'll have torch 0.4.1 which is incompatible.\u001b[0m\n\u001b[31mERROR: fastai 1.0.61 has requirement torch>=1.0.0, but you'll have torch 0.4.1 which is incompatible.\u001b[0m\nInstalling collected packages: torch\n Found existing installation: torch 0.4.0\n Uninstalling torch-0.4.0:\n Successfully uninstalled torch-0.4.0\nSuccessfully installed torch-0.4.1\n" ], [ "!ls /usr/local/cuda-8.0", "bin extras lib64\t libnvvp nvml\tREADME\t share\ttargets version.txt\ndoc include libnsight LICENSE nvvm\tsamples src\ttools\n" ], [ "# %%writefile make.sh\n\n# #!/usr/bin/env bash\n\n# # CUDA_PATH=/usr/local/cuda/\n\n# export CUDA_PATH=/usr/local/cuda-9.2/\n# #You may also want to ad the following\n# #export C_INCLUDE_PATH=/opt/cuda/include\n\n# export CXXFLAGS=\"-std=c++11\"\n# export CFLAGS=\"-std=c99\"\n\n# # python setup.py build_ext --inplace\n# # rm -rf build\n\n# CUDA_ARCH=\"-gencode arch=compute_30,code=sm_30 \\\n# -gencode arch=compute_35,code=sm_35 \\\n# -gencode arch=compute_50,code=sm_50 \\\n# -gencode arch=compute_52,code=sm_52 \\\n# -gencode arch=compute_60,code=sm_60 \\\n# -gencode arch=compute_61,code=sm_61 \"\n\n# # compile NMS\n# cd nms/src/cuda/\n# echo \"Compiling nms kernels by nvcc...\"\n# #nvcc -c -o nms_cuda_kernel.cu.o nms_cuda_kernel.cu -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC $CUDA_ARCH\n# nvcc -c -o nms_kernel.cu.o nms_kernel.cu -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC $CUDA_ARCH\n\n# cd ../../\n\n# python build.py", "_____no_output_____" ], [ "!export CUDA_PATH=/usr/local/cuda-8.0/", "_____no_output_____" ], [ "!nvcc --version", "nvcc: NVIDIA (R) Cuda compiler driver\nCopyright (c) 2005-2016 NVIDIA Corporation\nBuilt on Tue_Jan_10_13:22:03_CST_2017\nCuda compilation 
tools, release 8.0, V8.0.61\n" ], [ "!ls", "anchors evaluate_adf.py models_adf requirements.txt\nconfig.py evaluate.py\t nms\t roialign\ncontrib evaluate_utils.py options.py train_planercnn.py\ndata_prep example_images plane_utils.py utils.py\ndatasets models\t README.md visualize_utils.py\n" ], [ "!sudo apt-get update && \\\nsudo apt-get install build-essential software-properties-common -y && \\\nsudo add-apt-repository ppa:ubuntu-toolchain-r/test -y && \\\nsudo apt-get update && \\\nsudo apt-get install gcc-5 g++-5 -y && \\\nsudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 50 --slave /usr/bin/g++ g++ /usr/bin/g++-5 && \\\ngcc -v", "\r0% [Working]\r \rGet:1 file:/var/cuda-repo-8-0-local-ga2 InRelease\n\r \rIgn:1 file:/var/cuda-repo-8-0-local-ga2 InRelease\n\r0% [Connecting to security.ubuntu.com]\r \rGet:2 file:/var/cuda-repo-8-0-local-ga2 Release [574 B]\n\r0% [Connecting to security.ubuntu.com] [Connecting to cloud.r-project.org]\r \rGet:2 file:/var/cuda-repo-8-0-local-ga2 Release [574 B]\n\r0% [Connecting to archive.ubuntu.com] [Connecting to security.ubuntu.com (91.18\r0% [Release.gpg gpgv 574 B] [Connecting to archive.ubuntu.com] [Connecting to s\r \rHit:3 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease\n\r0% [Release.gpg gpgv 574 B] [Connecting to archive.ubuntu.com] [Connecting to s\r0% [Connecting to archive.ubuntu.com (91.189.88.152)] [Waiting for headers] [Co\r0% [3 InRelease gpgv 3,626 B] [Connecting to archive.ubuntu.com (91.189.88.152)\r \rIgn:5 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\nHit:6 http://security.ubuntu.com/ubuntu bionic-security InRelease\nIgn:7 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\nHit:8 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release\nHit:9 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nHit:10 http://archive.ubuntu.com/ubuntu bionic InRelease\nHit:11 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease\nHit:12 http://archive.ubuntu.com/ubuntu bionic-updates InRelease\nHit:14 http://archive.ubuntu.com/ubuntu bionic-backports InRelease\nHit:16 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\nReading package lists... Done\nReading package lists... Done\nBuilding dependency tree \nReading state information... 
Done\nbuild-essential is already the newest version (12.4ubuntu1).\nsoftware-properties-common is already the newest version (0.96.24.32.14).\n0 upgraded, 0 newly installed, 0 to remove and 13 not upgraded.\nGet:1 file:/var/cuda-repo-8-0-local-ga2 InRelease\nIgn:1 file:/var/cuda-repo-8-0-local-ga2 InRelease\nGet:2 file:/var/cuda-repo-8-0-local-ga2 Release [574 B]\nGet:2 file:/var/cuda-repo-8-0-local-ga2 Release [574 B]\nHit:3 http://security.ubuntu.com/ubuntu bionic-security InRelease\nHit:4 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease\nIgn:6 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\nHit:7 http://archive.ubuntu.com/ubuntu bionic InRelease\nIgn:8 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\nHit:9 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release\nHit:10 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nHit:11 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease\nHit:12 http://archive.ubuntu.com/ubuntu bionic-updates InRelease\nHit:13 http://archive.ubuntu.com/ubuntu bionic-backports InRelease\nHit:14 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\nGet:17 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic InRelease [15.4 kB]\nGet:18 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 Packages [39.9 kB]\nFetched 55.3 kB in 1s (45.4 kB/s)\nReading package lists... Done\nGet:1 file:/var/cuda-repo-8-0-local-ga2 InRelease\nIgn:1 file:/var/cuda-repo-8-0-local-ga2 InRelease\nGet:2 file:/var/cuda-repo-8-0-local-ga2 Release [574 B]\nGet:2 file:/var/cuda-repo-8-0-local-ga2 Release [574 B]\nHit:3 http://security.ubuntu.com/ubuntu bionic-security InRelease\nHit:4 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease\nIgn:6 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\nHit:7 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease\nHit:8 http://archive.ubuntu.com/ubuntu bionic InRelease\nIgn:9 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\nHit:10 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release\nHit:11 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nHit:12 http://archive.ubuntu.com/ubuntu bionic-updates InRelease\nHit:13 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\nHit:14 http://archive.ubuntu.com/ubuntu bionic-backports InRelease\nHit:15 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic InRelease\nReading package lists... Done\nReading package lists... Done\nBuilding dependency tree \nReading state information... 
Done\nThe following additional packages will be installed:\n cpp-5 gcc-5-base libasan2 libgcc-5-dev libisl15 libmpx0 libstdc++-5-dev\nSuggested packages:\n gcc-5-locales g++-5-multilib gcc-5-doc libstdc++6-5-dbg gcc-5-multilib\n libgcc1-dbg libgomp1-dbg libitm1-dbg libatomic1-dbg libasan2-dbg\n liblsan0-dbg libtsan0-dbg libubsan0-dbg libcilkrts5-dbg libmpx0-dbg\n libquadmath0-dbg libstdc++-5-doc\nThe following NEW packages will be installed:\n cpp-5 g++-5 gcc-5 gcc-5-base libasan2 libgcc-5-dev libisl15 libmpx0\n libstdc++-5-dev\n0 upgraded, 9 newly installed, 0 to remove and 25 not upgraded.\nNeed to get 29.1 MB of archives.\nAfter this operation, 100 MB of additional disk space will be used.\nGet:1 http://archive.ubuntu.com/ubuntu bionic/universe amd64 gcc-5-base amd64 5.5.0-12ubuntu1 [17.1 kB]\nGet:2 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libisl15 amd64 0.18-4 [548 kB]\nGet:3 http://archive.ubuntu.com/ubuntu bionic/universe amd64 cpp-5 amd64 5.5.0-12ubuntu1 [7,785 kB]\nGet:4 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libasan2 amd64 5.5.0-12ubuntu1 [264 kB]\nGet:5 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libmpx0 amd64 5.5.0-12ubuntu1 [9,888 B]\nGet:6 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libgcc-5-dev amd64 5.5.0-12ubuntu1 [2,224 kB]\nGet:7 http://archive.ubuntu.com/ubuntu bionic/universe amd64 gcc-5 amd64 5.5.0-12ubuntu1 [8,357 kB]\nGet:8 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libstdc++-5-dev amd64 5.5.0-12ubuntu1 [1,415 kB]\nGet:9 http://archive.ubuntu.com/ubuntu bionic/universe amd64 g++-5 amd64 5.5.0-12ubuntu1 [8,450 kB]\nFetched 29.1 MB in 2s (18.3 MB/s)\ndebconf: unable to initialize frontend: Dialog\ndebconf: (No usable dialog-like program is installed, so the dialog based frontend cannot be used. at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 76, <> line 9.)\ndebconf: falling back to frontend: Readline\ndebconf: unable to initialize frontend: Readline\ndebconf: (This frontend requires a controlling tty.)\ndebconf: falling back to frontend: Teletype\ndpkg-preconfigure: unable to re-open stdin: \nSelecting previously unselected package gcc-5-base:amd64.\n(Reading database ... 
153720 files and directories currently installed.)\nPreparing to unpack .../0-gcc-5-base_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking gcc-5-base:amd64 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package libisl15:amd64.\nPreparing to unpack .../1-libisl15_0.18-4_amd64.deb ...\nUnpacking libisl15:amd64 (0.18-4) ...\nSelecting previously unselected package cpp-5.\nPreparing to unpack .../2-cpp-5_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking cpp-5 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package libasan2:amd64.\nPreparing to unpack .../3-libasan2_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking libasan2:amd64 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package libmpx0:amd64.\nPreparing to unpack .../4-libmpx0_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking libmpx0:amd64 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package libgcc-5-dev:amd64.\nPreparing to unpack .../5-libgcc-5-dev_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking libgcc-5-dev:amd64 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package gcc-5.\nPreparing to unpack .../6-gcc-5_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking gcc-5 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package libstdc++-5-dev:amd64.\nPreparing to unpack .../7-libstdc++-5-dev_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking libstdc++-5-dev:amd64 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package g++-5.\nPreparing to unpack .../8-g++-5_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking g++-5 (5.5.0-12ubuntu1) ...\nSetting up libisl15:amd64 (0.18-4) ...\nSetting up gcc-5-base:amd64 (5.5.0-12ubuntu1) ...\nSetting up libmpx0:amd64 (5.5.0-12ubuntu1) ...\nSetting up libasan2:amd64 (5.5.0-12ubuntu1) ...\nSetting up libgcc-5-dev:amd64 (5.5.0-12ubuntu1) ...\nSetting up cpp-5 (5.5.0-12ubuntu1) ...\nSetting up libstdc++-5-dev:amd64 (5.5.0-12ubuntu1) ...\nSetting up gcc-5 (5.5.0-12ubuntu1) ...\nSetting up g++-5 (5.5.0-12ubuntu1) ...\nProcessing triggers for man-db (2.8.3-2ubuntu0.1) ...\nProcessing triggers for libc-bin (2.27-3ubuntu1.2) ...\n/sbin/ldconfig.real: /usr/local/lib/python3.6/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n\nupdate-alternatives: using /usr/bin/gcc-5 to provide /usr/bin/gcc (gcc) in auto mode\nUsing built-in specs.\nCOLLECT_GCC=gcc\nCOLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper\nTarget: x86_64-linux-gnu\nConfigured with: ../src/configure -v --with-pkgversion='Ubuntu 5.5.0-12ubuntu1' --with-bugurl=file:///usr/share/doc/gcc-5/README.Bugs --enable-languages=c,ada,c++,go,d,fortran,objc,obj-c++ --prefix=/usr --program-suffix=-5 --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --with-sysroot=/ --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-libmpx --enable-plugin --enable-default-pie --with-system-zlib --enable-objc-gc --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu\nThread model: posix\ngcc version 5.5.0 20171010 (Ubuntu 5.5.0-12ubuntu1) \n" ], [ "cd /content/planercnn_with_adf/", "/content/planercnn_with_adf\n" ], [ "!ls", "anchors evaluate_adf.py models_adf requirements.txt\nconfig.py evaluate.py\t nms\t roialign\ncontrib evaluate_utils.py options.py train_planercnn.py\ndata_prep 
example_images plane_utils.py utils.py\ndatasets models\t README.md visualize_utils.py\n" ], [ "cd nms/src/cuda/", "/content/planercnn_with_adf/nms/src/cuda\n" ], [ "!gcc --version", "gcc (Ubuntu 5.5.0-12ubuntu1) 5.5.0 20171010\nCopyright (C) 2015 Free Software Foundation, Inc.\nThis is free software; see the source for copying conditions. There is NO\nwarranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n\n" ], [ "# !sudo dpkg --configure -a # run it in case below cmd fails", "_____no_output_____" ], [ "!sudo apt-get install gcc-5 g++-5 g++-5-multilib gfortran-5", "Reading package lists... Done\nBuilding dependency tree \nReading state information... Done\ng++-5 is already the newest version (5.5.0-12ubuntu1).\ngcc-5 is already the newest version (5.5.0-12ubuntu1).\nThe following additional packages will be installed:\n gcc-10-base gcc-5-multilib gcc-6-base gcc-7-multilib gcc-multilib lib32asan2\n lib32asan4 lib32atomic1 lib32cilkrts5 lib32gcc-5-dev lib32gcc-7-dev\n lib32gomp1 lib32itm1 lib32mpx0 lib32mpx2 lib32quadmath0 lib32stdc++-5-dev\n lib32ubsan0 libc6-dev-i386 libc6-dev-x32 libc6-x32 libgfortran-5-dev\n libgfortran3 libx32asan2 libx32asan4 libx32atomic1 libx32cilkrts5\n libx32gcc-5-dev libx32gcc-7-dev libx32gcc-s1 libx32gcc1 libx32gomp1\n libx32itm1 libx32quadmath0 libx32stdc++-5-dev libx32stdc++6 libx32ubsan0\nSuggested packages:\n lib32stdc++6-5-dbg libx32stdc++6-5-dbg gfortran-5-multilib gfortran-5-doc\n libgfortran3-dbg\nThe following NEW packages will be installed:\n g++-5-multilib gcc-10-base gcc-5-multilib gcc-6-base gcc-7-multilib\n gcc-multilib gfortran-5 lib32asan2 lib32asan4 lib32atomic1 lib32cilkrts5\n lib32gcc-5-dev lib32gcc-7-dev lib32gomp1 lib32itm1 lib32mpx0 lib32mpx2\n lib32quadmath0 lib32stdc++-5-dev lib32ubsan0 libc6-dev-i386 libc6-dev-x32\n libc6-x32 libgfortran-5-dev libgfortran3 libx32asan2 libx32asan4\n libx32atomic1 libx32cilkrts5 libx32gcc-5-dev libx32gcc-7-dev libx32gcc-s1\n libx32gcc1 libx32gomp1 libx32itm1 libx32quadmath0 libx32stdc++-5-dev\n libx32stdc++6 libx32ubsan0\n0 upgraded, 39 newly installed, 0 to remove and 25 not upgraded.\nNeed to get 27.7 MB of archives.\nAfter this operation, 123 MB of additional disk space will be used.\nGet:1 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 gcc-10-base amd64 10.1.0-2ubuntu1~18.04 [20.1 kB]\nGet:2 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc6-dev-i386 amd64 2.27-3ubuntu1.2 [1,817 kB]\nGet:3 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 libx32gcc-s1 amd64 10.1.0-2ubuntu1~18.04 [41.2 kB]\nGet:4 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 libx32gcc1 amd64 1:10.1.0-2ubuntu1~18.04 [980 B]\nGet:5 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc6-x32 amd64 2.27-3ubuntu1.2 [2,847 kB]\nGet:6 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 lib32gomp1 amd64 10.1.0-2ubuntu1~18.04 [109 kB]\nGet:7 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc6-dev-x32 amd64 2.27-3ubuntu1.2 [2,018 kB]\nGet:8 http://archive.ubuntu.com/ubuntu bionic/universe amd64 lib32asan2 amd64 5.5.0-12ubuntu1 [260 kB]\nGet:9 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libx32asan2 amd64 5.5.0-12ubuntu1 [252 kB]\nGet:10 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 lib32ubsan0 amd64 7.5.0-3ubuntu1~18.04 [140 kB]\nGet:11 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libx32ubsan0 amd64 7.5.0-3ubuntu1~18.04 [127 kB]\nGet:12 
http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 lib32cilkrts5 amd64 7.5.0-3ubuntu1~18.04 [47.2 kB]\nGet:13 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libx32cilkrts5 amd64 7.5.0-3ubuntu1~18.04 [43.0 kB]\nGet:14 http://archive.ubuntu.com/ubuntu bionic/universe amd64 lib32mpx0 amd64 5.5.0-12ubuntu1 [11.2 kB]\nGet:15 http://archive.ubuntu.com/ubuntu bionic/universe amd64 lib32gcc-5-dev amd64 5.5.0-12ubuntu1 [2,077 kB]\nGet:16 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libx32gcc-5-dev amd64 5.5.0-12ubuntu1 [1,870 kB]\nGet:17 http://archive.ubuntu.com/ubuntu bionic/universe amd64 gcc-5-multilib amd64 5.5.0-12ubuntu1 [1,040 B]\nGet:18 http://archive.ubuntu.com/ubuntu bionic/universe amd64 lib32stdc++-5-dev amd64 5.5.0-12ubuntu1 [640 kB]\nGet:19 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libx32stdc++-5-dev amd64 5.5.0-12ubuntu1 [609 kB]\nGet:20 http://archive.ubuntu.com/ubuntu bionic/universe amd64 g++-5-multilib amd64 5.5.0-12ubuntu1 [1,064 B]\nGet:21 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 gcc-6-base amd64 6.5.0-2ubuntu1~18.04 [16.7 kB]\nGet:22 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 lib32asan4 amd64 7.5.0-3ubuntu1~18.04 [362 kB]\nGet:23 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libx32asan4 amd64 7.5.0-3ubuntu1~18.04 [351 kB]\nGet:24 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 lib32mpx2 amd64 8.4.0-1ubuntu1~18.04 [12.9 kB]\nGet:25 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 lib32gcc-7-dev amd64 7.5.0-3ubuntu1~18.04 [2,211 kB]\nGet:26 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libx32gcc-7-dev amd64 7.5.0-3ubuntu1~18.04 [1,999 kB]\nGet:27 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 libx32gomp1 amd64 10.1.0-2ubuntu1~18.04 [102 kB]\nGet:28 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-7-multilib amd64 7.5.0-3ubuntu1~18.04 [1,048 B]\nGet:29 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-multilib amd64 4:7.4.0-1ubuntu2.3 [1,428 B]\nGet:30 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 libgfortran3 amd64 6.5.0-2ubuntu1~18.04 [270 kB]\nGet:31 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libgfortran-5-dev amd64 5.5.0-12ubuntu1 [296 kB]\nGet:32 http://archive.ubuntu.com/ubuntu bionic/universe amd64 gfortran-5 amd64 5.5.0-12ubuntu1 [8,166 kB]\nGet:33 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 lib32itm1 amd64 10.1.0-2ubuntu1~18.04 [28.1 kB]\nGet:34 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 libx32itm1 amd64 10.1.0-2ubuntu1~18.04 [26.4 kB]\nGet:35 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 lib32atomic1 amd64 10.1.0-2ubuntu1~18.04 [8,748 B]\nGet:36 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 libx32atomic1 amd64 10.1.0-2ubuntu1~18.04 [9,140 B]\nGet:37 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 libx32stdc++6 amd64 10.1.0-2ubuntu1~18.04 [483 kB]\nGet:38 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 lib32quadmath0 amd64 10.1.0-2ubuntu1~18.04 [230 kB]\nGet:39 http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu bionic/main amd64 libx32quadmath0 amd64 10.1.0-2ubuntu1~18.04 [148 kB]\nFetched 27.7 MB in 4s (7,316 kB/s)\ndebconf: unable to initialize frontend: Dialog\ndebconf: (No usable dialog-like program is installed, so the dialog based frontend cannot be used. 
at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 76, <> line 39.)\ndebconf: falling back to frontend: Readline\ndebconf: unable to initialize frontend: Readline\ndebconf: (This frontend requires a controlling tty.)\ndebconf: falling back to frontend: Teletype\ndpkg-preconfigure: unable to re-open stdin: \nSelecting previously unselected package gcc-10-base:amd64.\n(Reading database ... 154720 files and directories currently installed.)\nPreparing to unpack .../00-gcc-10-base_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking gcc-10-base:amd64 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package libc6-dev-i386.\nPreparing to unpack .../01-libc6-dev-i386_2.27-3ubuntu1.2_amd64.deb ...\nUnpacking libc6-dev-i386 (2.27-3ubuntu1.2) ...\nSelecting previously unselected package libc6-x32.\nPreparing to unpack .../02-libc6-x32_2.27-3ubuntu1.2_amd64.deb ...\nUnpacking libc6-x32 (2.27-3ubuntu1.2) ...\nSelecting previously unselected package libc6-dev-x32.\nPreparing to unpack .../03-libc6-dev-x32_2.27-3ubuntu1.2_amd64.deb ...\nUnpacking libc6-dev-x32 (2.27-3ubuntu1.2) ...\nSelecting previously unselected package libx32gcc-s1.\nPreparing to unpack .../04-libx32gcc-s1_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking libx32gcc-s1 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package libx32gcc1.\nPreparing to unpack .../05-libx32gcc1_1%3a10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking libx32gcc1 (1:10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package lib32gomp1.\nPreparing to unpack .../06-lib32gomp1_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking lib32gomp1 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package libx32gomp1.\nPreparing to unpack .../07-libx32gomp1_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking libx32gomp1 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package lib32itm1.\nPreparing to unpack .../08-lib32itm1_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking lib32itm1 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package libx32itm1.\nPreparing to unpack .../09-libx32itm1_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking libx32itm1 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package lib32atomic1.\nPreparing to unpack .../10-lib32atomic1_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking lib32atomic1 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package libx32atomic1.\nPreparing to unpack .../11-libx32atomic1_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking libx32atomic1 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package lib32asan2.\nPreparing to unpack .../12-lib32asan2_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking lib32asan2 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package libx32asan2.\nPreparing to unpack .../13-libx32asan2_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking libx32asan2 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package lib32ubsan0.\nPreparing to unpack .../14-lib32ubsan0_7.5.0-3ubuntu1~18.04_amd64.deb ...\nUnpacking lib32ubsan0 (7.5.0-3ubuntu1~18.04) ...\nSelecting previously unselected package libx32stdc++6.\nPreparing to unpack .../15-libx32stdc++6_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking libx32stdc++6 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package libx32ubsan0.\nPreparing to unpack .../16-libx32ubsan0_7.5.0-3ubuntu1~18.04_amd64.deb ...\nUnpacking libx32ubsan0 (7.5.0-3ubuntu1~18.04) ...\nSelecting previously unselected package lib32cilkrts5.\nPreparing to unpack .../17-lib32cilkrts5_7.5.0-3ubuntu1~18.04_amd64.deb 
...\nUnpacking lib32cilkrts5 (7.5.0-3ubuntu1~18.04) ...\nSelecting previously unselected package libx32cilkrts5.\nPreparing to unpack .../18-libx32cilkrts5_7.5.0-3ubuntu1~18.04_amd64.deb ...\nUnpacking libx32cilkrts5 (7.5.0-3ubuntu1~18.04) ...\nSelecting previously unselected package lib32mpx0.\nPreparing to unpack .../19-lib32mpx0_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking lib32mpx0 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package lib32quadmath0.\nPreparing to unpack .../20-lib32quadmath0_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking lib32quadmath0 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package libx32quadmath0.\nPreparing to unpack .../21-libx32quadmath0_10.1.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking libx32quadmath0 (10.1.0-2ubuntu1~18.04) ...\nSelecting previously unselected package lib32gcc-5-dev.\nPreparing to unpack .../22-lib32gcc-5-dev_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking lib32gcc-5-dev (5.5.0-12ubuntu1) ...\nSelecting previously unselected package libx32gcc-5-dev.\nPreparing to unpack .../23-libx32gcc-5-dev_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking libx32gcc-5-dev (5.5.0-12ubuntu1) ...\nSelecting previously unselected package gcc-5-multilib.\nPreparing to unpack .../24-gcc-5-multilib_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking gcc-5-multilib (5.5.0-12ubuntu1) ...\nSelecting previously unselected package lib32stdc++-5-dev.\nPreparing to unpack .../25-lib32stdc++-5-dev_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking lib32stdc++-5-dev (5.5.0-12ubuntu1) ...\nSelecting previously unselected package libx32stdc++-5-dev.\nPreparing to unpack .../26-libx32stdc++-5-dev_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking libx32stdc++-5-dev (5.5.0-12ubuntu1) ...\nSelecting previously unselected package g++-5-multilib.\nPreparing to unpack .../27-g++-5-multilib_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking g++-5-multilib (5.5.0-12ubuntu1) ...\nSelecting previously unselected package gcc-6-base:amd64.\nPreparing to unpack .../28-gcc-6-base_6.5.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking gcc-6-base:amd64 (6.5.0-2ubuntu1~18.04) ...\nSelecting previously unselected package lib32asan4.\nPreparing to unpack .../29-lib32asan4_7.5.0-3ubuntu1~18.04_amd64.deb ...\nUnpacking lib32asan4 (7.5.0-3ubuntu1~18.04) ...\nSelecting previously unselected package libx32asan4.\nPreparing to unpack .../30-libx32asan4_7.5.0-3ubuntu1~18.04_amd64.deb ...\nUnpacking libx32asan4 (7.5.0-3ubuntu1~18.04) ...\nSelecting previously unselected package lib32mpx2.\nPreparing to unpack .../31-lib32mpx2_8.4.0-1ubuntu1~18.04_amd64.deb ...\nUnpacking lib32mpx2 (8.4.0-1ubuntu1~18.04) ...\nSelecting previously unselected package lib32gcc-7-dev.\nPreparing to unpack .../32-lib32gcc-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ...\nUnpacking lib32gcc-7-dev (7.5.0-3ubuntu1~18.04) ...\nSelecting previously unselected package libx32gcc-7-dev.\nPreparing to unpack .../33-libx32gcc-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ...\nUnpacking libx32gcc-7-dev (7.5.0-3ubuntu1~18.04) ...\nSelecting previously unselected package gcc-7-multilib.\nPreparing to unpack .../34-gcc-7-multilib_7.5.0-3ubuntu1~18.04_amd64.deb ...\nUnpacking gcc-7-multilib (7.5.0-3ubuntu1~18.04) ...\nSelecting previously unselected package gcc-multilib.\nPreparing to unpack .../35-gcc-multilib_4%3a7.4.0-1ubuntu2.3_amd64.deb ...\nUnpacking gcc-multilib (4:7.4.0-1ubuntu2.3) ...\nSelecting previously unselected package libgfortran3:amd64.\nPreparing to unpack .../36-libgfortran3_6.5.0-2ubuntu1~18.04_amd64.deb ...\nUnpacking libgfortran3:amd64 (6.5.0-2ubuntu1~18.04) ...\nSelecting 
previously unselected package libgfortran-5-dev:amd64.\nPreparing to unpack .../37-libgfortran-5-dev_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking libgfortran-5-dev:amd64 (5.5.0-12ubuntu1) ...\nSelecting previously unselected package gfortran-5.\nPreparing to unpack .../38-gfortran-5_5.5.0-12ubuntu1_amd64.deb ...\nUnpacking gfortran-5 (5.5.0-12ubuntu1) ...\nSetting up libc6-x32 (2.27-3ubuntu1.2) ...\nSetting up lib32asan4 (7.5.0-3ubuntu1~18.04) ...\nSetting up lib32mpx2 (8.4.0-1ubuntu1~18.04) ...\nSetting up gcc-6-base:amd64 (6.5.0-2ubuntu1~18.04) ...\nSetting up gcc-10-base:amd64 (10.1.0-2ubuntu1~18.04) ...\nSetting up libx32gomp1 (10.1.0-2ubuntu1~18.04) ...\nSetting up libx32itm1 (10.1.0-2ubuntu1~18.04) ...\nSetting up lib32mpx0 (5.5.0-12ubuntu1) ...\nSetting up libx32gcc-s1 (10.1.0-2ubuntu1~18.04) ...\nSetting up lib32ubsan0 (7.5.0-3ubuntu1~18.04) ...\nSetting up lib32quadmath0 (10.1.0-2ubuntu1~18.04) ...\nSetting up lib32cilkrts5 (7.5.0-3ubuntu1~18.04) ...\nSetting up libc6-dev-i386 (2.27-3ubuntu1.2) ...\nSetting up libc6-dev-x32 (2.27-3ubuntu1.2) ...\nSetting up lib32asan2 (5.5.0-12ubuntu1) ...\nSetting up lib32itm1 (10.1.0-2ubuntu1~18.04) ...\nSetting up libx32quadmath0 (10.1.0-2ubuntu1~18.04) ...\nSetting up libx32gcc1 (1:10.1.0-2ubuntu1~18.04) ...\nSetting up lib32gomp1 (10.1.0-2ubuntu1~18.04) ...\nSetting up libgfortran3:amd64 (6.5.0-2ubuntu1~18.04) ...\nSetting up libx32stdc++6 (10.1.0-2ubuntu1~18.04) ...\nSetting up lib32atomic1 (10.1.0-2ubuntu1~18.04) ...\nSetting up libx32atomic1 (10.1.0-2ubuntu1~18.04) ...\nSetting up libgfortran-5-dev:amd64 (5.5.0-12ubuntu1) ...\nSetting up libx32asan2 (5.5.0-12ubuntu1) ...\nSetting up libx32asan4 (7.5.0-3ubuntu1~18.04) ...\nSetting up libx32cilkrts5 (7.5.0-3ubuntu1~18.04) ...\nSetting up libx32ubsan0 (7.5.0-3ubuntu1~18.04) ...\nSetting up gfortran-5 (5.5.0-12ubuntu1) ...\nSetting up libx32gcc-7-dev (7.5.0-3ubuntu1~18.04) ...\nSetting up lib32gcc-7-dev (7.5.0-3ubuntu1~18.04) ...\nSetting up lib32gcc-5-dev (5.5.0-12ubuntu1) ...\nSetting up libx32gcc-5-dev (5.5.0-12ubuntu1) ...\nSetting up gcc-7-multilib (7.5.0-3ubuntu1~18.04) ...\nSetting up lib32stdc++-5-dev (5.5.0-12ubuntu1) ...\nSetting up libx32stdc++-5-dev (5.5.0-12ubuntu1) ...\nSetting up gcc-5-multilib (5.5.0-12ubuntu1) ...\nSetting up gcc-multilib (4:7.4.0-1ubuntu2.3) ...\nSetting up g++-5-multilib (5.5.0-12ubuntu1) ...\nProcessing triggers for man-db (2.8.3-2ubuntu0.1) ...\nProcessing triggers for libc-bin (2.27-3ubuntu1.2) ...\n/sbin/ldconfig.real: /usr/local/lib/python3.6/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n\n" ], [ "!sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 50 --slave /usr/bin/g++ g++ /usr/bin/g++-5", "_____no_output_____" ], [ "!sudo update-alternatives --config gcc", "There is only one alternative in link group gcc (providing /usr/bin/gcc): /usr/bin/gcc-5\nNothing to configure.\n" ], [ "!gcc --version", "gcc (Ubuntu 5.5.0-12ubuntu1) 5.5.0 20171010\nCopyright (C) 2015 Free Software Foundation, Inc.\nThis is free software; see the source for copying conditions. 
There is NO\nwarranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n\n" ], [ "!nvcc -c -o nms_kernel.cu.o nms_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_60", "_____no_output_____" ], [ "cd ../../", "/content/planercnn_with_adf/nms\n" ], [ "!python build.py", "Including CUDA code.\n/content/planercnn_with_adf/nms\ngenerating /tmp/tmpremmjwvf/_nms.c\nsetting the current directory to '/tmp/tmpremmjwvf'\nrunning build_ext\nbuilding '_nms' extension\ncreating content\ncreating content/planercnn_with_adf\ncreating content/planercnn_with_adf/nms\ncreating content/planercnn_with_adf/nms/src\nx86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c _nms.c -o ./_nms.o -std=c99\nx86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/planercnn_with_adf/nms/src/nms.c -o ./content/planercnn_with_adf/nms/src/nms.o -std=c99\nx86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/planercnn_with_adf/nms/src/nms_cuda.c -o ./content/planercnn_with_adf/nms/src/nms_cuda.o -std=c99\n\u001b[01m\u001b[K/content/planercnn_with_adf/nms/src/nms_cuda.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kgpu_nms\u001b[m\u001b[K’:\n\u001b[01m\u001b[K/content/planercnn_with_adf/nms/src/nms_cuda.c:29:35:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kinitialization from incompatible pointer type [\u001b[01;35m\u001b[K-Wincompatible-pointer-types\u001b[m\u001b[K]\n unsigned long long* mask_flat = \u001b[01;35m\u001b[KTHCudaLongTensor_data\u001b[m\u001b[K(state, mask);\n \u001b[01;35m\u001b[K^~~~~~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K/content/planercnn_with_adf/nms/src/nms_cuda.c:37:40:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kinitialization from incompatible pointer type [\u001b[01;35m\u001b[K-Wincompatible-pointer-types\u001b[m\u001b[K]\n unsigned long long * mask_cpu_flat = \u001b[01;35m\u001b[KTHLongTensor_data\u001b[m\u001b[K(mask_cpu);\n \u001b[01;35m\u001b[K^~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K/content/planercnn_with_adf/nms/src/nms_cuda.c:40:39:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kinitialization from incompatible pointer type [\u001b[01;35m\u001b[K-Wincompatible-pointer-types\u001b[m\u001b[K]\n unsigned long long* remv_cpu_flat = \u001b[01;35m\u001b[KTHLongTensor_data\u001b[m\u001b[K(remv_cpu);\n 
\u001b[01;35m\u001b[K^~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K/content/planercnn_with_adf/nms/src/nms_cuda.c:23:7:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[Kboxes_dim\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n int \u001b[01;35m\u001b[Kboxes_dim\u001b[m\u001b[K = THCudaTensor_size(state, boxes, 1);\n \u001b[01;35m\u001b[K^~~~~~~~~\u001b[m\u001b[K\nx86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 ./_nms.o ./content/planercnn_with_adf/nms/src/nms.o ./content/planercnn_with_adf/nms/src/nms_cuda.o /content/planercnn_with_adf/nms/src/cuda/nms_kernel.cu.o -o ./_nms.so\n" ], [ "cd ..", "/content/planercnn_with_adf\n" ], [ "cd roialign/roi_align/src/cuda/", "/content/planercnn_with_adf/roialign/roi_align/src/cuda\n" ], [ "!nvcc -c -o crop_and_resize_kernel.cu.o crop_and_resize_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_60", "_____no_output_____" ], [ "cd ../../", "/content/planercnn_with_adf/roialign/roi_align\n" ], [ "!python build.py", "Including CUDA code.\n/content/planercnn_with_adf/roialign/roi_align\ngenerating /tmp/tmprfw1vwgp/_crop_and_resize.c\nsetting the current directory to '/tmp/tmprfw1vwgp'\nrunning build_ext\nbuilding '_crop_and_resize' extension\ncreating content\ncreating content/planercnn_with_adf\ncreating content/planercnn_with_adf/roialign\ncreating content/planercnn_with_adf/roialign/roi_align\ncreating content/planercnn_with_adf/roialign/roi_align/src\nx86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c _crop_and_resize.c -o ./_crop_and_resize.o -std=c99 -fopenmp -std=c99\nx86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/planercnn_with_adf/roialign/roi_align/src/crop_and_resize.c -o ./content/planercnn_with_adf/roialign/roi_align/src/crop_and_resize.o -std=c99 -fopenmp -std=c99\nx86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/TH -I/usr/local/lib/python3.6/dist-packages/torch/utils/ffi/../../lib/include/THC -I/usr/local/cuda/include -I/usr/include/python3.6m -c /content/planercnn_with_adf/roialign/roi_align/src/crop_and_resize_gpu.c -o ./content/planercnn_with_adf/roialign/roi_align/src/crop_and_resize_gpu.o -std=c99 -fopenmp -std=c99\nx86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions 
-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 ./_crop_and_resize.o ./content/planercnn_with_adf/roialign/roi_align/src/crop_and_resize.o ./content/planercnn_with_adf/roialign/roi_align/src/crop_and_resize_gpu.o /content/planercnn_with_adf/roialign/roi_align/src/cuda/crop_and_resize_kernel.cu.o -o ./_crop_and_resize.so\n" ], [ "cd ../../", "/content/planercnn_with_adf\n" ], [ "#!python evaluate_adf.py --methods=f --suffix=warping_refine --dataset=inference --customDataFolder=example_images", "_____no_output_____" ], [ "ls", "\u001b[0m\u001b[01;34manchors\u001b[0m/ \u001b[01;32mevaluate_adf.py\u001b[0m* \u001b[01;34mmodels_adf\u001b[0m/ \u001b[01;32mrequirements.txt\u001b[0m*\n\u001b[01;32mconfig.py\u001b[0m* \u001b[01;32mevaluate.py\u001b[0m* \u001b[01;34mnms\u001b[0m/ \u001b[01;34mroialign\u001b[0m/\n\u001b[01;34mcontrib\u001b[0m/ \u001b[01;32mevaluate_utils.py\u001b[0m* \u001b[01;32moptions.py\u001b[0m* \u001b[01;32mtrain_planercnn.py\u001b[0m*\n\u001b[01;34mdata_prep\u001b[0m/ \u001b[01;34mexample_images\u001b[0m/ \u001b[01;32mplane_utils.py\u001b[0m* \u001b[01;32mutils.py\u001b[0m*\n\u001b[01;34mdatasets\u001b[0m/ \u001b[01;34mmodels\u001b[0m/ README.md \u001b[01;32mvisualize_utils.py\u001b[0m*\n" ], [ "mkdir inputs", "_____no_output_____" ], [ "%%writefile inputs/camera.txt\n587 587 320 240 640 480", "Writing inputs/camera.txt\n" ], [ "# !zip -r testfile.zip test", "_____no_output_____" ], [ "!ls", "anchors\t\t evaluate.py\t nms\t\t train_planercnn.py\nconfig.py\t evaluate_utils.py options.py\t utils.py\ncontrib\t\t example_images plane_utils.py visualize_utils.py\ndata_prep\t inputs\t\t README.md\ndatasets\t models\t\t requirements.txt\nevaluate_adf.py models_adf\t roialign\n" ], [ "cd /content/planercnn_with_adf/", "/content/planercnn_with_adf\n" ], [ "!wget https://www.dropbox.com/s/yjcg6s57n581sk0/checkpoint.zip?dl=0", "--2020-10-31 11:33:37-- https://www.dropbox.com/s/yjcg6s57n581sk0/checkpoint.zip?dl=0\nResolving www.dropbox.com (www.dropbox.com)... 162.125.6.1, 2620:100:601c:1::a27d:601\nConnecting to www.dropbox.com (www.dropbox.com)|162.125.6.1|:443... connected.\nHTTP request sent, awaiting response... 301 Moved Permanently\nLocation: /s/raw/yjcg6s57n581sk0/checkpoint.zip [following]\n--2020-10-31 11:33:37-- https://www.dropbox.com/s/raw/yjcg6s57n581sk0/checkpoint.zip\nReusing existing connection to www.dropbox.com:443.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://uca9823fa6784de130d048eb6faa.dl.dropboxusercontent.com/cd/0/inline/BCRfu87cO25Fiu-ZPaY7jhiUKtuuLs2PUSVBPMW2cimkGLUANyS5wKUnHdL19j4zYxSEFzdkR3RyAi6VuUemtjnYis12UnIziCh7PvQW4cdzng/file# [following]\n--2020-10-31 11:33:38-- https://uca9823fa6784de130d048eb6faa.dl.dropboxusercontent.com/cd/0/inline/BCRfu87cO25Fiu-ZPaY7jhiUKtuuLs2PUSVBPMW2cimkGLUANyS5wKUnHdL19j4zYxSEFzdkR3RyAi6VuUemtjnYis12UnIziCh7PvQW4cdzng/file\nResolving uca9823fa6784de130d048eb6faa.dl.dropboxusercontent.com (uca9823fa6784de130d048eb6faa.dl.dropboxusercontent.com)... 162.125.6.15, 2620:100:601c:15::a27d:60f\nConnecting to uca9823fa6784de130d048eb6faa.dl.dropboxusercontent.com (uca9823fa6784de130d048eb6faa.dl.dropboxusercontent.com)|162.125.6.15|:443... connected.\nHTTP request sent, awaiting response... 
302 Found\nLocation: /cd/0/inline2/BCR6u--TeE0NFpmHHVymTrQPyHriuLdEGGtiTQUl2Ahs92_hxs_P-wT-AOUghOqotFcNmb37pAVdVFuv_eCVJgETgOlAsaCaJMBCUX0VDLKW022ac4zhVa7NDGNDSRhAHw8WSSpJqSySLWbXflbYB-hbxDUIOUlddnx4r8nbOeLUJL4RrZVfyTm4PxiC--9kGr6x8pcbMmKVsinzmu_bHNvuFDYp235-NNrOjGjntMb8oh4gTirUEhA9mxfTSTPC5vdBheZw9h7O7bXAXqNyujZmHMTHO78MQcWiJC4-BBi_0d9PB3ssL-rrdlQ0Jkz_Nbg1tS47NIF_9_RNxfuQeThI/file [following]\n--2020-10-31 11:33:38-- https://uca9823fa6784de130d048eb6faa.dl.dropboxusercontent.com/cd/0/inline2/BCR6u--TeE0NFpmHHVymTrQPyHriuLdEGGtiTQUl2Ahs92_hxs_P-wT-AOUghOqotFcNmb37pAVdVFuv_eCVJgETgOlAsaCaJMBCUX0VDLKW022ac4zhVa7NDGNDSRhAHw8WSSpJqSySLWbXflbYB-hbxDUIOUlddnx4r8nbOeLUJL4RrZVfyTm4PxiC--9kGr6x8pcbMmKVsinzmu_bHNvuFDYp235-NNrOjGjntMb8oh4gTirUEhA9mxfTSTPC5vdBheZw9h7O7bXAXqNyujZmHMTHO78MQcWiJC4-BBi_0d9PB3ssL-rrdlQ0Jkz_Nbg1tS47NIF_9_RNxfuQeThI/file\nReusing existing connection to uca9823fa6784de130d048eb6faa.dl.dropboxusercontent.com:443.\nHTTP request sent, awaiting response... 200 OK\nLength: 262259930 (250M) [application/zip]\nSaving to: ‘checkpoint.zip?dl=0’\n\ncheckpoint.zip?dl=0 100%[===================>] 250.11M 62.6MB/s in 4.0s \n\n2020-10-31 11:33:43 (62.6 MB/s) - ‘checkpoint.zip?dl=0’ saved [262259930/262259930]\n\n" ], [ "!mkdir checkpoint", "_____no_output_____" ], [ "!mv \"checkpoint.zip?dl=0\" \"planercnn_refine.zip\" ", "_____no_output_____" ], [ "!mv planercnn_refine.zip checkpoint/", "_____no_output_____" ], [ "cd checkpoint/", "/content/planercnn_with_adf/checkpoint\n" ], [ "!unzip planercnn_refine.zip", "Archive: planercnn_refine.zip\n creating: planercnn_normal_warping_refine/\n inflating: planercnn_normal_warping_refine/checkpoint_refine.pth \n inflating: planercnn_normal_warping_refine/checkpoint.pth \n" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ], [ "cp /content/drive/My\\ Drive/computer_vision/session14_assignment/planercnn-master/inputs/Images/* /content/planercnn_with_adf/inputs/", "_____no_output_____" ], [ "## Update evaluate_adf.py - comment line 644, 645, 646", "_____no_output_____" ], [ "# rm /content/planercnn_with_adf/test/inference/*", "_____no_output_____" ], [ "ls /content/planercnn_with_adf/inputs/ | wc -l", "3591\n" ], [ "from shutil import copyfile\nimport shutil\nimport os\nimport glob", "_____no_output_____" ], [ "cd /content/planercnn_with_adf\n", "/content/planercnn_with_adf\n" ], [ "!mkdir input_temp", "_____no_output_____" ], [ "img_names_list1 = glob.glob('inputs/*')\nimg_names_list1.remove(\"inputs/camera.txt\")\nlen(img_names_list1)", "_____no_output_____" ], [ "def copy_images(src, tar, input_filename):\n files = glob.glob(src)\n # print(files)\n for f in files:\n filename = f.split(\"/\")[-1]\n if('image' in filename or 'segmentation_0_final' in filename):\n if 'image_0' in filename:\n new_input_filename = input_filename.split('.')[:-1][0]+'.png'\n new_filename = os.path.dirname(os.path.realpath(f))+\"/\"+new_input_filename\n os.rename(f,new_filename)\n shutil.copy(new_filename, f\"{tar}{new_input_filename}\")\n elif 'segmentation_0_final' in filename:\n new_output_filename = input_filename.split('.')[:-1][0]+'_segmentation_final.png'\n new_filename = os.path.dirname(os.path.realpath(f))+\"/\"+new_output_filename\n os.rename(f,new_filename)\n shutil.copy(new_filename, f\"{tar}{new_output_filename}\")", "_____no_output_____" ], [ "def clean_dir(dir):\n os.system(f'rm -f {dir}*')", "_____no_output_____" ], [ "output = '/content/drive/My 
Drive/computer_vision/session14_assignment/planercnn-master/outputs/'", "_____no_output_____" ], [ "for i in range(3590):\n if len(img_names_list1)>0:\n img_names_temp = img_names_list1[:1]\n print(f'running set - {i}')\n del img_names_list1[:1]\n os.system('rm -f input_temp/*')\n for item in img_names_temp:\n src = item\n filename = item.split(\"/\")[1]\n dst = 'input_temp/'+filename\n copyfile(src, dst) # copy 100 images\n copyfile('inputs/camera.txt', 'input_temp/camera.txt') # copy camera.txt file\n os.system('rm -f test/inference/*') # Cleaning inference folder to clean before evaluation.\n os.system('python evaluate_adf.py --methods=f --suffix=warping_refine --dataset=inference --customDataFolder=input_temp')\n copy_images('test/inference/*', output, filename)\n # print(f'Yet to run - {len(img_names_list1)} images')", "running set - 0\nrunning set - 1\nrunning set - 2\nrunning set - 3\nrunning set - 4\nrunning set - 5\nrunning set - 6\nrunning set - 7\nrunning set - 8\nrunning set - 9\nrunning set - 10\nrunning set - 11\nrunning set - 12\nrunning set - 13\nrunning set - 14\nrunning set - 15\nrunning set - 16\nrunning set - 17\nrunning set - 18\nrunning set - 19\nrunning set - 20\nrunning set - 21\nrunning set - 22\nrunning set - 23\nrunning set - 24\nrunning set - 25\nrunning set - 26\nrunning set - 27\nrunning set - 28\nrunning set - 29\nrunning set - 30\nrunning set - 31\nrunning set - 32\nrunning set - 33\nrunning set - 34\nrunning set - 35\nrunning set - 36\nrunning set - 37\nrunning set - 38\nrunning set - 39\nrunning set - 40\nrunning set - 41\nrunning set - 42\nrunning set - 43\nrunning set - 44\nrunning set - 45\nrunning set - 46\nrunning set - 47\nrunning set - 48\nrunning set - 49\nrunning set - 50\nrunning set - 51\nrunning set - 52\nrunning set - 53\nrunning set - 54\nrunning set - 55\nrunning set - 56\nrunning set - 57\nrunning set - 58\nrunning set - 59\nrunning set - 60\nrunning set - 61\nrunning set - 62\nrunning set - 63\nrunning set - 64\nrunning set - 65\nrunning set - 66\nrunning set - 67\nrunning set - 68\nrunning set - 69\nrunning set - 70\nrunning set - 71\nrunning set - 72\nrunning set - 73\nrunning set - 74\nrunning set - 75\nrunning set - 76\nrunning set - 77\nrunning set - 78\nrunning set - 79\nrunning set - 80\nrunning set - 81\nrunning set - 82\nrunning set - 83\nrunning set - 84\nrunning set - 85\nrunning set - 86\nrunning set - 87\nrunning set - 88\nrunning set - 89\nrunning set - 90\nrunning set - 91\nrunning set - 92\nrunning set - 93\nrunning set - 94\nrunning set - 95\nrunning set - 96\nrunning set - 97\nrunning set - 98\nrunning set - 99\nrunning set - 100\nrunning set - 101\nrunning set - 102\nrunning set - 103\nrunning set - 104\nrunning set - 105\nrunning set - 106\nrunning set - 107\nrunning set - 108\nrunning set - 109\nrunning set - 110\nrunning set - 111\nrunning set - 112\nrunning set - 113\nrunning set - 114\nrunning set - 115\nrunning set - 116\nrunning set - 117\nrunning set - 118\nrunning set - 119\nrunning set - 120\nrunning set - 121\nrunning set - 122\nrunning set - 123\nrunning set - 124\nrunning set - 125\nrunning set - 126\nrunning set - 127\nrunning set - 128\nrunning set - 129\nrunning set - 130\nrunning set - 131\nrunning set - 132\nrunning set - 133\nrunning set - 134\nrunning set - 135\nrunning set - 136\nrunning set - 137\nrunning set - 138\nrunning set - 139\nrunning set - 140\nrunning set - 141\nrunning set - 142\nrunning set - 143\nrunning set - 144\nrunning set - 145\nrunning set - 146\nrunning set - 147\nrunning set - 
148\n[... identical progress lines "running set - 149" through "running set - 894" omitted ...]\nrunning set - 895\nrunning set - 
1966\nrunning set - 1967\nrunning set - 1968\nrunning set - 1969\nrunning set - 1970\nrunning set - 1971\nrunning set - 1972\nrunning set - 1973\nrunning set - 1974\nrunning set - 1975\nrunning set - 1976\nrunning set - 1977\nrunning set - 1978\nrunning set - 1979\nrunning set - 1980\nrunning set - 1981\nrunning set - 1982\nrunning set - 1983\nrunning set - 1984\nrunning set - 1985\nrunning set - 1986\nrunning set - 1987\nrunning set - 1988\nrunning set - 1989\nrunning set - 1990\nrunning set - 1991\nrunning set - 1992\nrunning set - 1993\nrunning set - 1994\nrunning set - 1995\nrunning set - 1996\nrunning set - 1997\nrunning set - 1998\nrunning set - 1999\nrunning set - 2000\nrunning set - 2001\nrunning set - 2002\nrunning set - 2003\nrunning set - 2004\nrunning set - 2005\nrunning set - 2006\nrunning set - 2007\nrunning set - 2008\nrunning set - 2009\nrunning set - 2010\nrunning set - 2011\nrunning set - 2012\nrunning set - 2013\nrunning set - 2014\nrunning set - 2015\nrunning set - 2016\nrunning set - 2017\nrunning set - 2018\nrunning set - 2019\nrunning set - 2020\nrunning set - 2021\nrunning set - 2022\nrunning set - 2023\nrunning set - 2024\nrunning set - 2025\nrunning set - 2026\nrunning set - 2027\nrunning set - 2028\nrunning set - 2029\nrunning set - 2030\nrunning set - 2031\nrunning set - 2032\nrunning set - 2033\nrunning set - 2034\nrunning set - 2035\nrunning set - 2036\nrunning set - 2037\nrunning set - 2038\nrunning set - 2039\nrunning set - 2040\nrunning set - 2041\nrunning set - 2042\nrunning set - 2043\nrunning set - 2044\nrunning set - 2045\nrunning set - 2046\nrunning set - 2047\nrunning set - 2048\nrunning set - 2049\nrunning set - 2050\nrunning set - 2051\nrunning set - 2052\nrunning set - 2053\nrunning set - 2054\nrunning set - 2055\nrunning set - 2056\nrunning set - 2057\nrunning set - 2058\nrunning set - 2059\nrunning set - 2060\nrunning set - 2061\nrunning set - 2062\nrunning set - 2063\nrunning set - 2064\nrunning set - 2065\nrunning set - 2066\nrunning set - 2067\nrunning set - 2068\nrunning set - 2069\nrunning set - 2070\nrunning set - 2071\nrunning set - 2072\nrunning set - 2073\nrunning set - 2074\nrunning set - 2075\nrunning set - 2076\nrunning set - 2077\nrunning set - 2078\nrunning set - 2079\nrunning set - 2080\nrunning set - 2081\nrunning set - 2082\nrunning set - 2083\nrunning set - 2084\nrunning set - 2085\nrunning set - 2086\nrunning set - 2087\nrunning set - 2088\nrunning set - 2089\nrunning set - 2090\nrunning set - 2091\nrunning set - 2092\nrunning set - 2093\nrunning set - 2094\nrunning set - 2095\nrunning set - 2096\nrunning set - 2097\nrunning set - 2098\nrunning set - 2099\nrunning set - 2100\nrunning set - 2101\nrunning set - 2102\nrunning set - 2103\nrunning set - 2104\nrunning set - 2105\nrunning set - 2106\nrunning set - 2107\nrunning set - 2108\nrunning set - 2109\nrunning set - 2110\nrunning set - 2111\nrunning set - 2112\nrunning set - 2113\nrunning set - 2114\nrunning set - 2115\nrunning set - 2116\nrunning set - 2117\nrunning set - 2118\nrunning set - 2119\nrunning set - 2120\nrunning set - 2121\nrunning set - 2122\nrunning set - 2123\nrunning set - 2124\nrunning set - 2125\nrunning set - 2126\nrunning set - 2127\nrunning set - 2128\nrunning set - 2129\nrunning set - 2130\nrunning set - 2131\nrunning set - 2132\nrunning set - 2133\nrunning set - 2134\nrunning set - 2135\nrunning set - 2136\nrunning set - 2137\nrunning set - 2138\nrunning set - 2139\nrunning set - 2140\nrunning set - 2141\nrunning set - 2142\nrunning set - 2143\nrunning 
set - 2144\nrunning set - 2145\nrunning set - 2146\nrunning set - 2147\nrunning set - 2148\nrunning set - 2149\nrunning set - 2150\nrunning set - 2151\nrunning set - 2152\nrunning set - 2153\nrunning set - 2154\nrunning set - 2155\nrunning set - 2156\nrunning set - 2157\nrunning set - 2158\nrunning set - 2159\nrunning set - 2160\nrunning set - 2161\nrunning set - 2162\nrunning set - 2163\nrunning set - 2164\nrunning set - 2165\nrunning set - 2166\nrunning set - 2167\nrunning set - 2168\nrunning set - 2169\nrunning set - 2170\nrunning set - 2171\nrunning set - 2172\nrunning set - 2173\nrunning set - 2174\nrunning set - 2175\nrunning set - 2176\nrunning set - 2177\nrunning set - 2178\nrunning set - 2179\nrunning set - 2180\nrunning set - 2181\nrunning set - 2182\nrunning set - 2183\nrunning set - 2184\nrunning set - 2185\nrunning set - 2186\nrunning set - 2187\nrunning set - 2188\nrunning set - 2189\nrunning set - 2190\nrunning set - 2191\nrunning set - 2192\nrunning set - 2193\nrunning set - 2194\nrunning set - 2195\nrunning set - 2196\nrunning set - 2197\nrunning set - 2198\nrunning set - 2199\nrunning set - 2200\nrunning set - 2201\nrunning set - 2202\nrunning set - 2203\nrunning set - 2204\nrunning set - 2205\nrunning set - 2206\nrunning set - 2207\nrunning set - 2208\nrunning set - 2209\nrunning set - 2210\nrunning set - 2211\nrunning set - 2212\nrunning set - 2213\nrunning set - 2214\nrunning set - 2215\nrunning set - 2216\nrunning set - 2217\nrunning set - 2218\nrunning set - 2219\nrunning set - 2220\nrunning set - 2221\nrunning set - 2222\nrunning set - 2223\nrunning set - 2224\nrunning set - 2225\nrunning set - 2226\nrunning set - 2227\nrunning set - 2228\nrunning set - 2229\nrunning set - 2230\nrunning set - 2231\nrunning set - 2232\nrunning set - 2233\nrunning set - 2234\nrunning set - 2235\nrunning set - 2236\nrunning set - 2237\nrunning set - 2238\nrunning set - 2239\nrunning set - 2240\nrunning set - 2241\nrunning set - 2242\nrunning set - 2243\nrunning set - 2244\nrunning set - 2245\nrunning set - 2246\nrunning set - 2247\nrunning set - 2248\nrunning set - 2249\nrunning set - 2250\nrunning set - 2251\nrunning set - 2252\nrunning set - 2253\nrunning set - 2254\nrunning set - 2255\nrunning set - 2256\nrunning set - 2257\nrunning set - 2258\nrunning set - 2259\nrunning set - 2260\nrunning set - 2261\nrunning set - 2262\nrunning set - 2263\nrunning set - 2264\nrunning set - 2265\nrunning set - 2266\nrunning set - 2267\nrunning set - 2268\nrunning set - 2269\nrunning set - 2270\nrunning set - 2271\nrunning set - 2272\nrunning set - 2273\nrunning set - 2274\nrunning set - 2275\nrunning set - 2276\nrunning set - 2277\nrunning set - 2278\nrunning set - 2279\nrunning set - 2280\nrunning set - 2281\nrunning set - 2282\nrunning set - 2283\nrunning set - 2284\nrunning set - 2285\nrunning set - 2286\nrunning set - 2287\nrunning set - 2288\nrunning set - 2289\nrunning set - 2290\nrunning set - 2291\nrunning set - 2292\nrunning set - 2293\nrunning set - 2294\nrunning set - 2295\nrunning set - 2296\nrunning set - 2297\nrunning set - 2298\nrunning set - 2299\nrunning set - 2300\nrunning set - 2301\nrunning set - 2302\nrunning set - 2303\nrunning set - 2304\nrunning set - 2305\nrunning set - 2306\nrunning set - 2307\nrunning set - 2308\nrunning set - 2309\nrunning set - 2310\nrunning set - 2311\nrunning set - 2312\nrunning set - 2313\nrunning set - 2314\nrunning set - 2315\nrunning set - 2316\nrunning set - 2317\nrunning set - 2318\nrunning set - 2319\nrunning set - 2320\nrunning set - 
2321\nrunning set - 2322\nrunning set - 2323\nrunning set - 2324\nrunning set - 2325\nrunning set - 2326\nrunning set - 2327\nrunning set - 2328\nrunning set - 2329\nrunning set - 2330\nrunning set - 2331\nrunning set - 2332\nrunning set - 2333\nrunning set - 2334\nrunning set - 2335\nrunning set - 2336\nrunning set - 2337\nrunning set - 2338\nrunning set - 2339\nrunning set - 2340\nrunning set - 2341\nrunning set - 2342\nrunning set - 2343\nrunning set - 2344\nrunning set - 2345\nrunning set - 2346\nrunning set - 2347\nrunning set - 2348\nrunning set - 2349\nrunning set - 2350\nrunning set - 2351\nrunning set - 2352\nrunning set - 2353\nrunning set - 2354\nrunning set - 2355\nrunning set - 2356\nrunning set - 2357\nrunning set - 2358\nrunning set - 2359\nrunning set - 2360\nrunning set - 2361\nrunning set - 2362\nrunning set - 2363\nrunning set - 2364\nrunning set - 2365\nrunning set - 2366\nrunning set - 2367\nrunning set - 2368\nrunning set - 2369\nrunning set - 2370\nrunning set - 2371\nrunning set - 2372\nrunning set - 2373\nrunning set - 2374\nrunning set - 2375\nrunning set - 2376\nrunning set - 2377\nrunning set - 2378\nrunning set - 2379\nrunning set - 2380\nrunning set - 2381\nrunning set - 2382\nrunning set - 2383\nrunning set - 2384\nrunning set - 2385\nrunning set - 2386\nrunning set - 2387\nrunning set - 2388\nrunning set - 2389\nrunning set - 2390\nrunning set - 2391\nrunning set - 2392\nrunning set - 2393\nrunning set - 2394\nrunning set - 2395\nrunning set - 2396\nrunning set - 2397\nrunning set - 2398\nrunning set - 2399\nrunning set - 2400\nrunning set - 2401\nrunning set - 2402\nrunning set - 2403\nrunning set - 2404\nrunning set - 2405\nrunning set - 2406\nrunning set - 2407\nrunning set - 2408\nrunning set - 2409\nrunning set - 2410\nrunning set - 2411\nrunning set - 2412\nrunning set - 2413\nrunning set - 2414\nrunning set - 2415\nrunning set - 2416\nrunning set - 2417\nrunning set - 2418\nrunning set - 2419\nrunning set - 2420\nrunning set - 2421\nrunning set - 2422\nrunning set - 2423\nrunning set - 2424\nrunning set - 2425\nrunning set - 2426\nrunning set - 2427\nrunning set - 2428\nrunning set - 2429\nrunning set - 2430\nrunning set - 2431\nrunning set - 2432\nrunning set - 2433\nrunning set - 2434\nrunning set - 2435\nrunning set - 2436\nrunning set - 2437\nrunning set - 2438\nrunning set - 2439\nrunning set - 2440\nrunning set - 2441\nrunning set - 2442\nrunning set - 2443\nrunning set - 2444\nrunning set - 2445\nrunning set - 2446\nrunning set - 2447\nrunning set - 2448\nrunning set - 2449\nrunning set - 2450\nrunning set - 2451\nrunning set - 2452\nrunning set - 2453\nrunning set - 2454\nrunning set - 2455\nrunning set - 2456\nrunning set - 2457\nrunning set - 2458\nrunning set - 2459\nrunning set - 2460\nrunning set - 2461\nrunning set - 2462\nrunning set - 2463\nrunning set - 2464\nrunning set - 2465\nrunning set - 2466\nrunning set - 2467\nrunning set - 2468\nrunning set - 2469\nrunning set - 2470\nrunning set - 2471\nrunning set - 2472\nrunning set - 2473\nrunning set - 2474\nrunning set - 2475\nrunning set - 2476\nrunning set - 2477\nrunning set - 2478\nrunning set - 2479\nrunning set - 2480\nrunning set - 2481\nrunning set - 2482\nrunning set - 2483\nrunning set - 2484\nrunning set - 2485\nrunning set - 2486\nrunning set - 2487\nrunning set - 2488\nrunning set - 2489\nrunning set - 2490\nrunning set - 2491\nrunning set - 2492\nrunning set - 2493\nrunning set - 2494\nrunning set - 2495\nrunning set - 2496\nrunning set - 2497\nrunning set - 2498\nrunning 
set - 2499\nrunning set - 2500\nrunning set - 2501\nrunning set - 2502\nrunning set - 2503\nrunning set - 2504\nrunning set - 2505\nrunning set - 2506\nrunning set - 2507\nrunning set - 2508\nrunning set - 2509\nrunning set - 2510\nrunning set - 2511\nrunning set - 2512\nrunning set - 2513\nrunning set - 2514\nrunning set - 2515\nrunning set - 2516\nrunning set - 2517\nrunning set - 2518\nrunning set - 2519\nrunning set - 2520\nrunning set - 2521\nrunning set - 2522\nrunning set - 2523\nrunning set - 2524\nrunning set - 2525\nrunning set - 2526\nrunning set - 2527\nrunning set - 2528\nrunning set - 2529\nrunning set - 2530\nrunning set - 2531\nrunning set - 2532\nrunning set - 2533\nrunning set - 2534\nrunning set - 2535\nrunning set - 2536\nrunning set - 2537\nrunning set - 2538\nrunning set - 2539\nrunning set - 2540\nrunning set - 2541\nrunning set - 2542\nrunning set - 2543\nrunning set - 2544\nrunning set - 2545\nrunning set - 2546\nrunning set - 2547\nrunning set - 2548\nrunning set - 2549\nrunning set - 2550\nrunning set - 2551\nrunning set - 2552\nrunning set - 2553\nrunning set - 2554\nrunning set - 2555\nrunning set - 2556\nrunning set - 2557\nrunning set - 2558\nrunning set - 2559\nrunning set - 2560\nrunning set - 2561\nrunning set - 2562\nrunning set - 2563\nrunning set - 2564\nrunning set - 2565\nrunning set - 2566\nrunning set - 2567\nrunning set - 2568\nrunning set - 2569\nrunning set - 2570\nrunning set - 2571\nrunning set - 2572\nrunning set - 2573\nrunning set - 2574\nrunning set - 2575\nrunning set - 2576\nrunning set - 2577\nrunning set - 2578\nrunning set - 2579\nrunning set - 2580\nrunning set - 2581\nrunning set - 2582\nrunning set - 2583\nrunning set - 2584\nrunning set - 2585\nrunning set - 2586\nrunning set - 2587\nrunning set - 2588\nrunning set - 2589\nrunning set - 2590\nrunning set - 2591\nrunning set - 2592\nrunning set - 2593\nrunning set - 2594\nrunning set - 2595\nrunning set - 2596\nrunning set - 2597\nrunning set - 2598\nrunning set - 2599\nrunning set - 2600\nrunning set - 2601\nrunning set - 2602\nrunning set - 2603\nrunning set - 2604\nrunning set - 2605\nrunning set - 2606\nrunning set - 2607\nrunning set - 2608\nrunning set - 2609\nrunning set - 2610\nrunning set - 2611\nrunning set - 2612\nrunning set - 2613\nrunning set - 2614\nrunning set - 2615\nrunning set - 2616\nrunning set - 2617\nrunning set - 2618\nrunning set - 2619\nrunning set - 2620\nrunning set - 2621\nrunning set - 2622\nrunning set - 2623\nrunning set - 2624\nrunning set - 2625\nrunning set - 2626\nrunning set - 2627\nrunning set - 2628\nrunning set - 2629\nrunning set - 2630\nrunning set - 2631\nrunning set - 2632\nrunning set - 2633\nrunning set - 2634\nrunning set - 2635\nrunning set - 2636\nrunning set - 2637\nrunning set - 2638\nrunning set - 2639\nrunning set - 2640\nrunning set - 2641\nrunning set - 2642\nrunning set - 2643\nrunning set - 2644\nrunning set - 2645\nrunning set - 2646\nrunning set - 2647\nrunning set - 2648\nrunning set - 2649\nrunning set - 2650\nrunning set - 2651\nrunning set - 2652\nrunning set - 2653\nrunning set - 2654\nrunning set - 2655\nrunning set - 2656\nrunning set - 2657\nrunning set - 2658\nrunning set - 2659\nrunning set - 2660\nrunning set - 2661\nrunning set - 2662\nrunning set - 2663\nrunning set - 2664\nrunning set - 2665\nrunning set - 2666\nrunning set - 2667\nrunning set - 2668\nrunning set - 2669\nrunning set - 2670\nrunning set - 2671\nrunning set - 2672\nrunning set - 2673\nrunning set - 2674\nrunning set - 2675\nrunning set - 
2676\nrunning set - 2677\nrunning set - 2678\nrunning set - 2679\nrunning set - 2680\nrunning set - 2681\nrunning set - 2682\nrunning set - 2683\nrunning set - 2684\nrunning set - 2685\nrunning set - 2686\nrunning set - 2687\nrunning set - 2688\nrunning set - 2689\nrunning set - 2690\nrunning set - 2691\nrunning set - 2692\nrunning set - 2693\nrunning set - 2694\nrunning set - 2695\nrunning set - 2696\nrunning set - 2697\nrunning set - 2698\nrunning set - 2699\nrunning set - 2700\nrunning set - 2701\nrunning set - 2702\nrunning set - 2703\nrunning set - 2704\nrunning set - 2705\nrunning set - 2706\nrunning set - 2707\nrunning set - 2708\nrunning set - 2709\nrunning set - 2710\nrunning set - 2711\nrunning set - 2712\nrunning set - 2713\nrunning set - 2714\nrunning set - 2715\nrunning set - 2716\nrunning set - 2717\nrunning set - 2718\nrunning set - 2719\nrunning set - 2720\nrunning set - 2721\nrunning set - 2722\nrunning set - 2723\nrunning set - 2724\nrunning set - 2725\nrunning set - 2726\nrunning set - 2727\nrunning set - 2728\nrunning set - 2729\nrunning set - 2730\nrunning set - 2731\nrunning set - 2732\nrunning set - 2733\nrunning set - 2734\nrunning set - 2735\nrunning set - 2736\nrunning set - 2737\nrunning set - 2738\nrunning set - 2739\nrunning set - 2740\nrunning set - 2741\nrunning set - 2742\nrunning set - 2743\nrunning set - 2744\nrunning set - 2745\nrunning set - 2746\nrunning set - 2747\nrunning set - 2748\nrunning set - 2749\nrunning set - 2750\nrunning set - 2751\nrunning set - 2752\nrunning set - 2753\nrunning set - 2754\nrunning set - 2755\nrunning set - 2756\nrunning set - 2757\nrunning set - 2758\nrunning set - 2759\nrunning set - 2760\nrunning set - 2761\nrunning set - 2762\nrunning set - 2763\nrunning set - 2764\nrunning set - 2765\nrunning set - 2766\nrunning set - 2767\nrunning set - 2768\nrunning set - 2769\nrunning set - 2770\nrunning set - 2771\nrunning set - 2772\nrunning set - 2773\nrunning set - 2774\nrunning set - 2775\nrunning set - 2776\nrunning set - 2777\nrunning set - 2778\nrunning set - 2779\nrunning set - 2780\nrunning set - 2781\nrunning set - 2782\nrunning set - 2783\nrunning set - 2784\nrunning set - 2785\nrunning set - 2786\nrunning set - 2787\nrunning set - 2788\nrunning set - 2789\nrunning set - 2790\nrunning set - 2791\nrunning set - 2792\nrunning set - 2793\nrunning set - 2794\nrunning set - 2795\nrunning set - 2796\nrunning set - 2797\nrunning set - 2798\nrunning set - 2799\nrunning set - 2800\nrunning set - 2801\nrunning set - 2802\nrunning set - 2803\nrunning set - 2804\nrunning set - 2805\nrunning set - 2806\nrunning set - 2807\nrunning set - 2808\nrunning set - 2809\nrunning set - 2810\nrunning set - 2811\nrunning set - 2812\nrunning set - 2813\nrunning set - 2814\nrunning set - 2815\nrunning set - 2816\nrunning set - 2817\nrunning set - 2818\nrunning set - 2819\nrunning set - 2820\nrunning set - 2821\nrunning set - 2822\nrunning set - 2823\nrunning set - 2824\nrunning set - 2825\nrunning set - 2826\nrunning set - 2827\nrunning set - 2828\nrunning set - 2829\nrunning set - 2830\nrunning set - 2831\nrunning set - 2832\nrunning set - 2833\nrunning set - 2834\nrunning set - 2835\nrunning set - 2836\nrunning set - 2837\nrunning set - 2838\nrunning set - 2839\nrunning set - 2840\nrunning set - 2841\nrunning set - 2842\nrunning set - 2843\nrunning set - 2844\nrunning set - 2845\nrunning set - 2846\nrunning set - 2847\nrunning set - 2848\nrunning set - 2849\nrunning set - 2850\nrunning set - 2851\nrunning set - 2852\nrunning set - 2853\nrunning 
set - 2854\nrunning set - 2855\nrunning set - 2856\nrunning set - 2857\nrunning set - 2858\nrunning set - 2859\nrunning set - 2860\nrunning set - 2861\nrunning set - 2862\nrunning set - 2863\nrunning set - 2864\nrunning set - 2865\nrunning set - 2866\nrunning set - 2867\nrunning set - 2868\nrunning set - 2869\nrunning set - 2870\nrunning set - 2871\nrunning set - 2872\nrunning set - 2873\nrunning set - 2874\nrunning set - 2875\nrunning set - 2876\nrunning set - 2877\nrunning set - 2878\nrunning set - 2879\nrunning set - 2880\nrunning set - 2881\nrunning set - 2882\nrunning set - 2883\nrunning set - 2884\nrunning set - 2885\nrunning set - 2886\nrunning set - 2887\nrunning set - 2888\nrunning set - 2889\nrunning set - 2890\nrunning set - 2891\nrunning set - 2892\nrunning set - 2893\nrunning set - 2894\nrunning set - 2895\nrunning set - 2896\nrunning set - 2897\nrunning set - 2898\nrunning set - 2899\nrunning set - 2900\nrunning set - 2901\nrunning set - 2902\nrunning set - 2903\nrunning set - 2904\nrunning set - 2905\nrunning set - 2906\nrunning set - 2907\nrunning set - 2908\nrunning set - 2909\nrunning set - 2910\nrunning set - 2911\nrunning set - 2912\nrunning set - 2913\nrunning set - 2914\nrunning set - 2915\nrunning set - 2916\nrunning set - 2917\nrunning set - 2918\nrunning set - 2919\nrunning set - 2920\nrunning set - 2921\nrunning set - 2922\nrunning set - 2923\nrunning set - 2924\nrunning set - 2925\nrunning set - 2926\nrunning set - 2927\nrunning set - 2928\nrunning set - 2929\nrunning set - 2930\nrunning set - 2931\nrunning set - 2932\nrunning set - 2933\nrunning set - 2934\nrunning set - 2935\nrunning set - 2936\nrunning set - 2937\nrunning set - 2938\nrunning set - 2939\nrunning set - 2940\nrunning set - 2941\nrunning set - 2942\nrunning set - 2943\nrunning set - 2944\nrunning set - 2945\nrunning set - 2946\nrunning set - 2947\nrunning set - 2948\nrunning set - 2949\nrunning set - 2950\nrunning set - 2951\nrunning set - 2952\nrunning set - 2953\nrunning set - 2954\nrunning set - 2955\nrunning set - 2956\nrunning set - 2957\nrunning set - 2958\nrunning set - 2959\nrunning set - 2960\nrunning set - 2961\nrunning set - 2962\nrunning set - 2963\nrunning set - 2964\nrunning set - 2965\nrunning set - 2966\nrunning set - 2967\nrunning set - 2968\nrunning set - 2969\nrunning set - 2970\nrunning set - 2971\nrunning set - 2972\nrunning set - 2973\nrunning set - 2974\nrunning set - 2975\nrunning set - 2976\nrunning set - 2977\nrunning set - 2978\nrunning set - 2979\nrunning set - 2980\nrunning set - 2981\nrunning set - 2982\nrunning set - 2983\nrunning set - 2984\nrunning set - 2985\nrunning set - 2986\nrunning set - 2987\nrunning set - 2988\nrunning set - 2989\nrunning set - 2990\nrunning set - 2991\nrunning set - 2992\nrunning set - 2993\nrunning set - 2994\nrunning set - 2995\nrunning set - 2996\nrunning set - 2997\nrunning set - 2998\nrunning set - 2999\nrunning set - 3000\nrunning set - 3001\nrunning set - 3002\nrunning set - 3003\nrunning set - 3004\nrunning set - 3005\nrunning set - 3006\nrunning set - 3007\nrunning set - 3008\nrunning set - 3009\nrunning set - 3010\nrunning set - 3011\nrunning set - 3012\nrunning set - 3013\nrunning set - 3014\nrunning set - 3015\nrunning set - 3016\nrunning set - 3017\nrunning set - 3018\nrunning set - 3019\nrunning set - 3020\nrunning set - 3021\nrunning set - 3022\nrunning set - 3023\nrunning set - 3024\nrunning set - 3025\nrunning set - 3026\nrunning set - 3027\nrunning set - 3028\nrunning set - 3029\nrunning set - 3030\nrunning set - 
3031\nrunning set - 3032\nrunning set - 3033\nrunning set - 3034\nrunning set - 3035\nrunning set - 3036\nrunning set - 3037\nrunning set - 3038\nrunning set - 3039\nrunning set - 3040\nrunning set - 3041\nrunning set - 3042\nrunning set - 3043\nrunning set - 3044\nrunning set - 3045\nrunning set - 3046\nrunning set - 3047\nrunning set - 3048\nrunning set - 3049\nrunning set - 3050\nrunning set - 3051\nrunning set - 3052\nrunning set - 3053\nrunning set - 3054\nrunning set - 3055\nrunning set - 3056\nrunning set - 3057\nrunning set - 3058\nrunning set - 3059\nrunning set - 3060\nrunning set - 3061\nrunning set - 3062\nrunning set - 3063\nrunning set - 3064\nrunning set - 3065\nrunning set - 3066\nrunning set - 3067\nrunning set - 3068\nrunning set - 3069\nrunning set - 3070\nrunning set - 3071\nrunning set - 3072\nrunning set - 3073\nrunning set - 3074\nrunning set - 3075\nrunning set - 3076\nrunning set - 3077\nrunning set - 3078\nrunning set - 3079\nrunning set - 3080\nrunning set - 3081\nrunning set - 3082\nrunning set - 3083\nrunning set - 3084\nrunning set - 3085\nrunning set - 3086\nrunning set - 3087\nrunning set - 3088\nrunning set - 3089\nrunning set - 3090\nrunning set - 3091\nrunning set - 3092\nrunning set - 3093\nrunning set - 3094\nrunning set - 3095\nrunning set - 3096\nrunning set - 3097\nrunning set - 3098\nrunning set - 3099\nrunning set - 3100\nrunning set - 3101\nrunning set - 3102\nrunning set - 3103\nrunning set - 3104\nrunning set - 3105\nrunning set - 3106\nrunning set - 3107\nrunning set - 3108\nrunning set - 3109\nrunning set - 3110\nrunning set - 3111\nrunning set - 3112\nrunning set - 3113\nrunning set - 3114\nrunning set - 3115\nrunning set - 3116\nrunning set - 3117\nrunning set - 3118\nrunning set - 3119\nrunning set - 3120\nrunning set - 3121\nrunning set - 3122\nrunning set - 3123\nrunning set - 3124\nrunning set - 3125\nrunning set - 3126\nrunning set - 3127\nrunning set - 3128\nrunning set - 3129\nrunning set - 3130\nrunning set - 3131\nrunning set - 3132\nrunning set - 3133\nrunning set - 3134\nrunning set - 3135\nrunning set - 3136\nrunning set - 3137\nrunning set - 3138\nrunning set - 3139\nrunning set - 3140\nrunning set - 3141\nrunning set - 3142\nrunning set - 3143\nrunning set - 3144\nrunning set - 3145\nrunning set - 3146\nrunning set - 3147\nrunning set - 3148\nrunning set - 3149\nrunning set - 3150\nrunning set - 3151\nrunning set - 3152\nrunning set - 3153\nrunning set - 3154\nrunning set - 3155\nrunning set - 3156\nrunning set - 3157\nrunning set - 3158\nrunning set - 3159\nrunning set - 3160\nrunning set - 3161\nrunning set - 3162\nrunning set - 3163\nrunning set - 3164\nrunning set - 3165\nrunning set - 3166\nrunning set - 3167\nrunning set - 3168\nrunning set - 3169\nrunning set - 3170\nrunning set - 3171\nrunning set - 3172\nrunning set - 3173\nrunning set - 3174\nrunning set - 3175\nrunning set - 3176\nrunning set - 3177\nrunning set - 3178\nrunning set - 3179\nrunning set - 3180\nrunning set - 3181\nrunning set - 3182\nrunning set - 3183\nrunning set - 3184\nrunning set - 3185\nrunning set - 3186\nrunning set - 3187\nrunning set - 3188\nrunning set - 3189\nrunning set - 3190\nrunning set - 3191\nrunning set - 3192\nrunning set - 3193\nrunning set - 3194\nrunning set - 3195\nrunning set - 3196\nrunning set - 3197\nrunning set - 3198\nrunning set - 3199\nrunning set - 3200\nrunning set - 3201\nrunning set - 3202\nrunning set - 3203\nrunning set - 3204\nrunning set - 3205\nrunning set - 3206\nrunning set - 3207\nrunning set - 3208\nrunning 
set - 3209\nrunning set - 3210\nrunning set - 3211\nrunning set - 3212\nrunning set - 3213\nrunning set - 3214\nrunning set - 3215\nrunning set - 3216\nrunning set - 3217\nrunning set - 3218\nrunning set - 3219\nrunning set - 3220\nrunning set - 3221\nrunning set - 3222\nrunning set - 3223\nrunning set - 3224\nrunning set - 3225\nrunning set - 3226\nrunning set - 3227\nrunning set - 3228\nrunning set - 3229\nrunning set - 3230\nrunning set - 3231\nrunning set - 3232\nrunning set - 3233\nrunning set - 3234\nrunning set - 3235\nrunning set - 3236\nrunning set - 3237\nrunning set - 3238\nrunning set - 3239\nrunning set - 3240\nrunning set - 3241\nrunning set - 3242\nrunning set - 3243\nrunning set - 3244\nrunning set - 3245\nrunning set - 3246\nrunning set - 3247\nrunning set - 3248\nrunning set - 3249\nrunning set - 3250\nrunning set - 3251\nrunning set - 3252\nrunning set - 3253\nrunning set - 3254\nrunning set - 3255\nrunning set - 3256\nrunning set - 3257\nrunning set - 3258\nrunning set - 3259\nrunning set - 3260\nrunning set - 3261\nrunning set - 3262\nrunning set - 3263\nrunning set - 3264\nrunning set - 3265\nrunning set - 3266\nrunning set - 3267\nrunning set - 3268\nrunning set - 3269\nrunning set - 3270\nrunning set - 3271\nrunning set - 3272\nrunning set - 3273\nrunning set - 3274\nrunning set - 3275\nrunning set - 3276\nrunning set - 3277\nrunning set - 3278\nrunning set - 3279\nrunning set - 3280\nrunning set - 3281\nrunning set - 3282\nrunning set - 3283\nrunning set - 3284\nrunning set - 3285\nrunning set - 3286\nrunning set - 3287\nrunning set - 3288\nrunning set - 3289\nrunning set - 3290\nrunning set - 3291\nrunning set - 3292\nrunning set - 3293\nrunning set - 3294\nrunning set - 3295\nrunning set - 3296\nrunning set - 3297\nrunning set - 3298\nrunning set - 3299\nrunning set - 3300\nrunning set - 3301\nrunning set - 3302\nrunning set - 3303\nrunning set - 3304\nrunning set - 3305\nrunning set - 3306\nrunning set - 3307\nrunning set - 3308\nrunning set - 3309\nrunning set - 3310\nrunning set - 3311\nrunning set - 3312\nrunning set - 3313\nrunning set - 3314\nrunning set - 3315\nrunning set - 3316\nrunning set - 3317\nrunning set - 3318\nrunning set - 3319\nrunning set - 3320\nrunning set - 3321\nrunning set - 3322\nrunning set - 3323\nrunning set - 3324\nrunning set - 3325\nrunning set - 3326\nrunning set - 3327\nrunning set - 3328\nrunning set - 3329\nrunning set - 3330\nrunning set - 3331\nrunning set - 3332\nrunning set - 3333\nrunning set - 3334\nrunning set - 3335\nrunning set - 3336\nrunning set - 3337\nrunning set - 3338\nrunning set - 3339\nrunning set - 3340\nrunning set - 3341\nrunning set - 3342\nrunning set - 3343\nrunning set - 3344\nrunning set - 3345\nrunning set - 3346\nrunning set - 3347\nrunning set - 3348\nrunning set - 3349\nrunning set - 3350\nrunning set - 3351\nrunning set - 3352\nrunning set - 3353\nrunning set - 3354\nrunning set - 3355\nrunning set - 3356\nrunning set - 3357\nrunning set - 3358\nrunning set - 3359\nrunning set - 3360\nrunning set - 3361\nrunning set - 3362\nrunning set - 3363\nrunning set - 3364\nrunning set - 3365\nrunning set - 3366\nrunning set - 3367\nrunning set - 3368\nrunning set - 3369\nrunning set - 3370\nrunning set - 3371\nrunning set - 3372\nrunning set - 3373\nrunning set - 3374\nrunning set - 3375\nrunning set - 3376\nrunning set - 3377\nrunning set - 3378\nrunning set - 3379\nrunning set - 3380\nrunning set - 3381\nrunning set - 3382\nrunning set - 3383\nrunning set - 3384\nrunning set - 3385\nrunning set - 
3386\nrunning set - 3387\nrunning set - 3388\nrunning set - 3389\nrunning set - 3390\nrunning set - 3391\nrunning set - 3392\nrunning set - 3393\nrunning set - 3394\nrunning set - 3395\nrunning set - 3396\nrunning set - 3397\nrunning set - 3398\nrunning set - 3399\nrunning set - 3400\nrunning set - 3401\nrunning set - 3402\nrunning set - 3403\nrunning set - 3404\nrunning set - 3405\nrunning set - 3406\nrunning set - 3407\nrunning set - 3408\nrunning set - 3409\nrunning set - 3410\nrunning set - 3411\nrunning set - 3412\nrunning set - 3413\nrunning set - 3414\nrunning set - 3415\nrunning set - 3416\nrunning set - 3417\nrunning set - 3418\nrunning set - 3419\nrunning set - 3420\nrunning set - 3421\nrunning set - 3422\nrunning set - 3423\nrunning set - 3424\nrunning set - 3425\nrunning set - 3426\nrunning set - 3427\nrunning set - 3428\nrunning set - 3429\nrunning set - 3430\nrunning set - 3431\nrunning set - 3432\nrunning set - 3433\nrunning set - 3434\nrunning set - 3435\nrunning set - 3436\nrunning set - 3437\nrunning set - 3438\nrunning set - 3439\nrunning set - 3440\nrunning set - 3441\nrunning set - 3442\nrunning set - 3443\nrunning set - 3444\nrunning set - 3445\nrunning set - 3446\nrunning set - 3447\nrunning set - 3448\nrunning set - 3449\nrunning set - 3450\nrunning set - 3451\nrunning set - 3452\nrunning set - 3453\nrunning set - 3454\nrunning set - 3455\nrunning set - 3456\nrunning set - 3457\nrunning set - 3458\nrunning set - 3459\nrunning set - 3460\nrunning set - 3461\nrunning set - 3462\nrunning set - 3463\nrunning set - 3464\nrunning set - 3465\nrunning set - 3466\nrunning set - 3467\nrunning set - 3468\nrunning set - 3469\nrunning set - 3470\nrunning set - 3471\nrunning set - 3472\nrunning set - 3473\nrunning set - 3474\nrunning set - 3475\nrunning set - 3476\nrunning set - 3477\nrunning set - 3478\nrunning set - 3479\nrunning set - 3480\nrunning set - 3481\nrunning set - 3482\nrunning set - 3483\nrunning set - 3484\nrunning set - 3485\nrunning set - 3486\nrunning set - 3487\nrunning set - 3488\nrunning set - 3489\nrunning set - 3490\nrunning set - 3491\nrunning set - 3492\nrunning set - 3493\nrunning set - 3494\nrunning set - 3495\nrunning set - 3496\nrunning set - 3497\nrunning set - 3498\nrunning set - 3499\nrunning set - 3500\nrunning set - 3501\nrunning set - 3502\nrunning set - 3503\nrunning set - 3504\nrunning set - 3505\nrunning set - 3506\nrunning set - 3507\nrunning set - 3508\nrunning set - 3509\nrunning set - 3510\nrunning set - 3511\nrunning set - 3512\nrunning set - 3513\nrunning set - 3514\nrunning set - 3515\nrunning set - 3516\nrunning set - 3517\nrunning set - 3518\nrunning set - 3519\nrunning set - 3520\nrunning set - 3521\nrunning set - 3522\nrunning set - 3523\nrunning set - 3524\nrunning set - 3525\nrunning set - 3526\nrunning set - 3527\nrunning set - 3528\nrunning set - 3529\nrunning set - 3530\nrunning set - 3531\nrunning set - 3532\nrunning set - 3533\nrunning set - 3534\nrunning set - 3535\nrunning set - 3536\nrunning set - 3537\nrunning set - 3538\nrunning set - 3539\nrunning set - 3540\nrunning set - 3541\nrunning set - 3542\nrunning set - 3543\nrunning set - 3544\nrunning set - 3545\nrunning set - 3546\nrunning set - 3547\nrunning set - 3548\nrunning set - 3549\nrunning set - 3550\nrunning set - 3551\nrunning set - 3552\nrunning set - 3553\nrunning set - 3554\nrunning set - 3555\nrunning set - 3556\nrunning set - 3557\nrunning set - 3558\nrunning set - 3559\nrunning set - 3560\nrunning set - 3561\nrunning set - 3562\nrunning set - 3563\nrunning 
set - 3564\nrunning set - 3565\nrunning set - 3566\nrunning set - 3567\nrunning set - 3568\nrunning set - 3569\nrunning set - 3570\nrunning set - 3571\nrunning set - 3572\nrunning set - 3573\nrunning set - 3574\nrunning set - 3575\nrunning set - 3576\nrunning set - 3577\nrunning set - 3578\nrunning set - 3579\nrunning set - 3580\nrunning set - 3581\nrunning set - 3582\nrunning set - 3583\nrunning set - 3584\nrunning set - 3585\nrunning set - 3586\nrunning set - 3587\nrunning set - 3588\nrunning set - 3589\n" ], [ "ls /content/drive/My\\ Drive/computer_vision/session14_assignment/planercnn-master/outputs/ | wc -l", "7086\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb400431b20e6d59f6ab0bb447053be79420c901
10,028
ipynb
Jupyter Notebook
examples/15. Fit phase equilibria - Induced association - Ethanol + CPME.ipynb
MatKie/SGTPy
8e98d92fedd2b07d834e547e5154ec8f70d80728
[ "MIT" ]
12
2020-12-27T17:04:33.000Z
2021-07-19T06:28:28.000Z
examples/15. Fit phase equilibria - Induced association - Ethanol + CPME.ipynb
MatKie/SGTPy
8e98d92fedd2b07d834e547e5154ec8f70d80728
[ "MIT" ]
2
2021-05-15T14:27:57.000Z
2021-08-19T15:42:24.000Z
examples/15. Fit phase equilibria - Induced association - Ethanol + CPME.ipynb
MatKie/SGTPy
8e98d92fedd2b07d834e547e5154ec8f70d80728
[ "MIT" ]
5
2021-02-21T01:33:29.000Z
2021-07-26T15:11:08.000Z
46.64186
485
0.571699
[ [ [ "# Fit $k_{ij}$ and $r_c^{ABij}$ interaction parameters of Ethanol and CPME\n\n---\nLet's call $\\underline{\\xi}$ the optimization parameters of a mixture. In order to optimize them, you need to provide experimental phase equilibria data. This can include VLE, LLE and VLLE data. The objective functions used for each equilibria type are shown below:\n\n\n### Vapor-Liquid Equilibria Data\n\n\n$$ OF_{VLE}(\\underline{\\xi}) = w_y \\sum_{j=1}^{Np} \\left[ \\sum_{i=1}^c (y_{i,j}^{cal} - y_{i,j}^{exp})^2 \\right] + w_P \\sum_{j=1}^{Np} \\left[ \\frac{P_{j}^{cal} - P_{j}^{exp}}{P_{j}^{exp}} \\right]^2$$\n\nHere, $Np$ is the number of experimental data points, $y_i$ is the vapor molar fraction of component $i$ and $P$ is the bubble pressure. The superscripts $cal$ and $exp$ refer to the computed and experimental values, respectively. Finally, $w_y$ is the weight for the vapor composition error and $w_P$ is the weight for the bubble pressure error.\n\n\n### Liquid-Liquid Equilibria Data\n$$ OF_{LLE}(\\underline{\\xi}) = w_x \\sum_{j=1}^{Np} \\sum_{i=1}^c \\left[x_{i,j} - x_{i,j}^{exp}\\right]^2 + w_w \\sum_{j=1}^{Np} \\sum_{i=1}^c \\left[ w_{i,j} - w_{i,j}^{exp} \\right]^2 $$\n\nHere, $Np$ is the number of experimental data points, and $x_i$ and $w_i$ are the molar fractions of component $i$ in the two liquid phases. The superscripts $cal$ and $exp$ refer to the computed and experimental values, respectively. Finally, $w_x$ and $w_w$ are the weights for the liquid 1 ($x$) and liquid 2 ($w$) composition error.\n\n\n### Vapor-Liquid-Liquid Equilibria Data\n$$ OF_{VLLE}(\\underline{\\xi}) = w_x \\sum_{j=1}^{Np} \\sum_{i=1}^c \\left[x_{i,j}^{cal} - x_{i,j}^{exp}\\right]^2 + w_w \\sum_{j=1}^{Np} \\sum_{i=1}^c \\left[w_{i,j}^{cal} - w_{i,j}^{exp}\\right]^2 + w_y \\sum_{j=1}^{Np} \\sum_{i=1}^c \\left[y_{i,j}^{cal} - y_{i,j}^{exp}\\right]^2 + w_P \\sum_{j=1}^{Np} \\left[ \\frac{P_{j}^{cal}}{P_{j}^{exp}} - 1\\right]^2 $$\n\nHere, $Np$ is the number of experimental data points, and $y_i$, $x_i$ and $w_i$ are the molar fractions of component $i$ in the vapor and liquid phases, respectively. The superscripts $cal$ and $exp$ refer to the computed and experimental values, respectively. Finally, $w_x$ and $w_w$ are the weights for the liquid 1 ($x$) and liquid 2 ($w$) composition error, $w_y$ is the weight for the vapor composition error and $w_P$ is the weight for the three-phase equilibrium pressure error.\n\nIf there is data for more than one equilibria type, the errors can be added accordingly. So the objective function becomes:\n\n$$ OF(\\underline{\\xi}) = OF_{VLE}(\\underline{\\xi}) + OF_{LLE}(\\underline{\\xi}) + OF_{VLLE}(\\underline{\\xi})$$\n\n---\n\nThis notebook has the purpose of showing how to optimize the $k_{ij}$ and $r_c^{ABij}$ for a mixture with induced association. 
For these mixtures the interactions parameters are shown below:\n\n$$ \\epsilon_{ij} = (1-k_{ij}) \\frac{\\sqrt{\\sigma_i^3 \\sigma_j^3}}{\\sigma_{ij}^3} \\sqrt{\\epsilon_i \\epsilon_j} ;\\quad\\epsilon_{ij}^{AB} = \\frac{\\epsilon^{AB} (self-associating)}{2} ;\\quad r^{ABij}_c (fitted)$$\n\nFirst, it's needed to import the necessary modules", "_____no_output_____" ] ], [ [ "import numpy as np\n\nfrom sgtpy import component, mixture, saftvrmie\nfrom sgtpy.fit import fit_cross", "_____no_output_____" ] ], [ [ "Now that the functions are available it is necessary to create the mixture.", "_____no_output_____" ] ], [ [ "ethanol = component('ethanol2C', ms = 1.7728, sigma = 3.5592 , eps = 224.50,\n lambda_r = 11.319, lambda_a = 6., eAB = 3018.05, rcAB = 0.3547,\n rdAB = 0.4, sites = [1,0,1], cii= 5.3141080872882285e-20)\n\ncpme = component('cpme', ms = 2.32521144, sigma = 4.13606074, eps = 343.91193798, lambda_r = 14.15484877, \n lambda_a = 6.0, npol = 1.91990385,mupol = 1.27, sites =[0,0,1], cii = 3.5213681817448466e-19)\n\nmix = mixture(ethanol, cpme)", "_____no_output_____" ] ], [ [ "Now the experimental equilibria data is read and a tuple is created. It includes the experimental liquid composition, vapor composition, equilibrium temperature and pressure. This is done with ```datavle = (Xexp, Yexp, Texp, Pexp)```", "_____no_output_____" ] ], [ [ "# Experimental data obtained from Mejia, Cartes, J. Chem. Eng. Data, vol. 64, no. 5, pp. 1970–1977, 2019\n\n# Experimental temperature saturation in K\nTexp = np.array([355.77, 346.42, 342.82, 340.41, 338.95, 337.78, 336.95, 336.29,\n 335.72, 335.3 , 334.92, 334.61, 334.35, 334.09, 333.92, 333.79,\n 333.72, 333.72, 333.81, 334.06, 334.58])\n\n# Experimental pressure in Pa\nPexp = np.array([50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000.,\n 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000.,\n 50000., 50000., 50000., 50000., 50000.])\n\n# Experimental liquid composition\nXexp = np.array([[0. , 0.065, 0.11 , 0.161, 0.203, 0.253, 0.301, 0.351, 0.402,\n 0.446, 0.497, 0.541, 0.588, 0.643, 0.689, 0.743, 0.785, 0.837,\n 0.893, 0.947, 1. ],\n [1. , 0.935, 0.89 , 0.839, 0.797, 0.747, 0.699, 0.649, 0.598,\n 0.554, 0.503, 0.459, 0.412, 0.357, 0.311, 0.257, 0.215, 0.163,\n 0.107, 0.053, 0. ]])\n\n# Experimental vapor composition\nYexp = np.array([[0. , 0.302, 0.411, 0.48 , 0.527, 0.567, 0.592, 0.614, 0.642,\n 0.657, 0.678, 0.694, 0.71 , 0.737, 0.753, 0.781, 0.801, 0.837,\n 0.883, 0.929, 1. ],\n [1. , 0.698, 0.589, 0.52 , 0.473, 0.433, 0.408, 0.386, 0.358,\n 0.343, 0.322, 0.306, 0.29 , 0.263, 0.247, 0.219, 0.199, 0.163,\n 0.117, 0.071, 0. ]])\n\ndatavle = (Xexp, Yexp, Texp, Pexp)", "_____no_output_____" ] ], [ [ "The function ```fit_cross``` optimize the $k_{ij}$ correction and $r_c^{ABij}$ distance. An initial guess is needed, as well as the mixture object, the index of the self-associating component and the equilibria data. 
Optionally, the ```minimize_options``` option allows modifying the minimizer default parameters.", "_____no_output_____" ] ], [ [ "#initial guesses for kij and rcij\nx0 = [0.01015194, 2.23153033]\nfit_cross(x0, mix, assoc=0, datavle=datavle)", "_____no_output_____" ] ], [ [ "If the mixture exhibits other equilibria types, you can supply this experimental data to the ``datalle`` or ``datavlle`` parameters.\n\n- ``datalle``: (Xexp, Wexp, Texp, Pexp)\n- ``datavlle``: (Xexp, Wexp, Yexp, Texp, Pexp)\n\nYou can specify the weights for each objective function through the following parameters:\n\n- ``weights_vle``: list or array_like, weights for the VLE objective function.\n    - weights_vle[0] = weight for Y composition error, defaults to 1.\n    - weights_vle[1] = weight for bubble pressure error, defaults to 1.\n- ``weights_lle``: list or array_like, weights for the LLE objective function.\n    - weights_lle[0] = weight for X (liquid 1) composition error, defaults to 1.\n    - weights_lle[1] = weight for W (liquid 2) composition error, defaults to 1.\n- ``weights_vlle``: list or array_like, weights for the VLLE objective function.\n    - weights_vlle[0] = weight for X (liquid 1) composition error, defaults to 1.\n    - weights_vlle[1] = weight for W (liquid 2) composition error, defaults to 1.\n    - weights_vlle[2] = weight for Y (vapor) composition error, defaults to 1.\n    - weights_vlle[3] = weight for equilibrium pressure error, defaults to 1.\n\nAdditionally, you can pass options to SciPy's ``minimize`` function using the ``minimize_options`` parameter.\n\nFor more information just run:\n```fit_cross?```", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
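An illustrative sketch (not part of the stored notebook record above): the record documents the optional `weights_vle` / `weights_lle` / `weights_vlle` and `minimize_options` arguments of `fit_cross` but never shows them in a call. The weight values below are arbitrary assumptions, and `mix` and `datavle` refer to the mixture object and VLE data tuple built in the record's own cells; solver settings could likewise be passed through `minimize_options`.

```python
# Hypothetical usage sketch for fit_cross with VLE weights (weight values are assumptions).
from sgtpy.fit import fit_cross

# 'mix' and 'datavle' are assumed to be the mixture object and the (Xexp, Yexp, Texp, Pexp)
# tuple constructed in the record's cells.
x0 = [0.01015194, 2.23153033]   # initial guesses for kij and rc_ABij, as in the notebook

# weights_vle[0] weights the vapor-composition error, weights_vle[1] the bubble-pressure error
result = fit_cross(x0, mix, assoc=0, datavle=datavle, weights_vle=[1.0, 2.0])
print(result)                   # inspect the optimized kij and rc_ABij
```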
cb401c4528fa9aabdc5993e8e8f1fc04189b4d96
26,676
ipynb
Jupyter Notebook
examples/GaussianBosonSampling.ipynb
egbQuantum/strawberryfields
674e4fe2de5e5dd791a77f1cd219009120dcbbbf
[ "Apache-2.0" ]
1
2020-07-12T17:53:56.000Z
2020-07-12T17:53:56.000Z
examples/GaussianBosonSampling.ipynb
egbQuantum/strawberryfields
674e4fe2de5e5dd791a77f1cd219009120dcbbbf
[ "Apache-2.0" ]
5
2020-09-26T01:27:24.000Z
2022-02-10T02:13:49.000Z
examples/GaussianBosonSampling.ipynb
egbQuantum/strawberryfields
674e4fe2de5e5dd791a77f1cd219009120dcbbbf
[ "Apache-2.0" ]
null
null
null
36.343324
788
0.579022
[ [ [ "<table width=60%>\n <tr style=\"background-color: white;\">\n <td><img src='https://www.creativedestructionlab.com/wp-content/uploads/2018/05/xanadu.jpg'></td>\n </tr>\n</table>\n\n---\n\n<img src='https://raw.githubusercontent.com/XanaduAI/strawberryfields/master/doc/_static/strawberry-fields-text.png'>\n\n---\n\n<br>\n\n<center> <h1> Gaussian boson sampling tutorial </h1></center>\n\nTo get a feel for how Strawberry Fields works, let's try coding a quantum program, Gaussian boson sampling.", "_____no_output_____" ], [ "## Background information: Gaussian states\n\nA Gaussian state is one that can be described by a [Gaussian function](https://en.wikipedia.org/wiki/Gaussian_function) in the phase space. For example, a single mode Gaussian state squeezed in the $x$ quadrature by the squeezing operator $S(r)$ can be described by the following [Wigner quasiprobability distribution](https://en.wikipedia.org/wiki/Wigner_quasiprobability_distribution):\n\n$$W(x,p) = \\frac{2}{\\pi}e^{-2\\sigma^2(x-\\bar{x})^2 - 2(p-\\bar{p})^2/\\sigma^2}$$\n\nwhere $\\sigma$ represents the **squeezing**, and $\\bar{x}$ and $\\bar{p}$ are the mean **displacements** in $x$ and $p$, respectively. For multimode states containing $N$ modes, this can be generalised; Gaussian states are uniquely defined by a [multivariate Gaussian function](https://en.wikipedia.org/wiki/Multivariate_normal_distribution), defined in terms of the **vector of means** ${\\mu}$ and a **covariance matrix** $\\sigma$.\n\n### The position and momentum basis\n\nFor example, consider a single mode in the position and momentum quadrature basis (the default for Strawberry Fields). Assuming a Gaussian state with displacement $\\alpha = \\bar{x}+i\\bar{p}$ and squeezing $\\xi = r e^{i\\phi}$ in the phase space, it has a vector of means and a covariance matrix given by:\n\n$$ \\mu = (\\bar{x},\\bar{p}),~~~~~~\\sigma = SS^\\dagger=R(\\phi/2)\\begin{bmatrix}e^{-2r} & 0 \\\\0 & e^{2r} \\\\\\end{bmatrix}R(\\phi/2)^T$$\n\nwhere $S$ is the squeezing operator, and $R(\\phi)$ is the standard two-dimensional rotation matrix. 
For multiple modes, in Strawberry Fields we use the convention \n\n$$ \\mu = (\\bar{x}_1,\\bar{x}_2,\\dots,\\bar{x}_N,\\bar{p}_1,\\bar{p}_2,\\dots,\\bar{p}_N)$$\n\nand therefore, considering $\\phi=0$ for convenience, the multimode covariance matrix is simply\n\n$$\\sigma = \\text{diag}(e^{-2r_1},\\dots,e^{-2r_N},e^{2r_1},\\dots,e^{2r_N})\\in\\mathbb{C}^{2N\\times 2N}$$\n\nIf a continuous-variable state *cannot* be represented in the above form (for example, a single photon Fock state or a cat state), then it is non-Gaussian.\n\n### The annihilation and creation operator basis\n\nIf we are instead working in the creation and annihilation operator basis, we can use the transformation of the single mode squeezing operator\n\n$$ S(\\xi) \\left[\\begin{matrix}\\hat{a}\\\\\\hat{a}^\\dagger\\end{matrix}\\right] = \\left[\\begin{matrix}\\cosh(r)&-e^{i\\phi}\\sinh(r)\\\\-e^{-i\\phi}\\sinh(r)&\\cosh(r)\\end{matrix}\\right] \\left[\\begin{matrix}\\hat{a}\\\\\\hat{a}^\\dagger\\end{matrix}\\right]$$\n\nresulting in\n\n$$\\sigma = SS^\\dagger = \\left[\\begin{matrix}\\cosh(2r)&-e^{i\\phi}\\sinh(2r)\\\\-e^{-i\\phi}\\sinh(2r)&\\cosh(2r)\\end{matrix}\\right]$$\n\nFor multiple Gaussian states with non-zero squeezing, the covariance matrix in this basis simply generalises to\n\n$$\\sigma = \\text{diag}(S_1S_1^\\dagger,\\dots,S_NS_N^\\dagger)\\in\\mathbb{C}^{2N\\times 2N}$$", "_____no_output_____" ], [ "## Introduction to Gaussian boson sampling\n\n<div class=\"alert alert-info\">\n“If you need to wait exponential time for your single photon sources to emit simultaneously, then there would seem to be no advantage over classical computation. This is the reason why so far, boson sampling has only been demonstrated with 3-4 photons. When faced with these problems, until recently, all we could do was shrug our shoulders.” - [Scott Aaronson](https://www.scottaaronson.com/blog/?p=1579)\n</div>\n\nWhile [boson sampling](https://en.wikipedia.org/wiki/Boson_sampling) allows the experimental implementation of a quantum sampling problem that it countably hard classically, one of the main issues it has in experimental setups is one of **scalability**, due to its dependence on an array of simultaneously emitting single photon sources.\n\nCurrently, most physical implementations of boson sampling make use of a process known as [Spontaneous Parametric Down-Conversion](http://en.wikipedia.org/wiki/Spontaneous_parametric_down-conversion) to generate the single photon source inputs. Unfortunately, this method is non-deterministic - as the number of modes in the apparatus increases, the average time required until every photon source emits a simultaneous photon increases *exponentially*.\n\nIn order to simulate a *deterministic* single photon source array, several variations on boson sampling have been proposed; the most well known being scattershot boson sampling ([Lund, 2014](https://link.aps.org/doi/10.1103/PhysRevLett.113.100502)). However, a recent boson sampling variation by [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) negates the need for single photon Fock states altogether, by showing that **incident Gaussian states** - in this case, single mode squeezed states - can produce problems in the same computational complexity class as boson sampling. 
Even more significantly, this negates the scalability problem with single photon sources, as single mode squeezed states can be easily simultaneously generated experimentally.\n\nAside from changing the input states from single photon Fock states to Gaussian states, the Gaussian boson sampling scheme appears quite similar to that of boson sampling:\n\n1. $N$ single mode squeezed states $\\left|{\\xi_i}\\right\\rangle$, with squeezing parameters $\\xi_i=r_ie^{i\\phi_i}$, enter an $N$ mode linear interferometer with unitary $U$.\n <br>\n \n2. The output of the interferometer is denoted $\\left|{\\psi'}\\right\\rangle$. Each output mode is then measured in the Fock basis, $\\bigotimes_i n_i\\left|{n_i}\\middle\\rangle\\middle\\langle{n_i}\\right|$.\n\nWithout loss of generality, we can absorb the squeezing parameter $\\phi$ into the interferometer, and set $\\phi=0$ for convenience. The covariance matrix **in the creation and annihilation operator basis** at the output of the interferometer is then given by:\n\n$$\\sigma_{out} = \\frac{1}{2} \\left[ \\begin{matrix}U&0\\\\0&U^*\\end{matrix} \\right]\\sigma_{in} \\left[ \\begin{matrix}U^\\dagger&0\\\\0&U^T\\end{matrix} \\right]$$\n\nUsing phase space methods, [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) showed that the probability of measuring a Fock state is given by\n\n$$\\left|\\left\\langle{n_1,n_2,\\dots,n_N}\\middle|{\\psi'}\\right\\rangle\\right|^2 = \\frac{\\left|\\text{Haf}[(U\\bigoplus_i\\tanh(r_i)U^T)]_{st}\\right|^2}{n_1!n_2!\\cdots n_N!\\sqrt{|\\sigma_{out}+I/2|}},$$\n\ni.e. the sampled single photon probability distribution is proportional to the **Hafnian** of a submatrix of $U\\bigoplus_i\\tanh(r_i)U^T$, dependent upon the output covariance matrix.\n\n<div class=\"alert alert-success\" style=\"border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9\">\n\n<p style=\"color: #119a68;\">**The Hafnian**</p>\n\nThe Hafnian of a matrix is defined by\n<br><br>\n$$\\text{Haf}(A) = \\frac{1}{n!2^n}\\sum_{\\sigma=S_{2N}}\\prod_{i=1}^N A_{\\sigma(2i-1)\\sigma(2i)}$$\n<br>\n\n$S_{2N}$ is the set of all permutations of $2N$ elements. In graph theory, the Hafnian calculates the number of perfect <a href=\"https://en.wikipedia.org/wiki/Matching_(graph_theory)\">matchings</a> in an **arbitrary graph** with adjacency matrix $A$.\n<br>\n\nCompare this to the permanent, which calculates the number of perfect matchings on a *bipartite* graph - the Hafnian turns out to be a generalisation of the permanent, with the relationship\n\n$$\\begin{align}\n\\text{Per(A)} = \\text{Haf}\\left(\\left[\\begin{matrix}\n0&A\\\\\nA^T&0\n\\end{matrix}\\right]\\right)\n\\end{align}$$\n\nAs any algorithm that could calculate (or even approximate) the Hafnian could also calculate the permanent - a #P problem - it follows that calculating or approximating the Hafnian must also be a classically hard problem.\n</div>\n\n### Equally squeezed input states\n\nIn the case where all the input states are squeezed equally with squeezing factor $\\xi=r$ (i.e. so $\\phi=0$), we can simplify the denominator into a much nicer form. 
It can be easily seen that, due to the unitarity of $U$,\n\n$$\\left[ \\begin{matrix}U&0\\\\0&U^*\\end{matrix} \\right] \\left[ \\begin{matrix}U^\\dagger&0\\\\0&U^T\\end{matrix} \\right] = \\left[ \\begin{matrix}UU^\\dagger&0\\\\0&U^*U^T\\end{matrix} \\right] =I$$\n\nThus, we have \n\n$$\\begin{align}\n\\sigma_{out} +\\frac{1}{2}I &= \\sigma_{out} + \\frac{1}{2} \\left[ \\begin{matrix}U&0\\\\0&U^*\\end{matrix} \\right] \\left[ \\begin{matrix}U^\\dagger&0\\\\0&U^T\\end{matrix} \\right] = \\left[ \\begin{matrix}U&0\\\\0&U^*\\end{matrix} \\right] \\frac{1}{2} \\left(\\sigma_{in}+I\\right) \\left[ \\begin{matrix}U^\\dagger&0\\\\0&U^T\\end{matrix} \\right]\n\\end{align}$$\n\nwhere we have subtituted in the expression for $\\sigma_{out}$. Taking the determinants of both sides, the two block diagonal matrices containing $U$ are unitary, and thus have determinant 1, resulting in\n\n$$\\left|\\sigma_{out} +\\frac{1}{2}I\\right| =\\left|\\frac{1}{2}\\left(\\sigma_{in}+I\\right)\\right|=\\left|\\frac{1}{2}\\left(SS^\\dagger+I\\right)\\right| $$\n\nBy expanding out the right hand side, and using various trig identities, it is easy to see that this simply reduces to $\\cosh^{2N}(r)$ where $N$ is the number of modes; thus the Gaussian boson sampling problem in the case of equally squeezed input modes reduces to\n\n$$\\left|\\left\\langle{n_1,n_2,\\dots,n_N}\\middle|{\\psi'}\\right\\rangle\\right|^2 = \\frac{\\left|\\text{Haf}[(UU^T\\tanh(r))]_{st}\\right|^2}{n_1!n_2!\\cdots n_N!\\cosh^N(r)},$$", "_____no_output_____" ], [ "## The Gaussian boson sampling circuit\nThe multimode linear interferometer can be decomposed into two-mode beamsplitters (`BSgate`) and single-mode phase shifters (`Rgate`) (<a href=\"https://doi.org/10.1103/physrevlett.73.58\">Reck, 1994</a>), allowing for an almost trivial translation into a continuous-variable quantum circuit.\n\nFor example, in the case of a 4 mode interferometer, with arbitrary $4\\times 4$ unitary $U$, the continuous-variable quantum circuit for Gaussian boson sampling is given by\n\n<img src=\"https://s3.amazonaws.com/xanadu-img/gaussian_boson_sampling.svg\" width=70%/>\n\nIn the above,\n\n* the single mode squeeze states all apply identical squeezing $\\xi=r$,\n* the detectors perform Fock state measurements (i.e. measuring the photon number of each mode),\n* the parameters of the beamsplitters and the rotation gates determines the unitary $U$.\n\nFor $N$ input modes, we must have a minimum of $N$ columns in the beamsplitter array ([Clements, 2016](https://arxiv.org/abs/1603.08788)).", "_____no_output_____" ], [ "## Simulating boson sampling in Strawberry Fields\n\n", "_____no_output_____" ] ], [ [ "import strawberryfields as sf\nfrom strawberryfields.ops import *\nfrom strawberryfields.utils import random_interferometer", "_____no_output_____" ] ], [ [ "Strawberry Fields makes this easy; there is an `Interferometer` quantum operation, and a utility function that allows us to generate the matrix representing a random interferometer.", "_____no_output_____" ] ], [ [ "U = random_interferometer(4)", "_____no_output_____" ] ], [ [ "The lack of Fock states and non-linear operations means we can use the Gaussian backend to simulate Gaussian boson sampling. 
In this example program, we are using input states with squeezing parameter $\\xi=1$, and the randomly chosen interferometer generated above.", "_____no_output_____" ] ], [ [ "eng = sf.Engine('gaussian')\ngbs = sf.Program(4)\n\nwith gbs.context as q:\n # prepare the input squeezed states\n S = Sgate(1)\n All(S) | q\n\n # interferometer\n Interferometer(U) | q\n MeasureFock() | q\n \nresults = eng.run(gbs, run_options={\"shots\":10})\nstate = results.state\n\n# Note: Running this cell will generate a warning. This is just the Gaussian backend of Strawberryfields telling us \n# that, although it can carry out the MeasureFock operation, it will not update the state of the circuit after doing so,\n# since the resulting state would be non-Gaussian. For this notebook, the warning can be safely ignored.", "/home/xanadu-laptop/nathan-dev/strawberryfields/strawberryfields/backends/gaussianbackend/backend.py:200: UserWarning: Cannot simulate non-Gaussian states. Conditional state after Fock measurement has not been updated.\n" ] ], [ [ "We can see the decomposed beamsplitters and rotation gates, by calling `eng.print_applied()`:", "_____no_output_____" ] ], [ [ "eng.print_applied()", "Run 0:\nSgate(1, 0) | (q[0])\nSgate(1, 0) | (q[1])\nSgate(1, 0) | (q[2])\nSgate(1, 0) | (q[3])\nRgate(-2.799) | (q[0])\nBSgate(1.216, 0) | (q[0], q[1])\nRgate(-0.8845) | (q[2])\nBSgate(1.207, 0) | (q[2], q[3])\nRgate(2.402) | (q[1])\nBSgate(1.39, 0) | (q[1], q[2])\nRgate(0.915) | (q[0])\nBSgate(1.077, 0) | (q[0], q[1])\nRgate(-0.09233) | (q[0])\nRgate(-2.13) | (q[1])\nRgate(-2.676) | (q[2])\nRgate(1.016) | (q[3])\nBSgate(-0.9855, 0) | (q[2], q[3])\nRgate(-2.281) | (q[2])\nBSgate(-1.259, 0) | (q[1], q[2])\nRgate(-1.454) | (q[1])\nMeasureFock() | (q[0], q[1], q[2], q[3])\n" ] ], [ [ "<div class=\"alert alert-success\" style=\"border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9\">\n<p style=\"color: #119a68;\">**Available decompositions**</p>\n\nCheck out our <a href=\"https://strawberryfields.readthedocs.io/en/stable/conventions/decompositions.html\">documentation</a> to see the available CV decompositions available in Strawberry Fields.\n</div>\n", "_____no_output_____" ], [ "We can also see some of the measurement samples from this circuit within `results.samples`. These correspond to independent runs of the Gaussian Boson Sampling circuit. ", "_____no_output_____" ] ], [ [ "results.samples", "_____no_output_____" ] ], [ [ "## Analysis\n\nLet's now verify the Gaussian boson sampling result, by comparing the output Fock state probabilities to the Hafnian, using the relationship\n\n$$\\left|\\left\\langle{n_1,n_2,\\dots,n_N}\\middle|{\\psi'}\\right\\rangle\\right|^2 = \\frac{\\left|\\text{Haf}[(UU^T\\tanh(r))]_{st}\\right|^2}{n_1!n_2!\\cdots n_N!\\cosh^N(r)}$$", "_____no_output_____" ], [ "### Calculating the Hafnian\n\nFor the right hand side numerator, we first calculate the submatrix $[(UU^T\\tanh(r))]_{st}$:", "_____no_output_____" ] ], [ [ "import numpy as np\nB = (np.dot(U, U.T) * np.tanh(1))", "_____no_output_____" ] ], [ [ "In Gaussian boson sampling, we determine the submatrix by taking the rows and columns corresponding to the measured Fock state. 
For example, to calculate the submatrix in the case of the output measurement $\\left|{1,1,0,0}\\right\\rangle$,", "_____no_output_____" ] ], [ [ "B[:,[0,1]][[0,1]]", "_____no_output_____" ] ], [ [ "To calculate the Hafnian in Python, we can use the direct definition\n\n$$\\text{Haf}(A) = \\frac{1}{n!2^n} \\sum_{\\sigma \\in S_{2n}} \\prod_{j=1}^n A_{\\sigma(2j - 1), \\sigma(2j)}$$\n\nNotice that this function counts each term in the definition multiple times, and renormalizes to remove the multiple counts by dividing by a factor $\\frac{1}{n!2^n}$. **This function is extremely slow!**", "_____no_output_____" ] ], [ [ "from itertools import permutations\nfrom scipy.special import factorial\n\ndef Haf(M):\n n = len(M)\n m = int(n/2)\n haf = 0.0\n for i in permutations(range(n)):\n prod = 1.0\n for j in range(m):\n prod *= M[i[2 * j], i[2 * j + 1]]\n haf += prod\n return haf / (factorial(m) * (2 ** m))", "_____no_output_____" ] ], [ [ "## Comparing to the SF result", "_____no_output_____" ], [ "In Strawberry Fields, both Fock and Gaussian states have the method `fock_prob()`, which returns the probability of measuring that particular Fock state.\n\n#### Let's compare the case of measuring at the output state $\\left|0,1,0,1\\right\\rangle$:", "_____no_output_____" ] ], [ [ "B = (np.dot(U, U.T) * np.tanh(1))[:, [1, 3]][[1, 3]]\nnp.abs(Haf(B)) ** 2 / np.cosh(1) ** 4", "_____no_output_____" ], [ "state.fock_prob([0, 1, 0, 1])", "_____no_output_____" ] ], [ [ "#### For the measurement result $\\left|2,0,0,0\\right\\rangle$:", "_____no_output_____" ] ], [ [ "B = (np.dot(U, U.T) * np.tanh(1))[:, [0, 0]][[0, 0]]\nnp.abs(Haf(B)) ** 2 / (2 * np.cosh(1) ** 4)", "_____no_output_____" ], [ "state.fock_prob([2, 0, 0, 0])", "_____no_output_____" ] ], [ [ "#### For the measurement result $\\left|1,1,0,0\\right\\rangle$:", "_____no_output_____" ] ], [ [ "B = (np.dot(U, U.T) * np.tanh(1))[:, [0, 1]][[0, 1]]\nnp.abs(Haf(B)) ** 2 / np.cosh(1) ** 4", "_____no_output_____" ], [ "state.fock_prob([1, 1, 0, 0])", "_____no_output_____" ] ], [ [ "#### For the measurement result $\\left|1,1,1,1\\right\\rangle$, this corresponds to the full matrix $B$:", "_____no_output_____" ] ], [ [ "B = (np.dot(U,U.T) * np.tanh(1))\nnp.abs(Haf(B)) ** 2 / np.cosh(1) ** 4", "_____no_output_____" ], [ "state.fock_prob([1, 1, 1, 1])", "_____no_output_____" ] ], [ [ "#### For the measurement result $\\left|0,0,0,0\\right\\rangle$, this corresponds to a **null** submatrix, which has a Hafnian of 1:", "_____no_output_____" ] ], [ [ "1 / np.cosh(1) ** 4", "_____no_output_____" ], [ "state.fock_prob([0, 0, 0, 0])", "_____no_output_____" ] ], [ [ "As you can see, like in the boson sampling tutorial, they agree with almost negligable difference.\n\n<div class=\"alert alert-success\" style=\"border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9\">\n<p style=\"color: #119a68;\">**Exercises**</p>\n\nRepeat this notebook with \n<ol>\n <li> A higher value for <tt>shots</tt> in <tt>eng.run()</tt>, and compare the relative probabilties of events with the expected values.</li>\n <li> A Fock backend such as NumPy, instead of the Gaussian backend</li>\n <li> Different beamsplitter and rotation parameters</li>\n <li> Input states with *differing* squeezed values $r_i$. You will need to modify the code to take into account the fact that the output covariance matrix determinant must now be calculated!\n</ol>\n</div>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb401ed7227cbc72ee0bf3721e241058627adb86
2,585
ipynb
Jupyter Notebook
Solution/Machine_Learning_Solution.ipynb
colaberry/odsc-2017
a21be90979ab36f706421c2c43a83ace29333cda
[ "MIT" ]
null
null
null
Solution/Machine_Learning_Solution.ipynb
colaberry/odsc-2017
a21be90979ab36f706421c2c43a83ace29333cda
[ "MIT" ]
null
null
null
Solution/Machine_Learning_Solution.ipynb
colaberry/odsc-2017
a21be90979ab36f706421c2c43a83ace29333cda
[ "MIT" ]
null
null
null
19.007353
138
0.488588
[ [ [ "## 1. Exercise \n\n#### Instructions\n* Given the data set of college majors with information as to who secured a job and who didn't, what is this class of ML problem?\n\nCollege Majors\nMajor\t Grade\tInternship\tSports\tJob at Graduation\nEngineering\tA\tNo\tNo\tNo\nArts\tB\tYes\tYes\tYes\nMathematics\tB\tB\tNo\tYes\n [20, 10, 5, 4],\n [5, 4, 39, 3]] ", "_____no_output_____" ] ], [ [ "# Print the answer\nprint(\"\")", "_____no_output_____" ] ], [ [ "## 1. Solution", "_____no_output_____" ] ], [ [ "# Print the answer\nprint(\"Supervised Learning, Classification\")", "Supervised Learning, Classification\n" ] ], [ [ "## 2. Exercise\n\n#### Instructions\n* Given a line y = 5*x + 3,\n* compute predictions for x = {1, 5, 10, 12} and assign it to variable y.", "_____no_output_____" ] ], [ [ "# Compute y for x, define x below.\nx = 'array'", "_____no_output_____" ] ], [ [ "## 2. Solution", "_____no_output_____" ] ], [ [ "import numpy as np \nx = np.array([1, 5, 10, 12]) \ny = 5*x + 3 \nprint(y)", "[ 8 28 53 63]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb40467fc2f04ce1eb6c05162d6a585a375fe03f
38,424
ipynb
Jupyter Notebook
Notebooks cidades/Sao_Goncalo_Antes.ipynb
amandacaravieri/ProjetoFinal-COVID_Brasil
c10eb0d734e36e5bb888a7b5cd468420da86d066
[ "MIT" ]
null
null
null
Notebooks cidades/Sao_Goncalo_Antes.ipynb
amandacaravieri/ProjetoFinal-COVID_Brasil
c10eb0d734e36e5bb888a7b5cd468420da86d066
[ "MIT" ]
null
null
null
Notebooks cidades/Sao_Goncalo_Antes.ipynb
amandacaravieri/ProjetoFinal-COVID_Brasil
c10eb0d734e36e5bb888a7b5cd468420da86d066
[ "MIT" ]
null
null
null
35.512015
204
0.349755
[ [ [ "# Manipulação e tratamento das bases\nimport pandas as pd\nimport numpy as np\n\n#Pré-Processamento das bases\n!pip install imblearn\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.model_selection import train_test_split\n\n#Modelagem de Dados\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import accuracy_score\n", "Requirement already satisfied: imblearn in c:\\users\\windows10\\anaconda3\\lib\\site-packages (0.0)\nRequirement already satisfied: imbalanced-learn in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imblearn) (0.8.0)\nRequirement already satisfied: scikit-learn>=0.24 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imbalanced-learn->imblearn) (0.24.2)\nRequirement already satisfied: scipy>=0.19.1 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imbalanced-learn->imblearn) (1.6.2)\nRequirement already satisfied: joblib>=0.11 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imbalanced-learn->imblearn) (1.0.1)\nRequirement already satisfied: numpy>=1.13.3 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imbalanced-learn->imblearn) (1.19.5)\nRequirement already satisfied: threadpoolctl>=2.0.0 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from scikit-learn>=0.24->imbalanced-learn->imblearn) (2.2.0)\n" ], [ "Antes= pd.read_csv('Base_Tratada.csv', sep= ',')\nAntes= Antes.loc[Antes['CO_MUN_NOT'].isin([330490])]\nAntes=Antes[(Antes['Periodo']==1.0)]\nAntes= Antes.drop(columns=[\"CO_MUN_NOT\", \"Periodo\"])\nAntes.head()", "C:\\Users\\Windows10\\anaconda3\\lib\\site-packages\\IPython\\core\\interactiveshell.py:3441: DtypeWarning: Columns (16) have mixed types.Specify dtype option on import or set low_memory=False.\n exec(code_obj, self.user_global_ns, self.user_ns)\n" ] ], [ [ "# PRÉ-PROCESSAMENTO", "_____no_output_____" ] ], [ [ "Antes['CS_GESTANT'].replace({1.0: 1, 2.0: 1, 3.0 :1, 4.0 : 1}, inplace= True)\nAntes['CS_GESTANT'].replace({5.0: 0, 6.0:0, 9.0:0}, inplace= True)\nAntes['CS_RACA'].fillna(9,inplace= True)\nAntes['CS_ESCOL_N'].fillna(9,inplace= True)\nAntes['SURTO_SG'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['SURTO_SG'].fillna(0,inplace= True)\nAntes['NOSOCOMIAL'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['NOSOCOMIAL'].fillna(0,inplace= True)\nAntes['FEBRE'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['FEBRE'].fillna(0,inplace= True)\nAntes['TOSSE'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['TOSSE'].fillna(0,inplace= True)\nAntes['GARGANTA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['GARGANTA'].fillna(0,inplace= True)\nAntes['DISPNEIA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['DISPNEIA'].fillna(0,inplace= True)\nAntes['DESC_RESP'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['DESC_RESP'].fillna(0,inplace= True)\nAntes['SATURACAO'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['SATURACAO'].fillna(0,inplace= True)\nAntes['DIARREIA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['DIARREIA'].fillna(0,inplace= True)\nAntes['VOMITO'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['VOMITO'].fillna(0,inplace= True)\nAntes['PUERPERA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['PUERPERA'].fillna(0,inplace= True)\nAntes['CARDIOPATI'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['CARDIOPATI'].fillna(0,inplace= True)\nAntes['HEMATOLOGI'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['HEMATOLOGI'].fillna(0,inplace= True)\nAntes['SIND_DOWN'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['SIND_DOWN'].fillna(0,inplace= 
True)\nAntes['HEPATICA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['HEPATICA'].fillna(0,inplace= True)\nAntes['ASMA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['ASMA'].fillna(0,inplace= True)\nAntes['DIABETES'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['DIABETES'].fillna(0,inplace= True)\nAntes['NEUROLOGIC'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['NEUROLOGIC'].fillna(0,inplace= True)\nAntes['PNEUMOPATI'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['PNEUMOPATI'].fillna(0,inplace= True)\nAntes['IMUNODEPRE'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['IMUNODEPRE'].fillna(0,inplace= True)\nAntes['RENAL'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['RENAL'].fillna(0,inplace= True)\nAntes['OBESIDADE'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['OBESIDADE'].fillna(0,inplace= True)\nAntes['ASMA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['ASMA'].fillna(0,inplace= True)\nAntes['ANTIVIRAL'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['ANTIVIRAL'].fillna(0,inplace= True)\nAntes['UTI'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['UTI'].fillna(0,inplace= True)\nAntes['SUPORT_VEN'].replace({3.0: 0, 9.0: 0}, inplace= True)\nAntes['SUPORT_VEN'].fillna(0,inplace= True)\nAntes['PCR_RESUL'].fillna(4,inplace= True)\nAntes['HISTO_VGM'].replace({0: 2}, inplace= True)\nAntes['DOR_ABD'].replace({9.0: 0, 2.0 :0}, inplace= True)\nAntes['DOR_ABD'].fillna(0,inplace= True)\nAntes['FADIGA'].replace({9.0: 0, 2.0 :0}, inplace= True)\nAntes['FADIGA'].fillna(0,inplace= True)\nAntes['PERD_OLFT'].replace({9.0: 0, 2.0 :0}, inplace= True)\nAntes['PERD_OLFT'].fillna(0,inplace= True)\nAntes['PERD_PALA'].replace({9.0: 0, 2.0 :0}, inplace= True)\nAntes['PERD_PALA'].fillna(0,inplace= True)\nAntes['VACINA'].fillna(0,inplace= True)\nAntes['FATOR_RISC'].replace({'S': 1, 'N':2, '1':1, '2':2}, inplace= True)\nAntes['FATOR_RISC'].fillna(0,inplace= True)", "_____no_output_____" ] ], [ [ "- Resetando o Index novamente.", "_____no_output_____" ] ], [ [ "Antes= Antes.reset_index(drop=True)\nAntes.head()", "_____no_output_____" ] ], [ [ "- Aplicação da Dummy nas Features Categóricas", "_____no_output_____" ] ], [ [ "Antes=pd.get_dummies(Antes, columns=['CS_SEXO', 'CS_GESTANT', 'CS_RACA', 'CS_ESCOL_N',\n 'SURTO_SG', 'NOSOCOMIAL', 'FEBRE', 'TOSSE', 'GARGANTA', 'DISPNEIA',\n 'DESC_RESP', 'SATURACAO', 'DIARREIA', 'VOMITO', 'PUERPERA',\n 'FATOR_RISC', 'CARDIOPATI', 'HEMATOLOGI', 'SIND_DOWN', 'HEPATICA',\n 'ASMA', 'DIABETES', 'NEUROLOGIC', 'PNEUMOPATI', 'IMUNODEPRE', 'RENAL',\n 'OBESIDADE', 'VACINA', 'ANTIVIRAL', 'UTI', 'SUPORT_VEN', 'PCR_RESUL',\n 'HISTO_VGM', 'DOR_ABD', 'FADIGA', 'PERD_OLFT', 'PERD_PALA'], drop_first=True)\nAntes.head()", "_____no_output_____" ] ], [ [ "# Verificando o Balanceamento", "_____no_output_____" ] ], [ [ "Antes[\"EVOLUCAO\"].value_counts(normalize=True)", "_____no_output_____" ], [ "X = Antes[['IDADE_ANOS','CS_SEXO_M','CS_RACA_4.0','FEBRE_1.0','DISPNEIA_1.0','SATURACAO_1.0','UTI_1.0',\n 'SUPORT_VEN_1.0', 'SUPORT_VEN_2.0', 'PCR_RESUL_2.0','TOSSE_1.0','DESC_RESP_1.0', 'FATOR_RISC_2']]\n\ny = Antes['EVOLUCAO']\n\nXtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3, random_state=42)", "_____no_output_____" ], [ "Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape", "_____no_output_____" ], [ "smote = SMOTE(sampling_strategy = 'minority', random_state = 42)\nXtrain_over, ytrain_over = smote.fit_resample(Xtrain,ytrain)\n\nXtest_over, ytest_over = smote.fit_resample(Xtest,ytest)\nXtrain_over.shape, ytrain_over.shape, Xtest_over.shape, 
ytest_over.shape", "_____no_output_____" ] ], [ [ "# Aplicação do Modelo Escolhido", "_____no_output_____" ] ], [ [ "random_state=42\nGRA = GradientBoostingClassifier()\nGRA.fit(Xtrain_over, ytrain_over)", "_____no_output_____" ], [ "previsoes = GRA.predict(Xtest_over)\nprevisoes", "_____no_output_____" ], [ "accuracy_score(ytest_over, previsoes)", "_____no_output_____" ], [ "# Testar Modelo\n\nidade = 43.0\nsexo = 1\nraca = 0\nfebre = 1\ndispneia = 1\nsaturacao = 0\nuti = 1\nsuport1 = 1\nsuport2 = 0\npcr = 1\ntosse = 1\ndescresp = 0\nfrisc = 0\n\nprediction = GRA.predict(np.array([idade, sexo, raca, febre, dispneia, saturacao, uti, suport1, suport2, pcr, tosse, descresp, frisc]).reshape(1, -1))\n\nprint(prediction)\n", "[2.]\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb40493597b5b8aaf0d18d1f1ac412b21f461e43
8,712
ipynb
Jupyter Notebook
insane-gpcr.ipynb
spiderman2175/insane-gpcr
2ca85de14745ec87b35d42a31a688ca2499bbb0a
[ "CC0-1.0" ]
null
null
null
insane-gpcr.ipynb
spiderman2175/insane-gpcr
2ca85de14745ec87b35d42a31a688ca2499bbb0a
[ "CC0-1.0" ]
null
null
null
insane-gpcr.ipynb
spiderman2175/insane-gpcr
2ca85de14745ec87b35d42a31a688ca2499bbb0a
[ "CC0-1.0" ]
null
null
null
31.912088
194
0.548898
[ [ [ "import numpy as np\nimport MDAnalysis as mda\nimport nglview as nv\nfrom sklearn.decomposition import PCA\nimport requests\nfrom Bio.PDB import *", "_____no_output_____" ] ], [ [ "### Overall settings", "_____no_output_____" ] ], [ [ "movav_resis = 3 # number of residues used to calculate moving averages for CA positions (must be 3,5 or 7)\nvector_scale_factor = 10 \nvector_width = 1.0 \nvector_colors = [ [0,0,1], [1,0,0], [0,1,0] ]", "_____no_output_____" ] ], [ [ "### G protein details", "_____no_output_____" ], [ "Give the residue numbers for Galpha H5.13 to H5.23 (as used in DOI [10.1073/pnas.1820944116](https://doi.org/10.1073/pnas.1820944116)) and the Galpha chain", "_____no_output_____" ] ], [ [ "h5_inds = [341,351] # Gi residue numbers\n#h5_inds = [337,347] # Gi (6OY9, 6OYA)\n#h5_inds = [346,356] # Gq (+7DFL)\n#h5_inds = [233,243] # Gq (6WHA)\n#h5_inds = [381,391] # Gs\n#h5_inds = [371,381] # Gs (7D3S, 6GDG)\n#h5_inds = [2381,2391] # Gs (6E67)\n#h5_inds = [367,377] # Gs (7JJO)\ngpro_chain = \"A\"", "_____no_output_____" ] ], [ [ "### GPCR details", "_____no_output_____" ], [ "Then you can either read the GPCR details manually by running this...", "_____no_output_____" ] ], [ [ "pdb_filename = \"PDB_filename.pdb\"\ngpcr_chain = \"R\"\ngpcr_name = \"uniprotname_variant\"", "_____no_output_____" ] ], [ [ "OR fetch GPCR structure from PDB:", "_____no_output_____" ] ], [ [ "pdb = \"7DFL\"\n\n# Download the PDB file\npdbl = PDBList()\npdb_filename = pdbl.retrieve_pdb_file( pdb, file_format = \"pdb\" )\n\n# Get the protein name from GPCRdb\nurl = 'https://gpcrdb.org/services/structure/' + pdb + '/'\nresponse = requests.get(url)\nprotein_data = response.json()\ngpcr_chain = protein_data['preferred_chain']\ngpcr_name = protein_data['protein']\nprint( \"gpcr_chain:\", gpcr_chain )\nprint( \"gpcr_name:\", gpcr_name )", "_____no_output_____" ] ], [ [ "Now fetch the GPCR details from GPCRdb by running the following:", "_____no_output_____" ] ], [ [ "# For each helix, get the start and end residue numbers from GPCRdb\nurl = 'https://gpcrdb.org/services/residues/' + gpcr_name + '/'\nresponse = requests.get(url)\nprotein_data = response.json()\ntm_endpoints = np.zeros((8,2))\nhelix_no = 0\nfor i in protein_data:\n generic_no = i['display_generic_number']\n if generic_no == None:\n continue\n expected_prefix = str(helix_no) + \".\"\n next_prefix = str(helix_no + 1) + \".\"\n sequence_no = i['sequence_number']\n if generic_no[:2] == next_prefix:\n tm_endpoints[helix_no,0] = sequence_no\n helix_no += 1\n if generic_no[1] == \".\":\n tm_endpoints[helix_no-1,1] = sequence_no", "_____no_output_____" ] ], [ [ "### Run the analysis", "_____no_output_____" ] ], [ [ "u = mda.Universe( pdb_filename )\n\n# Open NGLView instance\nview1 = nv.show_mdanalysis(u)\nview1.remove_cartoon()\nview1.remove_ball_and_stick()\nview1.add_cartoon('protein',color='#00BB00', opacity=0.3)\n\n# Fit a vector to residues H5.13 to H5.23 and plot it\nh5_selection = \"(segid %s) and (resnum %d-%d) and (name CA)\" % ( gpro_chain, h5_inds[0], h5_inds[1] )\nh5_CAs = u.select_atoms( h5_selection )\nh5_PCA = h5_CAs.principal_axes()\nh5_cog = h5_CAs.center_of_geometry()\nview1.shape.add_arrow( ( h5_cog + vector_scale_factor * h5_PCA[2] ).tolist(), ( h5_cog - vector_scale_factor * h5_PCA[2] ).tolist(), vector_colors[0], vector_width )\n\n# For each TM, fit a vector to CA's #4-9 from the extracellular side\ntm_vectors = np.zeros((7,3))\nbundle_resnum = \"\"\nfor i in range(7):\n \n # Get the indices for extracellular residues no. 
4-9 \n extracell_index = 0\n tm_startres = tm_endpoints[i,0] + 3\n if i%2 != 0:\n extracell_index = -1\n tm_startres = tm_endpoints[i,1] - 8\n print( \"For TM%d, using residues %d-%d\" % ( i+1, tm_startres, tm_startres + 5 ) )\n tm_resnum = \"(resnum %d-%d)\" % ( tm_startres, tm_startres + 5 )\n if i > 0:\n bundle_resnum += \" or \"\n bundle_resnum += tm_resnum\n \n # Use CA positions from PDB file\n tm_CAs = u.select_atoms( \"segid %s and %s and (name CA)\" % ( gpcr_chain, tm_resnum ) )\n if len( tm_CAs ) < 6:\n continue\n tm_PCA = tm_CAs.principal_axes()\n principal_idx = 2\n tm_cog = tm_CAs.center_of_geometry()\n \n # Overwrite: Use moving average of three CAs\n tm_CAs = u.select_atoms( \"segid %s and (resnum %d-%d) and (name CA)\" % ( gpcr_chain, tm_startres - ( movav_resis - 1 )/2, tm_startres + 5 + ( movav_resis - 1 )/2 ) )\n tm_pos = tm_CAs.positions\n tm_pos_movav = np.zeros( (6,3) )\n for k in range(6):\n tm_pos_movav[k,:] = np.average( tm_pos[ k:(k+movav_resis-1), : ], axis = 0 )\n pca = PCA()\n pca.fit( tm_pos_movav )\n tm_PCA = pca.components_\n principal_idx = 0\n \n # Make sure the vectors are pointing in the same direction\n dist_to_endpoint = np.zeros(2)\n for k in [0,1]:\n dist_to_endpoint[k] = np.sum( np.power( tm_cog + np.power( -1, k) * tm_PCA[ principal_idx ] - tm_pos[ extracell_index ], 2 ) )\n if dist_to_endpoint[1] > dist_to_endpoint[0]:\n tm_PCA[0] *= -1\n tm_vectors[i,:] = tm_PCA[ principal_idx ]\n # Plot the TM vector\n view1.shape.add_arrow( ( tm_cog - vector_scale_factor * tm_vectors[i,:] ).tolist(), ( tm_cog + vector_scale_factor * tm_vectors[i,:] ).tolist(), vector_colors[1], vector_width )\n\n# Now calculate the GPCR axis by summing the TM vectors\nbundle_vector = np.sum( tm_vectors, axis = 0 )\nbundle_vector /= np.linalg.norm( bundle_vector )\nbundle_CAs = u.select_atoms( \"segid %s and (%s) and (name CA)\" % ( gpcr_chain, bundle_resnum ) )\nbundle_cog = bundle_CAs.center_of_geometry()\nview1.shape.add_arrow( ( bundle_cog - vector_scale_factor * bundle_vector ).tolist(), ( bundle_cog + 4 * vector_scale_factor * bundle_vector ).tolist(), vector_colors[2], vector_width )\n\n# Calculate the angle between the H5 vector and the GPCR axis\nangle = np.round( np.rad2deg( np.arccos( np.dot( bundle_vector, h5_PCA[2] ) ) ) )\nprint( \"Angle: %d degrees\" % np.min( [ angle, 180-angle ] ) )\n\n# Show the NGLView instance\nview1", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb405ce750af2f3fa4ed6f53552a0c2988fcf823
11,660
ipynb
Jupyter Notebook
notebooks/interpolation/wradlib_ipol_example.ipynb
egouden/wradlib-notebooks
213ec2b99bce792d6f11fdbe936ae9626e8beeec
[ "MIT" ]
9
2019-08-17T15:01:28.000Z
2021-06-08T06:47:42.000Z
notebooks/interpolation/wradlib_ipol_example.ipynb
egouden/wradlib-notebooks
213ec2b99bce792d6f11fdbe936ae9626e8beeec
[ "MIT" ]
1
2019-08-30T11:54:34.000Z
2019-08-30T11:57:46.000Z
notebooks/interpolation/wradlib_ipol_example.ipynb
egouden/wradlib-notebooks
213ec2b99bce792d6f11fdbe936ae9626e8beeec
[ "MIT" ]
5
2019-08-07T14:26:43.000Z
2020-03-16T14:22:30.000Z
29.744898
181
0.559348
[ [ [ "This notebook is part of the $\\omega radlib$ documentation: https://docs.wradlib.org.\n\nCopyright (c) $\\omega radlib$ developers.\nDistributed under the MIT License. See LICENSE.txt for more info.", "_____no_output_____" ], [ "# How to use wradlib's ipol module for interpolation tasks?", "_____no_output_____" ] ], [ [ "import wradlib.ipol as ipol\nfrom wradlib.util import get_wradlib_data_file\nfrom wradlib.vis import plot_ppi\nimport numpy as np\nimport matplotlib.pyplot as pl\nimport datetime as dt\nimport warnings\nwarnings.filterwarnings('ignore')\ntry:\n get_ipython().magic(\"matplotlib inline\")\nexcept:\n pl.ion()", "_____no_output_____" ] ], [ [ "### 1-dimensional example\n\nIncludes Nearest Neighbours, Inverse Distance Weighting, and Ordinary Kriging.", "_____no_output_____" ] ], [ [ "# Synthetic observations\nxsrc = np.arange(10)[:, None]\nvals = np.sin(xsrc).ravel()\n\n# Define target coordinates\nxtrg = np.linspace(0, 20, 100)[:, None]\n\n# Set up interpolation objects\n# IDW\nidw = ipol.Idw(xsrc, xtrg)\n# Nearest Neighbours\nnn = ipol.Nearest(xsrc, xtrg)\n# Linear\nok = ipol.OrdinaryKriging(xsrc, xtrg)\n\n# Plot results\npl.figure(figsize=(10,5))\npl.plot(xsrc.ravel(), vals, 'bo', label=\"Observation\")\npl.plot(xtrg.ravel(), idw(vals), 'r-', label=\"IDW interpolation\")\npl.plot(xtrg.ravel(), nn(vals), 'k-', label=\"Nearest Neighbour interpolation\")\npl.plot(xtrg.ravel(), ok(vals), 'g-', label=\"Ordinary Kriging\")\npl.xlabel(\"Distance\", fontsize=\"large\")\npl.ylabel(\"Value\", fontsize=\"large\")\npl.legend(loc=\"bottomright\")", "_____no_output_____" ] ], [ [ "### 2-dimensional example\n\nIncludes Nearest Neighbours, Inverse Distance Weighting, Linear Interpolation, and Ordinary Kriging.", "_____no_output_____" ] ], [ [ "# Synthetic observations and source coordinates\nsrc = np.vstack( (np.array([4, 7, 3, 15]), np.array([8, 18, 17, 3]))).transpose()\nnp.random.seed(1319622840)\nvals = np.random.uniform(size=len(src))\n\n# Target coordinates\nxtrg = np.linspace(0, 20, 40)\nytrg = np.linspace(0, 20, 40)\ntrg = np.meshgrid(xtrg, ytrg)\ntrg = np.vstack( (trg[0].ravel(), trg[1].ravel()) ).T\n\n# Interpolation objects\nidw = ipol.Idw(src, trg)\nnn = ipol.Nearest(src, trg)\nlinear = ipol.Linear(src, trg)\nok = ipol.OrdinaryKriging(src, trg)\n\n# Subplot layout\ndef gridplot(interpolated, title=\"\"):\n pm = ax.pcolormesh(xtrg, ytrg, interpolated.reshape( (len(xtrg), len(ytrg)) ) )\n pl.axis(\"tight\")\n ax.scatter(src[:, 0], src[:, 1], facecolor=\"None\", s=50, marker='s')\n pl.title(title)\n pl.xlabel(\"x coordinate\")\n pl.ylabel(\"y coordinate\")\n\n# Plot results\nfig = pl.figure(figsize=(8,8))\nax = fig.add_subplot(221, aspect=\"equal\")\ngridplot(idw(vals), \"IDW\")\nax = fig.add_subplot(222, aspect=\"equal\")\ngridplot(nn(vals), \"Nearest Neighbours\")\nax = fig.add_subplot(223, aspect=\"equal\")\ngridplot(np.ma.masked_invalid(linear(vals)), \"Linear interpolation\")\nax = fig.add_subplot(224, aspect=\"equal\")\ngridplot(ok(vals), \"Ordinary Kriging\")\npl.tight_layout()", "_____no_output_____" ] ], [ [ "### Using the convenience function ipol.interpolation in order to deal with missing values\n\n**(1)** Exemplified for one dimension in space and two dimensions of the source value array (could e.g. be two time steps).", "_____no_output_____" ] ], [ [ "# Synthetic observations (e.g. two time steps)\nsrc = np.arange(10)[:, None]\nvals = np.hstack((1.+np.sin(src), 5. 
+ 2.*np.sin(src)))\n# Target coordinates\ntrg = np.linspace(0, 20, 100)[:, None]\n# Here we introduce missing values in the second dimension of the source value array\nvals[3:5, 1] = np.nan\n# interpolation using the convenience function \"interpolate\"\nidw_result = ipol.interpolate(src, trg, vals, ipol.Idw, nnearest=4)\nnn_result = ipol.interpolate(src, trg, vals, ipol.Nearest)\n# Plot results\nfig = pl.figure(figsize=(10,5))\nax = fig.add_subplot(111)\npl1 = ax.plot(trg, idw_result, 'b-', label=\"IDW\")\npl2 = ax.plot(trg, nn_result, 'k-', label=\"Nearest Neighbour\")\npl3 = ax.plot(src, vals, 'ro', label=\"Observations\")", "_____no_output_____" ] ], [ [ "**(2)** Exemplified for two dimensions in space and two dimensions of the source value array (e.g. time steps), containing also NaN values (here we only use IDW interpolation)", "_____no_output_____" ] ], [ [ "# Just a helper function for repeated subplots\ndef plotall(ax, trgx, trgy, src, interp, pts, title, vmin, vmax):\n ix = np.where(np.isfinite(pts))\n ax.pcolormesh(trgx, trgy, interp.reshape( (len(trgx),len(trgy) ) ), vmin=vmin, vmax=vmax )\n ax.scatter(src[ix, 0].ravel(), src[ix, 1].ravel(), c=pts.ravel()[ix], s=20, marker='s',\n vmin=vmin, vmax=vmax)\n ax.set_title(title)\n pl.axis(\"tight\")", "_____no_output_____" ], [ "# Synthetic observations\nsrc = np.vstack( (np.array([4, 7, 3, 15]), np.array([8, 18, 17, 3])) ).T\nnp.random.seed(1319622840 + 1)\nvals = np.round(np.random.uniform(size=(len(src), 2)), 1)\n\n# Target coordinates\ntrgx = np.linspace(0, 20, 100)\ntrgy = np.linspace(0, 20, 100)\ntrg = np.meshgrid(trgx, trgy)\ntrg = np.vstack((trg[0].ravel(), trg[1].ravel())).transpose()\n\nresult = ipol.interpolate(src, trg, vals, ipol.Idw, nnearest=4)\n\n# Now introduce NaNs in the observations\nvals_with_nan = vals.copy()\nvals_with_nan[1, 0] = np.nan\nvals_with_nan[1:3, 1] = np.nan\nresult_with_nan = ipol.interpolate(src, trg, vals_with_nan, ipol.Idw, nnearest=4)\nvmin = np.concatenate((vals.ravel(), result.ravel())).min()\nvmax = np.concatenate((vals.ravel(), result.ravel())).max()\n\nfig = pl.figure(figsize=(8,8))\nax = fig.add_subplot(221)\nplotall(ax, trgx, trgy, src, result[:, 0], vals[:, 0], '1st dim: no NaNs', vmin, vmax)\nax = fig.add_subplot(222)\nplotall(ax, trgx, trgy, src, result[:, 1], vals[:, 1], '2nd dim: no NaNs', vmin, vmax)\nax = fig.add_subplot(223)\nplotall(ax, trgx, trgy, src, result_with_nan[:, 0], vals_with_nan[:, 0], '1st dim: one NaN', vmin, vmax)\nax = fig.add_subplot(224)\nplotall(ax, trgx, trgy, src, result_with_nan[:, 1], vals_with_nan[:, 1], '2nd dim: two NaN', vmin, vmax)\npl.tight_layout()", "_____no_output_____" ] ], [ [ "### How to use interpolation for gridding data in polar coordinates?", "_____no_output_____" ], [ "Read polar coordinates and corresponding rainfall intensity from file", "_____no_output_____" ] ], [ [ "filename = get_wradlib_data_file('misc/bin_coords_tur.gz')\nsrc = np.loadtxt(filename)\n\nfilename = get_wradlib_data_file('misc/polar_R_tur.gz')\nvals = np.loadtxt(filename)", "_____no_output_____" ], [ "src.shape", "_____no_output_____" ] ], [ [ "Define target grid coordinates", "_____no_output_____" ] ], [ [ "xtrg = np.linspace(src[:,0].min(), src[:,0].max(), 200)\nytrg = np.linspace(src[:,1].min(), src[:,1].max(), 200)\ntrg = np.meshgrid(xtrg, ytrg)\ntrg = np.vstack((trg[0].ravel(), trg[1].ravel())).T", "_____no_output_____" ] ], [ [ "Linear Interpolation", "_____no_output_____" ] ], [ [ "ip_lin = ipol.Linear(src, trg)\nresult_lin = ip_lin(vals.ravel(), 
fill_value=np.nan)", "_____no_output_____" ] ], [ [ "IDW interpolation", "_____no_output_____" ] ], [ [ "ip_near = ipol.Nearest(src, trg)\nmaxdist = trg[1,0] - trg[0,0]\nresult_near = ip_near(vals.ravel(), maxdist=maxdist)", "_____no_output_____" ] ], [ [ "Plot results", "_____no_output_____" ] ], [ [ "fig = pl.figure(figsize=(15, 6))\nfig.subplots_adjust(wspace=0.4)\nax = fig.add_subplot(131, aspect=\"equal\")\nplot_ppi(vals, ax=ax)\nax = fig.add_subplot(132, aspect=\"equal\")\npl.pcolormesh(xtrg, ytrg, result_lin.reshape( (len(xtrg), len(ytrg)) ) )\nax = fig.add_subplot(133, aspect=\"equal\")\npl.pcolormesh(xtrg, ytrg, result_near.reshape( (len(xtrg), len(ytrg)) ) )", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb4060748428f142a563b414bfdc460fe35a2325
4,278
ipynb
Jupyter Notebook
jupyter-notebooks/alerts/data_prep.ipynb
egovernments/analytics
c9bb4c7ad8845acb41cea72f77844eb4dd39c35d
[ "MIT" ]
10
2016-05-11T16:57:46.000Z
2017-11-19T12:42:48.000Z
jupyter-notebooks/alerts/data_prep.ipynb
egovernments/analytics
c9bb4c7ad8845acb41cea72f77844eb4dd39c35d
[ "MIT" ]
146
2016-05-08T06:05:08.000Z
2017-04-15T05:04:39.000Z
jupyter-notebooks/alerts/data_prep.ipynb
egovernments/analytics
c9bb4c7ad8845acb41cea72f77844eb4dd39c35d
[ "MIT" ]
11
2016-04-27T13:34:50.000Z
2017-09-22T06:08:15.000Z
21.938462
123
0.523375
[ [ [ "DATA=\"../../cocUptoJuly2016.csv\"\nlibrary(dplyr)\nlibrary(lubridate)\nlibrary(xts)", "_____no_output_____" ], [ "raw <- read.csv(DATA, stringsAsFactors = F)\nhead(raw)\nraw$Complaint.Date <- as.POSIXct(raw$Complaint.Date, format = \"%m/%d/%Y %H:%M:%S\")", "_____no_output_____" ], [ "raw <- select(raw, Ward, Complaint.Type, Complaint.Date)", "_____no_output_____" ], [ "complaints.data <- raw[raw$Complaint.Date >= strptime(\"01/01/2012 00:00:00\", format = \"%m/%d/%Y %H:%M:%S\"), ] \ncomplaints.data$NumComplaints <- 1", "_____no_output_____" ], [ "min.date <- min(complaints.data$Complaint.Date)\nmax.date <- max(complaints.data$Complaint.Date)", "_____no_output_____" ], [ "periodicity_ <- function(complaints.frame, periodicity) {\n stopifnot(nrow(complaints.frame) > 0)\n series <- xts(complaints.frame$NumComplaints, complaints.frame$Complaint.Date)\n if(periodicity == \"hour\") {\n series <- period.apply(series, endpoints(series, \"hours\"), FUN = sum)\n } else if (periodicity == \"day\") {\n series <- apply.daily(series, FUN = sum)\n }\n series\n} ", "_____no_output_____" ], [ "# construct city level data\ncity.level.data <- function(periodicity) {\n periodicity_(complaints.data, periodicity)\n}\nplot(city.level.data(\"hour\"))", "_____no_output_____" ], [ "plot(city.level.data(\"day\"))", "_____no_output_____" ], [ "# ward level data\nward.level.data <- function(ward, periodicity) {\n df <- filter(complaints.data, Ward == ward)\n periodicity_(df, periodicity)\n}\nplot(ward.level.data(\"N188\", \"hour\"))", "_____no_output_____" ], [ "plot(ward.level.data(\"N188\", \"day\"))", "_____no_output_____" ], [ "# complaint level of data\ncomplaint.level.data <- function(complaint.type, periodicity) {\n df <- filter(complaints.data, Complaint.Type == complaint.type)\n periodicity_(df, periodicity)\n}\nplot(complaint.level.data(\"Mosquito menace \", \"hour\"))", "_____no_output_____" ], [ "plot(complaint.level.data(\"Mosquito menace \", \"day\"))", "_____no_output_____" ], [ "# the above functions are stored in lib/alerts_data.R", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb4060a39c230822ad2acd851230fb3b4376ee66
2,742
ipynb
Jupyter Notebook
lessons/lesson-12/stage2/notebook.ipynb
pklimai/antidote
64b8887da8a5203b3f57f0d5b56f428f57fedef0
[ "Apache-2.0" ]
null
null
null
lessons/lesson-12/stage2/notebook.ipynb
pklimai/antidote
64b8887da8a5203b3f57f0d5b56f428f57fedef0
[ "Apache-2.0" ]
1
2018-10-30T20:22:36.000Z
2018-10-30T20:22:36.000Z
lessons/lesson-12/stage2/notebook.ipynb
ShrutiVPawaskar/antidote
3e7d3ce3ae627a91460217b05aad29e5742db5f5
[ "Apache-2.0" ]
2
2018-10-26T16:43:05.000Z
2018-10-29T15:27:05.000Z
22.47541
227
0.56674
[ [ [ "# Lab 1 - Working with NAPALM\n\nThis is an example lab to prove out the concept of building a lab with jupyter notebooks in conjunction with the vMX docker image. We'll do a quick example of using NAPALM to connect to a Junos device and print the facts.", "_____no_output_____" ], [ "In order to work with our network device via NAPALM, we need to first import the library. This is done with a simple `import` statement:", "_____no_output_____" ] ], [ [ "import napalm", "_____no_output_____" ] ], [ [ "Next, we want to call `napalm`'s `get_network_driver` function, and pass in the name of the driver we wish to use. In this case, we want `junos` since we know the device we're about to get facts from is a Junos device:", "_____no_output_____" ] ], [ [ "driver = napalm.get_network_driver(\"junos\")", "_____no_output_____" ] ], [ [ "Now that we have a driver, we can call the driver like a function to get a handle on a specific device using its FQDN and the username/password we generated earlier:", "_____no_output_____" ] ], [ [ "device = driver(hostname=\"vqfx1.default.svc.cluster.local\", username=\"root\", password=\"VR-netlab9\")", "_____no_output_____" ] ], [ [ "We can initiate the connection to the device with a call to `device.open()`:", "_____no_output_____" ] ], [ [ "device.open()", "_____no_output_____" ] ], [ [ "Finally, call `device.get_facts()` to ", "_____no_output_____" ] ], [ [ "device.get_facts()", "_____no_output_____" ] ], [ [ "That's it!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb4062c78e5d1aa4805d2f6765fc6bb87f61c681
309,039
ipynb
Jupyter Notebook
reports/c9.0-mc-deply-2022-03-16.ipynb
marxcerqueira/Kaggle-HighValue-Custormers-Identification
1d490af7cb5d142d6549295e08216e31bab7ddc6
[ "MIT" ]
null
null
null
reports/c9.0-mc-deply-2022-03-16.ipynb
marxcerqueira/Kaggle-HighValue-Custormers-Identification
1d490af7cb5d142d6549295e08216e31bab7ddc6
[ "MIT" ]
null
null
null
reports/c9.0-mc-deply-2022-03-16.ipynb
marxcerqueira/Kaggle-HighValue-Custormers-Identification
1d490af7cb5d142d6549295e08216e31bab7ddc6
[ "MIT" ]
null
null
null
77.628485
64,512
0.800022
[ [ [ "<span style=\"color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;\">An Exception was encountered at '<a href=\"#papermill-error-cell\">In [42]</a>'.</span>", "_____no_output_____" ], [ "# High Value Customers Identification (Insiders) #\n**By: Marx Cerqueira**", "_____no_output_____" ], [ "# IMPORTS", "_____no_output_____" ] ], [ [ "import re\nimport os\nimport inflection\nimport sqlite3\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nimport umap.umap_ as umap\n\nfrom matplotlib import pyplot as plt\n\nfrom sklearn import metrics as m\nfrom sklearn import preprocessing as pp\nfrom sklearn import decomposition as dd\nfrom sklearn import ensemble as en\nfrom sklearn import manifold as mn\nfrom sklearn import mixture as mx\nfrom sklearn import cluster as c\n\nfrom scipy.cluster import hierarchy as hc\n\nfrom plotly import express as px\nfrom sqlalchemy import create_engine", "_____no_output_____" ] ], [ [ "## Loading Data", "_____no_output_____" ] ], [ [ "# load data\ndf_ecomm_raw = pd.read_csv('/home/marxcerqueira/repos/Kaggle-HighValue-Custormers-Identification/data/raw/Ecommerce.csv',\n encoding='iso-8859-1',\n low_memory=False)\n\n#drop extra column\ndf_ecomm_raw = df_ecomm_raw.drop(columns = ['Unnamed: 8'], axis = 1)", "_____no_output_____" ] ], [ [ "# DATA DISCRIPTION", "_____no_output_____" ] ], [ [ "# Copy dataset\ndf0 = df_ecomm_raw.copy()", "_____no_output_____" ] ], [ [ "## Rename Columns", "_____no_output_____" ] ], [ [ "cols_old = ['InvoiceNo','StockCode','Description','Quantity', 'InvoiceDate','UnitPrice','CustomerID','Country']\n\nsnakecase = lambda x: inflection.underscore(x)\ncol_news = list(map(snakecase, cols_old))\n\n# Rename columns\ndf0.columns = col_news", "_____no_output_____" ] ], [ [ "## Data Dimension", "_____no_output_____" ] ], [ [ "print('Number of rows: {}.'.format(df0.shape[0]))\nprint('Number of cols: {}.'.format(df0.shape[1]))", "Number of rows: 541909.\nNumber of cols: 8.\n" ] ], [ [ "## Data Types", "_____no_output_____" ] ], [ [ "df0.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 541909 entries, 0 to 541908\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 invoice_no 541909 non-null object \n 1 stock_code 541909 non-null object \n 2 description 540455 non-null object \n 3 quantity 541909 non-null int64 \n 4 invoice_date 541909 non-null object \n 5 unit_price 541909 non-null float64\n 6 customer_id 406829 non-null float64\n 7 country 541909 non-null object \ndtypes: float64(2), int64(1), object(5)\nmemory usage: 33.1+ MB\n" ] ], [ [ "## Check NA Values", "_____no_output_____" ] ], [ [ "missing_count = df0.isnull().sum() # the count of missing values\nvalue_count = df0.isnull().count() # the total values count\n\nmissing_percentage = round(missing_count/value_count*100,2) # the percentage of missing values\nmissing_df = pd.DataFrame({'missing value count': missing_count, 'percentage': missing_percentage})\nmissing_df", "_____no_output_____" ], [ "barchart = missing_df.plot.bar(y='percentage')\nfor index, percentage in enumerate( missing_percentage ):\n barchart.text( index, percentage, str(percentage)+'%')", "_____no_output_____" ] ], [ [ "## Fillout NA", "_____no_output_____" ] ], [ [ "# separate NA's in two different dataframe, one with NAs and other without it\ndf_missing = df0.loc[df0['customer_id'].isna(), :]\ndf_not_missing = df0.loc[~df0['customer_id'].isna(), :]", "_____no_output_____" ], [ "# create reference\ndf_backup = 
pd.DataFrame( df_missing['invoice_no'].drop_duplicates().copy() )\ndf_backup['customer_id'] = np.arange( 19000, 19000+len( df_backup ), 1) # Fillout NA stratety: creating customers_id to keep their behavior (25% of the database)\n\n# merge original with reference dataframe\ndf0 = pd.merge( df0, df_backup, on='invoice_no', how='left' )\n\n# coalesce \ndf0['customer_id'] = df0['customer_id_x'].combine_first( df0['customer_id_y'] )\n\n# drop extra columns\ndf0 = df0.drop( columns=['customer_id_x', 'customer_id_y'], axis=1 )", "_____no_output_____" ], [ "df0.isna().sum()", "_____no_output_____" ] ], [ [ "## Change Types", "_____no_output_____" ] ], [ [ "# Transforme datatype of variable invoice_date to datetime\ndf0['invoice_date'] = pd.to_datetime(df0['invoice_date'])", "_____no_output_____" ], [ "df0['customer_id'] = df0['customer_id'].astype('int64')", "_____no_output_____" ], [ "df0.dtypes", "_____no_output_____" ] ], [ [ "## Descriptive Statistics", "_____no_output_____" ] ], [ [ "df0.describe().T", "_____no_output_____" ], [ "df0.describe(include = object).T", "_____no_output_____" ], [ "num_attributes = df0.select_dtypes(include = np.number)\ncat_attributes = df0.select_dtypes(exclude = [np.number, np.datetime64])", "_____no_output_____" ] ], [ [ "### Numerical Attributes", "_____no_output_____" ] ], [ [ "# central tendency - mean, median\nct1 = pd.DataFrame(num_attributes.apply(np.mean)).T\nct2 = pd.DataFrame(num_attributes.apply(np.median)).T\n\n# dispersion - desvio padrão, min, max, range, skew, kurtosis\nd1 = pd.DataFrame(num_attributes.apply(np.std)).T\nd2 = pd.DataFrame(num_attributes.apply(np.min)).T\nd3 = pd.DataFrame(num_attributes.apply(np.max)).T\nd4 = pd.DataFrame(num_attributes.apply(lambda x: x.max()-x.min())).T\nd5 = pd.DataFrame(num_attributes.apply(lambda x: x.skew())).T\nd6 = pd.DataFrame(num_attributes.apply(lambda x: x.kurtosis())).T\n\n#concatenate\n\nm1 = pd.concat([d2,d3,d4,ct1,ct2,d1,d5,d6]).T.reset_index()\nm1.columns = ['attributes', 'min', 'max', 'range', 'mean', 'mediana','std', 'skew','kurtosis']\nm1", "_____no_output_____" ] ], [ [ "### Categorical Attributes", "_____no_output_____" ], [ "#### Invoice_No", "_____no_output_____" ] ], [ [ "# problem: We got letters and numbers in invoice_no\n#df1['invoice_no'].astype( int )\n\n# identification: \ndf_letter_invoices = df0.loc[df0['invoice_no'].apply( lambda x: bool( re.search( '[^0-9]+', x ) ) ), :]\ndf_letter_invoices.head()\n\nprint( 'Total number of invoices: {}'.format( len( df_letter_invoices ) ) )\nprint( 'Total number of negative quantity: {}'.format( len( df_letter_invoices[ df_letter_invoices['quantity'] < 0 ] ) ) )", "Total number of invoices: 9291\nTotal number of negative quantity: 9288\n" ] ], [ [ "#### Stock_Code", "_____no_output_____" ] ], [ [ "# check stock codes only characters\ndf0.loc[df0['stock_code'].apply( lambda x: bool( re.search( '^[a-zA-Z]+$', x ) ) ), 'stock_code'].unique()\n\n# Acão:\n## 1. 
Remove stock_code in ['POST', 'D', 'M', 'PADS', 'DOT', 'CRUK']", "_____no_output_____" ] ], [ [ "# VARIABLE FILTERING", "_____no_output_____" ] ], [ [ "df1 = df0.copy()", "_____no_output_____" ], [ " # === Numerical attributes ====\ndf1 = df1.loc[df1['unit_price'] >= 0.04, :]\n\n# === Categorical attributes ====\ndf1 = df1[~df1['stock_code'].isin( ['POST', 'D', 'DOT', 'M', 'S', 'AMAZONFEE', 'm', 'DCGSSBOY',\n 'DCGSSGIRL', 'PADS', 'B', 'CRUK'] )]\n\n# description\ndf1 = df1.drop( columns='description', axis=1 )\n\n# country \ndf1 = df1[~df1['country'].isin( ['European Community', 'Unspecified' ] ) ] #assuming this risk so we can use lat long parameters\n\n# bad customers\ndf1 = df1[~df1['customer_id'].isin([16446])]\n\n# quantity \ndf1_returns = df1.loc[df1['quantity'] < 0, :].copy()\ndf1_purchases = df1.loc[df1['quantity'] >= 0, :].copy()", "_____no_output_____" ] ], [ [ "# FEATURE ENGINEERING", "_____no_output_____" ] ], [ [ "df2 = df1.copy()", "_____no_output_____" ] ], [ [ "## Feature Creation", "_____no_output_____" ] ], [ [ "# data reference\n# RFM Model, creating feature for it\n\ndf_ref = df2.drop(['invoice_no', 'stock_code',\n 'quantity', 'invoice_date', 'unit_price',\n 'country'], axis = 1).drop_duplicates(ignore_index = True).copy()", "_____no_output_____" ] ], [ [ "### Gross Revenue", "_____no_output_____" ] ], [ [ "# Gross Revenue ( Faturamento ) quantity * price\ndf1_purchases.loc[:, 'gross_revenue'] = df1_purchases.loc[:,'quantity'] * df1_purchases.loc[:, 'unit_price']\n\n# Monetary (How much money a customer spends on purchases)\ndf_monetary = df1_purchases.loc[:, ['customer_id', 'gross_revenue']].groupby( 'customer_id' ).sum().reset_index()\ndf_ref = pd.merge( df_ref, df_monetary, on='customer_id', how='left' )\ndf_ref.isna().sum()", "_____no_output_____" ] ], [ [ "### Recency", "_____no_output_____" ] ], [ [ "# Recency - Day from last purchase\ndf_recency = df1_purchases.loc[:, ['customer_id', 'invoice_date']].groupby( 'customer_id' ).max().reset_index()\ndf_recency['recency_days'] = ( df1['invoice_date'].max() - df_recency['invoice_date'] ).dt.days\ndf_recency = df_recency[['customer_id', 'recency_days']].copy()\n\ndf_ref = pd.merge( df_ref, df_recency, on='customer_id', how='left' )\ndf_ref.isna().sum()", "_____no_output_____" ] ], [ [ "### Qty Products (different stock codes by customer)", "_____no_output_____" ] ], [ [ "# Quantity of unique products purchased (Frequency: qntd of products over time)\n\n# Number of products (different stock codes by customer)\ndf_freq = (df1_purchases.loc[:, ['customer_id', 'stock_code']].groupby( 'customer_id' ).count()\n .reset_index()\n .rename( columns={'stock_code': 'qty_products'} ) )\ndf_ref = pd.merge( df_ref, df_freq, on='customer_id', how='left' )\ndf_ref.isna().sum()", "_____no_output_____" ] ], [ [ "### Frequency", "_____no_output_____" ] ], [ [ "#Frequency Purchase (rate: purchases by day)\ndf_aux = ( df1_purchases[['customer_id', 'invoice_no', 'invoice_date']].drop_duplicates()\n .groupby( 'customer_id')\n .agg( max_ = ( 'invoice_date', 'max' ), \n min_ = ( 'invoice_date', 'min' ),\n days_= ( 'invoice_date', lambda x: ( ( x.max() - x.min() ).days ) + 1 ),\n buy_ = ( 'invoice_no', 'count' ) ) ).reset_index()\n# Frequency\ndf_aux['frequency'] = df_aux[['buy_', 'days_']].apply( lambda x: x['buy_'] / x['days_'] if x['days_'] != 0 else 0, axis=1 )\n\n# Merge\ndf_ref = pd.merge( df_ref, df_aux[['customer_id', 'frequency']], on='customer_id', how='left' )\ndf_ref.isna().sum()", "_____no_output_____" ] ], [ [ "### Number of 
Returns", "_____no_output_____" ] ], [ [ "#Number of Returns\ndf_returns = df1_returns[['customer_id', 'quantity']].groupby( 'customer_id' ).sum().reset_index().rename( columns={'quantity':'qty_returns'} )\ndf_returns['qty_returns'] = df_returns['qty_returns'] * -1\n\ndf_ref = pd.merge( df_ref, df_returns, how='left', on='customer_id' )\ndf_ref.loc[df_ref['qty_returns'].isna(), 'qty_returns'] = 0 #customers with 0 returned items\n\ndf_ref.isna().sum()", "_____no_output_____" ] ], [ [ "# EXPLORATORY DATA ANALYSIS (EDA)", "_____no_output_____" ] ], [ [ "df3 = df_ref.dropna().copy()\ndf3.isna().sum()", "_____no_output_____" ] ], [ [ "## Space Study", "_____no_output_____" ] ], [ [ "# Original dataset\n#df33 = df3.drop(columns = ['customer_id'], axis = '').copy()\n\n# dataset with selected columns due feature selection based on its importance\ncols_selected = ['customer_id', 'gross_revenue', 'recency_days', 'qty_products', 'frequency', 'qty_returns']\ndf33 = df3[cols_selected].drop(columns = 'customer_id', axis = 1)", "_____no_output_____" ], [ "df33.head()", "_____no_output_____" ], [ "mm = pp.MinMaxScaler()\n\ndf33['gross_revenue'] = mm.fit_transform(df33[['gross_revenue']])\ndf33['recency_days'] = mm.fit_transform(df33[['recency_days']])\ndf33['qty_products'] = mm.fit_transform(df33[['qty_products']])\ndf33['frequency'] = mm.fit_transform(df33[['frequency']])\ndf33['qty_returns'] = mm.fit_transform(df33[['qty_returns']])\n\nX = df33.copy()", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ] ], [ [ "#### PCA", "_____no_output_____" ] ], [ [ "pca = dd.PCA( n_components = X.shape[1])\n\n\nprincipal_components = pca.fit_transform(X)\n\n# plot explained variables\nfeatures = range(pca.n_components_)\n\nplt.bar(features, pca.explained_variance_ratio_, color = 'black') #quais componentes principais com a maior variação de dados\n\n# pca component\ndf_pca = pd.DataFrame( principal_components )", "_____no_output_____" ], [ "sns.scatterplot(x = 0, y = 1, data = df_pca);", "_____no_output_____" ] ], [ [ "#### UMAP", "_____no_output_____" ] ], [ [ "reducer = umap.UMAP(random_state = 42)\nembedding = reducer.fit_transform(X) #gera o espaço projetado - embedding é a projeção gerada em outro espaço\n\n#embedding\ndf_umap = pd.DataFrame()\ndf_umap['embedding_X'] = embedding[:, 0]\ndf_umap['embedding_y'] = embedding[:, 1]\n\n#plot UMAP - cluster projetado de alta dimencionalidade\nsns.scatterplot(x = 'embedding_X', y = 'embedding_y',\n data = df_umap);", "_____no_output_____" ] ], [ [ "#### t-SNE", "_____no_output_____" ] ], [ [ "reducer = mn.TSNE( n_components = 2, n_jobs = -1, random_state = 42)\nembedding = reducer.fit_transform(X) #gera o espaço projetado - embedding é a projeção gerada em outro espaço\n\n#embedding\ndf_tsne = pd.DataFrame()\ndf_tsne['embedding_X'] = embedding[:, 0]\ndf_tsne['embedding_y'] = embedding[:, 1]\n\n#plot UMAP - cluster projetado de alta dimencionalidade\nsns.scatterplot(x = 'embedding_X', y = 'embedding_y',\n data = df_tsne);", "/home/marxcerqueira/.local/lib/python3.8/site-packages/sklearn/manifold/_t_sne.py:780: FutureWarning: The default initialization in TSNE will change from 'random' to 'pca' in 1.2.\n warnings.warn(\n/home/marxcerqueira/.local/lib/python3.8/site-packages/sklearn/manifold/_t_sne.py:790: FutureWarning: The default learning rate in TSNE will change from 200.0 to 'auto' in 1.2.\n warnings.warn(\n" ] ], [ [ "#### Tree-Based Embedding", "_____no_output_____" ] ], [ [ "df3.head()", "_____no_output_____" ], [ "# training dataset \nX = 
df33.drop(columns = ['gross_revenue'], axis = 1) #target variable\ny = df33['gross_revenue']\n\n# I could use boruta to select features to build a better embedding space\n\n# model definition\nrf_model = en.RandomForestRegressor(n_estimators = 100, random_state = 42)\n\n# model training\nrf_model.fit(X,y)\n\n# leaf\ndf_leaf = pd.DataFrame(rf_model.apply( X ))", "_____no_output_____" ], [ "# using UMAP to reduce the space study from 100 to 2\nreducer = umap.UMAP(random_state = 42)\nembedding = reducer.fit_transform(df_leaf) #gera o espaço projetado - embedding é a projeção gerada em outro espaço\n\n#embedding\ndf_tree = pd.DataFrame()\ndf_tree['embedding_X'] = embedding[:, 0]\ndf_tree['embedding_y'] = embedding[:, 1]\n\n#plot UMAP - cluster projetado de alta dimencionalidade\nsns.scatterplot(x = 'embedding_X', y = 'embedding_y',\n data = df_tree);", "/home/marxcerqueira/.local/lib/python3.8/site-packages/sklearn/manifold/_spectral_embedding.py:260: UserWarning: Graph is not fully connected, spectral embedding may not work as expected.\n warnings.warn(\n" ] ], [ [ "# DATA PREPARATION", "_____no_output_____" ], [ "<span id=\"papermill-error-cell\" style=\"color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;\">Execution using papermill encountered an exception here and stopped:</span>", "_____no_output_____" ] ], [ [ "# Tree-Based Embbeding\ndf4 = df_tree.copy()\ndf4.to_csv('../src/data/tree_based_embbeding.csv', index = False)\n\n# # UMAP Embbeding\n# df4 = df_umap.copy()\n\n# # TSNE Embedding\n# df4 = df_tsne.copy()", "_____no_output_____" ] ], [ [ "# HYPERPARAMETER FINE-TUNNING", "_____no_output_____" ] ], [ [ "X = df4.copy()", "_____no_output_____" ], [ "X.head()", "_____no_output_____" ], [ "clusters = np.arange(2, 31, 1) #silhouette was increasing, so we put more k points\nclusters", "_____no_output_____" ] ], [ [ "## K-Means", "_____no_output_____" ] ], [ [ "kmeans_sil = []\n\nfor k in clusters:\n # model definition\n kmeans_model = c.KMeans( n_clusters = k, n_init = 100, random_state = 42 )\n\n # model training\n kmeans_model.fit(X)\n\n # model predict\n labels = kmeans_model.predict(X)\n\n # model performance\n sil = m.silhouette_score( X, labels, metric = 'euclidean')\n kmeans_sil.append(sil) ", "_____no_output_____" ], [ "plt.plot( clusters, kmeans_sil, linestyle = '--', marker = 'o', color = 'b' )\nplt.xlabel( 'K' );\nplt.ylabel('Silhouette Score');\nplt.title('KMeans Silhouette Score per K ');", "_____no_output_____" ] ], [ [ "## GMM", "_____no_output_____" ] ], [ [ "gmm_sil = []\nfor k in clusters:\n # model definition\n gmm_model = mx.GaussianMixture(n_components = k, n_init = 100, random_state = 42)\n \n # model training\n \n gmm_model.fit(X)\n \n # model prediction\n \n labels = gmm_model.predict(X)\n \n # model performance\n sil = m.silhouette_score(X, labels, metric = 'euclidean')\n gmm_sil.append(sil)", "_____no_output_____" ], [ "plt.plot(clusters, gmm_sil, linestyle = '--', marker = 'o', color = 'b')\nplt.xlabel( 'K' );\nplt.ylabel('Silhouette Score');\nplt.title('GMM Silhouette Score per K ');", "_____no_output_____" ] ], [ [ "## Hierarchical Clustering", "_____no_output_____" ] ], [ [ "# model definition and training\nhc_model = hc.linkage(X, 'ward')", "_____no_output_____" ] ], [ [ "### H-Clustering Silhouette Score", "_____no_output_____" ] ], [ [ "hc_sil = []\nfor k in clusters:\n #model definition and training\n hc_model = hc.linkage(X, 'ward')\n\n # model predict\n labels = hc.fcluster(hc_model, k, criterion = 'maxclust')\n\n # 
metrics\n sil = m.silhouette_score(X, labels, metric = 'euclidean')\n hc_sil.append(sil)", "_____no_output_____" ], [ "plt.plot(clusters, hc_sil, linestyle = '--', marker = 'o', color = 'b')", "_____no_output_____" ] ], [ [ "## Results", "_____no_output_____" ] ], [ [ "## Results - Tree Based Embedding\ndf_results = pd.DataFrame({'KMeans:': kmeans_sil,\n 'GMM': gmm_sil,\n 'HC': hc_sil}\n ).T\n\ndf_results.columns = clusters\ndf_results.style.highlight_max(color = 'lightgreen', axis = 1)", "_____no_output_____" ], [ "## Results - UMAP Embedding\ndf_results = pd.DataFrame({'KMeans:': kmeans_sil,\n 'GMM': gmm_sil,\n 'HC': hc_sil}\n ).T\n\ndf_results.columns = clusters\ndf_results.style.highlight_max(color = 'lightgreen', axis = 1)", "_____no_output_____" ], [ "## Results - TSNE Embedding\ndf_results = pd.DataFrame({'KMeans:': kmeans_sil,\n 'GMM': gmm_sil,\n 'HC': hc_sil}\n ).T\n\ndf_results.columns = clusters\ndf_results.style.highlight_max(color = 'lightgreen', axis = 1)", "_____no_output_____" ] ], [ [ "# MACHINE LEARNING MODEL TRAINING", "_____no_output_____" ], [ "## K-Means", "_____no_output_____" ] ], [ [ "# model definition\nk = 8;\nkmeans = c.KMeans(init = 'random', n_clusters = k, n_init = 100, max_iter = 300, random_state = 42)\n\n# model training\nkmeans.fit(X)\n\n# clustering\nlabels = kmeans.labels_", "_____no_output_____" ], [ "# # trying with GMM beacuse of its approach in the embedding space\n# # k=11 ;\n# # model definition\n# gmm_model = mx.GaussianMixture(n_components = k,n_init = 10 ,random_state=42)\n\n# # model training\n# gmm_model.fit(X)\n\n# # model prediction\n# labels = gmm_model.predict(X)", "_____no_output_____" ] ], [ [ "## Cluster Validation", "_____no_output_____" ] ], [ [ "# WSS (Within-cluster Sum of Square )\n# print('WSS score: {}'.format(kmeans.inertia_))\n\n# SS (Silhouette Score)\nprint('SS score: {}'.format(m.silhouette_score(X, labels, metric = 'euclidean')))", "_____no_output_____" ] ], [ [ "# CLUSTER ANALYSIS", "_____no_output_____" ] ], [ [ "df9 = X.copy()\ndf9['cluster'] = labels", "_____no_output_____" ] ], [ [ "## Visualization Inspection", "_____no_output_____" ] ], [ [ "# k = 8 for KMeans\nsns.scatterplot(x = 'embedding_X', y = 'embedding_y', hue = 'cluster', data = df9, palette = 'deep')", "_____no_output_____" ] ], [ [ "## Cluster Profile", "_____no_output_____" ] ], [ [ "df92 = df3[cols_selected].copy()\ndf92['cluster'] = labels\ndf92.head()", "_____no_output_____" ], [ "# Explaining clusters profile based on this averages\n# Number of customer\ndf_cluster = df92[['customer_id', 'cluster']].groupby( 'cluster' ).count().reset_index()\ndf_cluster['perc_customer'] = 100*( df_cluster['customer_id'] / df_cluster['customer_id'].sum() )\n\n# Avg Gross revenue\ndf_avg_gross_revenue = df92[['gross_revenue', 'cluster']].groupby( 'cluster' ).mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_avg_gross_revenue, how='inner', on='cluster' )\n\n# Avg recency days\ndf_avg_recency_days = df92[['recency_days', 'cluster']].groupby( 'cluster' ).mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_avg_recency_days, how='inner', on='cluster' )\n\n# Avg qty products\ndf_qty_products = df92[['qty_products', 'cluster']].groupby( 'cluster' ).mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_qty_products, how='inner', on='cluster' )\n\n# Frequency\ndf_frequency = df92[['frequency', 'cluster']].groupby( 'cluster' ).mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_frequency, how='inner', on='cluster' )\n\n# Avg qty 
returns\ndf_qty_returns = df92[['qty_returns', 'cluster']].groupby( 'cluster' ).mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_qty_returns, how='inner', on='cluster' )\n\ndf_cluster", "_____no_output_____" ], [ "# during the new EDA we can do a analyse inside each cluster", "_____no_output_____" ] ], [ [ "Cluster Insiders (04):\n\n- Number of customers: 551 (9.67% of costumers)\n- Avg Gross Revenue: $10410,00\n- Recency Average: 45 days\n- Avg of Qty Products Purchased: 366 un\n- Purchase Frequency: 0.21 products per day", "_____no_output_____" ], [ "# Exploratory Dada Analysis", "_____no_output_____" ] ], [ [ "df10 = df92.copy()", "_____no_output_____" ], [ "df10.head()", "_____no_output_____" ] ], [ [ "# MODEL DEPLOYMENT", "_____no_output_____" ] ], [ [ "df92.dtypes", "_____no_output_____" ], [ "df92['recency_days'] = df92['recency_days'].astype(int)\ndf92['qty_products'] = df92['qty_products'].astype(int)\ndf92['qty_returns'] = df92['qty_returns'].astype(int)", "_____no_output_____" ], [ "# # create database\n# conn = sqlite3.connect('insiders_db.sqlite')\n\n# # create table\n# query_create_insiders = \"\"\"\n# CREATE TABLE insiders (\n# custer_id INTEGER,\n# gross_revenue REAL,\n# recency_days INTEGER,\n# qty_products INTEGER,\n# frequency INTEGER,\n# qty_returns INTEGER,\n# cluster INTEGER\n# )\n# \"\"\"\n\n# conn.execute(query_create_insiders)\n# conn.commit()\n# conn.close()", "_____no_output_____" ], [ "# database connection\nconn = create_engine('sqlite:///insiders_db.sqlite')\n\n# # drop table\n# query_drop_insiders = \"\"\"\n# DROP TABLE insiders\n\n# \"\"\"\n\n#create table\nquery_create_insiders = \"\"\"\n CREATE TABLE insiders (\n customer_id INTEGER,\n gross_revenue REAL,\n recency_days INTEGER,\n qty_products INTEGER,\n frequency INTEGER,\n qty_returns INTEGER,\n cluster INTEGER\n )\n\"\"\"\n\nconn.execute(query_create_insiders)", "_____no_output_____" ], [ "# insert into data\ndf92.to_sql('insiders', con = conn, if_exists = 'append', index = False)", "_____no_output_____" ], [ "# consulting database\n# get query\nquery_collect = \"\"\"\n SELECT * from insiders\n\"\"\"\n\ndf = pd.read_sql_query(query_collect, conn)\ndf.head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb406de1dc91d0fb498b578fcc3468977c99c935
509,448
ipynb
Jupyter Notebook
SNN/Full_connection_fin.ipynb
bamboo-nova/Julia_practice
52124bbb6e6ff9fefafb0f2fb002389ad270bfd0
[ "MIT" ]
null
null
null
SNN/Full_connection_fin.ipynb
bamboo-nova/Julia_practice
52124bbb6e6ff9fefafb0f2fb002389ad270bfd0
[ "MIT" ]
null
null
null
SNN/Full_connection_fin.ipynb
bamboo-nova/Julia_practice
52124bbb6e6ff9fefafb0f2fb002389ad270bfd0
[ "MIT" ]
null
null
null
183.254676
22,638
0.686998
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb4075aff8e65324edf64d3aef9f7e09b3d12da3
332,281
ipynb
Jupyter Notebook
0.1-ReadingVisualisingSaving.ipynb
jeammimi/formation
97a86b50aea0f4f19f236300af90df2e9e2f838c
[ "MIT" ]
null
null
null
0.1-ReadingVisualisingSaving.ipynb
jeammimi/formation
97a86b50aea0f4f19f236300af90df2e9e2f838c
[ "MIT" ]
null
null
null
0.1-ReadingVisualisingSaving.ipynb
jeammimi/formation
97a86b50aea0f4f19f236300af90df2e9e2f838c
[ "MIT" ]
null
null
null
1,217.14652
293,520
0.960211
[ [ [ "# Library necessary", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom pylab import *\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "# reading data\nThe library that you need to use is pandas\nYou can look either by autocompletion or using google\n\n### exercice: read csvfile called example.csv", "_____no_output_____" ] ], [ [ "# Correction\ndata = pd.read_csv(\"example.csv\")\nprint(data.head()) #not good\n\ndata = pd.read_csv(\"example.csv\",sep=\"\\t\")\nprint(data.head())# good\n", " LogP\\tMW\\tPolarity\n0 15.593723467383032\\t85.49511285287882\\t3.50540...\n1 6.179678069773948\\t54.56185349271858\\t0.325312...\n2 20.864675602444155\\t21.799255962466823\\t9.2544...\n3 13.162617281355232\\t39.582379122822665\\t4.5611...\n4 8.874363969611059\\t37.061797145952625\\t2.54152...\n LogP MW Polarity\n0 15.593723 85.495113 3.505403\n1 6.179678 54.561853 0.325312\n2 20.864676 21.799256 9.254491\n3 13.162617 39.582379 4.561116\n4 8.874364 37.061797 2.541523\n" ], [ "# if it has no name:\ndata = pd.read_csv(\"example_without_name.csv\",sep=\"\\t\")\nprint(data.head()) #not good\ndata = pd.read_csv(\"example_without_name.csv\",sep=\"\\t\",names=[\"LogP\",\"MW\",\"Polarity\"])\nprint(data.head()) #good\n", " 15.593723467383032 85.49511285287882 3.5054032661994947\n0 6.179678 54.561853 0.325312\n1 20.864676 21.799256 9.254491\n2 13.162617 39.582379 4.561116\n3 8.874364 37.061797 2.541523\n4 12.464101 55.026992 3.472338\n LogP MW Polarity\n0 15.593723 85.495113 3.505403\n1 6.179678 54.561853 0.325312\n2 20.864676 21.799256 9.254491\n3 13.162617 39.582379 4.561116\n4 8.874364 37.061797 2.541523\n" ] ], [ [ "# Exercice visualisation", "_____no_output_____" ], [ "The library to use is allready loaded (pylab) and all the function are loaded inside \nby the code\n\n\nfrom pylab import *", "_____no_output_____" ] ], [ [ "#Correction :\nplot(data[\"MW\"],data[\"LogP\"],\"o\")\nxlabel(\"MW\")\nylabel(\"LogP\")", "_____no_output_____" ] ], [ [ "# more complex visualisation:", "_____no_output_____" ] ], [ [ "from pandas.plotting import scatter_matrix\nscatter_matrix(data,figsize=(10,10));", "_____no_output_____" ] ], [ [ "# example of creating a new feature", "_____no_output_____" ] ], [ [ "# Computing an ploting another value\ndata[\"NewFeat\"] = data[\"MW\"] * data[\"Polarity\"]\n", "_____no_output_____" ] ], [ [ "# Do different visualisation", "_____no_output_____" ], [ "# Now saving the new feature using pandas", "_____no_output_____" ] ], [ [ "# correction:\ndata.to_csv(\"example_with_new_feature.csv\",sep=\"\\t\",index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb40766042883b66f709ca7a699149c0212effdc
753,941
ipynb
Jupyter Notebook
python/machine learning/EDA.ipynb
duttashi/stackoverflow-notes
b8fae6baa756850e2f8c007aabf6ebf87346f961
[ "MIT" ]
1
2021-05-24T12:11:14.000Z
2021-05-24T12:11:14.000Z
python/machine learning/EDA.ipynb
duttashi/stackoverflow-notes
b8fae6baa756850e2f8c007aabf6ebf87346f961
[ "MIT" ]
41
2018-09-11T12:57:11.000Z
2019-04-09T06:32:14.000Z
python/machine learning/EDA.ipynb
duttashi/stackoverflow-notes
b8fae6baa756850e2f8c007aabf6ebf87346f961
[ "MIT" ]
null
null
null
269.072448
219,880
0.891908
[ [ [ "## Exploratory Data Analysis", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "# read dataset\ndf = pd.read_csv('../datasets/winequality/winequality-red.csv',sep=';')", "_____no_output_____" ], [ "# check data dimensions\nprint(df.shape)\n# check length\nprint(len(df))\n# check number of dimensions of your DataFrame or Series\nprint(df.ndim)\n# show the first five rows\nprint(df.head(5))\n# show the last five rows\nprint(df.tail(5))", "(1599, 12)\n1599\n2\n fixed acidity volatile acidity citric acid residual sugar chlorides \\\n0 7.4 0.70 0.00 1.9 0.076 \n1 7.8 0.88 0.00 2.6 0.098 \n2 7.8 0.76 0.04 2.3 0.092 \n3 11.2 0.28 0.56 1.9 0.075 \n4 7.4 0.70 0.00 1.9 0.076 \n\n free sulfur dioxide total sulfur dioxide density pH sulphates \\\n0 11.0 34.0 0.9978 3.51 0.56 \n1 25.0 67.0 0.9968 3.20 0.68 \n2 15.0 54.0 0.9970 3.26 0.65 \n3 17.0 60.0 0.9980 3.16 0.58 \n4 11.0 34.0 0.9978 3.51 0.56 \n\n alcohol quality \n0 9.4 5 \n1 9.8 5 \n2 9.8 5 \n3 9.8 6 \n4 9.4 5 \n fixed acidity volatile acidity citric acid residual sugar chlorides \\\n1594 6.2 0.600 0.08 2.0 0.090 \n1595 5.9 0.550 0.10 2.2 0.062 \n1596 6.3 0.510 0.13 2.3 0.076 \n1597 5.9 0.645 0.12 2.0 0.075 \n1598 6.0 0.310 0.47 3.6 0.067 \n\n free sulfur dioxide total sulfur dioxide density pH sulphates \\\n1594 32.0 44.0 0.99490 3.45 0.58 \n1595 39.0 51.0 0.99512 3.52 0.76 \n1596 29.0 40.0 0.99574 3.42 0.75 \n1597 32.0 44.0 0.99547 3.57 0.71 \n1598 18.0 42.0 0.99549 3.39 0.66 \n\n alcohol quality \n1594 10.5 5 \n1595 11.2 6 \n1596 11.0 6 \n1597 10.2 5 \n1598 11.0 6 \n" ], [ "# print column names\ndf.dtypes", "_____no_output_____" ], [ "# return the number of non-missing values for each column of the DataFrame\nprint(df.count)\n# change direction to get count of non-missing values for each each row\ndf.count(axis='columns') ", "<bound method DataFrame.count of fixed acidity volatile acidity citric acid residual sugar chlorides \\\n0 7.4 0.700 0.00 1.9 0.076 \n1 7.8 0.880 0.00 2.6 0.098 \n2 7.8 0.760 0.04 2.3 0.092 \n3 11.2 0.280 0.56 1.9 0.075 \n4 7.4 0.700 0.00 1.9 0.076 \n5 7.4 0.660 0.00 1.8 0.075 \n6 7.9 0.600 0.06 1.6 0.069 \n7 7.3 0.650 0.00 1.2 0.065 \n8 7.8 0.580 0.02 2.0 0.073 \n9 7.5 0.500 0.36 6.1 0.071 \n10 6.7 0.580 0.08 1.8 0.097 \n11 7.5 0.500 0.36 6.1 0.071 \n12 5.6 0.615 0.00 1.6 0.089 \n13 7.8 0.610 0.29 1.6 0.114 \n14 8.9 0.620 0.18 3.8 0.176 \n15 8.9 0.620 0.19 3.9 0.170 \n16 8.5 0.280 0.56 1.8 0.092 \n17 8.1 0.560 0.28 1.7 0.368 \n18 7.4 0.590 0.08 4.4 0.086 \n19 7.9 0.320 0.51 1.8 0.341 \n20 8.9 0.220 0.48 1.8 0.077 \n21 7.6 0.390 0.31 2.3 0.082 \n22 7.9 0.430 0.21 1.6 0.106 \n23 8.5 0.490 0.11 2.3 0.084 \n24 6.9 0.400 0.14 2.4 0.085 \n25 6.3 0.390 0.16 1.4 0.080 \n26 7.6 0.410 0.24 1.8 0.080 \n27 7.9 0.430 0.21 1.6 0.106 \n28 7.1 0.710 0.00 1.9 0.080 \n29 7.8 0.645 0.00 2.0 0.082 \n... ... ... ... ... ... 
\n1569 6.2 0.510 0.14 1.9 0.056 \n1570 6.4 0.360 0.53 2.2 0.230 \n1571 6.4 0.380 0.14 2.2 0.038 \n1572 7.3 0.690 0.32 2.2 0.069 \n1573 6.0 0.580 0.20 2.4 0.075 \n1574 5.6 0.310 0.78 13.9 0.074 \n1575 7.5 0.520 0.40 2.2 0.060 \n1576 8.0 0.300 0.63 1.6 0.081 \n1577 6.2 0.700 0.15 5.1 0.076 \n1578 6.8 0.670 0.15 1.8 0.118 \n1579 6.2 0.560 0.09 1.7 0.053 \n1580 7.4 0.350 0.33 2.4 0.068 \n1581 6.2 0.560 0.09 1.7 0.053 \n1582 6.1 0.715 0.10 2.6 0.053 \n1583 6.2 0.460 0.29 2.1 0.074 \n1584 6.7 0.320 0.44 2.4 0.061 \n1585 7.2 0.390 0.44 2.6 0.066 \n1586 7.5 0.310 0.41 2.4 0.065 \n1587 5.8 0.610 0.11 1.8 0.066 \n1588 7.2 0.660 0.33 2.5 0.068 \n1589 6.6 0.725 0.20 7.8 0.073 \n1590 6.3 0.550 0.15 1.8 0.077 \n1591 5.4 0.740 0.09 1.7 0.089 \n1592 6.3 0.510 0.13 2.3 0.076 \n1593 6.8 0.620 0.08 1.9 0.068 \n1594 6.2 0.600 0.08 2.0 0.090 \n1595 5.9 0.550 0.10 2.2 0.062 \n1596 6.3 0.510 0.13 2.3 0.076 \n1597 5.9 0.645 0.12 2.0 0.075 \n1598 6.0 0.310 0.47 3.6 0.067 \n\n free sulfur dioxide total sulfur dioxide density pH sulphates \\\n0 11.0 34.0 0.99780 3.51 0.56 \n1 25.0 67.0 0.99680 3.20 0.68 \n2 15.0 54.0 0.99700 3.26 0.65 \n3 17.0 60.0 0.99800 3.16 0.58 \n4 11.0 34.0 0.99780 3.51 0.56 \n5 13.0 40.0 0.99780 3.51 0.56 \n6 15.0 59.0 0.99640 3.30 0.46 \n7 15.0 21.0 0.99460 3.39 0.47 \n8 9.0 18.0 0.99680 3.36 0.57 \n9 17.0 102.0 0.99780 3.35 0.80 \n10 15.0 65.0 0.99590 3.28 0.54 \n11 17.0 102.0 0.99780 3.35 0.80 \n12 16.0 59.0 0.99430 3.58 0.52 \n13 9.0 29.0 0.99740 3.26 1.56 \n14 52.0 145.0 0.99860 3.16 0.88 \n15 51.0 148.0 0.99860 3.17 0.93 \n16 35.0 103.0 0.99690 3.30 0.75 \n17 16.0 56.0 0.99680 3.11 1.28 \n18 6.0 29.0 0.99740 3.38 0.50 \n19 17.0 56.0 0.99690 3.04 1.08 \n20 29.0 60.0 0.99680 3.39 0.53 \n21 23.0 71.0 0.99820 3.52 0.65 \n22 10.0 37.0 0.99660 3.17 0.91 \n23 9.0 67.0 0.99680 3.17 0.53 \n24 21.0 40.0 0.99680 3.43 0.63 \n25 11.0 23.0 0.99550 3.34 0.56 \n26 4.0 11.0 0.99620 3.28 0.59 \n27 10.0 37.0 0.99660 3.17 0.91 \n28 14.0 35.0 0.99720 3.47 0.55 \n29 8.0 16.0 0.99640 3.38 0.59 \n... ... ... ... ... ... \n1569 15.0 34.0 0.99396 3.48 0.57 \n1570 19.0 35.0 0.99340 3.37 0.93 \n1571 15.0 25.0 0.99514 3.44 0.65 \n1572 35.0 104.0 0.99632 3.33 0.51 \n1573 15.0 50.0 0.99467 3.58 0.67 \n1574 23.0 92.0 0.99677 3.39 0.48 \n1575 12.0 20.0 0.99474 3.26 0.64 \n1576 16.0 29.0 0.99588 3.30 0.78 \n1577 13.0 27.0 0.99622 3.54 0.60 \n1578 13.0 20.0 0.99540 3.42 0.67 \n1579 24.0 32.0 0.99402 3.54 0.60 \n1580 9.0 26.0 0.99470 3.36 0.60 \n1581 24.0 32.0 0.99402 3.54 0.60 \n1582 13.0 27.0 0.99362 3.57 0.50 \n1583 32.0 98.0 0.99578 3.33 0.62 \n1584 24.0 34.0 0.99484 3.29 0.80 \n1585 22.0 48.0 0.99494 3.30 0.84 \n1586 34.0 60.0 0.99492 3.34 0.85 \n1587 18.0 28.0 0.99483 3.55 0.66 \n1588 34.0 102.0 0.99414 3.27 0.78 \n1589 29.0 79.0 0.99770 3.29 0.54 \n1590 26.0 35.0 0.99314 3.32 0.82 \n1591 16.0 26.0 0.99402 3.67 0.56 \n1592 29.0 40.0 0.99574 3.42 0.75 \n1593 28.0 38.0 0.99651 3.42 0.82 \n1594 32.0 44.0 0.99490 3.45 0.58 \n1595 39.0 51.0 0.99512 3.52 0.76 \n1596 29.0 40.0 0.99574 3.42 0.75 \n1597 32.0 44.0 0.99547 3.57 0.71 \n1598 18.0 42.0 0.99549 3.39 0.66 \n\n alcohol quality \n0 9.4 5 \n1 9.8 5 \n2 9.8 5 \n3 9.8 6 \n4 9.4 5 \n5 9.4 5 \n6 9.4 5 \n7 10.0 7 \n8 9.5 7 \n9 10.5 5 \n10 9.2 5 \n11 10.5 5 \n12 9.9 5 \n13 9.1 5 \n14 9.2 5 \n15 9.2 5 \n16 10.5 7 \n17 9.3 5 \n18 9.0 4 \n19 9.2 6 \n20 9.4 6 \n21 9.7 5 \n22 9.5 5 \n23 9.4 5 \n24 9.7 6 \n25 9.3 5 \n26 9.5 5 \n27 9.5 5 \n28 9.4 5 \n29 9.8 6 \n... ... ... 
\n1569 11.5 6 \n1570 12.4 6 \n1571 11.1 6 \n1572 9.5 5 \n1573 12.5 6 \n1574 10.5 6 \n1575 11.8 6 \n1576 10.8 6 \n1577 11.9 6 \n1578 11.3 6 \n1579 11.3 5 \n1580 11.9 6 \n1581 11.3 5 \n1582 11.9 5 \n1583 9.8 5 \n1584 11.6 7 \n1585 11.5 6 \n1586 11.4 6 \n1587 10.9 6 \n1588 12.8 6 \n1589 9.2 5 \n1590 11.6 6 \n1591 11.6 6 \n1592 11.0 6 \n1593 9.5 6 \n1594 10.5 5 \n1595 11.2 6 \n1596 11.0 6 \n1597 10.2 5 \n1598 11.0 6 \n\n[1599 rows x 12 columns]>\n" ], [ "# To print the metadata, use info()\nprint(df.info())", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1599 entries, 0 to 1598\nData columns (total 12 columns):\nfixed acidity 1599 non-null float64\nvolatile acidity 1599 non-null float64\ncitric acid 1599 non-null float64\nresidual sugar 1599 non-null float64\nchlorides 1599 non-null float64\nfree sulfur dioxide 1599 non-null float64\ntotal sulfur dioxide 1599 non-null float64\ndensity 1599 non-null float64\npH 1599 non-null float64\nsulphates 1599 non-null float64\nalcohol 1599 non-null float64\nquality 1599 non-null int64\ndtypes: float64(11), int64(1)\nmemory usage: 150.0 KB\nNone\n" ], [ "# show the columns\ndf.columns", "_____no_output_____" ] ], [ [ "### Sorting\n\nA DataFrame can be sorted by the value of one of the variables (i.e columns). For example, we can sort by Total day charge (use ascending=False to sort in descending order):", "_____no_output_____" ] ], [ [ "df.sort_values(by='alcohol', ascending=False).head()", "_____no_output_____" ] ], [ [ "Alternatively, we can also sort by multiple columns:", "_____no_output_____" ] ], [ [ "df.sort_values(by=['alcohol', 'quality'],\n ascending=[True, False]).head()", "_____no_output_____" ] ], [ [ "### Indexing and retrieving data\nDataFrame can be indexed in different ways.\n\nTo get a single column, you can use a DataFrame['Name'] construction. Let's use this to answer a question about that column alone: **what is the proportion of alcohol in our dataframe?**", "_____no_output_____" ] ], [ [ "df['alcohol'].mean()", "_____no_output_____" ] ], [ [ "### Applying Functions to Cells, Columns and Rows\n\n**To apply functions to each column, use `apply():`**", "_____no_output_____" ] ], [ [ "df.apply(np.max) ", "_____no_output_____" ] ], [ [ "The apply method can also be used to apply a function to each row. To do this, specify `axis=1`. `lambda` functions are very convenient in such scenarios. For example, if we need to select all wines with alcohol content greater than 6, we can do it like this:", "_____no_output_____" ] ], [ [ "df[df['alcohol'].apply(lambda alcohol: alcohol > 6)].head()", "_____no_output_____" ] ], [ [ "The `map` method can be used to **replace values in a column** by passing a dictionary of the form `{old_value: new_value}` as its argument:", "_____no_output_____" ] ], [ [ "d = {'9.4' : 100, '9.8' : 200}\ndf['alcohol'] = df['alcohol'].map(d)\ndf.head()", "_____no_output_____" ] ], [ [ "The same thing can be done with the `replace` method:", "_____no_output_____" ], [ "### Grouping\n\nIn general, grouping data in Pandas goes as follows:\n\n df.groupby(by=grouping_columns)[columns_to_show].function()\n\n1. First, the `groupby` method divides the grouping_columns by their values. They become a new index in the resulting dataframe.\n\n2. Then, columns of interest are selected (`columns_to_show`). If columns_to_show is not included, all non groupby clauses will be included.\n\n3. 
Finally, one or several functions are applied to the obtained groups per selected columns.\n\nHere is an example where we group the data according to the values of the `sulphates` variable and display statistics of three columns in each group:\n", "_____no_output_____" ] ], [ [ "columns_to_show = ['pH', 'chlorides', 'citric acid']\n\ndf.groupby(['sulphates'])[columns_to_show].describe(percentiles=[]).head()", "_____no_output_____" ] ], [ [ "Let’s do the same thing, but slightly differently by passing a list of functions to `agg()`:", "_____no_output_____" ] ], [ [ "columns_to_show = ['pH', 'chlorides', 'citric acid']\n\ndf.groupby(['sulphates'])[columns_to_show].agg([np.mean, np.std, np.min, \n np.max]).head()", "_____no_output_____" ] ], [ [ "### Summary tables\n\nSuppose we want to see how the observations in our sample are distributed in the context of two variables - `sulphates` and `quality`. To do so, we can build a contingency table using the `crosstab` method:", "_____no_output_____" ] ], [ [ "pd.crosstab(df['sulphates'], df['quality']).head()", "_____no_output_____" ], [ "pd.crosstab(df['sulphates'], df['quality'], normalize=True).head()", "_____no_output_____" ] ], [ [ "## First attempt on predicting wine quality\n\nLet's see how wine quality is related to the alcohol content in it. We’ll do this using a crosstab contingency table and also through visual analysis with Seaborn (however, visual analysis will be covered more thoroughly in the next article).", "_____no_output_____" ] ], [ [ "pd.crosstab(df['pH'], df['quality'], margins=True).head()", "_____no_output_____" ], [ "sns.countplot(x='density', hue='quality', data=df);", "_____no_output_____" ] ], [ [ "### Histogram", "_____no_output_____" ] ], [ [ "# create histogram\nbin_edges = np.arange(0, df['residual sugar'].max() + 1, 1)\nfig = plt.hist(df['residual sugar'], bins=bin_edges)\n\n# add plot labels\nplt.xlabel('count')\nplt.ylabel('residual sugar')\nplt.show()", "_____no_output_____" ] ], [ [ "### Scatterplot for continuous variables", "_____no_output_____" ] ], [ [ "# create scatterplot\nfig = plt.scatter(df['pH'], df['residual sugar'])\n\n# add plot labels\nplt.xlabel('pH')\nplt.ylabel('residual sugar')\nplt.show()", "_____no_output_____" ] ], [ [ "### Scatterplot Matrix", "_____no_output_____" ] ], [ [ "# show columns\ndf.columns", "_____no_output_____" ], [ "# create scatterplot matrix\nfig = sns.pairplot(data=df[['alcohol', 'pH', 'residual sugar', 'quality']], \n hue='quality')\n\n# add plot labels\nplt.xlabel('pH')\nplt.ylabel('residual sugar')\nplt.show()", "_____no_output_____" ] ], [ [ "### Boxplots\n\n- Distribution of data in terms of median and percentiles (median is the 50th percentile)", "_____no_output_____" ], [ "##### manual approach", "_____no_output_____" ] ], [ [ "percentiles = np.percentile(df['alcohol'], q=[25, 50, 75])\npercentiles", "_____no_output_____" ], [ "for p in percentiles:\n plt.axhline(p, color='black', linestyle='-')\nplt.scatter(np.zeros(df.shape[0]) + 0.5, df['alcohol'])\n\niqr = percentiles[-1] - percentiles[0]\nupper_whisker = min(df['alcohol'].max(), percentiles[-1] + iqr * 1.5)\nlower_whisker = max(df['alcohol'].min(), percentiles[0] - iqr * 1.5)\nplt.axhline(upper_whisker, color='black', linestyle='--')\nplt.axhline(lower_whisker, color='black', linestyle='--')\n\nplt.ylim([8, 16])\nplt.ylabel('alcohol')\nfig = plt.gca()\nfig.axes.get_xaxis().set_ticks([])\nplt.show()", "_____no_output_____" ] ], [ [ "#### using matplotlib.pyplot.boxplot approach", "_____no_output_____" ] ], [ [ 
"plt.boxplot(df['alcohol'])\n\nplt.ylim([8, 16])\nplt.ylabel('alcohol')\n\nfig = plt.gca()\nfig.axes.get_xaxis().set_ticks([])\nplt.show()", "_____no_output_____" ], [ "# Assume density is the target variable\n#descriptive statistics summary\ndf['density'].describe()", "_____no_output_____" ], [ "#histogram\nsns.distplot(df['density']);", "_____no_output_____" ], [ "#skewness and kurtosis\nprint(\"Skewness: %f\" % df['density'].skew())\nprint(\"Kurtosis: %f\" % df['density'].kurt())", "Skewness: 0.071288\nKurtosis: 0.934079\n" ] ], [ [ "### Relationship with other continuous variables", "_____no_output_____" ] ], [ [ "# other variables are fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol',\nvar = 'pH'\ndata = pd.concat([df['density'], df[var]], axis=1)\ndata.plot.scatter(x=var, y='density');", "_____no_output_____" ], [ "### Relationship with categorical variable \nvar = 'quality'\ndata = pd.concat([df['density'], df[var]], axis=1)\nf, ax = plt.subplots(figsize=(8, 6))\nfig = sns.boxplot(x=var, y=\"density\", data=data)", "_____no_output_____" ] ], [ [ "#### Correlation matrix (heatmap style) ", "_____no_output_____" ] ], [ [ "#correlation matrix\ncorrmat = df.corr()\nf, ax = plt.subplots(figsize=(12, 9))\nsns.heatmap(corrmat, vmax=.8, square=True);", "_____no_output_____" ] ], [ [ "#### `density` correlation matrix (zoomed heatmap style)", "_____no_output_____" ] ], [ [ "k = 10 #number of variables for heatmap\ncols = corrmat.nlargest(k, 'density')['density'].index\ncm = np.corrcoef(df[cols].values.T)\nsns.set(font_scale=1.25)\nhm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)\nplt.show()", "_____no_output_____" ] ], [ [ "From the above heatmap plot we can see that variable `density` is highly correlated to `fixed acidity`, `citric acid`, `total sulphur dioxide`, and `free sulphur dioxide`", "_____no_output_____" ] ], [ [ "df.columns", "_____no_output_____" ], [ "#scatterplot\nsns.set()\ncols = ['fixed acidity', 'citric acid', 'total sulfur dioxide', 'free sulfur dioxide']\nsns.pairplot(df[cols], height = 2.5)\nplt.show();", "_____no_output_____" ] ], [ [ "### Missing data\n\nImportant questions when thinking about missing data:\n\n- How prevalent is the missing data?\n- Is missing data random or does it have a pattern?", "_____no_output_____" ] ], [ [ "#missing data\ntotal = df.isnull().sum().sort_values(ascending=False)\npercent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)\nmissing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])\nmissing_data.head(20)", "_____no_output_____" ] ], [ [ "### Detailed Statistical Analysis\n\nAccording to Hair et al. (2013), four assumptions should be tested:\n\n**Normality** - When we talk about normality what we mean is that the data should look like a normal distribution. This is important because several statistic tests rely on this (e.g. t-statistics). In this exercise we'll just check univariate normality for 'density' (which is a limited approach). Remember that univariate normality doesn't ensure multivariate normality (which is what we would like to have), but it helps. Another detail to take into account is that in big samples (>200 observations) normality is not such an issue. However, if we solve normality, we avoid a lot of other problems (e.g. 
heteroscedacity) so that's the main reason why we are doing this analysis.\n\n**Homoscedasticity** - I just hope I wrote it right. Homoscedasticity refers to the 'assumption that dependent variable(s) exhibit equal levels of variance across the range of predictor variable(s)' (Hair et al., 2013). Homoscedasticity is desirable because we want the error term to be the same across all values of the independent variables.\n\n**Linearity**- The most common way to assess linearity is to examine scatter plots and search for linear patterns. If patterns are not linear, it would be worthwhile to explore data transformations. However, we'll not get into this because most of the scatter plots we've seen appear to have linear relationships.\n\n**Absence of correlated errors** - Correlated errors, like the definition suggests, happen when one error is correlated to another. For instance, if one positive error makes a negative error systematically, it means that there's a relationship between these variables. This occurs often in time series, where some patterns are time related. We'll also not get into this. However, if you detect something, try to add a variable that can explain the effect you're getting. That's the most common solution for correlated errors.", "_____no_output_____" ], [ "**Normality**\n\n- Histogram - Kurtosis and skewness.\n- Normal probability plot - Data distribution should closely follow the diagonal that represents the normal distribution.", "_____no_output_____" ] ], [ [ "#histogram and normal probability plot\nsns.set_style('darkgrid')\nsns.distplot(df['density']);\n\n# Add labels\nplt.title('Histogram of Density')\nplt.xlabel('Density')\nplt.ylabel('Count')", "_____no_output_____" ], [ "sns.distplot(df['density'], hist= True, kde=False)", "_____no_output_____" ], [ "help(sns.distplot)", "Help on function distplot in module seaborn.distributions:\n\ndistplot(a, bins=None, hist=True, kde=True, rug=False, fit=None, hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None, color=None, vertical=False, norm_hist=False, axlabel=None, label=None, ax=None)\n Flexibly plot a univariate distribution of observations.\n \n This function combines the matplotlib ``hist`` function (with automatic\n calculation of a good default bin size) with the seaborn :func:`kdeplot`\n and :func:`rugplot` functions. It can also fit ``scipy.stats``\n distributions and plot the estimated PDF over the data.\n \n Parameters\n ----------\n \n a : Series, 1d-array, or list.\n Observed data. 
If this is a Series object with a ``name`` attribute,\n the name will be used to label the data axis.\n bins : argument for matplotlib hist(), or None, optional\n Specification of hist bins, or None to use Freedman-Diaconis rule.\n hist : bool, optional\n Whether to plot a (normed) histogram.\n kde : bool, optional\n Whether to plot a gaussian kernel density estimate.\n rug : bool, optional\n Whether to draw a rugplot on the support axis.\n fit : random variable object, optional\n An object with `fit` method, returning a tuple that can be passed to a\n `pdf` method a positional arguments following an grid of values to\n evaluate the pdf on.\n {hist, kde, rug, fit}_kws : dictionaries, optional\n Keyword arguments for underlying plotting functions.\n color : matplotlib color, optional\n Color to plot everything but the fitted curve in.\n vertical : bool, optional\n If True, observed values are on y-axis.\n norm_hist : bool, optional\n If True, the histogram height shows a density rather than a count.\n This is implied if a KDE or fitted density is plotted.\n axlabel : string, False, or None, optional\n Name for the support axis label. If None, will try to get it\n from a.namel if False, do not set a label.\n label : string, optional\n Legend label for the relevent component of the plot\n ax : matplotlib axis, optional\n if provided, plot on this axis\n \n Returns\n -------\n ax : matplotlib Axes\n Returns the Axes object with the plot for further tweaking.\n \n See Also\n --------\n kdeplot : Show a univariate or bivariate distribution with a kernel\n density estimate.\n rugplot : Draw small vertical lines to show each observation in a\n distribution.\n \n Examples\n --------\n \n Show a default plot with a kernel density estimate and histogram with bin\n size determined automatically with a reference rule:\n \n .. plot::\n :context: close-figs\n \n >>> import seaborn as sns, numpy as np\n >>> sns.set(); np.random.seed(0)\n >>> x = np.random.randn(100)\n >>> ax = sns.distplot(x)\n \n Use Pandas objects to get an informative axis label:\n \n .. plot::\n :context: close-figs\n \n >>> import pandas as pd\n >>> x = pd.Series(x, name=\"x variable\")\n >>> ax = sns.distplot(x)\n \n Plot the distribution with a kernel density estimate and rug plot:\n \n .. plot::\n :context: close-figs\n \n >>> ax = sns.distplot(x, rug=True, hist=False)\n \n Plot the distribution with a histogram and maximum likelihood gaussian\n distribution fit:\n \n .. plot::\n :context: close-figs\n \n >>> from scipy.stats import norm\n >>> ax = sns.distplot(x, fit=norm, kde=False)\n \n Plot the distribution on the vertical axis:\n \n .. plot::\n :context: close-figs\n \n >>> ax = sns.distplot(x, vertical=True)\n \n Change the color of all the plot elements:\n \n .. plot::\n :context: close-figs\n \n >>> sns.set_color_codes()\n >>> ax = sns.distplot(x, color=\"y\")\n \n Pass specific parameters to the underlying plot functions:\n \n .. plot::\n :context: close-figs\n \n >>> ax = sns.distplot(x, rug=True, rug_kws={\"color\": \"g\"},\n ... kde_kws={\"color\": \"k\", \"lw\": 3, \"label\": \"KDE\"},\n ... hist_kws={\"histtype\": \"step\", \"linewidth\": 3,\n ... \"alpha\": 1, \"color\": \"g\"})\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
cb40790fa3ce5e90cf307b6e42866804ae0402a5
230,076
ipynb
Jupyter Notebook
content/features/notebooks.ipynb
martinfleis/momepy-guide
fff1705642ee83e4b67b3275752b34f5b8a0b71b
[ "MIT" ]
null
null
null
content/features/notebooks.ipynb
martinfleis/momepy-guide
fff1705642ee83e4b67b3275752b34f5b8a0b71b
[ "MIT" ]
4
2019-11-07T20:02:17.000Z
2020-04-13T16:36:23.000Z
content/features/notebooks.ipynb
martinfleis/momepy-guide
fff1705642ee83e4b67b3275752b34f5b8a0b71b
[ "MIT" ]
null
null
null
642.670391
151,856
0.948061
[ [ [ "# Content with notebooks\n\nYou can also create content with Jupyter Notebooks. The content for the current page is contained\nin a Jupyter Notebook in the `notebooks/` folder of the repository. This means that we can include\ncode blocks and their outputs, and export them to Jekyll markdown.\n\n**You can find the original notebook for this page [at this address](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb)**\n\n## Markdown + notebooks\n\nAs it is markdown, you can embed images, HTML, etc into your posts!\n\n![](cool.jpg)\n\nYou an also $add_{math}$ and\n\n$$\nmath^{blocks}\n$$\n\nor\n\n$$\n\\begin{align*}\n\\mbox{mean} la_{tex} \\\\ \\\\\nmath blocks\n\\end{align*}\n$$\n\nBut make sure you \\$Escape \\$your \\$dollar signs \\$you want to keep!\n\n## Code blocks and image outputs\n\nTextbooks with Jupyter will also embed your code blocks and output in your site.\nFor example, here's some sample Matplotlib code:", "_____no_output_____" ] ], [ [ "from matplotlib import rcParams, cycler\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.ion()", "_____no_output_____" ], [ "# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\nN = 10\ndata = [np.logspace(0, 1, 100) + np.random.randn(100) + ii for ii in range(N)]\ndata = np.array(data).T\ncmap = plt.cm.coolwarm\nrcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))\n\n\nfrom matplotlib.lines import Line2D\ncustom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),\n Line2D([0], [0], color=cmap(.5), lw=4),\n Line2D([0], [0], color=cmap(1.), lw=4)]\n\nfig, ax = plt.subplots(figsize=(10, 5))\nlines = ax.plot(data)\nax.legend(custom_lines, ['Cold', 'Medium', 'Hot']);", "_____no_output_____" ] ], [ [ "Note that the image above is captured and displayed by Jekyll.", "_____no_output_____" ], [ "## Removing content before publishing\n\nYou can also remove some content before publishing your book to the web. For example,\nin [the original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb) there\nused to be a cell below...", "_____no_output_____" ] ], [ [ "thisvariable = \"none of this should show up in the textbook\"\n\nfig, ax = plt.subplots()\nx = np.random.randn(100)\ny = np.random.randn(100)\nax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm)\nax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes)\nax.set_axis_off()", "_____no_output_____" ] ], [ [ "You can also **remove only the code** so that images and other output still show up.\n\nBelow we'll *only* display an image. It was generated with Python code in a cell,\nwhich you can [see in the original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb)", "_____no_output_____" ] ], [ [ "# NO CODE\nthisvariable = \"this plot *will* show up in the textbook.\"\n\nfig, ax = plt.subplots()\nx = np.random.randn(100)\ny = np.random.randn(100)\nax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm)\nax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes)\nax.set_axis_off()", "_____no_output_____" ] ], [ [ "And here we'll *only* display a Pandas DataFrame. 
Again, this was generated with Python code\nfrom [this original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb).", "_____no_output_____" ] ], [ [ "# NO CODE\nimport pandas as pd\npd.DataFrame([['hi', 'there'], ['this', 'is'], ['a', 'DataFrame']], columns=['Word A', 'Word B'])", "_____no_output_____" ] ], [ [ "You can configure the text that *Textbooks with Jupyter* uses for this by modifying your book's `_config.yml` file.", "_____no_output_____" ], [ "## Interactive outputs\n\nWe can even do the same for *interactive* material. Below we'll display a map using `ipyleaflet`. When the notebook\nis converted to Markdown, the code for creating the interactive map is retained.\n\n**Note that this will only work for some packages.** They need to be able to output standalone HTML/Javascript, and not\ndepend on an underlying Python kernel to work.", "_____no_output_____" ] ], [ [ "import folium", "_____no_output_____" ], [ "m = folium.Map(\n location=[45.372, -121.6972],\n zoom_start=12,\n tiles='Stamen Terrain'\n)\n\nfolium.Marker(\n location=[45.3288, -121.6625],\n popup='Mt. Hood Meadows',\n icon=folium.Icon(icon='cloud')\n).add_to(m)\n\nfolium.Marker(\n location=[45.3311, -121.7113],\n popup='Timberline Lodge',\n icon=folium.Icon(color='green')\n).add_to(m)\n\nfolium.Marker(\n location=[45.3300, -121.6823],\n popup='Some Other Location',\n icon=folium.Icon(color='red', icon='info-sign')\n).add_to(m)\n\n\nm\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
cb40822b4ac1c91227ba952c13e0fd3c4b0ba857
965
ipynb
Jupyter Notebook
Topic Wise Problems/Strings/Minimum changes to make all substrings distinct.ipynb
shakirmahmood/Tasks
8ebcda57cdfc127c08df04c41548f036a9ce77a5
[ "MIT" ]
null
null
null
Topic Wise Problems/Strings/Minimum changes to make all substrings distinct.ipynb
shakirmahmood/Tasks
8ebcda57cdfc127c08df04c41548f036a9ce77a5
[ "MIT" ]
null
null
null
Topic Wise Problems/Strings/Minimum changes to make all substrings distinct.ipynb
shakirmahmood/Tasks
8ebcda57cdfc127c08df04c41548f036a9ce77a5
[ "MIT" ]
null
null
null
16.929825
38
0.482902
[ [ [ "s = input(\"Enter string: \")\nprint(len(s)-len(set(s)))\n\n#Test Cases\n#aab\n#aebaecedabbee\n#ab", "Enter string: aebaecedabbee\n8\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
cb409021ede3d8d0e7fbbf2b64058deaecfd34f7
1,811
ipynb
Jupyter Notebook
utils/plot_metrics.ipynb
AIRI-Institute/DeepCT
8e23fda101bd4a2bce2c98c5a73d97072a3892de
[ "Apache-2.0" ]
null
null
null
utils/plot_metrics.ipynb
AIRI-Institute/DeepCT
8e23fda101bd4a2bce2c98c5a73d97072a3892de
[ "Apache-2.0" ]
null
null
null
utils/plot_metrics.ipynb
AIRI-Institute/DeepCT
8e23fda101bd4a2bce2c98c5a73d97072a3892de
[ "Apache-2.0" ]
null
null
null
20.816092
110
0.546659
[ [ [ "## Imports", "_____no_output_____" ] ], [ [ "from plot_metrics import plot_metrics", "_____no_output_____" ] ], [ [ "## Input your files", "_____no_output_____" ] ], [ [ "files = [\n \"/home/arlapin/DeepCT_outputs/new_sampler_0008/selene_sdk.train_model.validation.txt\",\n \"/home/arlapin/DeepCT_outputs/new_sampler_00008/selene_sdk.train_model.validation.txt\",\n \"/home/arlapin/DeepCT_outputs/old_sampler_00008/selene_sdk.train_model.validation.txt\", \n]", "_____no_output_____" ] ], [ [ "## Visualize", "_____no_output_____" ] ], [ [ "for filename in files:\n fig = plot_metrics(filename)\n fig.update_layout(autosize=False, width=1024, height=1024)\n fig.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb40940f624b466588fc5ee46c87804c949c220f
10,865
ipynb
Jupyter Notebook
How-to.ipynb
nthuy190991/geoseg
b679af5dc558720df36dddc7abfd4e6ecb46d7de
[ "MIT" ]
1
2021-10-02T05:23:28.000Z
2021-10-02T05:23:28.000Z
How-to.ipynb
nthuy190991/geoseg
b679af5dc558720df36dddc7abfd4e6ecb46d7de
[ "MIT" ]
null
null
null
How-to.ipynb
nthuy190991/geoseg
b679af5dc558720df36dddc7abfd4e6ecb46d7de
[ "MIT" ]
null
null
null
35.506536
138
0.544685
[ [ [ "# Tutorial for Geoseg \n> __version__ == 0.1.0\n\n> __author__ == Go-Hiroaki\n\n# Overview:\n\n## 1. Evaluating with pretrained models\n> Test model performance by providing pretrained models\n\n## 2. Re-training with provided dataset\n> Trained new models with provide training datastet\n\n## 3. Training with personal dataset\n> Train and test models with your own dataset\n\n", "_____no_output_____" ] ], [ [ "ls", "How-to.ipynb __init__.py \u001b[0m\u001b[01;34mdataset\u001b[0m/ \u001b[01;34mlogs\u001b[0m/ \u001b[01;34msrc\u001b[0m/\r\nLICENSE \u001b[01;34mcheckpoint\u001b[0m/ eva.sh \u001b[01;34mresult\u001b[0m/ visSingle.py\r\nREADME.md \u001b[01;34mdata\u001b[0m/ \u001b[01;34mexample\u001b[0m/ run.sh visSingleComparison.py\r\n" ] ], [ [ "## 1. Evaluating with pretrained models\n\n### 1.1 Prepared and loaded dataset\n#### > Prepared dataset\n\n```\nYOUR_DATASET/\n|-- img\n| |-- train_1.png\n| |-- train_2.png\n| `-- \n|-- msk\n| |-- train_1.png\n| |-- train_2.png\n| `-- \n|-- ref.csv\n|-- statistic.csv\n|-- train.csv\n|-- val.csv\n```\n\n#### > Modified src/datasets.py to make sure YOUR_DATASET \n\n```\n\nif __name__ == \"__main__\":\n # ====================== parameter initialization ======================= #\n parser = argparse.ArgumentParser(description='ArgumentParser')\n parser.add_argument('-idx', type=int, default=0,\n help='index of sample image')\n args = parser.parse_args()\n idx = args.idx\n for root in ['YOUR_DATASET']:\n for mode in [\"IM\", \"IMS\", \"IME\"]:\n print(\"Load {}/{}.\".format(root, mode))\n trainset, valset = load_dataset(root, mode)\n \n # print(\"Load train set = {} examples, val set = {} examples\".format(\n # len(trainset), len(valset)))\n sample = trainset[idx]\n trainset.show(idx)\n sample = valset[idx]\n valset.show(idx)\n print(\"\\tsrc:\", sample[\"src\"].shape,\n \"tar:\", sample[\"tar\"].shape,)\n\n```\n#### > Run src/datasets.py\n> python src/datasets.py\n\nif success, sample image will show up in example/\n", "_____no_output_____" ], [ "### 1.2 Download pretrained models\n\n> 1. FCN8s_iter_5000.pth [LINK](https://drive.google.com/open?id=1KHs7coyXAipz8t5cN_lbTC4MOYi8FddI)\n> 2. FCN16s_iter_5000.pth [LINK](https://drive.google.com/open?id=1wlORkMx_ykmHysShUKY4UcCYs-fVaen6)\n> 3. FCN32s_iter_5000.pth [LINK](https://drive.google.com/open?id=1OR_Sk66RAGtKrp0quvqazRkL0xtAH8RY)\n> 4. SegNet_iter_5000.pth [LINK](https://drive.google.com/open?id=1J0aRjFG-zOSSXnynm02VaYxjw1tjx-qC)\n> 5. UNet_iter_5000.pth [LINK](https://drive.google.com/open?id=17X0aCgRx3XXgH1fcfLoLwgcbWIzxZe5K)\n> 6. FPN_iter_5000.pth [LINK](https://drive.google.com/open?id=1fWrCnGQJBZTw7m5OZlQvH5-R_JJlBA-r)\n> 7. ResUNet_iter_5000.pth [LINK](https://drive.google.com/open?id=1jGs_PxEMXCshOzXdg9LuFJxe8kO39oxT)\n> 8. MC-FCN_iter_5000.pth [LINK](https://drive.google.com/open?id=1Kt_JmR0ZGXvK9kuTmDOek5l1SsHX4xhz)\n> 9. 
BR-Net_iter_5000.pth [LINK](https://drive.google.com/open?id=1rytD9tzAq2mne5yf3XEh-jTSHlvQvedT)\n> * Upcoming ...\n\n After downloading corresponding pretrained models, save them at checkpoints/ .\n", "_____no_output_____" ] ], [ [ "ls ./checkpoint/", "BRNet-3*1*24-NZ32km2_iter_5000.pth FPN-3*1*24-NZ32km2_iter_5000.pth\r\nBRNet-3*6*24-Vaihingen_iter_5000.pth FPN-3*6*24-Vaihingen_iter_5000.pth\r\nFCN16s-3*1*24-NZ32km2_iter_5000.pth MCFCN-3*1*24-NZ32km2_iter_5000.pth\r\nFCN16s-3*6*24-Vaihingen_iter_5000.pth MCFCN-3*6*24-Vaihingen_iter_5000.pth\r\nFCN32s-3*1*24-NZ32km2_iter_5000.pth SegNet-3*1*24-NZ32km2_iter_5000.pth\r\nFCN32s-3*6*24-PotsdamRGB_iter_5000.pth SegNet-3*6*24-Vaihingen_iter_5000.pth\r\nFCN32s-3*6*24-Vaihingen_iter_5000.pth UNet-3*1*24-NZ32km2_iter_5000.pth\r\nFCN8s-3*1*24-NZ32km2_iter_5000.pth UNet-3*6*24-Vaihingen_iter_5000.pth\r\nFCN8s-3*6*24-Vaihingen_iter_5000.pth checkpoint.csv\r\n" ] ], [ [ "### 1.3 Run evaluation scripts\n\n* sinle model\n\n```\nvisSingle.py -h\n optional arguments:\n -h, --help show this help message and exit\n -checkpoints CHECKPOINTS [CHECKPOINTS ...]\n checkpoints used for making prediction\n -spaces SPACES [SPACES ...]\n barrier space for merging\n -direction {horizontal,vertical}\n merge image direction\n -disp_cols DISP_COLS cols for displaying image\n -edge_fn {shift,canny}\n method used for edge extraction\n -gen_nb GEN_NB number of generated image\n -color COLOR background color for generated rgb result\n -partition PARTITION partition of dataset for loading\n -disk DISK dilation level\n -cuda CUDA using cuda for optimization\n```\n \n The generate result will show up at result/single\n - BR-Net ![time](./result/single/BR-Net_canny_segmap_edge_0.png)\n \n* multi models\n```\nvisSingleComparison.py -h\n optional arguments:\n -h, --help show this help message and exit\n -checkpoints CHECKPOINTS [CHECKPOINTS ...]\n checkpoints used for making prediction\n -spaces SPACES [SPACES ...]\n barrier spaces for merging\n -direction {horizontal,vertical}\n merge image direction\n -disp_cols DISP_COLS cols for displaying image\n -target {segmap,edge}\n target for model prediction [segmap, edge]\n -edge_fn {shift,canny}\n method used for edge extraction\n -gen_nb GEN_NB number of generated image\n -eval_fn {ov,precision,recall,f1_score,jaccard,kappa}\n method used for evaluate performance\n -significance SIGNIFICANCE\n significant different level between methods\n -color COLOR background color for generated rgb result\n -partition PARTITION partition of dataset for loading\n -disk DISK dilation level\n -batch_size BATCH_SIZE\n batch size for model prediction\n -cuda CUDA using cuda for optimization\n```\n The generate result will show up at result/single-comparison\n - Segmap FCN32s_FCN16s_FCN8s ![time](./result/single-comparison/segmap_FCN32s_FCN16s_FCN8s_1.png)\n - Edge FCN32s_FCN16s_FCN8s ![time](./result/single-comparison/edge_FCN32s_FCN16s_FCN8s_1.png)", "_____no_output_____" ], [ "## 2. 
Re-train with provided dataset\n\n### 2.1 Download training dataset\n> Training dataset [LINK](https://drive.google.com/file/d/1boGcJz9TyK9XB4GUhjCHVu8XGtbgjjbi/view?usp=sharing).\nUnzip and place to datasets/\n\n### 2.2 Run training scripts\n\n```\npython src/train.py -h\nusage: train.py [-h] [-root ROOT] [-net NET] [-base_kernel BASE_KERNEL] [-trigger {epoch,iter}] [-interval INTERVAL]\n [-terminal TERMINAL] [-batch_size BATCH_SIZE] [-lr LR] [-cuda CUDA]\n\nArgumentParser\n\noptional arguments:\n -h, --help show this help message and exit\n -root ROOT root dir of dataset for training models\n -net NET network type for training\n -base_kernel BASE_KERNEL\n base number of kernels\n -trigger {epoch,iter}\n trigger type for logging\n -interval INTERVAL interval for logging\n -terminal TERMINAL terminal for training\n -batch_size BATCH_SIZE\n batch_size for training\n -lr LR learning rate for optimization\n -cuda CUDA using cuda for optimization\n``` ", "_____no_output_____" ], [ "## 3. Training with personal dataset\n### 3.1 Prepare your own dataset\n### 3.2 Run training scripts", "_____no_output_____" ] ], [ [ "Step", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
cb4095c972618bb3ee3c5f69c617703d209d8d39
47,198
ipynb
Jupyter Notebook
Imoveis Residenciais.ipynb
cleristonnovaes/Curso-Pandas
1b42298e549797b548d4dc79c12e87f5a4969244
[ "MIT" ]
null
null
null
Imoveis Residenciais.ipynb
cleristonnovaes/Curso-Pandas
1b42298e549797b548d4dc79c12e87f5a4969244
[ "MIT" ]
null
null
null
Imoveis Residenciais.ipynb
cleristonnovaes/Curso-Pandas
1b42298e549797b548d4dc79c12e87f5a4969244
[ "MIT" ]
null
null
null
30.216389
92
0.312979
[ [ [ "# Relatório de Análise III", "_____no_output_____" ], [ "## Imóveis Residenciais", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "dados = pd.read_csv('dados/aluguel.csv', sep = ';')", "_____no_output_____" ], [ "dados.head(10)", "_____no_output_____" ], [ "list(dados['Tipo'].drop_duplicates())", "_____no_output_____" ], [ "residencial = ['Quitinete',\n 'Casa',\n 'Apartamento',\n 'Casa de Condomínio',\n 'Casa de Vila']", "_____no_output_____" ], [ "residencial", "_____no_output_____" ], [ "dados.head(10)", "_____no_output_____" ], [ "selecao = dados['Tipo'].isin(residencial)\nselecao", "_____no_output_____" ], [ "dados_residencial = dados[selecao]", "_____no_output_____" ], [ "dados_residencial", "_____no_output_____" ], [ "list(dados_residencial['Tipo'].drop_duplicates())", "_____no_output_____" ], [ "dados_residencial.shape[0]", "_____no_output_____" ], [ "dados.shape[0]", "_____no_output_____" ], [ "dados_residencial.index = range(dados_residencial.shape[0])", "_____no_output_____" ], [ "dados_residencial", "_____no_output_____" ] ], [ [ "## Exportando a Base de Dados", "_____no_output_____" ] ], [ [ "dados_residencial.to_csv('dados/aluguel_residencial.csv', sep = ';')", "_____no_output_____" ], [ "dados_residencial_2 = pd.read_csv('dados/aluguel_residencial.csv', sep = ';')", "_____no_output_____" ], [ "dados_residencial_2", "_____no_output_____" ], [ "dados_residencial.to_csv('dados/aluguel_residencial.csv', sep = ';', index = False)", "_____no_output_____" ], [ "dados_residencial_2 = pd.read_csv('dados/aluguel_residencial.csv', sep = ';')", "_____no_output_____" ], [ "dados_residencial_2", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb40988e511ccf2be4e49d4d5196217528b2849d
29,769
ipynb
Jupyter Notebook
lessons/02_Step_2.ipynb
JeffreyNederend/CFDPython
33702276a944987be91c4333687c924d06808e30
[ "CC-BY-3.0" ]
null
null
null
lessons/02_Step_2.ipynb
JeffreyNederend/CFDPython
33702276a944987be91c4333687c924d06808e30
[ "CC-BY-3.0" ]
null
null
null
lessons/02_Step_2.ipynb
JeffreyNederend/CFDPython
33702276a944987be91c4333687c924d06808e30
[ "CC-BY-3.0" ]
null
null
null
89.936556
9,852
0.79912
[ [ [ "Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) Lorena A. Barba, Gilbert F. Forsyth 2017. Thanks to NSF for support via CAREER award #1149784.", "_____no_output_____" ] ], [ [ "[@LorenaABarba](https://twitter.com/LorenaABarba)", "_____no_output_____" ], [ "12 steps to Navier–Stokes\n======\n***", "_____no_output_____" ], [ "This Jupyter notebook continues the presentation of the **12 steps to Navier–Stokes**, the practical module taught in the interactive CFD class of [Prof. Lorena Barba](http://lorenabarba.com). You should have completed [Step 1](./01_Step_1.ipynb) before continuing, having written your own Python script or notebook and having experimented with varying the parameters of the discretization and observing what happens.\n", "_____no_output_____" ], [ "Step 2: Nonlinear Convection\n-----\n***", "_____no_output_____" ], [ "Now we're going to implement nonlinear convection using the same methods as in step 1. The 1D convection equation is:\n\n$$\\frac{\\partial u}{\\partial t} + u \\frac{\\partial u}{\\partial x} = 0$$\n\nInstead of a constant factor $c$ multiplying the second term, now we have the solution $u$ multiplying it. Thus, the second term of the equation is now *nonlinear*. We're going to use the same discretization as in Step 1 — forward difference in time and backward difference in space. Here is the discretized equation.\n\n$$\\frac{u_i^{n+1}-u_i^n}{\\Delta t} + u_i^n \\frac{u_i^n-u_{i-1}^n}{\\Delta x} = 0$$\n\nSolving for the only unknown term, $u_i^{n+1}$, yields:\n\n$$u_i^{n+1} = u_i^n - u_i^n \\frac{\\Delta t}{\\Delta x} (u_i^n - u_{i-1}^n)$$", "_____no_output_____" ], [ "As before, the Python code starts by loading the necessary libraries. Then, we declare some variables that determine the discretization in space and time (you should experiment by changing these parameters to see what happens). Then, we create the initial condition $u_0$ by initializing the array for the solution using $u = 2\\ @\\ 0.5 \\leq x \\leq 1$ and $u = 1$ everywhere else in $(0,2)$ (i.e., a hat function).", "_____no_output_____" ] ], [ [ "import numpy # we're importing numpy \nfrom matplotlib import pyplot # and our 2D plotting library\n%matplotlib inline\n\n\nnx = 41\ndx = 2 / (nx - 1)\nnt = 20 #nt is the number of timesteps we want to calculate\ndt = .025 #dt is the amount of time each timestep covers (delta t)\n\nu = numpy.ones(nx) #as before, we initialize u with every value equal to 1.\nu[int(.5 / dx) : int(1 / dx + 1)] = 2 #then set u = 2 between 0.5 and 1 as per our I.C.s\n\nun = numpy.ones(nx) #initialize our placeholder array un, to hold the time-stepped solution", "_____no_output_____" ] ], [ [ "The code snippet below is *unfinished*. We have copied over the line from [Step 1](./01_Step_1.ipynb) that executes the time-stepping update. Can you edit this code to execute the nonlinear convection instead?", "_____no_output_____" ] ], [ [ "for n in range(nt): #iterate through time\n un = u.copy() ##copy the existing values of u into un\n for i in range(1, nx): ##now we'll iterate through the u array\n u[i] = un[i]*(1 - (dt/dx)*(un[i]-un[i-1]))\n ###This is the line from Step 1, copied exactly. 
Edit it for our new equation.\n ###then uncomment it and run the cell to evaluate Step 2 \n \n ###u[i] = un[i] - c * dt / dx * (un[i] - un[i-1])\n \n\n \npyplot.plot(numpy.linspace(0, 2, nx), u) ##Plot the results", "_____no_output_____" ] ], [ [ "What do you observe about the evolution of the hat function under the nonlinear convection equation? What happens when you change the numerical parameters and run again?", "_____no_output_____" ], [ "## Learn More", "_____no_output_____" ], [ "For a careful walk-through of the discretization of the convection equation with finite differences (and all steps from 1 to 4), watch **Video Lesson 4** by Prof. Barba on YouTube.", "_____no_output_____" ] ], [ [ "from IPython.display import YouTubeVideo\nYouTubeVideo('y2WaK7_iMRI')", "_____no_output_____" ], [ "from IPython.core.display import HTML\ndef css_styling():\n styles = open(\"../styles/custom.css\", \"r\").read()\n return HTML(styles)\ncss_styling()", "_____no_output_____" ] ], [ [ "> (The cell above executes the style for this notebook.)", "_____no_output_____" ] ] ]
[ "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "raw" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb40b8b0f8c26c0046b334cca75b3cd31a916412
33,847
ipynb
Jupyter Notebook
jupyter notebooks/2-2-fraud-detection-columns-exploration.ipynb
sergiomlop/Fraud-Detection-Model-for-Credit-Card-Transactions
3e842102b33220af959f1a1924e089af73e4c6d0
[ "Apache-2.0" ]
3
2021-01-03T11:20:49.000Z
2021-01-08T12:42:22.000Z
jupyter notebooks/2-2-fraud-detection-columns-exploration.ipynb
sergiomlop/Fraud-Detection-Model-for-Credit-Card-Transactions
3e842102b33220af959f1a1924e089af73e4c6d0
[ "Apache-2.0" ]
null
null
null
jupyter notebooks/2-2-fraud-detection-columns-exploration.ipynb
sergiomlop/Fraud-Detection-Model-for-Credit-Card-Transactions
3e842102b33220af959f1a1924e089af73e4c6d0
[ "Apache-2.0" ]
null
null
null
33,847
33,847
0.654563
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "##### Functions\n# 1st function: to graph time series based on TransactionDT vs the variable selected\n\ndef scatter(column):\n fr,no_fr = (train[train['isFraud'] == 1], train[train['isFraud'] == 0]) \n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,3)) \n ax1.title.set_text('Histogram ' + column + ' when isFraud == 0')\n ax1.set_ylim(train[column].min() - 1,train[column].max() + 1)\n ax1.scatter(x = no_fr['TransactionDT'], y = no_fr[column], color = 'blue', marker='o') \n ax2.title.set_text('Histogram ' + column + ' when isFraud == 1')\n ax2.set_ylim(train[column].min() - 1,train[column].max() + 1)\n ax2.scatter(x = fr['TransactionDT'], y = fr[column], color = 'red', marker='o')\n plt.show()\n \n# 2nd function: to show a ranking of pearson correlation with the variable selected\n\ndef corr(data,column):\n print('Correlation with ' + column)\n print(train[data].corrwith(train[column]).abs().sort_values(ascending = False)[1:])\n \n# 3rd function: to reduce the groups based on Nans agroupation and pearson correlation\n\ndef reduce(groups):\n result = list() \n for values in groups:\n maxval = 0\n val = values[0] \n for value in values:\n unique_values = train[value].nunique()\n if unique_values > maxval:\n maxval = unique_values\n val = value \n result.append(value)\n return result\n\n# 4th function: to sort each column in ascending order based on its number\n\ndef order_finalcolumns(final_Xcolumns):\n return sorted(final_Xcolumns, key=lambda x: int(\"\".join([i for i in x if i.isdigit()])))", "_____no_output_____" ], [ "##### Download of files.\n\nprint('Downloading datasets...')\nprint(' ')\ntrain = pd.read_pickle('/kaggle/input/1-fraud-detection-memory-reduction/train_mred.pkl')\nprint('Train has been downloaded... (1/2)')\ntest = pd.read_pickle('/kaggle/input/1-fraud-detection-memory-reduction/test_mred.pkl')\nprint('Test has been downloaded... (2/2)')\nprint(' ')\nprint('All files are downloaded')", "_____no_output_____" ], [ "##### All the columns of train dataset.\n\nprint(list(train))", "_____no_output_____" ] ], [ [ "# NaNs Exploration\nWe will search all the columns to determine which columns are related by the number of NANs present. After grouping them, we decide to keep the columns of each group with major amount of unique values (its supposed to be the most explanatory variable)", "_____no_output_____" ], [ "## Transaction columns", "_____no_output_____" ] ], [ [ "# These columns are the first ones in transaction dataset.\n\ncolumns= list(train.columns[:17])\ncolumns", "_____no_output_____" ], [ "for col in columns:\n print(f'{col} NaNs: {train[col].isna().sum()} | {train[col].isna().sum()/train.shape[0]:.2%}')", "_____no_output_____" ], [ "# If we look closely to % NaNs data, most of them have low number of missing information. 
We are keeping all the columns where % NaNs < 0.7\n\nfinal_transactioncolumns = list()\nfor col in columns:\n if train[col].isna().sum()/train.shape[0] < 0.7:\n final_transactioncolumns.append(col)\nprint('Final Transaction columns:',final_transactioncolumns)", "_____no_output_____" ] ], [ [ "## C columns", "_____no_output_____" ] ], [ [ "##### Group the C columns to determine which columns are related by the number of NANs present and analyze its groups independently.\n\ncolumns = ['C' + str(i) for i in range(1,15)]\ndf_nan = train.isna()\ndict_nans = dict()\n\nfor column in columns:\n number_nans = df_nan[column].sum()\n try:\n dict_nans[number_nans].append(column)\n except:\n dict_nans[number_nans] = [column]\n\ngroup_number = 1\nfor key,values in dict_nans.items():\n print('Group {}'.format(group_number),'| Number of NANs =',key)\n print(values)\n print(' ')\n group_number += 1", "_____no_output_____" ] ], [ [ "### Group 1 (single group)", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT\n# There is no column that does not have NaNs values so we get all the columns in the same group\n\ngroup_list = ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = [['C1','C11','C2','C6','C8','C4','C10','C14','C12','C7','C13'], ['C3'], ['C5','C9']]\n \nresult = reduce(reduce_groups)\nprint('Final C columns:',result)\nfinal_ccolumns = result", "_____no_output_____" ] ], [ [ "## D columns", "_____no_output_____" ] ], [ [ "##### Group the D columns + Dachr columns to determine which columns are related by the number of NANs present and analyze its groups independently.\n\ncolumns = ['D' + str(i) for i in range(1,16)]\ncolumns.extend(['D1achr','D2achr','D4achr','D6achr','D10achr','D11achr','D12achr','D13achr','D14achr','D15achr'])\ndf_nan = train.isna()\ndict_nans = dict()\n\nfor column in columns:\n number_nans = df_nan[column].sum()\n try:\n dict_nans[number_nans].append(column)\n except:\n dict_nans[number_nans] = [column]\n\ngroup_number = 1\nfor key,values in dict_nans.items():\n print('Group {}'.format(group_number),'| Number of NANs =',key)\n print(values)\n print(' ')\n group_number += 1", "_____no_output_____" ] ], [ [ "### Group 1 (single group)", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n# Despite having different number of NaNs, we are analyzing it as a single group. 
But due to NaNs low number in D1, we keep it as a final column.\n\ngroup_list = ['D1achr', 'D2achr', 'D3', 'D4achr', 'D5', 'D6achr', 'D7', 'D8', 'D9', 'D10achr', 'D11achr', 'D12achr', 'D13achr', 'D14achr', 'D15achr']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n# On the first group, D1achr vs D2achr --> we keep D1achr due to the low number of NaNs.\n\nreduce_groups = [['D3','D7','D5'],['D4achr','D12achr','D6achr','D15achr','D10achr', 'D11achr'], ['D8'], ['D9'], ['D13achr'],['D14achr']]\n \nresult = reduce(reduce_groups)\nresult.append('D1achr')\nprint('Final D columns:',result)\nfinal_dcolumns = result", "_____no_output_____" ] ], [ [ "## M columns", "_____no_output_____" ] ], [ [ "##### Group the M columns to determine which columns are related by the number of NANs present and analyze its groups independently.\n\ncolumns = ['M' + str(i) for i in range(1,10)]\ndf_nan = train.isna()\ndict_nans = dict()\n\nfor column in columns:\n number_nans = df_nan[column].sum()\n try:\n dict_nans[number_nans].append(column)\n except:\n dict_nans[number_nans] = [column]\n\ngroup_number = 1\nfor key,values in dict_nans.items():\n print('Group {}'.format(group_number),'| Number of NANs =',key)\n print(values)\n print(' ')\n group_number += 1", "_____no_output_____" ] ], [ [ "### Group 1 (single group)", "_____no_output_____" ] ], [ [ "# To analize M columns, we need to transform strings to numbers. 
Instead of using Label Encoder, we use a dictionary.\n\nT_F_num = dict({'F': 0, 'T': 1, 'M0': 0, 'M1': 1, 'M2': 2})\n\nfor column in ['M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9']:\n print(f'{column}:', train[column].unique())\n print('Transforming strings to numbers...')\n train[column] = train[column].replace(T_F_num)\n print(f'{column}:', train[column].unique())\n print('')", "_____no_output_____" ], [ "##### Time series graph based on TransactionDT.\n# Despite having different number of NaNs, we are analyzing it as a single group.\n\ngroup_list = ['M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "#### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, We grouped together the columns with corr > 0.7 but in this case, no correlation is bigger than 0.7\n# That's why, in this particular case we grouped together the columns with corr > 0.5\n\nreduce_groups = ['M1'], ['M2','M3'], ['M4'], ['M5'], ['M6'], ['M7', 'M8'], ['M9']\n \nresult = reduce(reduce_groups)\nprint('Final M columns:',result)\nfinal_mcolumns = result", "_____no_output_____" ] ], [ [ "## V columns", "_____no_output_____" ] ], [ [ "##### Group the V columns to determine which columns are related by the number of NANs present and analyze its groups independently.\n\ncolumns = ['V' + str(i) for i in range(1,340)]\ndf_nan = train.isna()\ndict_nans = dict()\n\nfor column in columns:\n number_nans = df_nan[column].sum()\n try:\n dict_nans[number_nans].append(column)\n except:\n dict_nans[number_nans] = [column]\n\ngroup_number = 1\nfor key,values in dict_nans.items():\n print('Group {}'.format(group_number),'| Number of NANs =',key)\n print(values)\n print(' ')\n group_number += 1\n \nfinal_vcolumns = list()", "_____no_output_____" ] ], [ [ "### Group 1", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\ngroup_list = ['V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = ['V1'], ['V2','V3'], ['V4','V5'], ['V6','V7'], ['V8','V9']\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group1 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 2", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27',\n 'V28', 'V29', 'V30', 'V31', 'V32', 'V33', 'V34']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n 
corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = [['V12','V13'], ['V14'], ['V15','V16','V33','V34','V31','V32','V21','V22','V17','V18'], ['V19','V20'],['V23','V24'],['V25','V26'],['V27','V28'],['V29','V30']]\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group2 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 3", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V35', 'V36', 'V37', 'V38', 'V39', 'V40', 'V41', 'V42', 'V43', 'V44', 'V45', 'V46', 'V47', 'V48', 'V49', 'V50', 'V51', 'V52']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = [['V35','V36'], ['V37','V38'], ['V39','V40','V42','V43','V50','V51','V52'], ['V41'], ['V44','V45'],['V46','V47'],['V48','V49']]\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group3 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 4", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V53', 'V54', 'V55', 'V56', 'V57', 'V58', 'V59', 'V60', 'V61', 'V62', 'V63', 'V64', 'V65', 'V66', 'V67', 'V68', \n 'V69', 'V70', 'V71', 'V72', 'V73', 'V74']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = [['V53','V54'], ['V55','V56'], ['V57','V58','V71','V73','V72','V74','V63','V59','V64','V60'],['V61','V62'],['V65'],\n ['V66','V67'],['V68'], ['V69','V70']]\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group4 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 5", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V75', 'V76', 'V77', 'V78', 'V79', 'V80', 'V81', 'V82', 'V83', 'V84', 'V85', 'V86', 'V87', 'V88', 'V89', 'V90', 'V91', 'V92', 'V93', 'V94']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = [['V75','V76'],['V77','V78'], ['V79', 'V94', 'V93', 'V92', 'V84', 'V85', 'V80', 'V81'],['V82','V83'],['V86','V87'],['V88'],['V89'],['V90','V91']]\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group5 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 6", 
"_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V95', 'V96', 'V97', 'V98', 'V99', 'V100', 'V101', 'V102', 'V103', 'V104', 'V105', 'V106', 'V107', 'V108', 'V109', 'V110', 'V111', 'V112', \n 'V113', 'V114', 'V115', 'V116', 'V117', 'V118', 'V119', 'V120', 'V121', 'V122', 'V123', 'V124', 'V125', 'V126', 'V127', 'V128', 'V129', 'V130',\n 'V131', 'V132', 'V133', 'V134', 'V135', 'V136', 'V137']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n# We omit V107 since there is no info about corr with other columns and its unique values are 1.\n\nreduce_groups = [['V95','V101'],['V96','V102','V97','V99','V100','V103'],['V98'],['V104','V106','V105'],['V108','V110','V114','V109','V111','V113','V112','V115','V116'],\n ['V117','V119','V118'],['V120','V122','V121'],['V123','V125','V124'],['V126','V128','V132'],['V127','V133','V134'],['V129','V131','V130'],\n ['V135','V137','V136']]\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group6 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 7", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V138', 'V139', 'V140', 'V141', 'V142', 'V143', 'V144', 'V145', 'V146', 'V147', 'V148', 'V149', 'V150', 'V151', 'V152', 'V153', 'V154', \n 'V155', 'V156', 'V157', 'V158', 'V159', 'V160', 'V161', 'V162', 'V163', 'V164', 'V165', 'V166']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = [['V138'],['V139','V140'],['V141','V142'],['V143','V159','V150','V151','V165','V144','V145','V160','V152','V164','V166'],['V146','V147'],\n ['V148','V155','V149','V153','V154','V156','V157','V158'],['V161','V163','V162']]\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group7 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 8", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V167', 'V168', 'V172', 'V173', 'V176', 'V177', 'V178', 'V179', 'V181', 'V182', 'V183', 'V186', 'V187', 'V190', 'V191', 'V192', 'V193', \n 'V196', 'V199', 'V202', 'V203', 'V204', 'V205', 'V206', 'V207', 'V211', 'V212', 'V213', 'V214', 'V215', 'V216']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = 
['V167','V176','V199','V179','V190','V177','V186','V168','V172','V178','V196','V191','V204','V213','V207','V173'],['V181','V183','V182',\n 'V187','V192','V203','V215','V178','V193','V212','V204'],['V202','V216','V204','V214']\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group8 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 9", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V169', 'V170', 'V171', 'V174', 'V175', 'V180', 'V184', 'V185', 'V188', 'V189', 'V194', 'V195', 'V197', 'V198', 'V200', 'V201', 'V208', 'V209', 'V210']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = [['V169'],['V170','V171','V200','V201'],['V174','V175'],['V180'],['V184','V185'],['V188','V189'],['V194','V197','V195','V198'],\n ['V208','V210','V209']]\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group9 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 10", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V217', 'V218', 'V219', 'V223', 'V224', 'V225', 'V226', 'V228', 'V229', 'V230', 'V231', 'V232', 'V233', 'V235', 'V236', 'V237','V240',\n 'V241', 'V242', 'V243', 'V244', 'V246', 'V247', 'V248', 'V249', 'V252', 'V253', 'V254', 'V257', 'V258', 'V260', 'V261', 'V262', 'V263',\n 'V264', 'V265', 'V266', 'V267', 'V268', 'V269', 'V273', 'V274', 'V275', 'V276', 'V277', 'V278']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = [['V217','V231','V233','V228','V257','V219','V232','V246'],['V218','V229','V224','V225','V253','V243','V254','V248','V264','V261','V249','V258',\n 'V267','V274','V230','V236','V247','V262','V223','V252','V260'],['V226','V263','V276','V278'], ['V235','V237'],['V240','V241'],['V242','V244'],\n ['V265','V275','V277','V268','V273'],['V269','V266']]\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group10 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 11", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V220', 'V221', 'V222', 'V227', 'V234', 'V238', 'V239', 'V245', 'V250', 'V251', 'V255', 'V256', 'V259', 'V270', 'V271', 'V272']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with 
corr > 0.7\n\nreduce_groups = ['V220'],['V221','V222','V259','V245','V227','V255','V256'],['V234'],['V238','V239'],['V250','V251'],['V270','V272','V271']\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group11 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 12", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V279', 'V280', 'V284', 'V285', 'V286', 'V287', 'V290', 'V291', 'V292', 'V293', 'V294', 'V295', 'V297', 'V298', 'V299', 'V302', 'V303', 'V304',\n 'V305', 'V306', 'V307', 'V308', 'V309', 'V310', 'V311', 'V312', 'V316', 'V317', 'V318', 'V319', 'V320', 'V321']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = [['V279','V293','V290','V280','V295','V294','V292','V291','V317','V307','V318'],['V284'],['V285','V287'],['V286'],['V297','V299','V298'],\n ['V302','V304','V303'],['V305'],['V306','V308','V316','V319'],['V309','V311','V312','V310'],['V320','V321']]\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group12 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 13", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V281', 'V282', 'V283', 'V288', 'V289', 'V296', 'V300', 'V301', 'V313', 'V314', 'V315']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = ['V281','V282','V283'],['V288','V289'],['V296'],['V300','V301'],['V313','V315','V314']\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group13 columns:',result)", "_____no_output_____" ] ], [ [ "### Group 14", "_____no_output_____" ] ], [ [ "##### Time series graph based on TransactionDT.\n\ngroup_list = ['V322', 'V323', 'V324', 'V325', 'V326', 'V327', 'V328', 'V329', 'V330', 'V331', 'V332', 'V333', 'V334', 'V335', 'V336', 'V337', 'V338', 'V339']\n\nfor column in group_list:\n scatter(column)", "_____no_output_____" ], [ "##### Heatmap\n\nplt.figure(figsize = (15,15))\nsns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)\nplt.show()", "_____no_output_____" ], [ "##### Ranking of pearson correlation.\n\nfor column in group_list:\n corr(group_list,column)\n print(' ')", "_____no_output_____" ], [ "##### Based on pearson correlation, we grouped together the columns with corr > 0.7\n\nreduce_groups = ['V322','V324'],['V323','V326','V324','V327','V326'],['V325'],['V328','V330','V329'],['V331','V333','V332','V337'],['V334','V336','V335']\n\nresult = reduce(reduce_groups)\nfinal_vcolumns.extend(result)\nprint('Final V_Group14 columns:',result)", "_____no_output_____" ] ], [ [ "### Final V columns", "_____no_output_____" ] ], [ [ "print('Number of V columns:', 
len(final_vcolumns))\nprint(final_vcolumns)", "_____no_output_____" ] ], [ [ "# Conclusions\nBased on previous process, we suggest keeping as final columns the ones describes below:", "_____no_output_____" ] ], [ [ "##### 1st we sort them (ascending order) with a function\n\nfinal_ccolumns = order_finalcolumns(final_ccolumns)\nfinal_dcolumns = order_finalcolumns(final_dcolumns)\nfinal_mcolumns = order_finalcolumns(final_mcolumns)\nfinal_vcolumns = order_finalcolumns(final_vcolumns)", "_____no_output_____" ], [ "##### Final columns\n\nprint(f'Final Transaction columns ({len(final_transactioncolumns)}): {final_transactioncolumns}')\nprint(' ')\nprint(f'Final C columns ({len(final_ccolumns)}): {final_ccolumns}')\nprint(' ')\nprint(f'Final D columns ({len(final_dcolumns)}): {final_dcolumns}')\nprint(' ')\nprint(f'Final M columns ({len(final_mcolumns)}): {final_mcolumns}')\nprint(' ')\nprint(f'Final V columns ({len(final_vcolumns)}): {final_vcolumns}')\nprint(' ')\n\nprint('#' * 50)\n\nfinal_columns = final_transactioncolumns + final_ccolumns + final_dcolumns + final_mcolumns + final_vcolumns\nprint(' ')\nprint('Final columns:', final_columns)\nprint(' ')\nprint('Lenght of final columns:', len(final_columns))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb40d7b53159dc94f9bed57c34634acfe10397a3
17,464
ipynb
Jupyter Notebook
02-PythonAndDocker/02-PythonAndDocker.ipynb
6za/runbooks-jupyter
482f92f3e0307b4f1cb4d504817678c48f98bd6a
[ "Apache-2.0" ]
null
null
null
02-PythonAndDocker/02-PythonAndDocker.ipynb
6za/runbooks-jupyter
482f92f3e0307b4f1cb4d504817678c48f98bd6a
[ "Apache-2.0" ]
null
null
null
02-PythonAndDocker/02-PythonAndDocker.ipynb
6za/runbooks-jupyter
482f92f3e0307b4f1cb4d504817678c48f98bd6a
[ "Apache-2.0" ]
null
null
null
32.460967
129
0.37792
[ [ [ "## Sample Notebook - Python Pandas and some docker triggers\n\nHow to start: [01-SimpleDockerTest.ipynb](https://github.com/6za/runbooks-jupyter/blob/master/01-SimpleDockerTest.ipynb)\n\nSample Notenook mixing pandas and command line goodies.", "_____no_output_____" ] ], [ [ "import pandas as pd\nprint(pd.__version__)", "0.25.3\n" ] ], [ [ "Sample `hosts.csv`:\n\n```txt\nhostname,ip,arch,user\npi-node1,11.22.33.44,armv7l,pi\n```\n\n> File content list of names, ips and some annotations. ", "_____no_output_____" ] ], [ [ "hosts = pd.read_csv(\"hosts.csv\")\nsupressed_columns = ['ip','user']", "_____no_output_____" ] ], [ [ "### List hostnames", "_____no_output_____" ] ], [ [ "hosts.drop(columns=supressed_columns)", "_____no_output_____" ], [ "hosts['ip'].to_csv(r'ip.txt', header=False, index=None, sep=' ')", "_____no_output_____" ], [ "!cat env.sh", "#!/bin/bash\r\nexport DOCKER_TLS_VERIFY=\"1\"\r\nexport DOCKER_CERT_PATH=\"/root/certs\"\r\nexport DOCKER_HOST=\"tcp://$demo_docker_host:2376\"" ] ], [ [ "### List containers running on hosts", "_____no_output_____" ] ], [ [ "%%bash \n#source ./env.sh && docker ps\necho \"ip|id|image|ports\" > containers.csv\nwhile read ip; do\n source ./env.sh \n export DOCKER_HOST=\"tcp://$ip:2376\"\n docker ps --format \"$ip|{{.ID}}|{{.Image}}|{{.Ports}}\" >> containers.csv 2>/dev/null || :\ndone <ip.txt", "_____no_output_____" ], [ "containers = pd.read_csv(\"containers.csv\", sep = '|')\npd.merge(containers, hosts, on='ip', how='inner').drop(columns=supressed_columns)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb40deffd2c27bba2cbb76e0f6ca781bffc0b268
53,273
ipynb
Jupyter Notebook
assignment2/Dropout_2018.ipynb
Ginko-L/CS231n
fe1b4d44537028f55b3b16f8411b34fff0298546
[ "MIT" ]
30
2018-09-18T18:28:27.000Z
2022-03-09T01:49:25.000Z
assignment2/Dropout_2018.ipynb
maxis42/CS231n
fe1b4d44537028f55b3b16f8411b34fff0298546
[ "MIT" ]
null
null
null
assignment2/Dropout_2018.ipynb
maxis42/CS231n
fe1b4d44537028f55b3b16f8411b34fff0298546
[ "MIT" ]
29
2018-03-22T15:15:09.000Z
2022-02-14T21:58:08.000Z
106.333333
35,844
0.835095
[ [ [ "# Dropout\nDropout [1] is a technique for regularizing neural networks by randomly setting some features to zero during the forward pass. In this exercise you will implement a dropout layer and modify your fully-connected network to optionally use dropout.\n\n[1] [Geoffrey E. Hinton et al, \"Improving neural networks by preventing co-adaptation of feature detectors\", arXiv 2012](https://arxiv.org/abs/1207.0580)", "_____no_output_____" ] ], [ [ "# As usual, a bit of setup\nfrom __future__ import print_function\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cs231n.classifiers.fc_net import *\nfrom cs231n.data_utils import get_CIFAR10_data\nfrom cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array\nfrom cs231n.solver import Solver\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))", "_____no_output_____" ], [ "# Load the (preprocessed) CIFAR10 data.\n\ndata = get_CIFAR10_data()\nfor k, v in data.items():\n print('%s: ' % k, v.shape)", "X_train: (49000, 3, 32, 32)\ny_train: (49000,)\nX_val: (1000, 3, 32, 32)\ny_val: (1000,)\nX_test: (1000, 3, 32, 32)\ny_test: (1000,)\n" ] ], [ [ "# Dropout forward pass\nIn the file `cs231n/layers.py`, implement the forward pass for dropout. Since dropout behaves differently during training and testing, make sure to implement the operation for both modes.\n\nOnce you have done so, run the cell below to test your implementation.", "_____no_output_____" ] ], [ [ "np.random.seed(231)\nx = np.random.randn(500, 500) + 10\n\nfor p in [0.25, 0.4, 0.7]:\n out, _ = dropout_forward(x, {'mode': 'train', 'p': p})\n out_test, _ = dropout_forward(x, {'mode': 'test', 'p': p})\n\n print('Running tests with p = ', p)\n print('Mean of input: ', x.mean())\n print('Mean of train-time output: ', out.mean())\n print('Mean of test-time output: ', out_test.mean())\n print('Fraction of train-time output set to zero: ', (out == 0).mean())\n print('Fraction of test-time output set to zero: ', (out_test == 0).mean())\n print()", "Running tests with p = 0.25\nMean of input: 10.000207878477502\nMean of train-time output: 2.5035147792443206\nMean of test-time output: 10.000207878477502\nFraction of train-time output set to zero: 0.749784\nFraction of test-time output set to zero: 0.0\n\nRunning tests with p = 0.4\nMean of input: 10.000207878477502\nMean of train-time output: 3.991167063504464\nMean of test-time output: 10.000207878477502\nFraction of train-time output set to zero: 0.600796\nFraction of test-time output set to zero: 0.0\n\nRunning tests with p = 0.7\nMean of input: 10.000207878477502\nMean of train-time output: 6.9914683385116\nMean of test-time output: 10.000207878477502\nFraction of train-time output set to zero: 0.30074\nFraction of test-time output set to zero: 0.0\n\n" ] ], [ [ "# Dropout backward pass\nIn the file `cs231n/layers.py`, implement the backward pass for dropout. 
After doing so, run the following cell to numerically gradient-check your implementation.", "_____no_output_____" ] ], [ [ "np.random.seed(231)\nx = np.random.randn(10, 10) + 10\ndout = np.random.randn(*x.shape)\n\ndropout_param = {'mode': 'train', 'p': 0.2, 'seed': 123}\nout, cache = dropout_forward(x, dropout_param)\ndx = dropout_backward(dout, cache)\ndx_num = eval_numerical_gradient_array(lambda xx: dropout_forward(xx, dropout_param)[0], x, dout)\n\n# Error should be around e-10 or less\nprint('dx relative error: ', rel_error(dx, dx_num))", "dx relative error: 5.44560814873387e-11\n" ] ], [ [ "## Inline Question 1:\nWhat happens if we do not divide the values being passed through inverse dropout by `p` in the dropout layer? Why does that happen?", "_____no_output_____" ], [ "## Answer:\n", "_____no_output_____" ], [ "# Fully-connected nets with Dropout\nIn the file `cs231n/classifiers/fc_net.py`, modify your implementation to use dropout. Specifically, if the constructor of the net receives a value that is not 1 for the `dropout` parameter, then the net should add dropout immediately after every ReLU nonlinearity. After doing so, run the following to numerically gradient-check your implementation.", "_____no_output_____" ] ], [ [ "np.random.seed(231)\nN, D, H1, H2, C = 2, 15, 20, 30, 10\nX = np.random.randn(N, D)\ny = np.random.randint(C, size=(N,))\n\nfor dropout in [1, 0.75, 0.5]:\n print('Running check with dropout = ', dropout)\n model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,\n weight_scale=5e-2, dtype=np.float64,\n dropout=dropout, seed=123)\n\n loss, grads = model.loss(X, y)\n print('Initial loss: ', loss)\n\n # Relative errors should be around e-6 or less; Note that it's fine\n # if for dropout=1 you have W2 error be on the order of e-5.\n for name in sorted(grads):\n f = lambda _: model.loss(X, y)[0]\n grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)\n print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))\n print()", "Running check with dropout = 1\nInitial loss: 2.3004790897684924\nW1 relative error: 1.48e-07\nW2 relative error: 2.21e-05\nW3 relative error: 3.53e-07\nb1 relative error: 5.38e-09\nb2 relative error: 2.09e-09\nb3 relative error: 5.80e-11\n\nRunning check with dropout = 0.75\nInitial loss: 2.302371489704412\nW1 relative error: 1.90e-07\nW2 relative error: 4.76e-06\nW3 relative error: 2.60e-08\nb1 relative error: 4.73e-09\nb2 relative error: 1.82e-09\nb3 relative error: 1.70e-10\n\nRunning check with dropout = 0.5\nInitial loss: 2.3042759220785896\nW1 relative error: 3.11e-07\nW2 relative error: 1.84e-08\nW3 relative error: 5.35e-08\nb1 relative error: 5.37e-09\nb2 relative error: 2.99e-09\nb3 relative error: 1.13e-10\n\n" ] ], [ [ "# Regularization experiment\nAs an experiment, we will train a pair of two-layer networks on 500 training examples: one will use no dropout, and one will use a keep probability of 0.25. 
We will then visualize the training and validation accuracies of the two networks over time.", "_____no_output_____" ] ], [ [ "# Train two identical nets, one with dropout and one without\nnp.random.seed(231)\nnum_train = 500\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nsolvers = {}\ndropout_choices = [1, 0.25]\nfor dropout in dropout_choices:\n model = FullyConnectedNet([500], dropout=dropout)\n print(dropout)\n\n solver = Solver(model, small_data,\n num_epochs=25, batch_size=100,\n update_rule='adam',\n optim_config={\n 'learning_rate': 5e-4,\n },\n verbose=True, print_every=100)\n solver.train()\n solvers[dropout] = solver", "1\n(Iteration 1 / 125) loss: 7.856643\n(Epoch 0 / 25) train acc: 0.260000; val_acc: 0.184000\n(Epoch 1 / 25) train acc: 0.416000; val_acc: 0.258000\n(Epoch 2 / 25) train acc: 0.482000; val_acc: 0.276000\n(Epoch 3 / 25) train acc: 0.532000; val_acc: 0.277000\n(Epoch 4 / 25) train acc: 0.600000; val_acc: 0.271000\n(Epoch 5 / 25) train acc: 0.708000; val_acc: 0.299000\n(Epoch 6 / 25) train acc: 0.722000; val_acc: 0.282000\n(Epoch 7 / 25) train acc: 0.832000; val_acc: 0.255000\n(Epoch 8 / 25) train acc: 0.878000; val_acc: 0.269000\n(Epoch 9 / 25) train acc: 0.902000; val_acc: 0.275000\n(Epoch 10 / 25) train acc: 0.888000; val_acc: 0.261000\n(Epoch 11 / 25) train acc: 0.928000; val_acc: 0.276000\n(Epoch 12 / 25) train acc: 0.960000; val_acc: 0.304000\n(Epoch 13 / 25) train acc: 0.962000; val_acc: 0.305000\n(Epoch 14 / 25) train acc: 0.968000; val_acc: 0.304000\n(Epoch 15 / 25) train acc: 0.970000; val_acc: 0.277000\n(Epoch 16 / 25) train acc: 0.988000; val_acc: 0.301000\n(Epoch 17 / 25) train acc: 0.982000; val_acc: 0.305000\n(Epoch 18 / 25) train acc: 0.986000; val_acc: 0.300000\n(Epoch 19 / 25) train acc: 0.990000; val_acc: 0.292000\n(Epoch 20 / 25) train acc: 0.984000; val_acc: 0.304000\n(Iteration 101 / 125) loss: 0.048026\n(Epoch 21 / 25) train acc: 0.964000; val_acc: 0.310000\n(Epoch 22 / 25) train acc: 0.990000; val_acc: 0.314000\n(Epoch 23 / 25) train acc: 0.974000; val_acc: 0.308000\n(Epoch 24 / 25) train acc: 0.976000; val_acc: 0.291000\n(Epoch 25 / 25) train acc: 0.988000; val_acc: 0.325000\n0.25\n(Iteration 1 / 125) loss: 17.318478\n(Epoch 0 / 25) train acc: 0.230000; val_acc: 0.177000\n(Epoch 1 / 25) train acc: 0.378000; val_acc: 0.243000\n(Epoch 2 / 25) train acc: 0.402000; val_acc: 0.254000\n(Epoch 3 / 25) train acc: 0.502000; val_acc: 0.276000\n(Epoch 4 / 25) train acc: 0.528000; val_acc: 0.298000\n(Epoch 5 / 25) train acc: 0.562000; val_acc: 0.297000\n(Epoch 6 / 25) train acc: 0.626000; val_acc: 0.290000\n(Epoch 7 / 25) train acc: 0.628000; val_acc: 0.298000\n(Epoch 8 / 25) train acc: 0.686000; val_acc: 0.310000\n(Epoch 9 / 25) train acc: 0.722000; val_acc: 0.289000\n(Epoch 10 / 25) train acc: 0.724000; val_acc: 0.300000\n(Epoch 11 / 25) train acc: 0.760000; val_acc: 0.305000\n(Epoch 12 / 25) train acc: 0.772000; val_acc: 0.279000\n(Epoch 13 / 25) train acc: 0.818000; val_acc: 0.306000\n(Epoch 14 / 25) train acc: 0.812000; val_acc: 0.339000\n(Epoch 15 / 25) train acc: 0.852000; val_acc: 0.346000\n(Epoch 16 / 25) train acc: 0.834000; val_acc: 0.298000\n(Epoch 17 / 25) train acc: 0.860000; val_acc: 0.288000\n(Epoch 18 / 25) train acc: 0.846000; val_acc: 0.320000\n(Epoch 19 / 25) train acc: 0.880000; val_acc: 0.325000\n(Epoch 20 / 25) train acc: 0.864000; val_acc: 0.305000\n(Iteration 101 / 125) loss: 5.147926\n(Epoch 21 / 25) train acc: 
0.874000; val_acc: 0.322000\n(Epoch 22 / 25) train acc: 0.900000; val_acc: 0.308000\n(Epoch 23 / 25) train acc: 0.918000; val_acc: 0.303000\n(Epoch 24 / 25) train acc: 0.908000; val_acc: 0.340000\n(Epoch 25 / 25) train acc: 0.904000; val_acc: 0.334000\n" ], [ "# Plot train and validation accuracies of the two models\n\ntrain_accs = []\nval_accs = []\nfor dropout in dropout_choices:\n solver = solvers[dropout]\n train_accs.append(solver.train_acc_history[-1])\n val_accs.append(solver.val_acc_history[-1])\n\nplt.subplot(3, 1, 1)\nfor dropout in dropout_choices:\n plt.plot(solvers[dropout].train_acc_history, 'o', label='%.2f dropout' % dropout)\nplt.title('Train accuracy')\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.legend(ncol=2, loc='lower right')\n \nplt.subplot(3, 1, 2)\nfor dropout in dropout_choices:\n plt.plot(solvers[dropout].val_acc_history, 'o', label='%.2f dropout' % dropout)\nplt.title('Val accuracy')\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.legend(ncol=2, loc='lower right')\n\nplt.gcf().set_size_inches(15, 15)\nplt.show()", "_____no_output_____" ] ], [ [ "## Inline Question 2:\nCompare the validation and training accuracies with and without dropout -- what do your results suggest about dropout as a regularizer?", "_____no_output_____" ], [ "## Answer:\n", "_____no_output_____" ], [ "## Inline Question 3:\nSuppose we are training a deep fully-connected network for image classification, with dropout after hidden layers (parameterized by keep probability p). How should we modify p, if at all, if we decide to decrease the size of the hidden layers (that is, the number of nodes in each layer)?", "_____no_output_____" ], [ "## Answer:\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
cb40e09ee352524cd267f2f08cee9dc94fb6e769
142,910
ipynb
Jupyter Notebook
sim_1/neonatal_infant_sim_1.ipynb
MichaelAllen1966/infant_sim
73cd3dd5c735bf8a9bdb750e3c212e343a87ac01
[ "MIT" ]
null
null
null
sim_1/neonatal_infant_sim_1.ipynb
MichaelAllen1966/infant_sim
73cd3dd5c735bf8a9bdb750e3c212e343a87ac01
[ "MIT" ]
null
null
null
sim_1/neonatal_infant_sim_1.ipynb
MichaelAllen1966/infant_sim
73cd3dd5c735bf8a9bdb750e3c212e343a87ac01
[ "MIT" ]
null
null
null
744.322917
138,791
0.952677
[ [ [ "# Neonatal infant simulator", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sim_utils.global_variables import GlobVars\nfrom sim_utils.patient import Patient", "_____no_output_____" ], [ "# Set up global variables object\nglobal_vars = GlobVars()", "_____no_output_____" ], [ "number_of_patient_records_to_generate = 250\npatient_observations = []\nfor i in range(number_of_patient_records_to_generate):\n print (f'\\rGenerating patient {i+1} of {number_of_patient_records_to_generate}', end='')\n patient = Patient(global_vars)\n patient.loop_through_days()\n patient_observations.append(patient.observations)", "Generating patient 250 of 250" ], [ "all_patients = pd.concat(patient_observations)", "_____no_output_____" ], [ "all_patients.to_csv('./output/sim_1_output.csv', index_label = 'day')", "_____no_output_____" ] ], [ [ "## Draw some example graphs", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(10,12), facecolor='w')\nfor condition in range(6):\n # Get first patient to match condition\n for patient_obs in patient_observations:\n if patient_obs.iloc[0]['condition'] == condition:\n ax = fig.add_subplot(3,2, condition+1)\n fields = ['gi', 'pulmonary', 'brain']\n for field in fields:\n x = patient_obs.index\n y = patient_obs[field]\n ax.plot(x, y, label = field)\n ax.legend()\n title = global_vars.patient_types[condition]\n ax.set_title(title)\n ax.set_xlabel('Day')\n ax.set_ylabel('Observations')\n break\nplt.tight_layout(pad=2)\nplt.savefig('./output/examples.png', dpi=300)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb40ed511334ae5436dd7753c9c954365a7a6249
8,568
ipynb
Jupyter Notebook
Notebooks/0. Gather data.ipynb
maigimenez/bullies
fbee986d92160baf659b9b672340a3be518df06e
[ "MIT" ]
1
2016-05-11T17:13:32.000Z
2016-05-11T17:13:32.000Z
Notebooks/0. Gather data.ipynb
maigimenez/bullies
fbee986d92160baf659b9b672340a3be518df06e
[ "MIT" ]
null
null
null
Notebooks/0. Gather data.ipynb
maigimenez/bullies
fbee986d92160baf659b9b672340a3be518df06e
[ "MIT" ]
null
null
null
22.60686
153
0.493697
[ [ [ "from configparser import ConfigParser\nfrom os.path import join\nfrom os import pardir", "_____no_output_____" ] ], [ [ "### Configurar las credenciales para acceder al API de Twitter", "_____no_output_____" ] ], [ [ "config = ConfigParser()\nconfig.read(join(pardir,'src','credentials.ini'))", "_____no_output_____" ], [ "APP_KEY = config['twitter']['app_key']\nAPP_SECRET = config['twitter']['app_secret']\nOAUTH_TOKEN = config['twitter']['oauth_token']\nOAUTH_TOKEN_SECRET = config['twitter']['oauth_token_secret']", "_____no_output_____" ], [ "from twitter import oauth, Twitter, TwitterHTTPError", "_____no_output_____" ] ], [ [ "Esta es la molona librería que vamos a utilizar: https://github.com/sixohsix/twitter/tree/master", "_____no_output_____" ] ], [ [ "auth = oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,\n APP_KEY, APP_SECRET)\n\ntwitter_api = Twitter(auth=auth)\ntwitter_api.retry = True", "_____no_output_____" ] ], [ [ "### 1 . Recoger tweets a partir de un id", "_____no_output_____" ] ], [ [ "tweet = twitter_api.statuses.show(_id='628949369883000832')", "_____no_output_____" ], [ "tweet['text']", "_____no_output_____" ] ], [ [ "### 2. Recoger tweets de una usuaria", "_____no_output_____" ] ], [ [ "femfreq_tweet_search = twitter_api.statuses.user_timeline(screen_name=\"femfreq\", count=100)", "_____no_output_____" ], [ "femfreq_tweet_search[0]['user']['description']", "_____no_output_____" ], [ "femfreq_tweet_search[-1]['text']", "_____no_output_____" ] ], [ [ "### 3. Recoger tweets a partir de una consulta", "_____no_output_____" ] ], [ [ "tweets = twitter_api.search.tweets(q=\"#feminazi\", count=100)", "_____no_output_____" ], [ "tweets['search_metadata']", "_____no_output_____" ], [ "import pandas as pd\n\ntext_gathered = [tweet_data['text'] for tweet_data in tweets['statuses']]\nnum_tweets = len(text_gathered)\npd_tweets = pd.DataFrame( {'tweet_text': text_gathered,\n 'troll_tag': [False] * num_tweets})\n", "_____no_output_____" ], [ "pd_tweets.head()", "_____no_output_____" ], [ "pd_tweets.to_csv('maybe_troll.csv')", "_____no_output_____" ], [ "ls", "0. Gather data.ipynb maybe_troll.csv\r\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb41193364ee68ccc9fd1f8d2943f54b6e424653
186,564
ipynb
Jupyter Notebook
CGATPipelines/pipeline_docs/pipeline_bamstats/Jupyter_report/CGAT_idx_stats_report.ipynb
cdrakesmith/CGATPipelines
3c94ae4f9d87d51108255dc405c4b95af7c8b694
[ "MIT" ]
49
2015-04-13T16:49:25.000Z
2022-03-29T10:29:14.000Z
CGATPipelines/pipeline_docs/pipeline_bamstats/Jupyter_report/CGAT_idx_stats_report.ipynb
cdrakesmith/CGATPipelines
3c94ae4f9d87d51108255dc405c4b95af7c8b694
[ "MIT" ]
252
2015-04-08T13:23:34.000Z
2019-03-18T21:51:29.000Z
CGATPipelines/pipeline_docs/pipeline_bamstats/Jupyter_report/CGAT_idx_stats_report.ipynb
cdrakesmith/CGATPipelines
3c94ae4f9d87d51108255dc405c4b95af7c8b694
[ "MIT" ]
22
2015-05-21T00:37:52.000Z
2019-09-25T05:04:27.000Z
294.729858
87,960
0.885482
[ [ [ "<script>\n jQuery(document).ready(function($) {\n\n $(window).load(function(){\n $('#preloader').fadeOut('slow',function(){$(this).remove();});\n });\n\n });\n</script>\n\n<style type=\"text/css\">\n div#preloader { position: fixed;\n left: 0;\n top: 0;\n z-index: 999;\n width: 100%;\n height: 100%;\n overflow: visible;\n background: #fff url('http://preloaders.net/preloaders/720/Moving%20line.gif') no-repeat center center;\n }\n</style>\n<div id=\"preloader\"></div>", "_____no_output_____" ], [ "<script>\n function code_toggle() {\n if (code_shown){\n $('div.input').hide('500');\n $('#toggleButton').val('Show Code')\n } else {\n $('div.input').show('500');\n $('#toggleButton').val('Hide Code')\n }\n code_shown = !code_shown\n }\n\n $( document ).ready(function(){\n code_shown=false;\n $('div.input').hide()\n });\n</script>\n", "_____no_output_____" ] ], [ [ "# <font color='firebrick'><center>Idx Stats Report</center></font>\n### This report provides information from the output of samtools idxstats tool. It outputs the number of mapped reads per chromosome/contig.\n<br>\n\n", "_____no_output_____" ] ], [ [ "from IPython.display import display, Markdown\nfrom IPython.display import HTML\nimport IPython.core.display as di\nimport csv\nimport numpy as np\nimport zlib\nimport CGAT.IOTools as IOTools\nimport itertools as ITL\nimport os\nimport string\nimport pandas as pd\nimport sqlite3\nimport matplotlib as mpl\nfrom matplotlib.backends.backend_pdf import PdfPages # noqa: E402\n#mpl.use('Agg') # noqa: E402\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\nimport matplotlib.font_manager as font_manager\nimport matplotlib.lines as mlines\nfrom matplotlib.colors import ListedColormap\nfrom matplotlib import cm\nfrom matplotlib import rc, font_manager\nimport CGAT.Experiment as E\nimport math\nfrom random import shuffle\nimport matplotlib as mpl\nimport datetime\nimport seaborn as sns\nimport nbformat\n%matplotlib inline \n\n\n##################################################\n#Plot customization\n#plt.ioff()\nplt.style.use('seaborn-white')\n#plt.style.use('ggplot')\ntitle_font = {'size':'20','color':'darkblue', 'weight':'bold', 'verticalalignment':'bottom'} # Bottom vertical alignment for more space\naxis_font = {'size':'18', 'weight':'bold'}\n#For summary page pdf\n'''To add description page\nplt.figure() \nplt.axis('off')\nplt.text(0.5,0.5,\"my title\",ha='center',va='center')\npdf.savefig()\n'''\n#Panda data frame cutomization\npd.options.display.width = 80\npd.set_option('display.max_colwidth', -1)\n\nchr_feature=['total_reads','total_mapped_reads',\n 'chr1','chr2','chr3','chr4',\n 'chr5','chr6','chr7','chr8',\n 'chr9','chr10','chr11','chr12',\n 'chr13','chr14','chr15','chr16',\n 'chr17','chr18','chr19','chrX', \n 'chrY','chrM']\nchr_index=['Total reads','Total mapped reads',\n 'chr1','chr2','chr3','chr4',\n 'chr5','chr6','chr7','chr8',\n 'chr9','chr10','chr11','chr12',\n 'chr13','chr14','chr15','chr16',\n 'chr17','chr18','chr19','chrX', \n 'chrY','chrM']\ncolors_category = ['red','green','darkorange','yellowgreen', 'pink', 'gold', 'lightskyblue', \n 'orchid','darkgoldenrod','skyblue','b', 'red', \n 'darkorange','grey','violet','magenta','cyan',\n 'hotpink','mediumslateblue']\nthreshold = 5\n\ndef hover(hover_color=\"#ffff99\"):\n return dict(selector=\"tr:hover\",\n props=[(\"background-color\", \"%s\" % hover_color)])\n\ndef y_fmt(y, pos):\n decades = [1e9, 1e6, 1e3, 1e0, 1e-3, 1e-6, 1e-9 ]\n suffix = [\"G\", \"M\", \"k\", \"\" , \"m\" , \"u\", \"n\" ]\n 
if y == 0:\n return str(0)\n for i, d in enumerate(decades):\n if np.abs(y) >=d:\n val = y/float(d)\n signf = len(str(val).split(\".\")[1])\n if signf == 0:\n return '{val:d} {suffix}'.format(val=int(val), suffix=suffix[i])\n else:\n if signf == 1:\n #print(val, signf)\n if str(val).split(\".\")[1] == \"0\":\n return '{val:d} {suffix}'.format(val=int(round(val)), suffix=suffix[i]) \n tx = \"{\"+\"val:.{signf}f\".format(signf = signf) +\"} {suffix}\"\n return tx.format(val=val, suffix=suffix[i])\n\n #return y\n return y\n\ndef getTables(dbname):\n '''\n Retrieves the names of all tables in the database.\n Groups tables into dictionaries by annotation\n '''\n dbh = sqlite3.connect(dbname)\n c = dbh.cursor()\n statement = \"SELECT name FROM sqlite_master WHERE type='table'\"\n c.execute(statement)\n tables = c.fetchall()\n print(tables)\n c.close()\n dbh.close()\n return \n\ndef readDBTable(dbname, tablename):\n '''\n Reads the specified table from the specified database.\n Returns a list of tuples representing each row\n '''\n dbh = sqlite3.connect(dbname)\n c = dbh.cursor()\n statement = \"SELECT * FROM %s\" % tablename\n c.execute(statement)\n allresults = c.fetchall()\n c.close()\n dbh.close()\n return allresults\n\ndef getDBColumnNames(dbname, tablename):\n dbh = sqlite3.connect(dbname)\n res = pd.read_sql('SELECT * FROM %s' % tablename, dbh)\n dbh.close()\n return res.columns\n\n\ndef plotBar(df,samplename):\n fig, ax = plt.subplots()\n ax.set_frame_on(True)\n ax.xaxis.set_major_formatter(FuncFormatter(y_fmt))\n colors=['yellowgreen','darkorange']\n for ii in range(0,df.shape[0]):\n plt.barh(ii,df['chrX'][ii],color=colors[0], align=\"center\",height=0.6,edgecolor=colors[0])\n plt.barh(ii,df['chrY'][ii],color=colors[1], align=\"center\",height=0.6,edgecolor=colors[0])\n fig = plt.gcf()\n fig.set_size_inches(20,14)\n plt.yticks(fontsize =20,weight='bold')\n plt.yticks(range(df.shape[0]),df['track'])\n plt.xticks(fontsize =20,weight='bold')\n ax.grid(which='major', linestyle='-', linewidth='0.3')\n plt.ylabel(\"Sample\",labelpad=65,fontsize =25,weight='bold')\n plt.xlabel(\"\\nMapped reads\",fontsize =25,weight='bold')\n plt.title(\"Reads mapped to X and Y chromosome\\n\",fontsize =30,weight='bold',color='darkblue')\n plt.gca().invert_yaxis()\n legend_properties = {'weight':'bold','size':'20'}\n leg = plt.legend(chr_feature[21:23],title=\"Contigs\",prop=legend_properties,bbox_to_anchor=(1.14,0.65),frameon=True)\n leg.get_frame().set_edgecolor('k')\n leg.get_frame().set_linewidth(2)\n leg.get_title().set_fontsize(25)\n leg.get_title().set_fontweight('bold')\n plt.tight_layout()\n #plt.savefig(''.join([samplename,'.png']),bbox_inches='tight',pad_inches=0.6)\n plt.show()\n return fig\n\ndef displayTable(plotdf,name):\n # Display table\n styles = [\n hover(),\n dict(selector=\"th\", props=[(\"font-size\", \"130%\"),\n (\"text-align\", \"center\"),\n ]), \n dict(selector=\"td\", props=[(\"font-size\", \"120%\"),\n (\"text-align\", \"center\"),\n ]),\n dict(selector=\"caption\", props=[(\"caption-side\", \"top\"),\n (\"text-align\", \"center\"),\n (\"font-size\", \"100%\")])\n ]\n df1 = (plotdf.style.set_table_styles(styles).set_caption(name))\n display(df1)\n print(\"\\n\\n\")\n \ndef plot_idxstats(newdf,df,samplename):\n \n fig,ax = plt.subplots()\n ax.grid(which='major', linestyle='-', linewidth='0.25')\n ax.yaxis.set_major_formatter(FuncFormatter(y_fmt))\n index=list(range(newdf.shape[1]))\n colors = plt.cm.plasma(np.linspace(0,1,newdf.shape[0]))\n for ii in range(0,newdf.shape[0]):\n 
plt.plot(index,newdf.iloc[ii],linewidth=2,color=colors[ii],linestyle=\"-\",marker='o',fillstyle='full',markersize=8)\n fig = plt.gcf()\n fig.set_size_inches(11,8)\n plt.xticks(index,chr_feature[2:24],fontsize = 14,weight='bold')\n plt.yticks(fontsize = 14,weight='bold')\n labels = ax.get_xticklabels()\n plt.setp(labels, rotation=40)\n legend_properties = {'weight':'bold','size':'14'}\n leg = plt.legend(df['track'],title=\"Sample\",prop=legend_properties,bbox_to_anchor=(1.42,1.01),frameon=True)\n leg.get_frame().set_edgecolor('k')\n leg.get_frame().set_linewidth(2)\n leg.get_title().set_fontsize(16)\n leg.get_title().set_fontweight('bold')\n plt.xlabel('\\nContigs',**axis_font)\n plt.ylabel('Mapped Reads',**axis_font,labelpad=40)\n plt.title(\"Mapped reads per contig\", **title_font)\n plt.tight_layout()\n #plt.savefig(''.join([samplename,'.png']),bbox_inches='tight',pad_inches=0.6)\n print(\"\\n\\n\")\n plt.show()\n return fig\n \ndef idxStatsReport(dbname, tablename):\n trans = pd.DataFrame(readDBTable(dbname,tablename))\n trans.columns = getDBColumnNames(dbname,tablename)\n df=trans\n #print(df)\n #newdf = df[df.columns[0:25]] \n newdf = df[chr_feature[2:24]]\n #print(newdf)\n plotdf = df[chr_feature]\n plotdf.columns = chr_index\n plotdf.index = [df['track']]\n #del plotdf.index.name\n #pdf=PdfPages(\"idx_stats_summary.pdf\")\n displayTable(plotdf,\"Idx Full Stats\")\n fig = plot_idxstats(newdf,df,\"idx_full_stats\")\n #pdf.savefig(fig,bbox_inches='tight',pad_inches=0.6)\n print(\"\\n\\n\\n\")\n fig = plotBar(df,\"idxStats_X_Y_mapped_reads\")\n #pdf.savefig(fig,bbox_inches='tight',pad_inches=0.6)\n #pdf.close()\n#getTables(\"csvdb\")\nidxStatsReport(\"../csvdb\",\"idxstats_reads_per_chromosome\")\n\n\n\n\n", "_____no_output_____" ] ], [ [ "<script>\n $(document).ready(function(){\n $('div.prompt').hide();\n $('div.back-to-top').hide();\n $('nav#menubar').hide();\n $('.breadcrumb').hide();\n $('.hidden-print').hide();\n });\n</script>\n\n<footer id=\"attribution\" style=\"float:right; color:#999; background:#fff;\">\nCreated with Jupyter,by Reshma.\n</footer>", "_____no_output_____" ] ] ]
[ "raw", "markdown", "code", "raw" ]
[ [ "raw", "raw" ], [ "markdown" ], [ "code" ], [ "raw" ] ]
cb411e4fa882c6d4bc111ce5d589d17e84cacbed
54,321
ipynb
Jupyter Notebook
Untitled Folder 2/02-Future_Value.ipynb
Savas21/FinancialAnalysis
d77116bfce2a5d2945ac59ed74c45f88449a0339
[ "MIT" ]
null
null
null
Untitled Folder 2/02-Future_Value.ipynb
Savas21/FinancialAnalysis
d77116bfce2a5d2945ac59ed74c45f88449a0339
[ "MIT" ]
null
null
null
Untitled Folder 2/02-Future_Value.ipynb
Savas21/FinancialAnalysis
d77116bfce2a5d2945ac59ed74c45f88449a0339
[ "MIT" ]
null
null
null
144.087533
24,532
0.891037
[ [ [ "# Future Value", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n%matplotlib inline", "_____no_output_____" ], [ "# Future value(FV), Interest Rate(r), Time(t) ,Present Value(PV)\nPV = 50000\nr = 0.05\nT = 1\nFV = PV * (1+ r) ** T\nprint(round(FV,2))", "52500.0\n" ], [ "# evaluate pv with custom func\ndef fv(PV, r, T):\n return round(PV * (1+ r) ** T, 2)", "_____no_output_____" ], [ "T = 5\nfv(PV,r,T)", "_____no_output_____" ], [ "# adding currency symbol to fuctıon\ndef fv(PV, r, T):\n return \"${:,.2f}\".format(PV * (1+ r) ** T, 2)", "_____no_output_____" ], [ "T = 5\nfv(PV,r,T)", "_____no_output_____" ], [ "# ending balance in 20 years\nending_balance = np.ones(20)*50000 \n\nfor time in range(1,len(ending_balance)):\n ending_balance[time] = round(ending_balance[time] * (1+r)**time,2)\n ", "_____no_output_____" ], [ "ending_balance", "_____no_output_____" ], [ "plt.plot(ending_balance)\nplt.title(\"Ending Balance Over Time\")\nplt.grid(ls=\"--\")\nplt.xticks([0,5,10,15,20])", "_____no_output_____" ], [ "np.random.normal(.095,.185)", "_____no_output_____" ], [ "ending_balance = np.ones(20)*50000\n\nfor T in range(len(ending_balance)):\n r = np.random.normal(.095, .185)\n print(r)\n if T > 0:\n ending_balance[T] = round(ending_balance[T-1]*(1+r))\n \n else:\n ending_balance[T] = ending_balance[T]\n \nending_balance\n ", "-0.10174211052013765\n0.05572913834390516\n0.12453892756109512\n0.08004174686455764\n-0.0730498083831147\n-0.14632029772446284\n0.22980163481582336\n-0.12463937504740638\n-0.06143677059238414\n0.031121018081650284\n0.16653205444191804\n0.14619472406823397\n0.15988503603415938\n0.07432303635596914\n0.2664954833642893\n-0.02154559012584116\n-0.072099550753259\n-0.138303886924066\n0.13662251554725174\n0.2912962962551317\n" ], [ "plt.plot(ending_balance)\nplt.title(\"Ending Balance Over Time\")\nplt.grid(ls=\"--\")\nplt.xticks([0,5,10,15,20])\n", "_____no_output_____" ], [ "# using Numpy fv function\nr = .05\nT = 1\npv = 50000\nnp.fv(r, T,-500,-pv,when=\"begin\")", "/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:5: DeprecationWarning: numpy.fv is deprecated and will be removed from NumPy 1.20. Use numpy_financial.fv instead (https://pypi.org/project/numpy-financial/).\n \"\"\"\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb41274ef059fc0ee820058f70de4e4c418544b1
104,777
ipynb
Jupyter Notebook
notebook.ipynb
Subhrajit91939/Handwashing-Data-Analysis
3a833b116574d66dec72cda7803b3add17e6eb4b
[ "CC0-1.0" ]
null
null
null
notebook.ipynb
Subhrajit91939/Handwashing-Data-Analysis
3a833b116574d66dec72cda7803b3add17e6eb4b
[ "CC0-1.0" ]
null
null
null
notebook.ipynb
Subhrajit91939/Handwashing-Data-Analysis
3a833b116574d66dec72cda7803b3add17e6eb4b
[ "CC0-1.0" ]
null
null
null
177.889643
33,032
0.904225
[ [ [ "## 1. Meet Dr. Ignaz Semmelweis\n<p><img style=\"float: left;margin:5px 20px 5px 1px\" src=\"https://assets.datacamp.com/production/project_20/img/ignaz_semmelweis_1860.jpeg\"></p>\n<!--\n<img style=\"float: left;margin:5px 20px 5px 1px\" src=\"https://assets.datacamp.com/production/project_20/datasets/ignaz_semmelweis_1860.jpeg\">\n-->\n<p>This is Dr. Ignaz Semmelweis, a Hungarian physician born in 1818 and active at the Vienna General Hospital. If Dr. Semmelweis looks troubled it's probably because he's thinking about <em>childbed fever</em>: A deadly disease affecting women that just have given birth. He is thinking about it because in the early 1840s at the Vienna General Hospital as many as 10% of the women giving birth die from it. He is thinking about it because he knows the cause of childbed fever: It's the contaminated hands of the doctors delivering the babies. And they won't listen to him and <em>wash their hands</em>!</p>\n<p>In this notebook, we're going to reanalyze the data that made Semmelweis discover the importance of <em>handwashing</em>. Let's start by looking at the data that made Semmelweis realize that something was wrong with the procedures at Vienna General Hospital.</p>", "_____no_output_____" ] ], [ [ "# importing modules\n# ... YOUR CODE FOR TASK 1 ...\nimport pandas as pd\n# Read datasets/yearly_deaths_by_clinic.csv into yearly\nyearly = pd.read_csv(\"yearly_deaths_by_clinic.csv\")\n\n# Print out yearly\n# ... YOUR CODE FOR TASK 1 ...\nprint(yearly)", " year births deaths clinic\n0 1841 3036 237 clinic 1\n1 1842 3287 518 clinic 1\n2 1843 3060 274 clinic 1\n3 1844 3157 260 clinic 1\n4 1845 3492 241 clinic 1\n5 1846 4010 459 clinic 1\n6 1841 2442 86 clinic 2\n7 1842 2659 202 clinic 2\n8 1843 2739 164 clinic 2\n9 1844 2956 68 clinic 2\n10 1845 3241 66 clinic 2\n11 1846 3754 105 clinic 2\n" ] ], [ [ "## 2. The alarming number of deaths\n<p>The table above shows the number of women giving birth at the two clinics at the Vienna General Hospital for the years 1841 to 1846. You'll notice that giving birth was very dangerous; an <em>alarming</em> number of women died as the result of childbirth, most of them from childbed fever.</p>\n<p>We see this more clearly if we look at the <em>proportion of deaths</em> out of the number of women giving birth. Let's zoom in on the proportion of deaths at Clinic 1.</p>", "_____no_output_____" ] ], [ [ "# Calculate proportion of deaths per no. births\n# ... YOUR CODE FOR TASK 2 ...\nyearly['proportion_deaths'] = yearly['deaths']/yearly['births']\n# Extract clinic 1 data into yearly1 and clinic 2 data into yearly2\nyearly1 = yearly[yearly['clinic'] == 'clinic 1']\nyearly2 = yearly[yearly['clinic'] == 'clinic 2']\n\n# Print out yearly1\n# ... YOUR CODE FOR TASK 2 ...\nprint(yearly1)", " year births deaths clinic proportion_deaths\n0 1841 3036 237 clinic 1 0.078063\n1 1842 3287 518 clinic 1 0.157591\n2 1843 3060 274 clinic 1 0.089542\n3 1844 3157 260 clinic 1 0.082357\n4 1845 3492 241 clinic 1 0.069015\n5 1846 4010 459 clinic 1 0.114464\n" ] ], [ [ "## 3. Death at the clinics\n<p>If we now plot the proportion of deaths at both clinic 1 and clinic 2 we'll see a curious pattern...</p>", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\n# This makes plots appear in the notebook\n%matplotlib inline\n\n# Plot yearly proportion of deaths at the two clinics\n# ... 
YOUR CODE FOR TASK 3 ...\nax = yearly1.plot(y=\"proportion_deaths\", x=\"year\", label=\"Yearly 1\")\nyearly2.plot(y=\"proportion_deaths\", x=\"year\", label= \"Yearly 2\", ax=ax)\nax.set_ylabel(\"Proportion deaths\")", "_____no_output_____" ] ], [ [ "## 4. The handwashing begins\n<p>Why is the proportion of deaths constantly so much higher in Clinic 1? Semmelweis saw the same pattern and was puzzled and distressed. The only difference between the clinics was that many medical students served at Clinic 1, while mostly midwife students served at Clinic 2. While the midwives only tended to the women giving birth, the medical students also spent time in the autopsy rooms examining corpses. </p>\n<p>Semmelweis started to suspect that something on the corpses, spread from the hands of the medical students, caused childbed fever. So in a desperate attempt to stop the high mortality rates, he decreed: <em>Wash your hands!</em> This was an unorthodox and controversial request, nobody in Vienna knew about bacteria at this point in time. </p>\n<p>Let's load in monthly data from Clinic 1 to see if the handwashing had any effect.</p>", "_____no_output_____" ] ], [ [ "# Read datasets/monthly_deaths.csv into monthly\nmonthly = pd.read_csv(\"monthly_deaths.csv\" , parse_dates = [\"date\"])\n\n# Calculate proportion of deaths per no. births\n# ... YOUR CODE FOR TASK 4 ...\nmonthly['proportion_deaths'] = monthly['deaths']/ monthly['births']\n# Print out the first rows in monthly\n# ... YOUR CODE FOR TASK 4 ...\nprint(monthly.head(1))", " date births deaths proportion_deaths\n0 1841-01-01 254 37 0.145669\n" ] ], [ [ "## 5. The effect of handwashing\n<p>With the data loaded we can now look at the proportion of deaths over time. In the plot below we haven't marked where obligatory handwashing started, but it reduced the proportion of deaths to such a degree that you should be able to spot it!</p>", "_____no_output_____" ] ], [ [ "# Plot monthly proportion of deaths\n# ... YOUR CODE FOR TASK 5 ...\nax = monthly.plot(y=\"proportion_deaths\", x=\"date\")\nplt.ylabel(\"Proportion deaths\")", "_____no_output_____" ] ], [ [ "## 6. The effect of handwashing highlighted\n<p>Starting from the summer of 1847 the proportion of deaths is drastically reduced and, yes, this was when Semmelweis made handwashing obligatory. </p>\n<p>The effect of handwashing is made even more clear if we highlight this in the graph.</p>", "_____no_output_____" ] ], [ [ "# Date when handwashing was made mandatory\nimport pandas as pd\nhandwashing_start = pd.to_datetime('1847-06-01')\n\n# Split monthly into before and after handwashing_start\nbefore_washing = monthly[ monthly[\"date\"] < handwashing_start]\nafter_washing = monthly[ monthly[\"date\"] >= handwashing_start]\n\n# Plot monthly proportion of deaths before and after handwashing\n# ... YOUR CODE FOR TASK 6 ...\nax = before_washing.plot(y=\"proportion_deaths\", x=\"date\", label=\"before_washing\")\nafter_washing.plot(y=\"proportion_deaths\", x=\"date\", label= \"after_washing\",ax=ax)\nplt.ylabel(\"Proportion deaths\")", "_____no_output_____" ] ], [ [ "## 7. More handwashing, fewer deaths?\n<p>Again, the graph shows that handwashing had a huge effect. 
How much did it reduce the monthly proportion of deaths on average?</p>", "_____no_output_____" ] ], [ [ "# Difference in mean monthly proportion of deaths due to handwashing\nbefore_proportion = before_washing['proportion_deaths']\nafter_proportion = after_washing['proportion_deaths']\nmean_diff = after_proportion.mean() - before_proportion.mean()\nmean_diff", "_____no_output_____" ] ], [ [ "## 8. A Bootstrap analysis of Semmelweis handwashing data\n<p>It reduced the proportion of deaths by around 8 percentage points! From 10% on average to just 2% (which is still a high number by modern standards). </p>\n<p>To get a feeling for the uncertainty around how much handwashing reduces mortalities we could look at a confidence interval (here calculated using the bootstrap method).</p>", "_____no_output_____" ] ], [ [ "# A bootstrap analysis of the reduction of deaths due to handwashing\nboot_mean_diff = []\nfor i in range(3000):\n boot_before = before_proportion.sample(frac=1, replace= True)\n boot_after = after_proportion.sample(frac=1, replace= True)\n boot_mean_diff.append(boot_after.mean() - boot_before.mean())\n\n# Calculating a 95% confidence interval from boot_mean_diff \nconfidence_interval = pd.Series(boot_mean_diff).quantile([0.025, 0.975])\nconfidence_interval\n", "_____no_output_____" ] ], [ [ "## 9. The fate of Dr. Semmelweis\n<p>So handwashing reduced the proportion of deaths by between 6.7 and 10 percentage points, according to a 95% confidence interval. All in all, it would seem that Semmelweis had solid evidence that handwashing was a simple but highly effective procedure that could save many lives.</p>\n<p>The tragedy is that, despite the evidence, Semmelweis' theory — that childbed fever was caused by some \"substance\" (what we today know as <em>bacteria</em>) from autopsy room corpses — was ridiculed by contemporary scientists. The medical community largely rejected his discovery and in 1849 he was forced to leave the Vienna General Hospital for good.</p>\n<p>One reason for this was that statistics and statistical arguments were uncommon in medical science in the 1800s. Semmelweis only published his data as long tables of raw data, but he didn't show any graphs nor confidence intervals. If he would have had access to the analysis we've just put together he might have been more successful in getting the Viennese doctors to wash their hands.</p>", "_____no_output_____" ] ], [ [ "# The data Semmelweis collected points to that:\ndoctors_should_wash_their_hands = True", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb41329bfb3f9c860ea872e42a8a4256fbb47d5d
875
ipynb
Jupyter Notebook
HelloGithub.ipynb
rafkaw/dw_matrix
2f76464a8f3af8baf4ca825a2b6c571989d69f51
[ "MIT" ]
null
null
null
HelloGithub.ipynb
rafkaw/dw_matrix
2f76464a8f3af8baf4ca825a2b6c571989d69f51
[ "MIT" ]
null
null
null
HelloGithub.ipynb
rafkaw/dw_matrix
2f76464a8f3af8baf4ca825a2b6c571989d69f51
[ "MIT" ]
null
null
null
875
875
0.690286
[ [ [ "print(\"Hello The Ministry of Silly Walks!\")", "Hello The Ministry of Silly Walks!\n" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cb413733f047c382722088e029cf1962f32070e6
358,829
ipynb
Jupyter Notebook
main.ipynb
A2Zntu/OptionStrategy
dc4d2a96bb8d82461fa2422d16a286132833099d
[ "MIT" ]
null
null
null
main.ipynb
A2Zntu/OptionStrategy
dc4d2a96bb8d82461fa2422d16a286132833099d
[ "MIT" ]
null
null
null
main.ipynb
A2Zntu/OptionStrategy
dc4d2a96bb8d82461fa2422d16a286132833099d
[ "MIT" ]
null
null
null
198.907428
44,572
0.885901
[ [ [ "## Import the scripts ", "_____no_output_____" ] ], [ [ "%run Implied_Volatility.ipynb\n%run Option_Greeks.ipynb", "_____no_output_____" ] ], [ [ "## Draw the Impied Volatility Graph of 0130 and 0131", "_____no_output_____" ] ], [ [ "date_ = '0130'\nS = todayStockPrice(date = date_)\nlist_StockPrices = moneyness_list(S, gapType = \"month\", gapNum = 3) # only 3 OTM price on monthly basis\n\n# Split the df to df_call and df_put\ndf = df_generate(product = 'TXO', date = date_ )\ndf_put = df[df.買賣權 == 'Put'].reset_index(drop=True)\ndf_call = df[df.買賣權 == 'Call'].reset_index(drop=True)\n\n# append the IV \ndf_put = Flag_Moneyness(df_put, S = S, code = 'Put')\nlist_IV_put, categoryNAPut = IV_List(df = df_put, Stock = S, d = date_ , flag = 'Put')\ndf_put[\"IV\"] = list_IV_put\n\ndf_call = Flag_Moneyness(df_call, S = S, code = 'Call')\nlist_IV_call, categoryNACall = IV_List(df = df_call, Stock = S, d = date_ , flag = 'Call')\ndf_call[\"IV\"] = list_IV_call\n\n# Combine call and put with only OTM \ndf_OTM = combine_OTM(df_call, df_put, list_StockPrices)\nplot_IV(df = df_OTM, S = S, date = date_)\n", "_____no_output_____" ], [ "df_OTM", "_____no_output_____" ] ], [ [ "## Call- Put Implied Volatility Spread", "_____no_output_____" ] ], [ [ "CPIV = []\nfor ivc, ivp in zip(list_IV_call,list_IV_put):\n if type(ivc) == str or type(ivp) == str: \n CPIV.append(\"NA\")\n else: \n CPIV.append(ivc - ivp)", "_____no_output_____" ], [ "df_CPIV = pd.DataFrame({'CPIV': CPIV})", "_____no_output_____" ], [ "K_cut = list(df_call[\"履約價\"][10:25])\nCPIV_cut = CPIV[10:25]\nplt.style.use('ggplot')\nplt.figure(figsize=(10,5))\nplt.plot(K_cut, CPIV_cut, marker='o')\nplt.axvline(x = S, linestyle = 'dashed', color = 'black')\nfor a, b in zip(K_cut, CPIV_cut): \n plt.text(a, b, str(round(b, 3)))\ntitle_name = \"CPIV Spread_\" + date_\nplt.title(title_name)\nplt.xlabel('Excercise Price', fontsize=14)\nplt.ylabel('Implied Volatility Spread', fontsize=14)\nplt.savefig(os.path.join(work_dir, 'Graph', title_name))\nplt.show()", "_____no_output_____" ], [ "date_ = '0130'\nS = todayStockPrice(date = date_)\nlist_StockPrices = moneyness_list(S, gapType = \"month\", gapNum = 3) # only 3 OTM price on monthly basis\n\n# Split the df to df_call and df_put\ndf = df_generate(product = 'TXO', date = date_ )\ndf_put = df[df.買賣權 == 'Put'].reset_index(drop=True)\ndf_call = df[df.買賣權 == 'Call'].reset_index(drop=True)\n\n# append the IV \ndf_put = Flag_Moneyness(df_put, S = S, code = 'Put')\nlist_IV_put, categoryNAPut = IV_List(df = df_put, Stock = S, d = date_ , flag = 'Put')\ndf_put[\"IV\"] = list_IV_put\n\ndf_call = Flag_Moneyness(df_call, S = S, code = 'Call')\nlist_IV_call, categoryNACall = IV_List(df = df_call, Stock = S, d = date_ , flag = 'Call')\ndf_call[\"IV\"] = list_IV_call\n\n# Combine call and put with only OTM \ndf_OTM = combine_OTM(df_call, df_put, list_StockPrices)\nplot_IV(df = df_OTM, S = S, date = date_)\n", "_____no_output_____" ], [ "date_ = '0131'\nS = todayStockPrice(date = date_)\nlist_StockPrices = moneyness_list(S, gapType = \"month\", gapNum = 3) # only 3 OTM price on monthly basis\n\n# Split the df to df_call and df_put\ndf = df_generate(product = 'TXO', date = date_ )\ndf_put = df[df.買賣權 == 'Put'].reset_index(drop=True)\ndf_call = df[df.買賣權 == 'Call'].reset_index(drop=True)\n\n# append the IV \ndf_put = Flag_Moneyness(df_put, S = S, code = 'Put')\nlist_IV_put, categoryNAPut = IV_List(df = df_put, Stock = S, d = date_ , flag = 'Put')\ndf_put[\"IV\"] = list_IV_put\n\ndf_call = Flag_Moneyness(df_call, S = S, 
code = 'Call')\nlist_IV_call, categoryNACall = IV_List(df = df_call, Stock = S, d = date_ , flag = 'Call')\ndf_call[\"IV\"] = list_IV_call\n\n# Combine call and put with only OTM \ndf_OTM_2 = combine_OTM(df_call, df_put, list_StockPrices)\nplot_IV(df = df_OTM_2, S = S, date = date_)", "_____no_output_____" ], [ "K1 = list(df_OTM[\"履約價\"])\nIV1 = list(df_OTM[\"IV\"])\nK2 = list(df_OTM_2[\"履約價\"])\nIV2 = list(df_OTM_2[\"IV\"])\nS1 = todayStockPrice(date = '0130')\nS2 = todayStockPrice(date = '0131')\nplt.style.use('ggplot')\nmeanIV1 = mean(IV1)\nmeanIV2 = mean(IV2)\nplt.figure(figsize=(10,5))\nplt.plot(K1, IV1, marker='o')\nplt.plot(K2, IV2, marker='o')\nplt.axvline(x = S1, linestyle = 'dashed', color = 'grey')\n\nfor a, b in zip(K1, IV1): \n plt.text(a, b, str(round(b, 3)))\nplt.axvline(x = S2, linestyle = 'dashed', color = 'black')\n\nfor a, b in zip(K2, IV2): \n plt.text(a, b, str(round(b, 3)))\ntitle_name = \"Implied Volatility Comparison\" \nplt.title(title_name)\nplt.xlabel('Excercise Price', fontsize=14)\nplt.ylabel('Implied Volatility', fontsize=14)\nplt.savefig(os.path.join(work_dir, 'Graph', title_name))\nplt.show()", "_____no_output_____" ], [ "## Des. Statistics of IV\ndef describe_list(list_iv, list_na):\n list_a = []\n for ele in list_iv:\n if not isinstance(ele, str): #NA value of Imp Vol is \"-\"\n list_a.append(ele)\n print(\"mean: \" , np.average(list_a))\n print(\"max: \", max(list_a))\n print(\"min: \", min(list_a))\n print(\"NA: \", sum(categoryNAPut))", "_____no_output_____" ], [ "describe_list(list_IV_put, categoryNAPut)", "mean: 0.2348773752865538\nmax: 0.3694234469998763\nmin: 0.13911491752984664\nNA: 20\n" ], [ "describe_list(list_IV_call, categoryNACall)", "mean: 0.2204522261876772\nmax: 0.604538921604526\nmin: 0.1408779988202344\nNA: 20\n" ] ], [ [ "## Calculate the Individual Option Greeks", "_____no_output_____" ] ], [ [ "s = 11368\nexp_date = '20200219'\neval_date = '20200130'\nrf = 0.0\ndiv = 0.0", "_____no_output_____" ], [ "# Build up a delta table\nlist_k = list(df_call[\"履約價\"])\ndelta_C = []\ndelta_P = []\nfor k in list_k:\n opt_C = Option(s=s, k=k, eval_date=eval_date, exp_date=exp_date, rf=rf, vol=vol, right='Call',\n div = div)\n delta_C.append(opt_C.get_greeks()[0])\n\n opt_P = Option(s=s, k=k, eval_date=eval_date, exp_date=exp_date, rf=rf, vol=vol, right='Put',\n div = div)\n delta_P.append(opt_P.get_greeks()[0])\n \ndf_delta = pd.DataFrame({'Excercise Price': list_k, 'Delta_C': delta_C, 'Delta_P': delta_P})\ndf_delta[10:25] # Search for certain area ", "_____no_output_____" ] ], [ [ "# Construct the Option Strategies ", "_____no_output_____" ], [ "### We expect the vega decrease, then we can construct the strategies with approximate delta neutral", "_____no_output_____" ], [ "### 1. Short Strangle: Sell 1 Call and Sell 1 Put ", "_____no_output_____" ] ], [ [ "df_call[df_call.履約價 == 11300] # delta_Call: 0.56", "_____no_output_____" ], [ "df_put[df_put.履約價 == 11500] # delta_Put: -0.58", "_____no_output_____" ], [ "d_option1 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Call', '履約價': '11300', \n '今日': '20200130', '部位': '-1', '結算價': '245.0', 'IV': '0.196352'}\nd_option2 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Put', '履約價': '11500', \n '今日': '20200130', '部位': '-1', '結算價': '265.0', 'IV': '0.179389'}\n\ndf_options_strangle = pd.DataFrame([d_option1, d_option2])\nopt_strat1 = Options_strategy(df_options_strangle)\ngreeks_strat1 = opt_strat1.get_greeks()\nopt_strat1.describe_portfolio()", "Aggr. Delta: 0.010757385562554789\nAggr. 
Gamma: -0.0015537833653484524\nAggr. Theta: 9.721860478906741\nAggr. Vega: -20.98393882857018\n" ], [ "print(\"max Payoff: \" , opt_strat1.get_maxPayoff())\nopt_strat1.portfolio_payoff(fileName = \"Short Strangle\")", "max Payoff: 310.0\n" ] ], [ [ "### 2. Long Condor: Short Strangle & Long Strangle", "_____no_output_____" ] ], [ [ "df_call[df_call.履約價 == 11100] # delta_Call: 0.27", "_____no_output_____" ], [ "df_put[df_put.履約價 == 11700] # delta_Put: -0.29 ", "_____no_output_____" ], [ "d_option3 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Call', '履約價': '11100', \n '今日': '20200130', '部位': '1', '結算價': '384.0', 'IV': '0.210976'}\nd_option4 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Put', '履約價': '11700', \n '今日': '20200130', '部位': '1', '結算價': '388.0', 'IV': '0.166417'}\n\ndf_options_condor = pd.DataFrame([d_option1, d_option2, d_option3, d_option4])\nopt_strat2 = Options_strategy(df_options_condor)\ngreeks_strat2 = opt_strat2.get_greeks()\nopt_strat2.describe_portfolio()", "Aggr. Delta: -0.034893577029446066\nAggr. Gamma: -0.00023181925762599353\nAggr. Theta: 1.3978179773737112\nAggr. Vega: -3.1809121974342727\n" ], [ "print(\"max Payoff: \" , opt_strat2.get_maxPayoff())\nopt_strat2.portfolio_payoff(fileName = \"Long Condor\")", "max Payoff: 138.0\n" ] ], [ [ "### 3. Bull Spread: Buy 1 Call (at lower K) and Sell 1 Call (at higher K)", "_____no_output_____" ] ], [ [ "df_call[df_call.履約價 == 11300]", "_____no_output_____" ], [ "df_call[df_call.履約價 == 11400]", "_____no_output_____" ], [ "d_option1 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Call', '履約價': '11300', \n '今日': '20200130', '部位': '1', '結算價': '245.0', 'IV': '0.196352'}\nd_option2 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Call', '履約價': '11400', \n '今日': '20200130', '部位': '-1', '結算價': '186.0', 'IV': '0.187406'}\n\ndf_options_strangle = pd.DataFrame([d_option1, d_option2])\nopt_strat3 = Options_strategy(df_options_strangle)\ngreeks_strat3 = opt_strat3.get_greeks()\nopt_strat3.describe_portfolio()", "Aggr. Delta: 0.07644224985971076\nAggr. Gamma: -4.92435994495548e-05\nAggr. Theta: -0.14644148375062027\nAggr. Vega: -0.18532054679650933\n" ], [ "print(\"max Payoff: \" , opt_strat3.get_maxPayoff())\nopt_strat3.portfolio_payoff(fileName = \"Bull Spread\")", "max Payoff: 41.0\n" ] ], [ [ "### if we offset the stratrgy tomorrow", "_____no_output_____" ], [ "## 1. Short Strangle", "_____no_output_____" ] ], [ [ "# 0130 \ncost_ss0130 = get_optionPrice(date = \"0130\", right = \"Call\", k = 11300) + get_optionPrice(date = \"0130\", right = \"Put\", k = 11500)\ncost_ss0130", "_____no_output_____" ], [ "# 0131\ncost_ss0131 = get_optionPrice(date = \"0131\", right = \"Call\", k = 11300) + get_optionPrice(date = \"0131\", right = \"Put\", k = 11500)\ncost_ss0131", "_____no_output_____" ], [ "# Short 1 call and 1 put and Buy back tomorrow\nPnl_ss = cost_ss0130 - cost_ss0131\nPnl_ss", "_____no_output_____" ] ], [ [ "## 2. 
Long Condor", "_____no_output_____" ] ], [ [ "cost_lc0130 = cost_ss0130 - get_optionPrice(date = \"0130\", right = \"Call\", k = 11100) - get_optionPrice(date = \"0130\", right = \"Put\", k = 11700)\ncost_lc0130", "_____no_output_____" ], [ "cost_lc0131 = cost_ss0131 - get_optionPrice(date = \"0131\", right = \"Call\", k = 11100) - get_optionPrice(date = \"0131\", right = \"Put\", k = 11700)\ncost_lc0131", "_____no_output_____" ], [ "# Long Condor and Sell back tomorrow\nPnl_lc = cost_lc0130 - cost_lc0131\nPnl_lc", "_____no_output_____" ], [ "call_price = 180 \nStock = 10125 \nK = 10100\nt = 5/252\nr = 0.0\nq = 0", "_____no_output_____" ], [ "call_iv = iv(price = call_price, \n flag = 'c', \n S = Stock, \n K = K, \n t = t, \n r = r,\n q = q)", "_____no_output_____" ], [ "call_iv", "_____no_output_____" ], [ "d_option7 = {'現貨價格': '10125', '到期日': '20200415', '買賣權': 'Call', '履約價': '10100', \n '今日': '20200408', '部位': '1', '結算價': '180.0', 'IV': '0.29425'}\n# d_option4 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Put', '履約價': '11700', \n# '今日': '20200130', '部位': '1', '結算價': '388.0', 'IV': '0.166417'}\n\ndf_options_condor = pd.DataFrame([d_option7])\nopt_strat7 = Options_strategy(df_options_condor)\ngreeks_strat7 = opt_strat7.get_greeks()\nopt_strat7.describe_portfolio()", "Aggr. Delta: 0.5320284163467872\nAggr. Gamma: 0.0009475698011600604\nAggr. Theta: -11.521561917263178\nAggr. Vega: 5.671359927446777\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb4142e098f2a3c8188b34bafca2bbd17b72c017
73,386
ipynb
Jupyter Notebook
Neural Network.ipynb
tobyvg/Neural-Network-finite-difference-
e4f348922f2538e708499b1df165d0c648373059
[ "MIT" ]
null
null
null
Neural Network.ipynb
tobyvg/Neural-Network-finite-difference-
e4f348922f2538e708499b1df165d0c648373059
[ "MIT" ]
null
null
null
Neural Network.ipynb
tobyvg/Neural-Network-finite-difference-
e4f348922f2538e708499b1df165d0c648373059
[ "MIT" ]
null
null
null
190.119171
31,960
0.88521
[ [ [ "# Neural Network for binary classification using finite difference approximation to update the weights, Leaky ReLu in between the layers and sigmoid for the output", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nimport matplotlib as mpl\nglobal_dpi = 120\nmpl.rcParams['figure.dpi']= global_dpi", "_____no_output_____" ] ], [ [ "# Generate dummy data", "_____no_output_____" ] ], [ [ "_input = []\noutput = []\ninput_dims = 5\nstd_dev = 0.15\nnumber_of_points = 200\nfor i in range(number_of_points):\n intie = np.random.randint(0,2)\n if intie == 1:\n output.append(intie)\n data = np.ones(input_dims)\n data = np.asarray([np.random.normal(intie,std_dev) for x in data])\n _input.append(copy.deepcopy(data))\n else:\n output.append(intie)\n data = np.ones(input_dims)\n data = np.asarray([np.random.normal(intie,std_dev) for x in data])\n _input.append(copy.deepcopy(data))\n_input = np.asarray(_input)\noutput = np.asarray(output)", "_____no_output_____" ] ], [ [ "# Normalize input per variable (column)", "_____no_output_____" ] ], [ [ "_input = _input.T\nfor i in _input:\n mini = np.min(i)\n maxi = np.max(i)\n for k in range(len(i)):\n i[k] = (i[k]-mini)/(maxi-mini)\n_input = _input.T", "_____no_output_____" ] ], [ [ "# Split dummy data", "_____no_output_____" ] ], [ [ "fraction = 0.7\nindex = number_of_points*0.7\nindex = int(np.floor(index))\ntrain_x = _input[:index,:]\ntrain_y = output[:index]\ntest_x = _input[index:,:]\ntest_y = output[index:]", "_____no_output_____" ] ], [ [ "# Neural Network for binary classification", "_____no_output_____" ] ], [ [ "class Neural_Network:\n def __init__(self,layers):\n self.layers = layers\n ################## Outputs\n self.losses = []\n self.accuracies = []\n self.test_accuracies = []\n self.test_losses = []\n #################################\n ###################### Initialize weights randomly\n weights = []\n for i in range(len(layers)-1):\n cols =layers[i]\n rows = layers[i+1]\n w = np.random.uniform(-1,1,size = (rows,cols))\n weights.append(copy.deepcopy(w))\n self.weights = weights\n #################################\n def leaky_relu(self,x):\n result = []\n for i in x:\n result.append(max(0.01*i,i))\n return np.asarray(result)\n def sigmoid(self,x):\n return 1/(1+np.exp(-x))\n def loss(self,p,y):\n for i in range(len(p)):\n ##tolerance\n p[i] = min(p[i],0.9995)\n p[i] = max(0.0005,p[i])\n ###\n return -(y*np.log(p)+(1-y)*np.log(1-p))\n def predict(self,x,weights = None, custom = False):\n if not custom:\n weights = self.weights\n predictions = []\n if len(x.shape) == 2:\n for b in x:\n cache = b\n counter = 0 \n for i in weights:\n cache = np.dot(i,cache)\n if counter < (len(weights)-1):\n cache = self.leaky_relu(cache)\n else:\n cache = self.sigmoid(cache)\n counter = counter + 1\n predictions.append(cache[0])\n elif len(x.shape) == 1:\n cache = x\n counter = 0 \n for i in weights:\n cache = np.dot(i,cache)\n if counter < (len(weights)-1):\n cache = self.leaky_relu(cache)\n else:\n cache = self.sigmoid(cache)\n counter = counter + 1\n predictions.append(cache[0])\n else:\n raise Exception('Unsupported input dimensions: ' + str(x.shape))\n return np.asarray(predictions)\n \n def gradients(self,x,y,derriv_step_size = 0.00001):\n ## calculate current loss\n predictions = self.predict(x)\n ##### Train losses\n current_loss = self.loss(predictions,y)\n current_loss = np.sum(current_loss) \n ##### Train accuracies\n current_acc = self.accuracy(predictions,y)\n #####\n init_gradients = []\n for i in 
self.weights:\n init_gradients.append(np.zeros((i.shape)))\n for i in range(len(self.weights)):\n for k in range(self.weights[i].shape[0]):\n for j in range(self.weights[i].shape[1]):\n weights = copy.deepcopy(self.weights)\n weights[i][k,j] += derriv_step_size\n predictions = self.predict(x,weights = weights,custom = True)\n new_loss = np.sum(self.loss(predictions,y))\n gradient = (new_loss-current_loss)/derriv_step_size\n init_gradients[i][k,j] = gradient\n return init_gradients,current_loss,current_acc\n def train(self,x,y,step_size = 0.005,iters =10,derriv_step_size = 0.00001, calc_test = False, x_test = None, y_test = None):\n for i in range(iters):\n ################ add all to lists\n if calc_test:\n predictions = self.predict(x_test)\n self.test_losses.append(np.sum(self.loss(predictions,y_test)))\n self.test_accuracies.append(self.accuracy(predictions,y_test) )\n grads, loss, acc = self.gradients(x,y,derriv_step_size = derriv_step_size)\n self.losses.append(loss)\n self.accuracies.append(acc)\n #################################\n #### Update weights\n for w,g in zip(self.weights,grads):\n w += - step_size*g\n #####\n \n ###### final outputs here\n predictions = self.predict(x)\n self.losses.append(np.sum(self.loss(predictions,y)))\n self.accuracies.append(self.accuracy(predictions,y))\n if calc_test:\n predictions = self.predict(x_test)\n self.test_losses.append(np.sum(self.loss(predictions,y_test))) \n self.test_accuracies.append(self.accuracy(predictions,y_test))\n ############### \n def accuracy(self,p,y):\n cache = copy.deepcopy(p)\n for i in range(len(cache)):\n if cache[i] >= 0.5:\n cache[i] = 1\n else:\n cache[i] = 0\n cache = [int(i) for i in cache]\n total = 0\n correct = 0\n for i,j in zip(cache,y):\n if i == j:\n correct += 1\n total += 1\n return correct/total", "_____no_output_____" ] ], [ [ "# Test Neural Network", "_____no_output_____" ] ], [ [ "### First layer of input dimension and last layer of dimension 1 (sigmoid)\nNN = Neural_Network([input_dims,3,1])\n####\nNN.train(train_x,train_y,iters = 1000,step_size = 0.005,derriv_step_size = 0.00001,\n calc_test = True, x_test = test_x, y_test = test_y)", "_____no_output_____" ], [ "plt.plot(NN.losses)\nplt.plot(NN.test_losses)\nplt.xlabel('Iterations')\nplt.ylabel('Loss')\nplt.legend(('Train','Validation'))\nplt.show()", "_____no_output_____" ], [ "plt.plot(NN.accuracies)\nplt.plot(NN.test_accuracies)\nplt.xlabel('Iterations')\nplt.ylabel('Accuracy')\nplt.legend(('Train','Validation'))\nplt.show()", "_____no_output_____" ] ], [ [ "# Weights", "_____no_output_____" ] ], [ [ "print(NN.weights)", "[array([[-0.39265484, -0.94490794, -0.43228004, -0.43080122, -0.78527367],\n [ 2.31859851, 0.18715056, -2.19213802, 1.75322351, -1.01385579],\n [ 2.87075192, 0.41176584, -4.29734622, 2.6360357 , -2.33780254]]), array([[-0.37061708, 3.43756294, -5.95920596]])]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb4148bf89fcd4ee60f6ca64dc11c3cdd578c9c8
190,389
ipynb
Jupyter Notebook
dataquest/notebooks/project_star_wars_analysis/Star_Wars_Analysis.ipynb
monocongo/datascience_portfolio
816028ac2dfe79a99af6f045e7d1eb18cbb017ab
[ "Unlicense" ]
null
null
null
dataquest/notebooks/project_star_wars_analysis/Star_Wars_Analysis.ipynb
monocongo/datascience_portfolio
816028ac2dfe79a99af6f045e7d1eb18cbb017ab
[ "Unlicense" ]
null
null
null
dataquest/notebooks/project_star_wars_analysis/Star_Wars_Analysis.ipynb
monocongo/datascience_portfolio
816028ac2dfe79a99af6f045e7d1eb18cbb017ab
[ "Unlicense" ]
null
null
null
64.060902
30,550
0.508527
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "star_wars = pd.read_csv(\"star_wars.csv\", encoding=\"ISO-8859-1\")\nstar_wars.head(3)", "_____no_output_____" ] ], [ [ "Remove all rows where the `RespondentID` column is not null (NaN).", "_____no_output_____" ] ], [ [ "star_wars = star_wars[pd.notnull(star_wars[\"RespondentID\"])]\nstar_wars.head()", "_____no_output_____" ] ], [ [ "Convert column string values from \"Yes\"/\"No\" to corresponding booleans by mapping a dictionary to each value of the Series:", "_____no_output_____" ] ], [ [ "yes_no = {\n \"Yes\": True,\n \"No\": False\n}\nstar_wars[\"Have you seen any of the 6 films in the Star Wars franchise?\"] = \\\n star_wars[\"Have you seen any of the 6 films in the Star Wars franchise?\"].map(yes_no)\nstar_wars[\"Do you consider yourself to be a fan of the Star Wars film franchise?\"] = \\\n star_wars[\"Do you consider yourself to be a fan of the Star Wars film franchise?\"].map(yes_no)", "_____no_output_____" ], [ "star_wars.head()", "_____no_output_____" ], [ "star_wars.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1186 entries, 1 to 1186\nData columns (total 38 columns):\nRespondentID 1186 non-null float64\nHave you seen any of the 6 films in the Star Wars franchise? 1186 non-null bool\nDo you consider yourself to be a fan of the Star Wars film franchise? 836 non-null object\nWhich of the following Star Wars films have you seen? Please select all that apply. 673 non-null object\nUnnamed: 4 571 non-null object\nUnnamed: 5 550 non-null object\nUnnamed: 6 607 non-null object\nUnnamed: 7 758 non-null object\nUnnamed: 8 738 non-null object\nPlease rank the Star Wars films in order of preference with 1 being your favorite film in the franchise and 6 being your least favorite film. 835 non-null object\nUnnamed: 10 836 non-null object\nUnnamed: 11 835 non-null object\nUnnamed: 12 836 non-null object\nUnnamed: 13 836 non-null object\nUnnamed: 14 836 non-null object\nPlease state whether you view the following characters favorably, unfavorably, or are unfamiliar with him/her. 829 non-null object\nUnnamed: 16 831 non-null object\nUnnamed: 17 831 non-null object\nUnnamed: 18 823 non-null object\nUnnamed: 19 825 non-null object\nUnnamed: 20 814 non-null object\nUnnamed: 21 826 non-null object\nUnnamed: 22 820 non-null object\nUnnamed: 23 812 non-null object\nUnnamed: 24 827 non-null object\nUnnamed: 25 830 non-null object\nUnnamed: 26 821 non-null object\nUnnamed: 27 814 non-null object\nUnnamed: 28 826 non-null object\nWhich character shot first? 828 non-null object\nAre you familiar with the Expanded Universe? 828 non-null object\nDo you consider yourself to be a fan of the Expanded Universe?ξ 213 non-null object\nDo you consider yourself to be a fan of the Star Trek franchise? 1068 non-null object\nGender 1046 non-null object\nAge 1046 non-null object\nHousehold Income 858 non-null object\nEducation 1036 non-null object\nLocation (Census Region) 1043 non-null object\ndtypes: bool(1), float64(1), object(36)\nmemory usage: 353.3+ KB\n" ] ], [ [ "Convert column string values from the name of the movie to True or Nan to False. 
Use a mapping dictionary for this whcih we'll create from the column name (if the column name is the value as well then it corresponds to True, otherwise it's NaN and corresponds to False):", "_____no_output_____" ] ], [ [ "print(\"BEFORE MAPPING\")\nstar_wars[star_wars.columns[3:9]]", "BEFORE MAPPING\n" ], [ "def t_or_f(value):\n if value is np.NaN:\n return False\n else:\n return True\n \nfor col in star_wars.columns[3:9]:\n# mapper = {col : True, np.NaN: False}\n star_wars[col] = star_wars[col].map(t_or_f)", "_____no_output_____" ], [ "star_wars[star_wars.columns[3:9]]", "_____no_output_____" ], [ "star_wars = star_wars.rename(columns={\n \"Which of the following Star Wars films have you seen? Please select all that apply.\": \"seen1\",\n \"Unnamed: 4\": \"seen2\",\n \"Unnamed: 5\": \"seen3\",\n \"Unnamed: 6\": \"seen4\",\n \"Unnamed: 7\": \"seen5\",\n \"Unnamed: 8\": \"seen6\",\n \"Please rank the Star Wars films in order of preference with 1 being your favorite film in the franchise and 6 being your least favorite film.\": \"favorite1\",\n \"Unnamed: 10\": \"favorite2\",\n \"Unnamed: 11\": \"favorite3\",\n \"Unnamed: 12\": \"favorite4\",\n \"Unnamed: 13\": \"favorite5\",\n \"Unnamed: 14\": \"favorite6\"\n})", "_____no_output_____" ], [ "star_wars.head(3)", "_____no_output_____" ], [ "star_wars[star_wars.columns[9:15]] = star_wars[star_wars.columns[9:15]].astype(float)", "_____no_output_____" ], [ "means = star_wars[star_wars.columns[9:15]].mean(axis=0)\n%matplotlib inline\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\nax = sns.barplot(x=star_wars.columns[9:15], \n y=means)", "_____no_output_____" ], [ "seens = star_wars[star_wars.columns[3:9]].sum()", "_____no_output_____" ], [ "ax = sns.barplot(x=star_wars.columns[3:9], \n y=seens)", "_____no_output_____" ], [ "males = star_wars[star_wars[\"Gender\"] == \"Male\"]\nfemales = star_wars[star_wars[\"Gender\"] == \"Female\"]\n\nmeans_female = females[females.columns[9:15]].mean(axis=0)\nmeans_male = males[males.columns[9:15]].mean(axis=0)\n\nseens_female = females[females.columns[3:9]].sum()\nseens_male = males[males.columns[3:9]].sum()\n\nfig = plt.figure(figsize=(12, 9))\nax1 = fig.add_subplot(221)\nax1.set_title(\"Male Ranking\")\nax2 = fig.add_subplot(223)\nax2.set_title(\"Male Totals\")\nax3 = fig.add_subplot(222)\nax3.set_title(\"Female Ranking\")\nax4 = fig.add_subplot(224)\nax4.set_title(\"Female Totals\")\n\nsns.barplot(x=star_wars.columns[9:15], \n y=means_male,\n ax=ax1)\nsns.barplot(x=star_wars.columns[9:15], \n y=means_female,\n ax=ax3)\nsns.barplot(x=star_wars.columns[3:9], \n y=seens_male,\n ax=ax2)\nsns.barplot(x=star_wars.columns[3:9], \n y=seens_female,\n ax=ax4)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb4158eedeb6b36b56d062408bb39ca7420f0d8e
266,355
ipynb
Jupyter Notebook
covariance_analysis.ipynb
linesn/xmen
65de1400e3b33f4584304176e2b90f8aa0e4e4ab
[ "MIT" ]
null
null
null
covariance_analysis.ipynb
linesn/xmen
65de1400e3b33f4584304176e2b90f8aa0e4e4ab
[ "MIT" ]
null
null
null
covariance_analysis.ipynb
linesn/xmen
65de1400e3b33f4584304176e2b90f8aa0e4e4ab
[ "MIT" ]
null
null
null
205.838485
205,210
0.813869
[ [ [ "<a href=\"https://colab.research.google.com/github/linesn/xmen/blob/main/covariance_analysis.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.datasets import make_classification", "_____no_output_____" ], [ "pd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)", "_____no_output_____" ], [ "# load the data\ncharacters = pd.read_csv('characters.csv')\ncv = pd.read_csv('character_visualization.csv')\ncb = pd.read_csv('comic_bechdel.csv')\ncovers = pd.read_csv('covers.csv')\nic = pd.read_csv('issue_collaborators.csv')\nlocations = pd.read_csv('locations.csv')\nxmen_bechdel = pd.read_csv('xmen_bechdel.csv')", "_____no_output_____" ], [ "characters.head()", "_____no_output_____" ], [ "# create x and Y matrices to store the data\nx = characters[['rendered_unconcious']].to_numpy()\ny = characters[['captured']].to_numpy()", "_____no_output_____" ], [ "# convert the target variable (X-Man character names) to integer representations\ncharacter_name = characters['character'].unique()\ncharacter_name_dict = dict(zip(set(character_name), range(len(character_name))))\ncharacters['character'] = characters['character'].apply(lambda x: character_name_dict[x])", "_____no_output_____" ], [ "# take a subset of the features\nmy_cols = ['character', 'rendered_unconcious', 'captured',\n 'declared_dead', 'redressed', 'depowered', 'clothing_torn',\n 'subject_to_torture', 'quits_team', 'surrenders',\n 'number_of_kills_humans', 'number_of_kills_non_humans','shower_number_of_panels_shower_lasts',\n 'bath_number_of_panels_bath_lasts', 'depicted_eating_food',\n 'visible_tears_number_of_panels',\n 'visible_tears_number_of_intances']", "_____no_output_____" ], [ "# subset the dataframe on the columns we selected\ncharacters_subset = characters[my_cols]", "_____no_output_____" ], [ "# store the correlation matrix\ncorrMatrix = characters_subset.corr()", "_____no_output_____" ], [ "# display the correlation matrix\ncorrMatrix", "_____no_output_____" ], [ "# view a heatmap of the correlations\nfig, ax = plt.subplots(figsize=(15,15)) \nsn.heatmap(corrMatrix, annot=True)", "_____no_output_____" ], [ "# store teh X and y variables\nX = characters[my_cols]\ny = characters[['character']]", "_____no_output_____" ], [ "# perform 75/25 test train split\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclf = RandomForestClassifier(max_depth=10, random_state=0)\nclf.fit(X, np.ravel(y))\nclf.score(X_test, y_test)", "_____no_output_____" ], [ "# perform 10 fold cross validation\nscores = cross_val_score(clf, X, np.ravel(y), cv=10)", "_____no_output_____" ], [ "# view the scores for the 10 folds\nscores", "_____no_output_____" ], [ "# view the average scores for the 10 folds\nnp.average(scores)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb416734fb91ebd7658445f28a02b4a95f06bac4
92,134
ipynb
Jupyter Notebook
Algo1 - Logistic Regression/Logistic Regression.ipynb
frostace/BinaryClassification
7e1ede6c49f8e7d14c7dfc5dc1cbf1f27f8a6a3d
[ "MIT" ]
1
2019-11-19T07:52:22.000Z
2019-11-19T07:52:22.000Z
Algo1 - Logistic Regression/Logistic Regression.ipynb
frostace/BinaryClassification
7e1ede6c49f8e7d14c7dfc5dc1cbf1f27f8a6a3d
[ "MIT" ]
null
null
null
Algo1 - Logistic Regression/Logistic Regression.ipynb
frostace/BinaryClassification
7e1ede6c49f8e7d14c7dfc5dc1cbf1f27f8a6a3d
[ "MIT" ]
null
null
null
126.557692
40,780
0.811231
[ [ [ "# Import lib\n# ===========================================================\nimport csv\nimport pandas as pd\nimport numpy as np\nimport random\nimport time\nimport collections\nimport math\nimport sys\nfrom tqdm import tqdm\nfrom time import sleep\n\nimport matplotlib.pyplot as plt\n# %matplotlib inline\nplt.style.use('fivethirtyeight')\n\nfrom datascience import *\nfrom scipy import stats\n\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\n# from statsmodels.genmod.families.links import logit\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score, roc_curve", "_____no_output_____" ], [ "# Initialize useful data\n# ===========================================================\ndf = pd.read_csv('clinvar_conflicting_clean.csv', low_memory=False)\ndf = df.fillna(value=0)\n\n# resample to get a balanced dataset\ndf_zero = df.loc[df['CLASS'] == 0]\ndf_zero = df_zero.sample(n=1000)\ndf_one = df.loc[df['CLASS'] == 1]\ndf_one = df_one.sample(n=1000)\n\n# concatenate and reallocate all data\ndf = pd.concat([df_zero, df_one])\ndf = df.sample(n = df.shape[0])\nall_rows = df.values.tolist()\nrow_num = len(all_rows)\ndf.head()", "_____no_output_____" ], [ "# Divide whole dataset into Input and Output\n# ===========================================================\n# Features - all columns except 'CLASS'\n# Target label - 'CLASS' column\nX = df.drop('CLASS', axis=1)\ny = df['CLASS']\n\n# One hot encoding\nX = pd.get_dummies(X, drop_first=True)\ny = pd.get_dummies(y, drop_first=True)\n\n# Train/Test split\ntrain_X, test_X, train_y, test_y = train_test_split(X, y)\n\n# Normalize using StandardScaler\nscaler = StandardScaler()\ntrain_X = scaler.fit_transform(train_X)\ntest_X = scaler.transform(test_X)", "_____no_output_____" ], [ "# Train Model\n# ===========================================================\nmodel = LogisticRegression()\n\nstart = time.time()\n\nmodel.fit(train_X, train_y)\npred_y = model.predict(test_X)\nscore = accuracy_score(test_y, pred_y)\n\nend = time.time()\nprint(\"Logistic Regression Model Trained! Time: %.03fs\" % (end - start))", "/Users/frostace/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/Users/frostace/anaconda3/lib/python3.7/site-packages/sklearn/utils/validation.py:724: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n" ], [ "# Compare Actual label and Predicted label\n# ===========================================================\npred_score = model.predict_proba(test_X)\nfpr, tpr, thresholds = roc_curve(test_y, pred_score[:,1])\nfinal = Table().with_column('IDX', [i for i in range(len(pred_score))])\nfinal = final.with_columns('ACT_CLASS', test_y.transpose().values.tolist()[0], 'PRE_CLASS', pred_score[:, 1])", "_____no_output_____" ], [ "final.show(5)", "_____no_output_____" ], [ "# Compute TN, TP, FN, FP, etc.\n# ===========================================================\nROC = Table(make_array('CUTOFF', 'TN', 'FN', 'FP', 'TP', 'ACC'))\nstep_size = 0.05\nfor cutoff in np.arange(0, 1 + step_size, step_size):\n temp_final = final.with_column('INDICATE', final.apply(lambda x, y: (int(x >= cutoff) << 1) + y, 'PRE_CLASS', 'ACT_CLASS'))\n # 00(0) -> TN\n # 01(1) -> FN\n # 10(2) -> FP\n # 11(3) -> TP\n group = temp_final.group('INDICATE')\n indicator = group.column(0)\n counts = group.column(1)\n# print(indicator, counts)\n output = [cutoff]\n idx = 0\n for i in range(4):\n# print(counts[idx])\n if i in indicator:\n output.append(counts[idx])\n idx += 1\n else:\n output.append(0)\n acc = (output[1] + output[4]) / sum(output[1:])\n output.append(acc)\n ROC = ROC.with_row(output)\nROC = ROC.with_columns('SENSITIVITY', ROC.apply(lambda TP, FN: TP / (TP + FN + 0.00000001), 'TP', 'FN'))\nROC = ROC.with_columns('FPR', ROC.apply(lambda TN, FP: FP / (TN + FP + 0.00000001), 'TN', 'FP'))\nROC = ROC.with_column('FMEAS', ROC.apply(lambda TP, FP, FN: 2 * (TP / (TP + FN)) * (TP / (TP + FP)) / (TP / (TP + FN) + TP / (TP + FP)), 'TP', 'FP', 'FN'))", "/Users/frostace/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:29: RuntimeWarning: invalid value encountered in long_scalars\n" ], [ "ROC.show()", "_____no_output_____" ], [ "# Acc Curve by cutoff\n# ===========================================================\nfig = plt.figure()\nplt.xlabel('Cutoff')\nplt.ylabel('Accuracy')\nplt.title('Accuracy - Cutoff of Logistic Regression')\nplt.plot(np.arange(0, 1.1, 0.1), [0.5 for i in np.arange(0, 1.1, 0.1)], color='black')\nplt.plot(ROC.column('CUTOFF'), ROC.column('ACC'), color='orange')\nplt.axis([0, 1, 0, 1.1])\nplt.show()\nfig.savefig('Logistic ACC.png', bbox_inches='tight')", "_____no_output_____" ], [ "# ROC_CURVE\n# ===========================================================\nfig = plt.figure()\nplt.xlabel('False Positive Rate')\nplt.ylabel('Sensitivity')\nplt.title('ROC - Curve of Logistic Regression')\nplt.plot(np.arange(0, 1.1, 0.1), np.arange(0, 1.1, 0.1), color='black')\nplt.plot(ROC.column('FPR'), ROC.column('SENSITIVITY'), color='orange')\nplt.legend(['Null', 'Logistic'])\nplt.axis([0, 1, 0, 1.1])\nplt.show()\nfig.savefig('Logistic ROC.png', bbox_inches='tight')", "_____no_output_____" ], [ "# Compute AUC\n# ===========================================================\nlength = len(ROC.column('FPR'))\nauc = 0\nfor i in range(length - 1):\n auc += 0.5 * abs(ROC.column('FPR')[i + 1] - ROC.column('FPR')[i]) * (ROC.column('SENSITIVITY')[i] + ROC.column('SENSITIVITY')[i + 1])\nprint(\"auc = %.03f\" %auc)", "auc = 0.625\n" ], [ "acc, tpr, fpr = ROC.column('ACC'), ROC.column('SENSITIVITY'), ROC.column('FPR')", "_____no_output_____" ], [ "acc", "_____no_output_____" ], [ "tpr", "_____no_output_____" ], [ "fpr", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb4168acfe8c20a65c2ce54cef5527a58d8feee2
64,226
ipynb
Jupyter Notebook
ja/08_Introduction_to_NumPy.ipynb
mitmul/tutorials
d4c6b952eb53b1e1a867332ea08d8310b65c14bc
[ "BSD-3-Clause" ]
73
2019-04-10T03:25:32.000Z
2022-01-20T08:18:51.000Z
ja/08_Introduction_to_NumPy.ipynb
mitmul/tutorials
d4c6b952eb53b1e1a867332ea08d8310b65c14bc
[ "BSD-3-Clause" ]
29
2019-04-10T14:11:44.000Z
2019-10-11T04:58:25.000Z
ja/08_Introduction_to_NumPy.ipynb
mitmul/tutorials
d4c6b952eb53b1e1a867332ea08d8310b65c14bc
[ "BSD-3-Clause" ]
45
2019-04-10T09:06:53.000Z
2022-03-22T07:20:36.000Z
20.684702
195
0.471585
[ [ [ "# NumPy 入門\n\n本章では、Python で数値計算を高速に行うためのライブラリ([注釈1](#note1))である NumPy の使い方を学びます。\n本章の目標は、[単回帰分析と重回帰分析](https://tutorials.chainer.org/ja/07_Regression_Analysis.html)の章で学んだ重回帰分析を行うアルゴリズムを**NumPy を用いて実装すること**です。\n\nNumPy による**多次元配列(multidimensional array)**の扱い方を知ることは、他の様々なライブラリを利用する際に役立ちます。\n例えば、様々な機械学習手法を統一的なインターフェースで利用できる **scikit-learn** や、ニューラルネットワークの記述・学習を行うためのフレームワークである **Chainer** は、NumPy に慣れておくことでとても使いやすくなります。\n\nそれでは、まず NumPy の基礎的な使用方法を説明します。", "_____no_output_____" ], [ "## NumPy を使う準備\n\nNumPy は Google Colaboratory(以下 Colab)上のノートブックにはデフォルトでインストールされているため、ここではインストールの方法は説明しません。自分のコンピュータに NumPy をインストールしたい場合は、こちらを参照してください。:[Installing packages](https://scipy.org/install.html)\n\nColab 上ではインストール作業は必要ないものの、ノートブックを開いた時点ではまだ `numpy` モジュールが読み込まれていません。\nライブラリの機能を利用するには、そのライブラリが提供するモジュールを読み込む必要があります。\n\n例えば `A` というモジュールを読み込みたいとき、一番シンプルな記述方法は `import A` です。\nただ、もし `A` というモジュール名が長い場合は、`import A as B` のようにして別名を付けることができます。\n`as` を使って別名が与えられると、以降そのモジュールはその別名を用いて利用することができます。\n`import A as B` と書くと、`A` というモジュールは `B` という名前で利用することができます。\nこれは Python の機能なので NumPy 以外のモジュールを読み込みたい場合にも使用可能です。\n\n慣習的に、`numpy` にはしばしば `np` という別名が与えられます。\nコード中で頻繁に使用するモジュールには、短い別名をつけて定義することがよく行われます。\n\nそれでは、`numpy` を `np` という名前で `import` してみましょう。", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "## 多次元配列を定義する\n\nベクトル・行列・テンソルなどは、プログラミング上は多次元配列により表現でき、NumPy では ndarray というクラスで多次元配列を表現します([注釈2](#note2))。早速、これを用いてベクトルを定義してみましょう。", "_____no_output_____" ] ], [ [ "# ベクトルの定義\na = np.array([1, 2, 3])\n\na", "_____no_output_____" ] ], [ [ "このように、Python リスト `[1, 2, 3]` を `np.array()` に渡すことで、$[1, 2, 3]$ というベクトルを表す ndarray オブジェクトを作ることができます。\nndarray オブジェクトは `shape` という**属性 (attribute)** を持っており、その多次元配列の**形 (shape)** が保存されています。\n上で定義した `a` という ndarray オブジェクトの形を調べてみましょう。", "_____no_output_____" ] ], [ [ "a.shape", "_____no_output_____" ] ], [ [ "`(3,)` という要素数が 1 の Python のタプルが表示されています。\nndarray の形は、要素が整数のタプルで表され、要素数はその多次元配列の**次元数 (dimensionality, number of dimensions)** を表します。\n形は、その多次元配列の各次元の大きさを順に並べた整数のタプルになっています。\n\n次元数は、ndarray の `ndim` という属性に保存されています。", "_____no_output_____" ] ], [ [ "a.ndim", "_____no_output_____" ] ], [ [ "これは、`len(a.shape)` と同じ値になります。\n今、`a` という ndarray は 1 次元配列なので、`a.shape` は要素数が 1 のタプルで、`ndim` の値は 1 でした([注釈3](#note3))。", "_____no_output_____" ], [ "では次に、$3 \\times 3$ 行列を定義してみましょう。", "_____no_output_____" ] ], [ [ "# 行列の定義\nb = np.array(\n [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n)\n\nb", "_____no_output_____" ] ], [ [ "形と次元数を調べます。", "_____no_output_____" ] ], [ [ "print('Shape:', b.shape)\nprint('Rank:', b.ndim)", "Shape: (3, 3)\nRank: 2\n" ] ], [ [ "ここで、`size` という属性も見てみましょう。", "_____no_output_____" ] ], [ [ "b.size", "_____no_output_____" ] ], [ [ "これは、`b` という ndarray が持つ要素の数を表しています。\n`b` は $3 \\times 3$ 行列なので、要素数は 9 です。\n**「形」「次元数」「サイズ」という言葉がそれぞれ意味するものの違いを確認してください。**\n\nNumPy の ndarray の作成方法には、`np.array()` を用いて Python のリストから多次元配列を作る方法以外にも、色々な方法があります。\n以下に代表的な例をいくつか紹介します。", "_____no_output_____" ] ], [ [ "# 形を指定して、要素が全て 0 で埋められた ndarray を作る\na = np.zeros((3, 3))\n\na", "_____no_output_____" ], [ "# 形を指定して、要素が全て 1 で埋められた ndarray を作る\nb = np.ones((2, 3))\n\nb", "_____no_output_____" ], [ "# 形と値を指定して、要素が指定した値で埋められた ndarray を作る\nc = np.full((3, 2), 9)\n\nc", "_____no_output_____" ], [ "# 指定された大きさの単位行列を表す ndarray を作る\nd = np.eye(5)\n\nd", "_____no_output_____" ], [ "# 形を指定して、 0 ~ 1 の間の乱数で要素を埋めた ndarray を作る\ne = np.random.random((4, 5))\n\ne", "_____no_output_____" ], [ "# 3 から始まり 10 になるまで 1 ずつ増加する数列を作る(10 は含まない)\nf = np.arange(3, 10, 1)\n\nf", "_____no_output_____" ] ], [ [ "## 
多次元配列の要素を選択する\n\n前節では NumPy を使って多次元配列を定義するいくつかの方法を紹介しました。\n本節では、作成した ndarray のうちの特定の要素を選択して、値を取り出す方法を紹介します。\n最もよく行われる方法は `[]` を使った**添字表記 (subscription)** による要素の選択です。", "_____no_output_____" ], [ "### 整数による要素の選択\n\n例えば、上で作成した `e` という $4 \\times 5$ 行列を表す多次元配列から、1 行 2 列目の値を取り出すには、以下のようにします。", "_____no_output_____" ] ], [ [ "val = e[0, 1]\n\nval", "_____no_output_____" ] ], [ [ "「1 行 2 列目」を指定するのに、インデックスは `[0, 1]` でした。\nこれは、NumPy の ndarray の要素は Python リストと同じく、添字が 0 から始まる**ゼロベースインデックス (zero-based index)** が採用されているためです。\nつまり、この行列の i 行 j 列目の値は、`[i - 1, j - 1]` で取り出すことができます。", "_____no_output_____" ], [ "### スライスによる要素の選択\n\nNumPy の ndarray に対しても、Python のリストと同様に**スライス表記 (slicing)** を用いて選択したい要素を範囲指定することができます。\nndarray はさらに、カンマ区切りで複数の次元に対するスライスを指定できます。", "_____no_output_____" ] ], [ [ "# 4 x 5 行列 e の真ん中の 2 x 3 = 6 個の値を取り出す\ncenter = e[1:3, 1:4]\n\ncenter", "_____no_output_____" ] ], [ [ "前節最後にある `e` の出力を見返すと、ちょうど真ん中の部分の $2 \\times 3$ 個の数字が取り出せていることが分かります。\nここで、`e` の中から `[1, 1]` の要素を起点として 2 行 3 列を取り出して作られた `center` の形を、`e` の形と比較してみましょう。", "_____no_output_____" ] ], [ [ "print('Shape of e:', e.shape)\nprint('Shape of center:', center.shape)", "Shape of e: (4, 5)\nShape of center: (2, 3)\n" ] ], [ [ "また、インデックスを指定したり、スライスを用いて取り出した ndarray の一部に対し、値を代入することもできます。", "_____no_output_____" ] ], [ [ "# 先程の真ん中の 6 個の値を 0 にする\ne[1:3, 1:4] = 0\n\ne", "_____no_output_____" ] ], [ [ "### 整数配列による要素の選択\n\nndarray の `[]` には、整数やスライスの他に、整数配列を渡すこともできます。\n整数配列とは、ここでは整数を要素とする Python リストまたは ndarray のことを指しています。\n\n具体例を示します。\nまず、$3 \\times 3$ 行列を表す `a` という ndarray を定義します。", "_____no_output_____" ] ], [ [ "a = np.array(\n [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n)\n\na", "_____no_output_____" ] ], [ [ "この ndarray から、\n\n1. 1 行 2 列目:`a[0, 1]`\n2. 3 行 2 列目:`a[2, 1]`\n3. 2 行 1 列目:`a[1, 0]`\n\nの 3 つの要素を選択して並べ、形が `(3,)` であるような ndarray を作りたいとします。\n\nこれは、以下のように、順に対象の要素を指定して並べて新しい ndarray にすることでももちろん実現できます。", "_____no_output_____" ] ], [ [ "np.array([a[0, 1], a[2, 1], a[1, 0]])", "_____no_output_____" ] ], [ [ "しかし、同じことが**選択したい行、選択したい列を、順にそれぞれリストとして与える**ことでも行えます。", "_____no_output_____" ] ], [ [ "a[[0, 2, 1], [1, 1, 0]]", "_____no_output_____" ] ], [ [ "**選択したい 3 つの値がどの行にあるか**だけに着目すると、それぞれ 1 行目、3 行目、2 行目にある要素です。 \nゼロベースインデックスでは、それぞれ 0, 2, 1 行目です。 \nこれが `a` の `[]` に与えられた 1 つ目のリスト `[0, 2, 1]` の意味です。 \n\n同様に、**列に着目**すると、ゼロベースインデックスでそれぞれ 1, 1, 0 列目の要素です。 \nこれが `a` の `[]` に与えられた 2 つ目のリスト `[1, 1, 0]` の意味です。", "_____no_output_____" ], [ "## ndarray のデータ型\n\n1 つの ndarray の要素は、全て同じ型を持ちます。\nNumPy では様々なデータ型を使うことができますが、ここでは一部だけを紹介します。\nNumPy は Python リストを渡して ndarray を作る際などには、その値からデータ型を推測します。\nndarray のデータ型は、`dtype` という属性に保存されています。", "_____no_output_____" ] ], [ [ "# 整数(Python の int 型)の要素をもつリストを与えた場合\nx = np.array([1, 2, 3])\n\nx.dtype", "_____no_output_____" ], [ "# 浮動小数点数(Python の float 型)の要素をもつリストを与えた場合\nx = np.array([1., 2., 3.])\n\nx.dtype", "_____no_output_____" ] ], [ [ "以上のように、**Python の int 型は自動的に NumPy の int64 型**になりました。\nまた、**Python の float 型は自動的に NumPy の float64 型**になりました。\nPython の int 型は NumPy の int_ 型に対応づけられており、Python の float 型は NumPy の float_ 型に対応づけられています。\nこの int_ 型はプラットフォームによって int64 型と同じ場合と int32 型と同じ場合があります。\nfloat_ 型についても同様で、プラットフォームによって float64 型と同じ場合と float32 型と同じ場合があります。\n\n特定の型を指定して ndarray を作成するには、以下のようにします。", "_____no_output_____" ] ], [ [ "x = np.array([1, 2, 3], dtype=np.float32)\n\nx.dtype", "_____no_output_____" ] ], [ [ "このように、`dtype` という引数に NumPy の dtype オブジェクトを渡します。\nこれは 32 ビット浮動小数点数型を指定する例です。\n同じことが、文字列で指定することによっても行えます。", "_____no_output_____" ] ], [ [ "x = np.array([1, 2, 3], dtype='float32')\n\nx.dtype", 
"_____no_output_____" ] ], [ [ "これはさらに、以下のように短く書くこともできます。", "_____no_output_____" ] ], [ [ "x = np.array([1, 2, 3], dtype='f')\n\nx.dtype", "_____no_output_____" ] ], [ [ "一度あるデータ型で定義した配列のデータ型を別のものに変更するには、`astype` を用いて変換を行います。", "_____no_output_____" ] ], [ [ "x = x.astype(np.float64)\n\nx.dtype", "_____no_output_____" ] ], [ [ "## 多次元配列を用いた計算\n\nndarray を使って行列やベクトルを定義して、それらを用いていくつかの計算を行ってみましょう。\n\nndarray として定義されたベクトルや行列同士の**要素ごとの加減乗除**は、Python の数値同士の四則演算に用いられる `+`、`-`、`*`、`/` という記号を使って行えます。\n\nそれでは、同じ形の行列を 2 つ定義し、それらの**要素ごとの**加減乗除を実行してみましょう。", "_____no_output_____" ] ], [ [ "# 同じ形 (3 x 3) の行列を 2 つ定義する\na = np.array([\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]\n])\n\nb = np.array([\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n])", "_____no_output_____" ], [ "# 足し算\nc = a + b\n\nc", "_____no_output_____" ], [ "# 引き算\nc = a - b\n\nc", "_____no_output_____" ], [ "# 掛け算\nc = a * b\n\nc", "_____no_output_____" ], [ "# 割り算\nc = a / b\n\nc", "_____no_output_____" ] ], [ [ "NumPy では、与えられた多次元配列に対して要素ごとに計算を行う関数が色々と用意されています。\n以下にいくつかの例を示します。", "_____no_output_____" ] ], [ [ "# 要素ごとに平方根を計算する\nc = np.sqrt(b)\n\nc", "_____no_output_____" ], [ "# 要素ごとに値を n 乗する\nn = 2\nc = np.power(b, n)\n\nc", "_____no_output_____" ] ], [ [ "要素ごとに値を n 乗する計算は、以下のようにしても書くことができます。", "_____no_output_____" ] ], [ [ "c ** n", "_____no_output_____" ] ], [ [ "はじめに紹介した四則演算は、**同じ大きさの** 2 つの行列同士で行っていました。\nここで、$3 \\times 3$ 行列 `a` と 3 次元ベクトル `b` という大きさのことなる配列を定義して、それらを足してみましょう。", "_____no_output_____" ] ], [ [ "a = np.array([\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]\n])\n\nb = np.array([1, 2, 3])\n\nc = a + b\n\nc", "_____no_output_____" ] ], [ [ "形が同じ行列同士の場合と同様に計算することができました。\n\nこれは NumPy が自動的に**ブロードキャスト(broadcast)**と呼ばれる操作を行っているためです。\nこれについて次節で説明します。", "_____no_output_____" ], [ "## ブロードキャスト\n\n行列同士の要素ごとの四則演算は、通常は行列の形が同じでなければ定義できません。\nしかし、前節の最後では $3 \\times 3$ 行列に 3 次元ベクトルを足す計算が実行できました。\n\nこれが要素ごとの計算と同じように実行できる理由は、NumPy が自動的に 3 次元ベクトル `b` を 3 つ並べてできる $3 \\times 3$ 行列を想定し、`a` と同じ形に揃える操作を暗黙に行っているからです。\nこの操作を、**ブロードキャスト**と呼びます。\n\n算術演算を異なる形の配列同士で行う場合、NumPy は自動的に小さい方の配列を**ブロードキャスト**し、大きい方の配列と形を合わせます。\nただし、この自動的に行われるブロードキャストでは、行いたい算術演算が、大きい方の配列の一部に対して**繰り返し行われる**ことで実現されるため、実際に小さい方の配列のデータをコピーして大きい配列をメモリ上に作成することは可能な限り避けられます。\nまた、この繰り返しの計算は NumPy の内部の C 言語によって実装されたループで行われるため、高速です。\n\nよりシンプルな例で考えてみましょう。\n以下のような配列 `a` があり、この全ての要素を 2 倍にしたいとします。", "_____no_output_____" ] ], [ [ "a = np.array([1, 2, 3])\n\na", "_____no_output_____" ] ], [ [ "このとき、一つの方法は以下のように同じ形で要素が全て 2 である別の配列を定義し、これと要素ごとの積を計算するやり方です。", "_____no_output_____" ] ], [ [ "b = np.array([2, 2, 2])\n\nc = a * b\n\nc", "_____no_output_____" ] ], [ [ "しかし、スカラの 2 をただ `a` に掛けるだけでも同じ結果が得られます。", "_____no_output_____" ] ], [ [ "c = a * 2\n\nc", "_____no_output_____" ] ], [ [ "`* 2` という計算が、`c` の 3 つの要素の**どの要素に対する計算なのか**が明示されていないため、NumPy はこれを**全ての要素に対して行うという意味**だと解釈して、スカラの 2 を `a` の要素数 3 だけ引き伸ばしてから掛けてくれます。\n\n**形の異なる配列同士の計算がブロードキャストによって可能になるためにはルールがあります。**\n\nそれは、**「2 つの配列の各次元が同じ大きさになっているか、どちらかが 1 であること」**です。\nこのルールを満たさない場合、NumPy は \"ValueError: operands could not be broadcast together with shapes (1 つ目の配列の形) (2 つ目の配列の形)\" というエラーを出します。\n\nブロードキャストされた配列の各次元のサイズ([注釈4](#note4))は、入力された配列のその次元のサイズの中で最大の値と同じになっています。\n入力された配列は、各次元のサイズが入力のうち大きい方のサイズと同じになるようブロードキャストされ、その拡張されたサイズで計算されます。\n\nもう少し具体例を見てみましょう。\n以下のような 2 つの配列 `a` と `b` を定義し、足します。", "_____no_output_____" ] ], [ [ "# 0 ~ 9 の範囲の値をランダムに用いて埋められた (2, 1, 3) と (3, 1) という大きさの配列を作る\na = np.random.randint(0, 10, (2, 1, 3))\nb = np.random.randint(0, 10, (3, 1))\n\nprint('a:\\n', a)\nprint('\\na.shape:', a.shape)\nprint('\\nb:\\n', b)\nprint('\\nb.shape:', 
b.shape)\n\n# 加算\nc = a + b\n\nprint('\\na + b:\\n', c)\nprint('\\n(a + b).shape:', c.shape)", "a:\n [[[6 2 0]]\n\n [[1 9 7]]]\n\na.shape: (2, 1, 3)\n\nb:\n [[6]\n [2]\n [8]]\n\nb.shape: (3, 1)\n\na + b:\n [[[12 8 6]\n [ 8 4 2]\n [14 10 8]]\n\n [[ 7 15 13]\n [ 3 11 9]\n [ 9 17 15]]]\n\n(a + b).shape: (2, 3, 3)\n" ] ], [ [ "`a` の形は `(2, 1, 3)` で、`b` の形は `(3, 1)` でした。\nこの 2 つの配列の**末尾次元 (trailing dimension)**([注釈5](#note5)) はそれぞれ 3 と 1 なので、ルールにあった「次元が同じサイズであるか、どちらかが 1 であること」を満たしています。\n\n次に、各配列の第 2 次元に注目してみましょう。\nそれぞれ 1 と 3 です。\nこれもルールを満たしています。\n\nここで、`a` は 3 次元配列ですが、`b` は 2 次元配列です。\nつまり、次元数が異なっています。\nこのような場合は、`b` は**一番上の次元にサイズが 1 の次元が追加された形** `(1, 3, 1)` として扱われます。\nそして 2 つの配列の各次元ごとのサイズの最大値をとった形 `(2, 3, 3)` にブロードキャストされ、足し算が行われます。\n\nこのように、もし 2 つの配列のランクが異なる場合は、次元数が小さい方の配列が大きい方と同じ次元数になるまでその形の先頭に新たな次元が追加されます。\nサイズが 1 の次元がいくつ追加されても、要素の数は変わらないことに注意してください。\n要素数(`size` 属性で取得できる値)は、各次元のサイズの掛け算になるので、1 を何度かけても値は変わらないことから、これが成り立つことが分かります。\n\nNumPy がブロードキャストのために自動的に行う新しい次元の挿入は、`[]` を使った以下の表な表記を用いることで**手動で行うこともできます。**", "_____no_output_____" ] ], [ [ "print('Original shape:', b.shape)\n\nb_expanded = b[np.newaxis, :, :]\n\nprint('Added new axis to the top:', b_expanded.shape)\n\nb_expanded2 = b[:, np.newaxis, :]\n\nprint('Added new axis to the middle:', b_expanded2.shape)", "Original shape: (3, 1)\nAdded new axis to the top: (1, 3, 1)\nAdded new axis to the middle: (3, 1, 1)\n" ] ], [ [ "`np.newaxis` が指定された位置に、新しい次元が挿入されます。\n配列が持つ数値の数は変わっていません。\nそのため、挿入された次元のサイズは必ず 1 になります。", "_____no_output_____" ] ], [ [ "b", "_____no_output_____" ], [ "b_expanded", "_____no_output_____" ], [ "b_expanded2", "_____no_output_____" ] ], [ [ "NumPy のブロードキャストは慣れるまで直感に反するように感じる場合があるかもしれません。\nしかし、使いこなすと同じ計算が Python のループを使って行うよりも高速に行えるため、ブロードキャストを理解することは非常に重要です。\n一つ具体例を見てみます。\n\n$5 \\times 5$ 行列 `a` に、3 次元ベクトル `b` を足します。\nまず、`a`、`b` および結果を格納する配列 `c` を定義します。", "_____no_output_____" ] ], [ [ "a = np.array([\n [0, 1, 2, 1, 0],\n [3, 4, 5, 4, 3],\n [6, 7, 8, 7, 6],\n [3, 4, 5, 4, 4],\n [0, 1, 2, 1, 0]\n])\n\nb = np.array([1, 2, 3, 4, 5])\n\n# 結果を格納する配列を先に作る\nc = np.empty((5, 5))", "_____no_output_____" ] ], [ [ "`%%timeit` という Jupyter Notebook で使用できるそのセルの実行時間を計測するためのマジックを使って、`a` の各行(1 次元目)に `b` の値を足していく計算を Python のループを使って 1 行ずつ処理していくコードの実行時間を測ってみます。", "_____no_output_____" ] ], [ [ "%%timeit\nfor i in range(a.shape[0]):\n c[i, :] = a[i, :] + b", "The slowest run took 90.12 times longer than the fastest. This could mean that an intermediate result is being cached.\n100000 loops, best of 3: 7.44 µs per loop\n" ], [ "c", "_____no_output_____" ] ], [ [ "次に、NumPy のブロードキャストを活用した方法で同じ計算を行ってみます。", "_____no_output_____" ] ], [ [ "%%timeit\nc = a + b", "The slowest run took 32.88 times longer than the fastest. 
This could mean that an intermediate result is being cached.\n1000000 loops, best of 3: 1.13 µs per loop\n" ], [ "c", "_____no_output_____" ] ], [ [ "計算結果は当然同じになります。\nしかし、実行時間が数倍短くなっています。\n\nこのように、ブロードキャストを理解して活用することで、記述が簡単になるだけでなく、実行速度という点においても有利になります。", "_____no_output_____" ], [ "## 行列積\n\n行列の要素ごとの積は `*` を用いて計算できました。\n一方、通常の行列同士の積(行列積)の計算は、`*` ではなく、別の方法で行います。\n方法は 2 種類あります。\n\n1つは、`np.dot()` 関数を用いる方法です。\n`np.dot()` は 2 つの引数をとり、それらの行列積を計算して返す関数です。\n今、`A` という行列と `B` という行列があり、行列積 `AB` を計算したいとします。\nこれは `np.dot(A, B)` と書くことで計算できます。\nもし `BA` を計算したい場合は、`np.dot(B, A)` と書きます。\n\nもう 1 つは、ndarray オブジェクトが持つ `dot()` メソッドを使う方法です。\nこれを用いると、同じ計算が `A.dot(B)` と書くことによって行えます。", "_____no_output_____" ] ], [ [ "# 行列 A の定義\nA = np.array([\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]\n])\n\n# 行列 B の定義\nB = np.array([\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n])", "_____no_output_____" ] ], [ [ "実際にこの $3 \\times 3$ の 2 つの行列の行列積を計算してみましょう。", "_____no_output_____" ] ], [ [ "# 行列積の計算 (1)\nC = np.dot(A, B)\n\nC", "_____no_output_____" ] ], [ [ "同じ計算をもう一つの記述方法で行ってみます。", "_____no_output_____" ] ], [ [ "C = A.dot(B)\n\nC", "_____no_output_____" ], [ "# データ型の確認(整数値)\na.dtype", "_____no_output_____" ] ], [ [ "## 基本的な統計量の求め方\n\n本節では、多次元配列に含まれる値の平均・分散・標準偏差・最大値・最小値といった統計値を計算する方法を紹介します。\n$8 \\times 10$ の行列を作成し、この中に含まれる値全体に渡るこれらの統計値を計算してみましょう。", "_____no_output_____" ] ], [ [ "x = np.random.randint(0, 10, (8, 10))\n\nx", "_____no_output_____" ], [ "# 平均値\nx.mean()", "_____no_output_____" ], [ "# 分散\nx.var()", "_____no_output_____" ], [ "# 標準偏差\nx.std()", "_____no_output_____" ], [ "# 最大値\nx.max()", "_____no_output_____" ], [ "# 最小値\nx.min()", "_____no_output_____" ] ], [ [ "ここで、`x` は 2 次元配列なので、各次元に沿ったこれらの統計値の計算も行えます。\n例えば、最後の次元内だけで平均をとると、8 個の平均値が得られるはずです。\n平均を計算したい軸(何次元目に沿って計算するか)を `axis` という引数に指定します。", "_____no_output_____" ] ], [ [ "x.mean(axis=1)", "_____no_output_____" ] ], [ [ "これは、以下のように 1 次元目の値の平均を計算していったものを並べているのと同じことです。\n(ゼロベースインデックスで考えています。`x` の形は `(8, 10)` なので、0 次元目のサイズが 8、1 次元目のサイズが 10 です。)", "_____no_output_____" ] ], [ [ "np.array([\n x[0, :].mean(),\n x[1, :].mean(),\n x[2, :].mean(),\n x[3, :].mean(),\n x[4, :].mean(),\n x[5, :].mean(),\n x[6, :].mean(),\n x[7, :].mean(),\n])", "_____no_output_____" ] ], [ [ "## NumPy を用いた重回帰分析\n\n[単回帰分析と重回帰分析](https://tutorials.chainer.org/ja/07_Regression_Analysis.html)の章で説明した重回帰分析を NumPy を用いて行いましょう。\n\n4 つのデータをまとめた、以下のようなデザイン行列が与えられたとします。", "_____no_output_____" ] ], [ [ "# Xの定義\nX = np.array([\n [2, 3],\n [2, 5],\n [3, 4],\n [5, 9],\n])\n\nX", "_____no_output_____" ] ], [ [ "4 章の解説と同様に、切片を重みベクトルに含めて扱うため、デザイン行列の 0 列目に 1 という値を付け加えます。", "_____no_output_____" ] ], [ [ "# データ数(X.shape[0]) と同じ数だけ 1 が並んだ配列\nones = np.ones((X.shape[0], 1))\n\n# concatenate を使い、1 次元目に 1 を付け加える\nX = np.concatenate((ones, X), axis=1)\n\n# 先頭に 1 が付け加わったデザイン行列\nX", "_____no_output_____" ] ], [ [ "また、目標値が以下で与えられたとします。", "_____no_output_____" ] ], [ [ "# t の定義\nt = np.array([1, 5, 6, 8])\n\nt", "_____no_output_____" ] ], [ [ "重回帰分析は、正規方程式を解くことで最適な 1 次方程式の重みを決定することができました。\n正規方程式の解は以下のようなものでした。\n\n$$\n{\\bf w} = ({\\bf X}^{{\\rm T}}{\\bf X})^{\\rm -1}{\\bf X}^{\\rm T}{\\bf t}\n$$\n\nこれを、4 つのステップに分けて計算していきます。\n\nまずは、${\\bf X}^{\\rm T}{\\bf X}$ の計算です。ndarrayに対して `.T` で転置した配列を得られます。", "_____no_output_____" ] ], [ [ "# Step 1\nxx = np.dot(X.T, X)\n\nxx", "_____no_output_____" ] ], [ [ "次に、この逆行列を計算します。", "_____no_output_____" ] ], [ [ "# Step 2\nxx_inv = np.linalg.inv(xx)\n\nxx_inv", "_____no_output_____" ] ], [ [ "逆行列の計算は `np.linalg.inv()` で行うことができます。\n\n次に、${\\bf X}^{\\rm T}{\\bf t}$ の計算をします。", "_____no_output_____" ] 
], [ [ "# Step 3\nxt = np.dot(X.T, t)\n\nxt", "_____no_output_____" ] ], [ [ "最後に、求めた `xx_inv` と `xt` を掛け合わせます。", "_____no_output_____" ] ], [ [ "# Step 4\nw = np.dot(xx_inv, xt)\n\nw", "_____no_output_____" ] ], [ [ "**以上の計算は、以下のように 1 行で行うこともできます。**", "_____no_output_____" ] ], [ [ "w_ = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(t)\n\nw_", "_____no_output_____" ] ], [ [ "実際には逆行列を陽に求めることは稀で、連立一次方程式を解く、すなわち逆行列を計算してベクトルに掛けるのに等しい計算をひとまとめに行う関数 `numpy.linalg.solve` を呼ぶ方が速度面でも精度面でも有利です。", "_____no_output_____" ] ], [ [ "w_ = np.linalg.solve(X.T.dot(X), X.T.dot(t))\n\nw_", "_____no_output_____" ] ], [ [ "数式を NumPy による配列の計算に落とし込むことに慣れていくには少し時間がかかりますが、慣れると少ない量のコードで記述できるだけでなく、高速に計算が行なえるため、大きな恩恵があります。", "_____no_output_____" ], [ "<hr />\n\n<div class=\"alert alert-info\">\n**注釈 1**\n\nライブラリとは、汎用性の高い複数の関数やクラスなどを再利用可能な形でひとまとまりにしたもので、Python の世界では**パッケージ**とも呼ばれます。また、Python で関数やクラスの定義、文などが書かれたファイルのことを**モジュール**と呼び、パッケージはモジュールが集まったものです。\n\n[▲上へ戻る](#ref_note1)\n</div>\n\n<div class=\"alert alert-info\">\n**注釈 2**\n\nNumPy には matrix というクラスも存在しますが、本チュートリアルでは基本的に多次元配列を表す ndarray をベクトルや行列を表すために用います。\n\n[▲上へ戻る](#ref_note2)\n</div>\n\n<div class=\"alert alert-info\">\n**注釈 3**\n\nこれは、その多次元配列が表すテンソルの**階数(rank、以下ランク)**と対応します。\n\n[▲上へ戻る](#ref_note3)\n</div>\n\n<div class=\"alert alert-info\">\n**注釈 4**\n \n「次元のサイズ」と言った場合はその次元の大きさを意味し、配列の `size` 属性とは異なるものを指しています。\n\n[▲上へ戻る](#ref_note4)\n</div>\n\n<div class=\"alert alert-info\">\n**注釈 5**\n \n末尾次元(trailing dimension)とは、その配列の形を表すタプルの一番最後の値のことを指します。\n\n[▲上へ戻る](#ref_note5)\n</div>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb41809b5874293a9fda86c820b675488040d602
9,553
ipynb
Jupyter Notebook
doc/examples/resuming.ipynb
basnijholt/pyABC
a1310ad071dc1c40737fefdf5b81e56dec4af868
[ "BSD-3-Clause" ]
1
2020-12-14T09:59:43.000Z
2020-12-14T09:59:43.000Z
doc/examples/resuming.ipynb
basnijholt/pyABC
a1310ad071dc1c40737fefdf5b81e56dec4af868
[ "BSD-3-Clause" ]
null
null
null
doc/examples/resuming.ipynb
basnijholt/pyABC
a1310ad071dc1c40737fefdf5b81e56dec4af868
[ "BSD-3-Clause" ]
null
null
null
25.272487
118
0.560138
[ [ [ "# Resuming stored ABC runs", "_____no_output_____" ], [ "In this example, we illustrate how stored ABC runs can be\nloaded and continued later on.\nThis might make sense if you decide later on to run a couple more\npopulations for increased accuracy.", "_____no_output_____" ] ], [ [ "The models used in this example\nare similar to the ones from the\n`parameter inference tutorial <parameter_inference.ipynb>`_.\n\nThis notebook can be downloaded here:\n:download:`Resuming stored ABC runs <resuming.ipynb>`.\n\nIn this example, we're going to use the following classes:\n\n* :class:`ABCSMC <pyabc.smc.ABCSMC>`,\n our entry point to parameter inference,\n* :class:`RV <pyabc.random_variables.RV>`,\n to define the prior over a single parameter,\n* :class:`Distribution <pyabc.random_variables.Distribution>`,\n to define the prior over a possibly higher dimensional parameter space,", "_____no_output_____" ] ], [ [ "Let's start with the imports.", "_____no_output_____" ] ], [ [ "from pyabc import ABCSMC, Distribution, RV\nimport numpy as np\nfrom tempfile import gettempdir\nimport os", "_____no_output_____" ] ], [ [ "As usual, we start with the definition of the model,\nthe prior and the distance function.", "_____no_output_____" ] ], [ [ "def model(parameter):\n return {\"data\": parameter[\"mean\"] + np.random.randn()}\n\nprior = Distribution(mean=RV(\"uniform\", 0, 5))\n\ndef distance(x, y):\n return abs(x[\"data\"] - y[\"data\"])\n\ndb = \"sqlite:///\" + os.path.join(gettempdir(), \"test.db\")", "_____no_output_____" ] ], [ [ "We next make a new ABC-SMC run and also print the id of this run.\nWe'll use the id later on to resume the run.", "_____no_output_____" ] ], [ [ "abc = ABCSMC(model, prior, distance)\nhistory = abc.new(db, {\"data\": 2.5})\nrun_id = history.id\nprint(\"Run ID:\", run_id)", "INFO:History:Start <ABCSMC(id=1, start_time=2020-01-10 19:58:36.207963, end_time=None)>\n" ] ], [ [ "We then run up to 3 generations, or until the acceptance threshold 0.1\nis reached -- whatever happens first.", "_____no_output_____" ] ], [ [ "history = abc.run(minimum_epsilon=.1, max_nr_populations=3)", "INFO:ABC:Calibration sample before t=0.\nINFO:Epsilon:initial epsilon is 1.281948779424301\nINFO:ABC:t: 0, eps: 1.281948779424301.\nINFO:ABC:Acceptance rate: 100 / 193 = 5.1813e-01, ESS=1.0000e+02.\nINFO:ABC:t: 1, eps: 0.593462311078578.\nINFO:ABC:Acceptance rate: 100 / 338 = 2.9586e-01, ESS=8.2825e+01.\nINFO:ABC:t: 2, eps: 0.3285232421992942.\nINFO:ABC:Acceptance rate: 100 / 506 = 1.9763e-01, ESS=7.8478e+01.\nINFO:History:Done <ABCSMC(id=1, start_time=2020-01-10 19:58:36.207963, end_time=2020-01-10 19:58:41.387478)>\n" ] ], [ [ "Let's verify that we have 3 populations.", "_____no_output_____" ] ], [ [ "history.n_populations", "_____no_output_____" ] ], [ [ "We now create a completely new ABCSMC object.\nWe pass the same model, prior and distance from before.", "_____no_output_____" ] ], [ [ "abc_continued = ABCSMC(model, prior, distance)", "_____no_output_____" ] ], [ [ ".. note::\n\n    You could actually pass different models,\n    priors and distance functions here. 
This might make sense\n if, for example, in the meantime you came up with a more \n efficient model implementation or distance function.\n \n For the experts: under certain circumstances it can even\n be mathematically correct to change the prior after a couple\n of populations.", "_____no_output_____" ] ], [ [ "To resume a run, we use the ``load`` method.\nThis loads the necessary data.\nWe pass to this method the id of the run we want to continue.", "_____no_output_____" ] ], [ [ "abc_continued.load(db, run_id)", "_____no_output_____" ], [ "abc_continued.run(minimum_epsilon=.1, max_nr_populations=1)", "INFO:Epsilon:initial epsilon is 0.19946300333077085\nINFO:ABC:t: 3, eps: 0.19946300333077085.\nINFO:ABC:Acceptance rate: 100 / 931 = 1.0741e-01, ESS=9.0195e+01.\nINFO:History:Done <ABCSMC(id=1, start_time=2020-01-10 19:58:36.207963, end_time=2020-01-10 19:58:48.110429)>\n" ] ], [ [ "Let's check the number of populations of the resumed run.\nIt should be 4, as we did 3 populations before and added another one.", "_____no_output_____" ] ], [ [ "abc_continued.history.n_populations", "_____no_output_____" ] ], [ [ "That's it. This was a basic tutorial on how to continue\nstored ABC-SMC runs.", "_____no_output_____" ] ], [ [ ".. note::\n \n For advanced users:\n \n In situations where the distance function or epsilon require\n initialization, it is possible that, when resuming a run via load(),\n we lose information, because not everything can be stored in \n the database. This concerns hyper-parameters in individual\n objects specified by the user.\n \n If that is the case, however, the user can store e.g.\n the distance function used in the first run and pass this \n very object to abc_continued. Then it is ideally fully\n initialized, so that after setting \n distance_function.require_initialize = False it is just as\n if the first run had not been interrupted.\n \n However, even if some information is lost, after load() the process\n usually quickly re-adjusts itself within 1 or 2 iterations, so that\n this is not much of a problem.", "_____no_output_____" ] ] ]
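A condensed, self-contained sketch of the save-then-resume pattern walked through in this notebook, using only the pyABC calls already shown above (new, run, load, history.id, history.n_populations); the only things that must survive between the two sessions are the database file and the run id:

from pyabc import ABCSMC, Distribution, RV
from tempfile import gettempdir
import numpy as np
import os

# Same toy model, prior, distance and database as in the notebook.
def model(parameter):
    return {"data": parameter["mean"] + np.random.randn()}

prior = Distribution(mean=RV("uniform", 0, 5))

def distance(x, y):
    return abs(x["data"] - y["data"])

db = "sqlite:///" + os.path.join(gettempdir(), "test.db")

# First session: create a run, remember its id, and do a few populations.
abc = ABCSMC(model, prior, distance)
history = abc.new(db, {"data": 2.5})
run_id = history.id
abc.run(minimum_epsilon=.1, max_nr_populations=3)

# Later session: build a fresh ABCSMC object and continue the stored run.
abc_continued = ABCSMC(model, prior, distance)
abc_continued.load(db, run_id)
abc_continued.run(minimum_epsilon=.1, max_nr_populations=1)

# The resumed history also counts the earlier populations (4 in total here).
print(abc_continued.history.n_populations)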
[ "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "raw" ]
[ [ "markdown", "markdown" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "raw" ] ]
cb4183ac8936c6c918ecf5770d272588a955a33b
149,757
ipynb
Jupyter Notebook
Regression/Linear Models/HuberRegressor_StandardScaler.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
53
2021-08-28T07:41:49.000Z
2022-03-09T02:20:17.000Z
Regression/Linear Models/HuberRegressor_StandardScaler.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
142
2021-07-27T07:23:10.000Z
2021-08-25T14:57:24.000Z
Regression/Linear Models/HuberRegressor_StandardScaler.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
38
2021-07-27T04:54:08.000Z
2021-08-23T02:27:20.000Z
229.68865
71,178
0.904071
[ [ [ "# HuberRegressor with StandardScaler", "_____no_output_____" ], [ "This code template is for regression analysis using the Huber Regressor together with the feature rescaling technique StandardScaler in a pipeline. ", "_____no_output_____" ], [ "### Required Packages", "_____no_output_____" ] ], [ [ "import warnings\r\nimport numpy as np \r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt \r\nimport seaborn as se \r\nfrom sklearn.linear_model import HuberRegressor\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.preprocessing import StandardScaler \r\nfrom sklearn.model_selection import train_test_split \r\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error \r\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "### Initialization\n\nFilepath of the CSV file", "_____no_output_____" ] ], [ [ "#filepath\r\nfile_path= \"\"", "_____no_output_____" ] ], [ [ "List of features required for model training.", "_____no_output_____" ] ], [ [ "#x_values\r\nfeatures= []", "_____no_output_____" ] ], [ [ "Target feature for prediction.", "_____no_output_____" ] ], [ [ "#y_value\r\ntarget=''", "_____no_output_____" ] ], [ [ "### Data Fetching\n\nPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.\n\nWe will use the pandas library to read the CSV file from its storage path, and the head function to display the first few rows.", "_____no_output_____" ] ], [ [ "df=pd.read_csv(file_path)\ndf.head()", "_____no_output_____" ] ], [ [ "### Feature Selection\n\nFeature selection is the process of reducing the number of input variables when developing a predictive model. It is used both to reduce the computational cost of modelling and, in some cases, to improve the performance of the model.\n\nWe will assign all the required input features to X and the target/outcome to Y.", "_____no_output_____" ] ], [ [ "X=df[features]\nY=df[target]", "_____no_output_____" ] ], [ [ "### Data Preprocessing\n\nSince most of the machine learning models in the scikit-learn library cannot handle string categories or null values directly, we have to explicitly remove or replace the null values. The snippet below defines functions that remove null values if any exist and convert string categorical columns into numeric dummy variables.\n", "_____no_output_____" ] ], [ [ "def NullClearner(df):\n if(isinstance(df, pd.Series) and (df.dtype in [\"float64\",\"int64\"])):\n df.fillna(df.mean(),inplace=True)\n return df\n elif(isinstance(df, pd.Series)):\n df.fillna(df.mode()[0],inplace=True)\n return df\n else:return df\ndef EncodeX(df):\n return pd.get_dummies(df)", "_____no_output_____" ] ], [ [ "Calling preprocessing functions on the feature and target set.\n", "_____no_output_____" ] ], [ [ "x=X.columns.to_list()\nfor i in x:\n X[i]=NullClearner(X[i])\nX=EncodeX(X)\nY=NullClearner(Y)\nX.head()", "_____no_output_____" ] ], [ [ "#### Correlation Map\n\nIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.", "_____no_output_____" ] ], [ [ "f,ax = plt.subplots(figsize=(18, 18))\nmatrix = np.triu(X.corr())\nse.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)\nplt.show()", "_____no_output_____" ] ], [ [ "### Data Splitting\n\nThe train-test split is a procedure for evaluating the performance of an algorithm. 
The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.", "_____no_output_____" ] ], [ [ "x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)", "_____no_output_____" ] ], [ [ "### Model\n\nHuberRegressor is a linear regression model that is robust to outliers.\n\nThe Huber Regressor optimizes the squared loss for the samples where |(y - X'w) / sigma| < epsilon and the absolute loss for the samples where |(y - X'w) / sigma| > epsilon, where w and sigma are parameters to be optimized. The parameter sigma makes sure that if y is scaled up or down by a certain factor, one does not need to rescale epsilon to achieve the same robustness. Note that this does not take into account the fact that the different features of X may be of different scales.\n\nThis makes sure that the loss function is not heavily influenced by the outliers while not completely ignoring their effect.", "_____no_output_____" ], [ "#### Data Scaling\nWe use sklearn.preprocessing.StandardScaler.\n\nIt standardizes features by removing the mean and scaling to unit variance.\nThe standard score of a sample x is calculated as:\n\nz = (x - u) / s\n\nwhere u is the mean of the training samples or zero if with_mean=False, and s is the standard deviation of the training samples or one if with_std=False.\n\nRead more at [scikit-learn.org](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)", "_____no_output_____" ] ], [ [ "Input=[(\"standard\",StandardScaler()),(\"model\",HuberRegressor())]\nmodel = Pipeline(Input)\nmodel.fit(x_train,y_train)", "_____no_output_____" ] ], [ [ "#### Model Accuracy\n\nWe will use the trained model to make predictions on the test set, and then use the predicted values to measure the accuracy of our model.\n\n> **score**: The **score** function returns the coefficient of determination <code>R<sup>2</sup></code> of the prediction.", "_____no_output_____" ] ], [ [ "print(\"Accuracy score {:.2f} %\\n\".format(model.score(x_test,y_test)*100))", "Accuracy score 74.88 %\n\n" ] ], [ [ "> **r2_score**: The **r2_score** function computes the proportion of the variance in the target that is explained by our model. \n\n> **mae**: The **mean absolute error** is the average absolute distance between the real data and the predicted data. \n\n> **mse**: The **mean squared error** squares the errors before averaging, which penalizes the model for large errors. 
", "_____no_output_____" ] ], [ [ "y_pred=model.predict(x_test)\nprint(\"R2 Score: {:.2f} %\".format(r2_score(y_test,y_pred)*100))\nprint(\"Mean Absolute Error {:.2f}\".format(mean_absolute_error(y_test,y_pred)))\nprint(\"Mean Squared Error {:.2f}\".format(mean_squared_error(y_test,y_pred)))", "R2 Score: 74.88 %\nMean Absolute Error 3008.03\nMean Squared Error 38405174.08\n" ] ], [ [ "#### Prediction Plot\n\nWe plot the true target values for the first 20 test records against the record number, and overlay the model's predictions for the same records, so that predicted and actual values can be compared directly.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(14,10))\nplt.plot(range(20),y_test[0:20], color = \"green\")\nplt.plot(range(20),model.predict(x_test[0:20]), color = \"red\")\nplt.legend([\"Actual\",\"prediction\"]) \nplt.title(\"Predicted vs True Value\")\nplt.xlabel(\"Record number\")\nplt.ylabel(target)\nplt.show()", "_____no_output_____" ] ], [ [ "#### Creator: Snehaan Bhawal, Github: [Profile](https://github.com/Sbhawal)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
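A small self-contained sketch, on synthetic data rather than the unspecified CSV used by the template above, illustrating the robustness to outliers described in the Model section: a handful of corrupted targets barely move the Huber fit, while ordinary least squares is pulled away from the true slope. The data-generating numbers here are arbitrary choices for illustration; epsilon=1.35 is the scikit-learn default.

import numpy as np
from sklearn.linear_model import HuberRegressor, LinearRegression

rng = np.random.RandomState(0)
x = rng.uniform(0, 10, size=200)
y = 3.0 * x + 1.0 + rng.normal(scale=0.5, size=200)   # true slope 3, intercept 1

# Corrupt the ten largest-x points with large negative outliers.
outliers = np.argsort(x)[-10:]
y[outliers] -= 60.0
X = x.reshape(-1, 1)

huber = HuberRegressor(epsilon=1.35).fit(X, y)
ols = LinearRegression().fit(X, y)

# The Huber slope should stay close to 3, while the OLS slope is dragged well below it.
print("Huber slope:", huber.coef_[0], "intercept:", huber.intercept_)
print("OLS   slope:", ols.coef_[0], "intercept:", ols.intercept_)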
cb41877b415ab12ebd318064a51dc957d82cd940
24,944
ipynb
Jupyter Notebook
src/ionotomo/notebooks/RealData.ipynb
Joshuaalbert/IonoTomo
9f50fbac698d43a824dd098d76dce93504c7b879
[ "Apache-2.0" ]
7
2017-06-22T08:47:07.000Z
2021-07-01T12:33:02.000Z
src/ionotomo/notebooks/RealData.ipynb
Joshuaalbert/IonoTomo
9f50fbac698d43a824dd098d76dce93504c7b879
[ "Apache-2.0" ]
1
2019-04-03T15:21:19.000Z
2019-04-03T15:48:31.000Z
src/ionotomo/notebooks/RealData.ipynb
Joshuaalbert/IonoTomo
9f50fbac698d43a824dd098d76dce93504c7b879
[ "Apache-2.0" ]
2
2020-03-01T16:20:00.000Z
2020-07-07T15:09:02.000Z
47.603053
158
0.505091
[ [ [ "import glob\nimport numpy as np\nfrom scipy.interpolate import interp2d\nimport astropy.units as au\nimport astropy.time as at\nimport astropy.coordinates as ac\nimport h5py\nimport os\nimport pylab as plt\n\nfrom RadioArray import RadioArray\nfrom UVWFrame import UVW\nfrom PointingFrame import Pointing\n\n\ndef getDatumIdx(antIdx,timeIdx,dirIdx,numAnt,numTimes):\n '''standarizes indexing'''\n idx = antIdx + numAnt*(timeIdx + numTimes*dirIdx)\n return idx\n\ndef getDatum(datumIdx,numAnt,numTimes):\n antIdx = datumIdx % numAnt\n timeIdx = (datumIdx - antIdx)/numAnt % numTimes\n dirIdx = (datumIdx - antIdx - numAnt*timeIdx)/numAnt/numTimes\n return antIdx,timeIdx,dirIdx\n\nclass DataPack(object):\n '''dataDict = {'radioArray':radioArray,'antennas':outAntennas,'antennaLabels':outAntennaLabels,\n 'times':outTimes,'timestamps':outTimestamps,\n 'directions':outDirections,'patchNames':outPatchNames,'dtec':outDtec}\n '''\n def __init__(self,dataDict=None,filename=None):\n '''get the astropy object defining rays and then also the dtec data'''\n if dataDict is not None:\n self.addDataDict(**dataDict)\n else:\n if filename is not None:\n self.load(filename)\n return\n self.refAnt = None\n print(\"Loaded {0} antennas, {1} times, {2} directions\".format(self.Na,self.Nt,self.Nd))\n \n def __repr__(self):\n return \"DataPack: numAntennas = {}, numTimes = {}, numDirections = {}\\nReference Antenna = {}\".format(self.Na,self.Nt,self.Nd,self.refAnt)\n def clone(self):\n dataPack = DataPack({'radioArray':self.radioArray, 'antennas':self.antennas, 'antennaLabels':self.antennaLabels,\n 'times':self.times, 'timestamps':self.timestamps, 'directions':self.directions,\n 'patchNames' : self.patchNames, 'dtec':self.dtec})\n dataPack.setReferenceAntenna(self.refAnt)\n return dataPack\n \n def save(self,filename):\n dt = h5py.special_dtype(vlen=str)\n f = h5py.File(filename,'w')\n antennaLabels = f.create_dataset(\"datapack/antennas/labels\",(self.Na,),dtype=dt)\n f[\"datapack/antennas\"].attrs['frequency'] = self.radioArray.frequency\n antennas = f.create_dataset(\"datapack/antennas/locs\",(self.Na,3),dtype=np.double)\n antennaLabels[...] = self.antennaLabels\n antennas[:,:] = self.antennas.cartesian.xyz.to(au.m).value.transpose()#to Nax3 in m\n patchNames = f.create_dataset(\"datapack/directions/patchnames\",(self.Nd,),dtype=dt)\n ra = f.create_dataset(\"datapack/directions/ra\",(self.Nd,),dtype=np.double)\n dec = f.create_dataset(\"datapack/directions/dec\",(self.Nd,),dtype=np.double)\n patchNames[...] = self.patchNames\n ra[...] = self.directions.ra.deg\n dec[...] = self.directions.dec.deg\n timestamps = f.create_dataset(\"datapack/times/timestamps\",(self.Nt,),dtype=dt)\n gps = f.create_dataset(\"datapack/times/gps\",(self.Nt,),dtype=np.double)\n timestamps[...] = self.timestamps\n gps[...] 
= self.times.gps\n dtec = f.create_dataset(\"datapack/dtec\",(self.Na,self.Nt,self.Nd),dtype=np.double)\n dtec[:,:,:] = self.dtec\n dtec.attrs['refAnt'] = str(self.refAnt)\n f.close()\n \n def load(self,filename):\n f = h5py.File(filename,'r')\n self.antennaLabels = f[\"datapack/antennas/labels\"][:].astype(str)\n antennas = f[\"datapack/antennas/locs\"][:,:]\n frequency = f[\"datapack/antennas\"].attrs['frequency']\n self.radioArray = RadioArray(antennaPos = antennas,frequency = frequency)\n self.antennas = ac.SkyCoord(antennas[:,0]*au.m,antennas[:,1]*au.m,antennas[:,2]*au.m,frame='itrs')\n self.patchNames = f[\"datapack/directions/patchnames\"][:].astype(str)\n ra = f[\"datapack/directions/ra\"][:]\n dec = f[\"datapack/directions/dec\"][:]\n self.directions = ac.SkyCoord(ra*au.deg,dec*au.deg,frame='icrs')\n self.timestamps = f[\"datapack/times/timestamps\"][:].astype(str)\n self.times = at.Time(self.timestamps,format='isot',scale='tai')\n self.dtec = f[\"datapack/dtec\"][:,:,:]\n self.refAnt = np.array(f[\"datapack/dtec\"].attrs['refAnt']).astype(str).item(0)\n self.Na = len(self.antennas)\n self.Nt = len(self.times)\n self.Nd = len(self.directions)\n self.setReferenceAntenna(self.refAnt)\n f.close()\n \n \n def addDataDict(self,**args):\n '''Set up variables here that will hold references throughout'''\n for attr in args.keys():\n try:\n setattr(self,attr,args[attr])\n except:\n print(\"Failed to set {0} to {1}\".format(attr,args[attr]))\n self.Na = len(self.antennas)\n self.Nt = len(self.times)\n self.Nd = len(self.directions)\n \n def set_dtec(self,dtec,antIdx=[],timeIdx=[], dirIdx=[],refAnt=None):\n '''Set the specified dtec solutions corresponding to the requested indices.\n value of -1 means all.'''\n if antIdx is -1:\n antIdx = np.arange(self.Na)\n if timeIdx is -1:\n timeIdx = np.arange(self.Nt)\n if dirIdx is -1:\n dirIdx = np.arange(self.Nd)\n antIdx = np.sort(antIdx)\n timeIdx = np.sort(timeIdx)\n dirIdx = np.sort(dirIdx)\n Na = len(antIdx)\n Nt = len(timeIdx)\n Nd = len(dirIdx)\n i = 0\n while i < Na:\n j = 0\n while j < Nt:\n k = 0\n while k < Nd:\n self.dtec[antIdx[i],timeIdx[j],dirIdx[k]] = dtec[i,j,k]\n k += 1\n j += 1\n i += 1\n if refAnt is not None:\n self.setReferenceAntenna(refAnt)\n else:\n if self.refAnt is not None:\n self.setReferenceAntenna(self.refAnt)\n \n\n def get_dtec(self,antIdx=[],timeIdx=[], dirIdx=[]):\n '''Retrieve the specified dtec solutions corresponding to the requested indices.\n value of -1 means all.'''\n if antIdx is -1:\n antIdx = np.arange(self.Na)\n if timeIdx is -1:\n timeIdx = np.arange(self.Nt)\n if dirIdx is -1:\n dirIdx = np.arange(self.Nd)\n antIdx = np.sort(antIdx)\n timeIdx = np.sort(timeIdx)\n dirIdx = np.sort(dirIdx)\n Na = len(antIdx)\n Nt = len(timeIdx)\n Nd = len(dirIdx)\n output = np.zeros([Na,Nt,Nd],dtype=np.double)\n i = 0\n while i < Na:\n j = 0\n while j < Nt:\n k = 0\n while k < Nd:\n output[i,j,k] = self.dtec[antIdx[i],timeIdx[j],dirIdx[k]]\n k += 1\n j += 1\n i += 1\n return output\n \n def get_antennas(self,antIdx=[]):\n '''Get the list of antenna locations in itrs'''\n if antIdx is -1:\n antIdx = np.arange(self.Na)\n antIdx = np.sort(antIdx)\n output = self.antennas[antIdx]\n Na = len(antIdx)\n outputLabels = []\n i = 0\n while i < Na:\n outputLabels.append(self.antennaLabels[antIdx[i]])\n i += 1\n return output, outputLabels\n \n def get_times(self,timeIdx=[]):\n '''Get the gps times'''\n if timeIdx is -1:\n timeIdx = np.arange(self.Nt)\n timeIdx = np.sort(timeIdx)\n output = self.times[timeIdx]\n Nt = len(timeIdx)\n 
outputLabels = []\n j = 0\n while j < Nt:\n outputLabels.append(self.timestamps[timeIdx[j]])\n j += 1\n return output, outputLabels\n \n def get_directions(self, dirIdx=[]):\n '''Get the array of directions in itrs'''\n if dirIdx is -1:\n dirIdx = np.arange(self.Nd)\n dirIdx = np.sort(dirIdx)\n output = self.directions[dirIdx]\n Nd = len(dirIdx)\n outputLabels = []\n k = 0\n while k < Nd:\n outputLabels.append(self.patchNames[dirIdx[k]])\n k += 1\n return output, outputLabels\n \n def setReferenceAntenna(self,refAnt):\n if refAnt is None:\n return\n refAntIdx = None\n i = 0\n while i < self.Na:\n if self.antennaLabels[i] == refAnt:\n refAntIdx = i\n break\n i += 1 \n assert refAntIdx is not None, \"{} is not a valid antenna. Choose from {}\".format(refAnt,self.antennaLabels)\n #print(\"Setting refAnt: {}\".format(refAnt))\n self.refAnt = refAnt\n self.dtec = self.dtec - self.dtec[refAntIdx,:,:]\n \n def getCenterDirection(self):\n raMean = np.mean(self.directions.transform_to('icrs').ra)\n decMean = np.mean(self.directions.transform_to('icrs').dec)\n phase = ac.SkyCoord(raMean,decMean,frame='icrs')\n return phase\n\n def findFlaggedAntennas(self):\n '''Determine which antennas are flagged'''\n assert self.refAnt is not None, \"Set a refAnt before finding flagged antennas\"\n mask = np.sum(np.sum(self.dtec,axis=2),axis=1) == 0\n i = 0\n while i < self.Na:\n if self.antennaLabels[i] == self.refAnt:\n refAntIdx = i\n break\n i += 1 \n mask[refAntIdx] = False\n return list(self.antennaLabels[mask])\n \n def flagAntennas(self,antennaLabels):\n '''remove data corresponding to the given antenna names if it exists'''\n assert type(antennaLabels) == type([]), \"{} is not a list of station names. Choose from {}\".format(antennaLabels,self.antennaLabels)\n mask = np.ones(len(self.antennaLabels), dtype=bool)\n antennasFound = 0\n i = 0\n while i < self.Na:\n if self.antennaLabels[i] in antennaLabels:\n antennasFound += 1\n mask[i] = False\n i += 1\n #some flags may have not existed in data\n self.antennaLabels = self.antennaLabels[mask]\n self.antennas = self.antennas[mask]\n self.dtec = self.dtec[mask,:,:]\n self.Na = len(self.antennas)\n \n def flagPatches(self,patchNames):\n '''remove data corresponding to the given antenna names if it exists'''\n assert type(patchNames) == type([]), \"{} is not a list of patch names. Choose from {}\".format(antennaLabels,self.antennaLabels)\n mask = np.ones(len(self.patchNames), dtype=bool)\n patchesFound = 0\n i = 0\n while i < self.Nd:\n if self.patchNames[i] in patchNames:\n patchesFound += 1\n mask[i] = False\n i += 1\n #some flags may have not existed in data\n self.patchNames = self.patchNames[mask]\n self.directions = self.directions[mask]\n self.dtec = self.dtec[:,:,mask]\n self.Nd = len(self.directions)\n \ndef transferPatchData(infoFile, dataFolder, hdf5Out):\n '''transfer old numpy format to hdf5. 
Only run with python 2.7'''\n \n assert os.path.isdir(dataFolder), \"{0} is not a directory\".format(dataFolder)\n dt = h5py.special_dtype(vlen=str)\n f = h5py.File(hdf5Out,\"w\")\n \n info = np.load(infoFile)\n #these define the direction order\n patches = info['patches']#names\n radec = info['directions']#astrpy.icrs\n Nd = len(patches)\n print(\"Loading {} patches\".format(Nd))\n namesds = f.create_dataset(\"dtecObservations/patchNames\",(Nd,),dtype=dt)\n #rads = f.create_dataset(\"dtecObservations/patches/ra\",(Nd,),dtype=np.double)\n #dec = f.create_dataset(\"dtecObservations/patches/dec\",(Nd,),dtype=np.double)\n dset = f['dtecObservations']\n dset.attrs['frequency'] = 150e6\n namesds[...] = patches\n #rads[...] = radec.ra.deg\n #decds[...] = radec.dec.deg\n \n patchIdx = 0\n while patchIdx < Nd:\n patch = patches[patchIdx]\n #find the appropriate file (this will be standardized later)\n files = glob.glob(\"{0}/*_{1}_*.npz\".format(dataFolder,patch))\n if len(files) == 1:\n patchFile = files[0]\n else:\n print('Too many files found. Could not find patch: {0}'.format(patch))\n patchIdx += 1\n continue\n try:\n d = np.load(patchFile)\n print(\"Loading data file: {0}\".format(patchFile))\n except:\n print(\"Failed loading data file: {0}\".format(patchFile))\n return \n if \"dtecObservations/antennaLabels\" not in f:\n antennaLabels = d['antennas']#labels\n Na = len(antennaLabels)\n antennaLabelsds = f.create_dataset(\"dtecObservations/antennaLabels\",(Na,),dtype=dt)\n antennaLabelsds[...] = antennaLabels\n if \"dtecObservations/timestamps\" not in f:\n times = d['times']#gps tai\n timestamps = at.Time(times,format='gps',scale='tai').isot\n Nt = len(times)\n print(len(timestamps[0]))\n timeds = f.create_dataset(\"dtecObservations/timestamps\",(Nt,),dtype=dt)\n timeds[...] = timestamps\n patchds = f.create_dataset(\"dtecObservations/patches/{}\".format(patch),(Nt,Na),dtype=np.double)\n patchds[...] = d['data']\n patchds.attrs['ra'] = radec[patchIdx].ra.deg\n patchds.attrs['dec'] = radec[patchIdx].dec.deg\n patchIdx += 1\n f.close()\n \n \ndef prepareDataPack(hdf5Datafile,timeStart=0,timeEnd=-1,arrayFile='arrays/lofar.hba.antenna.cfg'):\n '''Grab real data from soltions products. 
\n Stores in a DataPack object.'''\n \n f = h5py.File(hdf5Datafile,'r')\n dset = f['dtecObservations']\n frequency = dset.attrs['frequency']\n print(\"Using radio array file: {}\".format(arrayFile))\n #get array stations (they must be in the array file to be considered for data packaging)\n radioArray = RadioArray(arrayFile,frequency=frequency)#set frequency from solutions todo\n print(\"Created {}\".format(radioArray))\n patchNames = f[\"dtecObservations/patchNames\"][:].astype(str)\n Nd = len(patchNames)\n ra = np.zeros(Nd,dtype= np.double)\n dec = np.zeros(Nd,dtype=np.double)\n antennaLabels = f[\"dtecObservations/antennaLabels\"][:].astype(str)\n Na = len(antennaLabels)\n antennas = np.zeros([3,Na],dtype=np.double)\n antIdx = 0#index in solution table\n while antIdx < Na:\n ant = antennaLabels[antIdx]\n labelIdx = radioArray.getAntennaIdx(ant) \n if labelIdx is None:\n print(\"failed to find {} in {}\".format(ant,radioArray.labels))\n return\n #ITRS WGS84\n stationLoc = radioArray.locs[labelIdx]\n antennas[:,antIdx] = stationLoc.cartesian.xyz.to(au.km).value.flatten()\n antIdx += 1\n antennas = ac.SkyCoord(antennas[0,:]*au.km,antennas[1,:]*au.km,\n antennas[2,:]*au.km,frame='itrs')\n timestamps = f[\"dtecObservations/timestamps\"][:].astype(str)\n times = at.Time(timestamps,format=\"isot\",scale='tai')\n Nt = len(timestamps)\n dtec = np.zeros([Na,Nt,Nd],dtype=np.double)\n patchIdx = 0\n while patchIdx < Nd:\n patchName = patchNames[patchIdx]\n patchds = f[\"dtecObservations/patches/{}\".format(patchName)]\n ra[patchIdx] = patchds.attrs['ra']\n dec[patchIdx] = patchds.attrs['dec']\n dtec[:,:,patchIdx] = patchds[:,:].transpose()#from NtxNa to NaxNt\n patchIdx += 1\n f.close()\n directions = ac.SkyCoord(ra*au.deg,dec*au.deg,frame='icrs')\n dataDict = {'radioArray':radioArray,'antennas':antennas,'antennaLabels':antennaLabels,\n 'times':times,'timestamps':timestamps,\n 'directions':directions,'patchNames':patchNames,'dtec':dtec}\n return DataPack(dataDict)\n\ndef interpNearest(x,y,z,x_,y_):\n dx = np.subtract.outer(x_,x)\n dy = np.subtract.outer(y_,y)\n r = dx**2\n dy *= dy\n r += dy\n np.sqrt(r,out=r)\n arg = np.argmin(r,axis=1)\n z_ = z[arg]\n return z_\n\ndef plotDataPack(datapack,antIdx=-1,timeIdx=[0], dirIdx=-1,figname=None,vmin=None,vmax=None):\n assert datapack.refAnt is not None, \"set DataPack refAnt first\"\n directions, patchNames = datapack.get_directions(dirIdx=dirIdx)\n antennas, antLabels = datapack.get_antennas(antIdx=antIdx)\n times,timestamps = datapack.get_times(timeIdx=timeIdx)\n dtec = np.stack([np.mean(datapack.get_dtec(antIdx = antIdx,dirIdx=dirIdx,timeIdx=timeIdx),axis=1)],axis=1)\n Na = len(antennas)\n Nt = len(times)\n Nd = len(directions)\n refAntIdx = None\n for i in range(Na):\n if antLabels[i] == datapack.refAnt:\n refAntIdx = i\n fixtime = times[Nt>>1]\n phase = datapack.getCenterDirection()\n arrayCenter = datapack.radioArray.getCenter()\n uvw = UVW(location = arrayCenter.earth_location,obstime = fixtime,phase = phase)\n ants_uvw = antennas.transform_to(uvw)\n \n dtec = np.stack([np.mean(dtec,axis=1)],axis=1)\n #make plots, M by 4\n M = (Na>>2) + 1 + 1\n fig = plt.figure(figsize=(11.,11./4.*M))\n #use direction average as phase tracking direction\n if vmax is None: \n vmax = np.percentile(dtec.flatten(),99)\n #vmax=np.max(dtec)\n if vmin is None:\n vmin = np.percentile(dtec.flatten(),1)\n #vmin=np.min(dtec)\n \n \n N = 25\n dirs_uvw = directions.transform_to(uvw)\n factor300 = 300./dirs_uvw.w.value\n U,V = 
np.meshgrid(np.linspace(np.min(dirs_uvw.u.value*factor300),np.max(dirs_uvw.u.value*factor300),N),\n np.linspace(np.min(dirs_uvw.v.value*factor300),np.max(dirs_uvw.v.value*factor300),N))\n \n i = 0 \n while i < Na:\n ax = fig.add_subplot(M,4,i+1)\n\n dx = np.sqrt((ants_uvw.u[i] - ants_uvw.u[refAntIdx])**2 + (ants_uvw.v[i] - ants_uvw.v[refAntIdx])**2).to(au.km).value\n ax.annotate(s=\"{} : {:.2g} km\".format(antLabels[i],dx),xy=(.2,.8),xycoords='axes fraction')\n if i == 0:\n #ax.annotate(s=\"{} : {:.2g} km\\n{}\".format(antLabels[i],dx,fixtime.isot),xy=(.2,.8),xycoords='axes fraction')\n #ax.annotate(s=fixtime.isot,xy=(.2,0.05),xycoords='axes fraction')\n ax.set_title(fixtime.isot)\n #ax.set_title(\"Ref. Proj. Dist.: {:.2g} km\".format(dx))\n ax.set_xlabel(\"U km\")\n ax.set_ylabel(\"V km\")\n \n \n \n D = interpNearest(dirs_uvw.u.value*factor300,dirs_uvw.v.value*factor300,dtec[i,0,:],U.flatten(),V.flatten()).reshape(U.shape)\n im = ax.imshow(D,origin='lower',extent=(np.min(U),np.max(U),np.min(V),np.max(V)),aspect='auto',\n vmin = vmin, vmax= vmax,cmap=plt.cm.coolwarm,alpha=1.)\n sc1 = ax.scatter(dirs_uvw.u.value*factor300,dirs_uvw.v.value*factor300, c='black',\n marker='+')\n i += 1\n ax = fig.add_subplot(M,4,Na+1)\n plt.colorbar(im,cax=ax,orientation='vertical')\n if figname is not None:\n plt.savefig(\"{}.png\".format(figname),format='png')\n else:\n plt.show()\n plt.close()\n \ndef test_plotDataPack():\n datapack = DataPack(filename=\"output/test/datapackObs.hdf5\")\n try:\n os.makedirs('output/test/plotDataPack')\n except:\n pass\n plotDataPack(datapack,antIdx=-1,timeIdx=[0,1,2,3], dirIdx=-1,figname=None)#'output/test/plotDataPack/fig')\n\ndef test_prepareDataPack():\n dataPack = prepareDataPack('SB120-129/dtecData.hdf5',timeStart=0,timeEnd=-1,\n arrayFile='arrays/lofar.hba.antenna.cfg')\n dataPack.flagAntennas(['CS007HBA1','CS007HBA0','CS013HBA0','CS013HBA1'])\n dataPack.setReferenceAntenna(dataPack.antennaLabels[0])\n #'CS501HBA1'\n dataPack.save(\"output/test/datapackObs.hdf5\")\n\nif __name__ == '__main__':\n #transferPatchData(infoFile='SB120-129/WendysBootes.npz', \n # dataFolder='SB120-129/', \n # hdf5Out='SB120-129/dtecData.hdf5')\n test_plotDataPack()", "_____no_output_____" ] ] ]
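A minimal sketch of the flat datum-index convention implemented by getDatumIdx/getDatum in the code above (antenna index varying fastest, then time, then direction). The helper names here are illustrative rather than taken from the notebook, and floor division is used so the recovered indices come back as integers under Python 3:

def datum_idx(ant, time, direction, n_ant, n_time):
    # Mirrors getDatumIdx: flatten (antenna, time, direction) into one index.
    return ant + n_ant * (time + n_time * direction)

def datum_unpack(idx, n_ant, n_time):
    # Mirrors getDatum, but with integer (floor) division.
    ant = idx % n_ant
    time = (idx // n_ant) % n_time
    direction = idx // (n_ant * n_time)
    return ant, time, direction

# Round-trip check over a small grid of antennas, times and directions.
n_ant, n_time, n_dir = 5, 7, 3
for a in range(n_ant):
    for t in range(n_time):
        for d in range(n_dir):
            assert datum_unpack(datum_idx(a, t, d, n_ant, n_time), n_ant, n_time) == (a, t, d)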
[ "code" ]
[ [ "code" ] ]
cb418b5f23b99b5349eb0315a3c26fdd92ec93e5
300,332
ipynb
Jupyter Notebook
training/sigtrainer_3prong_rnd_26_28_inputs.ipynb
violatingcp/QUASAR
60d1c00d0c461bc706631d4210e31a80d1a3c482
[ "MIT" ]
1
2020-05-27T20:18:15.000Z
2020-05-27T20:18:15.000Z
training/sigtrainer_3prong_rnd_26_28_inputs.ipynb
violatingcp/QUASAR
60d1c00d0c461bc706631d4210e31a80d1a3c482
[ "MIT" ]
3
2021-03-19T13:53:32.000Z
2022-03-12T00:38:06.000Z
training/sigtrainer_3prong_rnd_26_28_inputs.ipynb
violatingcp/QUASAR
60d1c00d0c461bc706631d4210e31a80d1a3c482
[ "MIT" ]
3
2020-05-11T08:30:01.000Z
2020-10-06T17:35:25.000Z
74.989263
36,584
0.779827
[ [ [ "# Signal Autoencoder", "_____no_output_____" ] ], [ [ "import numpy as np\nimport scipy as sp\nimport scipy.stats\nimport itertools\nimport logging\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport torch.utils.data as utils\nimport math\nimport time\nimport tqdm\n\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom argparse import ArgumentParser\nfrom torch.distributions import MultivariateNormal\n\nimport torch.nn as nn\nimport torch.nn.init as init\nimport sys\nsys.path.append(\"../new_flows\")\nfrom flows import RealNVP, Planar, MAF\nfrom models import NormalizingFlowModel", "_____no_output_____" ], [ "####MAF \nclass VAE_NF(nn.Module):\n def __init__(self, K, D):\n super().__init__()\n self.dim = D\n self.K = K\n self.encoder = nn.Sequential(\n nn.Linear(12, 50),\n nn.LeakyReLU(True),\n nn.Linear(50, 30),\n nn.LeakyReLU(True),\n nn.Linear(30, 20),\n nn.LeakyReLU(True),\n nn.Linear(20, D * 2)\n )\n\n self.decoder = nn.Sequential(\n nn.Linear(D, 20),\n nn.LeakyReLU(True),\n nn.Linear(20, 30),\n nn.LeakyReLU(True),\n nn.Linear(30, 50),\n nn.LeakyReLU(True),\n nn.Linear(50, 12)\n )\n \n flow_init = MAF(dim=D)\n flows_init = [flow_init for _ in range(K)]\n prior = MultivariateNormal(torch.zeros(D).cuda(), torch.eye(D).cuda())\n self.flows = NormalizingFlowModel(prior, flows_init)\n\n def forward(self, x):\n # Run Encoder and get NF params\n enc = self.encoder(x)\n mu = enc[:, :self.dim]\n log_var = enc[:, self.dim: self.dim * 2]\n\n # Re-parametrize\n sigma = (log_var * .5).exp()\n z = mu + sigma * torch.randn_like(sigma)\n kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())\n # Construct more expressive posterior with NF\n \n z_k, _, sum_ladj = self.flows(z)\n \n kl_div = kl_div / x.size(0) - sum_ladj.mean() # mean over batch\n\n # Run Decoder\n x_prime = self.decoder(z_k)\n return x_prime, kl_div", "_____no_output_____" ], [ "#prong_2 = pd.read_hdf(\"/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5\")\n#prong_3 = pd.read_hdf(\"/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5\")", "_____no_output_____" ], [ "rnd_data = pd.read_hdf(\"/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5\")\n\ntestprior_data = pd.read_hdf(\"/data/t3home000/spark/QUASAR/preprocessing/ThreeProng_5000_500_500.h5\")", "_____no_output_____" ], [ "dt = rnd_data.values\ndt_prior = testprior_data.values\n\ncorrect = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)\ndt = dt[correct]\n\ncorrect = (dt_prior[:,3]>0) &(dt_prior[:,19]>0) & (dt_prior[:,1]>0) & (dt_prior[:,2]>0)\ndt_prior = dt_prior[correct]\n\nfor i in range(13,19):\n dt[:,i] = dt[:,i]/dt[:,3]\n\nfor i in range(29,35):\n dt[:,i] = dt[:,i]/(dt[:,19])\n\nfor i in range(13,19):\n dt_prior[:,i] = dt_prior[:,i]/dt_prior[:,3]\n\nfor i in range(29,35):\n dt_prior[:,i] = dt_prior[:,i]/(dt_prior[:,19])\n#correct = (dt[:,16]>0) & (dt[:,29]>=0) & (dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)\n#dt = dt[correct]\n\n\n#Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]\n#Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]] # When no jet 1,2 raw mass included\n#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]\n#bkg_idx = np.where(idx==0)[0]\n##signal_idx = np.where((idx==1)) [0]\n#dt = dt[signal_idx]\ncorrect = (dt[:,0]>=2800)\ndt = dt[correct] \n\ncorrect = 
(dt_prior[:,0]>=2800)\ndt_prior = dt_prior[correct] \n\nidx = dt[:,-1]\n\n#sig_idx = np.where((dt_prior[:,3]>450) & (dt_prior[:,3]<550) & (dt_prior[:,19]>100) & (dt_prior[:,19]<200) & (dt_prior[:,0]>4200) & (dt_prior[:,0]<4800))[0]\nsig_idx = np.where((dt_prior[:,3]>200) & (dt_prior[:,19]>200) & (dt_prior[:,0]>4000) & (dt_prior[:,0]<6000))[0]\n\nbkg_idx = np.where(idx==0)[0]\n#bsmlike = np.where(dt[:,16]>0.9)[0]\n#dt = dt[bsmlike]\n\ndt_sig = dt_prior[sig_idx]\ndt_bkg = dt[bkg_idx]\n", "_____no_output_____" ], [ "#dt = prong_2.values\n#correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)\n#dt = dt[correct]#\n\n#for i in range(13,19):\n# dt[:,i] = dt[:,i]/dt[:,3]#\n\n#for i in range(29,35):\n# dt[:,i] = dt[:,i]/(dt[:,19])#\n\n##correct = (dt[:,16]>0) & (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)\n##dt = dt[correct]#\n#\n\n##Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]\n##Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]] # When no jet 1,2 raw mass included\n##Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]\n#idx = dt[:,-1]\n#bkg_idx = np.where(idx==0)[0]\n#sig_idx = np.where((idx==1) & (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>50) & (dt[:,19]<150))[0]\n##signal_idx = np.where((idx==1)) [0]\n#dt_sig = dt[sig_idx]\n# \n#sig_refine_range = (dt_sig[:,0]>3400) & (dt_sig[:,0]<3600)\n#dt_sig = dt_sig[sig_refine_range]", "_____no_output_____" ], [ "dt_sig.shape", "_____no_output_____" ], [ "plt.hist(dt_sig[:,0],bins=np.arange(0,8000,50));", "_____no_output_____" ], [ "f.columns[[3,4,5,6,11,12,19,20,21,22,27,28]]", "_____no_output_____" ], [ "#Y = dt_sig[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]]\n#[3,4,5,6,11,12,19,20,21,22,27,28]\nY = dt_sig[:,[3,4,5,6,11,12,19,20,21,22,27,28]]", "_____no_output_____" ], [ "Y.shape", "_____no_output_____" ], [ "#if nprong == 3:\n# dt = prong_3.values\n# correct = (dt[:,3]>20) &(dt[:,19]>20)\n# dt = dt[correct]\n# for i in range(13,19):\n# dt[:,i] = dt[:,i]/dt[:,3]\n \n# for i in range(29,35):\n# dt[:,i] = dt[:,i]/(dt[:,19])\n \n# correct = (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)\n# dt = dt[correct] \n \n# Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]]\n# #Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]\n# idx = dt[:,-1]\n# bkg_idx = np.where(idx==0)[0]\n# signal_idx = np.where((idx==1) & (dt[:,3]>400))[0]\n# #signal_idx = np.where((idx==1)) [0]\n# Y = Y[signal_idx]", "_____no_output_____" ], [ "bins = np.linspace(0,1,100)\nbins.shape\ncolumn = 3\n#print(f_rnd.columns[column])\nplt.hist(Y[:,0],bins,alpha=0.5,color='b');\n#plt.hist(sigout[:,column],bins,alpha=0.5,color='r');\n#plt.hist(out2[:,column],bins,alpha=0.5,color='g');\n#plt.axvline(np.mean(Y[:,column]))", "_____no_output_____" ], [ "Y.shape", "_____no_output_____" ], [ "sig_mean = []\nsig_std = []\nfor i in range(12):\n mean = np.mean(Y[:,i])\n std = np.std(Y[:,i])\n sig_mean.append(mean)\n sig_std.append(std)\n Y[:,i] = (Y[:,i]-mean)/std", "_____no_output_____" ], [ "sig_mean", "_____no_output_____" ], [ "sig_std", "_____no_output_____" ], [ "total_sig = torch.tensor(Y)", "_____no_output_____" ], [ "total_sig.shape", "_____no_output_____" ], [ "bins = np.linspace(-3,3,100)\nbins.shape\ncolumn = 
0\n#print(f_rnd.columns[column])\nplt.hist(Y[:,11],bins,alpha=0.5,color='b');\n#plt.hist(sigout[:,column],bins,alpha=0.5,color='r');\n#plt.hist(out2[:,column],bins,alpha=0.5,color='g');\n#plt.axvline(np.mean(Y[:,column]))", "_____no_output_____" ], [ "N_EPOCHS = 30\nPRINT_INTERVAL = 2000\nNUM_WORKERS = 4\nLR = 1e-4\n\n#N_FLOWS = 6 \n#Z_DIM = 8\n\nN_FLOWS = 10\nZ_DIM = 8\n\nn_steps = 0", "_____no_output_____" ], [ "sigmodel = VAE_NF(N_FLOWS, Z_DIM).cuda()", "_____no_output_____" ], [ "bs = 800\nsig_train_iterator = utils.DataLoader(total_sig, batch_size=bs, shuffle=True) \nsig_test_iterator = utils.DataLoader(total_sig, batch_size=bs)", "_____no_output_____" ], [ "sigoptimizer = optim.Adam(sigmodel.parameters(), lr=1e-6)", "_____no_output_____" ], [ "beta = 1", "_____no_output_____" ], [ "def sigtrain():\n global n_steps\n train_loss = []\n sigmodel.train()\n\n for batch_idx, x in enumerate(sig_train_iterator):\n start_time = time.time()\n \n x = x.float().cuda()\n\n x_tilde, kl_div = sigmodel(x)\n mseloss = nn.MSELoss(size_average=False)\n huberloss = nn.SmoothL1Loss(size_average=False)\n #loss_recons = F.binary_cross_entropy(x_tilde, x, size_average=False) / x.size(0)\n loss_recons = mseloss(x_tilde,x ) / x.size(0)\n #loss_recons = huberloss(x_tilde,x ) / x.size(0)\n loss = loss_recons + beta* kl_div\n\n sigoptimizer.zero_grad()\n loss.backward()\n sigoptimizer.step()\n\n train_loss.append([loss_recons.item(), kl_div.item()])\n\n if (batch_idx + 1) % PRINT_INTERVAL == 0:\n print('\\tIter [{}/{} ({:.0f}%)]\\tLoss: {} Time: {:5.3f} ms/batch'.format(\n batch_idx * len(x), 50000,\n PRINT_INTERVAL * batch_idx / 50000,\n np.asarray(train_loss)[-PRINT_INTERVAL:].mean(0),\n 1000 * (time.time() - start_time)\n ))\n\n n_steps += 1", "_____no_output_____" ], [ "def sigevaluate(split='valid'):\n global n_steps\n start_time = time.time()\n val_loss = []\n sigmodel.eval()\n\n with torch.no_grad():\n for batch_idx, x in enumerate(sig_test_iterator):\n \n x = x.float().cuda()\n\n x_tilde, kl_div = sigmodel(x)\n mseloss = nn.MSELoss(size_average=False)\n huberloss = nn.SmoothL1Loss(size_average=False)\n #loss_recons = F.binary_cross_entropy(x_tilde, x, size_average=False) / x.size(0)\n loss_recons = mseloss(x_tilde,x ) / x.size(0)\n #loss_recons = huberloss(x_tilde,x ) / x.size(0)\n loss = loss_recons + beta * kl_div\n\n val_loss.append(loss.item())\n #writer.add_scalar('loss/{}/ELBO'.format(split), loss.item(), n_steps)\n #writer.add_scalar('loss/{}/reconstruction'.format(split), loss_recons.item(), n_steps)\n #writer.add_scalar('loss/{}/KL'.format(split), kl_div.item(), n_steps)\n\n print('\\nEvaluation Completed ({})!\\tLoss: {:5.4f} Time: {:5.3f} s'.format(\n split,\n np.asarray(val_loss).mean(0),\n time.time() - start_time\n ))\n return np.asarray(val_loss).mean(0)", "_____no_output_____" ], [ "ae_def = {\n \"type\":\"sig\",\n \"trainon\":\"3prong\",\n \"features\":\"12features\",\n \"architecture\":\"MAF\",\n \"selection\":\"mjj4500_nojetmasscut\",\n \"trainloss\":\"MSELoss\",\n \"beta\":\"beta1\",\n \"zdimnflow\":\"z8f10\",\n}", "_____no_output_____" ], [ "ae_def", "_____no_output_____" ], [ "#from torchsummary import summary\n", "_____no_output_____" ], [ "sigmodel.load_state_dict(torch.load(f\"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5\"))", "_____no_output_____" ], [ "N_EPOCHS = 10\nBEST_LOSS = 0\nLAST_SAVED = -1\nPATIENCE_COUNT = 
0\nPATIENCE_LIMIT = 5\nfor epoch in range(1, 1000):\n print(\"Epoch {}:\".format(epoch))\n sigtrain()\n cur_loss = sigevaluate()\n\n if cur_loss <= BEST_LOSS:\n PATIENCE_COUNT = 0\n BEST_LOSS = cur_loss\n LAST_SAVED = epoch\n print(\"Saving model!\")\n torch.save(sigmodel.state_dict(),f\"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5\")\n \n else:\n PATIENCE_COUNT += 1\n print(\"Not saving model! Last saved: {}\".format(LAST_SAVED))\n if PATIENCE_COUNT > 10:\n print(\"Patience Limit Reached\")\n break ", "Epoch 1:\n\nEvaluation Completed (valid)!\tLoss: -83.6661 Time: 0.526 s\nSaving model!\nEpoch 2:\n\nEvaluation Completed (valid)!\tLoss: -83.6874 Time: 0.523 s\nSaving model!\nEpoch 3:\n\nEvaluation Completed (valid)!\tLoss: -83.7292 Time: 0.522 s\nSaving model!\nEpoch 4:\n\nEvaluation Completed (valid)!\tLoss: -83.6624 Time: 0.524 s\nNot saving model! Last saved: 3\nEpoch 5:\n\nEvaluation Completed (valid)!\tLoss: -83.7086 Time: 0.524 s\nNot saving model! Last saved: 3\nEpoch 6:\n\nEvaluation Completed (valid)!\tLoss: -83.7434 Time: 0.523 s\nSaving model!\nEpoch 7:\n\nEvaluation Completed (valid)!\tLoss: -83.8253 Time: 0.523 s\nSaving model!\nEpoch 8:\n\nEvaluation Completed (valid)!\tLoss: -83.8497 Time: 0.528 s\nSaving model!\nEpoch 9:\n\nEvaluation Completed (valid)!\tLoss: -83.8737 Time: 0.525 s\nSaving model!\nEpoch 10:\n\nEvaluation Completed (valid)!\tLoss: -83.9343 Time: 0.524 s\nSaving model!\nEpoch 11:\n\nEvaluation Completed (valid)!\tLoss: -83.8383 Time: 0.524 s\nNot saving model! Last saved: 10\nEpoch 12:\n\nEvaluation Completed (valid)!\tLoss: -83.9355 Time: 0.523 s\nSaving model!\nEpoch 13:\n\nEvaluation Completed (valid)!\tLoss: -84.0091 Time: 0.525 s\nSaving model!\nEpoch 14:\n\nEvaluation Completed (valid)!\tLoss: -84.0233 Time: 0.524 s\nSaving model!\nEpoch 15:\n\nEvaluation Completed (valid)!\tLoss: -84.0788 Time: 0.548 s\nSaving model!\nEpoch 16:\n\nEvaluation Completed (valid)!\tLoss: -84.0712 Time: 0.522 s\nNot saving model! Last saved: 15\nEpoch 17:\n\nEvaluation Completed (valid)!\tLoss: -84.1013 Time: 0.525 s\nSaving model!\nEpoch 18:\n\nEvaluation Completed (valid)!\tLoss: -84.0499 Time: 0.522 s\nNot saving model! Last saved: 17\nEpoch 19:\n\nEvaluation Completed (valid)!\tLoss: -84.1685 Time: 0.524 s\nSaving model!\nEpoch 20:\n\nEvaluation Completed (valid)!\tLoss: -84.1143 Time: 0.523 s\nNot saving model! Last saved: 19\nEpoch 21:\n\nEvaluation Completed (valid)!\tLoss: -84.1831 Time: 0.523 s\nSaving model!\nEpoch 22:\n\nEvaluation Completed (valid)!\tLoss: -84.1948 Time: 0.524 s\nSaving model!\nEpoch 23:\n\nEvaluation Completed (valid)!\tLoss: -84.3285 Time: 0.523 s\nSaving model!\nEpoch 24:\n\nEvaluation Completed (valid)!\tLoss: -84.2939 Time: 0.549 s\nNot saving model! Last saved: 23\nEpoch 25:\n\nEvaluation Completed (valid)!\tLoss: -84.2259 Time: 0.525 s\nNot saving model! Last saved: 23\nEpoch 26:\n\nEvaluation Completed (valid)!\tLoss: -84.3255 Time: 0.525 s\nNot saving model! Last saved: 23\nEpoch 27:\n\nEvaluation Completed (valid)!\tLoss: -84.3075 Time: 0.526 s\nNot saving model! Last saved: 23\nEpoch 28:\n\nEvaluation Completed (valid)!\tLoss: -84.2432 Time: 0.523 s\nNot saving model! 
Last saved: 23\nEpoch 29:\n\nEvaluation Completed (valid)!\tLoss: -84.3690 Time: 0.524 s\nSaving model!\nEpoch 30:\n\nEvaluation Completed (valid)!\tLoss: -84.3957 Time: 0.524 s\nSaving model!\nEpoch 31:\n\nEvaluation Completed (valid)!\tLoss: -84.4141 Time: 0.525 s\nSaving model!\nEpoch 32:\n\nEvaluation Completed (valid)!\tLoss: -84.2884 Time: 0.523 s\nNot saving model! Last saved: 31\nEpoch 33:\n\nEvaluation Completed (valid)!\tLoss: -84.4528 Time: 0.524 s\nSaving model!\nEpoch 34:\n\nEvaluation Completed (valid)!\tLoss: -84.4351 Time: 0.525 s\nNot saving model! Last saved: 33\nEpoch 35:\n\nEvaluation Completed (valid)!\tLoss: -84.4174 Time: 0.525 s\nNot saving model! Last saved: 33\nEpoch 36:\n\nEvaluation Completed (valid)!\tLoss: -84.4811 Time: 0.523 s\nSaving model!\nEpoch 37:\n\nEvaluation Completed (valid)!\tLoss: -84.3799 Time: 0.526 s\nNot saving model! Last saved: 36\nEpoch 38:\n\nEvaluation Completed (valid)!\tLoss: -84.4219 Time: 0.524 s\nNot saving model! Last saved: 36\nEpoch 39:\n\nEvaluation Completed (valid)!\tLoss: -84.4673 Time: 0.524 s\nNot saving model! Last saved: 36\nEpoch 40:\n\nEvaluation Completed (valid)!\tLoss: -84.4770 Time: 0.525 s\nNot saving model! Last saved: 36\nEpoch 41:\n\nEvaluation Completed (valid)!\tLoss: -84.5726 Time: 0.530 s\nSaving model!\nEpoch 42:\n\nEvaluation Completed (valid)!\tLoss: -84.6252 Time: 0.523 s\nSaving model!\nEpoch 43:\n\nEvaluation Completed (valid)!\tLoss: -84.5784 Time: 0.525 s\nNot saving model! Last saved: 42\nEpoch 44:\n\nEvaluation Completed (valid)!\tLoss: -84.5309 Time: 0.525 s\nNot saving model! Last saved: 42\nEpoch 45:\n\nEvaluation Completed (valid)!\tLoss: -84.6479 Time: 0.524 s\nSaving model!\nEpoch 46:\n\nEvaluation Completed (valid)!\tLoss: -84.6093 Time: 0.525 s\nNot saving model! Last saved: 45\nEpoch 47:\n\nEvaluation Completed (valid)!\tLoss: -84.6306 Time: 0.525 s\nNot saving model! Last saved: 45\nEpoch 48:\n\nEvaluation Completed (valid)!\tLoss: -84.5857 Time: 0.524 s\nNot saving model! Last saved: 45\nEpoch 49:\n\nEvaluation Completed (valid)!\tLoss: -84.6989 Time: 0.524 s\nSaving model!\nEpoch 50:\n\nEvaluation Completed (valid)!\tLoss: -84.6645 Time: 0.525 s\nNot saving model! Last saved: 49\nEpoch 51:\n\nEvaluation Completed (valid)!\tLoss: -84.6914 Time: 0.523 s\nNot saving model! Last saved: 49\nEpoch 52:\n\nEvaluation Completed (valid)!\tLoss: -84.7606 Time: 0.525 s\nSaving model!\nEpoch 53:\n\nEvaluation Completed (valid)!\tLoss: -84.7538 Time: 0.525 s\nNot saving model! Last saved: 52\nEpoch 54:\n\nEvaluation Completed (valid)!\tLoss: -84.6890 Time: 0.525 s\nNot saving model! Last saved: 52\nEpoch 55:\n\nEvaluation Completed (valid)!\tLoss: -84.8624 Time: 0.528 s\nSaving model!\nEpoch 56:\n\nEvaluation Completed (valid)!\tLoss: -84.8077 Time: 0.528 s\nNot saving model! Last saved: 55\nEpoch 57:\n\nEvaluation Completed (valid)!\tLoss: -84.8452 Time: 0.524 s\nNot saving model! Last saved: 55\nEpoch 58:\n\nEvaluation Completed (valid)!\tLoss: -84.8111 Time: 0.524 s\nNot saving model! Last saved: 55\nEpoch 59:\n\nEvaluation Completed (valid)!\tLoss: -84.8266 Time: 0.526 s\nNot saving model! Last saved: 55\nEpoch 60:\n\nEvaluation Completed (valid)!\tLoss: -84.8896 Time: 0.523 s\nSaving model!\nEpoch 61:\n\nEvaluation Completed (valid)!\tLoss: -84.8817 Time: 0.526 s\nNot saving model! Last saved: 60\nEpoch 62:\n\nEvaluation Completed (valid)!\tLoss: -84.9329 Time: 0.523 s\nSaving model!\nEpoch 63:\n\nEvaluation Completed (valid)!\tLoss: -84.9103 Time: 0.524 s\nNot saving model! 
Last saved: 62\nEpoch 64:\n\nEvaluation Completed (valid)!\tLoss: -84.9120 Time: 0.523 s\nNot saving model! Last saved: 62\nEpoch 65:\n\nEvaluation Completed (valid)!\tLoss: -84.9485 Time: 0.524 s\nSaving model!\nEpoch 66:\n\nEvaluation Completed (valid)!\tLoss: -84.9833 Time: 0.528 s\nSaving model!\nEpoch 67:\n\nEvaluation Completed (valid)!\tLoss: -84.9883 Time: 0.524 s\nSaving model!\nEpoch 68:\n\nEvaluation Completed (valid)!\tLoss: -85.0543 Time: 0.548 s\nSaving model!\nEpoch 69:\n\nEvaluation Completed (valid)!\tLoss: -85.0478 Time: 0.524 s\nNot saving model! Last saved: 68\nEpoch 70:\n\nEvaluation Completed (valid)!\tLoss: -85.0097 Time: 0.523 s\nNot saving model! Last saved: 68\nEpoch 71:\n\nEvaluation Completed (valid)!\tLoss: -85.0449 Time: 0.525 s\nNot saving model! Last saved: 68\nEpoch 72:\n\nEvaluation Completed (valid)!\tLoss: -85.0922 Time: 0.524 s\nSaving model!\nEpoch 73:\n\nEvaluation Completed (valid)!\tLoss: -85.1137 Time: 0.524 s\nSaving model!\nEpoch 74:\n\nEvaluation Completed (valid)!\tLoss: -85.0995 Time: 0.523 s\nNot saving model! Last saved: 73\nEpoch 75:\n\nEvaluation Completed (valid)!\tLoss: -85.2748 Time: 0.524 s\nSaving model!\nEpoch 76:\n\nEvaluation Completed (valid)!\tLoss: -85.1871 Time: 0.525 s\nNot saving model! Last saved: 75\nEpoch 77:\n\nEvaluation Completed (valid)!\tLoss: -85.1829 Time: 0.549 s\nNot saving model! Last saved: 75\nEpoch 78:\n\nEvaluation Completed (valid)!\tLoss: -85.1584 Time: 0.525 s\nNot saving model! Last saved: 75\nEpoch 79:\n\nEvaluation Completed (valid)!\tLoss: -85.2684 Time: 0.526 s\nNot saving model! Last saved: 75\nEpoch 80:\n\nEvaluation Completed (valid)!\tLoss: -85.2316 Time: 0.525 s\nNot saving model! Last saved: 75\nEpoch 81:\n\nEvaluation Completed (valid)!\tLoss: -85.2908 Time: 0.525 s\nSaving model!\nEpoch 82:\n\nEvaluation Completed (valid)!\tLoss: -85.4306 Time: 0.524 s\nSaving model!\nEpoch 83:\n\nEvaluation Completed (valid)!\tLoss: -85.3819 Time: 0.524 s\nNot saving model! Last saved: 82\nEpoch 84:\n\nEvaluation Completed (valid)!\tLoss: -85.2134 Time: 0.525 s\nNot saving model! Last saved: 82\nEpoch 85:\n\nEvaluation Completed (valid)!\tLoss: -85.4174 Time: 0.526 s\nNot saving model! Last saved: 82\nEpoch 86:\n\nEvaluation Completed (valid)!\tLoss: -85.4058 Time: 0.546 s\nNot saving model! Last saved: 82\nEpoch 87:\n\nEvaluation Completed (valid)!\tLoss: -85.3996 Time: 0.526 s\nNot saving model! 
Last saved: 82\nEpoch 88:\n" ], [ "sigmodel.load_state_dict(torch.load(f\"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5\"))", "_____no_output_____" ], [ "sigout = sigmodel(torch.tensor(Y).float().cuda())[0]\nsigout = sigout.data.cpu().numpy()", "_____no_output_____" ], [ "bins = np.linspace(-3,3,100)\nbins.shape\ncolumn = 3\n#print(f_rnd.columns[column]\nplt.hist(Y[:,column],bins,alpha=0.5,color='b');\nplt.hist(sigout[:,column],bins,alpha=0.5,color='r');\n#plt.hist(out2[:,column],bins,alpha=0.5,color='g');\nplt.axvline(np.mean(Y[:,column]))", "_____no_output_____" ], [ "mjj, j1mass, j2mass = [4000, 150, 150]\nf = pd.read_hdf(f\"/data/t3home000/spark/QUASAR/preprocessing/delphes_output_{mjj}_{j1mass}_{j2mass}.h5\")\ndt = f.values\ncorrect = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)\ndt = dt[correct]\nfor i in range(13,19):\n dt[:,i] = dt[:,i]/dt[:,3]\n\nfor i in range(29,35):\n dt[:,i] = dt[:,i]/(dt[:,19])\n\ncorrect = (dt[:,0]>mjj-300) & (dt[:,0]<mjj+300)\ndt = dt[correct]\n\ncorrect = (dt[:,3]>j1mass-100) & (dt[:,3]<j1mass+100) & (dt[:,19]>j2mass-100) & (dt[:,19]<j2mass+100)\ndt = dt[correct]\n\n\nY = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]", "_____no_output_____" ], [ "for i in range(12):\n Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i]", "_____no_output_____" ], [ "sigout = sigmodel(torch.tensor(Y).float().cuda())[0]\nsigout = sigout.data.cpu().numpy()", "_____no_output_____" ], [ "bins = np.linspace(-3,3,101)\nbins.shape\ncolumn = 2\n#print(f_rnd.columns[column]\n#plt.hist(dt[:,column],bins,alpha=0.5,color='b');\nplt.hist(sigout[:,column],bins,alpha=0.5,color='r');\nplt.hist(Y[:,column],bins,alpha=0.5,color='g');\n#plt.axvline(np.mean(Y[:,column]))", "_____no_output_____" ], [ "bins = np.linspace(-3,3,100)\nbins.shape\ncolumn = 5\n#print(f_rn.columns[column]\nplt.hist(Y[:,column],bins,alpha=0.5,color='b');\nplt.hist(sigout[:,column],bins,alpha=0.5,color='r');\n#plt.hist(out2[:,column],bins,alpha=0.5,color='g');\nplt.axvline(np.mean(Y[:,column]))", "_____no_output_____" ], [ "varyj1mass_wps = ([4000, 150, 150],[4000, 300, 150],[4000, 450, 150],[4000, 500, 150],[4000, 650, 150],[4000, 700, 150],[4000, 850, 150],[4000, 900, 150])", "_____no_output_____" ], [ "for mjj, j1mass, j2mass in varyj1mass_wps:\n \n f = pd.read_hdf(f\"/data/t3home000/spark/QUASAR/preprocessing/delphes_output_{mjj}_{j1mass}_{j2mass}.h5\")\n dt = f.values\n correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)\n dt = dt[correct]\n for i in range(13,19):\n dt[:,i] = dt[:,i]/dt[:,3]\n\n for i in range(29,35):\n dt[:,i] = dt[:,i]/(dt[:,19])\n \n correct = (dt[:,0]>mjj-300) & (dt[:,0]<mjj+300)\n dt = dt[correct]\n\n correct = (dt[:,3]>j1mass-100) & (dt[:,3]<j1mass+100) & (dt[:,19]>j2mass-100) & (dt[:,19]<j2mass+100)\n dt = dt[correct]\n\n \n Y = dt[:,[4,5,6,11,12,20,21,22,27,28]]\n #Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]\n #Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]\n #Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]\n \n\n \n \n \n print(Y.shape)\n for i in range(10):\n Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i]\n \n total_bb_test = torch.tensor(Y)\n #huberloss = nn.SmoothL1Loss(reduction='none')\n sigae_bbloss = torch.mean((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()\n bbvar = 
torch.var((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()\n waic = sigae_bbloss + bbvar\n #sigae_bbloss = torch.mean(huberloss(model(total_bb_test.float().cuda())[0],total_bb_test.float().cuda()),dim=1).data.cpu().numpy()\n print(waic[0:10])\n plt.hist(waic,bins=np.linspace(0,10,1001),density=True);\n plt.xlim([0,2])\n #np.save(out_file_waic,waic)\n np.save(f'sigaetestprior4500500150_wp_{mjj}_{j1mass}_{j2mass}.npy',sigae_bbloss)", "(50885, 10)\n[1.2562833 1.0150101 4.63527 3.1353111 3.1270933 2.5498352\n 0.98916984 3.3142097 0.5134599 2.5206301 ]\n(49079, 10)\n[0.8910228 0.4618418 1.2828003 1.0181931 0.7241632 0.6351117\n 1.7559447 0.3825097 0.9646135 0.95724446]\n(40305, 10)\n[0.5540186 1.2050751 0.3490556 1.8467045 0.75147474 0.671209\n 1.4278493 1.7209883 0.83306026 1.0873885 ]\n(36079, 10)\n[0.4340822 7.6371846 1.1879708 0.43213457 0.3415988 0.5919516\n 0.70254576 0.65806735 2.201469 0.5131256 ]\n(20364, 10)\n[0.5014051 1.0378898 0.6118281 0.4327641 1.1696905 0.35628533\n 0.48016113 0.14187819 0.88088095 0.4277892 ]\n(14582, 10)\n[1.0281534 1.0762899 0.7548882 0.17842868 0.44157785 1.3323195\n 0.7724545 0.33537352 0.5283326 0.60084414]\n(1110, 10)\n[1.4535477 0.2469969 3.3151753 0.22895774 0.8264796 2.600779\n 1.0788665 0.63972807 1.0814985 1.4302014 ]\n(305, 10)\n[1.5050468 1.4343903 2.8387651 1.0893123 1.8100127 0.6711144 5.2034693\n 1.4315116 1.7810924 1.0626707]\n" ], [ "losslist = []\nfor mjj, j1mass, j2mass in varyj1mass_wps:\n\n a = np.load(f'sigae_wp_{mjj}_{j1mass}_{j2mass}.npy')\n losslist.append(a)\n ", "_____no_output_____" ], [ "losslist[1]", "_____no_output_____" ], [ "plt.hist(losslist[0],bins = np.arange(0,10,.1),alpha=0.2);\nplt.hist(losslist[1],bins = np.arange(0,10,.1),alpha=0.2);\nplt.hist(losslist[2],bins = np.arange(0,10,.1),alpha=0.2);\nplt.hist(losslist[3],bins = np.arange(0,10,.1),alpha=0.2);\nplt.hist(losslist[4],bins = np.arange(0,10,.1),alpha=0.2);", "_____no_output_____" ], [ "inputlist = [\n '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5',\n '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB2.h5',\n '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB3.h5',\n '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_background.h5',\n '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5',\n '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5', \n '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5',\n '/data/t3home000/spark/QUASAR/preprocessing/delphes_output_4500_500_150.h5'\n \n]", "_____no_output_____" ], [ "ae_def", "_____no_output_____" ], [ "outputlist_waic = [\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb1.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb2.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb3.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_purebkg.npy\",\n 
f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_rndbkg.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_2prong.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_3prong.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_4500.npy\"\n]\n\noutputlist_justloss = [\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb1.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb2.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb3.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_purebkg.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_rndbkg.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_2prong.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy\",\n f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_4500.npy\"\n]", "_____no_output_____" ], [ "exist_signalflag = [\n False,\n False,\n False,\n False,\n True,\n True,\n True,\n False,\n]\nis_signal = [\n False,\n False,\n False,\n False,\n False,\n True,\n True,\n True\n \n]\n\nnprong = [\n None,\n None,\n None,\n None,\n None,\n '2prong',\n '3prong',\n '4500'\n \n]", "_____no_output_____" ], [ "for in_file, out_file_waic, out_file_justloss, sigbit_flag, is_sig, n_prong in zip(inputlist,outputlist_waic,outputlist_justloss,exist_signalflag,is_signal, nprong): \n \n f_bb = pd.read_hdf(in_file)\n dt = f_bb.values\n #correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0) &(dt[:,2]>0) & (dt[:,16]>0) & (dt[:,32]>0)\n #dt = dt[correct]\n correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)\n dt = dt[correct]\n for i in range(13,19):\n dt[:,i] = dt[:,i]/dt[:,3]\n\n for i in range(29,35):\n dt[:,i] = dt[:,i]/(dt[:,19])\n\n\n #correct = (dt[:,16]>0) & (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)\n #dt = dt[correct]\n #correct = (dt[:,3]>100)\n #dt = dt[correct]\n\n #correct = (dt[:,19]>20)\n #dt = dt[correct]\n\n correct = (dt[:,0]>=2800)\n dt = dt[correct]\n\n #bsmlike = np.where(dt[:,16]>0.9)[0]\n #dt = 
dt[bsmlike]\n\n if sigbit_flag:\n idx = dt[:,-1]\n sigidx = (idx == 1)\n bkgidx = (idx == 0)\n if is_sig:\n dt = dt[sigidx]\n else:\n dt = dt[bkgidx]\n \n if n_prong == '2prong':\n \n correct = (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>50) & (dt[:,19]<150) & (dt[:,0]>3400) & (dt[:,0]<3600)\n dt = dt[correct]\n \n if n_prong == '3prong':\n \n correct = (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>50) & (dt[:,19]<150) & (dt[:,0]>3400) & (dt[:,0]<3600)\n dt = dt[correct] \n \n if n_prong == '4500':\n \n correct = (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>100) & (dt[:,19]<200) & (dt[:,0]>4200) & (dt[:,0]<4800)\n dt = dt[correct] \n \n Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]\n #Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]\n #Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]\n #Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]\n \n\n \n \n \n print(Y.shape)\n for i in range(12):\n Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i]\n \n total_bb_test = torch.tensor(Y)\n #huberloss = nn.SmoothL1Loss(reduction='none')\n sigae_bbloss = torch.mean((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()\n bbvar = torch.var((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()\n waic = sigae_bbloss + bbvar\n #sigae_bbloss = torch.mean(huberloss(model(total_bb_test.float().cuda())[0],total_bb_test.float().cuda()),dim=1).data.cpu().numpy()\n print(waic[0:10])\n plt.hist(waic,bins=np.linspace(0,10,1001),density=True);\n plt.xlim([0,2])\n np.save(out_file_waic,waic)\n np.save(out_file_justloss,sigae_bbloss)", "(488280, 12)\n[ 4.7194996 2.8188367 31.315178 2.756485 8.46459\n 9.533489 13.457454 2620.0085 2.9338183 4.8980436]\n(505248, 12)\n[ 11.387151 5.502649 6.66521 64.66336 291.8396 48.59631\n 1.56672 18.174734 3.5265427 1.0036907]\n(491884, 12)\n[2.9693577 3.638872 1.7521389 6.586632 4.030341 1.5751424 0.9993878\n 2.077943 6.8376703 3.4506798]\n(499489, 12)\n[ 6.3167 4.940252 0.5492481 12.029004 4.439932 15.031258\n 4.5505686 10.195691 2.188146 4.007696 ]\n(587907, 12)\n[ 0.71913344 4.3438206 6.439454 2.0758038 10.194782 95.84833\n 3.0597253 1.4067264 11.093575 8.291598 ]\n(31923, 12)\n[ 5.6426625 3.7302885 7.7646484 4.9270196 4.9736047 3.5290728\n 4.423126 7.62922 10.038872 3.2536607]\n(37441, 12)\n[3.1578026 6.480542 5.0173473 6.905368 8.612096 5.4395714 7.584311\n 6.36209 6.369811 5.0555463]\n(26448, 12)\n[2.5932555 2.7761445 5.172421 3.2248294 2.08993 3.0205383 3.8928516\n 4.5390253 3.4748507 2.9204848]\n" ], [ "print(f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy\")", "../data_strings/sig_3prong_12features_mjj4500_nojetmasscut_MSELoss_beta1_z8f10_Justloss_3prong.npy\n" ], [ "loss_prong3 = np.load(f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy\")\nloss_prong2 = np.load(f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_2prong.npy\")\nloss_purebkg = np.load(f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_purebkg.npy\")\nloss_rndbkg = 
np.load(f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_rndbkg.npy\")\nloss_4500 = np.load(f\"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_4500.npy\")\n\n\n\n\n", "_____no_output_____" ], [ "plt.hist(loss_rndbkg,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='Pure Bkg');\n#plt.hist(loss_rndbkg,bins=np.linspace(0,2,100),density=False,alpha=0.3,label='(rnd) bkg');\n\nplt.hist(loss_prong2,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='2prong (rnd)sig');\nplt.hist(loss_prong3,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='3prong (rnd)sig');\nplt.hist(loss_4500,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='2prong 4500');\n\n#plt.yscale('log')\nplt.xlabel('Loss (SigAE trained on 2prong sig)')\nplt.legend(loc='upper right')\n#plt.savefig('sigae_trained_on_2prongsig.png')", "_____no_output_____" ], [ "plt.hist(loss_rndbkg,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='Pure Bkg');\n#plt.hist(loss_rndbkg,bins=np.linspace(0,2,100),density=False,alpha=0.3,label='(rnd) bkg');\n\nplt.hist(loss_prong2,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='2prong (rnd)sig');\nplt.hist(loss_prong3,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='3prong (rnd)sig');\n#plt.yscale('log')\nplt.xlabel('Loss (SigAE trained on 2prong sig)')\nplt.legend(loc='upper right')\n#plt.savefig('sigae_trained_on_2prongsig.png')", "_____no_output_____" ], [ "len(loss_prong2)", "_____no_output_____" ], [ "outputlist_waic", "_____no_output_____" ], [ "outputlist_justloss", "_____no_output_____" ], [ "sigae_bbloss", "_____no_output_____" ], [ "ae_def", "_____no_output_____" ], [ "sigae_bbloss", "_____no_output_____" ], [ "plt.hist(sigae_bbloss,bins=np.linspace(0,10,1001));", "_____no_output_____" ], [ "np.save('../data_strings/sigae_2prong_loss_bb3.npy',sigae_bbloss)", "_____no_output_____" ], [ "X_bkg = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]\nX_bkg = X_bkg[bkg_idx]", "_____no_output_____" ], [ "for i in range(12):\n X_bkg[:,i] = (X_bkg[:,i]-sig_mean[i])/sig_std[i]", "_____no_output_____" ], [ "total_bkg_test = torch.tensor(X_bkg)", "_____no_output_____" ], [ "sigae_bkgloss = torch.mean((sigmodel(total_bkg_test.float().cuda())[0]- total_bkg_test.float().cuda())**2,dim=1).data.cpu().numpy()", "_____no_output_____" ], [ "sigae_sigloss = torch.mean((sigmodel(total_sig.float().cuda())[0]- total_sig.float().cuda())**2,dim=1).data.cpu().numpy()", "_____no_output_____" ], [ "f_3prong = pd.read_hdf(\"/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5\")", "_____no_output_____" ], [ "f_bb1 = pd.read_hdf('/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5')", "_____no_output_____" ], [ "dt_bb1 = f_bb1.values", "_____no_output_____" ], [ "X_bb1 = dt_bb1[:,[3,4,5,6,11,12,19,20,21,22,27,28]]", "_____no_output_____" ], [ "X_bb1.shape", "_____no_output_____" ], [ "sig_mean", "_____no_output_____" ], [ "sig_std", "_____no_output_____" ], [ "for i in range(12):\n X_bb1[:,i] = (X_bb1[:,i]-sig_mean[i])/sig_std[i]", "_____no_output_____" ], [ "plt.hist(X_bb1[:,0],bins = np.linspace(-2,2,10))", "_____no_output_____" ], [ "(torch.tensor(dt[i * chunk_size:(i + 1) * chunk_size]) for i in range ) ", "_____no_output_____" ], [ "def get_loss(dt):\n \n chunk_size=5000 \n total_size=1000000\n i = 0\n i_max = 
total_size // chunk_size\n\n print(i_max)\n \n \n \n gen = (torch.tensor(dt[i*chunk_size: (i + 1) * chunk_size]) for i in range(i_max)) \n\n \n with torch.no_grad():\n \n loss = [\n n\n for total_in_selection in gen\n for n in torch.mean((sigmodel(total_in_selection.float().cuda())[0]- total_in_selection.float().cuda())**2,dim=1).data.cpu().numpy()\n ]\n \n return loss", "_____no_output_____" ], [ "def get_loss(dt):\n \n def generator(dt, chunk_size=5000, total_size=1000000):\n\n i = 0\n i_max = total_size // chunk_size\n print(i_max)\n \n for i in range(i_max):\n start=i * chunk_size\n stop=(i + 1) * chunk_size\n yield torch.tensor(dt[start:stop])\n \n loss = []\n\n \n with torch.no_grad():\n \n for total_in_selection in generator(dt,chunk_size=5000, total_size=1000000):\n loss.extend(torch.mean((sigmodel(total_in_selection.float().cuda())[0]- total_in_selection.float().cuda())**2,dim=1).data.cpu().numpy())\n \n return loss", "_____no_output_____" ], [ "bb1_loss_sig = get_loss(X_bb1)", "200\n" ], [ "bb1_loss_sig = np.array(bb1_loss_sig,dtype=np.float)", "_____no_output_____" ], [ "print(bb1_loss_sig)", "[1.90564466 0.96934295 1.35960376 ... 0.59359992 2.21692467 2.44283652]\n" ], [ "plt.hist(bb1_loss_sig,bins=np.linspace(0,100,1001));", "_____no_output_____" ], [ "np.save('../data_strings/sigaeloss_bb1.npy',bb1_loss_sig)", "_____no_output_____" ], [ "dt_3prong = f_3prong.values", "_____no_output_____" ], [ "Z = dt_3prong[:,[3,4,5,6,11,12,19,20,21,22,27,28]]", "_____no_output_____" ], [ "Z.shape", "_____no_output_____" ], [ "for i in range(12):\n Z[:,i] = (Z[:,i]-sig_mean[i])/sig_std[i]", "_____no_output_____" ], [ "total_3prong = torch.tensor(Z)", "_____no_output_____" ], [ "bkgae_bkgloss = torch.mean((model(total_bkg_test.float().cuda())[0]- total_bkg_test.float().cuda())**2,dim=1).data.cpu().numpy()", "_____no_output_____" ], [ "bkgae_3prongloss = torch.mean((model(total_3prong.float().cuda())[0]- total_3prong.float().cuda())**2,dim=1).data.cpu().numpy()", "_____no_output_____" ], [ "sigae_3prongloss = torch.mean((sigmodel(total_3prong.float().cuda())[0]- total_3prong.float().cuda())**2,dim=1).data.cpu().numpy()", "_____no_output_____" ], [ "sigae_3prongloss.shape", "_____no_output_____" ], [ "bins = np.linspace(0,10,1001)\nplt.hist(sigae_sigloss,bins,weights = np.ones(len(signal_idx))*10,alpha=0.4,color='r',label='2 prong signal');\nplt.hist(sigae_3prongloss,bins,weights = np.ones(100000)*10,alpha=0.5,color='g',label='3 prong signal');\nplt.hist(sigae_bkgloss,bins,alpha=0.4,color='b',label='background');\n#plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\nplt.legend(loc='upper right')\nplt.xlabel('Signal AE Loss',fontsize=15)\n\n", "_____no_output_____" ], [ "def get_tpr_fpr(sigloss,bkgloss,aetype='sig'):\n bins = np.linspace(0,50,1001)\n tpr = []\n fpr = []\n for cut in bins:\n if aetype == 'sig':\n tpr.append(np.where(sigloss<cut)[0].shape[0]/len(sigloss))\n fpr.append(np.where(bkgloss<cut)[0].shape[0]/len(bkgloss))\n if aetype == 'bkg':\n tpr.append(np.where(sigloss>cut)[0].shape[0]/len(sigloss))\n fpr.append(np.where(bkgloss>cut)[0].shape[0]/len(bkgloss))\n return tpr,fpr ", "_____no_output_____" ], [ "def get_precision_recall(sigloss,bkgloss,aetype='bkg'):\n bins = np.linspace(0,100,1001)\n tpr = []\n fpr = []\n precision = []\n for cut in bins:\n if aetype == 'sig':\n tpr.append(np.where(sigloss<cut)[0].shape[0]/len(sigloss))\n precision.append((np.where(sigloss<cut)[0].shape[0])/(np.where(bkgloss<cut)[0].shape[0]+np.where(sigloss<cut)[0].shape[0]))\n \n if aetype == 
'bkg':\n tpr.append(np.where(sigloss>cut)[0].shape[0]/len(sigloss))\n precision.append((np.where(sigloss>cut)[0].shape[0])/(np.where(bkgloss>cut)[0].shape[0]+np.where(sigloss>cut)[0].shape[0]))\n return precision,tpr ", "_____no_output_____" ], [ "tpr_2prong, fpr_2prong = get_tpr_fpr(sigae_sigloss,sigae_bkgloss,'sig')\ntpr_3prong, fpr_3prong = get_tpr_fpr(sigae_3prongloss,sigae_bkgloss,'sig')", "_____no_output_____" ], [ "plt.plot(fpr_2prong,tpr_2prong,label='signal AE')\n#plt.plot(VAE_bkg_fpr,VAE_bkg_tpr,label='Bkg VAE-Vanilla')\nplt.plot(bkg_fpr4,bkg_tpr4,label='Bkg NFlowVAE-Planar')\n\nplt.xlabel(r'$1-\\epsilon_{bkg}$',fontsize=15)\nplt.ylabel(r'$\\epsilon_{sig}$',fontsize=15)\n#plt.semilogy()\n#plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\nplt.legend(loc='lower right')\nplt.xlim([0.0,1.0])\nplt.ylim([0.0,1.0])\nplt.savefig('ROC_Curve_sigae.png')", "_____no_output_____" ], [ "precision,recall = get_precision_recall(loss_sig,loss_bkg,aetype='bkg')", "_____no_output_____" ], [ "np.save('NFLOWVAE_PlanarNEW_22var_sigloss.npy',loss_sig)\nnp.save('NFLOWVAE_PlanarNEW_22var_bkgloss.npy',loss_bkg)", "_____no_output_____" ], [ "np.save('NFLOWVAE_PlanarNEW_precision.npy',precision)\nnp.save('NFLOWVAE_PlanarNEW_recall.npy',recall)\nnp.save('NFLOWVAE_PlanarNEW_bkgAE_fpr.npy',bkg_fpr)\nnp.save('NFLOWVAE_PlanarNEW_bkgAE_tpr.npy',bkg_tpr)\nnp.save('NFLOWVAE_PlanarNEW_sigloss.npy',loss_sig)\nnp.save('NFLOWVAE_PlanarNEW_bkgloss.npy',loss_bkg)", "_____no_output_____" ], [ "plt.plot(recall,precision)", "_____no_output_____" ], [ "flows = [1,2,3,4,5,6]\nzdim = [1,2,3,4,5]\n\nfor N_flows in flows:\n for Z_DIM in zdim:\n model = VAE_NF(N_FLOWS, Z_DIM).cuda()\n optimizer = optim.Adam(model.parameters(), lr=LR)\n BEST_LOSS = 99999\n LAST_SAVED = -1\n PATIENCE_COUNT = 0\n PATIENCE_LIMIT = 5\n for epoch in range(1, N_EPOCHS):\n print(\"Epoch {}:\".format(epoch))\n train()\n cur_loss = evaluate()\n\n if cur_loss <= BEST_LOSS:\n PATIENCE_COUNT = 0\n BEST_LOSS = cur_loss\n LAST_SAVED = epoch\n print(\"Saving model!\")\n if mode == 'ROC':\n torch.save(model.state_dict(),f\"/data/t3home000/spark/QUASAR/weights/bkg_vae_NF_planar_RND_22var_z{Z_DIM}_f{N_FLOWS}.h5\")\n else:\n torch.save(model.state_dict(), f\"/data/t3home000/spark/QUASAR/weights/bkg_vae_NF_planar_PureBkg_22var_z{Z_DIM}_f{N_FLOWS}.h5\")\n else:\n PATIENCE_COUNT += 1\n print(\"Not saving model! Last saved: {}\".format(LAST_SAVED))\n if PATIENCE_COUNT > 3:\n print(\"Patience Limit Reached\")\n break \n \n loss_bkg = get_loss(dt_PureBkg[bkg_idx])\n loss_sig = get_loss(dt_PureBkg[signal_idx])\n np.save(f'NFLOWVAE_PlanarNEW_22var_z{Z_DIM}_f{N_flows}_sigloss.npy',loss_sig)\n np.save(f'NFLOWVAE_PlanarNEW_22var_z{Z_DIM}_f{N_flows}_bkgloss.npy',loss_bkg)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb419c195f46f3efe9f1b1669fe99591dbee5582
13,963
ipynb
Jupyter Notebook
Coursera/Machine-learning-data-analysis/Course 4/C4W4A1.ipynb
ALEXKIRNAS/DataScience
14119565b8fdde042f6ea3070bc0f30db26620c0
[ "MIT" ]
1
2022-03-07T12:56:36.000Z
2022-03-07T12:56:36.000Z
Coursera/Machine-learning-data-analysis/Course 4/C4W4A1.ipynb
ALEXKIRNAS/DataScience
14119565b8fdde042f6ea3070bc0f30db26620c0
[ "MIT" ]
null
null
null
Coursera/Machine-learning-data-analysis/Course 4/C4W4A1.ipynb
ALEXKIRNAS/DataScience
14119565b8fdde042f6ea3070bc0f30db26620c0
[ "MIT" ]
null
null
null
26.697897
101
0.433002
[ [ [ "import pandas as pd\nimport numpy as np\nimport statsmodels.stats.multitest as ssm\nimport scipy.stats as ss", "_____no_output_____" ], [ "data = pd.read_csv('gene_high_throughput_sequencing.csv')\ndata.shape", "_____no_output_____" ], [ "data_normal = data.loc[data.Diagnosis == 'normal']\ndata_normal.shape", "_____no_output_____" ], [ "data_early = data.loc[data.Diagnosis == 'early neoplasia']\ndata_early.shape", "_____no_output_____" ], [ "data_cancer = data.loc[data.Diagnosis == 'cancer']\ndata_cancer.shape", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "counter = 0\np_values_1 = []\nfor name in data.columns[2:]:\n p_values_1.append(ss.ttest_ind(data_normal[name], data_early[name], equal_var=False)[1])\n counter += int(p_values_1[-1] < 0.05)\ncounter", "_____no_output_____" ], [ "counter = 0\np_values_2 = []\nfor name in data.columns[2:]:\n p_values_2.append(ss.ttest_ind(data_cancer[name], data_early[name], equal_var=False)[1])\n counter += int(p_values_2[-1] < 0.05)\ncounter", "_____no_output_____" ], [ "def fold_change(c, t):\n nc = np.array(c).mean()\n nt = np.array(t).mean()\n if nt > nc:\n return nt/nc\n else:\n return - nc/nt", "_____no_output_____" ], [ "reject, p_corrected, _, _ = ssm.multipletests(p_values_1, method='hommel', alpha=0.025)", "_____no_output_____" ], [ "counter = 0\nfor idx, name in enumerate(data.columns[2:]):\n if reject[idx] and abs(fold_change(data_normal[name], data_early[name])) > 1.5:\n counter += 1\ncounter", "_____no_output_____" ], [ "reject, p_corrected, _, _ = ssm.multipletests(p_values_2, method='hommel', alpha=0.025)", "_____no_output_____" ], [ "counter = 0\nfor idx, name in enumerate(data.columns[2:]):\n if reject[idx] and abs(fold_change(data_early[name], data_cancer[name])) > 1.5:\n counter += 1\ncounter", "_____no_output_____" ], [ "reject, p_corrected, _, _ = ssm.multipletests(p_values_1, method='fdr_bh', alpha=0.025)\ncounter = 0\nfor idx, name in enumerate(data.columns[2:]):\n if reject[idx] and abs(fold_change(data_normal[name], data_early[name])) > 1.5:\n counter += 1\ncounter", "_____no_output_____" ], [ "reject, p_corrected, _, _ = ssm.multipletests(p_values_2, method='fdr_bh', alpha=0.025)\ncounter = 0\nfor idx, name in enumerate(data.columns[2:]):\n if reject[idx] and abs(fold_change(data_early[name], data_cancer[name])) > 1.5:\n counter += 1\ncounter", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb41ae8a1ca1046da9efe0cbd9fa599793ba1ae9
446,647
ipynb
Jupyter Notebook
course/4 Deep Learning Intro.ipynb
ResitKadir1/aws-DL
280e26daff56fe3350dbf47674200ecbf41c6dd3
[ "MIT" ]
null
null
null
course/4 Deep Learning Intro.ipynb
ResitKadir1/aws-DL
280e26daff56fe3350dbf47674200ecbf41c6dd3
[ "MIT" ]
null
null
null
course/4 Deep Learning Intro.ipynb
ResitKadir1/aws-DL
280e26daff56fe3350dbf47674200ecbf41c6dd3
[ "MIT" ]
null
null
null
416.648321
216,296
0.934006
[ [ [ "# Deep Learning Intro", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "## Shallow and Deep Networks", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_moons\n\nX, y = make_moons(n_samples=1000, noise=0.1, random_state=0)\nplt.plot(X[y==0, 0], X[y==0, 1], 'ob', alpha=0.5)\nplt.plot(X[y==1, 0], X[y==1, 1], 'xr', alpha=0.5)\nplt.legend(['0', '1'])", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=0.3,\n random_state=42)", "_____no_output_____" ], [ "from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import SGD, Adam", "_____no_output_____" ] ], [ [ "### Shallow Model", "_____no_output_____" ] ], [ [ "model = Sequential()\nmodel.add(Dense(1, input_shape=(2,), activation='sigmoid'))\nmodel.compile(Adam(learning_rate=0.05), 'binary_crossentropy', metrics=['accuracy'])", "2022-01-03 11:28:22.609033: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\nTo enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" ], [ "model.fit(X_train, y_train, epochs=200, verbose=0)", "2022-01-03 11:28:25.497822: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:176] None of the MLIR Optimization Passes are enabled (registered 2)\n" ], [ "results = model.evaluate(X_test, y_test)", "10/10 [==============================] - 0s 2ms/step - loss: 0.3167 - accuracy: 0.8400\n" ], [ "results", "_____no_output_____" ], [ "print(\"The Accuracy score on the Train set is:\\t{:0.3f}\".format(results[1]))", "The Accuracy score on the Train set is:\t0.840\n" ], [ "def plot_decision_boundary(model, X, y):\n amin, bmin = X.min(axis=0) - 0.1\n amax, bmax = X.max(axis=0) + 0.1\n hticks = np.linspace(amin, amax, 101)\n vticks = np.linspace(bmin, bmax, 101)\n \n aa, bb = np.meshgrid(hticks, vticks)\n ab = np.c_[aa.ravel(), bb.ravel()]\n \n c = model.predict(ab)\n cc = c.reshape(aa.shape)\n\n plt.figure(figsize=(12, 8))\n plt.contourf(aa, bb, cc, cmap='bwr', alpha=0.2)\n plt.plot(X[y==0, 0], X[y==0, 1], 'ob', alpha=0.5)\n plt.plot(X[y==1, 0], X[y==1, 1], 'xr', alpha=0.5)\n plt.legend(['0', '1'])\n \nplot_decision_boundary(model, X, y)", "_____no_output_____" ] ], [ [ "### Deep model", "_____no_output_____" ] ], [ [ "model = Sequential()\nmodel.add(Dense(4, input_shape=(2,), activation='tanh'))\nmodel.add(Dense(2, activation='tanh'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(Adam(learning_rate=0.05), 'binary_crossentropy', metrics=['accuracy'])", "_____no_output_____" ], [ "model.fit(X_train, y_train, epochs=100, verbose=0)", "_____no_output_____" ], [ "model.evaluate(X_test, y_test)", "10/10 [==============================] - 0s 2ms/step - loss: 0.0066 - accuracy: 0.9967\n" ], [ "from sklearn.metrics import accuracy_score, confusion_matrix", "_____no_output_____" ], [ "y_train_pred = model.predict_classes(X_train)\ny_test_pred = model.predict_classes(X_test)\n\nprint(\"The Accuracy score on the Train set is:\\t{:0.3f}\".format(accuracy_score(y_train, y_train_pred)))\nprint(\"The Accuracy score on the Test set 
is:\\t{:0.3f}\".format(accuracy_score(y_test, y_test_pred)))", "The Accuracy score on the Train set is:\t0.999\nThe Accuracy score on the Test set is:\t0.997\n" ], [ "plot_decision_boundary(model, X, y)", "_____no_output_____" ] ], [ [ "## Multiclass classification\n\n### The Iris dataset", "_____no_output_____" ] ], [ [ "df = pd.read_csv('../data/iris.csv')", "_____no_output_____" ], [ "import seaborn as sns\nsns.pairplot(df, hue=\"species\")", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "X = df.drop('species', axis=1)\nX.head()", "_____no_output_____" ], [ "target_names = df['species'].unique()\ntarget_names", "_____no_output_____" ], [ "target_dict = {n:i for i, n in enumerate(target_names)}\ntarget_dict", "_____no_output_____" ], [ "y= df['species'].map(target_dict)\ny.head()", "_____no_output_____" ], [ "from tensorflow.keras.utils import to_categorical", "_____no_output_____" ], [ "y_cat = to_categorical(y)", "_____no_output_____" ], [ "y_cat[:10]", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X.values, y_cat,\n test_size=0.2)", "_____no_output_____" ], [ "model = Sequential()\nmodel.add(Dense(3, input_shape=(4,), activation='softmax'))\nmodel.compile(Adam(learning_rate=0.1),\n loss='categorical_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ], [ "model.fit(X_train, y_train, epochs=20, validation_split=0.1)", "Epoch 1/20\n4/4 [==============================] - 0s 60ms/step - loss: 1.4579 - accuracy: 0.5000 - val_loss: 1.8069 - val_accuracy: 0.5000\nEpoch 2/20\n4/4 [==============================] - 0s 18ms/step - loss: 1.1740 - accuracy: 0.6481 - val_loss: 0.6051 - val_accuracy: 0.8333\nEpoch 3/20\n4/4 [==============================] - 0s 14ms/step - loss: 0.6225 - accuracy: 0.7407 - val_loss: 0.5855 - val_accuracy: 0.8333\nEpoch 4/20\n4/4 [==============================] - 0s 25ms/step - loss: 0.5707 - accuracy: 0.7315 - val_loss: 0.6164 - val_accuracy: 0.5000\nEpoch 5/20\n4/4 [==============================] - 0s 19ms/step - loss: 0.4387 - accuracy: 0.7963 - val_loss: 0.4228 - val_accuracy: 0.8333\nEpoch 6/20\n4/4 [==============================] - 0s 18ms/step - loss: 0.4976 - accuracy: 0.6944 - val_loss: 0.4534 - val_accuracy: 1.0000\nEpoch 7/20\n4/4 [==============================] - 0s 19ms/step - loss: 0.4211 - accuracy: 0.7407 - val_loss: 0.5309 - val_accuracy: 0.5000\nEpoch 8/20\n4/4 [==============================] - 0s 11ms/step - loss: 0.4215 - accuracy: 0.7685 - val_loss: 0.3484 - val_accuracy: 0.8333\nEpoch 9/20\n4/4 [==============================] - 0s 15ms/step - loss: 0.3690 - accuracy: 0.8796 - val_loss: 0.4258 - val_accuracy: 0.8333\nEpoch 10/20\n4/4 [==============================] - 0s 43ms/step - loss: 0.3576 - accuracy: 0.8796 - val_loss: 0.3285 - val_accuracy: 0.9167\nEpoch 11/20\n4/4 [==============================] - 0s 20ms/step - loss: 0.3467 - accuracy: 0.8981 - val_loss: 0.3144 - val_accuracy: 1.0000\nEpoch 12/20\n4/4 [==============================] - 0s 20ms/step - loss: 0.3192 - accuracy: 0.9167 - val_loss: 0.3468 - val_accuracy: 1.0000\nEpoch 13/20\n4/4 [==============================] - 0s 11ms/step - loss: 0.3128 - accuracy: 0.9444 - val_loss: 0.2881 - val_accuracy: 1.0000\nEpoch 14/20\n4/4 [==============================] - 0s 21ms/step - loss: 0.2995 - accuracy: 0.9630 - val_loss: 0.2954 - val_accuracy: 1.0000\nEpoch 15/20\n4/4 [==============================] - 0s 21ms/step - loss: 0.2952 - accuracy: 0.9352 - val_loss: 0.2762 - val_accuracy: 1.0000\nEpoch 16/20\n4/4 
[==============================] - 0s 33ms/step - loss: 0.2889 - accuracy: 0.9352 - val_loss: 0.2700 - val_accuracy: 1.0000\nEpoch 17/20\n4/4 [==============================] - 0s 28ms/step - loss: 0.2773 - accuracy: 0.9074 - val_loss: 0.2455 - val_accuracy: 0.9167\nEpoch 18/20\n4/4 [==============================] - 0s 17ms/step - loss: 0.2636 - accuracy: 0.9537 - val_loss: 0.2777 - val_accuracy: 1.0000\nEpoch 19/20\n4/4 [==============================] - 0s 21ms/step - loss: 0.2670 - accuracy: 0.9537 - val_loss: 0.2349 - val_accuracy: 1.0000\nEpoch 20/20\n4/4 [==============================] - 0s 26ms/step - loss: 0.2515 - accuracy: 0.9537 - val_loss: 0.2214 - val_accuracy: 1.0000\n" ], [ "y_pred = model.predict(X_test)", "_____no_output_____" ], [ "y_pred[:5]", "_____no_output_____" ], [ "y_test_class = np.argmax(y_test, axis=1)\ny_pred_class = np.argmax(y_pred, axis=1)", "_____no_output_____" ], [ "from sklearn.metrics import classification_report", "_____no_output_____" ], [ "print(classification_report(y_test_class, y_pred_class))", " precision recall f1-score support\n\n 0 1.00 1.00 1.00 10\n 1 0.86 1.00 0.92 6\n 2 1.00 0.93 0.96 14\n\n accuracy 0.97 30\n macro avg 0.95 0.98 0.96 30\nweighted avg 0.97 0.97 0.97 30\n\n" ], [ "confusion_matrix(y_test_class, y_pred_class)", "_____no_output_____" ] ], [ [ "## Exercise 1", "_____no_output_____" ], [ "The [Pima Indians dataset](https://archive.ics.uci.edu/ml/datasets/diabetes) is a very famous dataset distributed by UCI and originally collected from the National Institute of Diabetes and Digestive and Kidney Diseases. It contains data from clinical exams for women age 21 and above of Pima indian origins. The objective is to predict based on diagnostic measurements whether a patient has diabetes.\n\nIt has the following features:\n\n- Pregnancies: Number of times pregnant\n- Glucose: Plasma glucose concentration a 2 hours in an oral glucose tolerance test\n- BloodPressure: Diastolic blood pressure (mm Hg)\n- SkinThickness: Triceps skin fold thickness (mm)\n- Insulin: 2-Hour serum insulin (mu U/ml)\n- BMI: Body mass index (weight in kg/(height in m)^2)\n- DiabetesPedigreeFunction: Diabetes pedigree function\n- Age: Age (years)\n\nThe last colum is the outcome, and it is a binary variable.\n\nIn this first exercise we will explore it through the following steps:\n\n1. Load the ..data/diabetes.csv dataset, use pandas to explore the range of each feature\n- For each feature draw a histogram. Bonus points if you draw all the histograms in the same figure.\n- Explore correlations of features with the outcome column. You can do this in several ways, for example using the `sns.pairplot` we used above or drawing a heatmap of the correlations.\n- Do features need standardization? If so what stardardization technique will you use? MinMax? Standard?\n- Prepare your final `X` and `y` variables to be used by a ML model. Make sure you define your target variable well. Will you need dummy columns?", "_____no_output_____" ], [ "## Exercise 2", "_____no_output_____" ], [ "Build a fully connected NN model that predicts diabetes. Follow these steps:\n\n1. Split your data in a train/test with a test size of 20% and a `random_state = 22`\n- define a sequential model with at least one inner layer. 
You will have to make choices for the following things:\n - what is the size of the input?\n - how many nodes will you use in each layer?\n - what is the size of the output?\n - what activation functions will you use in the inner layers?\n - what activation function will you use at output?\n - what loss function will you use?\n - what optimizer will you use?\n- fit your model on the training set, using a validation_split of 0.1\n- test your trained model on the test data from the train/test split\n- check the accuracy score, the confusion matrix and the classification report", "_____no_output_____" ], [ "## Exercise 3\nCompare your work with the results presented in [this notebook](https://www.kaggle.com/futurist/d/uciml/pima-indians-diabetes-database/pima-data-visualisation-and-machine-learning). Are your Neural Network results better or worse than the results obtained by traditional Machine Learning techniques?\n\n- Try training a Support Vector Machine or a Random Forest model on the exact same train/test split. Is the performance better or worse?\n- Try restricting your features to only 4 features like in the suggested notebook. How does model performance change?", "_____no_output_____" ], [ "## Exercise 4\n\n[Tensorflow playground](http://playground.tensorflow.org/) is a web based neural network demo. It is really useful to develop an intuition about what happens when you change architecture, activation function or other parameters. Try playing with it for a few minutes. You don't need do understand the meaning of every knob and button in the page, just get a sense for what happens if you change something. In the next chapter we'll explore these things in more detail.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb41b03c70f739f822e06f64e99d777ced036ba7
16,924
ipynb
Jupyter Notebook
Machine Learning Projects/Kelas Pengembangan ML/PengembanganML_3_mnist.ipynb
ausfear/Python
c47b65b765bef0b65f0593c08c543267eeadb4d4
[ "MIT" ]
null
null
null
Machine Learning Projects/Kelas Pengembangan ML/PengembanganML_3_mnist.ipynb
ausfear/Python
c47b65b765bef0b65f0593c08c543267eeadb4d4
[ "MIT" ]
null
null
null
Machine Learning Projects/Kelas Pengembangan ML/PengembanganML_3_mnist.ipynb
ausfear/Python
c47b65b765bef0b65f0593c08c543267eeadb4d4
[ "MIT" ]
null
null
null
69.646091
5,290
0.757445
[ [ [ "<a href=\"https://colab.research.google.com/github/ausfear/Python/blob/main/Machine%20Learning%20Projects/Kelas%20Pengembangan%20ML/PengembanganML_3_mnist.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)", "2.5.0\n" ], [ "mnist = tf.keras.datasets.mnist", "_____no_output_____" ], [ "(gambar_latih, label_latih), (gambar_testing, label_testing) = mnist.load_data()", "_____no_output_____" ], [ "import numpy as np\nnp.set_printoptions(linewidth=200)\n\nimport matplotlib.pyplot as plt\nplt.imshow(gambar_latih[1])\nprint(label_latih[1])", "0\n" ], [ "gambar_latih = gambar_latih / 255.0\ngambar_testing = gambar_testing / 255.0", "_____no_output_____" ], [ "plt.imshow(gambar_latih[1])\nprint(label_latih[1])", "0\n" ], [ "model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28,28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax'),\n])", "_____no_output_____" ], [ "model.compile(optimizer='Adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmodel.fit(gambar_latih, label_latih, batch_size=128, epochs=5)", "Epoch 1/5\n469/469 [==============================] - 2s 4ms/step - loss: 0.3522 - accuracy: 0.9029\nEpoch 2/5\n469/469 [==============================] - 2s 3ms/step - loss: 0.1634 - accuracy: 0.9534\nEpoch 3/5\n469/469 [==============================] - 2s 3ms/step - loss: 0.1160 - accuracy: 0.9666\nEpoch 4/5\n469/469 [==============================] - 2s 3ms/step - loss: 0.0896 - accuracy: 0.9740\nEpoch 5/5\n469/469 [==============================] - 2s 3ms/step - loss: 0.0725 - accuracy: 0.9787\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb41b71beb58d9a72819d12a7fa7141700f9f539
27,291
ipynb
Jupyter Notebook
notebooks/04_optimizing_goodreads.ipynb
williecostello/BetterReads
318e438f342a08fe2db8f8dd4bce17cd85ef8795
[ "MIT" ]
2
2021-01-25T02:47:16.000Z
2021-11-15T14:48:59.000Z
notebooks/04_optimizing_goodreads.ipynb
williecostello/BetterReads
318e438f342a08fe2db8f8dd4bce17cd85ef8795
[ "MIT" ]
3
2020-06-25T14:27:53.000Z
2022-02-10T11:26:20.000Z
notebooks/04_optimizing_goodreads.ipynb
williecostello/BetterReads
318e438f342a08fe2db8f8dd4bce17cd85ef8795
[ "MIT" ]
1
2020-07-29T05:33:56.000Z
2020-07-29T05:33:56.000Z
50.726766
460
0.659668
[ [ [ "# BetterReads: Optimizing GoodReads review data\n\nThis notebook explores how to achieve the best results with the BetterReads algorithm when using review data scraped from GoodReads. It is a short follow-up to the exploration performed in the `03_optimizing_reviews.ipynb` notebook.\n\nWe have two options when scraping review data from GoodReads: For any given book, we can either scrape 1,500 reviews, with 300 reviews for each star rating (1 to 5), or we can scrape just the top 300 reviews, of any rating. (This is due to some quirks in the way that reviews are displayed on the GoodReads website; for more information, see my [GoodReadsReviewsScraper script](https://github.com/williecostello/GoodReadsReviewsScraper).)\n\nThere are advantages and disadvantages to both options. If we scrape 1,500 reviews, we obviously have more review data to work with; however, the data is artifically class-balanced, such that, for example, we'll still see a good number of negative reviews even if the vast majority of the book's reviews are positive. If we scrape just the top 300 reviews, we will have a more representative dataset, but much less data to work with.\n\nWe saw in the `03_optimizing_reviews.ipynb` notebook that the BetterReads algorithm can achieve meaningful and representative results from a dataset with less than 100 reviews. So we should not dismiss the 300 review option simply because it involves less data. We should only dismiss it if its smaller dataset leads to worse results. So let's try these two options out on a particular book and see how the algorithm performs.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport random\n\nfrom sklearn.cluster import KMeans\n\nimport tensorflow_hub as hub", "_____no_output_____" ], [ "# Loads Universal Sentence Encoder locally, from downloaded module\nembed = hub.load('../../Universal Sentence Encoder/module/')\n\n# Loads Universal Sentence Encoder remotely, from Tensorflow Hub\n# embed = hub.load(\"https://tfhub.dev/google/universal-sentence-encoder/4\")", "_____no_output_____" ] ], [ [ "## Which set of reviews should we use?\n\nFor this notebook we'll work with a new example: Sally Rooney's *Conversations with Friends*.\n\n<img src='https://i.gr-assets.com/images/S/compressed.photo.goodreads.com/books/1500031338l/32187419._SY475_.jpg' width=250 align=center>\n\nWe have prepared two datasets, one of 1,500 reviews and another of 300 reviews, as described above. Both datasets were scraped from GoodReads at the same time, so there is some overlap between them. 
(Note that the total number of reviews in both datasets is less than advertised, since non-English and very short reviews are dropped during data cleaning.)", "_____no_output_____" ] ], [ [ "# Set path for processed file\nfile_path_1500 = 'data/32187419_conversations_with_friends.csv'\nfile_path_300 = 'data/32187419_conversations_with_friends_top_300.csv'\n\n# Read in processed file as dataframe\ndf_1500 = pd.read_csv(file_path_1500)\ndf_300 = pd.read_csv(file_path_300)\n\nprint(f'The first dataset consists of {df_1500.shape[0]} sentences from {df_1500[\"review_index\"].nunique()} reviews')\nprint(f'The second dataset consists of {df_300.shape[0]} sentences from {df_300[\"review_index\"].nunique()} reviews')", "The first dataset consists of 8604 sentences from 1190 reviews\nThe second dataset consists of 2874 sentences from 293 reviews\n" ] ], [ [ "As we can see above, in comparison to the smaller dataset, the bigger dataset contains approximately three times the number of sentences from four times the number of reviews. And as we can see below, the bigger dataset contains approximately the same number of reviews for each star rating, while the smaller dataset is much more heavily skewed toward 5 star and 4 star reviews.", "_____no_output_____" ] ], [ [ "df_1500.groupby('review_index')['rating'].mean().value_counts().sort_index()", "_____no_output_____" ], [ "df_300.groupby('review_index')['rating'].mean().value_counts().sort_index()", "_____no_output_____" ] ], [ [ "On [the book's actual GoodReads page](https://www.goodreads.com/book/show/32187419-conversations-with-friends), its average review rating is listed as 3.82 stars. This is nearly the same as the average review rating of our smaller dataset. The bigger dataset's average review rating, in contrast, is just less than 3. 
This confirms our earlier suspicion that the smaller dataset presents a more representative sample of the book's full set of reviews.", "_____no_output_____" ] ], [ [ "df_300.groupby('review_index')['rating'].mean().mean()", "_____no_output_____" ], [ "df_1500.groupby('review_index')['rating'].mean().mean()", "_____no_output_____" ] ], [ [ "Let's see how these high-level differences affect the output of our algorithm.", "_____no_output_____" ] ], [ [ "def load_sentences(file_path):\n '''\n Function to load and embed a book's sentences\n '''\n \n # Read in processed file as dataframe\n df = pd.read_csv(file_path)\n\n # Copy sentence column to new variable\n sentences = df['sentence'].copy()\n\n # Vectorize sentences\n sentence_vectors = embed(sentences)\n \n return sentences, sentence_vectors", "_____no_output_____" ], [ "def get_clusters(sentences, sentence_vectors, k, n):\n '''\n Function to extract the n most representative sentences from k clusters, with density scores\n '''\n \n # Instantiate the model\n kmeans_model = KMeans(n_clusters=k, random_state=24)\n\n # Fit the model\n kmeans_model.fit(sentence_vectors);\n\n # Set the number of cluster centre points to look at when calculating density score\n centre_points = int(len(sentences) * 0.02)\n \n # Initialize list to store mean inner product value for each cluster\n cluster_density_scores = []\n \n # Initialize dataframe to store cluster centre sentences\n df = pd.DataFrame()\n\n # Loop through number of clusters\n for i in range(k):\n\n # Define cluster centre\n centre = kmeans_model.cluster_centers_[i]\n\n # Calculate inner product of cluster centre and sentence vectors\n ips = np.inner(centre, sentence_vectors)\n\n # Find the sentences with the highest inner products\n top_indices = pd.Series(ips).nlargest(n).index\n top_sentences = list(sentences[top_indices])\n \n centre_ips = pd.Series(ips).nlargest(centre_points)\n density_score = round(np.mean(centre_ips), 5)\n \n # Append the cluster density score to master list\n cluster_density_scores.append(density_score)\n\n # Create new row with cluster's top 10 sentences and density score\n new_row = pd.Series([top_sentences, density_score])\n \n # Append new row to master dataframe\n df = df.append(new_row, ignore_index=True)\n\n # Rename dataframe columns\n df.columns = ['sentences', 'density']\n\n # Sort dataframe by density score, from highest to lowest\n df = df.sort_values(by='density', ascending=False).reset_index(drop=True)\n \n # Loop through number of clusters selected\n for i in range(k):\n \n # Save density / similarity score & sentence list to variables\n sim_score = round(df.loc[i][\"density\"], 3)\n sents = df.loc[i]['sentences'].copy()\n \n print(f'Cluster #{i+1} sentences (density score: {sim_score}):\\n')\n print(*sents, sep='\\n')\n print('\\n')\n \n model_density_score = round(np.mean(cluster_density_scores), 5)\n \n print(f'Model density score: {model_density_score}')", "_____no_output_____" ], [ "# Load and embed sentences\nsentences_1500, sentence_vectors_1500 = load_sentences(file_path_1500)\nsentences_300, sentence_vectors_300 = load_sentences(file_path_300)", "_____no_output_____" ], [ "# Get cluster sentences for bigger dataset\nget_clusters(sentences_1500, sentence_vectors_1500, k=6, n=8)", "Cluster #1 sentences (density score: 0.437):\n\nSally Rooney has a really interesting way of writing, which I deeply appreciate.\ni just cannot get over how well Sally Rooney writes.\nI think that Sally Rooney is a fantastic writer.\nI'm very happy I read Rooney's Normal 
People first and loved it so deeply, bc I feel certain I would actively avoid Sally Rooney if this book was the first piece of writing I read by her.\nSally Rooney is a brilliant writer, and I was really looking forward to this from reading her short fiction.\nI can only write that I love it even more than \"Normal people\" and I can't wait for more book by Sally Rooney.\nI love how Sally Rooney writes - naturally and simply.\nWell-written because it’s Sally Rooney and so even her debut is brilliant.\n\n\nCluster #2 sentences (density score: 0.392):\n\nI really just couldn't get with this book.\nI enjoyed this book way more than I thought I would at the beginning.\nDon’t get me wrong I did enjoy this book, but I think I expected more from it?\nReading this book is delightful, I didn’t want it to end.\nNot sure I'm a fan of the writing style of this book, but it was an easy read.\nI have never read a book that as I was reading it was so forgettable.\nI really don’t know how I feel about this book but the writing is undeniably good.\nThat being said, I actually did enjoy reading this book and devoured it quickly!\n\n\nCluster #3 sentences (density score: 0.38):\n\nI think the merits of this book lie in the writing and the characters (although I also thought the characters were somewhat insufferable and pretentious).\nUnbelievably even more than I disliked the characters, I did not enjoy the writing style of this book.\nI even felt in the beginning that the book felt not very special, with the odd writing style and the slightly unlikeable characters.\nI understand that a book having unlikable characters does not make the book unlikeable but they have to be compelling for the reader to want to follow them through the story and the protagonist and side characters were very much lacking in this regard.\nThe author's writing is pretty good, I just didn't really like the characters and never really seemed to connect with them, which then makes me not as engaged with the plot line.\nSuch deeply unlikeable characters, and whilst that doesn't normally stop me from enjoying a book in this instance it did and I found their conversations to be so self indulgent and dull, finishing it was a struggle.\nI found the book boring and the characters so self absorbed and overly dramatic.\nI will say that the writing is good, but the characters are weird and pretty horrible “people”.\n\n\nCluster #4 sentences (density score: 0.353):\n\nI think my main problem with the novel is that it seems like it should have been about the two young women, Frances and Bobbi, but it was actually about Frances and her totally predictable affair with Nick, so handsome!\nThere were times I was curious to see how it would play out with Frances & Nick, as well as Frances' relationship with Bobbi.\nFrances and Bobbi are great characters, but Frances spends so much of the book just involved with Nick, and unlike Connell, he's simply too blank, too opaque, too ideal of a guy in many ways, to be interesting in any way.\nIt is just Bobbi and Frances being horrible, Frances sleeping with Nick, that is about it.\nAnd I genuinely was invested in the plot between Bobbi and Frances and Frances and her parents but woo, did not care for Nick or Melissa.\nThe relationship between Bobbi and Frances is enjoyable to read, they are forging a different path and creating their own definition of relationship, but Frances and Nick is a snoozefest.\nWhen Frances writes a story in which she and her friend Bobbi are easily identified, Bobbi is truly 
shocked at how Frances sees their relationship in the story.\nFrances publishes a story about Bobbi, and Bobbi feels betrayed because Frances could never say those things to her.\n\n\nCluster #5 sentences (density score: 0.225):\n\nI didn’t really find this to be an enjoyable read.\nI didn’t quite enjoy this as much as normal people but I still thought it was a entertaining read\nbut actually I enjoyed it more than I thought I would.\nI didn’t get into it at all, it was just blah blah blah to me.\nMaybe I didn't like it because I couldn't relate to it?\nI wasn't expecting to like it - full of moany twenty-somethings - I'd heard.\nI didn't think I was going to like it but I liked it quite a lot.\nI won’t even begin to try to intellectualize why I liked it.\n\n\nCluster #6 sentences (density score: 0.215):\n\nThis feels both true and difficult, as I’ve never read a writer who so intimately seems to understand modern, young relationships, feelings and fears as she does.\nWhereas Normal People spoke a bit more to the gravitational pull of a romantic relationship, Conversations With Friends captured the main character’s dysfunction and yearning to just be seen and valued by those around her.\nShe writes about relationships with so much care and detail that it becomes hard to separate yourself from the characters.\nThis is how Frances feels and thinks and talks, all in one, and though there are a lot of things about her that are not objectively relatable to me, she has become one of the most relatable characters I've ever read.\nConversations with Friends is a tiresome story of an emotionally unavailable and slightly manipulative young woman and her romantic entanglements.\nHer characters can be so swooningly affectionate with one another--and so ferociously cutting and so perfectly empathetic--that even at their most toxic moments (and there are lots), watching their relationships unfold feels like a privilege.\nThe plot offers nothing new either, there's been plenty of books on naive young adults pursuing unhealthy relationships before, as well as characters who make drama out of nothing and try to drag others in to their narcissism.\nThe way she writes relationships and conversations between the characters, making them normal and not artefact at all but at the same time not being trivial, it's exquisite.\n\n\nModel density score: 0.33358\n" ], [ "# Get cluster sentences for smaller dataset\nget_clusters(sentences_300, sentence_vectors_300, k=6, n=8)", "Cluster #1 sentences (density score: 0.44):\n\ni just cannot get over how well Sally Rooney writes.\nI finished CONVERSATIONS WITH FRIENDS by Sally Rooney this morning and once again I am in awe of Rooney's writing.\nRooney really seems to understand the lives of her chracters.\nI'm looking forward to reading anything else that Sally Rooney writes.\nSally Rooney has become one of my favorite writers.\nRooney is an excellent writer; I desperately hope she is just getting started.\nSally Rooney makes me feel like I could do anything in life as long as I wrote about it well.\nI can’t wait to read whatever Sally Rooney comes out with next!\n\n\nCluster #2 sentences (density score: 0.365):\n\nThere were times I was curious to see how it would play out with Frances & Nick, as well as Frances' relationship with Bobbi.\nThe book follows Frances and her best friend Bobbi, who become entangled with a married couple, Nick and Melissa.\nFrances and Nick end up in a relationship and the conversations between them are low-key and unemotional on the surface; 
however, Frances is concealing her thoughts from herself.\nBobbie is interested in Melissa, while Frances falls in love with Nick and they start having an affair.\nThis book revolves around two college students in Dublin named Frances and Bobbi and their relationship with Melissa & Nick who are a married couple they meet early in the story.\nIn short, this novel focuses on friends Frances and Bobbi and the interesting relationship that they share with a married couple.\nTheir lives become entwined, but we mostly follow the relationships between Frances and Bobbi, and Frances and Nick, after they start having an affair.\nAnother problem is that the main protagonists, Frances and her older, married paramour, Nick are just not very interesting.\n\n\nCluster #3 sentences (density score: 0.361):\n\nTurns out that I absolutely loved this book.\nOk, this book is interesting and I am not disappointed to have read it.\nI am delightfully surprised by how much I loved this book.\nAnyway, I loved this book so much!\nI’ve had this book on my TBR list for a while, so I was really excited when I found out that it would be the next book group read.\nI really didn't find this book too interesting.\nI loved everything about this book.\nI heard very good reviews of this book before reading it so I’m not sure if I’m overly influenced by those .\n\n\nCluster #4 sentences (density score: 0.339):\n\nI think the merits of this book lie in the writing and the characters (although I also thought the characters were somewhat insufferable and pretentious).\nI even felt in the beginning that the book felt not very special, with the odd writing style and the slightly unlikeable characters.\nThe author's writing is pretty good, I just didn't really like the characters and never really seemed to connect with them, which then makes me not as engaged with the plot line.\nIt’s really interesting because some of her characters are unlikeable at times, but they feel realistic and they always develop as the story goes on and it’s really quite entertaining to read about.\nthe characters aren’t particularly likeable, and the situation they are in isn’t particularly common, but it is interesting and I can tell it’s very well written.\nI liked the way it facilitated the story, but unlike in some other novels, the writing style isn't a notable part of the experience of Conversations with Friends.\nI really liked the ending and the way the protagonist was portrayed by the author (I saw a lot of myself in her, or rather I saw the worst side of myself) but at the same time I was frustrated with her character arc.\nI enjoy reading about self-centered, unlikeable characters but they have to be interesting which was not the case for me.\n\n\nCluster #5 sentences (density score: 0.22):\n\nI'm glad I perserverd though and then it really drew me in.\nbut actually I enjoyed it more than I thought I would.\nI won’t even begin to try to intellectualize why I liked it.\nI wasn't expecting to like it - full of moany twenty-somethings - I'd heard.\nI felt like I was supposed to be predisposed to like this.\nSooooo, it took me a while to gather my thoughts on this one and I still have mixed feelings about it.\nI can see why people would hate this, but I loved it.\nThe weird thing, though, is that I did really enjoy it.\n\n\nCluster #6 sentences (density score: 0.205):\n\nWhereas Normal People spoke a bit more to the gravitational pull of a romantic relationship, Conversations With Friends captured the main character’s dysfunction and yearning to 
just be seen and valued by those around her.\nHer characters can be so swooningly affectionate with one another--and so ferociously cutting and so perfectly empathetic--that even at their most toxic moments (and there are lots), watching their relationships unfold feels like a privilege.\nShe's insightful about the emotions involved in falling in love when one is both young and doing one's best to not admit to any sort of emotional entanglement.\nThe way she writes relationships and conversations between the characters, making them normal and not artefact at all but at the same time not being trivial, it's exquisite.\nAltogether Conversations With Friends is an intelligent character study on falling in love, cultivating a relationship, and all of the simplicities and complexities that come with it.\nThe plot offers nothing new either, there's been plenty of books on naive young adults pursuing unhealthy relationships before, as well as characters who make drama out of nothing and try to drag others in to their narcissism.\nIt’s a beautiful and subtle novel with emotionally charged characters and nuances that felt so natural, mirroring the everyday aspects and constants in somebody’s lives: as simple as having a conversation with a friend.\nSally Rooney’s novel, Conversations With Friends, reveals the complexities of relationships in the onset of love, platonically and otherwise, with a direct honesty and realism that made it difficult not to relate to.\n\n\nModel density score: 0.32165\n" ] ], [ [ "Let's summarize our results. The bigger dataset's sentence clusters can be summed up as follows:\n\n1. Fantastic writing\n1. Reading experience (?)\n1. Unlikeable characters\n1. Plot synopsis\n1. Not enjoyable\n1. Thematic elements: relationships & emotions\n\nThe smaller dataset's clusters can be summed up like this:\n\n1. Fantastic writing\n1. Plot synopsis\n1. Loved it\n1. Unlikeable characters\n1. Reading experience\n1. Thematic elements: Relationships & emotions\n\nAs we can see, the two sets of results are broadly similar; there are no radical differences between the two sets of clusters. The only major difference is that the bigger dataset includes a cluster of sentences expressing dislike of the book, whereas the smaller dataset includes a cluster of sentences expressing love of the book. But this was to be expected, given the relative proportions of positive and negative reviews between the two datasets.\n\nGiven these results, we feel that the smaller dataset is preferable. Its clusters seem slightly more internally coherent and to better capture the general sentiment toward the book.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb41bda07ea86211f81df48beba5fdd06649f8ba
56,598
ipynb
Jupyter Notebook
Boston House Prices/Boston house prices.ipynb
pratyakshajha/Machine-Learning-Problems
b027f420409999c07881d0768aba023255001704
[ "MIT" ]
null
null
null
Boston House Prices/Boston house prices.ipynb
pratyakshajha/Machine-Learning-Problems
b027f420409999c07881d0768aba023255001704
[ "MIT" ]
null
null
null
Boston House Prices/Boston house prices.ipynb
pratyakshajha/Machine-Learning-Problems
b027f420409999c07881d0768aba023255001704
[ "MIT" ]
null
null
null
44.600473
18,660
0.591046
[ [ [ "## Load data", "_____no_output_____" ] ], [ [ "from sklearn import datasets\nimport pandas as pd\n\nboston = datasets.load_boston()\ndat = pd.DataFrame(boston.data, columns=boston.feature_names)\ndat.head()", "_____no_output_____" ], [ "target = pd.DataFrame(boston.target, columns=[\"MEDV\"])\ntarget.head()", "_____no_output_____" ] ], [ [ "## Analyse data", "_____no_output_____" ] ], [ [ "df = dat.copy()\ndf = pd.concat([df, target], axis=1)\ndf.head()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 506 entries, 0 to 505\nData columns (total 14 columns):\nCRIM 506 non-null float64\nZN 506 non-null float64\nINDUS 506 non-null float64\nCHAS 506 non-null float64\nNOX 506 non-null float64\nRM 506 non-null float64\nAGE 506 non-null float64\nDIS 506 non-null float64\nRAD 506 non-null float64\nTAX 506 non-null float64\nPTRATIO 506 non-null float64\nB 506 non-null float64\nLSTAT 506 non-null float64\nMEDV 506 non-null float64\ndtypes: float64(14)\nmemory usage: 55.4 KB\n" ], [ "df.describe()", "_____no_output_____" ], [ "from matplotlib import pyplot as plt\nimport seaborn.apionly as snsapi\n\nsnsapi.set()\ndf.hist(bins = 10, figsize = (15,10));\nplt.show();", "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\seaborn\\apionly.py:9: UserWarning: As seaborn no longer sets a default style on import, the seaborn.apionly module is deprecated. It will be removed in a future version.\n warnings.warn(msg, UserWarning)\n" ], [ "corr_matrix = df.corr()\ncorr_matrix['MEDV']", "_____no_output_____" ], [ "import seaborn as sns\n\nsns.heatmap(corr_matrix);\nplt.show()", "_____no_output_____" ], [ "print(boston['DESCR'])", "Boston House Prices dataset\n===========================\n\nNotes\n------\nData Set Characteristics: \n\n :Number of Instances: 506 \n\n :Number of Attributes: 13 numeric/categorical predictive\n \n :Median Value (attribute 14) is usually the target\n\n :Attribute Information (in order):\n - CRIM per capita crime rate by town\n - ZN proportion of residential land zoned for lots over 25,000 sq.ft.\n - INDUS proportion of non-retail business acres per town\n - CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)\n - NOX nitric oxides concentration (parts per 10 million)\n - RM average number of rooms per dwelling\n - AGE proportion of owner-occupied units built prior to 1940\n - DIS weighted distances to five Boston employment centres\n - RAD index of accessibility to radial highways\n - TAX full-value property-tax rate per $10,000\n - PTRATIO pupil-teacher ratio by town\n - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\n - LSTAT % lower status of the population\n - MEDV Median value of owner-occupied homes in $1000's\n\n :Missing Attribute Values: None\n\n :Creator: Harrison, D. and Rubinfeld, D.L.\n\nThis is a copy of UCI ML housing dataset.\nhttp://archive.ics.uci.edu/ml/datasets/Housing\n\n\nThis dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.\n\nThe Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic\nprices and the demand for clean air', J. Environ. Economics & Management,\nvol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics\n...', Wiley, 1980. N.B. Various transformations are used in the table on\npages 244-261 of the latter.\n\nThe Boston house-price data has been used in many machine learning papers that address regression\nproblems. 
\n \n**References**\n\n - Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.\n - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.\n - many more! (see http://archive.ics.uci.edu/ml/datasets/Housing)\n\n" ] ], [ [ "remove features that are less correlated with our target variable.", "_____no_output_____" ] ], [ [ "dat1 = df.loc[:, ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']]", "_____no_output_____" ], [ "dat1.head()", "_____no_output_____" ] ], [ [ "## Split into train and test sets", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(dat1, target, test_size = 0.2, random_state=42)\ny_train = y_train.values.ravel()", "_____no_output_____" ] ], [ [ "## Cross validation to find best algorithm ", "_____no_output_____" ] ], [ [ "from sklearn import model_selection\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.svm import SVR\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.ensemble import BaggingRegressor", "_____no_output_____" ], [ "models = []\nmodels.append(('SVR', SVR()))\nmodels.append(('KNN', KNeighborsRegressor()))\nmodels.append(('DT', DecisionTreeRegressor()))\nmodels.append(('RF', RandomForestRegressor()))\nmodels.append(('l', Lasso()))\nmodels.append(('EN', ElasticNet()))\nmodels.append(('R', Ridge()))\nmodels.append(('BR', BayesianRidge()))\nmodels.append(('GBR', GradientBoostingRegressor()))\nmodels.append(('RF', AdaBoostRegressor()))\nmodels.append(('ET', ExtraTreesRegressor()))\nmodels.append(('BgR', BaggingRegressor()))", "_____no_output_____" ], [ "scoring = 'neg_mean_squared_error'\n\nresults = []\nnames = []\nfor name, model in models:\n kfold = model_selection.KFold(n_splits=10, random_state=42)\n cv_results = model_selection.cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring)\n results.append(cv_results)\n names.append(name)\n msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std())\n print(msg)", "SVR: -87.365301 (24.963687)\nKNN: -44.569256 (16.172348)\nDT: -29.550215 (15.033950)\nRF: -16.141630 (7.590861)\nl: -29.853527 (7.780449)\nEN: -30.127045 (7.837329)\nR: -26.686751 (10.336454)\nBR: -27.015029 (9.687745)\nGBR: -13.929253 (7.043638)\nRF: -16.769726 (7.004807)\nET: -13.642329 (6.730021)\nBgR: -17.056394 (8.094692)\n" ] ], [ [ "## Create pipeline", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import RandomizedSearchCV", "_____no_output_____" ], [ "pipeline = make_pipeline(preprocessing.StandardScaler(), GradientBoostingRegressor(random_state=42))", "_____no_output_____" ] ], [ [ "## Cross validation to fine tune", "_____no_output_____" ] 
], [ [ "hyperparameters = { 'gradientboostingregressor__max_features' : ['auto', 'sqrt', 'log2'],\n 'gradientboostingregressor__max_depth': [None, 5, 3, 1],\n 'gradientboostingregressor__n_estimators': [100, 150, 200, 250]}", "_____no_output_____" ], [ "clf = GridSearchCV(pipeline, hyperparameters, cv=10, scoring = scoring)\nclf.fit(X_train, y_train);", "_____no_output_____" ], [ "clf1 = RandomizedSearchCV(pipeline, hyperparameters, cv=10, random_state=42)\nclf1.fit(X_train, y_train);", "_____no_output_____" ] ], [ [ "## Evaluate ", "_____no_output_____" ] ], [ [ "pred = clf.predict(X_test)\nprint(\"MSE for GridSearchCV: {}\". format(mean_squared_error(y_test, pred)))", "MSE for GridSearchCV: 6.497547906012862\n" ], [ "pred1 = clf1.predict(X_test)\nprint(\"MSE for RandomizedSearchCV: {}\". format(mean_squared_error(y_test, pred1)))", "MSE for RandomizedSearchCV: 6.355811675800892\n" ] ], [ [ "## Save", "_____no_output_____" ] ], [ [ "from sklearn.externals import joblib \njoblib.dump(clf1, 'boston_regressor.pkl')", "_____no_output_____" ], [ "clf2 = joblib.load('boston_regressor.pkl')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb41c038b8209f69121579617a67b4f30cb093ce
1,920
ipynb
Jupyter Notebook
playbook/tactics/persistence/T1542.003.ipynb
haresudhan/The-AtomicPlaybook
447b1d6bca7c3750c5a58112634f6bac31aff436
[ "MIT" ]
8
2021-05-25T15:25:31.000Z
2021-11-08T07:14:45.000Z
playbook/tactics/persistence/T1542.003.ipynb
haresudhan/The-AtomicPlaybook
447b1d6bca7c3750c5a58112634f6bac31aff436
[ "MIT" ]
1
2021-08-23T17:38:02.000Z
2021-10-12T06:58:19.000Z
playbook/tactics/persistence/T1542.003.ipynb
haresudhan/The-AtomicPlaybook
447b1d6bca7c3750c5a58112634f6bac31aff436
[ "MIT" ]
2
2021-05-29T20:24:24.000Z
2021-08-05T23:44:12.000Z
42.666667
956
0.690625
[ [ [ "# T1542.003 - Bootkit\nAdversaries may use bootkits to persist on systems. Bootkits reside at a layer below the operating system and may make it difficult to perform full remediation unless an organization suspects one was used and can act accordingly.\n\nA bootkit is a malware variant that modifies the boot sectors of a hard drive, including the Master Boot Record (MBR) and Volume Boot Record (VBR). (Citation: Mandiant M Trends 2016) The MBR is the section of disk that is first loaded after completing hardware initialization by the BIOS. It is the location of the boot loader. An adversary who has raw access to the boot drive may overwrite this area, diverting execution during startup from the normal boot loader to adversary code. (Citation: Lau 2011)\n\nThe MBR passes control of the boot process to the VBR. Similar to the case of MBR, an adversary who has raw access to the boot drive may overwrite the VBR to divert execution during startup to adversary code.", "_____no_output_____" ], [ "## Atomic Tests:\nCurrently, no tests are available for this technique.", "_____no_output_____" ], [ "## Detection\nPerform integrity checking on MBR and VBR. Take snapshots of MBR and VBR and compare against known good samples. Report changes to MBR and VBR as they occur for indicators of suspicious activity and further analysis.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
cb41c2ca75f5464234529cf4388623f49bbd789e
5,327
ipynb
Jupyter Notebook
notebook/python_class_test_2016.ipynb
navjotk/test
f6addd12ad61ee25b6bf45afd535207b7b356b22
[ "CC-BY-3.0" ]
46
2015-07-17T10:28:42.000Z
2021-11-30T20:24:48.000Z
notebook/python_class_test_2016.ipynb
navjotk/test
f6addd12ad61ee25b6bf45afd535207b7b356b22
[ "CC-BY-3.0" ]
null
null
null
notebook/python_class_test_2016.ipynb
navjotk/test
f6addd12ad61ee25b6bf45afd535207b7b356b22
[ "CC-BY-3.0" ]
20
2015-02-12T16:05:36.000Z
2022-02-27T00:19:40.000Z
32.882716
351
0.608973
[ [ [ "# 2.18 Programming for Geoscientists class test 2016", "_____no_output_____" ], [ "# Test instructions\n\n* This test contains **4** questions each of which should be answered.\n* Write your program in a Python cell just under each question.\n* You can write an explanation of your solution as comments in your code.\n* In each case your solution program must fulfil all of the instructions - please check the instructions carefully and double check that your program fulfils all of the given instructions.\n* Save your work regularly.\n* At the end of the test you should email your IPython notebook document (i.e. this document) to [Gerard J. Gorman](http://www.imperial.ac.uk/people/g.gorman) at [email protected]", "_____no_output_____" ], [ "**1.** The following cells contain at least one programming bug each. For each cell add a comment to identify and explain the bug, and correct the program.", "_____no_output_____" ] ], [ [ "# Function to calculate wave velocity.\ndef wave_velocity(k, mu, rho):\n vp = sqrt((k+4*mu/3)/rho)\n \n return vp\n\n# Use the function to calculate the velocity of an\n# acoustic wave in water.\nvp = wave_velocity(k=0, mu=2.29e9, rho=1000)", "_____no_output_____" ], [ "print \"Velocity of acoustic wave in water: %d\", vp", "_____no_output_____" ], [ "data = (3.14, 2.29, 10, 12)\ndata.append(4)", "_____no_output_____" ], [ "line = \"2015-12-14T06:29:15.740Z,19.4333324,-155.2906647,1.66,2.14,ml,17,248,0.0123,0.36,hv,hv61126056,2015-12-14T06:34:58.500Z,5km W of Volcano, Hawaii,earthquake\"\nlatitude = line.split(',')[1]\nlongitude = line.split(',')[2]\nprint \"longitude, latitude = (%g, %g)\"%(longitude, latitude)", "_____no_output_____" ] ], [ [ "**2.** The Ricker wavelet is frequently employed to model seismic data. The amplitude of the Ricker wavelet with peak frequency $f$ at time $t$ is computed as:\n\n$$A = (1-2 \\pi^2 f^2 t^2) e^{-\\pi^2 f^2 t^2}$$\n\n* Implement a function which calculates the amplitude of the Ricker wavelet for a given peak frequency $f$ and time $t$.\n* Use a *for loop* to create a python *list* for time ranging from $-0.5$ to $0.5$, using a peak frequency, $f$, of $10$.\n* Using the function created above, calculate a numpy array of the Ricker wavelet amplitudes for these times.\n* Plot a graph of time against Ricker wavelet.", "_____no_output_____" ], [ "**3.** The data file [vp.dat](data/vp.dat) (all of the data files are stored in the sub-folder *data/* of this notebook library) contains a profile of the acoustic velocity with respect to depth. Depth is measured with respect to a reference point; therefore the first few entries contain NaN's indicating that they are actually above ground.\n\n* Write a function to read in the depth and acoustic velocity.\n* Ensure you skip the entries that contain NaN's.\n* Store depth and velocities in two seperate numpy arrays.\n* Plot depth against velocity ensuring you label your axis.", "_____no_output_____" ], [ "**4.** The file [BrachiopodBiometrics.csv](data/BrachiopodBiometrics.csv) contains the biometrics of Brachiopods found in 3 different locations.\n\n* Read the data file into a Python *dictionary*.\n* You should use the samples location as the *key*.\n* For each key you should form a Python *list* containing tuples of *length* and *width* of each sample.\n* For each location, calculate the mean length and width of the samples.\n* Print the result for each location using a formatted print statement. 
The mean values should only be printed to within one decimal place.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cb41c4db7b55ba5f140d9ff42f9a148696bed27a
15,761
ipynb
Jupyter Notebook
notebooks/where_are_the_flowlines.ipynb
bearecinos/tutorials
e1de4b4d9386285477a7a3ad97b115a0d11a9820
[ "BSD-3-Clause" ]
1
2021-11-07T19:18:48.000Z
2021-11-07T19:18:48.000Z
notebooks/where_are_the_flowlines.ipynb
bearecinos/tutorials
e1de4b4d9386285477a7a3ad97b115a0d11a9820
[ "BSD-3-Clause" ]
null
null
null
notebooks/where_are_the_flowlines.ipynb
bearecinos/tutorials
e1de4b4d9386285477a7a3ad97b115a0d11a9820
[ "BSD-3-Clause" ]
null
null
null
26.758913
436
0.581752
[ [ [ "# OGGM flowlines: where are they?", "_____no_output_____" ], [ "In this notebook we show how to access the OGGM flowlines location before, during, and after a run.\n\nSome of the code shown here will make it to the OGGM codebase [eventually](https://github.com/OGGM/oggm/issues/1111).", "_____no_output_____" ] ], [ [ "from oggm import cfg, utils, workflow, tasks, graphics\nfrom oggm.core import flowline\nimport salem\nimport xarray as xr\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "cfg.initialize(logging_level='WARNING')", "_____no_output_____" ] ], [ [ "## Get ready", "_____no_output_____" ] ], [ [ "# Where to store the data \ncfg.PATHS['working_dir'] = utils.gettempdir(dirname='OGGM-flowlines', reset=True)\n# Which glaciers?\nrgi_ids = ['RGI60-11.00897']\n# We start from prepro level 3 with all data ready\ngdirs = workflow.init_glacier_directories(rgi_ids, from_prepro_level=3, prepro_border=40)\ngdir = gdirs[0]\ngdir", "_____no_output_____" ] ], [ [ "## Where is the terminus of the RGI glacier?", "_____no_output_____" ], [ "There are several ways to get the terminus, depending on what you want. They are also not necessarily exact same:", "_____no_output_____" ], [ "### Terminus as the lowest point on the glacier", "_____no_output_____" ] ], [ [ "# Get the topo data and the glacier mask\nwith xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:\n topo = ds.topo\n # Glacier outline raster\n mask = ds.glacier_ext", "_____no_output_____" ], [ "topo.plot();", "_____no_output_____" ], [ "topo_ext = topo.where(mask==1)\ntopo_ext.plot();", "_____no_output_____" ], [ "# Get the terminus\nterminus = topo_ext.where(topo_ext==topo_ext.min(), drop=True)\n# Project its coordinates from the local UTM to WGS-84\nt_lon, t_lat = salem.transform_proj(gdir.grid.proj, 'EPSG:4326', terminus.x[0], terminus.y[0])\nprint('lon, lat:', t_lon, t_lat)\nprint('google link:', f'https://www.google.com/maps/place/{t_lat},{t_lon}')", "_____no_output_____" ] ], [ [ "### Terminus as the lowest point on the main centerline", "_____no_output_____" ] ], [ [ "# Get the centerlines\ncls = gdir.read_pickle('centerlines')\n# Get the coord of the last point of the main centerline\ncl = cls[-1]\ni, j = cl.line.coords[-1]\n# These coords are in glacier grid coordinates. Let's convert them to lon, lat:\nt_lon, t_lat = gdir.grid.ij_to_crs(i, j, crs='EPSG:4326')\nprint('lon, lat:', t_lon, t_lat)\nprint('google link:', f'https://www.google.com/maps/place/{t_lat},{t_lon}')", "_____no_output_____" ] ], [ [ "### Terminus as the lowest point on the main flowline ", "_____no_output_____" ], [ "\"centerline\" in the OGGM jargon is not the same as \"flowline\". Flowlines have a fixed dx and their terminus is not necessarily exact on the glacier outline. Code-wise it's very similar though:", "_____no_output_____" ] ], [ [ "# Get the flowlines\ncls = gdir.read_pickle('inversion_flowlines')\n# Get the coord of the last point of the main centerline\ncl = cls[-1]\ni, j = cl.line.coords[-1]\n# These coords are in glacier grid coordinates. 
Let's convert them to lon, lat:\nt_lon, t_lat = gdir.grid.ij_to_crs(i, j, crs='EPSG:4326')\nprint('lon, lat:', t_lon, t_lat)\nprint('google link:', f'https://www.google.com/maps/place/{t_lat},{t_lon}')", "_____no_output_____" ] ], [ [ "### Bonus: convert the centerlines to a shapefile ", "_____no_output_____" ] ], [ [ "output_dir = utils.mkdir('outputs')\nutils.write_centerlines_to_shape(gdirs, path=f'{output_dir}/centerlines.shp')", "_____no_output_____" ], [ "sh = gpd.read_file(f'{output_dir}/centerlines.shp')\nsh.plot();", "_____no_output_____" ] ], [ [ "Remember: the \"centerlines\" are not the same things as \"flowlines\" in OGGM. The later objects undergo further quality checks, such as the impossibility for ice to \"climb\", i.e. have negative slopes. The flowlines are therefore sometimes shorter than the centerlines:", "_____no_output_____" ] ], [ [ "utils.write_centerlines_to_shape(gdirs, path=f'{output_dir}/flowlines.shp', flowlines_output=True)\nsh = gpd.read_file(f'{output_dir}/flowlines.shp')\nsh.plot();", "_____no_output_____" ] ], [ [ "## Flowline geometry after a run: with the new flowline diagnostics (new in v1.6.0!!)", "_____no_output_____" ] ], [ [ "# TODO!!! Based on https://github.com/OGGM/oggm/pull/1308", "_____no_output_____" ] ], [ [ "## Flowline geometry after a run: with `FileModel`", "_____no_output_____" ], [ "Let's do a run first:", "_____no_output_____" ] ], [ [ "cfg.PARAMS['store_model_geometry'] = True # We want to get back to it later\n\ntasks.init_present_time_glacier(gdir)\ntasks.run_constant_climate(gdir, nyears=100, y0=2000);", "_____no_output_____" ] ], [ [ "We use a `FileModel` to read the model output:", "_____no_output_____" ] ], [ [ "fmod = flowline.FileModel(gdir.get_filepath('model_geometry'))", "_____no_output_____" ] ], [ [ "A FileModel behaves like a OGGM's `FlowlineModel`:", "_____no_output_____" ] ], [ [ "fmod.run_until(0) # Point the file model to year 0 in the output\ngraphics.plot_modeloutput_map(gdir, model=fmod) # plot it", "_____no_output_____" ], [ "fmod.run_until(100) # Point the file model to year 100 in the output\ngraphics.plot_modeloutput_map(gdir, model=fmod) # plot it", "_____no_output_____" ], [ "# Bonus - get back to e.g. 
the volume timeseries\nfmod.volume_km3_ts().plot();", "_____no_output_____" ] ], [ [ "OK, now create a table of the main flowline's grid points location and bed altitude (this does not change with time): ", "_____no_output_____" ] ], [ [ "fl = fmod.fls[-1] # Main flowline\ni, j = fl.line.xy # xy flowline on grid\nlons, lats = gdir.grid.ij_to_crs(i, j, crs='EPSG:4326') # to WGS84\n\ndf_coords = pd.DataFrame(index=fl.dis_on_line*gdir.grid.dx)\ndf_coords.index.name = 'Distance along flowline'\ndf_coords['lon'] = lons\ndf_coords['lat'] = lats\ndf_coords['bed_elevation'] = fl.bed_h\ndf_coords.plot(x='lon', y='lat');", "_____no_output_____" ], [ "df_coords['bed_elevation'].plot();", "_____no_output_____" ] ], [ [ "Now store a time varying array of ice thickness, surface elevation along this line:", "_____no_output_____" ] ], [ [ "years = np.arange(0, 101)\ndf_thick = pd.DataFrame(index=df_coords.index, columns=years, dtype=np.float64)\ndf_surf_h = pd.DataFrame(index=df_coords.index, columns=years, dtype=np.float64)\ndf_bed_h = pd.DataFrame()\nfor year in years:\n fmod.run_until(year)\n fl = fmod.fls[-1]\n df_thick[year] = fl.thick\n df_surf_h[year] = fl.surface_h", "_____no_output_____" ], [ "df_thick[[0, 50, 100]].plot();\nplt.title('Ice thickness at three points in time');", "_____no_output_____" ], [ "f, ax = plt.subplots()\ndf_surf_h[[0, 50, 100]].plot(ax=ax);\ndf_coords['bed_elevation'].plot(ax=ax, color='k');\nplt.title('Glacier elevation at three points in time');", "_____no_output_____" ] ], [ [ "### Location of the terminus over time", "_____no_output_____" ], [ "Let's find the indices where the terminus is (i.e. the last point where ice is thicker than 1m), and link these to the lon, lat positions along the flowlines.\n\nThe first method uses fancy pandas functions but may be more cryptic for less experienced pandas users:", "_____no_output_____" ] ], [ [ "# Nice trick from https://stackoverflow.com/questions/34384349/find-index-of-last-true-value-in-pandas-series-or-dataframe\ndis_term = (df_thick > 1)[::-1].idxmax()\n\n# Select the terminus coordinates at these locations\nloc_over_time = df_coords.loc[dis_term].set_index(dis_term.index)\n\n# Plot them over time\nloc_over_time.plot.scatter(x='lon', y='lat', c=loc_over_time.index, colormap='viridis');\nplt.title('Location of the terminus over time');", "_____no_output_____" ], [ "# Plot them on a google image - you need an API key for this\n# api_key = ''\n# from motionless import DecoratedMap, LatLonMarker\n# dmap = DecoratedMap(maptype='satellite', key=api_key)\n# for y in [0, 20, 40, 60, 80, 100]:\n# tmp = loc_over_time.loc[y]\n# dmap.add_marker(LatLonMarker(tmp.lat, tmp.lon, ))\n# print(dmap.generate_url())", "_____no_output_____" ] ], [ [ "<img src='https://maps.googleapis.com/maps/api/staticmap?key=AIzaSyDWG_aTgfU7CeErtIzWfdGxpStTlvDXV_o&maptype=satellite&format=png&scale=1&size=400x400&sensor=false&language=en&markers=%7C46.818796056851475%2C10.802746777546085%7C46.81537664036365%2C10.793672904092187%7C46.80792268953582%2C10.777563608554978%7C46.7953190811109%2C10.766412086223571%7C46.79236232808986%2C10.75236937607986%7C46.79236232808986%2C10.75236937607986'>", "_____no_output_____" ], [ "\nAnd now, method 2: less fancy but maybe easier to read? 
", "_____no_output_____" ] ], [ [ "for yr in [0, 20, 40, 60, 80, 100]:\n # Find the last index of the terminus\n p_term = np.nonzero(df_thick[yr].values > 1)[0][-1]\n # Print the location of the terminus\n print(f'Terminus pos at year {yr}', df_coords.iloc[p_term][['lon', 'lat']].values)", "_____no_output_____" ] ], [ [ "## Comments on \"elevation band flowlines\" ", "_____no_output_____" ], [ "If you use elevation band flowlines, the location of the flowlines is not known: indeed, the glacier is an even more simplified representation of the real world one. In this case, if you are interested in tracking the terminus position, you may need to use tricks, such as using the retreat from the terminus with time, or similar. ", "_____no_output_____" ], [ "## What's next?\n\n- return to the [OGGM documentation](https://docs.oggm.org)\n- back to the [table of contents](welcome.ipynb)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
cb41d85fa082b20bb8f76eefabe3b6e01cacf922
611,959
ipynb
Jupyter Notebook
Datacamp Assignments/Data Science Track/Pandas Foundations/1. Data ingestion & inspection.ipynb
Ali-Parandeh/Data_Science_Playground
c529e9b3692381572de259e7c93938d6611d83da
[ "MIT" ]
null
null
null
Datacamp Assignments/Data Science Track/Pandas Foundations/1. Data ingestion & inspection.ipynb
Ali-Parandeh/Data_Science_Playground
c529e9b3692381572de259e7c93938d6611d83da
[ "MIT" ]
null
null
null
Datacamp Assignments/Data Science Track/Pandas Foundations/1. Data ingestion & inspection.ipynb
Ali-Parandeh/Data_Science_Playground
c529e9b3692381572de259e7c93938d6611d83da
[ "MIT" ]
1
2021-03-10T09:40:05.000Z
2021-03-10T09:40:05.000Z
358.709848
144,444
0.923998
[ [ [ "# Data ingestion & inspection\n", "_____no_output_____" ], [ "## 1. NumPy and pandas working together\nPandas depends upon and interoperates with NumPy, the Python library for fast numeric array computations. For example, you can use the DataFrame attribute .values to represent a DataFrame df as a NumPy array. You can also pass pandas data structures to NumPy methods. In this exercise, we have imported pandas as pd and loaded world population data every 10 years since 1960 into the DataFrame df. This dataset was derived from the one used in the previous exercise.\n\nYour job is to extract the values and store them in an array using the attribute .values. You'll then use those values as input into the NumPy np.log10() method to compute the base 10 logarithm of the population values. Finally, you will pass the entire pandas DataFrame into the same NumPy np.log10() method and compare the results.", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv(\"datasets/world_population.csv\")", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6 entries, 0 to 5\nData columns (total 2 columns):\nYear 6 non-null int64\nTotal Population 6 non-null float64\ndtypes: float64(1), int64(1)\nmemory usage: 176.0 bytes\n" ], [ "# Import numpy\nimport numpy as np\n\n# Create array of DataFrame values: np_vals\nnp_vals = df.values\nnp_vals", "_____no_output_____" ], [ "# Create new array of base 10 logarithm values: np_vals_log10\nnp_vals_log10 = np.log10(np_vals)\nnp_vals_log10", "_____no_output_____" ], [ "# Create array of new DataFrame by passing df to np.log10(): df_log10\ndf_log10 = np.log10(df)\ndf_log10", "_____no_output_____" ], [ "# Print original and new data containers\n[print(x, 'has type', type(eval(x))) for x in ['np_vals', 'np_vals_log10', 'df', 'df_log10']]", "np_vals has type <class 'numpy.ndarray'>\nnp_vals_log10 has type <class 'numpy.ndarray'>\ndf has type <class 'pandas.core.frame.DataFrame'>\ndf_log10 has type <class 'pandas.core.frame.DataFrame'>\n" ] ], [ [ "Wonderful work! As a data scientist, you'll frequently interact with NumPy arrays, pandas Series, and pandas DataFrames, and you'll leverage a variety of NumPy and pandas methods to perform your desired computations. Understanding how NumPy and pandas work together will prove to be very useful.", "_____no_output_____" ], [ "## 2. Zip lists to build a DataFrame\nIn this exercise, you're going to make a pandas DataFrame of the top three countries to win gold medals since 1896 by first building a dictionary. list_keys contains the column names 'Country' and 'Total'. list_values contains the full names of each country and the number of gold medals awarded. The values have been taken from Wikipedia.\n\nYour job is to use these lists to construct a list of tuples, use the list of tuples to construct a dictionary, and then use that dictionary to construct a DataFrame. In doing so, you'll make use of the list(), zip(), dict() and pd.DataFrame() functions. Pandas has already been imported as pd.\n\nNote: The zip() function in Python 3 and above returns a special zip object, which is essentially a generator. To convert this zip object into a list, you'll need to use list(). 
", "_____no_output_____" ] ], [ [ "list_values = [['United States', 'Soviet Union', 'United Kingdom'], [1118, 473, 273]]\nlist_keys = ['Country', 'Total']", "_____no_output_____" ], [ "# Zip the 2 lists together into one list of (key,value) tuples: zipped\nzipped = list(zip(list_keys, list_values))\n\n# Inspect the list using\nzipped", "_____no_output_____" ], [ "# Build a dictionary with the zipped list: data\ndata = dict(zipped)\ndata", "_____no_output_____" ], [ "# Build and inspect a DataFrame from the dictionary: df\ndf = pd.DataFrame(data)\ndf", "_____no_output_____" ] ], [ [ "Fantastic! Being able to build DataFrames from scratch is an important skill.", "_____no_output_____" ], [ "## 3. Labeling your data\nYou can use the DataFrame attribute df.columns to view and assign new string labels to columns in a pandas DataFrame.\n\nImport a DataFrame df containing top Billboard hits from the 1980s (from Wikipedia). Each row has the year, artist, song name and the number of weeks at the top. However, this DataFrame has the column labels a, b, c, d. Your job is to use the df.columns attribute to re-assign descriptive column labels.", "_____no_output_____" ] ], [ [ "list_values = [[1980, 1981, 1982], \n [\"Blondie\", \"Christopher Cross\", \"Joan Jett\"], \n [\"Call Me\", \"Arthurs Theme\", \"I Love Rock and Roll\"],\n [6, 3, 7]]\nlist_keys = [\"a\", \"b\", \"c\", \"d\"]", "_____no_output_____" ], [ "df = pd.DataFrame(dict(zip(list_keys, list_values)))\ndf", "_____no_output_____" ], [ "# Build a list of labels: list_labels\nlist_labels = [\"year\", \"artist\", \"song\", \"chart weeks\"]\n\n# Assign the list of labels to the columns attribute: df.columns\ndf.columns = list_labels\ndf", "_____no_output_____" ] ], [ [ "Great work! You'll often need to rename column names like this to be more informative.", "_____no_output_____" ], [ "## 4. Building DataFrames with broadcasting\nYou can implicitly use 'broadcasting', a feature of NumPy, when creating pandas DataFrames. In this exercise, you're going to create a DataFrame of cities in Pennsylvania that contains the city name in one column and the state name in the second. We have imported the names of 15 cities as the list cities.\n\nYour job is to construct a DataFrame from the list of cities and the string 'PA'.", "_____no_output_____" ] ], [ [ "cities = ['Manheim', 'Preston park', 'Biglerville', 'Indiana', 'Curwensville', 'Crown', 'Harveys lake','Mineral springs', 'Cassville','Hannastown','Saltsburg','Tunkhannock','Pittsburgh','Lemasters','Great bend']", "_____no_output_____" ], [ "# Make a string with the value 'PA': state\nstate = \"PA\"\n\n# Construct a dictionary: data\ndata = {'state':state, 'city':cities}\ndata", "_____no_output_____" ], [ "# Construct a DataFrame from dictionary data: df\ndf = pd.DataFrame(data)\n\n# Print the DataFrame\ndf", "_____no_output_____" ] ], [ [ "Excellent job! Broadcasting is a powerful technique.\n\n", "_____no_output_____" ], [ "## 5. Reading a flat file\nYour job is to read the World Bank population data into a DataFrame using read_csv(). \n\nThe next step is to reread the same file, but simultaneously rename the columns using the names keyword input parameter, set equal to a list of new column labels. 
You will also need to set header=0 to rename the column labels.\n\nFinish up by inspecting the result with df.head() and df.info() (changing df to the name of your DataFrame variable).", "_____no_output_____" ] ], [ [ "data_file = \"datasets/world_population.csv\"", "_____no_output_____" ], [ "# Read in the file: df1\ndf1 = pd.read_csv(data_file)\ndf1", "_____no_output_____" ], [ "# Create a list of the new column labels: new_labels\nnew_labels = [\"year\", \"population\"]\n\n# Read in the file, specifying the header and names parameters: df2\ndf2 = pd.read_csv(data_file, header=0, names=new_labels)\ndf2", "_____no_output_____" ] ], [ [ "Well done! Knowing how to read in flat files using pandas is a vital skill.", "_____no_output_____" ], [ "## 6. Delimiters, headers, and extensions\nNot all data files are clean and tidy. Pandas provides methods for reading those not-so-perfect data files that you encounter far too often.\n\nIn this exercise, you have monthly stock data for four companies downloaded from Yahoo Finance. The data is stored as one row for each company and each column is the end-of-month closing price.\n\nIn addition, this file has three aspects that may cause trouble for lesser tools: multiple header lines, comment records (rows) interleaved throughout the data rows, and space delimiters instead of commas.\n\nYour job is to use pandas to read the data from this problematic file_messy using non-default input options with read_csv() so as to tidy up the mess at read time. Then, write the cleaned up data to a CSV file with the variable file_clean that has been prepared for you, as you might do in a real data workflow.\n\nYou can learn about the option input parameters needed by using help() on the pandas function pd.read_csv().", "_____no_output_____" ] ], [ [ "file_messy = \"datasets/messy_stock_data.tsv\"", "_____no_output_____" ], [ "# Inside messy_stock_data.tsv\n\nThe following stock data was collect on 2016-AUG-25 from an unknown source\nThese kind of ocmments are not very useful, are they?\nprobably should just throw this line away too, but not the next since those are column labels\nname Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec\n# So that line you just read has all the column headers labels\nIBM 156.08 160.01 159.81 165.22 172.25 167.15 164.75 152.77 145.36 146.11 137.21 137.96\nMSFT 45.51 43.08 42.13 43.47 47.53 45.96 45.61 45.51 43.56 48.70 53.88 55.40\n# That MSFT is MicroSoft\nGOOGLE 512.42 537.99 559.72 540.50 535.24 532.92 590.09 636.84 617.93 663.59 735.39 755.35\nAPPLE 110.64 125.43 125.97 127.29 128.76 127.81 125.34 113.39 112.80 113.36 118.16 111.73\n# Maybe we should have bought some Apple stock in 2008?", "_____no_output_____" ], [ "# Read the raw file as-is: df1\ndf1 = pd.read_csv(file_messy)\ndf1", "_____no_output_____" ], [ "# Read in the file with the correct parameters: df2\n# Comment attribute Indicates remainder of line should not be parsed. If found at the beginning\n# of a line, the line will be ignored altogether. \n\ndf2 = pd.read_csv(file_messy, delimiter=\" \", header=3, comment=\"#\", index_col=\"name\")\ndf2", "_____no_output_____" ] ], [ [ "Superb! It's important to be able to save your cleaned DataFrames in the desired file format!", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\ndf2.plot(kind=\"box\");", "_____no_output_____" ] ], [ [ "## 7. Plotting series using pandas\nData visualization is often a very effective first step in gaining a rough understanding of a data set to be analyzed. 
Pandas provides data visualization by both depending upon and interoperating with the matplotlib library. You will now explore some of the basic plotting mechanics with pandas as well as related matplotlib options. We have pre-loaded a pandas DataFrame df which contains the data you need. Your job is to use the DataFrame method df.plot() to visualize the data, and then explore the optional matplotlib input parameters that this .plot() method accepts.\n\nThe pandas .plot() method makes calls to matplotlib to construct the plots. This means that you can use the skills you've learned in previous visualization courses to customize the plot. In this exercise, you'll add a custom title and axis labels to the figure.\n\nBefore plotting, inspect the DataFrame in the IPython Shell using df.head(). Also, use type(df) and note that it is a single column DataFrame.", "_____no_output_____" ] ], [ [ "temp = pd.read_csv(\"datasets/weather_data_austin_2010.csv\", index_col = \"Date\", parse_dates = True)\ntemp.info()", "<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 8759 entries, 2010-01-01 00:00:00 to 2010-12-31 23:00:00\nData columns (total 3 columns):\nTemperature 8759 non-null float64\nDewPoint 8759 non-null float64\nPressure 8759 non-null float64\ndtypes: float64(3)\nmemory usage: 273.7 KB\n" ], [ "temp[temp.index < \"2010-04-01\"].tail()", "_____no_output_____" ], [ "temp_to_april = temp[temp.index < \"2010-04-01\"]", "_____no_output_____" ], [ "# Create a plot with color='red'\ntemp_to_april.plot(y = \"Temperature\", color = \"red\", figsize=(18, 5))\n\n# Add a title\nplt.title(\"Temperature in Austin\")\n\n# Specify the x-axis label\nplt.xlabel(\"Hours since midnight August 1, 2010\")\n\n# Specify the y-axis label\nplt.ylabel(\"Temperature (degrees F)\")\n\n# Display the plot\nplt.show()", "_____no_output_____" ] ], [ [ "## 8. Plotting DataFrames\nComparing data from several columns can be very illuminating. Pandas makes doing so easy with multi-column DataFrames. By default, calling df.plot() will cause pandas to over-plot all column data, with each column as a single line. In this exercise, load the three columns of data from a weather data set - temperature, dew point, and pressure - but the problem is that pressure has different units of measure. The pressure data, measured in Atmospheres, has a different vertical scaling than that of the other two data columns, which are both measured in degrees Fahrenheit.\n\nYour job is to plot all columns as a multi-line plot, to see the nature of vertical scaling problem. Then, use a list of column names passed into the DataFrame df[column_list] to limit plotting to just one column, and then just 2 columns of data. When you are finished, you will have created 4 plots. 
You can cycle through them by clicking on the 'Previous Plot' and 'Next Plot' buttons.\n\nAs in the previous exercise, inspect the DataFrame df in the IPython Shell using the .head() and .info() methods.", "_____no_output_____" ] ], [ [ "# Plot all columns (default)\ntemp.plot(figsize=(18, 10))\nplt.show()", "_____no_output_____" ], [ "# Plot all columns as subplots\ntemp.plot(subplots=True,figsize=(18, 20))\nplt.show()", "_____no_output_____" ], [ "# Plot just the Dew Point data\ncolumn_list1 = ['DewPoint']\ntemp[column_list1].plot(figsize=(18, 10))\nplt.show()", "_____no_output_____" ], [ "# Plot the Dew Point and Temperature data, but not the Pressure data\ncolumn_list2 = ['Temperature','DewPoint']\ntemp[column_list2].plot(figsize=(18, 10))\nplt.show()", "_____no_output_____" ] ], [ [ "Great work!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
cb41e283bc42e55e6f88d4320173da0093cf2720
280,611
ipynb
Jupyter Notebook
notebook/.ipynb_checkpoints/2020.03.30_feat_sel_shuff_dynamic-checkpoint.ipynb
wconnell/metrx
54296bac5bdd861dc4f43c37a5024de8d285afaa
[ "BSD-3-Clause" ]
null
null
null
notebook/.ipynb_checkpoints/2020.03.30_feat_sel_shuff_dynamic-checkpoint.ipynb
wconnell/metrx
54296bac5bdd861dc4f43c37a5024de8d285afaa
[ "BSD-3-Clause" ]
null
null
null
notebook/.ipynb_checkpoints/2020.03.30_feat_sel_shuff_dynamic-checkpoint.ipynb
wconnell/metrx
54296bac5bdd861dc4f43c37a5024de8d285afaa
[ "BSD-3-Clause" ]
null
null
null
52.489899
1,723
0.564668
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import os\nimport sys\nsys.path.append(\"..\")\n\nimport datetime\nimport pathlib\n\nfrom collections import OrderedDict \n\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "# Pytorch\nimport torch\nfrom torch.optim import lr_scheduler\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\n# Custom\nfrom dutils import Experiment\nfrom trainer import fit\nimport visualization as vis\nfrom tcga_datasets import SiameseDataset\n\n# Models\nfrom tcga_networks import EmbeddingNet, SiameseNet\nfrom losses import ContrastiveLoss\n\n# Metrics\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import adjusted_mutual_info_score as ANMI", "_____no_output_____" ], [ "def getTCGA(disease):\n path = \"/srv/nas/mk2/projects/pan-cancer/TCGA_CCLE_GCP/TCGA/TCGA_{}_counts.tsv.gz\"\n files = [path.format(d) for d in disease]\n return files\n\n\ndef readGCP(files, biotype='protein_coding', mean=True):\n \"\"\"\n Paths to count matrices.\n \"\"\"\n data_dict = {}\n for f in files:\n key = os.path.basename(f).split(\"_\")[1]\n data = pd.read_csv(f, sep='\\t', index_col=0)\n # transcript metadata\n meta = pd.DataFrame([row[:-1] for row in data.index.str.split(\"|\")],\n columns=['ENST', 'ENSG', 'OTTHUMG', 'OTTHUMT', 'GENE-NUM', 'GENE', 'BP', 'BIOTYPE'])\n meta = pd.MultiIndex.from_frame(meta)\n data.index = meta\n # subset transcripts\n data = data.xs(key=biotype, level='BIOTYPE')\n data = data.droplevel(['ENST', 'ENSG', 'OTTHUMG', 'OTTHUMT', 'GENE-NUM', 'BP'])\n # average gene expression of splice variants\n data = data.T\n if mean:\n data = data.groupby(by=data.columns, axis=1).mean()\n data_dict[key] = data\n return data_dict\n\n\ndef uq_norm(df, q=0.75):\n \"\"\"\n Upper quartile normalization of GEX for samples.\n \"\"\"\n quantiles = df.quantile(q=q, axis=1)\n norm = df.divide(quantiles, axis=0)\n return norm\n\n\ndef process_TCGA(disease=['BRCA', 'LUAD', 'KIRC', 'THCA', 'PRAD', 'SKCM']):\n base=\"/srv/nas/mk2/projects/pan-cancer/TCGA_CCLE_GCP\"\n # get files\n tcga_files = getTCGA(disease)\n # read meta/data\n tcga_meta = pd.read_csv(os.path.join(base, \"TCGA/TCGA_GDC_ID_MAP.tsv\"), sep=\"\\t\")\n tcga_raw = readGCP(tcga_files, mean=True)\n # combine samples\n tcga_raw = pd.concat(tcga_raw.values())\n # Upper quartile normalization\n tcga_raw = uq_norm(tcga_raw)\n # log norm\n tcga = tcga_raw.transform(np.log1p)\n return tcga, tcga_meta", "_____no_output_____" ], [ "def generate_fsets(data, n_features, steps=5):\n r = np.linspace(start=1, stop=n_features, num=steps, dtype='int')\n idx = [np.random.choice(data.shape[1], size=i, replace=False) for i in r]\n return idx", "_____no_output_____" ], [ "def feature_training(train_data, train_labels, test_data, test_labels, feature_idx, embedding, exp_dir, cuda=True):\n # Meta data\n meta_data = {\"n_features\":[],\n \"model\":[],\n \"ANMI\":[]}\n # Params\n batch_size = 8\n kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {'num_workers': 10}\n \n # Feature Index\n for batch, feat in enumerate(feature_idx):\n print(\"Number features: {}\\n\".format(len(feat)))\n exp_data = {'feature_idx':feat}\n # Define data\n siamese_train_dataset = SiameseDataset(data=train_data.iloc[:,feat],\n labels=train_labels,\n train=True)\n siamese_test_dataset = SiameseDataset(data=test_data.iloc[:,feat],\n labels=test_labels,\n train=False)\n # Loaders\n siamese_train_loader = torch.utils.data.DataLoader(siamese_train_dataset, batch_size=batch_size, 
shuffle=True, **kwargs)\n siamese_test_loader = torch.utils.data.DataLoader(siamese_test_dataset, batch_size=batch_size, shuffle=False, **kwargs)\n # Instantiate model\n n_samples, n_features = siamese_train_dataset.train_data.shape\n for i in range(3):\n nmodel = 'model_{}'.format(i)\n print(\"\\t{}\".format(nmodel))\n embedding_net = EmbeddingNet(n_features, embedding)\n model = SiameseNet(embedding_net)\n if cuda:\n model.cuda()\n # Parameters\n margin = 1.\n loss_fn = ContrastiveLoss(margin)\n lr = 1e-3\n optimizer = optim.Adam(model.parameters(), lr=lr)\n scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)\n n_epochs = 10\n log_interval = round(len(siamese_train_dataset)/1/batch_size)\n # Train\n train_loss, val_loss = fit(siamese_train_loader, siamese_test_loader, model, loss_fn, optimizer, scheduler, \n n_epochs, cuda, log_interval)\n # Test Embeddings\n val_embeddings_baseline, val_labels_baseline = vis.extract_embeddings(siamese_test_dataset.test_data, siamese_test_dataset.labels, model)\n # Evaluation\n n_clusters = len(np.unique(test_labels))\n kmeans = KMeans(n_clusters=n_clusters)\n siamese_clusters = kmeans.fit_predict(val_embeddings_baseline)\n anmi = ANMI(siamese_clusters, val_labels_baseline)\n # Store\n meta_data['n_features'].append(len(feat))\n meta_data['model'].append(nmodel)\n meta_data['ANMI'].append(anmi)\n exp_data[nmodel] = {'data': (val_embeddings_baseline, val_labels_baseline),\n 'loss': (train_loss, val_loss),\n 'ANMI': anmi}\n pd.to_pickle(exp_data, os.path.join(exp_dir, \"model_{}.pkl\".format(len(feat))))\n pd.to_pickle(meta_data, os.path.join(exp_dir, \"model_meta_data.pkl\"))", "_____no_output_____" ], [ "def main(disease, sample_type, **kwargs):\n # GPUs\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = kwargs['device']\n cuda = torch.cuda.is_available()\n print(\"Cuda is available: {}\".format(cuda))\n \n # Read / write / process\n tcga, tcga_meta = process_TCGA(disease)\n # Feature design\n feature_idx = generate_fsets(tcga, n_features=kwargs['n_features'], steps=kwargs['steps'])\n # Experiment design\n hierarchy = OrderedDict({'Disease':disease,\n 'Sample Type':sample_type})\n # Define experiment\n exp = Experiment(meta_data=tcga_meta,\n hierarchy=hierarchy,\n index='CGHubAnalysisID',\n cases='Case ID',\n min_samples=20)\n # Train / Test split\n exp.train_test_split(cases='Case ID')\n # Return data \n train_data, train_labels = exp.get_data(tcga, subset=\"train\", dtype=np.float32)\n test_data, test_labels = exp.get_data(tcga, subset=\"test\", dtype=np.float32)\n \n # randomize labels\n np.random.shuffle(train_labels)\n \n # Path *fix*\n dtime = datetime.datetime.today().strftime(\"%Y.%m.%d_%H:%M\")\n exp_dir = \"/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/{}_{}_{}_{}_{}-{}\".format(dtime,\n kwargs['note'],\n len(exp.labels_dict),\n kwargs['embedding'],\n kwargs['n_features'], \n kwargs['steps'])\n pathlib.Path(exp_dir).mkdir(parents=True, exist_ok=False)\n print('Saving to: \\n{}'.format(exp_dir))\n \n # Meta data\n experiments = {'experiment': exp,\n 'train':(train_data, train_labels),\n 'test': (test_data, test_labels)}\n pd.to_pickle(experiments, os.path.join(exp_dir, \"experiment_meta_data.pkl\"))\n \n # Training\n feature_training(train_data, train_labels, test_data, test_labels, feature_idx, kwargs['embedding'], exp_dir)", "_____no_output_____" ] ], [ [ "### Setup", "_____no_output_____" ] ], [ [ "base=\"/srv/nas/mk2/projects/pan-cancer/TCGA_CCLE_GCP\"\n# read meta/data\ntcga_meta = pd.read_csv(os.path.join(base, 
\"TCGA/TCGA_GDC_ID_MAP.tsv\"), sep=\"\\t\")\n# select disease\ndisease = tcga_meta[tcga_meta['Sample Type']=='Solid Tissue Normal']['Disease'].value_counts()\ndisease = list(disease[disease>=20].index)\ndisease", "_____no_output_____" ], [ "disease = ['BRCA', 'LUAD', 'KIRC', 'THCA', 'PRAD', 'SKCM']\nsample_type = ['Primary Tumor', 'Solid Tissue Normal']\nparams = {\"device\":\"4\",\n \"note\":\"shuffle\",\n \"n_features\":50,\n \"steps\":50,\n \"embedding\":2}", "_____no_output_____" ], [ "main(disease=disease, sample_type=sample_type, **params)", "Cuda is available: True\nSaving to: \n/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.29_22:18_shuffle_11_2_50-50\nNumber features: 1\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.374927\nTrain: [1098/2931 (100%)]\tLoss: 0.193504\nEpoch: 1/10. Train set: Average loss: 0.1940\nEpoch: 1/10. Validation set: Average loss: 0.1557\nTrain: [0/2931 (0%)]\tLoss: 0.222659\nTrain: [1098/2931 (100%)]\tLoss: 0.181222\nEpoch: 2/10. Train set: Average loss: 0.1813\nEpoch: 2/10. Validation set: Average loss: 0.1508\nTrain: [0/2931 (0%)]\tLoss: 0.212997\nTrain: [1098/2931 (100%)]\tLoss: 0.183003\nEpoch: 3/10. Train set: Average loss: 0.1831\nEpoch: 3/10. Validation set: Average loss: 0.1537\nTrain: [0/2931 (0%)]\tLoss: 0.189994\nTrain: [1098/2931 (100%)]\tLoss: 0.180737\nEpoch: 4/10. Train set: Average loss: 0.1808\nEpoch: 4/10. Validation set: Average loss: 0.1913\nTrain: [0/2931 (0%)]\tLoss: 0.186037\nTrain: [1098/2931 (100%)]\tLoss: 0.179655\nEpoch: 5/10. Train set: Average loss: 0.1797\nEpoch: 5/10. Validation set: Average loss: 0.1603\nTrain: [0/2931 (0%)]\tLoss: 0.170767\nTrain: [1098/2931 (100%)]\tLoss: 0.179679\nEpoch: 6/10. Train set: Average loss: 0.1797\nEpoch: 6/10. Validation set: Average loss: 0.1545\nTrain: [0/2931 (0%)]\tLoss: 0.134427\nTrain: [1098/2931 (100%)]\tLoss: 0.183294\nEpoch: 7/10. Train set: Average loss: 0.1832\nEpoch: 7/10. Validation set: Average loss: 0.1731\nTrain: [0/2931 (0%)]\tLoss: 0.252801\nTrain: [1098/2931 (100%)]\tLoss: 0.178871\nEpoch: 8/10. Train set: Average loss: 0.1791\nEpoch: 8/10. Validation set: Average loss: 0.1582\nTrain: [0/2931 (0%)]\tLoss: 0.182920\nTrain: [1098/2931 (100%)]\tLoss: 0.178184\nEpoch: 9/10. Train set: Average loss: 0.1782\nEpoch: 9/10. Validation set: Average loss: 0.1501\nTrain: [0/2931 (0%)]\tLoss: 0.190192\nTrain: [1098/2931 (100%)]\tLoss: 0.173972\nEpoch: 10/10. Train set: Average loss: 0.1740\nEpoch: 10/10. Validation set: Average loss: 0.1503\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.187336\nTrain: [1098/2931 (100%)]\tLoss: 0.183202\nEpoch: 1/10. Train set: Average loss: 0.1832\nEpoch: 1/10. Validation set: Average loss: 0.1597\nTrain: [0/2931 (0%)]\tLoss: 0.213644\nTrain: [1098/2931 (100%)]\tLoss: 0.190787\nEpoch: 2/10. Train set: Average loss: 0.1908\nEpoch: 2/10. Validation set: Average loss: 0.2095\nTrain: [0/2931 (0%)]\tLoss: 0.173457\nTrain: [1098/2931 (100%)]\tLoss: 0.187406\nEpoch: 3/10. Train set: Average loss: 0.1874\nEpoch: 3/10. Validation set: Average loss: 0.1529\nTrain: [0/2931 (0%)]\tLoss: 0.164775\nTrain: [1098/2931 (100%)]\tLoss: 0.181925\nEpoch: 4/10. Train set: Average loss: 0.1819\nEpoch: 4/10. Validation set: Average loss: 0.1545\nTrain: [0/2931 (0%)]\tLoss: 0.220412\nTrain: [1098/2931 (100%)]\tLoss: 0.181454\nEpoch: 5/10. Train set: Average loss: 0.1816\nEpoch: 5/10. Validation set: Average loss: 0.1558\nTrain: [0/2931 (0%)]\tLoss: 0.161692\nTrain: [1098/2931 (100%)]\tLoss: 0.178918\nEpoch: 6/10. Train set: Average loss: 0.1789\nEpoch: 6/10. 
Validation set: Average loss: 0.1530\nTrain: [0/2931 (0%)]\tLoss: 0.227426\nTrain: [1098/2931 (100%)]\tLoss: 0.183900\nEpoch: 7/10. Train set: Average loss: 0.1840\nEpoch: 7/10. Validation set: Average loss: 0.1539\nTrain: [0/2931 (0%)]\tLoss: 0.142838\nTrain: [1098/2931 (100%)]\tLoss: 0.182443\nEpoch: 8/10. Train set: Average loss: 0.1823\nEpoch: 8/10. Validation set: Average loss: 0.1921\nTrain: [0/2931 (0%)]\tLoss: 0.252340\nTrain: [1098/2931 (100%)]\tLoss: 0.183801\nEpoch: 9/10. Train set: Average loss: 0.1840\nEpoch: 9/10. Validation set: Average loss: 0.1522\nTrain: [0/2931 (0%)]\tLoss: 0.138726\nTrain: [1098/2931 (100%)]\tLoss: 0.180158\nEpoch: 10/10. Train set: Average loss: 0.1800\nEpoch: 10/10. Validation set: Average loss: 0.1521\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.187464\nTrain: [1098/2931 (100%)]\tLoss: 0.195713\nEpoch: 1/10. Train set: Average loss: 0.1957\nEpoch: 1/10. Validation set: Average loss: 0.1539\nTrain: [0/2931 (0%)]\tLoss: 0.337188\nTrain: [1098/2931 (100%)]\tLoss: 0.188781\nEpoch: 2/10. Train set: Average loss: 0.1892\nEpoch: 2/10. Validation set: Average loss: 0.1695\nTrain: [0/2931 (0%)]\tLoss: 0.127360\nTrain: [1098/2931 (100%)]\tLoss: 0.188006\nEpoch: 3/10. Train set: Average loss: 0.1878\nEpoch: 3/10. Validation set: Average loss: 0.1525\nTrain: [0/2931 (0%)]\tLoss: 0.096825\nTrain: [1098/2931 (100%)]\tLoss: 0.182501\nEpoch: 4/10. Train set: Average loss: 0.1823\nEpoch: 4/10. Validation set: Average loss: 0.1553\nTrain: [0/2931 (0%)]\tLoss: 0.172454\nTrain: [1098/2931 (100%)]\tLoss: 0.183237\nEpoch: 5/10. Train set: Average loss: 0.1832\nEpoch: 5/10. Validation set: Average loss: 0.1562\nTrain: [0/2931 (0%)]\tLoss: 0.180744\nTrain: [1098/2931 (100%)]\tLoss: 0.183042\nEpoch: 6/10. Train set: Average loss: 0.1830\nEpoch: 6/10. Validation set: Average loss: 0.1502\nTrain: [0/2931 (0%)]\tLoss: 0.208963\nTrain: [1098/2931 (100%)]\tLoss: 0.180374\nEpoch: 7/10. Train set: Average loss: 0.1805\nEpoch: 7/10. Validation set: Average loss: 0.1564\nTrain: [0/2931 (0%)]\tLoss: 0.175874\nTrain: [1098/2931 (100%)]\tLoss: 0.181164\nEpoch: 8/10. Train set: Average loss: 0.1812\nEpoch: 8/10. Validation set: Average loss: 0.1509\nTrain: [0/2931 (0%)]\tLoss: 0.128747\nTrain: [1098/2931 (100%)]\tLoss: 0.175506\nEpoch: 9/10. Train set: Average loss: 0.1754\nEpoch: 9/10. Validation set: Average loss: 0.1516\nTrain: [0/2931 (0%)]\tLoss: 0.203135\nTrain: [1098/2931 (100%)]\tLoss: 0.168607\nEpoch: 10/10. Train set: Average loss: 0.1687\nEpoch: 10/10. Validation set: Average loss: 0.1515\nNumber features: 2\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.249222\nTrain: [1098/2931 (100%)]\tLoss: 0.171710\nEpoch: 1/10. Train set: Average loss: 0.1719\nEpoch: 1/10. Validation set: Average loss: 0.1652\nTrain: [0/2931 (0%)]\tLoss: 0.220624\nTrain: [1098/2931 (100%)]\tLoss: 0.180322\nEpoch: 2/10. Train set: Average loss: 0.1804\nEpoch: 2/10. Validation set: Average loss: 0.1642\nTrain: [0/2931 (0%)]\tLoss: 0.153036\nTrain: [1098/2931 (100%)]\tLoss: 0.172054\nEpoch: 3/10. Train set: Average loss: 0.1720\nEpoch: 3/10. Validation set: Average loss: 0.1671\nTrain: [0/2931 (0%)]\tLoss: 0.211337\nTrain: [1098/2931 (100%)]\tLoss: 0.165912\nEpoch: 4/10. Train set: Average loss: 0.1660\nEpoch: 4/10. Validation set: Average loss: 0.1638\nTrain: [0/2931 (0%)]\tLoss: 0.202172\nTrain: [1098/2931 (100%)]\tLoss: 0.171318\nEpoch: 5/10. Train set: Average loss: 0.1714\nEpoch: 5/10. 
Validation set: Average loss: 0.1678\nTrain: [0/2931 (0%)]\tLoss: 0.197127\nTrain: [1098/2931 (100%)]\tLoss: 0.170522\nEpoch: 6/10. Train set: Average loss: 0.1706\nEpoch: 6/10. Validation set: Average loss: 0.1660\nTrain: [0/2931 (0%)]\tLoss: 0.214880\nTrain: [1098/2931 (100%)]\tLoss: 0.163700\nEpoch: 7/10. Train set: Average loss: 0.1638\nEpoch: 7/10. Validation set: Average loss: 0.1730\nTrain: [0/2931 (0%)]\tLoss: 0.217082\nTrain: [1098/2931 (100%)]\tLoss: 0.169587\nEpoch: 8/10. Train set: Average loss: 0.1697\nEpoch: 8/10. Validation set: Average loss: 0.1879\nTrain: [0/2931 (0%)]\tLoss: 0.227795\nTrain: [1098/2931 (100%)]\tLoss: 0.165456\nEpoch: 9/10. Train set: Average loss: 0.1656\nEpoch: 9/10. Validation set: Average loss: 0.1596\nTrain: [0/2931 (0%)]\tLoss: 0.122143\nTrain: [1098/2931 (100%)]\tLoss: 0.157381\nEpoch: 10/10. Train set: Average loss: 0.1573\nEpoch: 10/10. Validation set: Average loss: 0.1586\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.248103\nTrain: [1098/2931 (100%)]\tLoss: 0.170453\nEpoch: 1/10. Train set: Average loss: 0.1707\nEpoch: 1/10. Validation set: Average loss: 0.1772\nTrain: [0/2931 (0%)]\tLoss: 0.192732\nTrain: [1098/2931 (100%)]\tLoss: 0.181071\nEpoch: 2/10. Train set: Average loss: 0.1811\nEpoch: 2/10. Validation set: Average loss: 0.1623\nTrain: [0/2931 (0%)]\tLoss: 0.138900\nTrain: [1098/2931 (100%)]\tLoss: 0.171251\nEpoch: 3/10. Train set: Average loss: 0.1712\nEpoch: 3/10. Validation set: Average loss: 0.1644\nTrain: [0/2931 (0%)]\tLoss: 0.227918\nTrain: [1098/2931 (100%)]\tLoss: 0.183757\nEpoch: 4/10. Train set: Average loss: 0.1839\nEpoch: 4/10. Validation set: Average loss: 0.1731\nTrain: [0/2931 (0%)]\tLoss: 0.157927\nTrain: [1098/2931 (100%)]\tLoss: 0.171614\nEpoch: 5/10. Train set: Average loss: 0.1716\nEpoch: 5/10. Validation set: Average loss: 0.1657\nTrain: [0/2931 (0%)]\tLoss: 0.159822\nTrain: [1098/2931 (100%)]\tLoss: 0.170320\nEpoch: 6/10. Train set: Average loss: 0.1703\nEpoch: 6/10. Validation set: Average loss: 0.1642\nTrain: [0/2931 (0%)]\tLoss: 0.153967\nTrain: [1098/2931 (100%)]\tLoss: 0.166519\nEpoch: 7/10. Train set: Average loss: 0.1665\nEpoch: 7/10. Validation set: Average loss: 0.1547\nTrain: [0/2931 (0%)]\tLoss: 0.189017\nTrain: [1098/2931 (100%)]\tLoss: 0.157176\nEpoch: 8/10. Train set: Average loss: 0.1573\nEpoch: 8/10. Validation set: Average loss: 0.1593\nTrain: [0/2931 (0%)]\tLoss: 0.111575\nTrain: [1098/2931 (100%)]\tLoss: 0.156069\nEpoch: 9/10. Train set: Average loss: 0.1559\nEpoch: 9/10. Validation set: Average loss: 0.1425\nTrain: [0/2931 (0%)]\tLoss: 0.192282\nTrain: [1098/2931 (100%)]\tLoss: 0.150287\nEpoch: 10/10. Train set: Average loss: 0.1504\nEpoch: 10/10. Validation set: Average loss: 0.1433\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.187099\nTrain: [1098/2931 (100%)]\tLoss: 0.174603\nEpoch: 1/10. Train set: Average loss: 0.1746\nEpoch: 1/10. Validation set: Average loss: 0.1575\nTrain: [0/2931 (0%)]\tLoss: 0.098627\nTrain: [1098/2931 (100%)]\tLoss: 0.171403\nEpoch: 2/10. Train set: Average loss: 0.1712\nEpoch: 2/10. Validation set: Average loss: 0.1566\nTrain: [0/2931 (0%)]\tLoss: 0.118006\nTrain: [1098/2931 (100%)]\tLoss: 0.175257\nEpoch: 3/10. Train set: Average loss: 0.1751\nEpoch: 3/10. Validation set: Average loss: 0.1669\nTrain: [0/2931 (0%)]\tLoss: 0.131965\nTrain: [1098/2931 (100%)]\tLoss: 0.169449\nEpoch: 4/10. Train set: Average loss: 0.1693\nEpoch: 4/10. Validation set: Average loss: 0.1640\nTrain: [0/2931 (0%)]\tLoss: 0.156112\nTrain: [1098/2931 (100%)]\tLoss: 0.169533\nEpoch: 5/10. 
Train set: Average loss: 0.1695\nEpoch: 5/10. Validation set: Average loss: 0.1598\nTrain: [0/2931 (0%)]\tLoss: 0.179457\nTrain: [1098/2931 (100%)]\tLoss: 0.169698\nEpoch: 6/10. Train set: Average loss: 0.1697\nEpoch: 6/10. Validation set: Average loss: 0.1607\nTrain: [0/2931 (0%)]\tLoss: 0.190843\nTrain: [1098/2931 (100%)]\tLoss: 0.171622\nEpoch: 7/10. Train set: Average loss: 0.1717\nEpoch: 7/10. Validation set: Average loss: 0.1584\nTrain: [0/2931 (0%)]\tLoss: 0.254858\nTrain: [1098/2931 (100%)]\tLoss: 0.165034\nEpoch: 8/10. Train set: Average loss: 0.1653\nEpoch: 8/10. Validation set: Average loss: 0.1587\nTrain: [0/2931 (0%)]\tLoss: 0.119103\nTrain: [1098/2931 (100%)]\tLoss: 0.161403\nEpoch: 9/10. Train set: Average loss: 0.1613\nEpoch: 9/10. Validation set: Average loss: 0.1611\nTrain: [0/2931 (0%)]\tLoss: 0.129035\nTrain: [1098/2931 (100%)]\tLoss: 0.155339\nEpoch: 10/10. Train set: Average loss: 0.1553\nEpoch: 10/10. Validation set: Average loss: 0.1566\nNumber features: 3\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.249297\nTrain: [1098/2931 (100%)]\tLoss: 0.183006\nEpoch: 1/10. Train set: Average loss: 0.1832\nEpoch: 1/10. Validation set: Average loss: 0.2024\nTrain: [0/2931 (0%)]\tLoss: 0.162215\nTrain: [1098/2931 (100%)]\tLoss: 0.182924\nEpoch: 2/10. Train set: Average loss: 0.1829\nEpoch: 2/10. Validation set: Average loss: 0.1635\nTrain: [0/2931 (0%)]\tLoss: 0.156759\nTrain: [1098/2931 (100%)]\tLoss: 0.167828\nEpoch: 3/10. Train set: Average loss: 0.1678\nEpoch: 3/10. Validation set: Average loss: 0.1406\nTrain: [0/2931 (0%)]\tLoss: 0.201045\nTrain: [1098/2931 (100%)]\tLoss: 0.160625\nEpoch: 4/10. Train set: Average loss: 0.1607\nEpoch: 4/10. Validation set: Average loss: 0.1347\nTrain: [0/2931 (0%)]\tLoss: 0.205500\nTrain: [1098/2931 (100%)]\tLoss: 0.158861\nEpoch: 5/10. Train set: Average loss: 0.1590\nEpoch: 5/10. Validation set: Average loss: 0.1433\nTrain: [0/2931 (0%)]\tLoss: 0.134409\nTrain: [1098/2931 (100%)]\tLoss: 0.154523\nEpoch: 6/10. Train set: Average loss: 0.1545\nEpoch: 6/10. Validation set: Average loss: 0.1395\nTrain: [0/2931 (0%)]\tLoss: 0.138358\nTrain: [1098/2931 (100%)]\tLoss: 0.159633\nEpoch: 7/10. Train set: Average loss: 0.1596\nEpoch: 7/10. Validation set: Average loss: 0.1459\nTrain: [0/2931 (0%)]\tLoss: 0.142148\nTrain: [1098/2931 (100%)]\tLoss: 0.158758\nEpoch: 8/10. Train set: Average loss: 0.1587\nEpoch: 8/10. Validation set: Average loss: 0.1478\nTrain: [0/2931 (0%)]\tLoss: 0.101143\nTrain: [1098/2931 (100%)]\tLoss: 0.148371\nEpoch: 9/10. Train set: Average loss: 0.1482\nEpoch: 9/10. Validation set: Average loss: 0.1347\nTrain: [0/2931 (0%)]\tLoss: 0.159657\nTrain: [1098/2931 (100%)]\tLoss: 0.147856\nEpoch: 10/10. Train set: Average loss: 0.1479\nEpoch: 10/10. Validation set: Average loss: 0.1359\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.312120\nTrain: [1098/2931 (100%)]\tLoss: 0.172076\nEpoch: 1/10. Train set: Average loss: 0.1725\nEpoch: 1/10. Validation set: Average loss: 0.1350\nTrain: [0/2931 (0%)]\tLoss: 0.193717\nTrain: [1098/2931 (100%)]\tLoss: 0.166789\nEpoch: 2/10. Train set: Average loss: 0.1669\nEpoch: 2/10. Validation set: Average loss: 0.1505\nTrain: [0/2931 (0%)]\tLoss: 0.164718\nTrain: [1098/2931 (100%)]\tLoss: 0.163732\nEpoch: 3/10. Train set: Average loss: 0.1637\nEpoch: 3/10. Validation set: Average loss: 0.1486\nTrain: [0/2931 (0%)]\tLoss: 0.232325\nTrain: [1098/2931 (100%)]\tLoss: 0.158148\nEpoch: 4/10. Train set: Average loss: 0.1584\nEpoch: 4/10. 
Validation set: Average loss: 0.1431\nTrain: [0/2931 (0%)]\tLoss: 0.185917\nTrain: [1098/2931 (100%)]\tLoss: 0.155473\nEpoch: 5/10. Train set: Average loss: 0.1556\nEpoch: 5/10. Validation set: Average loss: 0.1405\nTrain: [0/2931 (0%)]\tLoss: 0.128152\nTrain: [1098/2931 (100%)]\tLoss: 0.149164\nEpoch: 6/10. Train set: Average loss: 0.1491\nEpoch: 6/10. Validation set: Average loss: 0.1385\nTrain: [0/2931 (0%)]\tLoss: 0.110048\nTrain: [1098/2931 (100%)]\tLoss: 0.151142\nEpoch: 7/10. Train set: Average loss: 0.1510\nEpoch: 7/10. Validation set: Average loss: 0.1524\nTrain: [0/2931 (0%)]\tLoss: 0.195928\nTrain: [1098/2931 (100%)]\tLoss: 0.180620\nEpoch: 8/10. Train set: Average loss: 0.1807\nEpoch: 8/10. Validation set: Average loss: 0.1696\nTrain: [0/2931 (0%)]\tLoss: 0.253078\nTrain: [1098/2931 (100%)]\tLoss: 0.162789\nEpoch: 9/10. Train set: Average loss: 0.1630\nEpoch: 9/10. Validation set: Average loss: 0.1520\nTrain: [0/2931 (0%)]\tLoss: 0.106072\nTrain: [1098/2931 (100%)]\tLoss: 0.155799\nEpoch: 10/10. Train set: Average loss: 0.1557\nEpoch: 10/10. Validation set: Average loss: 0.1358\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.373910\nTrain: [1098/2931 (100%)]\tLoss: 0.178899\nEpoch: 1/10. Train set: Average loss: 0.1794\nEpoch: 1/10. Validation set: Average loss: 0.1843\nTrain: [0/2931 (0%)]\tLoss: 0.262310\nTrain: [1098/2931 (100%)]\tLoss: 0.177270\nEpoch: 2/10. Train set: Average loss: 0.1775\nEpoch: 2/10. Validation set: Average loss: 0.1765\nTrain: [0/2931 (0%)]\tLoss: 0.253810\nTrain: [1098/2931 (100%)]\tLoss: 0.173688\nEpoch: 3/10. Train set: Average loss: 0.1739\nEpoch: 3/10. Validation set: Average loss: 0.1634\nTrain: [0/2931 (0%)]\tLoss: 0.174857\nTrain: [1098/2931 (100%)]\tLoss: 0.170011\nEpoch: 4/10. Train set: Average loss: 0.1700\nEpoch: 4/10. Validation set: Average loss: 0.1913\nTrain: [0/2931 (0%)]\tLoss: 0.204993\nTrain: [1098/2931 (100%)]\tLoss: 0.160092\nEpoch: 5/10. Train set: Average loss: 0.1602\nEpoch: 5/10. Validation set: Average loss: 0.1548\nTrain: [0/2931 (0%)]\tLoss: 0.173894\nTrain: [1098/2931 (100%)]\tLoss: 0.164165\nEpoch: 6/10. Train set: Average loss: 0.1642\nEpoch: 6/10. Validation set: Average loss: 0.1529\nTrain: [0/2931 (0%)]\tLoss: 0.128250\nTrain: [1098/2931 (100%)]\tLoss: 0.171647\nEpoch: 7/10. Train set: Average loss: 0.1715\nEpoch: 7/10. Validation set: Average loss: 0.1677\nTrain: [0/2931 (0%)]\tLoss: 0.234524\nTrain: [1098/2931 (100%)]\tLoss: 0.156328\nEpoch: 8/10. Train set: Average loss: 0.1565\nEpoch: 8/10. Validation set: Average loss: 0.1412\nTrain: [0/2931 (0%)]\tLoss: 0.210525\nTrain: [1098/2931 (100%)]\tLoss: 0.148034\nEpoch: 9/10. Train set: Average loss: 0.1482\nEpoch: 9/10. Validation set: Average loss: 0.1359\nTrain: [0/2931 (0%)]\tLoss: 0.140319\nTrain: [1098/2931 (100%)]\tLoss: 0.151046\nEpoch: 10/10. Train set: Average loss: 0.1510\nEpoch: 10/10. Validation set: Average loss: 0.1382\nNumber features: 4\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.374823\nTrain: [1098/2931 (100%)]\tLoss: 0.172730\nEpoch: 1/10. Train set: Average loss: 0.1733\nEpoch: 1/10. Validation set: Average loss: 0.1673\nTrain: [0/2931 (0%)]\tLoss: 0.275353\nTrain: [1098/2931 (100%)]\tLoss: 0.165270\nEpoch: 2/10. Train set: Average loss: 0.1656\nEpoch: 2/10. Validation set: Average loss: 0.1845\nTrain: [0/2931 (0%)]\tLoss: 0.287510\nTrain: [1098/2931 (100%)]\tLoss: 0.167999\nEpoch: 3/10. Train set: Average loss: 0.1683\nEpoch: 3/10. 
Validation set: Average loss: 0.1944\nTrain: [0/2931 (0%)]\tLoss: 0.281919\nTrain: [1098/2931 (100%)]\tLoss: 0.160950\nEpoch: 4/10. Train set: Average loss: 0.1613\nEpoch: 4/10. Validation set: Average loss: 0.1649\nTrain: [0/2931 (0%)]\tLoss: 0.154450\nTrain: [1098/2931 (100%)]\tLoss: 0.164805\nEpoch: 5/10. Train set: Average loss: 0.1648\nEpoch: 5/10. Validation set: Average loss: 0.1614\nTrain: [0/2931 (0%)]\tLoss: 0.141266\nTrain: [1098/2931 (100%)]\tLoss: 0.153363\nEpoch: 6/10. Train set: Average loss: 0.1533\nEpoch: 6/10. Validation set: Average loss: 0.1774\nTrain: [0/2931 (0%)]\tLoss: 0.280593\nTrain: [1098/2931 (100%)]\tLoss: 0.161699\nEpoch: 7/10. Train set: Average loss: 0.1620\nEpoch: 7/10. Validation set: Average loss: 0.1586\nTrain: [0/2931 (0%)]\tLoss: 0.172083\nTrain: [1098/2931 (100%)]\tLoss: 0.157448\nEpoch: 8/10. Train set: Average loss: 0.1575\nEpoch: 8/10. Validation set: Average loss: 0.1627\nTrain: [0/2931 (0%)]\tLoss: 0.220455\nTrain: [1098/2931 (100%)]\tLoss: 0.148148\nEpoch: 9/10. Train set: Average loss: 0.1483\nEpoch: 9/10. Validation set: Average loss: 0.1490\nTrain: [0/2931 (0%)]\tLoss: 0.253321\nTrain: [1098/2931 (100%)]\tLoss: 0.144990\nEpoch: 10/10. Train set: Average loss: 0.1453\nEpoch: 10/10. Validation set: Average loss: 0.1488\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.311977\nTrain: [1098/2931 (100%)]\tLoss: 0.162350\nEpoch: 1/10. Train set: Average loss: 0.1628\nEpoch: 1/10. Validation set: Average loss: 0.1734\nTrain: [0/2931 (0%)]\tLoss: 0.267583\nTrain: [1098/2931 (100%)]\tLoss: 0.161174\nEpoch: 2/10. Train set: Average loss: 0.1615\nEpoch: 2/10. Validation set: Average loss: 0.1850\nTrain: [0/2931 (0%)]\tLoss: 0.215827\nTrain: [1098/2931 (100%)]\tLoss: 0.163120\nEpoch: 3/10. Train set: Average loss: 0.1633\nEpoch: 3/10. Validation set: Average loss: 0.1573\nTrain: [0/2931 (0%)]\tLoss: 0.168145\nTrain: [1098/2931 (100%)]\tLoss: 0.158328\nEpoch: 4/10. Train set: Average loss: 0.1584\nEpoch: 4/10. Validation set: Average loss: 0.1646\nTrain: [0/2931 (0%)]\tLoss: 0.149752\nTrain: [1098/2931 (100%)]\tLoss: 0.148420\nEpoch: 5/10. Train set: Average loss: 0.1484\nEpoch: 5/10. Validation set: Average loss: 0.1716\nTrain: [0/2931 (0%)]\tLoss: 0.221934\nTrain: [1098/2931 (100%)]\tLoss: 0.154812\nEpoch: 6/10. Train set: Average loss: 0.1550\nEpoch: 6/10. Validation set: Average loss: 0.1587\nTrain: [0/2931 (0%)]\tLoss: 0.195982\nTrain: [1098/2931 (100%)]\tLoss: 0.158045\nEpoch: 7/10. Train set: Average loss: 0.1581\nEpoch: 7/10. Validation set: Average loss: 0.1797\nTrain: [0/2931 (0%)]\tLoss: 0.178622\nTrain: [1098/2931 (100%)]\tLoss: 0.152797\nEpoch: 8/10. Train set: Average loss: 0.1529\nEpoch: 8/10. Validation set: Average loss: 0.1673\nTrain: [0/2931 (0%)]\tLoss: 0.185816\nTrain: [1098/2931 (100%)]\tLoss: 0.148903\nEpoch: 9/10. Train set: Average loss: 0.1490\nEpoch: 9/10. Validation set: Average loss: 0.1436\nTrain: [0/2931 (0%)]\tLoss: 0.170490\nTrain: [1098/2931 (100%)]\tLoss: 0.147605\nEpoch: 10/10. Train set: Average loss: 0.1477\nEpoch: 10/10. Validation set: Average loss: 0.1446\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.374606\nTrain: [1098/2931 (100%)]\tLoss: 0.166498\nEpoch: 1/10. Train set: Average loss: 0.1671\nEpoch: 1/10. Validation set: Average loss: 0.1650\nTrain: [0/2931 (0%)]\tLoss: 0.223835\nTrain: [1098/2931 (100%)]\tLoss: 0.160892\nEpoch: 2/10. Train set: Average loss: 0.1611\nEpoch: 2/10. Validation set: Average loss: 0.2064\nTrain: [0/2931 (0%)]\tLoss: 0.297244\nTrain: [1098/2931 (100%)]\tLoss: 0.159561\nEpoch: 3/10. 
Train set: Average loss: 0.1599\nEpoch: 3/10. Validation set: Average loss: 0.1775\nTrain: [0/2931 (0%)]\tLoss: 0.284444\nTrain: [1098/2931 (100%)]\tLoss: 0.157880\nEpoch: 4/10. Train set: Average loss: 0.1582\nEpoch: 4/10. Validation set: Average loss: 0.1828\nTrain: [0/2931 (0%)]\tLoss: 0.296700\nTrain: [1098/2931 (100%)]\tLoss: 0.153783\nEpoch: 5/10. Train set: Average loss: 0.1542\nEpoch: 5/10. Validation set: Average loss: 0.1666\nTrain: [0/2931 (0%)]\tLoss: 0.232787\nTrain: [1098/2931 (100%)]\tLoss: 0.150341\nEpoch: 6/10. Train set: Average loss: 0.1506\nEpoch: 6/10. Validation set: Average loss: 0.1697\nTrain: [0/2931 (0%)]\tLoss: 0.239347\nTrain: [1098/2931 (100%)]\tLoss: 0.147370\nEpoch: 7/10. Train set: Average loss: 0.1476\nEpoch: 7/10. Validation set: Average loss: 0.1673\nTrain: [0/2931 (0%)]\tLoss: 0.196420\nTrain: [1098/2931 (100%)]\tLoss: 0.152661\nEpoch: 8/10. Train set: Average loss: 0.1528\nEpoch: 8/10. Validation set: Average loss: 0.1729\nTrain: [0/2931 (0%)]\tLoss: 0.253451\nTrain: [1098/2931 (100%)]\tLoss: 0.151929\nEpoch: 9/10. Train set: Average loss: 0.1522\nEpoch: 9/10. Validation set: Average loss: 0.1505\nTrain: [0/2931 (0%)]\tLoss: 0.210607\nTrain: [1098/2931 (100%)]\tLoss: 0.151411\nEpoch: 10/10. Train set: Average loss: 0.1516\nEpoch: 10/10. Validation set: Average loss: 0.1498\nNumber features: 5\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.124857\nTrain: [1098/2931 (100%)]\tLoss: 0.165936\nEpoch: 1/10. Train set: Average loss: 0.1658\nEpoch: 1/10. Validation set: Average loss: 0.1684\nTrain: [0/2931 (0%)]\tLoss: 0.085786\nTrain: [1098/2931 (100%)]\tLoss: 0.173113\nEpoch: 2/10. Train set: Average loss: 0.1729\nEpoch: 2/10. Validation set: Average loss: 0.1706\nTrain: [0/2931 (0%)]\tLoss: 0.167698\nTrain: [1098/2931 (100%)]\tLoss: 0.174692\nEpoch: 3/10. Train set: Average loss: 0.1747\nEpoch: 3/10. Validation set: Average loss: 0.1761\nTrain: [0/2931 (0%)]\tLoss: 0.088598\nTrain: [1098/2931 (100%)]\tLoss: 0.169374\nEpoch: 4/10. Train set: Average loss: 0.1692\nEpoch: 4/10. Validation set: Average loss: 0.1657\nTrain: [0/2931 (0%)]\tLoss: 0.056715\nTrain: [1098/2931 (100%)]\tLoss: 0.169159\nEpoch: 5/10. Train set: Average loss: 0.1689\nEpoch: 5/10. Validation set: Average loss: 0.1810\nTrain: [0/2931 (0%)]\tLoss: 0.145081\nTrain: [1098/2931 (100%)]\tLoss: 0.159125\nEpoch: 6/10. Train set: Average loss: 0.1591\nEpoch: 6/10. Validation set: Average loss: 0.1542\nTrain: [0/2931 (0%)]\tLoss: 0.061239\nTrain: [1098/2931 (100%)]\tLoss: 0.150857\nEpoch: 7/10. Train set: Average loss: 0.1506\nEpoch: 7/10. Validation set: Average loss: 0.1519\nTrain: [0/2931 (0%)]\tLoss: 0.119074\nTrain: [1098/2931 (100%)]\tLoss: 0.153661\nEpoch: 8/10. Train set: Average loss: 0.1536\nEpoch: 8/10. Validation set: Average loss: 0.1543\nTrain: [0/2931 (0%)]\tLoss: 0.094694\nTrain: [1098/2931 (100%)]\tLoss: 0.148640\nEpoch: 9/10. Train set: Average loss: 0.1485\nEpoch: 9/10. Validation set: Average loss: 0.1380\nTrain: [0/2931 (0%)]\tLoss: 0.082544\nTrain: [1098/2931 (100%)]\tLoss: 0.147101\nEpoch: 10/10. Train set: Average loss: 0.1469\nEpoch: 10/10. Validation set: Average loss: 0.1357\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.187147\nTrain: [1098/2931 (100%)]\tLoss: 0.166602\nEpoch: 1/10. Train set: Average loss: 0.1667\nEpoch: 1/10. Validation set: Average loss: 0.1708\nTrain: [0/2931 (0%)]\tLoss: 0.129088\nTrain: [1098/2931 (100%)]\tLoss: 0.164543\nEpoch: 2/10. Train set: Average loss: 0.1644\nEpoch: 2/10. 
Validation set: Average loss: 0.1551\nTrain: [0/2931 (0%)]\tLoss: 0.181703\nTrain: [1098/2931 (100%)]\tLoss: 0.165528\nEpoch: 3/10. Train set: Average loss: 0.1656\nEpoch: 3/10. Validation set: Average loss: 0.1476\nTrain: [0/2931 (0%)]\tLoss: 0.179429\nTrain: [1098/2931 (100%)]\tLoss: 0.164059\nEpoch: 4/10. Train set: Average loss: 0.1641\nEpoch: 4/10. Validation set: Average loss: 0.1604\nTrain: [0/2931 (0%)]\tLoss: 0.150228\nTrain: [1098/2931 (100%)]\tLoss: 0.150497\nEpoch: 5/10. Train set: Average loss: 0.1505\nEpoch: 5/10. Validation set: Average loss: 0.1416\nTrain: [0/2931 (0%)]\tLoss: 0.109927\nTrain: [1098/2931 (100%)]\tLoss: 0.154266\nEpoch: 6/10. Train set: Average loss: 0.1541\nEpoch: 6/10. Validation set: Average loss: 0.1495\nTrain: [0/2931 (0%)]\tLoss: 0.149067\nTrain: [1098/2931 (100%)]\tLoss: 0.150350\nEpoch: 7/10. Train set: Average loss: 0.1503\nEpoch: 7/10. Validation set: Average loss: 0.1540\nTrain: [0/2931 (0%)]\tLoss: 0.093017\nTrain: [1098/2931 (100%)]\tLoss: 0.146262\nEpoch: 8/10. Train set: Average loss: 0.1461\nEpoch: 8/10. Validation set: Average loss: 0.1491\nTrain: [0/2931 (0%)]\tLoss: 0.129397\nTrain: [1098/2931 (100%)]\tLoss: 0.140383\nEpoch: 9/10. Train set: Average loss: 0.1404\nEpoch: 9/10. Validation set: Average loss: 0.1424\nTrain: [0/2931 (0%)]\tLoss: 0.212504\nTrain: [1098/2931 (100%)]\tLoss: 0.140477\nEpoch: 10/10. Train set: Average loss: 0.1407\nEpoch: 10/10. Validation set: Average loss: 0.1425\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.249253\nTrain: [1098/2931 (100%)]\tLoss: 0.174190\nEpoch: 1/10. Train set: Average loss: 0.1744\nEpoch: 1/10. Validation set: Average loss: 0.1839\nTrain: [0/2931 (0%)]\tLoss: 0.212208\nTrain: [1098/2931 (100%)]\tLoss: 0.167214\nEpoch: 2/10. Train set: Average loss: 0.1673\nEpoch: 2/10. Validation set: Average loss: 0.1641\nTrain: [0/2931 (0%)]\tLoss: 0.213794\nTrain: [1098/2931 (100%)]\tLoss: 0.165356\nEpoch: 3/10. Train set: Average loss: 0.1655\nEpoch: 3/10. Validation set: Average loss: 0.1492\nTrain: [0/2931 (0%)]\tLoss: 0.195251\nTrain: [1098/2931 (100%)]\tLoss: 0.160546\nEpoch: 4/10. Train set: Average loss: 0.1606\nEpoch: 4/10. Validation set: Average loss: 0.1473\nTrain: [0/2931 (0%)]\tLoss: 0.127635\nTrain: [1098/2931 (100%)]\tLoss: 0.157487\nEpoch: 5/10. Train set: Average loss: 0.1574\nEpoch: 5/10. Validation set: Average loss: 0.1474\nTrain: [0/2931 (0%)]\tLoss: 0.170474\nTrain: [1098/2931 (100%)]\tLoss: 0.155770\nEpoch: 6/10. Train set: Average loss: 0.1558\nEpoch: 6/10. Validation set: Average loss: 0.1616\nTrain: [0/2931 (0%)]\tLoss: 0.190919\nTrain: [1098/2931 (100%)]\tLoss: 0.154362\nEpoch: 7/10. Train set: Average loss: 0.1545\nEpoch: 7/10. Validation set: Average loss: 0.1478\nTrain: [0/2931 (0%)]\tLoss: 0.168325\nTrain: [1098/2931 (100%)]\tLoss: 0.160862\nEpoch: 8/10. Train set: Average loss: 0.1609\nEpoch: 8/10. Validation set: Average loss: 0.1790\nTrain: [0/2931 (0%)]\tLoss: 0.170163\nTrain: [1098/2931 (100%)]\tLoss: 0.155603\nEpoch: 9/10. Train set: Average loss: 0.1556\nEpoch: 9/10. Validation set: Average loss: 0.1457\nTrain: [0/2931 (0%)]\tLoss: 0.208320\nTrain: [1098/2931 (100%)]\tLoss: 0.147081\nEpoch: 10/10. Train set: Average loss: 0.1472\nEpoch: 10/10. Validation set: Average loss: 0.1480\nNumber features: 6\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.311413\nTrain: [1098/2931 (100%)]\tLoss: 0.176498\nEpoch: 1/10. Train set: Average loss: 0.1769\nEpoch: 1/10. 
Validation set: Average loss: 0.1208\nTrain: [0/2931 (0%)]\tLoss: 0.237609\nTrain: [1098/2931 (100%)]\tLoss: 0.164335\nEpoch: 2/10. Train set: Average loss: 0.1645\nEpoch: 2/10. Validation set: Average loss: 0.1155\nTrain: [0/2931 (0%)]\tLoss: 0.168682\nTrain: [1098/2931 (100%)]\tLoss: 0.167052\nEpoch: 3/10. Train set: Average loss: 0.1671\nEpoch: 3/10. Validation set: Average loss: 0.1178\nTrain: [0/2931 (0%)]\tLoss: 0.113196\nTrain: [1098/2931 (100%)]\tLoss: 0.158919\nEpoch: 4/10. Train set: Average loss: 0.1588\nEpoch: 4/10. Validation set: Average loss: 0.1152\nTrain: [0/2931 (0%)]\tLoss: 0.107863\nTrain: [1098/2931 (100%)]\tLoss: 0.162475\nEpoch: 5/10. Train set: Average loss: 0.1623\nEpoch: 5/10. Validation set: Average loss: 0.1271\nTrain: [0/2931 (0%)]\tLoss: 0.144208\nTrain: [1098/2931 (100%)]\tLoss: 0.166705\nEpoch: 6/10. Train set: Average loss: 0.1666\nEpoch: 6/10. Validation set: Average loss: 0.1061\nTrain: [0/2931 (0%)]\tLoss: 0.163886\nTrain: [1098/2931 (100%)]\tLoss: 0.163629\nEpoch: 7/10. Train set: Average loss: 0.1636\nEpoch: 7/10. Validation set: Average loss: 0.1128\nTrain: [0/2931 (0%)]\tLoss: 0.424440\nTrain: [1098/2931 (100%)]\tLoss: 0.163175\nEpoch: 8/10. Train set: Average loss: 0.1639\nEpoch: 8/10. Validation set: Average loss: 0.1308\nTrain: [0/2931 (0%)]\tLoss: 0.137951\nTrain: [1098/2931 (100%)]\tLoss: 0.158275\nEpoch: 9/10. Train set: Average loss: 0.1582\nEpoch: 9/10. Validation set: Average loss: 0.1139\nTrain: [0/2931 (0%)]\tLoss: 0.168296\nTrain: [1098/2931 (100%)]\tLoss: 0.154313\nEpoch: 10/10. Train set: Average loss: 0.1544\nEpoch: 10/10. Validation set: Average loss: 0.1151\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.249438\nTrain: [1098/2931 (100%)]\tLoss: 0.179247\nEpoch: 1/10. Train set: Average loss: 0.1794\nEpoch: 1/10. Validation set: Average loss: 0.1434\nTrain: [0/2931 (0%)]\tLoss: 0.148359\nTrain: [1098/2931 (100%)]\tLoss: 0.172173\nEpoch: 2/10. Train set: Average loss: 0.1721\nEpoch: 2/10. Validation set: Average loss: 0.1401\nTrain: [0/2931 (0%)]\tLoss: 0.151753\nTrain: [1098/2931 (100%)]\tLoss: 0.164262\nEpoch: 3/10. Train set: Average loss: 0.1642\nEpoch: 3/10. Validation set: Average loss: 0.1431\nTrain: [0/2931 (0%)]\tLoss: 0.237706\nTrain: [1098/2931 (100%)]\tLoss: 0.166988\nEpoch: 4/10. Train set: Average loss: 0.1672\nEpoch: 4/10. Validation set: Average loss: 0.1367\nTrain: [0/2931 (0%)]\tLoss: 0.229354\nTrain: [1098/2931 (100%)]\tLoss: 0.161887\nEpoch: 5/10. Train set: Average loss: 0.1621\nEpoch: 5/10. Validation set: Average loss: 0.1220\nTrain: [0/2931 (0%)]\tLoss: 0.107949\nTrain: [1098/2931 (100%)]\tLoss: 0.163049\nEpoch: 6/10. Train set: Average loss: 0.1629\nEpoch: 6/10. Validation set: Average loss: 0.1523\nTrain: [0/2931 (0%)]\tLoss: 0.189181\nTrain: [1098/2931 (100%)]\tLoss: 0.168945\nEpoch: 7/10. Train set: Average loss: 0.1690\nEpoch: 7/10. Validation set: Average loss: 0.1516\nTrain: [0/2931 (0%)]\tLoss: 0.107110\nTrain: [1098/2931 (100%)]\tLoss: 0.162230\nEpoch: 8/10. Train set: Average loss: 0.1621\nEpoch: 8/10. Validation set: Average loss: 0.1280\nTrain: [0/2931 (0%)]\tLoss: 0.142682\nTrain: [1098/2931 (100%)]\tLoss: 0.153418\nEpoch: 9/10. Train set: Average loss: 0.1534\nEpoch: 9/10. Validation set: Average loss: 0.1294\nTrain: [0/2931 (0%)]\tLoss: 0.169053\nTrain: [1098/2931 (100%)]\tLoss: 0.152245\nEpoch: 10/10. Train set: Average loss: 0.1523\nEpoch: 10/10. Validation set: Average loss: 0.1256\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.187170\nTrain: [1098/2931 (100%)]\tLoss: 0.169790\nEpoch: 1/10. 
Train set: Average loss: 0.1698\nEpoch: 1/10. Validation set: Average loss: 0.1460\nTrain: [0/2931 (0%)]\tLoss: 0.136048\nTrain: [1098/2931 (100%)]\tLoss: 0.169321\nEpoch: 2/10. Train set: Average loss: 0.1692\nEpoch: 2/10. Validation set: Average loss: 0.1324\nTrain: [0/2931 (0%)]\tLoss: 0.157580\nTrain: [1098/2931 (100%)]\tLoss: 0.177934\nEpoch: 3/10. Train set: Average loss: 0.1779\nEpoch: 3/10. Validation set: Average loss: 0.1337\nTrain: [0/2931 (0%)]\tLoss: 0.190568\nTrain: [1098/2931 (100%)]\tLoss: 0.164036\nEpoch: 4/10. Train set: Average loss: 0.1641\nEpoch: 4/10. Validation set: Average loss: 0.1650\nTrain: [0/2931 (0%)]\tLoss: 0.127062\nTrain: [1098/2931 (100%)]\tLoss: 0.171318\nEpoch: 5/10. Train set: Average loss: 0.1712\nEpoch: 5/10. Validation set: Average loss: 0.1426\nTrain: [0/2931 (0%)]\tLoss: 0.199422\nTrain: [1098/2931 (100%)]\tLoss: 0.159607\nEpoch: 6/10. Train set: Average loss: 0.1597\nEpoch: 6/10. Validation set: Average loss: 0.1378\nTrain: [0/2931 (0%)]\tLoss: 0.096196\nTrain: [1098/2931 (100%)]\tLoss: 0.162173\nEpoch: 7/10. Train set: Average loss: 0.1620\nEpoch: 7/10. Validation set: Average loss: 0.1514\nTrain: [0/2931 (0%)]\tLoss: 0.086531\nTrain: [1098/2931 (100%)]\tLoss: 0.173176\nEpoch: 8/10. Train set: Average loss: 0.1729\nEpoch: 8/10. Validation set: Average loss: 0.1661\nTrain: [0/2931 (0%)]\tLoss: 0.170200\nTrain: [1098/2931 (100%)]\tLoss: 0.174863\nEpoch: 9/10. Train set: Average loss: 0.1749\nEpoch: 9/10. Validation set: Average loss: 0.1570\nTrain: [0/2931 (0%)]\tLoss: 0.215146\nTrain: [1098/2931 (100%)]\tLoss: 0.173852\nEpoch: 10/10. Train set: Average loss: 0.1740\nEpoch: 10/10. Validation set: Average loss: 0.1516\nNumber features: 7\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.249817\nTrain: [1098/2931 (100%)]\tLoss: 0.174665\nEpoch: 1/10. Train set: Average loss: 0.1749\nEpoch: 1/10. Validation set: Average loss: 0.1878\nTrain: [0/2931 (0%)]\tLoss: 0.224087\nTrain: [1098/2931 (100%)]\tLoss: 0.167836\nEpoch: 2/10. Train set: Average loss: 0.1680\nEpoch: 2/10. Validation set: Average loss: 0.1680\nTrain: [0/2931 (0%)]\tLoss: 0.162937\nTrain: [1098/2931 (100%)]\tLoss: 0.166308\nEpoch: 3/10. Train set: Average loss: 0.1663\nEpoch: 3/10. Validation set: Average loss: 0.1710\nTrain: [0/2931 (0%)]\tLoss: 0.204904\nTrain: [1098/2931 (100%)]\tLoss: 0.167750\nEpoch: 4/10. Train set: Average loss: 0.1679\nEpoch: 4/10. Validation set: Average loss: 0.1700\nTrain: [0/2931 (0%)]\tLoss: 0.169095\nTrain: [1098/2931 (100%)]\tLoss: 0.162316\nEpoch: 5/10. Train set: Average loss: 0.1623\nEpoch: 5/10. Validation set: Average loss: 0.1641\nTrain: [0/2931 (0%)]\tLoss: 0.147804\nTrain: [1098/2931 (100%)]\tLoss: 0.158624\nEpoch: 6/10. Train set: Average loss: 0.1586\nEpoch: 6/10. Validation set: Average loss: 0.1564\nTrain: [0/2931 (0%)]\tLoss: 0.145413\nTrain: [1098/2931 (100%)]\tLoss: 0.157419\nEpoch: 7/10. Train set: Average loss: 0.1574\nEpoch: 7/10. Validation set: Average loss: 0.1591\nTrain: [0/2931 (0%)]\tLoss: 0.203829\nTrain: [1098/2931 (100%)]\tLoss: 0.158378\nEpoch: 8/10. Train set: Average loss: 0.1585\nEpoch: 8/10. Validation set: Average loss: 0.1781\nTrain: [0/2931 (0%)]\tLoss: 0.169693\nTrain: [1098/2931 (100%)]\tLoss: 0.151708\nEpoch: 9/10. Train set: Average loss: 0.1518\nEpoch: 9/10. Validation set: Average loss: 0.1515\nTrain: [0/2931 (0%)]\tLoss: 0.210166\nTrain: [1098/2931 (100%)]\tLoss: 0.147197\nEpoch: 10/10. Train set: Average loss: 0.1474\nEpoch: 10/10. 
Validation set: Average loss: 0.1502\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.187184\nTrain: [1098/2931 (100%)]\tLoss: 0.166506\nEpoch: 1/10. Train set: Average loss: 0.1666\nEpoch: 1/10. Validation set: Average loss: 0.1570\nTrain: [0/2931 (0%)]\tLoss: 0.223115\nTrain: [1098/2931 (100%)]\tLoss: 0.167209\nEpoch: 2/10. Train set: Average loss: 0.1674\nEpoch: 2/10. Validation set: Average loss: 0.1462\nTrain: [0/2931 (0%)]\tLoss: 0.125175\nTrain: [1098/2931 (100%)]\tLoss: 0.161000\nEpoch: 3/10. Train set: Average loss: 0.1609\nEpoch: 3/10. Validation set: Average loss: 0.1651\nTrain: [0/2931 (0%)]\tLoss: 0.161160\nTrain: [1098/2931 (100%)]\tLoss: 0.160025\nEpoch: 4/10. Train set: Average loss: 0.1600\nEpoch: 4/10. Validation set: Average loss: 0.1741\nTrain: [0/2931 (0%)]\tLoss: 0.109305\nTrain: [1098/2931 (100%)]\tLoss: 0.171091\nEpoch: 5/10. Train set: Average loss: 0.1709\nEpoch: 5/10. Validation set: Average loss: 0.1570\nTrain: [0/2931 (0%)]\tLoss: 0.147703\nTrain: [1098/2931 (100%)]\tLoss: 0.165525\nEpoch: 6/10. Train set: Average loss: 0.1655\nEpoch: 6/10. Validation set: Average loss: 0.1657\nTrain: [0/2931 (0%)]\tLoss: 0.120449\nTrain: [1098/2931 (100%)]\tLoss: 0.158056\nEpoch: 7/10. Train set: Average loss: 0.1580\nEpoch: 7/10. Validation set: Average loss: 0.1788\nTrain: [0/2931 (0%)]\tLoss: 0.157770\nTrain: [1098/2931 (100%)]\tLoss: 0.159510\nEpoch: 8/10. Train set: Average loss: 0.1595\nEpoch: 8/10. Validation set: Average loss: 0.1607\nTrain: [0/2931 (0%)]\tLoss: 0.150150\nTrain: [1098/2931 (100%)]\tLoss: 0.152876\nEpoch: 9/10. Train set: Average loss: 0.1529\nEpoch: 9/10. Validation set: Average loss: 0.1482\nTrain: [0/2931 (0%)]\tLoss: 0.208431\nTrain: [1098/2931 (100%)]\tLoss: 0.150062\nEpoch: 10/10. Train set: Average loss: 0.1502\nEpoch: 10/10. Validation set: Average loss: 0.1500\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.124877\nTrain: [1098/2931 (100%)]\tLoss: 0.165653\nEpoch: 1/10. Train set: Average loss: 0.1655\nEpoch: 1/10. Validation set: Average loss: 0.1544\nTrain: [0/2931 (0%)]\tLoss: 0.166773\nTrain: [1098/2931 (100%)]\tLoss: 0.165337\nEpoch: 2/10. Train set: Average loss: 0.1653\nEpoch: 2/10. Validation set: Average loss: 0.1588\nTrain: [0/2931 (0%)]\tLoss: 0.350084\nTrain: [1098/2931 (100%)]\tLoss: 0.166624\nEpoch: 3/10. Train set: Average loss: 0.1671\nEpoch: 3/10. Validation set: Average loss: 0.1588\nTrain: [0/2931 (0%)]\tLoss: 0.131807\nTrain: [1098/2931 (100%)]\tLoss: 0.163511\nEpoch: 4/10. Train set: Average loss: 0.1634\nEpoch: 4/10. Validation set: Average loss: 0.1447\nTrain: [0/2931 (0%)]\tLoss: 0.073869\nTrain: [1098/2931 (100%)]\tLoss: 0.159991\nEpoch: 5/10. Train set: Average loss: 0.1598\nEpoch: 5/10. Validation set: Average loss: 0.1499\nTrain: [0/2931 (0%)]\tLoss: 0.093219\nTrain: [1098/2931 (100%)]\tLoss: 0.159269\nEpoch: 6/10. Train set: Average loss: 0.1591\nEpoch: 6/10. Validation set: Average loss: 0.1516\nTrain: [0/2931 (0%)]\tLoss: 0.143600\nTrain: [1098/2931 (100%)]\tLoss: 0.159832\nEpoch: 7/10. Train set: Average loss: 0.1598\nEpoch: 7/10. Validation set: Average loss: 0.1489\nTrain: [0/2931 (0%)]\tLoss: 0.112954\nTrain: [1098/2931 (100%)]\tLoss: 0.159864\nEpoch: 8/10. Train set: Average loss: 0.1597\nEpoch: 8/10. Validation set: Average loss: 0.1769\nTrain: [0/2931 (0%)]\tLoss: 0.223670\nTrain: [1098/2931 (100%)]\tLoss: 0.162095\nEpoch: 9/10. Train set: Average loss: 0.1623\nEpoch: 9/10. Validation set: Average loss: 0.1536\nTrain: [0/2931 (0%)]\tLoss: 0.126086\nTrain: [1098/2931 (100%)]\tLoss: 0.152392\nEpoch: 10/10. 
Train set: Average loss: 0.1523\nEpoch: 10/10. Validation set: Average loss: 0.1531\nNumber features: 8\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.374174\nTrain: [1098/2931 (100%)]\tLoss: 0.172439\nEpoch: 1/10. Train set: Average loss: 0.1730\nEpoch: 1/10. Validation set: Average loss: 0.1569\nTrain: [0/2931 (0%)]\tLoss: 0.252058\nTrain: [1098/2931 (100%)]\tLoss: 0.157756\nEpoch: 2/10. Train set: Average loss: 0.1580\nEpoch: 2/10. Validation set: Average loss: 0.1528\nTrain: [0/2931 (0%)]\tLoss: 0.223080\nTrain: [1098/2931 (100%)]\tLoss: 0.158187\nEpoch: 3/10. Train set: Average loss: 0.1584\nEpoch: 3/10. Validation set: Average loss: 0.1535\nTrain: [0/2931 (0%)]\tLoss: 0.230918\nTrain: [1098/2931 (100%)]\tLoss: 0.154024\nEpoch: 4/10. Train set: Average loss: 0.1542\nEpoch: 4/10. Validation set: Average loss: 0.1599\nTrain: [0/2931 (0%)]\tLoss: 0.151587\nTrain: [1098/2931 (100%)]\tLoss: 0.147392\nEpoch: 5/10. Train set: Average loss: 0.1474\nEpoch: 5/10. Validation set: Average loss: 0.1442\nTrain: [0/2931 (0%)]\tLoss: 0.255017\nTrain: [1098/2931 (100%)]\tLoss: 0.153787\nEpoch: 6/10. Train set: Average loss: 0.1541\nEpoch: 6/10. Validation set: Average loss: 0.1476\nTrain: [0/2931 (0%)]\tLoss: 0.191706\nTrain: [1098/2931 (100%)]\tLoss: 0.150746\nEpoch: 7/10. Train set: Average loss: 0.1509\nEpoch: 7/10. Validation set: Average loss: 0.1563\nTrain: [0/2931 (0%)]\tLoss: 0.223330\nTrain: [1098/2931 (100%)]\tLoss: 0.150246\nEpoch: 8/10. Train set: Average loss: 0.1504\nEpoch: 8/10. Validation set: Average loss: 0.1472\nTrain: [0/2931 (0%)]\tLoss: 0.210071\nTrain: [1098/2931 (100%)]\tLoss: 0.149352\nEpoch: 9/10. Train set: Average loss: 0.1495\nEpoch: 9/10. Validation set: Average loss: 0.1338\nTrain: [0/2931 (0%)]\tLoss: 0.191847\nTrain: [1098/2931 (100%)]\tLoss: 0.145099\nEpoch: 10/10. Train set: Average loss: 0.1452\nEpoch: 10/10. Validation set: Average loss: 0.1339\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.373834\nTrain: [1098/2931 (100%)]\tLoss: 0.171769\nEpoch: 1/10. Train set: Average loss: 0.1723\nEpoch: 1/10. Validation set: Average loss: 0.1797\nTrain: [0/2931 (0%)]\tLoss: 0.250009\nTrain: [1098/2931 (100%)]\tLoss: 0.173330\nEpoch: 2/10. Train set: Average loss: 0.1735\nEpoch: 2/10. Validation set: Average loss: 0.1618\nTrain: [0/2931 (0%)]\tLoss: 0.226483\nTrain: [1098/2931 (100%)]\tLoss: 0.170841\nEpoch: 3/10. Train set: Average loss: 0.1710\nEpoch: 3/10. Validation set: Average loss: 0.1629\nTrain: [0/2931 (0%)]\tLoss: 0.193298\nTrain: [1098/2931 (100%)]\tLoss: 0.160712\nEpoch: 4/10. Train set: Average loss: 0.1608\nEpoch: 4/10. Validation set: Average loss: 0.1656\nTrain: [0/2931 (0%)]\tLoss: 0.221440\nTrain: [1098/2931 (100%)]\tLoss: 0.158113\nEpoch: 5/10. Train set: Average loss: 0.1583\nEpoch: 5/10. Validation set: Average loss: 0.1744\nTrain: [0/2931 (0%)]\tLoss: 0.245799\nTrain: [1098/2931 (100%)]\tLoss: 0.156356\nEpoch: 6/10. Train set: Average loss: 0.1566\nEpoch: 6/10. Validation set: Average loss: 0.1667\nTrain: [0/2931 (0%)]\tLoss: 0.208689\nTrain: [1098/2931 (100%)]\tLoss: 0.154242\nEpoch: 7/10. Train set: Average loss: 0.1544\nEpoch: 7/10. Validation set: Average loss: 0.1615\nTrain: [0/2931 (0%)]\tLoss: 0.248228\nTrain: [1098/2931 (100%)]\tLoss: 0.154723\nEpoch: 8/10. Train set: Average loss: 0.1550\nEpoch: 8/10. Validation set: Average loss: 0.1619\nTrain: [0/2931 (0%)]\tLoss: 0.184548\nTrain: [1098/2931 (100%)]\tLoss: 0.152260\nEpoch: 9/10. Train set: Average loss: 0.1523\nEpoch: 9/10. 
Validation set: Average loss: 0.1469\nTrain: [0/2931 (0%)]\tLoss: 0.166806\nTrain: [1098/2931 (100%)]\tLoss: 0.143824\nEpoch: 10/10. Train set: Average loss: 0.1439\nEpoch: 10/10. Validation set: Average loss: 0.1440\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.374010\nTrain: [1098/2931 (100%)]\tLoss: 0.174528\nEpoch: 1/10. Train set: Average loss: 0.1751\nEpoch: 1/10. Validation set: Average loss: 0.1664\nTrain: [0/2931 (0%)]\tLoss: 0.254224\nTrain: [1098/2931 (100%)]\tLoss: 0.155605\nEpoch: 2/10. Train set: Average loss: 0.1559\nEpoch: 2/10. Validation set: Average loss: 0.1558\nTrain: [0/2931 (0%)]\tLoss: 0.159115\nTrain: [1098/2931 (100%)]\tLoss: 0.160584\nEpoch: 3/10. Train set: Average loss: 0.1606\nEpoch: 3/10. Validation set: Average loss: 0.1581\nTrain: [0/2931 (0%)]\tLoss: 0.234069\nTrain: [1098/2931 (100%)]\tLoss: 0.152910\nEpoch: 4/10. Train set: Average loss: 0.1531\nEpoch: 4/10. Validation set: Average loss: 0.1574\nTrain: [0/2931 (0%)]\tLoss: 0.223141\nTrain: [1098/2931 (100%)]\tLoss: 0.153152\nEpoch: 5/10. Train set: Average loss: 0.1533\nEpoch: 5/10. Validation set: Average loss: 0.1451\nTrain: [0/2931 (0%)]\tLoss: 0.194084\nTrain: [1098/2931 (100%)]\tLoss: 0.158779\nEpoch: 6/10. Train set: Average loss: 0.1589\nEpoch: 6/10. Validation set: Average loss: 0.1586\nTrain: [0/2931 (0%)]\tLoss: 0.341197\nTrain: [1098/2931 (100%)]\tLoss: 0.160693\nEpoch: 7/10. Train set: Average loss: 0.1612\nEpoch: 7/10. Validation set: Average loss: 0.1684\nTrain: [0/2931 (0%)]\tLoss: 0.239985\nTrain: [1098/2931 (100%)]\tLoss: 0.151661\nEpoch: 8/10. Train set: Average loss: 0.1519\nEpoch: 8/10. Validation set: Average loss: 0.1611\nTrain: [0/2931 (0%)]\tLoss: 0.221484\nTrain: [1098/2931 (100%)]\tLoss: 0.151790\nEpoch: 9/10. Train set: Average loss: 0.1520\nEpoch: 9/10. Validation set: Average loss: 0.1420\nTrain: [0/2931 (0%)]\tLoss: 0.172030\nTrain: [1098/2931 (100%)]\tLoss: 0.143799\nEpoch: 10/10. Train set: Average loss: 0.1439\nEpoch: 10/10. Validation set: Average loss: 0.1424\nNumber features: 9\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.311739\nTrain: [1098/2931 (100%)]\tLoss: 0.161763\nEpoch: 1/10. Train set: Average loss: 0.1622\nEpoch: 1/10. Validation set: Average loss: 0.1704\nTrain: [0/2931 (0%)]\tLoss: 0.228470\nTrain: [1098/2931 (100%)]\tLoss: 0.159768\nEpoch: 2/10. Train set: Average loss: 0.1600\nEpoch: 2/10. Validation set: Average loss: 0.1491\nTrain: [0/2931 (0%)]\tLoss: 0.157460\nTrain: [1098/2931 (100%)]\tLoss: 0.150000\nEpoch: 3/10. Train set: Average loss: 0.1500\nEpoch: 3/10. Validation set: Average loss: 0.1526\nTrain: [0/2931 (0%)]\tLoss: 0.160575\nTrain: [1098/2931 (100%)]\tLoss: 0.161516\nEpoch: 4/10. Train set: Average loss: 0.1615\nEpoch: 4/10. Validation set: Average loss: 0.1723\nTrain: [0/2931 (0%)]\tLoss: 0.223825\nTrain: [1098/2931 (100%)]\tLoss: 0.158067\nEpoch: 5/10. Train set: Average loss: 0.1582\nEpoch: 5/10. Validation set: Average loss: 0.1546\nTrain: [0/2931 (0%)]\tLoss: 0.155100\nTrain: [1098/2931 (100%)]\tLoss: 0.162263\nEpoch: 6/10. Train set: Average loss: 0.1622\nEpoch: 6/10. Validation set: Average loss: 0.1438\nTrain: [0/2931 (0%)]\tLoss: 0.127193\nTrain: [1098/2931 (100%)]\tLoss: 0.155447\nEpoch: 7/10. Train set: Average loss: 0.1554\nEpoch: 7/10. Validation set: Average loss: 0.1496\nTrain: [0/2931 (0%)]\tLoss: 0.101585\nTrain: [1098/2931 (100%)]\tLoss: 0.157011\nEpoch: 8/10. Train set: Average loss: 0.1569\nEpoch: 8/10. 
Validation set: Average loss: 0.1472\nTrain: [0/2931 (0%)]\tLoss: 0.177108\nTrain: [1098/2931 (100%)]\tLoss: 0.154770\nEpoch: 9/10. Train set: Average loss: 0.1548\nEpoch: 9/10. Validation set: Average loss: 0.1338\nTrain: [0/2931 (0%)]\tLoss: 0.131928\nTrain: [1098/2931 (100%)]\tLoss: 0.146285\nEpoch: 10/10. Train set: Average loss: 0.1462\nEpoch: 10/10. Validation set: Average loss: 0.1322\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.186602\nTrain: [1098/2931 (100%)]\tLoss: 0.167459\nEpoch: 1/10. Train set: Average loss: 0.1675\nEpoch: 1/10. Validation set: Average loss: 0.2084\nTrain: [0/2931 (0%)]\tLoss: 0.163635\nTrain: [1098/2931 (100%)]\tLoss: 0.158512\nEpoch: 2/10. Train set: Average loss: 0.1585\nEpoch: 2/10. Validation set: Average loss: 0.2015\nTrain: [0/2931 (0%)]\tLoss: 0.095429\nTrain: [1098/2931 (100%)]\tLoss: 0.167992\nEpoch: 3/10. Train set: Average loss: 0.1678\nEpoch: 3/10. Validation set: Average loss: 0.1761\nTrain: [0/2931 (0%)]\tLoss: 0.093552\nTrain: [1098/2931 (100%)]\tLoss: 0.156883\nEpoch: 4/10. Train set: Average loss: 0.1567\nEpoch: 4/10. Validation set: Average loss: 0.1610\nTrain: [0/2931 (0%)]\tLoss: 0.202603\nTrain: [1098/2931 (100%)]\tLoss: 0.157509\nEpoch: 5/10. Train set: Average loss: 0.1576\nEpoch: 5/10. Validation set: Average loss: 0.1657\nTrain: [0/2931 (0%)]\tLoss: 0.165595\nTrain: [1098/2931 (100%)]\tLoss: 0.166301\nEpoch: 6/10. Train set: Average loss: 0.1663\nEpoch: 6/10. Validation set: Average loss: 0.1507\nTrain: [0/2931 (0%)]\tLoss: 0.144109\nTrain: [1098/2931 (100%)]\tLoss: 0.156015\nEpoch: 7/10. Train set: Average loss: 0.1560\nEpoch: 7/10. Validation set: Average loss: 0.1644\nTrain: [0/2931 (0%)]\tLoss: 0.137737\nTrain: [1098/2931 (100%)]\tLoss: 0.161587\nEpoch: 8/10. Train set: Average loss: 0.1615\nEpoch: 8/10. Validation set: Average loss: 0.1467\nTrain: [0/2931 (0%)]\tLoss: 0.141070\nTrain: [1098/2931 (100%)]\tLoss: 0.150438\nEpoch: 9/10. Train set: Average loss: 0.1504\nEpoch: 9/10. Validation set: Average loss: 0.1362\nTrain: [0/2931 (0%)]\tLoss: 0.211718\nTrain: [1098/2931 (100%)]\tLoss: 0.144329\nEpoch: 10/10. Train set: Average loss: 0.1445\nEpoch: 10/10. Validation set: Average loss: 0.1352\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.249329\nTrain: [1098/2931 (100%)]\tLoss: 0.171320\nEpoch: 1/10. Train set: Average loss: 0.1715\nEpoch: 1/10. Validation set: Average loss: 0.1609\nTrain: [0/2931 (0%)]\tLoss: 0.123918\nTrain: [1098/2931 (100%)]\tLoss: 0.160403\nEpoch: 2/10. Train set: Average loss: 0.1603\nEpoch: 2/10. Validation set: Average loss: 0.1501\nTrain: [0/2931 (0%)]\tLoss: 0.088278\nTrain: [1098/2931 (100%)]\tLoss: 0.164773\nEpoch: 3/10. Train set: Average loss: 0.1646\nEpoch: 3/10. Validation set: Average loss: 0.1822\nTrain: [0/2931 (0%)]\tLoss: 0.204437\nTrain: [1098/2931 (100%)]\tLoss: 0.161415\nEpoch: 4/10. Train set: Average loss: 0.1615\nEpoch: 4/10. Validation set: Average loss: 0.1496\nTrain: [0/2931 (0%)]\tLoss: 0.218900\nTrain: [1098/2931 (100%)]\tLoss: 0.159666\nEpoch: 5/10. Train set: Average loss: 0.1598\nEpoch: 5/10. Validation set: Average loss: 0.1481\nTrain: [0/2931 (0%)]\tLoss: 0.152889\nTrain: [1098/2931 (100%)]\tLoss: 0.155767\nEpoch: 6/10. Train set: Average loss: 0.1558\nEpoch: 6/10. Validation set: Average loss: 0.1396\nTrain: [0/2931 (0%)]\tLoss: 0.093512\nTrain: [1098/2931 (100%)]\tLoss: 0.158164\nEpoch: 7/10. Train set: Average loss: 0.1580\nEpoch: 7/10. Validation set: Average loss: 0.1479\nTrain: [0/2931 (0%)]\tLoss: 0.197385\nTrain: [1098/2931 (100%)]\tLoss: 0.150361\nEpoch: 8/10. 
Train set: Average loss: 0.1505\nEpoch: 8/10. Validation set: Average loss: 0.1525\nTrain: [0/2931 (0%)]\tLoss: 0.102113\nTrain: [1098/2931 (100%)]\tLoss: 0.146019\nEpoch: 9/10. Train set: Average loss: 0.1459\nEpoch: 9/10. Validation set: Average loss: 0.1334\nTrain: [0/2931 (0%)]\tLoss: 0.099769\nTrain: [1098/2931 (100%)]\tLoss: 0.148620\nEpoch: 10/10. Train set: Average loss: 0.1485\nEpoch: 10/10. Validation set: Average loss: 0.1337\nNumber features: 10\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.249412\nTrain: [1098/2931 (100%)]\tLoss: 0.187082\nEpoch: 1/10. Train set: Average loss: 0.1873\nEpoch: 1/10. Validation set: Average loss: 0.1655\nTrain: [0/2931 (0%)]\tLoss: 0.250895\nTrain: [1098/2931 (100%)]\tLoss: 0.182110\nEpoch: 2/10. Train set: Average loss: 0.1823\nEpoch: 2/10. Validation set: Average loss: 0.1622\nTrain: [0/2931 (0%)]\tLoss: 0.123092\nTrain: [1098/2931 (100%)]\tLoss: 0.161674\nEpoch: 3/10. Train set: Average loss: 0.1616\nEpoch: 3/10. Validation set: Average loss: 0.1458\nTrain: [0/2931 (0%)]\tLoss: 0.196010\nTrain: [1098/2931 (100%)]\tLoss: 0.176337\nEpoch: 4/10. Train set: Average loss: 0.1764\nEpoch: 4/10. Validation set: Average loss: 0.1779\nTrain: [0/2931 (0%)]\tLoss: 0.199691\nTrain: [1098/2931 (100%)]\tLoss: 0.169670\nEpoch: 5/10. Train set: Average loss: 0.1698\nEpoch: 5/10. Validation set: Average loss: 0.1404\nTrain: [0/2931 (0%)]\tLoss: 0.134894\nTrain: [1098/2931 (100%)]\tLoss: 0.163425\nEpoch: 6/10. Train set: Average loss: 0.1633\nEpoch: 6/10. Validation set: Average loss: 0.1333\nTrain: [0/2931 (0%)]\tLoss: 0.102510\nTrain: [1098/2931 (100%)]\tLoss: 0.157410\nEpoch: 7/10. Train set: Average loss: 0.1573\nEpoch: 7/10. Validation set: Average loss: 0.1401\nTrain: [0/2931 (0%)]\tLoss: 0.162592\nTrain: [1098/2931 (100%)]\tLoss: 0.158957\nEpoch: 8/10. Train set: Average loss: 0.1590\nEpoch: 8/10. Validation set: Average loss: 0.1330\nTrain: [0/2931 (0%)]\tLoss: 0.160712\nTrain: [1098/2931 (100%)]\tLoss: 0.154028\nEpoch: 9/10. Train set: Average loss: 0.1540\nEpoch: 9/10. Validation set: Average loss: 0.1264\nTrain: [0/2931 (0%)]\tLoss: 0.160319\nTrain: [1098/2931 (100%)]\tLoss: 0.152868\nEpoch: 10/10. Train set: Average loss: 0.1529\nEpoch: 10/10. Validation set: Average loss: 0.1271\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.373996\nTrain: [1098/2931 (100%)]\tLoss: 0.172552\nEpoch: 1/10. Train set: Average loss: 0.1731\nEpoch: 1/10. Validation set: Average loss: 0.2431\nTrain: [0/2931 (0%)]\tLoss: 0.156250\nTrain: [1098/2931 (100%)]\tLoss: 0.174181\nEpoch: 2/10. Train set: Average loss: 0.1741\nEpoch: 2/10. Validation set: Average loss: 0.2098\nTrain: [0/2931 (0%)]\tLoss: 0.249445\nTrain: [1098/2931 (100%)]\tLoss: 0.173141\nEpoch: 3/10. Train set: Average loss: 0.1733\nEpoch: 3/10. Validation set: Average loss: 0.1726\nTrain: [0/2931 (0%)]\tLoss: 0.163062\nTrain: [1098/2931 (100%)]\tLoss: 0.167174\nEpoch: 4/10. Train set: Average loss: 0.1672\nEpoch: 4/10. Validation set: Average loss: 0.1908\nTrain: [0/2931 (0%)]\tLoss: 0.256956\nTrain: [1098/2931 (100%)]\tLoss: 0.161539\nEpoch: 5/10. Train set: Average loss: 0.1618\nEpoch: 5/10. Validation set: Average loss: 0.1563\nTrain: [0/2931 (0%)]\tLoss: 0.118587\nTrain: [1098/2931 (100%)]\tLoss: 0.159734\nEpoch: 6/10. Train set: Average loss: 0.1596\nEpoch: 6/10. Validation set: Average loss: 0.1572\nTrain: [0/2931 (0%)]\tLoss: 0.217206\nTrain: [1098/2931 (100%)]\tLoss: 0.161703\nEpoch: 7/10. Train set: Average loss: 0.1619\nEpoch: 7/10. 
Validation set: Average loss: 0.1568\nTrain: [0/2931 (0%)]\tLoss: 0.194384\nTrain: [1098/2931 (100%)]\tLoss: 0.160779\nEpoch: 8/10. Train set: Average loss: 0.1609\nEpoch: 8/10. Validation set: Average loss: 0.1596\nTrain: [0/2931 (0%)]\tLoss: 0.203464\nTrain: [1098/2931 (100%)]\tLoss: 0.157606\nEpoch: 9/10. Train set: Average loss: 0.1577\nEpoch: 9/10. Validation set: Average loss: 0.1357\nTrain: [0/2931 (0%)]\tLoss: 0.185702\nTrain: [1098/2931 (100%)]\tLoss: 0.153994\nEpoch: 10/10. Train set: Average loss: 0.1541\nEpoch: 10/10. Validation set: Average loss: 0.1346\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.372829\nTrain: [1098/2931 (100%)]\tLoss: 0.177363\nEpoch: 1/10. Train set: Average loss: 0.1779\nEpoch: 1/10. Validation set: Average loss: 0.1640\nTrain: [0/2931 (0%)]\tLoss: 0.264099\nTrain: [1098/2931 (100%)]\tLoss: 0.171105\nEpoch: 2/10. Train set: Average loss: 0.1714\nEpoch: 2/10. Validation set: Average loss: 0.1556\nTrain: [0/2931 (0%)]\tLoss: 0.211866\nTrain: [1098/2931 (100%)]\tLoss: 0.183897\nEpoch: 3/10. Train set: Average loss: 0.1840\nEpoch: 3/10. Validation set: Average loss: 0.1890\nTrain: [0/2931 (0%)]\tLoss: 0.216176\nTrain: [1098/2931 (100%)]\tLoss: 0.166163\nEpoch: 4/10. Train set: Average loss: 0.1663\nEpoch: 4/10. Validation set: Average loss: 0.1632\nTrain: [0/2931 (0%)]\tLoss: 0.200188\nTrain: [1098/2931 (100%)]\tLoss: 0.163710\nEpoch: 5/10. Train set: Average loss: 0.1638\nEpoch: 5/10. Validation set: Average loss: 0.1689\nTrain: [0/2931 (0%)]\tLoss: 0.112324\nTrain: [1098/2931 (100%)]\tLoss: 0.164526\nEpoch: 6/10. Train set: Average loss: 0.1644\nEpoch: 6/10. Validation set: Average loss: 0.1919\nTrain: [0/2931 (0%)]\tLoss: 0.184597\nTrain: [1098/2931 (100%)]\tLoss: 0.173952\nEpoch: 7/10. Train set: Average loss: 0.1740\nEpoch: 7/10. Validation set: Average loss: 0.1836\nTrain: [0/2931 (0%)]\tLoss: 0.176221\nTrain: [1098/2931 (100%)]\tLoss: 0.167745\nEpoch: 8/10. Train set: Average loss: 0.1678\nEpoch: 8/10. Validation set: Average loss: 0.1569\nTrain: [0/2931 (0%)]\tLoss: 0.073566\nTrain: [1098/2931 (100%)]\tLoss: 0.155699\nEpoch: 9/10. Train set: Average loss: 0.1555\nEpoch: 9/10. Validation set: Average loss: 0.1548\nTrain: [0/2931 (0%)]\tLoss: 0.091202\nTrain: [1098/2931 (100%)]\tLoss: 0.149078\nEpoch: 10/10. Train set: Average loss: 0.1489\nEpoch: 10/10. Validation set: Average loss: 0.1524\nNumber features: 11\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.312177\nTrain: [1098/2931 (100%)]\tLoss: 0.168041\nEpoch: 1/10. Train set: Average loss: 0.1684\nEpoch: 1/10. Validation set: Average loss: 0.1332\nTrain: [0/2931 (0%)]\tLoss: 0.132364\nTrain: [1098/2931 (100%)]\tLoss: 0.165831\nEpoch: 2/10. Train set: Average loss: 0.1657\nEpoch: 2/10. Validation set: Average loss: 0.1304\nTrain: [0/2931 (0%)]\tLoss: 0.112254\nTrain: [1098/2931 (100%)]\tLoss: 0.158665\nEpoch: 3/10. Train set: Average loss: 0.1585\nEpoch: 3/10. Validation set: Average loss: 0.1404\nTrain: [0/2931 (0%)]\tLoss: 0.180067\nTrain: [1098/2931 (100%)]\tLoss: 0.160611\nEpoch: 4/10. Train set: Average loss: 0.1607\nEpoch: 4/10. Validation set: Average loss: 0.1294\nTrain: [0/2931 (0%)]\tLoss: 0.163732\nTrain: [1098/2931 (100%)]\tLoss: 0.157914\nEpoch: 5/10. Train set: Average loss: 0.1579\nEpoch: 5/10. Validation set: Average loss: 0.1336\nTrain: [0/2931 (0%)]\tLoss: 0.189788\nTrain: [1098/2931 (100%)]\tLoss: 0.153006\nEpoch: 6/10. Train set: Average loss: 0.1531\nEpoch: 6/10. 
Validation set: Average loss: 0.1484\nTrain: [0/2931 (0%)]\tLoss: 0.202032\nTrain: [1098/2931 (100%)]\tLoss: 0.158801\nEpoch: 7/10. Train set: Average loss: 0.1589\nEpoch: 7/10. Validation set: Average loss: 0.1383\nTrain: [0/2931 (0%)]\tLoss: 0.166501\nTrain: [1098/2931 (100%)]\tLoss: 0.149896\nEpoch: 8/10. Train set: Average loss: 0.1499\nEpoch: 8/10. Validation set: Average loss: 0.1249\nTrain: [0/2931 (0%)]\tLoss: 0.213199\nTrain: [1098/2931 (100%)]\tLoss: 0.148180\nEpoch: 9/10. Train set: Average loss: 0.1484\nEpoch: 9/10. Validation set: Average loss: 0.1285\nTrain: [0/2931 (0%)]\tLoss: 0.114280\nTrain: [1098/2931 (100%)]\tLoss: 0.147572\nEpoch: 10/10. Train set: Average loss: 0.1475\nEpoch: 10/10. Validation set: Average loss: 0.1304\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.312005\nTrain: [1098/2931 (100%)]\tLoss: 0.175944\nEpoch: 1/10. Train set: Average loss: 0.1763\nEpoch: 1/10. Validation set: Average loss: 0.2002\nTrain: [0/2931 (0%)]\tLoss: 0.312919\nTrain: [1098/2931 (100%)]\tLoss: 0.170425\nEpoch: 2/10. Train set: Average loss: 0.1708\nEpoch: 2/10. Validation set: Average loss: 0.1388\nTrain: [0/2931 (0%)]\tLoss: 0.197988\nTrain: [1098/2931 (100%)]\tLoss: 0.157850\nEpoch: 3/10. Train set: Average loss: 0.1580\nEpoch: 3/10. Validation set: Average loss: 0.1497\nTrain: [0/2931 (0%)]\tLoss: 0.136628\nTrain: [1098/2931 (100%)]\tLoss: 0.159658\nEpoch: 4/10. Train set: Average loss: 0.1596\nEpoch: 4/10. Validation set: Average loss: 0.1322\nTrain: [0/2931 (0%)]\tLoss: 0.096568\nTrain: [1098/2931 (100%)]\tLoss: 0.156240\nEpoch: 5/10. Train set: Average loss: 0.1561\nEpoch: 5/10. Validation set: Average loss: 0.1347\nTrain: [0/2931 (0%)]\tLoss: 0.174852\nTrain: [1098/2931 (100%)]\tLoss: 0.153904\nEpoch: 6/10. Train set: Average loss: 0.1540\nEpoch: 6/10. Validation set: Average loss: 0.1349\nTrain: [0/2931 (0%)]\tLoss: 0.202289\nTrain: [1098/2931 (100%)]\tLoss: 0.157255\nEpoch: 7/10. Train set: Average loss: 0.1574\nEpoch: 7/10. Validation set: Average loss: 0.1301\nTrain: [0/2931 (0%)]\tLoss: 0.089711\nTrain: [1098/2931 (100%)]\tLoss: 0.148369\nEpoch: 8/10. Train set: Average loss: 0.1482\nEpoch: 8/10. Validation set: Average loss: 0.1300\nTrain: [0/2931 (0%)]\tLoss: 0.192598\nTrain: [1098/2931 (100%)]\tLoss: 0.151589\nEpoch: 9/10. Train set: Average loss: 0.1517\nEpoch: 9/10. Validation set: Average loss: 0.1311\nTrain: [0/2931 (0%)]\tLoss: 0.179440\nTrain: [1098/2931 (100%)]\tLoss: 0.148712\nEpoch: 10/10. Train set: Average loss: 0.1488\nEpoch: 10/10. Validation set: Average loss: 0.1333\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.124834\nTrain: [1098/2931 (100%)]\tLoss: 0.176548\nEpoch: 1/10. Train set: Average loss: 0.1764\nEpoch: 1/10. Validation set: Average loss: 0.1668\nTrain: [0/2931 (0%)]\tLoss: 0.102105\nTrain: [1098/2931 (100%)]\tLoss: 0.170898\nEpoch: 2/10. Train set: Average loss: 0.1707\nEpoch: 2/10. Validation set: Average loss: 0.1326\nTrain: [0/2931 (0%)]\tLoss: 0.255631\nTrain: [1098/2931 (100%)]\tLoss: 0.162758\nEpoch: 3/10. Train set: Average loss: 0.1630\nEpoch: 3/10. Validation set: Average loss: 0.1527\nTrain: [0/2931 (0%)]\tLoss: 0.408119\nTrain: [1098/2931 (100%)]\tLoss: 0.159014\nEpoch: 4/10. Train set: Average loss: 0.1597\nEpoch: 4/10. Validation set: Average loss: 0.1271\nTrain: [0/2931 (0%)]\tLoss: 0.157194\nTrain: [1098/2931 (100%)]\tLoss: 0.157520\nEpoch: 5/10. Train set: Average loss: 0.1575\nEpoch: 5/10. Validation set: Average loss: 0.1364\nTrain: [0/2931 (0%)]\tLoss: 0.211618\nTrain: [1098/2931 (100%)]\tLoss: 0.155954\nEpoch: 6/10. 
Train set: Average loss: 0.1561\nEpoch: 6/10. Validation set: Average loss: 0.1287\nTrain: [0/2931 (0%)]\tLoss: 0.181626\nTrain: [1098/2931 (100%)]\tLoss: 0.153132\nEpoch: 7/10. Train set: Average loss: 0.1532\nEpoch: 7/10. Validation set: Average loss: 0.1277\nTrain: [0/2931 (0%)]\tLoss: 0.142254\nTrain: [1098/2931 (100%)]\tLoss: 0.151576\nEpoch: 8/10. Train set: Average loss: 0.1516\nEpoch: 8/10. Validation set: Average loss: 0.1396\nTrain: [0/2931 (0%)]\tLoss: 0.157139\nTrain: [1098/2931 (100%)]\tLoss: 0.146200\nEpoch: 9/10. Train set: Average loss: 0.1462\nEpoch: 9/10. Validation set: Average loss: 0.1271\nTrain: [0/2931 (0%)]\tLoss: 0.169215\nTrain: [1098/2931 (100%)]\tLoss: 0.142956\nEpoch: 10/10. Train set: Average loss: 0.1430\nEpoch: 10/10. Validation set: Average loss: 0.1255\nNumber features: 12\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.124846\nTrain: [1098/2931 (100%)]\tLoss: 0.167120\nEpoch: 1/10. Train set: Average loss: 0.1670\nEpoch: 1/10. Validation set: Average loss: 0.1552\nTrain: [0/2931 (0%)]\tLoss: 0.101031\nTrain: [1098/2931 (100%)]\tLoss: 0.164560\nEpoch: 2/10. Train set: Average loss: 0.1644\nEpoch: 2/10. Validation set: Average loss: 0.1443\nTrain: [0/2931 (0%)]\tLoss: 0.029425\nTrain: [1098/2931 (100%)]\tLoss: 0.160298\nEpoch: 3/10. Train set: Average loss: 0.1599\nEpoch: 3/10. Validation set: Average loss: 0.1541\nTrain: [0/2931 (0%)]\tLoss: 0.102553\nTrain: [1098/2931 (100%)]\tLoss: 0.159567\nEpoch: 4/10. Train set: Average loss: 0.1594\nEpoch: 4/10. Validation set: Average loss: 0.1585\nTrain: [0/2931 (0%)]\tLoss: 0.261287\nTrain: [1098/2931 (100%)]\tLoss: 0.158422\nEpoch: 5/10. Train set: Average loss: 0.1587\nEpoch: 5/10. Validation set: Average loss: 0.1630\nTrain: [0/2931 (0%)]\tLoss: 0.284492\nTrain: [1098/2931 (100%)]\tLoss: 0.162757\nEpoch: 6/10. Train set: Average loss: 0.1631\nEpoch: 6/10. Validation set: Average loss: 0.1531\nTrain: [0/2931 (0%)]\tLoss: 0.146258\nTrain: [1098/2931 (100%)]\tLoss: 0.150515\nEpoch: 7/10. Train set: Average loss: 0.1505\nEpoch: 7/10. Validation set: Average loss: 0.1495\nTrain: [0/2931 (0%)]\tLoss: 0.100882\nTrain: [1098/2931 (100%)]\tLoss: 0.152031\nEpoch: 8/10. Train set: Average loss: 0.1519\nEpoch: 8/10. Validation set: Average loss: 0.1584\nTrain: [0/2931 (0%)]\tLoss: 0.229376\nTrain: [1098/2931 (100%)]\tLoss: 0.152146\nEpoch: 9/10. Train set: Average loss: 0.1524\nEpoch: 9/10. Validation set: Average loss: 0.1494\nTrain: [0/2931 (0%)]\tLoss: 0.149636\nTrain: [1098/2931 (100%)]\tLoss: 0.143578\nEpoch: 10/10. Train set: Average loss: 0.1436\nEpoch: 10/10. Validation set: Average loss: 0.1515\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.249808\nTrain: [1098/2931 (100%)]\tLoss: 0.161854\nEpoch: 1/10. Train set: Average loss: 0.1621\nEpoch: 1/10. Validation set: Average loss: 0.1633\nTrain: [0/2931 (0%)]\tLoss: 0.176933\nTrain: [1098/2931 (100%)]\tLoss: 0.168080\nEpoch: 2/10. Train set: Average loss: 0.1681\nEpoch: 2/10. Validation set: Average loss: 0.1771\nTrain: [0/2931 (0%)]\tLoss: 0.238057\nTrain: [1098/2931 (100%)]\tLoss: 0.159450\nEpoch: 3/10. Train set: Average loss: 0.1597\nEpoch: 3/10. Validation set: Average loss: 0.1550\nTrain: [0/2931 (0%)]\tLoss: 0.139022\nTrain: [1098/2931 (100%)]\tLoss: 0.173770\nEpoch: 4/10. Train set: Average loss: 0.1737\nEpoch: 4/10. Validation set: Average loss: 0.1647\nTrain: [0/2931 (0%)]\tLoss: 0.179869\nTrain: [1098/2931 (100%)]\tLoss: 0.164578\nEpoch: 5/10. Train set: Average loss: 0.1646\nEpoch: 5/10. 
Validation set: Average loss: 0.1557\nTrain: [0/2931 (0%)]\tLoss: 0.181220\nTrain: [1098/2931 (100%)]\tLoss: 0.153914\nEpoch: 6/10. Train set: Average loss: 0.1540\nEpoch: 6/10. Validation set: Average loss: 0.1463\nTrain: [0/2931 (0%)]\tLoss: 0.146850\nTrain: [1098/2931 (100%)]\tLoss: 0.157944\nEpoch: 7/10. Train set: Average loss: 0.1579\nEpoch: 7/10. Validation set: Average loss: 0.1461\nTrain: [0/2931 (0%)]\tLoss: 0.125385\nTrain: [1098/2931 (100%)]\tLoss: 0.157141\nEpoch: 8/10. Train set: Average loss: 0.1571\nEpoch: 8/10. Validation set: Average loss: 0.1478\nTrain: [0/2931 (0%)]\tLoss: 0.124587\nTrain: [1098/2931 (100%)]\tLoss: 0.148190\nEpoch: 9/10. Train set: Average loss: 0.1481\nEpoch: 9/10. Validation set: Average loss: 0.1388\nTrain: [0/2931 (0%)]\tLoss: 0.153556\nTrain: [1098/2931 (100%)]\tLoss: 0.148180\nEpoch: 10/10. Train set: Average loss: 0.1482\nEpoch: 10/10. Validation set: Average loss: 0.1388\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.062471\nTrain: [1098/2931 (100%)]\tLoss: 0.176194\nEpoch: 1/10. Train set: Average loss: 0.1759\nEpoch: 1/10. Validation set: Average loss: 0.1647\nTrain: [0/2931 (0%)]\tLoss: 0.124979\nTrain: [1098/2931 (100%)]\tLoss: 0.164169\nEpoch: 2/10. Train set: Average loss: 0.1641\nEpoch: 2/10. Validation set: Average loss: 0.1591\nTrain: [0/2931 (0%)]\tLoss: 0.194013\nTrain: [1098/2931 (100%)]\tLoss: 0.161464\nEpoch: 3/10. Train set: Average loss: 0.1616\nEpoch: 3/10. Validation set: Average loss: 0.1696\nTrain: [0/2931 (0%)]\tLoss: 0.105732\nTrain: [1098/2931 (100%)]\tLoss: 0.167445\nEpoch: 4/10. Train set: Average loss: 0.1673\nEpoch: 4/10. Validation set: Average loss: 0.1929\nTrain: [0/2931 (0%)]\tLoss: 0.153379\nTrain: [1098/2931 (100%)]\tLoss: 0.155339\nEpoch: 5/10. Train set: Average loss: 0.1553\nEpoch: 5/10. Validation set: Average loss: 0.1499\nTrain: [0/2931 (0%)]\tLoss: 0.136056\nTrain: [1098/2931 (100%)]\tLoss: 0.157275\nEpoch: 6/10. Train set: Average loss: 0.1572\nEpoch: 6/10. Validation set: Average loss: 0.1546\nTrain: [0/2931 (0%)]\tLoss: 0.073974\nTrain: [1098/2931 (100%)]\tLoss: 0.158201\nEpoch: 7/10. Train set: Average loss: 0.1580\nEpoch: 7/10. Validation set: Average loss: 0.1635\nTrain: [0/2931 (0%)]\tLoss: 0.100368\nTrain: [1098/2931 (100%)]\tLoss: 0.153201\nEpoch: 8/10. Train set: Average loss: 0.1531\nEpoch: 8/10. Validation set: Average loss: 0.1580\nTrain: [0/2931 (0%)]\tLoss: 0.099074\nTrain: [1098/2931 (100%)]\tLoss: 0.149263\nEpoch: 9/10. Train set: Average loss: 0.1491\nEpoch: 9/10. Validation set: Average loss: 0.1470\nTrain: [0/2931 (0%)]\tLoss: 0.099755\nTrain: [1098/2931 (100%)]\tLoss: 0.148099\nEpoch: 10/10. Train set: Average loss: 0.1480\nEpoch: 10/10. Validation set: Average loss: 0.1457\nNumber features: 13\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.249722\nTrain: [1098/2931 (100%)]\tLoss: 0.170928\nEpoch: 1/10. Train set: Average loss: 0.1711\nEpoch: 1/10. Validation set: Average loss: 0.1578\nTrain: [0/2931 (0%)]\tLoss: 0.125957\nTrain: [1098/2931 (100%)]\tLoss: 0.161343\nEpoch: 2/10. Train set: Average loss: 0.1612\nEpoch: 2/10. Validation set: Average loss: 0.1474\nTrain: [0/2931 (0%)]\tLoss: 0.133766\nTrain: [1098/2931 (100%)]\tLoss: 0.165905\nEpoch: 3/10. Train set: Average loss: 0.1658\nEpoch: 3/10. Validation set: Average loss: 0.1320\nTrain: [0/2931 (0%)]\tLoss: 0.127214\nTrain: [1098/2931 (100%)]\tLoss: 0.160706\nEpoch: 4/10. Train set: Average loss: 0.1606\nEpoch: 4/10. 
Validation set: Average loss: 0.1294\nTrain: [0/2931 (0%)]\tLoss: 0.182462\nTrain: [1098/2931 (100%)]\tLoss: 0.157801\nEpoch: 5/10. Train set: Average loss: 0.1579\nEpoch: 5/10. Validation set: Average loss: 0.1428\nTrain: [0/2931 (0%)]\tLoss: 0.101304\nTrain: [1098/2931 (100%)]\tLoss: 0.157238\nEpoch: 6/10. Train set: Average loss: 0.1571\nEpoch: 6/10. Validation set: Average loss: 0.1473\nTrain: [0/2931 (0%)]\tLoss: 0.159777\nTrain: [1098/2931 (100%)]\tLoss: 0.158900\nEpoch: 7/10. Train set: Average loss: 0.1589\nEpoch: 7/10. Validation set: Average loss: 0.1400\nTrain: [0/2931 (0%)]\tLoss: 0.172493\nTrain: [1098/2931 (100%)]\tLoss: 0.156865\nEpoch: 8/10. Train set: Average loss: 0.1569\nEpoch: 8/10. Validation set: Average loss: 0.1236\nTrain: [0/2931 (0%)]\tLoss: 0.166630\nTrain: [1098/2931 (100%)]\tLoss: 0.144938\nEpoch: 9/10. Train set: Average loss: 0.1450\nEpoch: 9/10. Validation set: Average loss: 0.1188\nTrain: [0/2931 (0%)]\tLoss: 0.127844\nTrain: [1098/2931 (100%)]\tLoss: 0.148606\nEpoch: 10/10. Train set: Average loss: 0.1485\nEpoch: 10/10. Validation set: Average loss: 0.1208\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.249688\nTrain: [1098/2931 (100%)]\tLoss: 0.165473\nEpoch: 1/10. Train set: Average loss: 0.1657\nEpoch: 1/10. Validation set: Average loss: 0.1405\nTrain: [0/2931 (0%)]\tLoss: 0.154052\nTrain: [1098/2931 (100%)]\tLoss: 0.159096\nEpoch: 2/10. Train set: Average loss: 0.1591\nEpoch: 2/10. Validation set: Average loss: 0.1464\nTrain: [0/2931 (0%)]\tLoss: 0.243976\nTrain: [1098/2931 (100%)]\tLoss: 0.170764\nEpoch: 3/10. Train set: Average loss: 0.1710\nEpoch: 3/10. Validation set: Average loss: 0.1478\nTrain: [0/2931 (0%)]\tLoss: 0.224453\nTrain: [1098/2931 (100%)]\tLoss: 0.160204\nEpoch: 4/10. Train set: Average loss: 0.1604\nEpoch: 4/10. Validation set: Average loss: 0.1339\nTrain: [0/2931 (0%)]\tLoss: 0.140216\nTrain: [1098/2931 (100%)]\tLoss: 0.158101\nEpoch: 5/10. Train set: Average loss: 0.1581\nEpoch: 5/10. Validation set: Average loss: 0.1361\nTrain: [0/2931 (0%)]\tLoss: 0.193429\nTrain: [1098/2931 (100%)]\tLoss: 0.157666\nEpoch: 6/10. Train set: Average loss: 0.1578\nEpoch: 6/10. Validation set: Average loss: 0.1395\nTrain: [0/2931 (0%)]\tLoss: 0.198858\nTrain: [1098/2931 (100%)]\tLoss: 0.157941\nEpoch: 7/10. Train set: Average loss: 0.1581\nEpoch: 7/10. Validation set: Average loss: 0.1286\nTrain: [0/2931 (0%)]\tLoss: 0.179480\nTrain: [1098/2931 (100%)]\tLoss: 0.159295\nEpoch: 8/10. Train set: Average loss: 0.1593\nEpoch: 8/10. Validation set: Average loss: 0.1295\nTrain: [0/2931 (0%)]\tLoss: 0.105008\nTrain: [1098/2931 (100%)]\tLoss: 0.147565\nEpoch: 9/10. Train set: Average loss: 0.1474\nEpoch: 9/10. Validation set: Average loss: 0.1264\nTrain: [0/2931 (0%)]\tLoss: 0.117981\nTrain: [1098/2931 (100%)]\tLoss: 0.151202\nEpoch: 10/10. Train set: Average loss: 0.1511\nEpoch: 10/10. Validation set: Average loss: 0.1269\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.186820\nTrain: [1098/2931 (100%)]\tLoss: 0.167451\nEpoch: 1/10. Train set: Average loss: 0.1675\nEpoch: 1/10. Validation set: Average loss: 0.1771\nTrain: [0/2931 (0%)]\tLoss: 0.132608\nTrain: [1098/2931 (100%)]\tLoss: 0.162559\nEpoch: 2/10. Train set: Average loss: 0.1625\nEpoch: 2/10. Validation set: Average loss: 0.1527\nTrain: [0/2931 (0%)]\tLoss: 0.133469\nTrain: [1098/2931 (100%)]\tLoss: 0.165529\nEpoch: 3/10. Train set: Average loss: 0.1654\nEpoch: 3/10. Validation set: Average loss: 0.1478\nTrain: [0/2931 (0%)]\tLoss: 0.187875\nTrain: [1098/2931 (100%)]\tLoss: 0.169626\nEpoch: 4/10. 
Train set: Average loss: 0.1697\nEpoch: 4/10. Validation set: Average loss: 0.1402\nTrain: [0/2931 (0%)]\tLoss: 0.101742\nTrain: [1098/2931 (100%)]\tLoss: 0.159960\nEpoch: 5/10. Train set: Average loss: 0.1598\nEpoch: 5/10. Validation set: Average loss: 0.1386\nTrain: [0/2931 (0%)]\tLoss: 0.129781\nTrain: [1098/2931 (100%)]\tLoss: 0.159523\nEpoch: 6/10. Train set: Average loss: 0.1594\nEpoch: 6/10. Validation set: Average loss: 0.1272\nTrain: [0/2931 (0%)]\tLoss: 0.139906\nTrain: [1098/2931 (100%)]\tLoss: 0.154326\nEpoch: 7/10. Train set: Average loss: 0.1543\nEpoch: 7/10. Validation set: Average loss: 0.1515\nTrain: [0/2931 (0%)]\tLoss: 0.142215\nTrain: [1098/2931 (100%)]\tLoss: 0.153193\nEpoch: 8/10. Train set: Average loss: 0.1532\nEpoch: 8/10. Validation set: Average loss: 0.1443\nTrain: [0/2931 (0%)]\tLoss: 0.122812\nTrain: [1098/2931 (100%)]\tLoss: 0.151648\nEpoch: 9/10. Train set: Average loss: 0.1516\nEpoch: 9/10. Validation set: Average loss: 0.1210\nTrain: [0/2931 (0%)]\tLoss: 0.125212\nTrain: [1098/2931 (100%)]\tLoss: 0.145386\nEpoch: 10/10. Train set: Average loss: 0.1453\nEpoch: 10/10. Validation set: Average loss: 0.1211\nNumber features: 14\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.311782\nTrain: [1098/2931 (100%)]\tLoss: 0.171142\nEpoch: 1/10. Train set: Average loss: 0.1715\nEpoch: 1/10. Validation set: Average loss: 0.1523\nTrain: [0/2931 (0%)]\tLoss: 0.152628\nTrain: [1098/2931 (100%)]\tLoss: 0.169321\nEpoch: 2/10. Train set: Average loss: 0.1693\nEpoch: 2/10. Validation set: Average loss: 0.1799\nTrain: [0/2931 (0%)]\tLoss: 0.180688\nTrain: [1098/2931 (100%)]\tLoss: 0.170696\nEpoch: 3/10. Train set: Average loss: 0.1707\nEpoch: 3/10. Validation set: Average loss: 0.1479\nTrain: [0/2931 (0%)]\tLoss: 0.118194\nTrain: [1098/2931 (100%)]\tLoss: 0.160951\nEpoch: 4/10. Train set: Average loss: 0.1608\nEpoch: 4/10. Validation set: Average loss: 0.1461\nTrain: [0/2931 (0%)]\tLoss: 0.083107\nTrain: [1098/2931 (100%)]\tLoss: 0.160856\nEpoch: 5/10. Train set: Average loss: 0.1606\nEpoch: 5/10. Validation set: Average loss: 0.1436\nTrain: [0/2931 (0%)]\tLoss: 0.189100\nTrain: [1098/2931 (100%)]\tLoss: 0.162687\nEpoch: 6/10. Train set: Average loss: 0.1628\nEpoch: 6/10. Validation set: Average loss: 0.1398\nTrain: [0/2931 (0%)]\tLoss: 0.104530\nTrain: [1098/2931 (100%)]\tLoss: 0.162396\nEpoch: 7/10. Train set: Average loss: 0.1622\nEpoch: 7/10. Validation set: Average loss: 0.1480\nTrain: [0/2931 (0%)]\tLoss: 0.106146\nTrain: [1098/2931 (100%)]\tLoss: 0.159736\nEpoch: 8/10. Train set: Average loss: 0.1596\nEpoch: 8/10. Validation set: Average loss: 0.1394\nTrain: [0/2931 (0%)]\tLoss: 0.133384\nTrain: [1098/2931 (100%)]\tLoss: 0.155127\nEpoch: 9/10. Train set: Average loss: 0.1551\nEpoch: 9/10. Validation set: Average loss: 0.1385\nTrain: [0/2931 (0%)]\tLoss: 0.164516\nTrain: [1098/2931 (100%)]\tLoss: 0.151263\nEpoch: 10/10. Train set: Average loss: 0.1513\nEpoch: 10/10. Validation set: Average loss: 0.1401\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.124548\nTrain: [1098/2931 (100%)]\tLoss: 0.169221\nEpoch: 1/10. Train set: Average loss: 0.1691\nEpoch: 1/10. Validation set: Average loss: 0.1493\nTrain: [0/2931 (0%)]\tLoss: 0.103781\nTrain: [1098/2931 (100%)]\tLoss: 0.165229\nEpoch: 2/10. Train set: Average loss: 0.1651\nEpoch: 2/10. Validation set: Average loss: 0.1621\nTrain: [0/2931 (0%)]\tLoss: 0.100728\nTrain: [1098/2931 (100%)]\tLoss: 0.160378\nEpoch: 3/10. Train set: Average loss: 0.1602\nEpoch: 3/10. 
Validation set: Average loss: 0.1252\nTrain: [0/2931 (0%)]\tLoss: 0.154313\nTrain: [1098/2931 (100%)]\tLoss: 0.157410\nEpoch: 4/10. Train set: Average loss: 0.1574\nEpoch: 4/10. Validation set: Average loss: 0.1440\nTrain: [0/2931 (0%)]\tLoss: 0.128331\nTrain: [1098/2931 (100%)]\tLoss: 0.164039\nEpoch: 5/10. Train set: Average loss: 0.1639\nEpoch: 5/10. Validation set: Average loss: 0.1883\nTrain: [0/2931 (0%)]\tLoss: 0.125215\nTrain: [1098/2931 (100%)]\tLoss: 0.168806\nEpoch: 6/10. Train set: Average loss: 0.1687\nEpoch: 6/10. Validation set: Average loss: 0.1860\nTrain: [0/2931 (0%)]\tLoss: 0.079327\nTrain: [1098/2931 (100%)]\tLoss: 0.167780\nEpoch: 7/10. Train set: Average loss: 0.1675\nEpoch: 7/10. Validation set: Average loss: 0.1378\nTrain: [0/2931 (0%)]\tLoss: 0.100648\nTrain: [1098/2931 (100%)]\tLoss: 0.155497\nEpoch: 8/10. Train set: Average loss: 0.1553\nEpoch: 8/10. Validation set: Average loss: 0.1363\nTrain: [0/2931 (0%)]\tLoss: 0.158128\nTrain: [1098/2931 (100%)]\tLoss: 0.149840\nEpoch: 9/10. Train set: Average loss: 0.1499\nEpoch: 9/10. Validation set: Average loss: 0.1341\nTrain: [0/2931 (0%)]\tLoss: 0.147025\nTrain: [1098/2931 (100%)]\tLoss: 0.150036\nEpoch: 10/10. Train set: Average loss: 0.1500\nEpoch: 10/10. Validation set: Average loss: 0.1342\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.187077\nTrain: [1098/2931 (100%)]\tLoss: 0.166705\nEpoch: 1/10. Train set: Average loss: 0.1668\nEpoch: 1/10. Validation set: Average loss: 0.1637\nTrain: [0/2931 (0%)]\tLoss: 0.160034\nTrain: [1098/2931 (100%)]\tLoss: 0.175702\nEpoch: 2/10. Train set: Average loss: 0.1757\nEpoch: 2/10. Validation set: Average loss: 0.1681\nTrain: [0/2931 (0%)]\tLoss: 0.162259\nTrain: [1098/2931 (100%)]\tLoss: 0.163207\nEpoch: 3/10. Train set: Average loss: 0.1632\nEpoch: 3/10. Validation set: Average loss: 0.1774\nTrain: [0/2931 (0%)]\tLoss: 0.143641\nTrain: [1098/2931 (100%)]\tLoss: 0.166412\nEpoch: 4/10. Train set: Average loss: 0.1664\nEpoch: 4/10. Validation set: Average loss: 0.1765\nTrain: [0/2931 (0%)]\tLoss: 0.136546\nTrain: [1098/2931 (100%)]\tLoss: 0.159771\nEpoch: 5/10. Train set: Average loss: 0.1597\nEpoch: 5/10. Validation set: Average loss: 0.1533\nTrain: [0/2931 (0%)]\tLoss: 0.151770\nTrain: [1098/2931 (100%)]\tLoss: 0.160618\nEpoch: 6/10. Train set: Average loss: 0.1606\nEpoch: 6/10. Validation set: Average loss: 0.1504\nTrain: [0/2931 (0%)]\tLoss: 0.123361\nTrain: [1098/2931 (100%)]\tLoss: 0.151119\nEpoch: 7/10. Train set: Average loss: 0.1510\nEpoch: 7/10. Validation set: Average loss: 0.1467\nTrain: [0/2931 (0%)]\tLoss: 0.156258\nTrain: [1098/2931 (100%)]\tLoss: 0.156110\nEpoch: 8/10. Train set: Average loss: 0.1561\nEpoch: 8/10. Validation set: Average loss: 0.1686\nTrain: [0/2931 (0%)]\tLoss: 0.153889\nTrain: [1098/2931 (100%)]\tLoss: 0.155886\nEpoch: 9/10. Train set: Average loss: 0.1559\nEpoch: 9/10. Validation set: Average loss: 0.1414\nTrain: [0/2931 (0%)]\tLoss: 0.179934\nTrain: [1098/2931 (100%)]\tLoss: 0.144610\nEpoch: 10/10. Train set: Average loss: 0.1447\nEpoch: 10/10. Validation set: Average loss: 0.1414\nNumber features: 15\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.187218\nTrain: [1098/2931 (100%)]\tLoss: 0.169886\nEpoch: 1/10. Train set: Average loss: 0.1699\nEpoch: 1/10. Validation set: Average loss: 0.1591\nTrain: [0/2931 (0%)]\tLoss: 0.207101\nTrain: [1098/2931 (100%)]\tLoss: 0.168929\nEpoch: 2/10. Train set: Average loss: 0.1690\nEpoch: 2/10. 
Validation set: Average loss: 0.1495\nTrain: [0/2931 (0%)]\tLoss: 0.071338\nTrain: [1098/2931 (100%)]\tLoss: 0.157009\nEpoch: 3/10. Train set: Average loss: 0.1568\nEpoch: 3/10. Validation set: Average loss: 0.1726\nTrain: [0/2931 (0%)]\tLoss: 0.189601\nTrain: [1098/2931 (100%)]\tLoss: 0.168629\nEpoch: 4/10. Train set: Average loss: 0.1687\nEpoch: 4/10. Validation set: Average loss: 0.1543\nTrain: [0/2931 (0%)]\tLoss: 0.163232\nTrain: [1098/2931 (100%)]\tLoss: 0.163086\nEpoch: 5/10. Train set: Average loss: 0.1631\nEpoch: 5/10. Validation set: Average loss: 0.1781\nTrain: [0/2931 (0%)]\tLoss: 0.204972\nTrain: [1098/2931 (100%)]\tLoss: 0.155482\nEpoch: 6/10. Train set: Average loss: 0.1556\nEpoch: 6/10. Validation set: Average loss: 0.1525\nTrain: [0/2931 (0%)]\tLoss: 0.198200\nTrain: [1098/2931 (100%)]\tLoss: 0.167773\nEpoch: 7/10. Train set: Average loss: 0.1679\nEpoch: 7/10. Validation set: Average loss: 0.1682\nTrain: [0/2931 (0%)]\tLoss: 0.204565\nTrain: [1098/2931 (100%)]\tLoss: 0.156119\nEpoch: 8/10. Train set: Average loss: 0.1563\nEpoch: 8/10. Validation set: Average loss: 0.1703\nTrain: [0/2931 (0%)]\tLoss: 0.190123\nTrain: [1098/2931 (100%)]\tLoss: 0.155336\nEpoch: 9/10. Train set: Average loss: 0.1554\nEpoch: 9/10. Validation set: Average loss: 0.1486\nTrain: [0/2931 (0%)]\tLoss: 0.141820\nTrain: [1098/2931 (100%)]\tLoss: 0.147669\nEpoch: 10/10. Train set: Average loss: 0.1477\nEpoch: 10/10. Validation set: Average loss: 0.1489\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.248835\nTrain: [1098/2931 (100%)]\tLoss: 0.177620\nEpoch: 1/10. Train set: Average loss: 0.1778\nEpoch: 1/10. Validation set: Average loss: 0.1714\nTrain: [0/2931 (0%)]\tLoss: 0.128312\nTrain: [1098/2931 (100%)]\tLoss: 0.180385\nEpoch: 2/10. Train set: Average loss: 0.1802\nEpoch: 2/10. Validation set: Average loss: 0.1882\nTrain: [0/2931 (0%)]\tLoss: 0.197571\nTrain: [1098/2931 (100%)]\tLoss: 0.186177\nEpoch: 3/10. Train set: Average loss: 0.1862\nEpoch: 3/10. Validation set: Average loss: 0.1784\nTrain: [0/2931 (0%)]\tLoss: 0.180224\nTrain: [1098/2931 (100%)]\tLoss: 0.159931\nEpoch: 4/10. Train set: Average loss: 0.1600\nEpoch: 4/10. Validation set: Average loss: 0.1821\nTrain: [0/2931 (0%)]\tLoss: 0.133966\nTrain: [1098/2931 (100%)]\tLoss: 0.161610\nEpoch: 5/10. Train set: Average loss: 0.1615\nEpoch: 5/10. Validation set: Average loss: 0.1498\nTrain: [0/2931 (0%)]\tLoss: 0.180559\nTrain: [1098/2931 (100%)]\tLoss: 0.167781\nEpoch: 6/10. Train set: Average loss: 0.1678\nEpoch: 6/10. Validation set: Average loss: 0.1762\nTrain: [0/2931 (0%)]\tLoss: 0.149246\nTrain: [1098/2931 (100%)]\tLoss: 0.162563\nEpoch: 7/10. Train set: Average loss: 0.1625\nEpoch: 7/10. Validation set: Average loss: 0.1644\nTrain: [0/2931 (0%)]\tLoss: 0.175564\nTrain: [1098/2931 (100%)]\tLoss: 0.159047\nEpoch: 8/10. Train set: Average loss: 0.1591\nEpoch: 8/10. Validation set: Average loss: 0.1692\nTrain: [0/2931 (0%)]\tLoss: 0.186975\nTrain: [1098/2931 (100%)]\tLoss: 0.159223\nEpoch: 9/10. Train set: Average loss: 0.1593\nEpoch: 9/10. Validation set: Average loss: 0.1420\nTrain: [0/2931 (0%)]\tLoss: 0.189447\nTrain: [1098/2931 (100%)]\tLoss: 0.151837\nEpoch: 10/10. Train set: Average loss: 0.1519\nEpoch: 10/10. Validation set: Average loss: 0.1417\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.124908\nTrain: [1098/2931 (100%)]\tLoss: 0.169033\nEpoch: 1/10. Train set: Average loss: 0.1689\nEpoch: 1/10. Validation set: Average loss: 0.1644\nTrain: [0/2931 (0%)]\tLoss: 0.169911\nTrain: [1098/2931 (100%)]\tLoss: 0.169521\nEpoch: 2/10. 
Train set: Average loss: 0.1695\nEpoch: 2/10. Validation set: Average loss: 0.1735\nTrain: [0/2931 (0%)]\tLoss: 0.152619\nTrain: [1098/2931 (100%)]\tLoss: 0.161033\nEpoch: 3/10. Train set: Average loss: 0.1610\nEpoch: 3/10. Validation set: Average loss: 0.1506\nTrain: [0/2931 (0%)]\tLoss: 0.097098\nTrain: [1098/2931 (100%)]\tLoss: 0.158109\nEpoch: 4/10. Train set: Average loss: 0.1579\nEpoch: 4/10. Validation set: Average loss: 0.1756\nTrain: [0/2931 (0%)]\tLoss: 0.089424\nTrain: [1098/2931 (100%)]\tLoss: 0.161251\nEpoch: 5/10. Train set: Average loss: 0.1611\nEpoch: 5/10. Validation set: Average loss: 0.1685\nTrain: [0/2931 (0%)]\tLoss: 0.062694\nTrain: [1098/2931 (100%)]\tLoss: 0.160819\nEpoch: 6/10. Train set: Average loss: 0.1606\nEpoch: 6/10. Validation set: Average loss: 0.1751\nTrain: [0/2931 (0%)]\tLoss: 0.133999\nTrain: [1098/2931 (100%)]\tLoss: 0.154364\nEpoch: 7/10. Train set: Average loss: 0.1543\nEpoch: 7/10. Validation set: Average loss: 0.1607\nTrain: [0/2931 (0%)]\tLoss: 0.111309\nTrain: [1098/2931 (100%)]\tLoss: 0.155447\nEpoch: 8/10. Train set: Average loss: 0.1553\nEpoch: 8/10. Validation set: Average loss: 0.1604\nTrain: [0/2931 (0%)]\tLoss: 0.105627\nTrain: [1098/2931 (100%)]\tLoss: 0.151532\nEpoch: 9/10. Train set: Average loss: 0.1514\nEpoch: 9/10. Validation set: Average loss: 0.1445\nTrain: [0/2931 (0%)]\tLoss: 0.105700\nTrain: [1098/2931 (100%)]\tLoss: 0.147182\nEpoch: 10/10. Train set: Average loss: 0.1471\nEpoch: 10/10. Validation set: Average loss: 0.1410\nNumber features: 16\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.187312\nTrain: [1098/2931 (100%)]\tLoss: 0.175063\nEpoch: 1/10. Train set: Average loss: 0.1751\nEpoch: 1/10. Validation set: Average loss: 0.1490\nTrain: [0/2931 (0%)]\tLoss: 0.155324\nTrain: [1098/2931 (100%)]\tLoss: 0.161024\nEpoch: 2/10. Train set: Average loss: 0.1610\nEpoch: 2/10. Validation set: Average loss: 0.1248\nTrain: [0/2931 (0%)]\tLoss: 0.191281\nTrain: [1098/2931 (100%)]\tLoss: 0.161380\nEpoch: 3/10. Train set: Average loss: 0.1615\nEpoch: 3/10. Validation set: Average loss: 0.1321\nTrain: [0/2931 (0%)]\tLoss: 0.116309\nTrain: [1098/2931 (100%)]\tLoss: 0.158789\nEpoch: 4/10. Train set: Average loss: 0.1587\nEpoch: 4/10. Validation set: Average loss: 0.1297\nTrain: [0/2931 (0%)]\tLoss: 0.250284\nTrain: [1098/2931 (100%)]\tLoss: 0.158402\nEpoch: 5/10. Train set: Average loss: 0.1587\nEpoch: 5/10. Validation set: Average loss: 0.1341\nTrain: [0/2931 (0%)]\tLoss: 0.201631\nTrain: [1098/2931 (100%)]\tLoss: 0.160060\nEpoch: 6/10. Train set: Average loss: 0.1602\nEpoch: 6/10. Validation set: Average loss: 0.1482\nTrain: [0/2931 (0%)]\tLoss: 0.193952\nTrain: [1098/2931 (100%)]\tLoss: 0.163231\nEpoch: 7/10. Train set: Average loss: 0.1633\nEpoch: 7/10. Validation set: Average loss: 0.1518\nTrain: [0/2931 (0%)]\tLoss: 0.116618\nTrain: [1098/2931 (100%)]\tLoss: 0.159096\nEpoch: 8/10. Train set: Average loss: 0.1590\nEpoch: 8/10. Validation set: Average loss: 0.1566\nTrain: [0/2931 (0%)]\tLoss: 0.145078\nTrain: [1098/2931 (100%)]\tLoss: 0.153266\nEpoch: 9/10. Train set: Average loss: 0.1532\nEpoch: 9/10. Validation set: Average loss: 0.1293\nTrain: [0/2931 (0%)]\tLoss: 0.096912\nTrain: [1098/2931 (100%)]\tLoss: 0.152427\nEpoch: 10/10. Train set: Average loss: 0.1523\nEpoch: 10/10. Validation set: Average loss: 0.1276\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.311945\nTrain: [1098/2931 (100%)]\tLoss: 0.182047\nEpoch: 1/10. Train set: Average loss: 0.1824\nEpoch: 1/10. 
Validation set: Average loss: 0.1542\nTrain: [0/2931 (0%)]\tLoss: 0.240431\nTrain: [1098/2931 (100%)]\tLoss: 0.181122\nEpoch: 2/10. Train set: Average loss: 0.1813\nEpoch: 2/10. Validation set: Average loss: 0.1645\nTrain: [0/2931 (0%)]\tLoss: 0.237757\nTrain: [1098/2931 (100%)]\tLoss: 0.172784\nEpoch: 3/10. Train set: Average loss: 0.1730\nEpoch: 3/10. Validation set: Average loss: 0.1530\nTrain: [0/2931 (0%)]\tLoss: 0.256449\nTrain: [1098/2931 (100%)]\tLoss: 0.162615\nEpoch: 4/10. Train set: Average loss: 0.1629\nEpoch: 4/10. Validation set: Average loss: 0.1427\nTrain: [0/2931 (0%)]\tLoss: 0.252131\nTrain: [1098/2931 (100%)]\tLoss: 0.160812\nEpoch: 5/10. Train set: Average loss: 0.1611\nEpoch: 5/10. Validation set: Average loss: 0.1253\nTrain: [0/2931 (0%)]\tLoss: 0.160119\nTrain: [1098/2931 (100%)]\tLoss: 0.163293\nEpoch: 6/10. Train set: Average loss: 0.1633\nEpoch: 6/10. Validation set: Average loss: 0.1532\nTrain: [0/2931 (0%)]\tLoss: 0.263887\nTrain: [1098/2931 (100%)]\tLoss: 0.164410\nEpoch: 7/10. Train set: Average loss: 0.1647\nEpoch: 7/10. Validation set: Average loss: 0.1547\nTrain: [0/2931 (0%)]\tLoss: 0.279145\nTrain: [1098/2931 (100%)]\tLoss: 0.160035\nEpoch: 8/10. Train set: Average loss: 0.1604\nEpoch: 8/10. Validation set: Average loss: 0.1613\nTrain: [0/2931 (0%)]\tLoss: 0.159373\nTrain: [1098/2931 (100%)]\tLoss: 0.156264\nEpoch: 9/10. Train set: Average loss: 0.1563\nEpoch: 9/10. Validation set: Average loss: 0.1348\nTrain: [0/2931 (0%)]\tLoss: 0.168038\nTrain: [1098/2931 (100%)]\tLoss: 0.153070\nEpoch: 10/10. Train set: Average loss: 0.1531\nEpoch: 10/10. Validation set: Average loss: 0.1374\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.124958\nTrain: [1098/2931 (100%)]\tLoss: 0.168861\nEpoch: 1/10. Train set: Average loss: 0.1687\nEpoch: 1/10. Validation set: Average loss: 0.1674\nTrain: [0/2931 (0%)]\tLoss: 0.094877\nTrain: [1098/2931 (100%)]\tLoss: 0.169132\nEpoch: 2/10. Train set: Average loss: 0.1689\nEpoch: 2/10. Validation set: Average loss: 0.1640\nTrain: [0/2931 (0%)]\tLoss: 0.164889\nTrain: [1098/2931 (100%)]\tLoss: 0.156713\nEpoch: 3/10. Train set: Average loss: 0.1567\nEpoch: 3/10. Validation set: Average loss: 0.1616\nTrain: [0/2931 (0%)]\tLoss: 0.078786\nTrain: [1098/2931 (100%)]\tLoss: 0.163334\nEpoch: 4/10. Train set: Average loss: 0.1631\nEpoch: 4/10. Validation set: Average loss: 0.1508\nTrain: [0/2931 (0%)]\tLoss: 0.180394\nTrain: [1098/2931 (100%)]\tLoss: 0.173822\nEpoch: 5/10. Train set: Average loss: 0.1738\nEpoch: 5/10. Validation set: Average loss: 0.1653\nTrain: [0/2931 (0%)]\tLoss: 0.107438\nTrain: [1098/2931 (100%)]\tLoss: 0.169924\nEpoch: 6/10. Train set: Average loss: 0.1698\nEpoch: 6/10. Validation set: Average loss: 0.1538\nTrain: [0/2931 (0%)]\tLoss: 0.135732\nTrain: [1098/2931 (100%)]\tLoss: 0.157809\nEpoch: 7/10. Train set: Average loss: 0.1577\nEpoch: 7/10. Validation set: Average loss: 0.1658\nTrain: [0/2931 (0%)]\tLoss: 0.134964\nTrain: [1098/2931 (100%)]\tLoss: 0.159558\nEpoch: 8/10. Train set: Average loss: 0.1595\nEpoch: 8/10. Validation set: Average loss: 0.1444\nTrain: [0/2931 (0%)]\tLoss: 0.231067\nTrain: [1098/2931 (100%)]\tLoss: 0.157680\nEpoch: 9/10. Train set: Average loss: 0.1579\nEpoch: 9/10. Validation set: Average loss: 0.1322\nTrain: [0/2931 (0%)]\tLoss: 0.117573\nTrain: [1098/2931 (100%)]\tLoss: 0.152796\nEpoch: 10/10. Train set: Average loss: 0.1527\nEpoch: 10/10. 
Validation set: Average loss: 0.1331\nNumber features: 17\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.374628\nTrain: [1098/2931 (100%)]\tLoss: 0.162002\nEpoch: 1/10. Train set: Average loss: 0.1626\nEpoch: 1/10. Validation set: Average loss: 0.1581\nTrain: [0/2931 (0%)]\tLoss: 0.180819\nTrain: [1098/2931 (100%)]\tLoss: 0.163059\nEpoch: 2/10. Train set: Average loss: 0.1631\nEpoch: 2/10. Validation set: Average loss: 0.1531\nTrain: [0/2931 (0%)]\tLoss: 0.184415\nTrain: [1098/2931 (100%)]\tLoss: 0.167436\nEpoch: 3/10. Train set: Average loss: 0.1675\nEpoch: 3/10. Validation set: Average loss: 0.1449\nTrain: [0/2931 (0%)]\tLoss: 0.170762\nTrain: [1098/2931 (100%)]\tLoss: 0.157348\nEpoch: 4/10. Train set: Average loss: 0.1574\nEpoch: 4/10. Validation set: Average loss: 0.1508\nTrain: [0/2931 (0%)]\tLoss: 0.146019\nTrain: [1098/2931 (100%)]\tLoss: 0.152132\nEpoch: 5/10. Train set: Average loss: 0.1521\nEpoch: 5/10. Validation set: Average loss: 0.1524\nTrain: [0/2931 (0%)]\tLoss: 0.235489\nTrain: [1098/2931 (100%)]\tLoss: 0.154585\nEpoch: 6/10. Train set: Average loss: 0.1548\nEpoch: 6/10. Validation set: Average loss: 0.1847\nTrain: [0/2931 (0%)]\tLoss: 0.220954\nTrain: [1098/2931 (100%)]\tLoss: 0.157061\nEpoch: 7/10. Train set: Average loss: 0.1572\nEpoch: 7/10. Validation set: Average loss: 0.1512\nTrain: [0/2931 (0%)]\tLoss: 0.150713\nTrain: [1098/2931 (100%)]\tLoss: 0.157604\nEpoch: 8/10. Train set: Average loss: 0.1576\nEpoch: 8/10. Validation set: Average loss: 0.1576\nTrain: [0/2931 (0%)]\tLoss: 0.151361\nTrain: [1098/2931 (100%)]\tLoss: 0.147709\nEpoch: 9/10. Train set: Average loss: 0.1477\nEpoch: 9/10. Validation set: Average loss: 0.1498\nTrain: [0/2931 (0%)]\tLoss: 0.103571\nTrain: [1098/2931 (100%)]\tLoss: 0.151553\nEpoch: 10/10. Train set: Average loss: 0.1514\nEpoch: 10/10. Validation set: Average loss: 0.1500\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.374799\nTrain: [1098/2931 (100%)]\tLoss: 0.174493\nEpoch: 1/10. Train set: Average loss: 0.1750\nEpoch: 1/10. Validation set: Average loss: 0.1585\nTrain: [0/2931 (0%)]\tLoss: 0.252822\nTrain: [1098/2931 (100%)]\tLoss: 0.156563\nEpoch: 2/10. Train set: Average loss: 0.1568\nEpoch: 2/10. Validation set: Average loss: 0.1531\nTrain: [0/2931 (0%)]\tLoss: 0.160695\nTrain: [1098/2931 (100%)]\tLoss: 0.161935\nEpoch: 3/10. Train set: Average loss: 0.1619\nEpoch: 3/10. Validation set: Average loss: 0.1541\nTrain: [0/2931 (0%)]\tLoss: 0.200844\nTrain: [1098/2931 (100%)]\tLoss: 0.160702\nEpoch: 4/10. Train set: Average loss: 0.1608\nEpoch: 4/10. Validation set: Average loss: 0.1720\nTrain: [0/2931 (0%)]\tLoss: 0.152854\nTrain: [1098/2931 (100%)]\tLoss: 0.159028\nEpoch: 5/10. Train set: Average loss: 0.1590\nEpoch: 5/10. Validation set: Average loss: 0.1586\nTrain: [0/2931 (0%)]\tLoss: 0.168218\nTrain: [1098/2931 (100%)]\tLoss: 0.153289\nEpoch: 6/10. Train set: Average loss: 0.1533\nEpoch: 6/10. Validation set: Average loss: 0.1644\nTrain: [0/2931 (0%)]\tLoss: 0.154774\nTrain: [1098/2931 (100%)]\tLoss: 0.159331\nEpoch: 7/10. Train set: Average loss: 0.1593\nEpoch: 7/10. Validation set: Average loss: 0.1596\nTrain: [0/2931 (0%)]\tLoss: 0.253805\nTrain: [1098/2931 (100%)]\tLoss: 0.154245\nEpoch: 8/10. Train set: Average loss: 0.1545\nEpoch: 8/10. Validation set: Average loss: 0.1594\nTrain: [0/2931 (0%)]\tLoss: 0.067803\nTrain: [1098/2931 (100%)]\tLoss: 0.149459\nEpoch: 9/10. Train set: Average loss: 0.1492\nEpoch: 9/10. 
Validation set: Average loss: 0.1474\nTrain: [0/2931 (0%)]\tLoss: 0.176805\nTrain: [1098/2931 (100%)]\tLoss: 0.151878\nEpoch: 10/10. Train set: Average loss: 0.1519\nEpoch: 10/10. Validation set: Average loss: 0.1467\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.374619\nTrain: [1098/2931 (100%)]\tLoss: 0.170318\nEpoch: 1/10. Train set: Average loss: 0.1709\nEpoch: 1/10. Validation set: Average loss: 0.1590\nTrain: [0/2931 (0%)]\tLoss: 0.165027\nTrain: [1098/2931 (100%)]\tLoss: 0.165866\nEpoch: 2/10. Train set: Average loss: 0.1659\nEpoch: 2/10. Validation set: Average loss: 0.1880\nTrain: [0/2931 (0%)]\tLoss: 0.452332\nTrain: [1098/2931 (100%)]\tLoss: 0.172410\nEpoch: 3/10. Train set: Average loss: 0.1732\nEpoch: 3/10. Validation set: Average loss: 0.1855\nTrain: [0/2931 (0%)]\tLoss: 0.142133\nTrain: [1098/2931 (100%)]\tLoss: 0.169023\nEpoch: 4/10. Train set: Average loss: 0.1689\nEpoch: 4/10. Validation set: Average loss: 0.1517\nTrain: [0/2931 (0%)]\tLoss: 0.227498\nTrain: [1098/2931 (100%)]\tLoss: 0.154219\nEpoch: 5/10. Train set: Average loss: 0.1544\nEpoch: 5/10. Validation set: Average loss: 0.1663\nTrain: [0/2931 (0%)]\tLoss: 0.250061\nTrain: [1098/2931 (100%)]\tLoss: 0.161862\nEpoch: 6/10. Train set: Average loss: 0.1621\nEpoch: 6/10. Validation set: Average loss: 0.1571\nTrain: [0/2931 (0%)]\tLoss: 0.198977\nTrain: [1098/2931 (100%)]\tLoss: 0.161722\nEpoch: 7/10. Train set: Average loss: 0.1618\nEpoch: 7/10. Validation set: Average loss: 0.1700\nTrain: [0/2931 (0%)]\tLoss: 0.232387\nTrain: [1098/2931 (100%)]\tLoss: 0.156251\nEpoch: 8/10. Train set: Average loss: 0.1565\nEpoch: 8/10. Validation set: Average loss: 0.1475\nTrain: [0/2931 (0%)]\tLoss: 0.248716\nTrain: [1098/2931 (100%)]\tLoss: 0.151702\nEpoch: 9/10. Train set: Average loss: 0.1520\nEpoch: 9/10. Validation set: Average loss: 0.1457\nTrain: [0/2931 (0%)]\tLoss: 0.103638\nTrain: [1098/2931 (100%)]\tLoss: 0.148452\nEpoch: 10/10. Train set: Average loss: 0.1483\nEpoch: 10/10. Validation set: Average loss: 0.1463\nNumber features: 18\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.374099\nTrain: [1098/2931 (100%)]\tLoss: 0.165380\nEpoch: 1/10. Train set: Average loss: 0.1659\nEpoch: 1/10. Validation set: Average loss: 0.1660\nTrain: [0/2931 (0%)]\tLoss: 0.244431\nTrain: [1098/2931 (100%)]\tLoss: 0.157949\nEpoch: 2/10. Train set: Average loss: 0.1582\nEpoch: 2/10. Validation set: Average loss: 0.1575\nTrain: [0/2931 (0%)]\tLoss: 0.160236\nTrain: [1098/2931 (100%)]\tLoss: 0.147767\nEpoch: 3/10. Train set: Average loss: 0.1478\nEpoch: 3/10. Validation set: Average loss: 0.1511\nTrain: [0/2931 (0%)]\tLoss: 0.182919\nTrain: [1098/2931 (100%)]\tLoss: 0.152665\nEpoch: 4/10. Train set: Average loss: 0.1527\nEpoch: 4/10. Validation set: Average loss: 0.1758\nTrain: [0/2931 (0%)]\tLoss: 0.217551\nTrain: [1098/2931 (100%)]\tLoss: 0.151266\nEpoch: 5/10. Train set: Average loss: 0.1514\nEpoch: 5/10. Validation set: Average loss: 0.1660\nTrain: [0/2931 (0%)]\tLoss: 0.262580\nTrain: [1098/2931 (100%)]\tLoss: 0.149001\nEpoch: 6/10. Train set: Average loss: 0.1493\nEpoch: 6/10. Validation set: Average loss: 0.1576\nTrain: [0/2931 (0%)]\tLoss: 0.270618\nTrain: [1098/2931 (100%)]\tLoss: 0.149245\nEpoch: 7/10. Train set: Average loss: 0.1496\nEpoch: 7/10. Validation set: Average loss: 0.1671\nTrain: [0/2931 (0%)]\tLoss: 0.259048\nTrain: [1098/2931 (100%)]\tLoss: 0.153705\nEpoch: 8/10. Train set: Average loss: 0.1540\nEpoch: 8/10. 
Validation set: Average loss: 0.1836\nTrain: [0/2931 (0%)]\tLoss: 0.313416\nTrain: [1098/2931 (100%)]\tLoss: 0.160404\nEpoch: 9/10. Train set: Average loss: 0.1608\nEpoch: 9/10. Validation set: Average loss: 0.1543\nTrain: [0/2931 (0%)]\tLoss: 0.239434\nTrain: [1098/2931 (100%)]\tLoss: 0.148789\nEpoch: 10/10. Train set: Average loss: 0.1490\nEpoch: 10/10. Validation set: Average loss: 0.1407\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.186902\nTrain: [1098/2931 (100%)]\tLoss: 0.161736\nEpoch: 1/10. Train set: Average loss: 0.1618\nEpoch: 1/10. Validation set: Average loss: 0.1630\nTrain: [0/2931 (0%)]\tLoss: 0.176008\nTrain: [1098/2931 (100%)]\tLoss: 0.161370\nEpoch: 2/10. Train set: Average loss: 0.1614\nEpoch: 2/10. Validation set: Average loss: 0.1810\nTrain: [0/2931 (0%)]\tLoss: 0.156174\nTrain: [1098/2931 (100%)]\tLoss: 0.157422\nEpoch: 3/10. Train set: Average loss: 0.1574\nEpoch: 3/10. Validation set: Average loss: 0.1877\nTrain: [0/2931 (0%)]\tLoss: 0.159963\nTrain: [1098/2931 (100%)]\tLoss: 0.165032\nEpoch: 4/10. Train set: Average loss: 0.1650\nEpoch: 4/10. Validation set: Average loss: 0.2191\nTrain: [0/2931 (0%)]\tLoss: 0.172581\nTrain: [1098/2931 (100%)]\tLoss: 0.157407\nEpoch: 5/10. Train set: Average loss: 0.1574\nEpoch: 5/10. Validation set: Average loss: 0.1704\nTrain: [0/2931 (0%)]\tLoss: 0.147848\nTrain: [1098/2931 (100%)]\tLoss: 0.146895\nEpoch: 6/10. Train set: Average loss: 0.1469\nEpoch: 6/10. Validation set: Average loss: 0.1673\nTrain: [0/2931 (0%)]\tLoss: 0.230451\nTrain: [1098/2931 (100%)]\tLoss: 0.153555\nEpoch: 7/10. Train set: Average loss: 0.1538\nEpoch: 7/10. Validation set: Average loss: 0.1832\nTrain: [0/2931 (0%)]\tLoss: 0.180007\nTrain: [1098/2931 (100%)]\tLoss: 0.149249\nEpoch: 8/10. Train set: Average loss: 0.1493\nEpoch: 8/10. Validation set: Average loss: 0.1862\nTrain: [0/2931 (0%)]\tLoss: 0.206393\nTrain: [1098/2931 (100%)]\tLoss: 0.155397\nEpoch: 9/10. Train set: Average loss: 0.1555\nEpoch: 9/10. Validation set: Average loss: 0.1601\nTrain: [0/2931 (0%)]\tLoss: 0.187902\nTrain: [1098/2931 (100%)]\tLoss: 0.148551\nEpoch: 10/10. Train set: Average loss: 0.1487\nEpoch: 10/10. Validation set: Average loss: 0.1353\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.311659\nTrain: [1098/2931 (100%)]\tLoss: 0.165482\nEpoch: 1/10. Train set: Average loss: 0.1659\nEpoch: 1/10. Validation set: Average loss: 0.1890\nTrain: [0/2931 (0%)]\tLoss: 0.218419\nTrain: [1098/2931 (100%)]\tLoss: 0.147106\nEpoch: 2/10. Train set: Average loss: 0.1473\nEpoch: 2/10. Validation set: Average loss: 0.1581\nTrain: [0/2931 (0%)]\tLoss: 0.135239\nTrain: [1098/2931 (100%)]\tLoss: 0.148486\nEpoch: 3/10. Train set: Average loss: 0.1485\nEpoch: 3/10. Validation set: Average loss: 0.1963\nTrain: [0/2931 (0%)]\tLoss: 0.100641\nTrain: [1098/2931 (100%)]\tLoss: 0.155706\nEpoch: 4/10. Train set: Average loss: 0.1556\nEpoch: 4/10. Validation set: Average loss: 0.1751\nTrain: [0/2931 (0%)]\tLoss: 0.153470\nTrain: [1098/2931 (100%)]\tLoss: 0.146922\nEpoch: 5/10. Train set: Average loss: 0.1469\nEpoch: 5/10. Validation set: Average loss: 0.1583\nTrain: [0/2931 (0%)]\tLoss: 0.215489\nTrain: [1098/2931 (100%)]\tLoss: 0.155898\nEpoch: 6/10. Train set: Average loss: 0.1561\nEpoch: 6/10. Validation set: Average loss: 0.1905\nTrain: [0/2931 (0%)]\tLoss: 0.238558\nTrain: [1098/2931 (100%)]\tLoss: 0.149396\nEpoch: 7/10. Train set: Average loss: 0.1496\nEpoch: 7/10. Validation set: Average loss: 0.1695\nTrain: [0/2931 (0%)]\tLoss: 0.074113\nTrain: [1098/2931 (100%)]\tLoss: 0.146136\nEpoch: 8/10. 
Train set: Average loss: 0.1459\nEpoch: 8/10. Validation set: Average loss: 0.1638\nTrain: [0/2931 (0%)]\tLoss: 0.161287\nTrain: [1098/2931 (100%)]\tLoss: 0.144221\nEpoch: 9/10. Train set: Average loss: 0.1443\nEpoch: 9/10. Validation set: Average loss: 0.1325\nTrain: [0/2931 (0%)]\tLoss: 0.084114\nTrain: [1098/2931 (100%)]\tLoss: 0.139603\nEpoch: 10/10. Train set: Average loss: 0.1395\nEpoch: 10/10. Validation set: Average loss: 0.1375\nNumber features: 19\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.000003\nTrain: [1098/2931 (100%)]\tLoss: 0.163966\nEpoch: 1/10. Train set: Average loss: 0.1635\nEpoch: 1/10. Validation set: Average loss: 0.1601\nTrain: [0/2931 (0%)]\tLoss: 0.095407\nTrain: [1098/2931 (100%)]\tLoss: 0.157911\nEpoch: 2/10. Train set: Average loss: 0.1577\nEpoch: 2/10. Validation set: Average loss: 0.1165\nTrain: [0/2931 (0%)]\tLoss: 0.062938\nTrain: [1098/2931 (100%)]\tLoss: 0.160182\nEpoch: 3/10. Train set: Average loss: 0.1599\nEpoch: 3/10. Validation set: Average loss: 0.1350\nTrain: [0/2931 (0%)]\tLoss: 0.157520\nTrain: [1098/2931 (100%)]\tLoss: 0.162006\nEpoch: 4/10. Train set: Average loss: 0.1620\nEpoch: 4/10. Validation set: Average loss: 0.1310\nTrain: [0/2931 (0%)]\tLoss: 0.221023\nTrain: [1098/2931 (100%)]\tLoss: 0.156551\nEpoch: 5/10. Train set: Average loss: 0.1567\nEpoch: 5/10. Validation set: Average loss: 0.1287\nTrain: [0/2931 (0%)]\tLoss: 0.229073\nTrain: [1098/2931 (100%)]\tLoss: 0.159371\nEpoch: 6/10. Train set: Average loss: 0.1596\nEpoch: 6/10. Validation set: Average loss: 0.1261\nTrain: [0/2931 (0%)]\tLoss: 0.128206\nTrain: [1098/2931 (100%)]\tLoss: 0.164015\nEpoch: 7/10. Train set: Average loss: 0.1639\nEpoch: 7/10. Validation set: Average loss: 0.1232\nTrain: [0/2931 (0%)]\tLoss: 0.137431\nTrain: [1098/2931 (100%)]\tLoss: 0.169250\nEpoch: 8/10. Train set: Average loss: 0.1692\nEpoch: 8/10. Validation set: Average loss: 0.1683\nTrain: [0/2931 (0%)]\tLoss: 0.070367\nTrain: [1098/2931 (100%)]\tLoss: 0.161515\nEpoch: 9/10. Train set: Average loss: 0.1613\nEpoch: 9/10. Validation set: Average loss: 0.1393\nTrain: [0/2931 (0%)]\tLoss: 0.104784\nTrain: [1098/2931 (100%)]\tLoss: 0.164694\nEpoch: 10/10. Train set: Average loss: 0.1645\nEpoch: 10/10. Validation set: Average loss: 0.1403\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.311458\nTrain: [1098/2931 (100%)]\tLoss: 0.161688\nEpoch: 1/10. Train set: Average loss: 0.1621\nEpoch: 1/10. Validation set: Average loss: 0.1397\nTrain: [0/2931 (0%)]\tLoss: 0.202103\nTrain: [1098/2931 (100%)]\tLoss: 0.161657\nEpoch: 2/10. Train set: Average loss: 0.1618\nEpoch: 2/10. Validation set: Average loss: 0.1130\nTrain: [0/2931 (0%)]\tLoss: 0.103716\nTrain: [1098/2931 (100%)]\tLoss: 0.158635\nEpoch: 3/10. Train set: Average loss: 0.1585\nEpoch: 3/10. Validation set: Average loss: 0.1562\nTrain: [0/2931 (0%)]\tLoss: 0.140149\nTrain: [1098/2931 (100%)]\tLoss: 0.164984\nEpoch: 4/10. Train set: Average loss: 0.1649\nEpoch: 4/10. Validation set: Average loss: 0.1352\nTrain: [0/2931 (0%)]\tLoss: 0.148123\nTrain: [1098/2931 (100%)]\tLoss: 0.161216\nEpoch: 5/10. Train set: Average loss: 0.1612\nEpoch: 5/10. Validation set: Average loss: 0.1133\nTrain: [0/2931 (0%)]\tLoss: 0.217151\nTrain: [1098/2931 (100%)]\tLoss: 0.152310\nEpoch: 6/10. Train set: Average loss: 0.1525\nEpoch: 6/10. Validation set: Average loss: 0.1343\nTrain: [0/2931 (0%)]\tLoss: 0.101676\nTrain: [1098/2931 (100%)]\tLoss: 0.157975\nEpoch: 7/10. Train set: Average loss: 0.1578\nEpoch: 7/10. 
Validation set: Average loss: 0.1262\nTrain: [0/2931 (0%)]\tLoss: 0.286095\nTrain: [1098/2931 (100%)]\tLoss: 0.151791\nEpoch: 8/10. Train set: Average loss: 0.1522\nEpoch: 8/10. Validation set: Average loss: 0.1256\nTrain: [0/2931 (0%)]\tLoss: 0.246815\nTrain: [1098/2931 (100%)]\tLoss: 0.149226\nEpoch: 9/10. Train set: Average loss: 0.1495\nEpoch: 9/10. Validation set: Average loss: 0.1302\nTrain: [0/2931 (0%)]\tLoss: 0.110721\nTrain: [1098/2931 (100%)]\tLoss: 0.141262\nEpoch: 10/10. Train set: Average loss: 0.1412\nEpoch: 10/10. Validation set: Average loss: 0.1285\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.249466\nTrain: [1098/2931 (100%)]\tLoss: 0.161627\nEpoch: 1/10. Train set: Average loss: 0.1619\nEpoch: 1/10. Validation set: Average loss: 0.1470\nTrain: [0/2931 (0%)]\tLoss: 0.221130\nTrain: [1098/2931 (100%)]\tLoss: 0.159445\nEpoch: 2/10. Train set: Average loss: 0.1596\nEpoch: 2/10. Validation set: Average loss: 0.1492\nTrain: [0/2931 (0%)]\tLoss: 0.198451\nTrain: [1098/2931 (100%)]\tLoss: 0.153658\nEpoch: 3/10. Train set: Average loss: 0.1538\nEpoch: 3/10. Validation set: Average loss: 0.1328\nTrain: [0/2931 (0%)]\tLoss: 0.155692\nTrain: [1098/2931 (100%)]\tLoss: 0.163997\nEpoch: 4/10. Train set: Average loss: 0.1640\nEpoch: 4/10. Validation set: Average loss: 0.1535\nTrain: [0/2931 (0%)]\tLoss: 0.183914\nTrain: [1098/2931 (100%)]\tLoss: 0.157993\nEpoch: 5/10. Train set: Average loss: 0.1581\nEpoch: 5/10. Validation set: Average loss: 0.1511\nTrain: [0/2931 (0%)]\tLoss: 0.161256\nTrain: [1098/2931 (100%)]\tLoss: 0.155796\nEpoch: 6/10. Train set: Average loss: 0.1558\nEpoch: 6/10. Validation set: Average loss: 0.1506\nTrain: [0/2931 (0%)]\tLoss: 0.200680\nTrain: [1098/2931 (100%)]\tLoss: 0.154723\nEpoch: 7/10. Train set: Average loss: 0.1548\nEpoch: 7/10. Validation set: Average loss: 0.1513\nTrain: [0/2931 (0%)]\tLoss: 0.112397\nTrain: [1098/2931 (100%)]\tLoss: 0.156938\nEpoch: 8/10. Train set: Average loss: 0.1568\nEpoch: 8/10. Validation set: Average loss: 0.1320\nTrain: [0/2931 (0%)]\tLoss: 0.172098\nTrain: [1098/2931 (100%)]\tLoss: 0.146801\nEpoch: 9/10. Train set: Average loss: 0.1469\nEpoch: 9/10. Validation set: Average loss: 0.1301\nTrain: [0/2931 (0%)]\tLoss: 0.122571\nTrain: [1098/2931 (100%)]\tLoss: 0.145802\nEpoch: 10/10. Train set: Average loss: 0.1457\nEpoch: 10/10. Validation set: Average loss: 0.1299\nNumber features: 20\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.312092\nTrain: [1098/2931 (100%)]\tLoss: 0.161594\nEpoch: 1/10. Train set: Average loss: 0.1620\nEpoch: 1/10. Validation set: Average loss: 0.1780\nTrain: [0/2931 (0%)]\tLoss: 0.159799\nTrain: [1098/2931 (100%)]\tLoss: 0.150011\nEpoch: 2/10. Train set: Average loss: 0.1500\nEpoch: 2/10. Validation set: Average loss: 0.1708\nTrain: [0/2931 (0%)]\tLoss: 0.218066\nTrain: [1098/2931 (100%)]\tLoss: 0.156368\nEpoch: 3/10. Train set: Average loss: 0.1565\nEpoch: 3/10. Validation set: Average loss: 0.1685\nTrain: [0/2931 (0%)]\tLoss: 0.194393\nTrain: [1098/2931 (100%)]\tLoss: 0.150576\nEpoch: 4/10. Train set: Average loss: 0.1507\nEpoch: 4/10. Validation set: Average loss: 0.1614\nTrain: [0/2931 (0%)]\tLoss: 0.156045\nTrain: [1098/2931 (100%)]\tLoss: 0.145860\nEpoch: 5/10. Train set: Average loss: 0.1459\nEpoch: 5/10. Validation set: Average loss: 0.1776\nTrain: [0/2931 (0%)]\tLoss: 0.266631\nTrain: [1098/2931 (100%)]\tLoss: 0.154063\nEpoch: 6/10. Train set: Average loss: 0.1544\nEpoch: 6/10. 
Validation set: Average loss: 0.1572\nTrain: [0/2931 (0%)]\tLoss: 0.165483\nTrain: [1098/2931 (100%)]\tLoss: 0.148114\nEpoch: 7/10. Train set: Average loss: 0.1482\nEpoch: 7/10. Validation set: Average loss: 0.1705\nTrain: [0/2931 (0%)]\tLoss: 0.225475\nTrain: [1098/2931 (100%)]\tLoss: 0.150035\nEpoch: 8/10. Train set: Average loss: 0.1502\nEpoch: 8/10. Validation set: Average loss: 0.1607\nTrain: [0/2931 (0%)]\tLoss: 0.218015\nTrain: [1098/2931 (100%)]\tLoss: 0.143848\nEpoch: 9/10. Train set: Average loss: 0.1440\nEpoch: 9/10. Validation set: Average loss: 0.1489\nTrain: [0/2931 (0%)]\tLoss: 0.180250\nTrain: [1098/2931 (100%)]\tLoss: 0.144640\nEpoch: 10/10. Train set: Average loss: 0.1447\nEpoch: 10/10. Validation set: Average loss: 0.1480\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.312177\nTrain: [1098/2931 (100%)]\tLoss: 0.164309\nEpoch: 1/10. Train set: Average loss: 0.1647\nEpoch: 1/10. Validation set: Average loss: 0.1593\nTrain: [0/2931 (0%)]\tLoss: 0.139682\nTrain: [1098/2931 (100%)]\tLoss: 0.149618\nEpoch: 2/10. Train set: Average loss: 0.1496\nEpoch: 2/10. Validation set: Average loss: 0.1681\nTrain: [0/2931 (0%)]\tLoss: 0.220141\nTrain: [1098/2931 (100%)]\tLoss: 0.159513\nEpoch: 3/10. Train set: Average loss: 0.1597\nEpoch: 3/10. Validation set: Average loss: 0.1583\nTrain: [0/2931 (0%)]\tLoss: 0.163896\nTrain: [1098/2931 (100%)]\tLoss: 0.155884\nEpoch: 4/10. Train set: Average loss: 0.1559\nEpoch: 4/10. Validation set: Average loss: 0.1580\nTrain: [0/2931 (0%)]\tLoss: 0.186783\nTrain: [1098/2931 (100%)]\tLoss: 0.154634\nEpoch: 5/10. Train set: Average loss: 0.1547\nEpoch: 5/10. Validation set: Average loss: 0.1489\nTrain: [0/2931 (0%)]\tLoss: 0.199909\nTrain: [1098/2931 (100%)]\tLoss: 0.157603\nEpoch: 6/10. Train set: Average loss: 0.1577\nEpoch: 6/10. Validation set: Average loss: 0.1801\nTrain: [0/2931 (0%)]\tLoss: 0.275641\nTrain: [1098/2931 (100%)]\tLoss: 0.152017\nEpoch: 7/10. Train set: Average loss: 0.1524\nEpoch: 7/10. Validation set: Average loss: 0.1745\nTrain: [0/2931 (0%)]\tLoss: 0.129814\nTrain: [1098/2931 (100%)]\tLoss: 0.150181\nEpoch: 8/10. Train set: Average loss: 0.1501\nEpoch: 8/10. Validation set: Average loss: 0.1735\nTrain: [0/2931 (0%)]\tLoss: 0.143338\nTrain: [1098/2931 (100%)]\tLoss: 0.152533\nEpoch: 9/10. Train set: Average loss: 0.1525\nEpoch: 9/10. Validation set: Average loss: 0.1624\nTrain: [0/2931 (0%)]\tLoss: 0.156953\nTrain: [1098/2931 (100%)]\tLoss: 0.146945\nEpoch: 10/10. Train set: Average loss: 0.1470\nEpoch: 10/10. Validation set: Average loss: 0.1585\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.310534\nTrain: [1098/2931 (100%)]\tLoss: 0.156607\nEpoch: 1/10. Train set: Average loss: 0.1570\nEpoch: 1/10. Validation set: Average loss: 0.1648\nTrain: [0/2931 (0%)]\tLoss: 0.158676\nTrain: [1098/2931 (100%)]\tLoss: 0.161648\nEpoch: 2/10. Train set: Average loss: 0.1616\nEpoch: 2/10. Validation set: Average loss: 0.1446\nTrain: [0/2931 (0%)]\tLoss: 0.153257\nTrain: [1098/2931 (100%)]\tLoss: 0.156773\nEpoch: 3/10. Train set: Average loss: 0.1568\nEpoch: 3/10. Validation set: Average loss: 0.1770\nTrain: [0/2931 (0%)]\tLoss: 0.101691\nTrain: [1098/2931 (100%)]\tLoss: 0.154597\nEpoch: 4/10. Train set: Average loss: 0.1545\nEpoch: 4/10. Validation set: Average loss: 0.1616\nTrain: [0/2931 (0%)]\tLoss: 0.181199\nTrain: [1098/2931 (100%)]\tLoss: 0.149884\nEpoch: 5/10. Train set: Average loss: 0.1500\nEpoch: 5/10. Validation set: Average loss: 0.1557\nTrain: [0/2931 (0%)]\tLoss: 0.073662\nTrain: [1098/2931 (100%)]\tLoss: 0.152605\nEpoch: 6/10. 
Train set: Average loss: 0.1524\nEpoch: 6/10. Validation set: Average loss: 0.1488\nTrain: [0/2931 (0%)]\tLoss: 0.104809\nTrain: [1098/2931 (100%)]\tLoss: 0.150963\nEpoch: 7/10. Train set: Average loss: 0.1508\nEpoch: 7/10. Validation set: Average loss: 0.1701\nTrain: [0/2931 (0%)]\tLoss: 0.161126\nTrain: [1098/2931 (100%)]\tLoss: 0.151143\nEpoch: 8/10. Train set: Average loss: 0.1512\nEpoch: 8/10. Validation set: Average loss: 0.1616\nTrain: [0/2931 (0%)]\tLoss: 0.118273\nTrain: [1098/2931 (100%)]\tLoss: 0.146031\nEpoch: 9/10. Train set: Average loss: 0.1460\nEpoch: 9/10. Validation set: Average loss: 0.1527\nTrain: [0/2931 (0%)]\tLoss: 0.165978\nTrain: [1098/2931 (100%)]\tLoss: 0.139323\nEpoch: 10/10. Train set: Average loss: 0.1394\nEpoch: 10/10. Validation set: Average loss: 0.1528\nNumber features: 21\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.311821\nTrain: [1098/2931 (100%)]\tLoss: 0.156139\nEpoch: 1/10. Train set: Average loss: 0.1566\nEpoch: 1/10. Validation set: Average loss: 0.1349\nTrain: [0/2931 (0%)]\tLoss: 0.103300\nTrain: [1098/2931 (100%)]\tLoss: 0.151093\nEpoch: 2/10. Train set: Average loss: 0.1510\nEpoch: 2/10. Validation set: Average loss: 0.1522\nTrain: [0/2931 (0%)]\tLoss: 0.206736\nTrain: [1098/2931 (100%)]\tLoss: 0.154567\nEpoch: 3/10. Train set: Average loss: 0.1547\nEpoch: 3/10. Validation set: Average loss: 0.1470\nTrain: [0/2931 (0%)]\tLoss: 0.069796\nTrain: [1098/2931 (100%)]\tLoss: 0.157099\nEpoch: 4/10. Train set: Average loss: 0.1569\nEpoch: 4/10. Validation set: Average loss: 0.1576\nTrain: [0/2931 (0%)]\tLoss: 0.209593\nTrain: [1098/2931 (100%)]\tLoss: 0.161792\nEpoch: 5/10. Train set: Average loss: 0.1619\nEpoch: 5/10. Validation set: Average loss: 0.1388\nTrain: [0/2931 (0%)]\tLoss: 0.192427\nTrain: [1098/2931 (100%)]\tLoss: 0.162663\nEpoch: 6/10. Train set: Average loss: 0.1627\nEpoch: 6/10. Validation set: Average loss: 0.1560\nTrain: [0/2931 (0%)]\tLoss: 0.171165\nTrain: [1098/2931 (100%)]\tLoss: 0.160294\nEpoch: 7/10. Train set: Average loss: 0.1603\nEpoch: 7/10. Validation set: Average loss: 0.1567\nTrain: [0/2931 (0%)]\tLoss: 0.199443\nTrain: [1098/2931 (100%)]\tLoss: 0.161632\nEpoch: 8/10. Train set: Average loss: 0.1617\nEpoch: 8/10. Validation set: Average loss: 0.1724\nTrain: [0/2931 (0%)]\tLoss: 0.129972\nTrain: [1098/2931 (100%)]\tLoss: 0.159815\nEpoch: 9/10. Train set: Average loss: 0.1597\nEpoch: 9/10. Validation set: Average loss: 0.1738\nTrain: [0/2931 (0%)]\tLoss: 0.229063\nTrain: [1098/2931 (100%)]\tLoss: 0.156525\nEpoch: 10/10. Train set: Average loss: 0.1567\nEpoch: 10/10. Validation set: Average loss: 0.1711\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.311087\nTrain: [1098/2931 (100%)]\tLoss: 0.151448\nEpoch: 1/10. Train set: Average loss: 0.1519\nEpoch: 1/10. Validation set: Average loss: 0.1515\nTrain: [0/2931 (0%)]\tLoss: 0.230338\nTrain: [1098/2931 (100%)]\tLoss: 0.153459\nEpoch: 2/10. Train set: Average loss: 0.1537\nEpoch: 2/10. Validation set: Average loss: 0.1682\nTrain: [0/2931 (0%)]\tLoss: 0.141554\nTrain: [1098/2931 (100%)]\tLoss: 0.147873\nEpoch: 3/10. Train set: Average loss: 0.1479\nEpoch: 3/10. Validation set: Average loss: 0.1361\nTrain: [0/2931 (0%)]\tLoss: 0.176859\nTrain: [1098/2931 (100%)]\tLoss: 0.148870\nEpoch: 4/10. Train set: Average loss: 0.1489\nEpoch: 4/10. Validation set: Average loss: 0.1389\nTrain: [0/2931 (0%)]\tLoss: 0.221769\nTrain: [1098/2931 (100%)]\tLoss: 0.155274\nEpoch: 5/10. Train set: Average loss: 0.1555\nEpoch: 5/10. 
Validation set: Average loss: 0.1645\nTrain: [0/2931 (0%)]\tLoss: 0.194328\nTrain: [1098/2931 (100%)]\tLoss: 0.159970\nEpoch: 6/10. Train set: Average loss: 0.1601\nEpoch: 6/10. Validation set: Average loss: 0.1364\nTrain: [0/2931 (0%)]\tLoss: 0.201956\nTrain: [1098/2931 (100%)]\tLoss: 0.151084\nEpoch: 7/10. Train set: Average loss: 0.1512\nEpoch: 7/10. Validation set: Average loss: 0.1331\nTrain: [0/2931 (0%)]\tLoss: 0.141283\nTrain: [1098/2931 (100%)]\tLoss: 0.147487\nEpoch: 8/10. Train set: Average loss: 0.1475\nEpoch: 8/10. Validation set: Average loss: 0.1458\nTrain: [0/2931 (0%)]\tLoss: 0.174876\nTrain: [1098/2931 (100%)]\tLoss: 0.146295\nEpoch: 9/10. Train set: Average loss: 0.1464\nEpoch: 9/10. Validation set: Average loss: 0.1446\nTrain: [0/2931 (0%)]\tLoss: 0.229624\nTrain: [1098/2931 (100%)]\tLoss: 0.143104\nEpoch: 10/10. Train set: Average loss: 0.1433\nEpoch: 10/10. Validation set: Average loss: 0.1476\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.124196\nTrain: [1098/2931 (100%)]\tLoss: 0.158693\nEpoch: 1/10. Train set: Average loss: 0.1586\nEpoch: 1/10. Validation set: Average loss: 0.1304\nTrain: [0/2931 (0%)]\tLoss: 0.169038\nTrain: [1098/2931 (100%)]\tLoss: 0.153569\nEpoch: 2/10. Train set: Average loss: 0.1536\nEpoch: 2/10. Validation set: Average loss: 0.1476\nTrain: [0/2931 (0%)]\tLoss: 0.124688\nTrain: [1098/2931 (100%)]\tLoss: 0.156377\nEpoch: 3/10. Train set: Average loss: 0.1563\nEpoch: 3/10. Validation set: Average loss: 0.1625\nTrain: [0/2931 (0%)]\tLoss: 0.220992\nTrain: [1098/2931 (100%)]\tLoss: 0.165940\nEpoch: 4/10. Train set: Average loss: 0.1661\nEpoch: 4/10. Validation set: Average loss: 0.1481\nTrain: [0/2931 (0%)]\tLoss: 0.120357\nTrain: [1098/2931 (100%)]\tLoss: 0.151562\nEpoch: 5/10. Train set: Average loss: 0.1515\nEpoch: 5/10. Validation set: Average loss: 0.1510\nTrain: [0/2931 (0%)]\tLoss: 0.208990\nTrain: [1098/2931 (100%)]\tLoss: 0.153370\nEpoch: 6/10. Train set: Average loss: 0.1535\nEpoch: 6/10. Validation set: Average loss: 0.1369\nTrain: [0/2931 (0%)]\tLoss: 0.143091\nTrain: [1098/2931 (100%)]\tLoss: 0.157740\nEpoch: 7/10. Train set: Average loss: 0.1577\nEpoch: 7/10. Validation set: Average loss: 0.1341\nTrain: [0/2931 (0%)]\tLoss: 0.077291\nTrain: [1098/2931 (100%)]\tLoss: 0.148966\nEpoch: 8/10. Train set: Average loss: 0.1488\nEpoch: 8/10. Validation set: Average loss: 0.1452\nTrain: [0/2931 (0%)]\tLoss: 0.147442\nTrain: [1098/2931 (100%)]\tLoss: 0.145572\nEpoch: 9/10. Train set: Average loss: 0.1456\nEpoch: 9/10. Validation set: Average loss: 0.1402\nTrain: [0/2931 (0%)]\tLoss: 0.094560\nTrain: [1098/2931 (100%)]\tLoss: 0.145362\nEpoch: 10/10. Train set: Average loss: 0.1452\nEpoch: 10/10. Validation set: Average loss: 0.1404\nNumber features: 22\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.124696\nTrain: [1098/2931 (100%)]\tLoss: 0.159438\nEpoch: 1/10. Train set: Average loss: 0.1593\nEpoch: 1/10. Validation set: Average loss: 0.1550\nTrain: [0/2931 (0%)]\tLoss: 0.120150\nTrain: [1098/2931 (100%)]\tLoss: 0.159736\nEpoch: 2/10. Train set: Average loss: 0.1596\nEpoch: 2/10. Validation set: Average loss: 0.1239\nTrain: [0/2931 (0%)]\tLoss: 0.134753\nTrain: [1098/2931 (100%)]\tLoss: 0.160177\nEpoch: 3/10. Train set: Average loss: 0.1601\nEpoch: 3/10. Validation set: Average loss: 0.1202\nTrain: [0/2931 (0%)]\tLoss: 0.134195\nTrain: [1098/2931 (100%)]\tLoss: 0.156184\nEpoch: 4/10. Train set: Average loss: 0.1561\nEpoch: 4/10. 
Validation set: Average loss: 0.1521\nTrain: [0/2931 (0%)]\tLoss: 0.138874\nTrain: [1098/2931 (100%)]\tLoss: 0.158593\nEpoch: 5/10. Train set: Average loss: 0.1585\nEpoch: 5/10. Validation set: Average loss: 0.1402\nTrain: [0/2931 (0%)]\tLoss: 0.126166\nTrain: [1098/2931 (100%)]\tLoss: 0.160741\nEpoch: 6/10. Train set: Average loss: 0.1606\nEpoch: 6/10. Validation set: Average loss: 0.1470\nTrain: [0/2931 (0%)]\tLoss: 0.208170\nTrain: [1098/2931 (100%)]\tLoss: 0.150626\nEpoch: 7/10. Train set: Average loss: 0.1508\nEpoch: 7/10. Validation set: Average loss: 0.1325\nTrain: [0/2931 (0%)]\tLoss: 0.205419\nTrain: [1098/2931 (100%)]\tLoss: 0.155273\nEpoch: 8/10. Train set: Average loss: 0.1554\nEpoch: 8/10. Validation set: Average loss: 0.1467\nTrain: [0/2931 (0%)]\tLoss: 0.146881\nTrain: [1098/2931 (100%)]\tLoss: 0.147002\nEpoch: 9/10. Train set: Average loss: 0.1470\nEpoch: 9/10. Validation set: Average loss: 0.1370\nTrain: [0/2931 (0%)]\tLoss: 0.114335\nTrain: [1098/2931 (100%)]\tLoss: 0.144145\nEpoch: 10/10. Train set: Average loss: 0.1441\nEpoch: 10/10. Validation set: Average loss: 0.1370\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.187314\nTrain: [1098/2931 (100%)]\tLoss: 0.157895\nEpoch: 1/10. Train set: Average loss: 0.1580\nEpoch: 1/10. Validation set: Average loss: 0.1285\nTrain: [0/2931 (0%)]\tLoss: 0.165673\nTrain: [1098/2931 (100%)]\tLoss: 0.159747\nEpoch: 2/10. Train set: Average loss: 0.1598\nEpoch: 2/10. Validation set: Average loss: 0.1284\nTrain: [0/2931 (0%)]\tLoss: 0.236949\nTrain: [1098/2931 (100%)]\tLoss: 0.170616\nEpoch: 3/10. Train set: Average loss: 0.1708\nEpoch: 3/10. Validation set: Average loss: 0.1471\nTrain: [0/2931 (0%)]\tLoss: 0.085285\nTrain: [1098/2931 (100%)]\tLoss: 0.165115\nEpoch: 4/10. Train set: Average loss: 0.1649\nEpoch: 4/10. Validation set: Average loss: 0.1444\nTrain: [0/2931 (0%)]\tLoss: 0.174532\nTrain: [1098/2931 (100%)]\tLoss: 0.156624\nEpoch: 5/10. Train set: Average loss: 0.1567\nEpoch: 5/10. Validation set: Average loss: 0.1464\nTrain: [0/2931 (0%)]\tLoss: 0.095620\nTrain: [1098/2931 (100%)]\tLoss: 0.158752\nEpoch: 6/10. Train set: Average loss: 0.1586\nEpoch: 6/10. Validation set: Average loss: 0.1345\nTrain: [0/2931 (0%)]\tLoss: 0.137457\nTrain: [1098/2931 (100%)]\tLoss: 0.158780\nEpoch: 7/10. Train set: Average loss: 0.1587\nEpoch: 7/10. Validation set: Average loss: 0.1355\nTrain: [0/2931 (0%)]\tLoss: 0.145613\nTrain: [1098/2931 (100%)]\tLoss: 0.150528\nEpoch: 8/10. Train set: Average loss: 0.1505\nEpoch: 8/10. Validation set: Average loss: 0.1284\nTrain: [0/2931 (0%)]\tLoss: 0.137733\nTrain: [1098/2931 (100%)]\tLoss: 0.143412\nEpoch: 9/10. Train set: Average loss: 0.1434\nEpoch: 9/10. Validation set: Average loss: 0.1315\nTrain: [0/2931 (0%)]\tLoss: 0.091347\nTrain: [1098/2931 (100%)]\tLoss: 0.145105\nEpoch: 10/10. Train set: Average loss: 0.1450\nEpoch: 10/10. Validation set: Average loss: 0.1367\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.124571\nTrain: [1098/2931 (100%)]\tLoss: 0.163815\nEpoch: 1/10. Train set: Average loss: 0.1637\nEpoch: 1/10. Validation set: Average loss: 0.1410\nTrain: [0/2931 (0%)]\tLoss: 0.475120\nTrain: [1098/2931 (100%)]\tLoss: 0.162671\nEpoch: 2/10. Train set: Average loss: 0.1635\nEpoch: 2/10. Validation set: Average loss: 0.1428\nTrain: [0/2931 (0%)]\tLoss: 0.087581\nTrain: [1098/2931 (100%)]\tLoss: 0.158495\nEpoch: 3/10. Train set: Average loss: 0.1583\nEpoch: 3/10. Validation set: Average loss: 0.1215\nTrain: [0/2931 (0%)]\tLoss: 0.138482\nTrain: [1098/2931 (100%)]\tLoss: 0.155511\nEpoch: 4/10. 
Train set: Average loss: 0.1555\nEpoch: 4/10. Validation set: Average loss: 0.1366\nTrain: [0/2931 (0%)]\tLoss: 0.214536\nTrain: [1098/2931 (100%)]\tLoss: 0.156767\nEpoch: 5/10. Train set: Average loss: 0.1569\nEpoch: 5/10. Validation set: Average loss: 0.1910\nTrain: [0/2931 (0%)]\tLoss: 0.277139\nTrain: [1098/2931 (100%)]\tLoss: 0.159966\nEpoch: 6/10. Train set: Average loss: 0.1603\nEpoch: 6/10. Validation set: Average loss: 0.1402\nTrain: [0/2931 (0%)]\tLoss: 0.291065\nTrain: [1098/2931 (100%)]\tLoss: 0.166456\nEpoch: 7/10. Train set: Average loss: 0.1668\nEpoch: 7/10. Validation set: Average loss: 0.1503\nTrain: [0/2931 (0%)]\tLoss: 0.070217\nTrain: [1098/2931 (100%)]\tLoss: 0.163137\nEpoch: 8/10. Train set: Average loss: 0.1629\nEpoch: 8/10. Validation set: Average loss: 0.1474\nTrain: [0/2931 (0%)]\tLoss: 0.149355\nTrain: [1098/2931 (100%)]\tLoss: 0.149555\nEpoch: 9/10. Train set: Average loss: 0.1496\nEpoch: 9/10. Validation set: Average loss: 0.1318\nTrain: [0/2931 (0%)]\tLoss: 0.194628\nTrain: [1098/2931 (100%)]\tLoss: 0.143389\nEpoch: 10/10. Train set: Average loss: 0.1435\nEpoch: 10/10. Validation set: Average loss: 0.1308\nNumber features: 23\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.062346\nTrain: [1098/2931 (100%)]\tLoss: 0.169242\nEpoch: 1/10. Train set: Average loss: 0.1690\nEpoch: 1/10. Validation set: Average loss: 0.1758\nTrain: [0/2931 (0%)]\tLoss: 0.509744\nTrain: [1098/2931 (100%)]\tLoss: 0.163322\nEpoch: 2/10. Train set: Average loss: 0.1643\nEpoch: 2/10. Validation set: Average loss: 0.1539\nTrain: [0/2931 (0%)]\tLoss: 0.067546\nTrain: [1098/2931 (100%)]\tLoss: 0.156652\nEpoch: 3/10. Train set: Average loss: 0.1564\nEpoch: 3/10. Validation set: Average loss: 0.1852\nTrain: [0/2931 (0%)]\tLoss: 0.285536\nTrain: [1098/2931 (100%)]\tLoss: 0.155351\nEpoch: 4/10. Train set: Average loss: 0.1557\nEpoch: 4/10. Validation set: Average loss: 0.1678\nTrain: [0/2931 (0%)]\tLoss: 0.146143\nTrain: [1098/2931 (100%)]\tLoss: 0.156128\nEpoch: 5/10. Train set: Average loss: 0.1561\nEpoch: 5/10. Validation set: Average loss: 0.2217\nTrain: [0/2931 (0%)]\tLoss: 0.502355\nTrain: [1098/2931 (100%)]\tLoss: 0.171076\nEpoch: 6/10. Train set: Average loss: 0.1720\nEpoch: 6/10. Validation set: Average loss: 0.2204\nTrain: [0/2931 (0%)]\tLoss: 0.170802\nTrain: [1098/2931 (100%)]\tLoss: 0.167204\nEpoch: 7/10. Train set: Average loss: 0.1672\nEpoch: 7/10. Validation set: Average loss: 0.1684\nTrain: [0/2931 (0%)]\tLoss: 0.119273\nTrain: [1098/2931 (100%)]\tLoss: 0.166338\nEpoch: 8/10. Train set: Average loss: 0.1662\nEpoch: 8/10. Validation set: Average loss: 0.1671\nTrain: [0/2931 (0%)]\tLoss: 0.294824\nTrain: [1098/2931 (100%)]\tLoss: 0.153682\nEpoch: 9/10. Train set: Average loss: 0.1541\nEpoch: 9/10. Validation set: Average loss: 0.1508\nTrain: [0/2931 (0%)]\tLoss: 0.073026\nTrain: [1098/2931 (100%)]\tLoss: 0.146199\nEpoch: 10/10. Train set: Average loss: 0.1460\nEpoch: 10/10. Validation set: Average loss: 0.1504\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.249586\nTrain: [1098/2931 (100%)]\tLoss: 0.173407\nEpoch: 1/10. Train set: Average loss: 0.1736\nEpoch: 1/10. Validation set: Average loss: 0.2010\nTrain: [0/2931 (0%)]\tLoss: 0.084303\nTrain: [1098/2931 (100%)]\tLoss: 0.178212\nEpoch: 2/10. Train set: Average loss: 0.1780\nEpoch: 2/10. Validation set: Average loss: 0.1848\nTrain: [0/2931 (0%)]\tLoss: 0.094448\nTrain: [1098/2931 (100%)]\tLoss: 0.170586\nEpoch: 3/10. Train set: Average loss: 0.1704\nEpoch: 3/10. 
Validation set: Average loss: 0.3427\nTrain: [0/2931 (0%)]\tLoss: 0.193522\nTrain: [1098/2931 (100%)]\tLoss: 0.165428\nEpoch: 4/10. Train set: Average loss: 0.1655\nEpoch: 4/10. Validation set: Average loss: 0.2841\nTrain: [0/2931 (0%)]\tLoss: 0.356819\nTrain: [1098/2931 (100%)]\tLoss: 0.166530\nEpoch: 5/10. Train set: Average loss: 0.1670\nEpoch: 5/10. Validation set: Average loss: 0.2324\nTrain: [0/2931 (0%)]\tLoss: 0.312755\nTrain: [1098/2931 (100%)]\tLoss: 0.157574\nEpoch: 6/10. Train set: Average loss: 0.1580\nEpoch: 6/10. Validation set: Average loss: 0.1973\nTrain: [0/2931 (0%)]\tLoss: 0.070177\nTrain: [1098/2931 (100%)]\tLoss: 0.154287\nEpoch: 7/10. Train set: Average loss: 0.1541\nEpoch: 7/10. Validation set: Average loss: 0.2115\nTrain: [0/2931 (0%)]\tLoss: 0.247115\nTrain: [1098/2931 (100%)]\tLoss: 0.149679\nEpoch: 8/10. Train set: Average loss: 0.1499\nEpoch: 8/10. Validation set: Average loss: 0.2435\nTrain: [0/2931 (0%)]\tLoss: 0.400878\nTrain: [1098/2931 (100%)]\tLoss: 0.152308\nEpoch: 9/10. Train set: Average loss: 0.1530\nEpoch: 9/10. Validation set: Average loss: 0.1515\nTrain: [0/2931 (0%)]\tLoss: 0.176842\nTrain: [1098/2931 (100%)]\tLoss: 0.146074\nEpoch: 10/10. Train set: Average loss: 0.1462\nEpoch: 10/10. Validation set: Average loss: 0.1499\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.312018\nTrain: [1098/2931 (100%)]\tLoss: 0.168900\nEpoch: 1/10. Train set: Average loss: 0.1693\nEpoch: 1/10. Validation set: Average loss: 0.3932\nTrain: [0/2931 (0%)]\tLoss: 1.666468\nTrain: [1098/2931 (100%)]\tLoss: 0.168528\nEpoch: 2/10. Train set: Average loss: 0.1726\nEpoch: 2/10. Validation set: Average loss: 0.1648\nTrain: [0/2931 (0%)]\tLoss: 0.165081\nTrain: [1098/2931 (100%)]\tLoss: 0.176593\nEpoch: 3/10. Train set: Average loss: 0.1766\nEpoch: 3/10. Validation set: Average loss: 0.1693\nTrain: [0/2931 (0%)]\tLoss: 0.141771\nTrain: [1098/2931 (100%)]\tLoss: 0.166012\nEpoch: 4/10. Train set: Average loss: 0.1659\nEpoch: 4/10. Validation set: Average loss: 0.3335\nTrain: [0/2931 (0%)]\tLoss: 0.262634\nTrain: [1098/2931 (100%)]\tLoss: 0.172872\nEpoch: 5/10. Train set: Average loss: 0.1731\nEpoch: 5/10. Validation set: Average loss: 0.1810\nTrain: [0/2931 (0%)]\tLoss: 0.110803\nTrain: [1098/2931 (100%)]\tLoss: 0.167474\nEpoch: 6/10. Train set: Average loss: 0.1673\nEpoch: 6/10. Validation set: Average loss: 0.1697\nTrain: [0/2931 (0%)]\tLoss: 0.155922\nTrain: [1098/2931 (100%)]\tLoss: 0.162157\nEpoch: 7/10. Train set: Average loss: 0.1621\nEpoch: 7/10. Validation set: Average loss: 0.2046\nTrain: [0/2931 (0%)]\tLoss: 0.237859\nTrain: [1098/2931 (100%)]\tLoss: 0.158385\nEpoch: 8/10. Train set: Average loss: 0.1586\nEpoch: 8/10. Validation set: Average loss: 0.2245\nTrain: [0/2931 (0%)]\tLoss: 0.157416\nTrain: [1098/2931 (100%)]\tLoss: 0.150219\nEpoch: 9/10. Train set: Average loss: 0.1502\nEpoch: 9/10. Validation set: Average loss: 0.1499\nTrain: [0/2931 (0%)]\tLoss: 0.148553\nTrain: [1098/2931 (100%)]\tLoss: 0.147616\nEpoch: 10/10. Train set: Average loss: 0.1476\nEpoch: 10/10. Validation set: Average loss: 0.1510\nNumber features: 24\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.187213\nTrain: [1098/2931 (100%)]\tLoss: 0.168461\nEpoch: 1/10. Train set: Average loss: 0.1685\nEpoch: 1/10. Validation set: Average loss: 0.1490\nTrain: [0/2931 (0%)]\tLoss: 0.169652\nTrain: [1098/2931 (100%)]\tLoss: 0.172000\nEpoch: 2/10. Train set: Average loss: 0.1720\nEpoch: 2/10. 
Validation set: Average loss: 0.1601\nTrain: [0/2931 (0%)]\tLoss: 0.090425\nTrain: [1098/2931 (100%)]\tLoss: 0.171559\nEpoch: 3/10. Train set: Average loss: 0.1713\nEpoch: 3/10. Validation set: Average loss: 0.1328\nTrain: [0/2931 (0%)]\tLoss: 0.121009\nTrain: [1098/2931 (100%)]\tLoss: 0.165797\nEpoch: 4/10. Train set: Average loss: 0.1657\nEpoch: 4/10. Validation set: Average loss: 0.1403\nTrain: [0/2931 (0%)]\tLoss: 0.132607\nTrain: [1098/2931 (100%)]\tLoss: 0.165958\nEpoch: 5/10. Train set: Average loss: 0.1659\nEpoch: 5/10. Validation set: Average loss: 0.1308\nTrain: [0/2931 (0%)]\tLoss: 0.134930\nTrain: [1098/2931 (100%)]\tLoss: 0.158596\nEpoch: 6/10. Train set: Average loss: 0.1585\nEpoch: 6/10. Validation set: Average loss: 0.1435\nTrain: [0/2931 (0%)]\tLoss: 0.181504\nTrain: [1098/2931 (100%)]\tLoss: 0.165291\nEpoch: 7/10. Train set: Average loss: 0.1653\nEpoch: 7/10. Validation set: Average loss: 0.1602\nTrain: [0/2931 (0%)]\tLoss: 0.167018\nTrain: [1098/2931 (100%)]\tLoss: 0.163799\nEpoch: 8/10. Train set: Average loss: 0.1638\nEpoch: 8/10. Validation set: Average loss: 0.1691\nTrain: [0/2931 (0%)]\tLoss: 0.151216\nTrain: [1098/2931 (100%)]\tLoss: 0.157533\nEpoch: 9/10. Train set: Average loss: 0.1575\nEpoch: 9/10. Validation set: Average loss: 0.1438\nTrain: [0/2931 (0%)]\tLoss: 0.115693\nTrain: [1098/2931 (100%)]\tLoss: 0.148854\nEpoch: 10/10. Train set: Average loss: 0.1488\nEpoch: 10/10. Validation set: Average loss: 0.1487\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.186360\nTrain: [1098/2931 (100%)]\tLoss: 0.171983\nEpoch: 1/10. Train set: Average loss: 0.1720\nEpoch: 1/10. Validation set: Average loss: 0.1515\nTrain: [0/2931 (0%)]\tLoss: 0.173244\nTrain: [1098/2931 (100%)]\tLoss: 0.159895\nEpoch: 2/10. Train set: Average loss: 0.1599\nEpoch: 2/10. Validation set: Average loss: 0.1300\nTrain: [0/2931 (0%)]\tLoss: 0.114230\nTrain: [1098/2931 (100%)]\tLoss: 0.162674\nEpoch: 3/10. Train set: Average loss: 0.1625\nEpoch: 3/10. Validation set: Average loss: 0.1331\nTrain: [0/2931 (0%)]\tLoss: 0.184205\nTrain: [1098/2931 (100%)]\tLoss: 0.156392\nEpoch: 4/10. Train set: Average loss: 0.1565\nEpoch: 4/10. Validation set: Average loss: 0.1431\nTrain: [0/2931 (0%)]\tLoss: 0.107182\nTrain: [1098/2931 (100%)]\tLoss: 0.151586\nEpoch: 5/10. Train set: Average loss: 0.1515\nEpoch: 5/10. Validation set: Average loss: 0.1264\nTrain: [0/2931 (0%)]\tLoss: 0.140219\nTrain: [1098/2931 (100%)]\tLoss: 0.158467\nEpoch: 6/10. Train set: Average loss: 0.1584\nEpoch: 6/10. Validation set: Average loss: 0.1369\nTrain: [0/2931 (0%)]\tLoss: 0.209621\nTrain: [1098/2931 (100%)]\tLoss: 0.155362\nEpoch: 7/10. Train set: Average loss: 0.1555\nEpoch: 7/10. Validation set: Average loss: 0.1425\nTrain: [0/2931 (0%)]\tLoss: 0.103968\nTrain: [1098/2931 (100%)]\tLoss: 0.153822\nEpoch: 8/10. Train set: Average loss: 0.1537\nEpoch: 8/10. Validation set: Average loss: 0.1464\nTrain: [0/2931 (0%)]\tLoss: 0.119293\nTrain: [1098/2931 (100%)]\tLoss: 0.149289\nEpoch: 9/10. Train set: Average loss: 0.1492\nEpoch: 9/10. Validation set: Average loss: 0.1379\nTrain: [0/2931 (0%)]\tLoss: 0.127050\nTrain: [1098/2931 (100%)]\tLoss: 0.148340\nEpoch: 10/10. Train set: Average loss: 0.1483\nEpoch: 10/10. Validation set: Average loss: 0.1352\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.311953\nTrain: [1098/2931 (100%)]\tLoss: 0.180374\nEpoch: 1/10. Train set: Average loss: 0.1807\nEpoch: 1/10. Validation set: Average loss: 0.1247\nTrain: [0/2931 (0%)]\tLoss: 0.157428\nTrain: [1098/2931 (100%)]\tLoss: 0.160970\nEpoch: 2/10. 
Train set: Average loss: 0.1610\nEpoch: 2/10. Validation set: Average loss: 0.1413\nTrain: [0/2931 (0%)]\tLoss: 0.174404\nTrain: [1098/2931 (100%)]\tLoss: 0.168358\nEpoch: 3/10. Train set: Average loss: 0.1684\nEpoch: 3/10. Validation set: Average loss: 0.1250\nTrain: [0/2931 (0%)]\tLoss: 0.260466\nTrain: [1098/2931 (100%)]\tLoss: 0.160974\nEpoch: 4/10. Train set: Average loss: 0.1612\nEpoch: 4/10. Validation set: Average loss: 0.1334\nTrain: [0/2931 (0%)]\tLoss: 0.156847\nTrain: [1098/2931 (100%)]\tLoss: 0.164449\nEpoch: 5/10. Train set: Average loss: 0.1644\nEpoch: 5/10. Validation set: Average loss: 0.1516\nTrain: [0/2931 (0%)]\tLoss: 0.113917\nTrain: [1098/2931 (100%)]\tLoss: 0.166551\nEpoch: 6/10. Train set: Average loss: 0.1664\nEpoch: 6/10. Validation set: Average loss: 0.1447\nTrain: [0/2931 (0%)]\tLoss: 0.169778\nTrain: [1098/2931 (100%)]\tLoss: 0.168351\nEpoch: 7/10. Train set: Average loss: 0.1684\nEpoch: 7/10. Validation set: Average loss: 0.1357\nTrain: [0/2931 (0%)]\tLoss: 0.178612\nTrain: [1098/2931 (100%)]\tLoss: 0.166244\nEpoch: 8/10. Train set: Average loss: 0.1663\nEpoch: 8/10. Validation set: Average loss: 0.1311\nTrain: [0/2931 (0%)]\tLoss: 0.133452\nTrain: [1098/2931 (100%)]\tLoss: 0.148852\nEpoch: 9/10. Train set: Average loss: 0.1488\nEpoch: 9/10. Validation set: Average loss: 0.1352\nTrain: [0/2931 (0%)]\tLoss: 0.201153\nTrain: [1098/2931 (100%)]\tLoss: 0.151658\nEpoch: 10/10. Train set: Average loss: 0.1518\nEpoch: 10/10. Validation set: Average loss: 0.1363\nNumber features: 25\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.249505\nTrain: [1098/2931 (100%)]\tLoss: 0.166777\nEpoch: 1/10. Train set: Average loss: 0.1670\nEpoch: 1/10. Validation set: Average loss: 0.1375\nTrain: [0/2931 (0%)]\tLoss: 0.172798\nTrain: [1098/2931 (100%)]\tLoss: 0.162926\nEpoch: 2/10. Train set: Average loss: 0.1630\nEpoch: 2/10. Validation set: Average loss: 0.1622\nTrain: [0/2931 (0%)]\tLoss: 0.161284\nTrain: [1098/2931 (100%)]\tLoss: 0.169247\nEpoch: 3/10. Train set: Average loss: 0.1692\nEpoch: 3/10. Validation set: Average loss: 0.1661\nTrain: [0/2931 (0%)]\tLoss: 0.184359\nTrain: [1098/2931 (100%)]\tLoss: 0.153864\nEpoch: 4/10. Train set: Average loss: 0.1539\nEpoch: 4/10. Validation set: Average loss: 0.1416\nTrain: [0/2931 (0%)]\tLoss: 0.150405\nTrain: [1098/2931 (100%)]\tLoss: 0.154939\nEpoch: 5/10. Train set: Average loss: 0.1549\nEpoch: 5/10. Validation set: Average loss: 0.1320\nTrain: [0/2931 (0%)]\tLoss: 0.178688\nTrain: [1098/2931 (100%)]\tLoss: 0.154491\nEpoch: 6/10. Train set: Average loss: 0.1546\nEpoch: 6/10. Validation set: Average loss: 0.1260\nTrain: [0/2931 (0%)]\tLoss: 0.235860\nTrain: [1098/2931 (100%)]\tLoss: 0.151567\nEpoch: 7/10. Train set: Average loss: 0.1518\nEpoch: 7/10. Validation set: Average loss: 0.1214\nTrain: [0/2931 (0%)]\tLoss: 0.158517\nTrain: [1098/2931 (100%)]\tLoss: 0.150299\nEpoch: 8/10. Train set: Average loss: 0.1503\nEpoch: 8/10. Validation set: Average loss: 0.1259\nTrain: [0/2931 (0%)]\tLoss: 0.129701\nTrain: [1098/2931 (100%)]\tLoss: 0.148928\nEpoch: 9/10. Train set: Average loss: 0.1489\nEpoch: 9/10. Validation set: Average loss: 0.1222\nTrain: [0/2931 (0%)]\tLoss: 0.204649\nTrain: [1098/2931 (100%)]\tLoss: 0.143269\nEpoch: 10/10. Train set: Average loss: 0.1434\nEpoch: 10/10. Validation set: Average loss: 0.1191\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.312173\nTrain: [1098/2931 (100%)]\tLoss: 0.164818\nEpoch: 1/10. Train set: Average loss: 0.1652\nEpoch: 1/10. 
Validation set: Average loss: 0.1685\nTrain: [0/2931 (0%)]\tLoss: 0.146586\nTrain: [1098/2931 (100%)]\tLoss: 0.154665\nEpoch: 2/10. Train set: Average loss: 0.1546\nEpoch: 2/10. Validation set: Average loss: 0.1600\nTrain: [0/2931 (0%)]\tLoss: 0.153409\nTrain: [1098/2931 (100%)]\tLoss: 0.166401\nEpoch: 3/10. Train set: Average loss: 0.1664\nEpoch: 3/10. Validation set: Average loss: 0.1968\nTrain: [0/2931 (0%)]\tLoss: 0.167349\nTrain: [1098/2931 (100%)]\tLoss: 0.156237\nEpoch: 4/10. Train set: Average loss: 0.1563\nEpoch: 4/10. Validation set: Average loss: 0.1413\nTrain: [0/2931 (0%)]\tLoss: 0.258663\nTrain: [1098/2931 (100%)]\tLoss: 0.157172\nEpoch: 5/10. Train set: Average loss: 0.1574\nEpoch: 5/10. Validation set: Average loss: 0.1589\nTrain: [0/2931 (0%)]\tLoss: 0.087240\nTrain: [1098/2931 (100%)]\tLoss: 0.157485\nEpoch: 6/10. Train set: Average loss: 0.1573\nEpoch: 6/10. Validation set: Average loss: 0.1396\nTrain: [0/2931 (0%)]\tLoss: 0.147783\nTrain: [1098/2931 (100%)]\tLoss: 0.154882\nEpoch: 7/10. Train set: Average loss: 0.1549\nEpoch: 7/10. Validation set: Average loss: 0.1470\nTrain: [0/2931 (0%)]\tLoss: 0.145846\nTrain: [1098/2931 (100%)]\tLoss: 0.151903\nEpoch: 8/10. Train set: Average loss: 0.1519\nEpoch: 8/10. Validation set: Average loss: 0.1430\nTrain: [0/2931 (0%)]\tLoss: 0.216498\nTrain: [1098/2931 (100%)]\tLoss: 0.144621\nEpoch: 9/10. Train set: Average loss: 0.1448\nEpoch: 9/10. Validation set: Average loss: 0.1281\nTrain: [0/2931 (0%)]\tLoss: 0.148740\nTrain: [1098/2931 (100%)]\tLoss: 0.144274\nEpoch: 10/10. Train set: Average loss: 0.1443\nEpoch: 10/10. Validation set: Average loss: 0.1282\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.249204\nTrain: [1098/2931 (100%)]\tLoss: 0.168339\nEpoch: 1/10. Train set: Average loss: 0.1686\nEpoch: 1/10. Validation set: Average loss: 0.1565\nTrain: [0/2931 (0%)]\tLoss: 0.201386\nTrain: [1098/2931 (100%)]\tLoss: 0.153754\nEpoch: 2/10. Train set: Average loss: 0.1539\nEpoch: 2/10. Validation set: Average loss: 0.1349\nTrain: [0/2931 (0%)]\tLoss: 0.217741\nTrain: [1098/2931 (100%)]\tLoss: 0.160106\nEpoch: 3/10. Train set: Average loss: 0.1603\nEpoch: 3/10. Validation set: Average loss: 0.1665\nTrain: [0/2931 (0%)]\tLoss: 0.233445\nTrain: [1098/2931 (100%)]\tLoss: 0.153743\nEpoch: 4/10. Train set: Average loss: 0.1540\nEpoch: 4/10. Validation set: Average loss: 0.1656\nTrain: [0/2931 (0%)]\tLoss: 0.139048\nTrain: [1098/2931 (100%)]\tLoss: 0.158087\nEpoch: 5/10. Train set: Average loss: 0.1580\nEpoch: 5/10. Validation set: Average loss: 0.1389\nTrain: [0/2931 (0%)]\tLoss: 0.132739\nTrain: [1098/2931 (100%)]\tLoss: 0.154876\nEpoch: 6/10. Train set: Average loss: 0.1548\nEpoch: 6/10. Validation set: Average loss: 0.1262\nTrain: [0/2931 (0%)]\tLoss: 0.219492\nTrain: [1098/2931 (100%)]\tLoss: 0.153706\nEpoch: 7/10. Train set: Average loss: 0.1539\nEpoch: 7/10. Validation set: Average loss: 0.1573\nTrain: [0/2931 (0%)]\tLoss: 0.147850\nTrain: [1098/2931 (100%)]\tLoss: 0.156438\nEpoch: 8/10. Train set: Average loss: 0.1564\nEpoch: 8/10. Validation set: Average loss: 0.1581\nTrain: [0/2931 (0%)]\tLoss: 0.142134\nTrain: [1098/2931 (100%)]\tLoss: 0.149638\nEpoch: 9/10. Train set: Average loss: 0.1496\nEpoch: 9/10. Validation set: Average loss: 0.1305\nTrain: [0/2931 (0%)]\tLoss: 0.169827\nTrain: [1098/2931 (100%)]\tLoss: 0.143885\nEpoch: 10/10. Train set: Average loss: 0.1440\nEpoch: 10/10. 
Validation set: Average loss: 0.1292\nNumber features: 26\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.124704\nTrain: [1098/2931 (100%)]\tLoss: 0.171226\nEpoch: 1/10. Train set: Average loss: 0.1711\nEpoch: 1/10. Validation set: Average loss: 0.1580\nTrain: [0/2931 (0%)]\tLoss: 0.112250\nTrain: [1098/2931 (100%)]\tLoss: 0.175344\nEpoch: 2/10. Train set: Average loss: 0.1752\nEpoch: 2/10. Validation set: Average loss: 0.1330\nTrain: [0/2931 (0%)]\tLoss: 0.124082\nTrain: [1098/2931 (100%)]\tLoss: 0.172134\nEpoch: 3/10. Train set: Average loss: 0.1720\nEpoch: 3/10. Validation set: Average loss: 0.1312\nTrain: [0/2931 (0%)]\tLoss: 0.136928\nTrain: [1098/2931 (100%)]\tLoss: 0.171933\nEpoch: 4/10. Train set: Average loss: 0.1718\nEpoch: 4/10. Validation set: Average loss: 0.1352\nTrain: [0/2931 (0%)]\tLoss: 0.067254\nTrain: [1098/2931 (100%)]\tLoss: 0.158309\nEpoch: 5/10. Train set: Average loss: 0.1581\nEpoch: 5/10. Validation set: Average loss: 0.1460\nTrain: [0/2931 (0%)]\tLoss: 0.242276\nTrain: [1098/2931 (100%)]\tLoss: 0.160582\nEpoch: 6/10. Train set: Average loss: 0.1608\nEpoch: 6/10. Validation set: Average loss: 0.1358\nTrain: [0/2931 (0%)]\tLoss: 0.174257\nTrain: [1098/2931 (100%)]\tLoss: 0.156370\nEpoch: 7/10. Train set: Average loss: 0.1564\nEpoch: 7/10. Validation set: Average loss: 0.1234\nTrain: [0/2931 (0%)]\tLoss: 0.080392\nTrain: [1098/2931 (100%)]\tLoss: 0.158954\nEpoch: 8/10. Train set: Average loss: 0.1587\nEpoch: 8/10. Validation set: Average loss: 0.1455\nTrain: [0/2931 (0%)]\tLoss: 0.117181\nTrain: [1098/2931 (100%)]\tLoss: 0.150362\nEpoch: 9/10. Train set: Average loss: 0.1503\nEpoch: 9/10. Validation set: Average loss: 0.1327\nTrain: [0/2931 (0%)]\tLoss: 0.136321\nTrain: [1098/2931 (100%)]\tLoss: 0.146369\nEpoch: 10/10. Train set: Average loss: 0.1463\nEpoch: 10/10. Validation set: Average loss: 0.1311\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.062274\nTrain: [1098/2931 (100%)]\tLoss: 0.182203\nEpoch: 1/10. Train set: Average loss: 0.1819\nEpoch: 1/10. Validation set: Average loss: 0.1349\nTrain: [0/2931 (0%)]\tLoss: 0.238996\nTrain: [1098/2931 (100%)]\tLoss: 0.171139\nEpoch: 2/10. Train set: Average loss: 0.1713\nEpoch: 2/10. Validation set: Average loss: 0.1284\nTrain: [0/2931 (0%)]\tLoss: 0.128562\nTrain: [1098/2931 (100%)]\tLoss: 0.162321\nEpoch: 3/10. Train set: Average loss: 0.1622\nEpoch: 3/10. Validation set: Average loss: 0.1278\nTrain: [0/2931 (0%)]\tLoss: 0.115398\nTrain: [1098/2931 (100%)]\tLoss: 0.164754\nEpoch: 4/10. Train set: Average loss: 0.1646\nEpoch: 4/10. Validation set: Average loss: 0.1496\nTrain: [0/2931 (0%)]\tLoss: 0.236575\nTrain: [1098/2931 (100%)]\tLoss: 0.165085\nEpoch: 5/10. Train set: Average loss: 0.1653\nEpoch: 5/10. Validation set: Average loss: 0.1343\nTrain: [0/2931 (0%)]\tLoss: 0.137180\nTrain: [1098/2931 (100%)]\tLoss: 0.155550\nEpoch: 6/10. Train set: Average loss: 0.1555\nEpoch: 6/10. Validation set: Average loss: 0.1198\nTrain: [0/2931 (0%)]\tLoss: 0.080653\nTrain: [1098/2931 (100%)]\tLoss: 0.159697\nEpoch: 7/10. Train set: Average loss: 0.1595\nEpoch: 7/10. Validation set: Average loss: 0.1521\nTrain: [0/2931 (0%)]\tLoss: 0.092243\nTrain: [1098/2931 (100%)]\tLoss: 0.163641\nEpoch: 8/10. Train set: Average loss: 0.1634\nEpoch: 8/10. Validation set: Average loss: 0.1357\nTrain: [0/2931 (0%)]\tLoss: 0.104282\nTrain: [1098/2931 (100%)]\tLoss: 0.149658\nEpoch: 9/10. Train set: Average loss: 0.1495\nEpoch: 9/10. 
Validation set: Average loss: 0.1468\nTrain: [0/2931 (0%)]\tLoss: 0.085362\nTrain: [1098/2931 (100%)]\tLoss: 0.154410\nEpoch: 10/10. Train set: Average loss: 0.1542\nEpoch: 10/10. Validation set: Average loss: 0.1269\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.311960\nTrain: [1098/2931 (100%)]\tLoss: 0.166634\nEpoch: 1/10. Train set: Average loss: 0.1670\nEpoch: 1/10. Validation set: Average loss: 0.1327\nTrain: [0/2931 (0%)]\tLoss: 0.122402\nTrain: [1098/2931 (100%)]\tLoss: 0.173756\nEpoch: 2/10. Train set: Average loss: 0.1736\nEpoch: 2/10. Validation set: Average loss: 0.1691\nTrain: [0/2931 (0%)]\tLoss: 0.201551\nTrain: [1098/2931 (100%)]\tLoss: 0.183097\nEpoch: 3/10. Train set: Average loss: 0.1831\nEpoch: 3/10. Validation set: Average loss: 0.1276\nTrain: [0/2931 (0%)]\tLoss: 0.237154\nTrain: [1098/2931 (100%)]\tLoss: 0.158898\nEpoch: 4/10. Train set: Average loss: 0.1591\nEpoch: 4/10. Validation set: Average loss: 0.1266\nTrain: [0/2931 (0%)]\tLoss: 0.116748\nTrain: [1098/2931 (100%)]\tLoss: 0.158465\nEpoch: 5/10. Train set: Average loss: 0.1584\nEpoch: 5/10. Validation set: Average loss: 0.1331\nTrain: [0/2931 (0%)]\tLoss: 0.159547\nTrain: [1098/2931 (100%)]\tLoss: 0.158592\nEpoch: 6/10. Train set: Average loss: 0.1586\nEpoch: 6/10. Validation set: Average loss: 0.1382\nTrain: [0/2931 (0%)]\tLoss: 0.142852\nTrain: [1098/2931 (100%)]\tLoss: 0.161273\nEpoch: 7/10. Train set: Average loss: 0.1612\nEpoch: 7/10. Validation set: Average loss: 0.1474\nTrain: [0/2931 (0%)]\tLoss: 0.085167\nTrain: [1098/2931 (100%)]\tLoss: 0.157473\nEpoch: 8/10. Train set: Average loss: 0.1573\nEpoch: 8/10. Validation set: Average loss: 0.1288\nTrain: [0/2931 (0%)]\tLoss: 0.186916\nTrain: [1098/2931 (100%)]\tLoss: 0.145952\nEpoch: 9/10. Train set: Average loss: 0.1461\nEpoch: 9/10. Validation set: Average loss: 0.1172\nTrain: [0/2931 (0%)]\tLoss: 0.063336\nTrain: [1098/2931 (100%)]\tLoss: 0.150492\nEpoch: 10/10. Train set: Average loss: 0.1503\nEpoch: 10/10. Validation set: Average loss: 0.1192\nNumber features: 27\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.249470\nTrain: [1098/2931 (100%)]\tLoss: 0.159181\nEpoch: 1/10. Train set: Average loss: 0.1594\nEpoch: 1/10. Validation set: Average loss: 0.1384\nTrain: [0/2931 (0%)]\tLoss: 0.174331\nTrain: [1098/2931 (100%)]\tLoss: 0.162680\nEpoch: 2/10. Train set: Average loss: 0.1627\nEpoch: 2/10. Validation set: Average loss: 0.1398\nTrain: [0/2931 (0%)]\tLoss: 0.138225\nTrain: [1098/2931 (100%)]\tLoss: 0.163358\nEpoch: 3/10. Train set: Average loss: 0.1633\nEpoch: 3/10. Validation set: Average loss: 0.1639\nTrain: [0/2931 (0%)]\tLoss: 0.209103\nTrain: [1098/2931 (100%)]\tLoss: 0.157782\nEpoch: 4/10. Train set: Average loss: 0.1579\nEpoch: 4/10. Validation set: Average loss: 0.1317\nTrain: [0/2931 (0%)]\tLoss: 0.234477\nTrain: [1098/2931 (100%)]\tLoss: 0.152426\nEpoch: 5/10. Train set: Average loss: 0.1526\nEpoch: 5/10. Validation set: Average loss: 0.1489\nTrain: [0/2931 (0%)]\tLoss: 0.136066\nTrain: [1098/2931 (100%)]\tLoss: 0.145746\nEpoch: 6/10. Train set: Average loss: 0.1457\nEpoch: 6/10. Validation set: Average loss: 0.1444\nTrain: [0/2931 (0%)]\tLoss: 0.221527\nTrain: [1098/2931 (100%)]\tLoss: 0.154401\nEpoch: 7/10. Train set: Average loss: 0.1546\nEpoch: 7/10. Validation set: Average loss: 0.1169\nTrain: [0/2931 (0%)]\tLoss: 0.189269\nTrain: [1098/2931 (100%)]\tLoss: 0.149211\nEpoch: 8/10. Train set: Average loss: 0.1493\nEpoch: 8/10. 
Validation set: Average loss: 0.1359\nTrain: [0/2931 (0%)]\tLoss: 0.197279\nTrain: [1098/2931 (100%)]\tLoss: 0.140353\nEpoch: 9/10. Train set: Average loss: 0.1405\nEpoch: 9/10. Validation set: Average loss: 0.1263\nTrain: [0/2931 (0%)]\tLoss: 0.159202\nTrain: [1098/2931 (100%)]\tLoss: 0.143515\nEpoch: 10/10. Train set: Average loss: 0.1436\nEpoch: 10/10. Validation set: Average loss: 0.1287\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.249363\nTrain: [1098/2931 (100%)]\tLoss: 0.159831\nEpoch: 1/10. Train set: Average loss: 0.1601\nEpoch: 1/10. Validation set: Average loss: 0.1701\nTrain: [0/2931 (0%)]\tLoss: 0.129866\nTrain: [1098/2931 (100%)]\tLoss: 0.161317\nEpoch: 2/10. Train set: Average loss: 0.1612\nEpoch: 2/10. Validation set: Average loss: 0.1522\nTrain: [0/2931 (0%)]\tLoss: 0.167693\nTrain: [1098/2931 (100%)]\tLoss: 0.151470\nEpoch: 3/10. Train set: Average loss: 0.1515\nEpoch: 3/10. Validation set: Average loss: 0.1341\nTrain: [0/2931 (0%)]\tLoss: 0.181839\nTrain: [1098/2931 (100%)]\tLoss: 0.146119\nEpoch: 4/10. Train set: Average loss: 0.1462\nEpoch: 4/10. Validation set: Average loss: 0.1501\nTrain: [0/2931 (0%)]\tLoss: 0.158023\nTrain: [1098/2931 (100%)]\tLoss: 0.159043\nEpoch: 5/10. Train set: Average loss: 0.1590\nEpoch: 5/10. Validation set: Average loss: 0.1429\nTrain: [0/2931 (0%)]\tLoss: 0.093739\nTrain: [1098/2931 (100%)]\tLoss: 0.149973\nEpoch: 6/10. Train set: Average loss: 0.1498\nEpoch: 6/10. Validation set: Average loss: 0.1313\nTrain: [0/2931 (0%)]\tLoss: 0.150517\nTrain: [1098/2931 (100%)]\tLoss: 0.146856\nEpoch: 7/10. Train set: Average loss: 0.1469\nEpoch: 7/10. Validation set: Average loss: 0.1407\nTrain: [0/2931 (0%)]\tLoss: 0.153756\nTrain: [1098/2931 (100%)]\tLoss: 0.148574\nEpoch: 8/10. Train set: Average loss: 0.1486\nEpoch: 8/10. Validation set: Average loss: 0.1549\nTrain: [0/2931 (0%)]\tLoss: 0.127515\nTrain: [1098/2931 (100%)]\tLoss: 0.144436\nEpoch: 9/10. Train set: Average loss: 0.1444\nEpoch: 9/10. Validation set: Average loss: 0.1333\nTrain: [0/2931 (0%)]\tLoss: 0.161387\nTrain: [1098/2931 (100%)]\tLoss: 0.144136\nEpoch: 10/10. Train set: Average loss: 0.1442\nEpoch: 10/10. Validation set: Average loss: 0.1345\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.186725\nTrain: [1098/2931 (100%)]\tLoss: 0.159494\nEpoch: 1/10. Train set: Average loss: 0.1596\nEpoch: 1/10. Validation set: Average loss: 0.1826\nTrain: [0/2931 (0%)]\tLoss: 0.113985\nTrain: [1098/2931 (100%)]\tLoss: 0.156810\nEpoch: 2/10. Train set: Average loss: 0.1567\nEpoch: 2/10. Validation set: Average loss: 0.1558\nTrain: [0/2931 (0%)]\tLoss: 0.102837\nTrain: [1098/2931 (100%)]\tLoss: 0.149513\nEpoch: 3/10. Train set: Average loss: 0.1494\nEpoch: 3/10. Validation set: Average loss: 0.1632\nTrain: [0/2931 (0%)]\tLoss: 0.093470\nTrain: [1098/2931 (100%)]\tLoss: 0.156041\nEpoch: 4/10. Train set: Average loss: 0.1559\nEpoch: 4/10. Validation set: Average loss: 0.1457\nTrain: [0/2931 (0%)]\tLoss: 0.097932\nTrain: [1098/2931 (100%)]\tLoss: 0.146024\nEpoch: 5/10. Train set: Average loss: 0.1459\nEpoch: 5/10. Validation set: Average loss: 0.1324\nTrain: [0/2931 (0%)]\tLoss: 0.128518\nTrain: [1098/2931 (100%)]\tLoss: 0.148148\nEpoch: 6/10. Train set: Average loss: 0.1481\nEpoch: 6/10. Validation set: Average loss: 0.1348\nTrain: [0/2931 (0%)]\tLoss: 0.176906\nTrain: [1098/2931 (100%)]\tLoss: 0.147265\nEpoch: 7/10. Train set: Average loss: 0.1473\nEpoch: 7/10. Validation set: Average loss: 0.1586\nTrain: [0/2931 (0%)]\tLoss: 0.118084\nTrain: [1098/2931 (100%)]\tLoss: 0.150712\nEpoch: 8/10. 
Train set: Average loss: 0.1506\nEpoch: 8/10. Validation set: Average loss: 0.1434\nTrain: [0/2931 (0%)]\tLoss: 0.128895\nTrain: [1098/2931 (100%)]\tLoss: 0.152512\nEpoch: 9/10. Train set: Average loss: 0.1524\nEpoch: 9/10. Validation set: Average loss: 0.1237\nTrain: [0/2931 (0%)]\tLoss: 0.226390\nTrain: [1098/2931 (100%)]\tLoss: 0.142281\nEpoch: 10/10. Train set: Average loss: 0.1425\nEpoch: 10/10. Validation set: Average loss: 0.1203\nNumber features: 28\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.187094\nTrain: [1098/2931 (100%)]\tLoss: 0.164324\nEpoch: 1/10. Train set: Average loss: 0.1644\nEpoch: 1/10. Validation set: Average loss: 0.1298\nTrain: [0/2931 (0%)]\tLoss: 0.196688\nTrain: [1098/2931 (100%)]\tLoss: 0.158668\nEpoch: 2/10. Train set: Average loss: 0.1588\nEpoch: 2/10. Validation set: Average loss: 0.1600\nTrain: [0/2931 (0%)]\tLoss: 0.202048\nTrain: [1098/2931 (100%)]\tLoss: 0.153975\nEpoch: 3/10. Train set: Average loss: 0.1541\nEpoch: 3/10. Validation set: Average loss: 0.1338\nTrain: [0/2931 (0%)]\tLoss: 0.188108\nTrain: [1098/2931 (100%)]\tLoss: 0.156860\nEpoch: 4/10. Train set: Average loss: 0.1569\nEpoch: 4/10. Validation set: Average loss: 0.1678\nTrain: [0/2931 (0%)]\tLoss: 0.219401\nTrain: [1098/2931 (100%)]\tLoss: 0.152633\nEpoch: 5/10. Train set: Average loss: 0.1528\nEpoch: 5/10. Validation set: Average loss: 0.1623\nTrain: [0/2931 (0%)]\tLoss: 0.192210\nTrain: [1098/2931 (100%)]\tLoss: 0.147832\nEpoch: 6/10. Train set: Average loss: 0.1480\nEpoch: 6/10. Validation set: Average loss: 0.1697\nTrain: [0/2931 (0%)]\tLoss: 0.247310\nTrain: [1098/2931 (100%)]\tLoss: 0.155243\nEpoch: 7/10. Train set: Average loss: 0.1555\nEpoch: 7/10. Validation set: Average loss: 0.1409\nTrain: [0/2931 (0%)]\tLoss: 0.188480\nTrain: [1098/2931 (100%)]\tLoss: 0.148560\nEpoch: 8/10. Train set: Average loss: 0.1487\nEpoch: 8/10. Validation set: Average loss: 0.1773\nTrain: [0/2931 (0%)]\tLoss: 0.197589\nTrain: [1098/2931 (100%)]\tLoss: 0.154166\nEpoch: 9/10. Train set: Average loss: 0.1543\nEpoch: 9/10. Validation set: Average loss: 0.1337\nTrain: [0/2931 (0%)]\tLoss: 0.115733\nTrain: [1098/2931 (100%)]\tLoss: 0.147844\nEpoch: 10/10. Train set: Average loss: 0.1478\nEpoch: 10/10. Validation set: Average loss: 0.1323\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.186960\nTrain: [1098/2931 (100%)]\tLoss: 0.165393\nEpoch: 1/10. Train set: Average loss: 0.1655\nEpoch: 1/10. Validation set: Average loss: 0.1474\nTrain: [0/2931 (0%)]\tLoss: 0.194405\nTrain: [1098/2931 (100%)]\tLoss: 0.159772\nEpoch: 2/10. Train set: Average loss: 0.1599\nEpoch: 2/10. Validation set: Average loss: 0.1843\nTrain: [0/2931 (0%)]\tLoss: 0.171187\nTrain: [1098/2931 (100%)]\tLoss: 0.159946\nEpoch: 3/10. Train set: Average loss: 0.1600\nEpoch: 3/10. Validation set: Average loss: 0.1762\nTrain: [0/2931 (0%)]\tLoss: 0.148010\nTrain: [1098/2931 (100%)]\tLoss: 0.154043\nEpoch: 4/10. Train set: Average loss: 0.1540\nEpoch: 4/10. Validation set: Average loss: 0.1660\nTrain: [0/2931 (0%)]\tLoss: 0.110486\nTrain: [1098/2931 (100%)]\tLoss: 0.153204\nEpoch: 5/10. Train set: Average loss: 0.1531\nEpoch: 5/10. Validation set: Average loss: 0.1624\nTrain: [0/2931 (0%)]\tLoss: 0.133013\nTrain: [1098/2931 (100%)]\tLoss: 0.153561\nEpoch: 6/10. Train set: Average loss: 0.1535\nEpoch: 6/10. Validation set: Average loss: 0.1624\nTrain: [0/2931 (0%)]\tLoss: 0.150881\nTrain: [1098/2931 (100%)]\tLoss: 0.150514\nEpoch: 7/10. Train set: Average loss: 0.1505\nEpoch: 7/10. 
Validation set: Average loss: 0.1643\nTrain: [0/2931 (0%)]\tLoss: 0.119197\nTrain: [1098/2931 (100%)]\tLoss: 0.150980\nEpoch: 8/10. Train set: Average loss: 0.1509\nEpoch: 8/10. Validation set: Average loss: 0.1676\nTrain: [0/2931 (0%)]\tLoss: 0.153296\nTrain: [1098/2931 (100%)]\tLoss: 0.147561\nEpoch: 9/10. Train set: Average loss: 0.1476\nEpoch: 9/10. Validation set: Average loss: 0.1307\nTrain: [0/2931 (0%)]\tLoss: 0.103125\nTrain: [1098/2931 (100%)]\tLoss: 0.142013\nEpoch: 10/10. Train set: Average loss: 0.1419\nEpoch: 10/10. Validation set: Average loss: 0.1317\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.187086\nTrain: [1098/2931 (100%)]\tLoss: 0.163230\nEpoch: 1/10. Train set: Average loss: 0.1633\nEpoch: 1/10. Validation set: Average loss: 0.1881\nTrain: [0/2931 (0%)]\tLoss: 0.273574\nTrain: [1098/2931 (100%)]\tLoss: 0.159456\nEpoch: 2/10. Train set: Average loss: 0.1598\nEpoch: 2/10. Validation set: Average loss: 0.1599\nTrain: [0/2931 (0%)]\tLoss: 0.129542\nTrain: [1098/2931 (100%)]\tLoss: 0.160175\nEpoch: 3/10. Train set: Average loss: 0.1601\nEpoch: 3/10. Validation set: Average loss: 0.1885\nTrain: [0/2931 (0%)]\tLoss: 0.190118\nTrain: [1098/2931 (100%)]\tLoss: 0.162417\nEpoch: 4/10. Train set: Average loss: 0.1625\nEpoch: 4/10. Validation set: Average loss: 0.1752\nTrain: [0/2931 (0%)]\tLoss: 0.211888\nTrain: [1098/2931 (100%)]\tLoss: 0.159933\nEpoch: 5/10. Train set: Average loss: 0.1601\nEpoch: 5/10. Validation set: Average loss: 0.1525\nTrain: [0/2931 (0%)]\tLoss: 0.149306\nTrain: [1098/2931 (100%)]\tLoss: 0.157651\nEpoch: 6/10. Train set: Average loss: 0.1576\nEpoch: 6/10. Validation set: Average loss: 0.1589\nTrain: [0/2931 (0%)]\tLoss: 0.192896\nTrain: [1098/2931 (100%)]\tLoss: 0.150509\nEpoch: 7/10. Train set: Average loss: 0.1506\nEpoch: 7/10. Validation set: Average loss: 0.1560\nTrain: [0/2931 (0%)]\tLoss: 0.132654\nTrain: [1098/2931 (100%)]\tLoss: 0.149817\nEpoch: 8/10. Train set: Average loss: 0.1498\nEpoch: 8/10. Validation set: Average loss: 0.1594\nTrain: [0/2931 (0%)]\tLoss: 0.252325\nTrain: [1098/2931 (100%)]\tLoss: 0.148748\nEpoch: 9/10. Train set: Average loss: 0.1490\nEpoch: 9/10. Validation set: Average loss: 0.1368\nTrain: [0/2931 (0%)]\tLoss: 0.122695\nTrain: [1098/2931 (100%)]\tLoss: 0.146967\nEpoch: 10/10. Train set: Average loss: 0.1469\nEpoch: 10/10. Validation set: Average loss: 0.1363\nNumber features: 29\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.187151\nTrain: [1098/2931 (100%)]\tLoss: 0.169946\nEpoch: 1/10. Train set: Average loss: 0.1700\nEpoch: 1/10. Validation set: Average loss: 0.1599\nTrain: [0/2931 (0%)]\tLoss: 0.139641\nTrain: [1098/2931 (100%)]\tLoss: 0.158696\nEpoch: 2/10. Train set: Average loss: 0.1586\nEpoch: 2/10. Validation set: Average loss: 0.1526\nTrain: [0/2931 (0%)]\tLoss: 0.038579\nTrain: [1098/2931 (100%)]\tLoss: 0.157308\nEpoch: 3/10. Train set: Average loss: 0.1570\nEpoch: 3/10. Validation set: Average loss: 0.1329\nTrain: [0/2931 (0%)]\tLoss: 0.162208\nTrain: [1098/2931 (100%)]\tLoss: 0.154768\nEpoch: 4/10. Train set: Average loss: 0.1548\nEpoch: 4/10. Validation set: Average loss: 0.1414\nTrain: [0/2931 (0%)]\tLoss: 0.117717\nTrain: [1098/2931 (100%)]\tLoss: 0.157052\nEpoch: 5/10. Train set: Average loss: 0.1569\nEpoch: 5/10. Validation set: Average loss: 0.1520\nTrain: [0/2931 (0%)]\tLoss: 0.081430\nTrain: [1098/2931 (100%)]\tLoss: 0.154324\nEpoch: 6/10. Train set: Average loss: 0.1541\nEpoch: 6/10. 
Validation set: Average loss: 0.1461\nTrain: [0/2931 (0%)]\tLoss: 0.141874\nTrain: [1098/2931 (100%)]\tLoss: 0.155396\nEpoch: 7/10. Train set: Average loss: 0.1554\nEpoch: 7/10. Validation set: Average loss: 0.1461\nTrain: [0/2931 (0%)]\tLoss: 0.148374\nTrain: [1098/2931 (100%)]\tLoss: 0.148227\nEpoch: 8/10. Train set: Average loss: 0.1482\nEpoch: 8/10. Validation set: Average loss: 0.1369\nTrain: [0/2931 (0%)]\tLoss: 0.094895\nTrain: [1098/2931 (100%)]\tLoss: 0.143871\nEpoch: 9/10. Train set: Average loss: 0.1437\nEpoch: 9/10. Validation set: Average loss: 0.1272\nTrain: [0/2931 (0%)]\tLoss: 0.141674\nTrain: [1098/2931 (100%)]\tLoss: 0.143974\nEpoch: 10/10. Train set: Average loss: 0.1440\nEpoch: 10/10. Validation set: Average loss: 0.1270\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.186830\nTrain: [1098/2931 (100%)]\tLoss: 0.163896\nEpoch: 1/10. Train set: Average loss: 0.1640\nEpoch: 1/10. Validation set: Average loss: 0.1362\nTrain: [0/2931 (0%)]\tLoss: 0.264611\nTrain: [1098/2931 (100%)]\tLoss: 0.161377\nEpoch: 2/10. Train set: Average loss: 0.1617\nEpoch: 2/10. Validation set: Average loss: 0.1423\nTrain: [0/2931 (0%)]\tLoss: 0.138614\nTrain: [1098/2931 (100%)]\tLoss: 0.159175\nEpoch: 3/10. Train set: Average loss: 0.1591\nEpoch: 3/10. Validation set: Average loss: 0.1338\nTrain: [0/2931 (0%)]\tLoss: 0.146621\nTrain: [1098/2931 (100%)]\tLoss: 0.163240\nEpoch: 4/10. Train set: Average loss: 0.1632\nEpoch: 4/10. Validation set: Average loss: 0.1466\nTrain: [0/2931 (0%)]\tLoss: 0.113783\nTrain: [1098/2931 (100%)]\tLoss: 0.153152\nEpoch: 5/10. Train set: Average loss: 0.1530\nEpoch: 5/10. Validation set: Average loss: 0.1358\nTrain: [0/2931 (0%)]\tLoss: 0.096575\nTrain: [1098/2931 (100%)]\tLoss: 0.149658\nEpoch: 6/10. Train set: Average loss: 0.1495\nEpoch: 6/10. Validation set: Average loss: 0.1412\nTrain: [0/2931 (0%)]\tLoss: 0.179798\nTrain: [1098/2931 (100%)]\tLoss: 0.149637\nEpoch: 7/10. Train set: Average loss: 0.1497\nEpoch: 7/10. Validation set: Average loss: 0.1390\nTrain: [0/2931 (0%)]\tLoss: 0.097009\nTrain: [1098/2931 (100%)]\tLoss: 0.153575\nEpoch: 8/10. Train set: Average loss: 0.1534\nEpoch: 8/10. Validation set: Average loss: 0.1336\nTrain: [0/2931 (0%)]\tLoss: 0.251368\nTrain: [1098/2931 (100%)]\tLoss: 0.148225\nEpoch: 9/10. Train set: Average loss: 0.1485\nEpoch: 9/10. Validation set: Average loss: 0.1322\nTrain: [0/2931 (0%)]\tLoss: 0.070218\nTrain: [1098/2931 (100%)]\tLoss: 0.143443\nEpoch: 10/10. Train set: Average loss: 0.1432\nEpoch: 10/10. Validation set: Average loss: 0.1325\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.187329\nTrain: [1098/2931 (100%)]\tLoss: 0.161806\nEpoch: 1/10. Train set: Average loss: 0.1619\nEpoch: 1/10. Validation set: Average loss: 0.1414\nTrain: [0/2931 (0%)]\tLoss: 0.173998\nTrain: [1098/2931 (100%)]\tLoss: 0.156668\nEpoch: 2/10. Train set: Average loss: 0.1567\nEpoch: 2/10. Validation set: Average loss: 0.1346\nTrain: [0/2931 (0%)]\tLoss: 0.164049\nTrain: [1098/2931 (100%)]\tLoss: 0.163945\nEpoch: 3/10. Train set: Average loss: 0.1639\nEpoch: 3/10. Validation set: Average loss: 0.1298\nTrain: [0/2931 (0%)]\tLoss: 0.154958\nTrain: [1098/2931 (100%)]\tLoss: 0.155633\nEpoch: 4/10. Train set: Average loss: 0.1556\nEpoch: 4/10. Validation set: Average loss: 0.1251\nTrain: [0/2931 (0%)]\tLoss: 0.108935\nTrain: [1098/2931 (100%)]\tLoss: 0.152857\nEpoch: 5/10. Train set: Average loss: 0.1527\nEpoch: 5/10. Validation set: Average loss: 0.1289\nTrain: [0/2931 (0%)]\tLoss: 0.075868\nTrain: [1098/2931 (100%)]\tLoss: 0.155791\nEpoch: 6/10. 
Train set: Average loss: 0.1556\nEpoch: 6/10. Validation set: Average loss: 0.1540\nTrain: [0/2931 (0%)]\tLoss: 0.154149\nTrain: [1098/2931 (100%)]\tLoss: 0.151899\nEpoch: 7/10. Train set: Average loss: 0.1519\nEpoch: 7/10. Validation set: Average loss: 0.1378\nTrain: [0/2931 (0%)]\tLoss: 0.129345\nTrain: [1098/2931 (100%)]\tLoss: 0.149898\nEpoch: 8/10. Train set: Average loss: 0.1498\nEpoch: 8/10. Validation set: Average loss: 0.1228\nTrain: [0/2931 (0%)]\tLoss: 0.145263\nTrain: [1098/2931 (100%)]\tLoss: 0.145774\nEpoch: 9/10. Train set: Average loss: 0.1458\nEpoch: 9/10. Validation set: Average loss: 0.1263\nTrain: [0/2931 (0%)]\tLoss: 0.087092\nTrain: [1098/2931 (100%)]\tLoss: 0.147097\nEpoch: 10/10. Train set: Average loss: 0.1469\nEpoch: 10/10. Validation set: Average loss: 0.1264\nNumber features: 30\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.124691\nTrain: [1098/2931 (100%)]\tLoss: 0.169133\nEpoch: 1/10. Train set: Average loss: 0.1690\nEpoch: 1/10. Validation set: Average loss: 0.1721\nTrain: [0/2931 (0%)]\tLoss: 0.103310\nTrain: [1098/2931 (100%)]\tLoss: 0.158031\nEpoch: 2/10. Train set: Average loss: 0.1579\nEpoch: 2/10. Validation set: Average loss: 0.1485\nTrain: [0/2931 (0%)]\tLoss: 0.102753\nTrain: [1098/2931 (100%)]\tLoss: 0.153030\nEpoch: 3/10. Train set: Average loss: 0.1529\nEpoch: 3/10. Validation set: Average loss: 0.1535\nTrain: [0/2931 (0%)]\tLoss: 0.055598\nTrain: [1098/2931 (100%)]\tLoss: 0.160293\nEpoch: 4/10. Train set: Average loss: 0.1600\nEpoch: 4/10. Validation set: Average loss: 0.1688\nTrain: [0/2931 (0%)]\tLoss: 0.057415\nTrain: [1098/2931 (100%)]\tLoss: 0.156311\nEpoch: 5/10. Train set: Average loss: 0.1560\nEpoch: 5/10. Validation set: Average loss: 0.1649\nTrain: [0/2931 (0%)]\tLoss: 0.088550\nTrain: [1098/2931 (100%)]\tLoss: 0.158179\nEpoch: 6/10. Train set: Average loss: 0.1580\nEpoch: 6/10. Validation set: Average loss: 0.1460\nTrain: [0/2931 (0%)]\tLoss: 0.075446\nTrain: [1098/2931 (100%)]\tLoss: 0.148325\nEpoch: 7/10. Train set: Average loss: 0.1481\nEpoch: 7/10. Validation set: Average loss: 0.1554\nTrain: [0/2931 (0%)]\tLoss: 0.100993\nTrain: [1098/2931 (100%)]\tLoss: 0.146673\nEpoch: 8/10. Train set: Average loss: 0.1465\nEpoch: 8/10. Validation set: Average loss: 0.1666\nTrain: [0/2931 (0%)]\tLoss: 0.078778\nTrain: [1098/2931 (100%)]\tLoss: 0.154838\nEpoch: 9/10. Train set: Average loss: 0.1546\nEpoch: 9/10. Validation set: Average loss: 0.1416\nTrain: [0/2931 (0%)]\tLoss: 0.051940\nTrain: [1098/2931 (100%)]\tLoss: 0.142689\nEpoch: 10/10. Train set: Average loss: 0.1424\nEpoch: 10/10. Validation set: Average loss: 0.1378\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.062425\nTrain: [1098/2931 (100%)]\tLoss: 0.164526\nEpoch: 1/10. Train set: Average loss: 0.1642\nEpoch: 1/10. Validation set: Average loss: 0.2111\nTrain: [0/2931 (0%)]\tLoss: 0.218390\nTrain: [1098/2931 (100%)]\tLoss: 0.156381\nEpoch: 2/10. Train set: Average loss: 0.1566\nEpoch: 2/10. Validation set: Average loss: 0.1735\nTrain: [0/2931 (0%)]\tLoss: 0.099059\nTrain: [1098/2931 (100%)]\tLoss: 0.151440\nEpoch: 3/10. Train set: Average loss: 0.1513\nEpoch: 3/10. Validation set: Average loss: 0.1487\nTrain: [0/2931 (0%)]\tLoss: 0.125248\nTrain: [1098/2931 (100%)]\tLoss: 0.151136\nEpoch: 4/10. Train set: Average loss: 0.1511\nEpoch: 4/10. Validation set: Average loss: 0.1570\nTrain: [0/2931 (0%)]\tLoss: 0.109496\nTrain: [1098/2931 (100%)]\tLoss: 0.152981\nEpoch: 5/10. Train set: Average loss: 0.1529\nEpoch: 5/10. 
Validation set: Average loss: 0.1644\nTrain: [0/2931 (0%)]\tLoss: 0.062087\nTrain: [1098/2931 (100%)]\tLoss: 0.151760\nEpoch: 6/10. Train set: Average loss: 0.1515\nEpoch: 6/10. Validation set: Average loss: 0.1728\nTrain: [0/2931 (0%)]\tLoss: 0.133733\nTrain: [1098/2931 (100%)]\tLoss: 0.147460\nEpoch: 7/10. Train set: Average loss: 0.1474\nEpoch: 7/10. Validation set: Average loss: 0.1560\nTrain: [0/2931 (0%)]\tLoss: 0.179337\nTrain: [1098/2931 (100%)]\tLoss: 0.150683\nEpoch: 8/10. Train set: Average loss: 0.1508\nEpoch: 8/10. Validation set: Average loss: 0.1669\nTrain: [0/2931 (0%)]\tLoss: 0.169967\nTrain: [1098/2931 (100%)]\tLoss: 0.149116\nEpoch: 9/10. Train set: Average loss: 0.1492\nEpoch: 9/10. Validation set: Average loss: 0.1410\nTrain: [0/2931 (0%)]\tLoss: 0.144343\nTrain: [1098/2931 (100%)]\tLoss: 0.143989\nEpoch: 10/10. Train set: Average loss: 0.1440\nEpoch: 10/10. Validation set: Average loss: 0.1406\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.310828\nTrain: [1098/2931 (100%)]\tLoss: 0.166207\nEpoch: 1/10. Train set: Average loss: 0.1666\nEpoch: 1/10. Validation set: Average loss: 0.1622\nTrain: [0/2931 (0%)]\tLoss: 0.096186\nTrain: [1098/2931 (100%)]\tLoss: 0.153853\nEpoch: 2/10. Train set: Average loss: 0.1537\nEpoch: 2/10. Validation set: Average loss: 0.1625\nTrain: [0/2931 (0%)]\tLoss: 0.115149\nTrain: [1098/2931 (100%)]\tLoss: 0.152969\nEpoch: 3/10. Train set: Average loss: 0.1529\nEpoch: 3/10. Validation set: Average loss: 0.1734\nTrain: [0/2931 (0%)]\tLoss: 0.158804\nTrain: [1098/2931 (100%)]\tLoss: 0.146850\nEpoch: 4/10. Train set: Average loss: 0.1469\nEpoch: 4/10. Validation set: Average loss: 0.1736\nTrain: [0/2931 (0%)]\tLoss: 0.119094\nTrain: [1098/2931 (100%)]\tLoss: 0.150322\nEpoch: 5/10. Train set: Average loss: 0.1502\nEpoch: 5/10. Validation set: Average loss: 0.1666\nTrain: [0/2931 (0%)]\tLoss: 0.149572\nTrain: [1098/2931 (100%)]\tLoss: 0.158912\nEpoch: 6/10. Train set: Average loss: 0.1589\nEpoch: 6/10. Validation set: Average loss: 0.1765\nTrain: [0/2931 (0%)]\tLoss: 0.076768\nTrain: [1098/2931 (100%)]\tLoss: 0.151203\nEpoch: 7/10. Train set: Average loss: 0.1510\nEpoch: 7/10. Validation set: Average loss: 0.1636\nTrain: [0/2931 (0%)]\tLoss: 0.202072\nTrain: [1098/2931 (100%)]\tLoss: 0.143306\nEpoch: 8/10. Train set: Average loss: 0.1435\nEpoch: 8/10. Validation set: Average loss: 0.1582\nTrain: [0/2931 (0%)]\tLoss: 0.154329\nTrain: [1098/2931 (100%)]\tLoss: 0.141083\nEpoch: 9/10. Train set: Average loss: 0.1411\nEpoch: 9/10. Validation set: Average loss: 0.1479\nTrain: [0/2931 (0%)]\tLoss: 0.151627\nTrain: [1098/2931 (100%)]\tLoss: 0.143546\nEpoch: 10/10. Train set: Average loss: 0.1436\nEpoch: 10/10. Validation set: Average loss: 0.1475\nNumber features: 31\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.248521\nTrain: [1098/2931 (100%)]\tLoss: 0.166950\nEpoch: 1/10. Train set: Average loss: 0.1672\nEpoch: 1/10. Validation set: Average loss: 0.1457\nTrain: [0/2931 (0%)]\tLoss: 0.182320\nTrain: [1098/2931 (100%)]\tLoss: 0.156441\nEpoch: 2/10. Train set: Average loss: 0.1565\nEpoch: 2/10. Validation set: Average loss: 0.1462\nTrain: [0/2931 (0%)]\tLoss: 0.185170\nTrain: [1098/2931 (100%)]\tLoss: 0.155619\nEpoch: 3/10. Train set: Average loss: 0.1557\nEpoch: 3/10. Validation set: Average loss: 0.1644\nTrain: [0/2931 (0%)]\tLoss: 0.105308\nTrain: [1098/2931 (100%)]\tLoss: 0.156844\nEpoch: 4/10. Train set: Average loss: 0.1567\nEpoch: 4/10. 
Validation set: Average loss: 0.1973\nTrain: [0/2931 (0%)]\tLoss: 0.163831\nTrain: [1098/2931 (100%)]\tLoss: 0.158001\nEpoch: 5/10. Train set: Average loss: 0.1580\nEpoch: 5/10. Validation set: Average loss: 0.1698\nTrain: [0/2931 (0%)]\tLoss: 0.181152\nTrain: [1098/2931 (100%)]\tLoss: 0.157006\nEpoch: 6/10. Train set: Average loss: 0.1571\nEpoch: 6/10. Validation set: Average loss: 0.1367\nTrain: [0/2931 (0%)]\tLoss: 0.209435\nTrain: [1098/2931 (100%)]\tLoss: 0.149188\nEpoch: 7/10. Train set: Average loss: 0.1494\nEpoch: 7/10. Validation set: Average loss: 0.1610\nTrain: [0/2931 (0%)]\tLoss: 0.129841\nTrain: [1098/2931 (100%)]\tLoss: 0.149563\nEpoch: 8/10. Train set: Average loss: 0.1495\nEpoch: 8/10. Validation set: Average loss: 0.1446\nTrain: [0/2931 (0%)]\tLoss: 0.147648\nTrain: [1098/2931 (100%)]\tLoss: 0.149522\nEpoch: 9/10. Train set: Average loss: 0.1495\nEpoch: 9/10. Validation set: Average loss: 0.1375\nTrain: [0/2931 (0%)]\tLoss: 0.145646\nTrain: [1098/2931 (100%)]\tLoss: 0.136413\nEpoch: 10/10. Train set: Average loss: 0.1364\nEpoch: 10/10. Validation set: Average loss: 0.1355\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.124579\nTrain: [1098/2931 (100%)]\tLoss: 0.164295\nEpoch: 1/10. Train set: Average loss: 0.1642\nEpoch: 1/10. Validation set: Average loss: 0.1395\nTrain: [0/2931 (0%)]\tLoss: 0.126717\nTrain: [1098/2931 (100%)]\tLoss: 0.158481\nEpoch: 2/10. Train set: Average loss: 0.1584\nEpoch: 2/10. Validation set: Average loss: 0.1513\nTrain: [0/2931 (0%)]\tLoss: 0.061076\nTrain: [1098/2931 (100%)]\tLoss: 0.152363\nEpoch: 3/10. Train set: Average loss: 0.1521\nEpoch: 3/10. Validation set: Average loss: 0.1411\nTrain: [0/2931 (0%)]\tLoss: 0.121035\nTrain: [1098/2931 (100%)]\tLoss: 0.149102\nEpoch: 4/10. Train set: Average loss: 0.1490\nEpoch: 4/10. Validation set: Average loss: 0.1338\nTrain: [0/2931 (0%)]\tLoss: 0.206215\nTrain: [1098/2931 (100%)]\tLoss: 0.152189\nEpoch: 5/10. Train set: Average loss: 0.1523\nEpoch: 5/10. Validation set: Average loss: 0.1385\nTrain: [0/2931 (0%)]\tLoss: 0.087041\nTrain: [1098/2931 (100%)]\tLoss: 0.151436\nEpoch: 6/10. Train set: Average loss: 0.1513\nEpoch: 6/10. Validation set: Average loss: 0.1473\nTrain: [0/2931 (0%)]\tLoss: 0.087386\nTrain: [1098/2931 (100%)]\tLoss: 0.155614\nEpoch: 7/10. Train set: Average loss: 0.1554\nEpoch: 7/10. Validation set: Average loss: 0.1404\nTrain: [0/2931 (0%)]\tLoss: 0.080025\nTrain: [1098/2931 (100%)]\tLoss: 0.143509\nEpoch: 8/10. Train set: Average loss: 0.1433\nEpoch: 8/10. Validation set: Average loss: 0.1415\nTrain: [0/2931 (0%)]\tLoss: 0.095473\nTrain: [1098/2931 (100%)]\tLoss: 0.137173\nEpoch: 9/10. Train set: Average loss: 0.1371\nEpoch: 9/10. Validation set: Average loss: 0.1334\nTrain: [0/2931 (0%)]\tLoss: 0.107733\nTrain: [1098/2931 (100%)]\tLoss: 0.139883\nEpoch: 10/10. Train set: Average loss: 0.1398\nEpoch: 10/10. Validation set: Average loss: 0.1337\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.186934\nTrain: [1098/2931 (100%)]\tLoss: 0.158998\nEpoch: 1/10. Train set: Average loss: 0.1591\nEpoch: 1/10. Validation set: Average loss: 0.1657\nTrain: [0/2931 (0%)]\tLoss: 0.086062\nTrain: [1098/2931 (100%)]\tLoss: 0.159568\nEpoch: 2/10. Train set: Average loss: 0.1594\nEpoch: 2/10. Validation set: Average loss: 0.1436\nTrain: [0/2931 (0%)]\tLoss: 0.220575\nTrain: [1098/2931 (100%)]\tLoss: 0.154894\nEpoch: 3/10. Train set: Average loss: 0.1551\nEpoch: 3/10. Validation set: Average loss: 0.1369\nTrain: [0/2931 (0%)]\tLoss: 0.142200\nTrain: [1098/2931 (100%)]\tLoss: 0.151321\nEpoch: 4/10. 
Train set: Average loss: 0.1513\nEpoch: 4/10. Validation set: Average loss: 0.1328\nTrain: [0/2931 (0%)]\tLoss: 0.149073\nTrain: [1098/2931 (100%)]\tLoss: 0.148801\nEpoch: 5/10. Train set: Average loss: 0.1488\nEpoch: 5/10. Validation set: Average loss: 0.1397\nTrain: [0/2931 (0%)]\tLoss: 0.102809\nTrain: [1098/2931 (100%)]\tLoss: 0.146777\nEpoch: 6/10. Train set: Average loss: 0.1467\nEpoch: 6/10. Validation set: Average loss: 0.1468\nTrain: [0/2931 (0%)]\tLoss: 0.094628\nTrain: [1098/2931 (100%)]\tLoss: 0.145475\nEpoch: 7/10. Train set: Average loss: 0.1453\nEpoch: 7/10. Validation set: Average loss: 0.1496\nTrain: [0/2931 (0%)]\tLoss: 0.122882\nTrain: [1098/2931 (100%)]\tLoss: 0.143522\nEpoch: 8/10. Train set: Average loss: 0.1435\nEpoch: 8/10. Validation set: Average loss: 0.1651\nTrain: [0/2931 (0%)]\tLoss: 0.143122\nTrain: [1098/2931 (100%)]\tLoss: 0.140964\nEpoch: 9/10. Train set: Average loss: 0.1410\nEpoch: 9/10. Validation set: Average loss: 0.1403\nTrain: [0/2931 (0%)]\tLoss: 0.145220\nTrain: [1098/2931 (100%)]\tLoss: 0.137472\nEpoch: 10/10. Train set: Average loss: 0.1375\nEpoch: 10/10. Validation set: Average loss: 0.1421\nNumber features: 32\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.124792\nTrain: [1098/2931 (100%)]\tLoss: 0.170625\nEpoch: 1/10. Train set: Average loss: 0.1705\nEpoch: 1/10. Validation set: Average loss: 0.1553\nTrain: [0/2931 (0%)]\tLoss: 0.124237\nTrain: [1098/2931 (100%)]\tLoss: 0.159808\nEpoch: 2/10. Train set: Average loss: 0.1597\nEpoch: 2/10. Validation set: Average loss: 0.1581\nTrain: [0/2931 (0%)]\tLoss: 0.213428\nTrain: [1098/2931 (100%)]\tLoss: 0.155619\nEpoch: 3/10. Train set: Average loss: 0.1558\nEpoch: 3/10. Validation set: Average loss: 0.1505\nTrain: [0/2931 (0%)]\tLoss: 0.129372\nTrain: [1098/2931 (100%)]\tLoss: 0.156187\nEpoch: 4/10. Train set: Average loss: 0.1561\nEpoch: 4/10. Validation set: Average loss: 0.1489\nTrain: [0/2931 (0%)]\tLoss: 0.176683\nTrain: [1098/2931 (100%)]\tLoss: 0.154962\nEpoch: 5/10. Train set: Average loss: 0.1550\nEpoch: 5/10. Validation set: Average loss: 0.1547\nTrain: [0/2931 (0%)]\tLoss: 0.139751\nTrain: [1098/2931 (100%)]\tLoss: 0.152004\nEpoch: 6/10. Train set: Average loss: 0.1520\nEpoch: 6/10. Validation set: Average loss: 0.1655\nTrain: [0/2931 (0%)]\tLoss: 0.129002\nTrain: [1098/2931 (100%)]\tLoss: 0.147168\nEpoch: 7/10. Train set: Average loss: 0.1471\nEpoch: 7/10. Validation set: Average loss: 0.1448\nTrain: [0/2931 (0%)]\tLoss: 0.117316\nTrain: [1098/2931 (100%)]\tLoss: 0.151833\nEpoch: 8/10. Train set: Average loss: 0.1517\nEpoch: 8/10. Validation set: Average loss: 0.1671\nTrain: [0/2931 (0%)]\tLoss: 0.213926\nTrain: [1098/2931 (100%)]\tLoss: 0.147364\nEpoch: 9/10. Train set: Average loss: 0.1475\nEpoch: 9/10. Validation set: Average loss: 0.1437\nTrain: [0/2931 (0%)]\tLoss: 0.089912\nTrain: [1098/2931 (100%)]\tLoss: 0.141491\nEpoch: 10/10. Train set: Average loss: 0.1414\nEpoch: 10/10. Validation set: Average loss: 0.1411\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.186993\nTrain: [1098/2931 (100%)]\tLoss: 0.167124\nEpoch: 1/10. Train set: Average loss: 0.1672\nEpoch: 1/10. Validation set: Average loss: 0.1585\nTrain: [0/2931 (0%)]\tLoss: 0.158679\nTrain: [1098/2931 (100%)]\tLoss: 0.156631\nEpoch: 2/10. Train set: Average loss: 0.1566\nEpoch: 2/10. Validation set: Average loss: 0.1683\nTrain: [0/2931 (0%)]\tLoss: 0.205695\nTrain: [1098/2931 (100%)]\tLoss: 0.164443\nEpoch: 3/10. Train set: Average loss: 0.1646\nEpoch: 3/10. 
Validation set: Average loss: 0.1494\nTrain: [0/2931 (0%)]\tLoss: 0.102792\nTrain: [1098/2931 (100%)]\tLoss: 0.153869\nEpoch: 4/10. Train set: Average loss: 0.1537\nEpoch: 4/10. Validation set: Average loss: 0.1426\nTrain: [0/2931 (0%)]\tLoss: 0.158867\nTrain: [1098/2931 (100%)]\tLoss: 0.151130\nEpoch: 5/10. Train set: Average loss: 0.1512\nEpoch: 5/10. Validation set: Average loss: 0.1535\nTrain: [0/2931 (0%)]\tLoss: 0.156237\nTrain: [1098/2931 (100%)]\tLoss: 0.146861\nEpoch: 6/10. Train set: Average loss: 0.1469\nEpoch: 6/10. Validation set: Average loss: 0.1568\nTrain: [0/2931 (0%)]\tLoss: 0.139261\nTrain: [1098/2931 (100%)]\tLoss: 0.148405\nEpoch: 7/10. Train set: Average loss: 0.1484\nEpoch: 7/10. Validation set: Average loss: 0.1573\nTrain: [0/2931 (0%)]\tLoss: 0.110176\nTrain: [1098/2931 (100%)]\tLoss: 0.147770\nEpoch: 8/10. Train set: Average loss: 0.1477\nEpoch: 8/10. Validation set: Average loss: 0.1644\nTrain: [0/2931 (0%)]\tLoss: 0.147693\nTrain: [1098/2931 (100%)]\tLoss: 0.149522\nEpoch: 9/10. Train set: Average loss: 0.1495\nEpoch: 9/10. Validation set: Average loss: 0.1356\nTrain: [0/2931 (0%)]\tLoss: 0.171455\nTrain: [1098/2931 (100%)]\tLoss: 0.142475\nEpoch: 10/10. Train set: Average loss: 0.1426\nEpoch: 10/10. Validation set: Average loss: 0.1357\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.249710\nTrain: [1098/2931 (100%)]\tLoss: 0.163486\nEpoch: 1/10. Train set: Average loss: 0.1637\nEpoch: 1/10. Validation set: Average loss: 0.1406\nTrain: [0/2931 (0%)]\tLoss: 0.151565\nTrain: [1098/2931 (100%)]\tLoss: 0.160338\nEpoch: 2/10. Train set: Average loss: 0.1603\nEpoch: 2/10. Validation set: Average loss: 0.1521\nTrain: [0/2931 (0%)]\tLoss: 0.111501\nTrain: [1098/2931 (100%)]\tLoss: 0.154286\nEpoch: 3/10. Train set: Average loss: 0.1542\nEpoch: 3/10. Validation set: Average loss: 0.1526\nTrain: [0/2931 (0%)]\tLoss: 0.142037\nTrain: [1098/2931 (100%)]\tLoss: 0.150081\nEpoch: 4/10. Train set: Average loss: 0.1501\nEpoch: 4/10. Validation set: Average loss: 0.1545\nTrain: [0/2931 (0%)]\tLoss: 0.117892\nTrain: [1098/2931 (100%)]\tLoss: 0.153028\nEpoch: 5/10. Train set: Average loss: 0.1529\nEpoch: 5/10. Validation set: Average loss: 0.1590\nTrain: [0/2931 (0%)]\tLoss: 0.181125\nTrain: [1098/2931 (100%)]\tLoss: 0.147474\nEpoch: 6/10. Train set: Average loss: 0.1476\nEpoch: 6/10. Validation set: Average loss: 0.1542\nTrain: [0/2931 (0%)]\tLoss: 0.134787\nTrain: [1098/2931 (100%)]\tLoss: 0.146949\nEpoch: 7/10. Train set: Average loss: 0.1469\nEpoch: 7/10. Validation set: Average loss: 0.1455\nTrain: [0/2931 (0%)]\tLoss: 0.124792\nTrain: [1098/2931 (100%)]\tLoss: 0.147673\nEpoch: 8/10. Train set: Average loss: 0.1476\nEpoch: 8/10. Validation set: Average loss: 0.1497\nTrain: [0/2931 (0%)]\tLoss: 0.164092\nTrain: [1098/2931 (100%)]\tLoss: 0.145936\nEpoch: 9/10. Train set: Average loss: 0.1460\nEpoch: 9/10. Validation set: Average loss: 0.1371\nTrain: [0/2931 (0%)]\tLoss: 0.148835\nTrain: [1098/2931 (100%)]\tLoss: 0.139359\nEpoch: 10/10. Train set: Average loss: 0.1394\nEpoch: 10/10. Validation set: Average loss: 0.1368\nNumber features: 33\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.311555\nTrain: [1098/2931 (100%)]\tLoss: 0.181689\nEpoch: 1/10. Train set: Average loss: 0.1820\nEpoch: 1/10. Validation set: Average loss: 0.1782\nTrain: [0/2931 (0%)]\tLoss: 0.245067\nTrain: [1098/2931 (100%)]\tLoss: 0.163333\nEpoch: 2/10. Train set: Average loss: 0.1636\nEpoch: 2/10. 
Validation set: Average loss: 0.1573\nTrain: [0/2931 (0%)]\tLoss: 0.187579\nTrain: [1098/2931 (100%)]\tLoss: 0.162447\nEpoch: 3/10. Train set: Average loss: 0.1625\nEpoch: 3/10. Validation set: Average loss: 0.1648\nTrain: [0/2931 (0%)]\tLoss: 0.197714\nTrain: [1098/2931 (100%)]\tLoss: 0.160323\nEpoch: 4/10. Train set: Average loss: 0.1604\nEpoch: 4/10. Validation set: Average loss: 0.1961\nTrain: [0/2931 (0%)]\tLoss: 0.229260\nTrain: [1098/2931 (100%)]\tLoss: 0.158844\nEpoch: 5/10. Train set: Average loss: 0.1590\nEpoch: 5/10. Validation set: Average loss: 0.1617\nTrain: [0/2931 (0%)]\tLoss: 0.203248\nTrain: [1098/2931 (100%)]\tLoss: 0.159427\nEpoch: 6/10. Train set: Average loss: 0.1595\nEpoch: 6/10. Validation set: Average loss: 0.1762\nTrain: [0/2931 (0%)]\tLoss: 0.234571\nTrain: [1098/2931 (100%)]\tLoss: 0.154569\nEpoch: 7/10. Train set: Average loss: 0.1548\nEpoch: 7/10. Validation set: Average loss: 0.1501\nTrain: [0/2931 (0%)]\tLoss: 0.199279\nTrain: [1098/2931 (100%)]\tLoss: 0.148488\nEpoch: 8/10. Train set: Average loss: 0.1486\nEpoch: 8/10. Validation set: Average loss: 0.1489\nTrain: [0/2931 (0%)]\tLoss: 0.149129\nTrain: [1098/2931 (100%)]\tLoss: 0.150771\nEpoch: 9/10. Train set: Average loss: 0.1508\nEpoch: 9/10. Validation set: Average loss: 0.1356\nTrain: [0/2931 (0%)]\tLoss: 0.076484\nTrain: [1098/2931 (100%)]\tLoss: 0.143744\nEpoch: 10/10. Train set: Average loss: 0.1436\nEpoch: 10/10. Validation set: Average loss: 0.1332\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.312039\nTrain: [1098/2931 (100%)]\tLoss: 0.171468\nEpoch: 1/10. Train set: Average loss: 0.1719\nEpoch: 1/10. Validation set: Average loss: 0.1520\nTrain: [0/2931 (0%)]\tLoss: 0.205132\nTrain: [1098/2931 (100%)]\tLoss: 0.180827\nEpoch: 2/10. Train set: Average loss: 0.1809\nEpoch: 2/10. Validation set: Average loss: 0.1568\nTrain: [0/2931 (0%)]\tLoss: 0.223264\nTrain: [1098/2931 (100%)]\tLoss: 0.174050\nEpoch: 3/10. Train set: Average loss: 0.1742\nEpoch: 3/10. Validation set: Average loss: 0.1663\nTrain: [0/2931 (0%)]\tLoss: 0.282959\nTrain: [1098/2931 (100%)]\tLoss: 0.171359\nEpoch: 4/10. Train set: Average loss: 0.1717\nEpoch: 4/10. Validation set: Average loss: 0.1687\nTrain: [0/2931 (0%)]\tLoss: 0.112082\nTrain: [1098/2931 (100%)]\tLoss: 0.161597\nEpoch: 5/10. Train set: Average loss: 0.1615\nEpoch: 5/10. Validation set: Average loss: 0.1611\nTrain: [0/2931 (0%)]\tLoss: 0.159162\nTrain: [1098/2931 (100%)]\tLoss: 0.161314\nEpoch: 6/10. Train set: Average loss: 0.1613\nEpoch: 6/10. Validation set: Average loss: 0.1691\nTrain: [0/2931 (0%)]\tLoss: 0.231847\nTrain: [1098/2931 (100%)]\tLoss: 0.154679\nEpoch: 7/10. Train set: Average loss: 0.1549\nEpoch: 7/10. Validation set: Average loss: 0.1647\nTrain: [0/2931 (0%)]\tLoss: 0.254145\nTrain: [1098/2931 (100%)]\tLoss: 0.157473\nEpoch: 8/10. Train set: Average loss: 0.1577\nEpoch: 8/10. Validation set: Average loss: 0.1654\nTrain: [0/2931 (0%)]\tLoss: 0.135722\nTrain: [1098/2931 (100%)]\tLoss: 0.147874\nEpoch: 9/10. Train set: Average loss: 0.1478\nEpoch: 9/10. Validation set: Average loss: 0.1382\nTrain: [0/2931 (0%)]\tLoss: 0.107603\nTrain: [1098/2931 (100%)]\tLoss: 0.145651\nEpoch: 10/10. Train set: Average loss: 0.1455\nEpoch: 10/10. Validation set: Average loss: 0.1396\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.124685\nTrain: [1098/2931 (100%)]\tLoss: 0.178927\nEpoch: 1/10. Train set: Average loss: 0.1788\nEpoch: 1/10. Validation set: Average loss: 0.1581\nTrain: [0/2931 (0%)]\tLoss: 0.160745\nTrain: [1098/2931 (100%)]\tLoss: 0.171517\nEpoch: 2/10. 
Train set: Average loss: 0.1715\nEpoch: 2/10. Validation set: Average loss: 0.1503\nTrain: [0/2931 (0%)]\tLoss: 0.124435\nTrain: [1098/2931 (100%)]\tLoss: 0.167214\nEpoch: 3/10. Train set: Average loss: 0.1671\nEpoch: 3/10. Validation set: Average loss: 0.1908\nTrain: [0/2931 (0%)]\tLoss: 0.203212\nTrain: [1098/2931 (100%)]\tLoss: 0.158691\nEpoch: 4/10. Train set: Average loss: 0.1588\nEpoch: 4/10. Validation set: Average loss: 0.1517\nTrain: [0/2931 (0%)]\tLoss: 0.191518\nTrain: [1098/2931 (100%)]\tLoss: 0.156507\nEpoch: 5/10. Train set: Average loss: 0.1566\nEpoch: 5/10. Validation set: Average loss: 0.1351\nTrain: [0/2931 (0%)]\tLoss: 0.119405\nTrain: [1098/2931 (100%)]\tLoss: 0.170215\nEpoch: 6/10. Train set: Average loss: 0.1701\nEpoch: 6/10. Validation set: Average loss: 0.1810\nTrain: [0/2931 (0%)]\tLoss: 0.199800\nTrain: [1098/2931 (100%)]\tLoss: 0.168013\nEpoch: 7/10. Train set: Average loss: 0.1681\nEpoch: 7/10. Validation set: Average loss: 0.1592\nTrain: [0/2931 (0%)]\tLoss: 0.191394\nTrain: [1098/2931 (100%)]\tLoss: 0.173104\nEpoch: 8/10. Train set: Average loss: 0.1732\nEpoch: 8/10. Validation set: Average loss: 0.1554\nTrain: [0/2931 (0%)]\tLoss: 0.107772\nTrain: [1098/2931 (100%)]\tLoss: 0.157716\nEpoch: 9/10. Train set: Average loss: 0.1576\nEpoch: 9/10. Validation set: Average loss: 0.1369\nTrain: [0/2931 (0%)]\tLoss: 0.192774\nTrain: [1098/2931 (100%)]\tLoss: 0.149948\nEpoch: 10/10. Train set: Average loss: 0.1501\nEpoch: 10/10. Validation set: Average loss: 0.1358\nNumber features: 34\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.062457\nTrain: [1098/2931 (100%)]\tLoss: 0.165004\nEpoch: 1/10. Train set: Average loss: 0.1647\nEpoch: 1/10. Validation set: Average loss: 0.1294\nTrain: [0/2931 (0%)]\tLoss: 0.175003\nTrain: [1098/2931 (100%)]\tLoss: 0.172725\nEpoch: 2/10. Train set: Average loss: 0.1727\nEpoch: 2/10. Validation set: Average loss: 0.1557\nTrain: [0/2931 (0%)]\tLoss: 0.132159\nTrain: [1098/2931 (100%)]\tLoss: 0.167866\nEpoch: 3/10. Train set: Average loss: 0.1678\nEpoch: 3/10. Validation set: Average loss: 0.1622\nTrain: [0/2931 (0%)]\tLoss: 0.192787\nTrain: [1098/2931 (100%)]\tLoss: 0.162413\nEpoch: 4/10. Train set: Average loss: 0.1625\nEpoch: 4/10. Validation set: Average loss: 0.1416\nTrain: [0/2931 (0%)]\tLoss: 0.068253\nTrain: [1098/2931 (100%)]\tLoss: 0.166362\nEpoch: 5/10. Train set: Average loss: 0.1661\nEpoch: 5/10. Validation set: Average loss: 0.1322\nTrain: [0/2931 (0%)]\tLoss: 0.127605\nTrain: [1098/2931 (100%)]\tLoss: 0.157693\nEpoch: 6/10. Train set: Average loss: 0.1576\nEpoch: 6/10. Validation set: Average loss: 0.1317\nTrain: [0/2931 (0%)]\tLoss: 0.274311\nTrain: [1098/2931 (100%)]\tLoss: 0.160635\nEpoch: 7/10. Train set: Average loss: 0.1609\nEpoch: 7/10. Validation set: Average loss: 0.1214\nTrain: [0/2931 (0%)]\tLoss: 0.044392\nTrain: [1098/2931 (100%)]\tLoss: 0.162736\nEpoch: 8/10. Train set: Average loss: 0.1624\nEpoch: 8/10. Validation set: Average loss: 0.1251\nTrain: [0/2931 (0%)]\tLoss: 0.146603\nTrain: [1098/2931 (100%)]\tLoss: 0.153547\nEpoch: 9/10. Train set: Average loss: 0.1535\nEpoch: 9/10. Validation set: Average loss: 0.1216\nTrain: [0/2931 (0%)]\tLoss: 0.172937\nTrain: [1098/2931 (100%)]\tLoss: 0.145991\nEpoch: 10/10. Train set: Average loss: 0.1461\nEpoch: 10/10. Validation set: Average loss: 0.1201\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.436338\nTrain: [1098/2931 (100%)]\tLoss: 0.176047\nEpoch: 1/10. Train set: Average loss: 0.1768\nEpoch: 1/10. 
Validation set: Average loss: 0.1370\nTrain: [0/2931 (0%)]\tLoss: 0.228215\nTrain: [1098/2931 (100%)]\tLoss: 0.174336\nEpoch: 2/10. Train set: Average loss: 0.1745\nEpoch: 2/10. Validation set: Average loss: 0.1292\nTrain: [0/2931 (0%)]\tLoss: 0.110806\nTrain: [1098/2931 (100%)]\tLoss: 0.168301\nEpoch: 3/10. Train set: Average loss: 0.1681\nEpoch: 3/10. Validation set: Average loss: 0.1162\nTrain: [0/2931 (0%)]\tLoss: 0.141143\nTrain: [1098/2931 (100%)]\tLoss: 0.158369\nEpoch: 4/10. Train set: Average loss: 0.1583\nEpoch: 4/10. Validation set: Average loss: 0.1316\nTrain: [0/2931 (0%)]\tLoss: 0.151365\nTrain: [1098/2931 (100%)]\tLoss: 0.169423\nEpoch: 5/10. Train set: Average loss: 0.1694\nEpoch: 5/10. Validation set: Average loss: 0.1154\nTrain: [0/2931 (0%)]\tLoss: 0.254184\nTrain: [1098/2931 (100%)]\tLoss: 0.158071\nEpoch: 6/10. Train set: Average loss: 0.1583\nEpoch: 6/10. Validation set: Average loss: 0.1356\nTrain: [0/2931 (0%)]\tLoss: 0.280129\nTrain: [1098/2931 (100%)]\tLoss: 0.162013\nEpoch: 7/10. Train set: Average loss: 0.1623\nEpoch: 7/10. Validation set: Average loss: 0.1215\nTrain: [0/2931 (0%)]\tLoss: 0.146424\nTrain: [1098/2931 (100%)]\tLoss: 0.153105\nEpoch: 8/10. Train set: Average loss: 0.1531\nEpoch: 8/10. Validation set: Average loss: 0.1366\nTrain: [0/2931 (0%)]\tLoss: 0.189054\nTrain: [1098/2931 (100%)]\tLoss: 0.150295\nEpoch: 9/10. Train set: Average loss: 0.1504\nEpoch: 9/10. Validation set: Average loss: 0.1216\nTrain: [0/2931 (0%)]\tLoss: 0.155357\nTrain: [1098/2931 (100%)]\tLoss: 0.149919\nEpoch: 10/10. Train set: Average loss: 0.1499\nEpoch: 10/10. Validation set: Average loss: 0.1203\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.436170\nTrain: [1098/2931 (100%)]\tLoss: 0.168749\nEpoch: 1/10. Train set: Average loss: 0.1695\nEpoch: 1/10. Validation set: Average loss: 0.1230\nTrain: [0/2931 (0%)]\tLoss: 0.142632\nTrain: [1098/2931 (100%)]\tLoss: 0.170083\nEpoch: 2/10. Train set: Average loss: 0.1700\nEpoch: 2/10. Validation set: Average loss: 0.1155\nTrain: [0/2931 (0%)]\tLoss: 0.115458\nTrain: [1098/2931 (100%)]\tLoss: 0.153247\nEpoch: 3/10. Train set: Average loss: 0.1531\nEpoch: 3/10. Validation set: Average loss: 0.1252\nTrain: [0/2931 (0%)]\tLoss: 0.150961\nTrain: [1098/2931 (100%)]\tLoss: 0.164710\nEpoch: 4/10. Train set: Average loss: 0.1647\nEpoch: 4/10. Validation set: Average loss: 0.1282\nTrain: [0/2931 (0%)]\tLoss: 0.143600\nTrain: [1098/2931 (100%)]\tLoss: 0.157371\nEpoch: 5/10. Train set: Average loss: 0.1573\nEpoch: 5/10. Validation set: Average loss: 0.1312\nTrain: [0/2931 (0%)]\tLoss: 0.217085\nTrain: [1098/2931 (100%)]\tLoss: 0.152936\nEpoch: 6/10. Train set: Average loss: 0.1531\nEpoch: 6/10. Validation set: Average loss: 0.1242\nTrain: [0/2931 (0%)]\tLoss: 0.253683\nTrain: [1098/2931 (100%)]\tLoss: 0.157539\nEpoch: 7/10. Train set: Average loss: 0.1578\nEpoch: 7/10. Validation set: Average loss: 0.1304\nTrain: [0/2931 (0%)]\tLoss: 0.243777\nTrain: [1098/2931 (100%)]\tLoss: 0.159105\nEpoch: 8/10. Train set: Average loss: 0.1593\nEpoch: 8/10. Validation set: Average loss: 0.1265\nTrain: [0/2931 (0%)]\tLoss: 0.133187\nTrain: [1098/2931 (100%)]\tLoss: 0.144832\nEpoch: 9/10. Train set: Average loss: 0.1448\nEpoch: 9/10. Validation set: Average loss: 0.1294\nTrain: [0/2931 (0%)]\tLoss: 0.114142\nTrain: [1098/2931 (100%)]\tLoss: 0.145371\nEpoch: 10/10. Train set: Average loss: 0.1453\nEpoch: 10/10. 
Validation set: Average loss: 0.1297\nNumber features: 35\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.187167\nTrain: [1098/2931 (100%)]\tLoss: 0.165577\nEpoch: 1/10. Train set: Average loss: 0.1656\nEpoch: 1/10. Validation set: Average loss: 0.1462\nTrain: [0/2931 (0%)]\tLoss: 0.161368\nTrain: [1098/2931 (100%)]\tLoss: 0.158705\nEpoch: 2/10. Train set: Average loss: 0.1587\nEpoch: 2/10. Validation set: Average loss: 0.1274\nTrain: [0/2931 (0%)]\tLoss: 0.206349\nTrain: [1098/2931 (100%)]\tLoss: 0.166990\nEpoch: 3/10. Train set: Average loss: 0.1671\nEpoch: 3/10. Validation set: Average loss: 0.1374\nTrain: [0/2931 (0%)]\tLoss: 0.143094\nTrain: [1098/2931 (100%)]\tLoss: 0.170386\nEpoch: 4/10. Train set: Average loss: 0.1703\nEpoch: 4/10. Validation set: Average loss: 0.1437\nTrain: [0/2931 (0%)]\tLoss: 0.121423\nTrain: [1098/2931 (100%)]\tLoss: 0.166122\nEpoch: 5/10. Train set: Average loss: 0.1660\nEpoch: 5/10. Validation set: Average loss: 0.1550\nTrain: [0/2931 (0%)]\tLoss: 0.126751\nTrain: [1098/2931 (100%)]\tLoss: 0.163985\nEpoch: 6/10. Train set: Average loss: 0.1639\nEpoch: 6/10. Validation set: Average loss: 0.1438\nTrain: [0/2931 (0%)]\tLoss: 0.142331\nTrain: [1098/2931 (100%)]\tLoss: 0.156948\nEpoch: 7/10. Train set: Average loss: 0.1569\nEpoch: 7/10. Validation set: Average loss: 0.1223\nTrain: [0/2931 (0%)]\tLoss: 0.103553\nTrain: [1098/2931 (100%)]\tLoss: 0.156223\nEpoch: 8/10. Train set: Average loss: 0.1561\nEpoch: 8/10. Validation set: Average loss: 0.1381\nTrain: [0/2931 (0%)]\tLoss: 0.140961\nTrain: [1098/2931 (100%)]\tLoss: 0.150303\nEpoch: 9/10. Train set: Average loss: 0.1503\nEpoch: 9/10. Validation set: Average loss: 0.1231\nTrain: [0/2931 (0%)]\tLoss: 0.151432\nTrain: [1098/2931 (100%)]\tLoss: 0.143701\nEpoch: 10/10. Train set: Average loss: 0.1437\nEpoch: 10/10. Validation set: Average loss: 0.1245\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.186880\nTrain: [1098/2931 (100%)]\tLoss: 0.169931\nEpoch: 1/10. Train set: Average loss: 0.1700\nEpoch: 1/10. Validation set: Average loss: 0.1390\nTrain: [0/2931 (0%)]\tLoss: 0.201012\nTrain: [1098/2931 (100%)]\tLoss: 0.162249\nEpoch: 2/10. Train set: Average loss: 0.1624\nEpoch: 2/10. Validation set: Average loss: 0.1479\nTrain: [0/2931 (0%)]\tLoss: 0.105832\nTrain: [1098/2931 (100%)]\tLoss: 0.162010\nEpoch: 3/10. Train set: Average loss: 0.1619\nEpoch: 3/10. Validation set: Average loss: 0.1617\nTrain: [0/2931 (0%)]\tLoss: 0.105352\nTrain: [1098/2931 (100%)]\tLoss: 0.158815\nEpoch: 4/10. Train set: Average loss: 0.1587\nEpoch: 4/10. Validation set: Average loss: 0.1520\nTrain: [0/2931 (0%)]\tLoss: 0.172469\nTrain: [1098/2931 (100%)]\tLoss: 0.157941\nEpoch: 5/10. Train set: Average loss: 0.1580\nEpoch: 5/10. Validation set: Average loss: 0.1543\nTrain: [0/2931 (0%)]\tLoss: 0.127207\nTrain: [1098/2931 (100%)]\tLoss: 0.160353\nEpoch: 6/10. Train set: Average loss: 0.1603\nEpoch: 6/10. Validation set: Average loss: 0.1374\nTrain: [0/2931 (0%)]\tLoss: 0.193114\nTrain: [1098/2931 (100%)]\tLoss: 0.152416\nEpoch: 7/10. Train set: Average loss: 0.1525\nEpoch: 7/10. Validation set: Average loss: 0.1355\nTrain: [0/2931 (0%)]\tLoss: 0.243055\nTrain: [1098/2931 (100%)]\tLoss: 0.147828\nEpoch: 8/10. Train set: Average loss: 0.1481\nEpoch: 8/10. Validation set: Average loss: 0.1421\nTrain: [0/2931 (0%)]\tLoss: 0.142753\nTrain: [1098/2931 (100%)]\tLoss: 0.144622\nEpoch: 9/10. Train set: Average loss: 0.1446\nEpoch: 9/10. 
Validation set: Average loss: 0.1354\nTrain: [0/2931 (0%)]\tLoss: 0.088517\nTrain: [1098/2931 (100%)]\tLoss: 0.145148\nEpoch: 10/10. Train set: Average loss: 0.1450\nEpoch: 10/10. Validation set: Average loss: 0.1366\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.249401\nTrain: [1098/2931 (100%)]\tLoss: 0.164106\nEpoch: 1/10. Train set: Average loss: 0.1643\nEpoch: 1/10. Validation set: Average loss: 0.1407\nTrain: [0/2931 (0%)]\tLoss: 0.121642\nTrain: [1098/2931 (100%)]\tLoss: 0.157301\nEpoch: 2/10. Train set: Average loss: 0.1572\nEpoch: 2/10. Validation set: Average loss: 0.1543\nTrain: [0/2931 (0%)]\tLoss: 0.137923\nTrain: [1098/2931 (100%)]\tLoss: 0.153939\nEpoch: 3/10. Train set: Average loss: 0.1539\nEpoch: 3/10. Validation set: Average loss: 0.1448\nTrain: [0/2931 (0%)]\tLoss: 0.142807\nTrain: [1098/2931 (100%)]\tLoss: 0.154789\nEpoch: 4/10. Train set: Average loss: 0.1548\nEpoch: 4/10. Validation set: Average loss: 0.1424\nTrain: [0/2931 (0%)]\tLoss: 0.111903\nTrain: [1098/2931 (100%)]\tLoss: 0.152794\nEpoch: 5/10. Train set: Average loss: 0.1527\nEpoch: 5/10. Validation set: Average loss: 0.1528\nTrain: [0/2931 (0%)]\tLoss: 0.119518\nTrain: [1098/2931 (100%)]\tLoss: 0.153051\nEpoch: 6/10. Train set: Average loss: 0.1530\nEpoch: 6/10. Validation set: Average loss: 0.1447\nTrain: [0/2931 (0%)]\tLoss: 0.135923\nTrain: [1098/2931 (100%)]\tLoss: 0.146148\nEpoch: 7/10. Train set: Average loss: 0.1461\nEpoch: 7/10. Validation set: Average loss: 0.1542\nTrain: [0/2931 (0%)]\tLoss: 0.081777\nTrain: [1098/2931 (100%)]\tLoss: 0.154478\nEpoch: 8/10. Train set: Average loss: 0.1543\nEpoch: 8/10. Validation set: Average loss: 0.1395\nTrain: [0/2931 (0%)]\tLoss: 0.125604\nTrain: [1098/2931 (100%)]\tLoss: 0.145708\nEpoch: 9/10. Train set: Average loss: 0.1457\nEpoch: 9/10. Validation set: Average loss: 0.1322\nTrain: [0/2931 (0%)]\tLoss: 0.118570\nTrain: [1098/2931 (100%)]\tLoss: 0.143592\nEpoch: 10/10. Train set: Average loss: 0.1435\nEpoch: 10/10. Validation set: Average loss: 0.1318\nNumber features: 36\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.187145\nTrain: [1098/2931 (100%)]\tLoss: 0.164235\nEpoch: 1/10. Train set: Average loss: 0.1643\nEpoch: 1/10. Validation set: Average loss: 0.1333\nTrain: [0/2931 (0%)]\tLoss: 0.174641\nTrain: [1098/2931 (100%)]\tLoss: 0.171380\nEpoch: 2/10. Train set: Average loss: 0.1714\nEpoch: 2/10. Validation set: Average loss: 0.1393\nTrain: [0/2931 (0%)]\tLoss: 0.114669\nTrain: [1098/2931 (100%)]\tLoss: 0.154125\nEpoch: 3/10. Train set: Average loss: 0.1540\nEpoch: 3/10. Validation set: Average loss: 0.1428\nTrain: [0/2931 (0%)]\tLoss: 0.136039\nTrain: [1098/2931 (100%)]\tLoss: 0.148483\nEpoch: 4/10. Train set: Average loss: 0.1484\nEpoch: 4/10. Validation set: Average loss: 0.1228\nTrain: [0/2931 (0%)]\tLoss: 0.145055\nTrain: [1098/2931 (100%)]\tLoss: 0.152375\nEpoch: 5/10. Train set: Average loss: 0.1524\nEpoch: 5/10. Validation set: Average loss: 0.1442\nTrain: [0/2931 (0%)]\tLoss: 0.065265\nTrain: [1098/2931 (100%)]\tLoss: 0.156484\nEpoch: 6/10. Train set: Average loss: 0.1562\nEpoch: 6/10. Validation set: Average loss: 0.1512\nTrain: [0/2931 (0%)]\tLoss: 0.097215\nTrain: [1098/2931 (100%)]\tLoss: 0.155069\nEpoch: 7/10. Train set: Average loss: 0.1549\nEpoch: 7/10. Validation set: Average loss: 0.1477\nTrain: [0/2931 (0%)]\tLoss: 0.093106\nTrain: [1098/2931 (100%)]\tLoss: 0.153638\nEpoch: 8/10. Train set: Average loss: 0.1535\nEpoch: 8/10. 
Validation set: Average loss: 0.1469\nTrain: [0/2931 (0%)]\tLoss: 0.193940\nTrain: [1098/2931 (100%)]\tLoss: 0.149969\nEpoch: 9/10. Train set: Average loss: 0.1501\nEpoch: 9/10. Validation set: Average loss: 0.1366\nTrain: [0/2931 (0%)]\tLoss: 0.112200\nTrain: [1098/2931 (100%)]\tLoss: 0.145052\nEpoch: 10/10. Train set: Average loss: 0.1450\nEpoch: 10/10. Validation set: Average loss: 0.1340\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.124879\nTrain: [1098/2931 (100%)]\tLoss: 0.169418\nEpoch: 1/10. Train set: Average loss: 0.1693\nEpoch: 1/10. Validation set: Average loss: 0.1464\nTrain: [0/2931 (0%)]\tLoss: 0.127423\nTrain: [1098/2931 (100%)]\tLoss: 0.164429\nEpoch: 2/10. Train set: Average loss: 0.1643\nEpoch: 2/10. Validation set: Average loss: 0.1416\nTrain: [0/2931 (0%)]\tLoss: 0.131680\nTrain: [1098/2931 (100%)]\tLoss: 0.163823\nEpoch: 3/10. Train set: Average loss: 0.1637\nEpoch: 3/10. Validation set: Average loss: 0.1756\nTrain: [0/2931 (0%)]\tLoss: 0.474896\nTrain: [1098/2931 (100%)]\tLoss: 0.157335\nEpoch: 4/10. Train set: Average loss: 0.1582\nEpoch: 4/10. Validation set: Average loss: 0.1661\nTrain: [0/2931 (0%)]\tLoss: 0.092826\nTrain: [1098/2931 (100%)]\tLoss: 0.154375\nEpoch: 5/10. Train set: Average loss: 0.1542\nEpoch: 5/10. Validation set: Average loss: 0.1326\nTrain: [0/2931 (0%)]\tLoss: 0.277112\nTrain: [1098/2931 (100%)]\tLoss: 0.155465\nEpoch: 6/10. Train set: Average loss: 0.1558\nEpoch: 6/10. Validation set: Average loss: 0.1429\nTrain: [0/2931 (0%)]\tLoss: 0.153607\nTrain: [1098/2931 (100%)]\tLoss: 0.153119\nEpoch: 7/10. Train set: Average loss: 0.1531\nEpoch: 7/10. Validation set: Average loss: 0.1864\nTrain: [0/2931 (0%)]\tLoss: 0.180777\nTrain: [1098/2931 (100%)]\tLoss: 0.153696\nEpoch: 8/10. Train set: Average loss: 0.1538\nEpoch: 8/10. Validation set: Average loss: 0.1283\nTrain: [0/2931 (0%)]\tLoss: 0.194324\nTrain: [1098/2931 (100%)]\tLoss: 0.149259\nEpoch: 9/10. Train set: Average loss: 0.1494\nEpoch: 9/10. Validation set: Average loss: 0.1324\nTrain: [0/2931 (0%)]\tLoss: 0.110892\nTrain: [1098/2931 (100%)]\tLoss: 0.145672\nEpoch: 10/10. Train set: Average loss: 0.1456\nEpoch: 10/10. Validation set: Average loss: 0.1317\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.311134\nTrain: [1098/2931 (100%)]\tLoss: 0.160977\nEpoch: 1/10. Train set: Average loss: 0.1614\nEpoch: 1/10. Validation set: Average loss: 0.1150\nTrain: [0/2931 (0%)]\tLoss: 0.201692\nTrain: [1098/2931 (100%)]\tLoss: 0.158651\nEpoch: 2/10. Train set: Average loss: 0.1588\nEpoch: 2/10. Validation set: Average loss: 0.1325\nTrain: [0/2931 (0%)]\tLoss: 0.240207\nTrain: [1098/2931 (100%)]\tLoss: 0.164264\nEpoch: 3/10. Train set: Average loss: 0.1645\nEpoch: 3/10. Validation set: Average loss: 0.1429\nTrain: [0/2931 (0%)]\tLoss: 0.056202\nTrain: [1098/2931 (100%)]\tLoss: 0.169171\nEpoch: 4/10. Train set: Average loss: 0.1689\nEpoch: 4/10. Validation set: Average loss: 0.1149\nTrain: [0/2931 (0%)]\tLoss: 0.156901\nTrain: [1098/2931 (100%)]\tLoss: 0.160805\nEpoch: 5/10. Train set: Average loss: 0.1608\nEpoch: 5/10. Validation set: Average loss: 0.1247\nTrain: [0/2931 (0%)]\tLoss: 0.145813\nTrain: [1098/2931 (100%)]\tLoss: 0.162189\nEpoch: 6/10. Train set: Average loss: 0.1621\nEpoch: 6/10. Validation set: Average loss: 0.1206\nTrain: [0/2931 (0%)]\tLoss: 0.178289\nTrain: [1098/2931 (100%)]\tLoss: 0.155908\nEpoch: 7/10. Train set: Average loss: 0.1560\nEpoch: 7/10. Validation set: Average loss: 0.1138\nTrain: [0/2931 (0%)]\tLoss: 0.089758\nTrain: [1098/2931 (100%)]\tLoss: 0.155382\nEpoch: 8/10. 
Train set: Average loss: 0.1552\nEpoch: 8/10. Validation set: Average loss: 0.1207\nTrain: [0/2931 (0%)]\tLoss: 0.067720\nTrain: [1098/2931 (100%)]\tLoss: 0.151655\nEpoch: 9/10. Train set: Average loss: 0.1514\nEpoch: 9/10. Validation set: Average loss: 0.1231\nTrain: [0/2931 (0%)]\tLoss: 0.078304\nTrain: [1098/2931 (100%)]\tLoss: 0.143651\nEpoch: 10/10. Train set: Average loss: 0.1435\nEpoch: 10/10. Validation set: Average loss: 0.1230\nNumber features: 37\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.124904\nTrain: [1098/2931 (100%)]\tLoss: 0.173594\nEpoch: 1/10. Train set: Average loss: 0.1735\nEpoch: 1/10. Validation set: Average loss: 0.1401\nTrain: [0/2931 (0%)]\tLoss: 0.374623\nTrain: [1098/2931 (100%)]\tLoss: 0.157901\nEpoch: 2/10. Train set: Average loss: 0.1585\nEpoch: 2/10. Validation set: Average loss: 0.1356\nTrain: [0/2931 (0%)]\tLoss: 0.195865\nTrain: [1098/2931 (100%)]\tLoss: 0.157412\nEpoch: 3/10. Train set: Average loss: 0.1575\nEpoch: 3/10. Validation set: Average loss: 0.1549\nTrain: [0/2931 (0%)]\tLoss: 0.062250\nTrain: [1098/2931 (100%)]\tLoss: 0.173095\nEpoch: 4/10. Train set: Average loss: 0.1728\nEpoch: 4/10. Validation set: Average loss: 0.1521\nTrain: [0/2931 (0%)]\tLoss: 0.183280\nTrain: [1098/2931 (100%)]\tLoss: 0.157040\nEpoch: 5/10. Train set: Average loss: 0.1571\nEpoch: 5/10. Validation set: Average loss: 0.1416\nTrain: [0/2931 (0%)]\tLoss: 0.178847\nTrain: [1098/2931 (100%)]\tLoss: 0.159511\nEpoch: 6/10. Train set: Average loss: 0.1596\nEpoch: 6/10. Validation set: Average loss: 0.1553\nTrain: [0/2931 (0%)]\tLoss: 0.135965\nTrain: [1098/2931 (100%)]\tLoss: 0.154278\nEpoch: 7/10. Train set: Average loss: 0.1542\nEpoch: 7/10. Validation set: Average loss: 0.1543\nTrain: [0/2931 (0%)]\tLoss: 0.103346\nTrain: [1098/2931 (100%)]\tLoss: 0.154749\nEpoch: 8/10. Train set: Average loss: 0.1546\nEpoch: 8/10. Validation set: Average loss: 0.1461\nTrain: [0/2931 (0%)]\tLoss: 0.141792\nTrain: [1098/2931 (100%)]\tLoss: 0.145361\nEpoch: 9/10. Train set: Average loss: 0.1454\nEpoch: 9/10. Validation set: Average loss: 0.1480\nTrain: [0/2931 (0%)]\tLoss: 0.154545\nTrain: [1098/2931 (100%)]\tLoss: 0.150745\nEpoch: 10/10. Train set: Average loss: 0.1508\nEpoch: 10/10. Validation set: Average loss: 0.1474\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.249605\nTrain: [1098/2931 (100%)]\tLoss: 0.161057\nEpoch: 1/10. Train set: Average loss: 0.1613\nEpoch: 1/10. Validation set: Average loss: 0.1206\nTrain: [0/2931 (0%)]\tLoss: 0.137545\nTrain: [1098/2931 (100%)]\tLoss: 0.151744\nEpoch: 2/10. Train set: Average loss: 0.1517\nEpoch: 2/10. Validation set: Average loss: 0.1354\nTrain: [0/2931 (0%)]\tLoss: 0.277964\nTrain: [1098/2931 (100%)]\tLoss: 0.160319\nEpoch: 3/10. Train set: Average loss: 0.1606\nEpoch: 3/10. Validation set: Average loss: 0.1274\nTrain: [0/2931 (0%)]\tLoss: 0.181225\nTrain: [1098/2931 (100%)]\tLoss: 0.152072\nEpoch: 4/10. Train set: Average loss: 0.1522\nEpoch: 4/10. Validation set: Average loss: 0.1532\nTrain: [0/2931 (0%)]\tLoss: 0.130306\nTrain: [1098/2931 (100%)]\tLoss: 0.152315\nEpoch: 5/10. Train set: Average loss: 0.1523\nEpoch: 5/10. Validation set: Average loss: 0.1336\nTrain: [0/2931 (0%)]\tLoss: 0.082418\nTrain: [1098/2931 (100%)]\tLoss: 0.156693\nEpoch: 6/10. Train set: Average loss: 0.1565\nEpoch: 6/10. Validation set: Average loss: 0.1515\nTrain: [0/2931 (0%)]\tLoss: 0.149462\nTrain: [1098/2931 (100%)]\tLoss: 0.154960\nEpoch: 7/10. Train set: Average loss: 0.1549\nEpoch: 7/10. 
Validation set: Average loss: 0.1460\nTrain: [0/2931 (0%)]\tLoss: 0.130043\nTrain: [1098/2931 (100%)]\tLoss: 0.151264\nEpoch: 8/10. Train set: Average loss: 0.1512\nEpoch: 8/10. Validation set: Average loss: 0.1541\nTrain: [0/2931 (0%)]\tLoss: 0.174236\nTrain: [1098/2931 (100%)]\tLoss: 0.142538\nEpoch: 9/10. Train set: Average loss: 0.1426\nEpoch: 9/10. Validation set: Average loss: 0.1490\nTrain: [0/2931 (0%)]\tLoss: 0.134005\nTrain: [1098/2931 (100%)]\tLoss: 0.145009\nEpoch: 10/10. Train set: Average loss: 0.1450\nEpoch: 10/10. Validation set: Average loss: 0.1495\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.311720\nTrain: [1098/2931 (100%)]\tLoss: 0.168823\nEpoch: 1/10. Train set: Average loss: 0.1692\nEpoch: 1/10. Validation set: Average loss: 0.1666\nTrain: [0/2931 (0%)]\tLoss: 0.186440\nTrain: [1098/2931 (100%)]\tLoss: 0.160802\nEpoch: 2/10. Train set: Average loss: 0.1609\nEpoch: 2/10. Validation set: Average loss: 0.1543\nTrain: [0/2931 (0%)]\tLoss: 0.165093\nTrain: [1098/2931 (100%)]\tLoss: 0.160324\nEpoch: 3/10. Train set: Average loss: 0.1603\nEpoch: 3/10. Validation set: Average loss: 0.1392\nTrain: [0/2931 (0%)]\tLoss: 0.164449\nTrain: [1098/2931 (100%)]\tLoss: 0.161411\nEpoch: 4/10. Train set: Average loss: 0.1614\nEpoch: 4/10. Validation set: Average loss: 0.1492\nTrain: [0/2931 (0%)]\tLoss: 0.221218\nTrain: [1098/2931 (100%)]\tLoss: 0.151665\nEpoch: 5/10. Train set: Average loss: 0.1519\nEpoch: 5/10. Validation set: Average loss: 0.1362\nTrain: [0/2931 (0%)]\tLoss: 0.216765\nTrain: [1098/2931 (100%)]\tLoss: 0.158543\nEpoch: 6/10. Train set: Average loss: 0.1587\nEpoch: 6/10. Validation set: Average loss: 0.1541\nTrain: [0/2931 (0%)]\tLoss: 0.171907\nTrain: [1098/2931 (100%)]\tLoss: 0.156598\nEpoch: 7/10. Train set: Average loss: 0.1566\nEpoch: 7/10. Validation set: Average loss: 0.1391\nTrain: [0/2931 (0%)]\tLoss: 0.125405\nTrain: [1098/2931 (100%)]\tLoss: 0.156337\nEpoch: 8/10. Train set: Average loss: 0.1563\nEpoch: 8/10. Validation set: Average loss: 0.1445\nTrain: [0/2931 (0%)]\tLoss: 0.220299\nTrain: [1098/2931 (100%)]\tLoss: 0.152550\nEpoch: 9/10. Train set: Average loss: 0.1527\nEpoch: 9/10. Validation set: Average loss: 0.1405\nTrain: [0/2931 (0%)]\tLoss: 0.211647\nTrain: [1098/2931 (100%)]\tLoss: 0.145686\nEpoch: 10/10. Train set: Average loss: 0.1459\nEpoch: 10/10. Validation set: Average loss: 0.1464\nNumber features: 38\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.374015\nTrain: [1098/2931 (100%)]\tLoss: 0.170900\nEpoch: 1/10. Train set: Average loss: 0.1715\nEpoch: 1/10. Validation set: Average loss: 0.1189\nTrain: [0/2931 (0%)]\tLoss: 0.186039\nTrain: [1098/2931 (100%)]\tLoss: 0.157581\nEpoch: 2/10. Train set: Average loss: 0.1577\nEpoch: 2/10. Validation set: Average loss: 0.1341\nTrain: [0/2931 (0%)]\tLoss: 0.197636\nTrain: [1098/2931 (100%)]\tLoss: 0.158327\nEpoch: 3/10. Train set: Average loss: 0.1584\nEpoch: 3/10. Validation set: Average loss: 0.1285\nTrain: [0/2931 (0%)]\tLoss: 0.136584\nTrain: [1098/2931 (100%)]\tLoss: 0.158994\nEpoch: 4/10. Train set: Average loss: 0.1589\nEpoch: 4/10. Validation set: Average loss: 0.1171\nTrain: [0/2931 (0%)]\tLoss: 0.122069\nTrain: [1098/2931 (100%)]\tLoss: 0.154802\nEpoch: 5/10. Train set: Average loss: 0.1547\nEpoch: 5/10. Validation set: Average loss: 0.1263\nTrain: [0/2931 (0%)]\tLoss: 0.154101\nTrain: [1098/2931 (100%)]\tLoss: 0.156248\nEpoch: 6/10. Train set: Average loss: 0.1562\nEpoch: 6/10. 
Validation set: Average loss: 0.1197\nTrain: [0/2931 (0%)]\tLoss: 0.107832\nTrain: [1098/2931 (100%)]\tLoss: 0.149526\nEpoch: 7/10. Train set: Average loss: 0.1494\nEpoch: 7/10. Validation set: Average loss: 0.1390\nTrain: [0/2931 (0%)]\tLoss: 0.197323\nTrain: [1098/2931 (100%)]\tLoss: 0.149429\nEpoch: 8/10. Train set: Average loss: 0.1496\nEpoch: 8/10. Validation set: Average loss: 0.1546\nTrain: [0/2931 (0%)]\tLoss: 0.265598\nTrain: [1098/2931 (100%)]\tLoss: 0.156032\nEpoch: 9/10. Train set: Average loss: 0.1563\nEpoch: 9/10. Validation set: Average loss: 0.1254\nTrain: [0/2931 (0%)]\tLoss: 0.168248\nTrain: [1098/2931 (100%)]\tLoss: 0.146522\nEpoch: 10/10. Train set: Average loss: 0.1466\nEpoch: 10/10. Validation set: Average loss: 0.1286\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.249486\nTrain: [1098/2931 (100%)]\tLoss: 0.163218\nEpoch: 1/10. Train set: Average loss: 0.1635\nEpoch: 1/10. Validation set: Average loss: 0.1415\nTrain: [0/2931 (0%)]\tLoss: 0.138787\nTrain: [1098/2931 (100%)]\tLoss: 0.153329\nEpoch: 2/10. Train set: Average loss: 0.1533\nEpoch: 2/10. Validation set: Average loss: 0.1238\nTrain: [0/2931 (0%)]\tLoss: 0.141318\nTrain: [1098/2931 (100%)]\tLoss: 0.152498\nEpoch: 3/10. Train set: Average loss: 0.1525\nEpoch: 3/10. Validation set: Average loss: 0.1325\nTrain: [0/2931 (0%)]\tLoss: 0.125936\nTrain: [1098/2931 (100%)]\tLoss: 0.163375\nEpoch: 4/10. Train set: Average loss: 0.1633\nEpoch: 4/10. Validation set: Average loss: 0.1378\nTrain: [0/2931 (0%)]\tLoss: 0.182872\nTrain: [1098/2931 (100%)]\tLoss: 0.153710\nEpoch: 5/10. Train set: Average loss: 0.1538\nEpoch: 5/10. Validation set: Average loss: 0.1394\nTrain: [0/2931 (0%)]\tLoss: 0.126086\nTrain: [1098/2931 (100%)]\tLoss: 0.153043\nEpoch: 6/10. Train set: Average loss: 0.1530\nEpoch: 6/10. Validation set: Average loss: 0.1327\nTrain: [0/2931 (0%)]\tLoss: 0.192564\nTrain: [1098/2931 (100%)]\tLoss: 0.154274\nEpoch: 7/10. Train set: Average loss: 0.1544\nEpoch: 7/10. Validation set: Average loss: 0.1450\nTrain: [0/2931 (0%)]\tLoss: 0.191864\nTrain: [1098/2931 (100%)]\tLoss: 0.146446\nEpoch: 8/10. Train set: Average loss: 0.1466\nEpoch: 8/10. Validation set: Average loss: 0.1235\nTrain: [0/2931 (0%)]\tLoss: 0.150450\nTrain: [1098/2931 (100%)]\tLoss: 0.143710\nEpoch: 9/10. Train set: Average loss: 0.1437\nEpoch: 9/10. Validation set: Average loss: 0.1343\nTrain: [0/2931 (0%)]\tLoss: 0.090538\nTrain: [1098/2931 (100%)]\tLoss: 0.142512\nEpoch: 10/10. Train set: Average loss: 0.1424\nEpoch: 10/10. Validation set: Average loss: 0.1280\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.311360\nTrain: [1098/2931 (100%)]\tLoss: 0.161207\nEpoch: 1/10. Train set: Average loss: 0.1616\nEpoch: 1/10. Validation set: Average loss: 0.1190\nTrain: [0/2931 (0%)]\tLoss: 0.126628\nTrain: [1098/2931 (100%)]\tLoss: 0.155627\nEpoch: 2/10. Train set: Average loss: 0.1555\nEpoch: 2/10. Validation set: Average loss: 0.1443\nTrain: [0/2931 (0%)]\tLoss: 0.309209\nTrain: [1098/2931 (100%)]\tLoss: 0.159180\nEpoch: 3/10. Train set: Average loss: 0.1596\nEpoch: 3/10. Validation set: Average loss: 0.1385\nTrain: [0/2931 (0%)]\tLoss: 0.116303\nTrain: [1098/2931 (100%)]\tLoss: 0.169306\nEpoch: 4/10. Train set: Average loss: 0.1692\nEpoch: 4/10. Validation set: Average loss: 0.1189\nTrain: [0/2931 (0%)]\tLoss: 0.106532\nTrain: [1098/2931 (100%)]\tLoss: 0.152887\nEpoch: 5/10. Train set: Average loss: 0.1528\nEpoch: 5/10. Validation set: Average loss: 0.1452\nTrain: [0/2931 (0%)]\tLoss: 0.211309\nTrain: [1098/2931 (100%)]\tLoss: 0.154559\nEpoch: 6/10. 
Train set: Average loss: 0.1547\nEpoch: 6/10. Validation set: Average loss: 0.1267\nTrain: [0/2931 (0%)]\tLoss: 0.152117\nTrain: [1098/2931 (100%)]\tLoss: 0.150025\nEpoch: 7/10. Train set: Average loss: 0.1500\nEpoch: 7/10. Validation set: Average loss: 0.1209\nTrain: [0/2931 (0%)]\tLoss: 0.112135\nTrain: [1098/2931 (100%)]\tLoss: 0.150626\nEpoch: 8/10. Train set: Average loss: 0.1505\nEpoch: 8/10. Validation set: Average loss: 0.1223\nTrain: [0/2931 (0%)]\tLoss: 0.085205\nTrain: [1098/2931 (100%)]\tLoss: 0.141495\nEpoch: 9/10. Train set: Average loss: 0.1413\nEpoch: 9/10. Validation set: Average loss: 0.1251\nTrain: [0/2931 (0%)]\tLoss: 0.115540\nTrain: [1098/2931 (100%)]\tLoss: 0.140546\nEpoch: 10/10. Train set: Average loss: 0.1405\nEpoch: 10/10. Validation set: Average loss: 0.1231\nNumber features: 39\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.311825\nTrain: [1098/2931 (100%)]\tLoss: 0.161946\nEpoch: 1/10. Train set: Average loss: 0.1624\nEpoch: 1/10. Validation set: Average loss: 0.1380\nTrain: [0/2931 (0%)]\tLoss: 0.135542\nTrain: [1098/2931 (100%)]\tLoss: 0.172361\nEpoch: 2/10. Train set: Average loss: 0.1723\nEpoch: 2/10. Validation set: Average loss: 0.1621\nTrain: [0/2931 (0%)]\tLoss: 0.156691\nTrain: [1098/2931 (100%)]\tLoss: 0.154289\nEpoch: 3/10. Train set: Average loss: 0.1543\nEpoch: 3/10. Validation set: Average loss: 0.1688\nTrain: [0/2931 (0%)]\tLoss: 0.150875\nTrain: [1098/2931 (100%)]\tLoss: 0.152154\nEpoch: 4/10. Train set: Average loss: 0.1522\nEpoch: 4/10. Validation set: Average loss: 0.1592\nTrain: [0/2931 (0%)]\tLoss: 0.161415\nTrain: [1098/2931 (100%)]\tLoss: 0.148924\nEpoch: 5/10. Train set: Average loss: 0.1490\nEpoch: 5/10. Validation set: Average loss: 0.1418\nTrain: [0/2931 (0%)]\tLoss: 0.105070\nTrain: [1098/2931 (100%)]\tLoss: 0.143753\nEpoch: 6/10. Train set: Average loss: 0.1436\nEpoch: 6/10. Validation set: Average loss: 0.1325\nTrain: [0/2931 (0%)]\tLoss: 0.184294\nTrain: [1098/2931 (100%)]\tLoss: 0.141899\nEpoch: 7/10. Train set: Average loss: 0.1420\nEpoch: 7/10. Validation set: Average loss: 0.1275\nTrain: [0/2931 (0%)]\tLoss: 0.130162\nTrain: [1098/2931 (100%)]\tLoss: 0.145793\nEpoch: 8/10. Train set: Average loss: 0.1458\nEpoch: 8/10. Validation set: Average loss: 0.1438\nTrain: [0/2931 (0%)]\tLoss: 0.103289\nTrain: [1098/2931 (100%)]\tLoss: 0.142804\nEpoch: 9/10. Train set: Average loss: 0.1427\nEpoch: 9/10. Validation set: Average loss: 0.1411\nTrain: [0/2931 (0%)]\tLoss: 0.132904\nTrain: [1098/2931 (100%)]\tLoss: 0.141884\nEpoch: 10/10. Train set: Average loss: 0.1419\nEpoch: 10/10. Validation set: Average loss: 0.1389\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.186824\nTrain: [1098/2931 (100%)]\tLoss: 0.171816\nEpoch: 1/10. Train set: Average loss: 0.1719\nEpoch: 1/10. Validation set: Average loss: 0.1529\nTrain: [0/2931 (0%)]\tLoss: 0.165474\nTrain: [1098/2931 (100%)]\tLoss: 0.158320\nEpoch: 2/10. Train set: Average loss: 0.1583\nEpoch: 2/10. Validation set: Average loss: 0.1516\nTrain: [0/2931 (0%)]\tLoss: 0.110868\nTrain: [1098/2931 (100%)]\tLoss: 0.154972\nEpoch: 3/10. Train set: Average loss: 0.1549\nEpoch: 3/10. Validation set: Average loss: 0.1416\nTrain: [0/2931 (0%)]\tLoss: 0.168083\nTrain: [1098/2931 (100%)]\tLoss: 0.146895\nEpoch: 4/10. Train set: Average loss: 0.1470\nEpoch: 4/10. Validation set: Average loss: 0.1360\nTrain: [0/2931 (0%)]\tLoss: 0.112856\nTrain: [1098/2931 (100%)]\tLoss: 0.150601\nEpoch: 5/10. Train set: Average loss: 0.1505\nEpoch: 5/10. 
Validation set: Average loss: 0.1545\nTrain: [0/2931 (0%)]\tLoss: 0.098737\nTrain: [1098/2931 (100%)]\tLoss: 0.148410\nEpoch: 6/10. Train set: Average loss: 0.1483\nEpoch: 6/10. Validation set: Average loss: 0.1383\nTrain: [0/2931 (0%)]\tLoss: 0.116536\nTrain: [1098/2931 (100%)]\tLoss: 0.147573\nEpoch: 7/10. Train set: Average loss: 0.1475\nEpoch: 7/10. Validation set: Average loss: 0.1297\nTrain: [0/2931 (0%)]\tLoss: 0.161279\nTrain: [1098/2931 (100%)]\tLoss: 0.151922\nEpoch: 8/10. Train set: Average loss: 0.1519\nEpoch: 8/10. Validation set: Average loss: 0.1335\nTrain: [0/2931 (0%)]\tLoss: 0.170205\nTrain: [1098/2931 (100%)]\tLoss: 0.143010\nEpoch: 9/10. Train set: Average loss: 0.1431\nEpoch: 9/10. Validation set: Average loss: 0.1304\nTrain: [0/2931 (0%)]\tLoss: 0.199948\nTrain: [1098/2931 (100%)]\tLoss: 0.146452\nEpoch: 10/10. Train set: Average loss: 0.1466\nEpoch: 10/10. Validation set: Average loss: 0.1288\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.374067\nTrain: [1098/2931 (100%)]\tLoss: 0.166669\nEpoch: 1/10. Train set: Average loss: 0.1672\nEpoch: 1/10. Validation set: Average loss: 0.1513\nTrain: [0/2931 (0%)]\tLoss: 0.134457\nTrain: [1098/2931 (100%)]\tLoss: 0.164409\nEpoch: 2/10. Train set: Average loss: 0.1643\nEpoch: 2/10. Validation set: Average loss: 0.1531\nTrain: [0/2931 (0%)]\tLoss: 0.180841\nTrain: [1098/2931 (100%)]\tLoss: 0.168865\nEpoch: 3/10. Train set: Average loss: 0.1689\nEpoch: 3/10. Validation set: Average loss: 0.1486\nTrain: [0/2931 (0%)]\tLoss: 0.202250\nTrain: [1098/2931 (100%)]\tLoss: 0.152154\nEpoch: 4/10. Train set: Average loss: 0.1523\nEpoch: 4/10. Validation set: Average loss: 0.1438\nTrain: [0/2931 (0%)]\tLoss: 0.141968\nTrain: [1098/2931 (100%)]\tLoss: 0.151632\nEpoch: 5/10. Train set: Average loss: 0.1516\nEpoch: 5/10. Validation set: Average loss: 0.1382\nTrain: [0/2931 (0%)]\tLoss: 0.154754\nTrain: [1098/2931 (100%)]\tLoss: 0.148672\nEpoch: 6/10. Train set: Average loss: 0.1487\nEpoch: 6/10. Validation set: Average loss: 0.1513\nTrain: [0/2931 (0%)]\tLoss: 0.202865\nTrain: [1098/2931 (100%)]\tLoss: 0.152252\nEpoch: 7/10. Train set: Average loss: 0.1524\nEpoch: 7/10. Validation set: Average loss: 0.1454\nTrain: [0/2931 (0%)]\tLoss: 0.235193\nTrain: [1098/2931 (100%)]\tLoss: 0.147270\nEpoch: 8/10. Train set: Average loss: 0.1475\nEpoch: 8/10. Validation set: Average loss: 0.1417\nTrain: [0/2931 (0%)]\tLoss: 0.170854\nTrain: [1098/2931 (100%)]\tLoss: 0.147205\nEpoch: 9/10. Train set: Average loss: 0.1473\nEpoch: 9/10. Validation set: Average loss: 0.1410\nTrain: [0/2931 (0%)]\tLoss: 0.218591\nTrain: [1098/2931 (100%)]\tLoss: 0.144878\nEpoch: 10/10. Train set: Average loss: 0.1451\nEpoch: 10/10. Validation set: Average loss: 0.1462\nNumber features: 40\n\n\tmodel_0\nTrain: [0/2931 (0%)]\tLoss: 0.187376\nTrain: [1098/2931 (100%)]\tLoss: 0.172890\nEpoch: 1/10. Train set: Average loss: 0.1729\nEpoch: 1/10. Validation set: Average loss: 0.1655\nTrain: [0/2931 (0%)]\tLoss: 0.173633\nTrain: [1098/2931 (100%)]\tLoss: 0.167800\nEpoch: 2/10. Train set: Average loss: 0.1678\nEpoch: 2/10. Validation set: Average loss: 0.1510\nTrain: [0/2931 (0%)]\tLoss: 0.154663\nTrain: [1098/2931 (100%)]\tLoss: 0.155995\nEpoch: 3/10. Train set: Average loss: 0.1560\nEpoch: 3/10. Validation set: Average loss: 0.1455\nTrain: [0/2931 (0%)]\tLoss: 0.116748\nTrain: [1098/2931 (100%)]\tLoss: 0.152127\nEpoch: 4/10. Train set: Average loss: 0.1520\nEpoch: 4/10. 
Validation set: Average loss: 0.1378\nTrain: [0/2931 (0%)]\tLoss: 0.103726\nTrain: [1098/2931 (100%)]\tLoss: 0.149029\nEpoch: 5/10. Train set: Average loss: 0.1489\nEpoch: 5/10. Validation set: Average loss: 0.1392\nTrain: [0/2931 (0%)]\tLoss: 0.156946\nTrain: [1098/2931 (100%)]\tLoss: 0.150946\nEpoch: 6/10. Train set: Average loss: 0.1510\nEpoch: 6/10. Validation set: Average loss: 0.1373\nTrain: [0/2931 (0%)]\tLoss: 0.194885\nTrain: [1098/2931 (100%)]\tLoss: 0.153025\nEpoch: 7/10. Train set: Average loss: 0.1531\nEpoch: 7/10. Validation set: Average loss: 0.1561\nTrain: [0/2931 (0%)]\tLoss: 0.146791\nTrain: [1098/2931 (100%)]\tLoss: 0.152269\nEpoch: 8/10. Train set: Average loss: 0.1523\nEpoch: 8/10. Validation set: Average loss: 0.1513\nTrain: [0/2931 (0%)]\tLoss: 0.151004\nTrain: [1098/2931 (100%)]\tLoss: 0.153233\nEpoch: 9/10. Train set: Average loss: 0.1532\nEpoch: 9/10. Validation set: Average loss: 0.1367\nTrain: [0/2931 (0%)]\tLoss: 0.146672\nTrain: [1098/2931 (100%)]\tLoss: 0.145045\nEpoch: 10/10. Train set: Average loss: 0.1450\nEpoch: 10/10. Validation set: Average loss: 0.1356\n\tmodel_1\nTrain: [0/2931 (0%)]\tLoss: 0.187028\nTrain: [1098/2931 (100%)]\tLoss: 0.178555\nEpoch: 1/10. Train set: Average loss: 0.1786\nEpoch: 1/10. Validation set: Average loss: 0.1530\nTrain: [0/2931 (0%)]\tLoss: 0.074034\nTrain: [1098/2931 (100%)]\tLoss: 0.160765\nEpoch: 2/10. Train set: Average loss: 0.1605\nEpoch: 2/10. Validation set: Average loss: 0.1203\nTrain: [0/2931 (0%)]\tLoss: 0.144902\nTrain: [1098/2931 (100%)]\tLoss: 0.156239\nEpoch: 3/10. Train set: Average loss: 0.1562\nEpoch: 3/10. Validation set: Average loss: 0.1573\nTrain: [0/2931 (0%)]\tLoss: 0.116086\nTrain: [1098/2931 (100%)]\tLoss: 0.154190\nEpoch: 4/10. Train set: Average loss: 0.1541\nEpoch: 4/10. Validation set: Average loss: 0.1400\nTrain: [0/2931 (0%)]\tLoss: 0.134161\nTrain: [1098/2931 (100%)]\tLoss: 0.166827\nEpoch: 5/10. Train set: Average loss: 0.1667\nEpoch: 5/10. Validation set: Average loss: 0.1480\nTrain: [0/2931 (0%)]\tLoss: 0.163438\nTrain: [1098/2931 (100%)]\tLoss: 0.148924\nEpoch: 6/10. Train set: Average loss: 0.1490\nEpoch: 6/10. Validation set: Average loss: 0.1549\nTrain: [0/2931 (0%)]\tLoss: 0.114868\nTrain: [1098/2931 (100%)]\tLoss: 0.148936\nEpoch: 7/10. Train set: Average loss: 0.1488\nEpoch: 7/10. Validation set: Average loss: 0.1497\nTrain: [0/2931 (0%)]\tLoss: 0.088322\nTrain: [1098/2931 (100%)]\tLoss: 0.152249\nEpoch: 8/10. Train set: Average loss: 0.1521\nEpoch: 8/10. Validation set: Average loss: 0.1326\nTrain: [0/2931 (0%)]\tLoss: 0.125784\nTrain: [1098/2931 (100%)]\tLoss: 0.147777\nEpoch: 9/10. Train set: Average loss: 0.1477\nEpoch: 9/10. Validation set: Average loss: 0.1361\nTrain: [0/2931 (0%)]\tLoss: 0.187914\nTrain: [1098/2931 (100%)]\tLoss: 0.142468\nEpoch: 10/10. Train set: Average loss: 0.1426\nEpoch: 10/10. Validation set: Average loss: 0.1309\n\tmodel_2\nTrain: [0/2931 (0%)]\tLoss: 0.249663\nTrain: [1098/2931 (100%)]\tLoss: 0.167130\nEpoch: 1/10. Train set: Average loss: 0.1674\nEpoch: 1/10. Validation set: Average loss: 0.1253\nTrain: [0/2931 (0%)]\tLoss: 0.131634\nTrain: [1098/2931 (100%)]\tLoss: 0.153790\nEpoch: 2/10. Train set: Average loss: 0.1537\nEpoch: 2/10. Validation set: Average loss: 0.1328\nTrain: [0/2931 (0%)]\tLoss: 0.143843\nTrain: [1098/2931 (100%)]\tLoss: 0.151634\nEpoch: 3/10. Train set: Average loss: 0.1516\nEpoch: 3/10. Validation set: Average loss: 0.1252\nTrain: [0/2931 (0%)]\tLoss: 0.173569\nTrain: [1098/2931 (100%)]\tLoss: 0.156962\nEpoch: 4/10. 
Train set: Average loss: 0.1570\nEpoch: 4/10. Validation set: Average loss: 0.1445\nTrain: [0/2931 (0%)]\tLoss: 0.122052\nTrain: [1098/2931 (100%)]\tLoss: 0.165557\nEpoch: 5/10. Train set: Average loss: 0.1654\nEpoch: 5/10. Validation set: Average loss: 0.1300\nTrain: [0/2931 (0%)]\tLoss: 0.097903\nTrain: [1098/2931 (100%)]\tLoss: 0.165786\nEpoch: 6/10. Train set: Average loss: 0.1656\nEpoch: 6/10. Validation set: Average loss: 0.1496\nTrain: [0/2931 (0%)]\tLoss: 0.147194\nTrain: [1098/2931 (100%)]\tLoss: 0.152778\nEpoch: 7/10. Train set: Average loss: 0.1528\nEpoch: 7/10. Validation set: Average loss: 0.1405\nTrain: [0/2931 (0%)]\tLoss: 0.206992\nTrain: [1098/2931 (100%)]\tLoss: 0.155909\nEpoch: 8/10. Train set: Average loss: 0.1560\nEpoch: 8/10. Validation set: Average loss: 0.1297\nTrain: [0/2931 (0%)]\tLoss: 0.192942\nTrain: [1098/2931 (100%)]\tLoss: 0.145894\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb41e5030ee6e7d16a16305e2226daf742abaed5
25,498
ipynb
Jupyter Notebook
examples/duet/mnist/MNIST_Syft_Data_Scientist.ipynb
manisoftwartist/PySyft
19cf2cbc11efaae16932f4a5aa9a225060675bd0
[ "MIT" ]
null
null
null
examples/duet/mnist/MNIST_Syft_Data_Scientist.ipynb
manisoftwartist/PySyft
19cf2cbc11efaae16932f4a5aa9a225060675bd0
[ "MIT" ]
null
null
null
examples/duet/mnist/MNIST_Syft_Data_Scientist.ipynb
manisoftwartist/PySyft
19cf2cbc11efaae16932f4a5aa9a225060675bd0
[ "MIT" ]
null
null
null
32.440204
451
0.563848
[ [ [ "# MNIST - Syft Duet - Data Scientist 🥁", "_____no_output_____" ], [ "## PART 0: Optional - Google Colab Setup", "_____no_output_____" ] ], [ [ "%%capture\n# This only runs in colab and clones the code sets it up and fixes a few issues, \n# you can skip this if you are running Jupyter Notebooks\nimport sys\nif \"google.colab\" in sys.modules:\n branch = \"master\" # change to the branch you want\n ! git clone --single-branch --branch $branch https://github.com/OpenMined/PySyft.git\n ! cd PySyft && ./scripts/colab.sh # fixes some colab python issues\n sys.path.append(\"/content/PySyft/src\") # prevents needing restart", "_____no_output_____" ] ], [ [ "## PART 1: Connect to a Remote Duet Server\n\nAs the Data Scientist, you want to perform data science on data that is sitting in the Data Owner's Duet server in their Notebook.\n\nIn order to do this, we must run the code that the Data Owner sends us, which importantly includes their Duet Session ID. The code will look like this, importantly with their real Server ID.\n\n```\nimport syft as sy\nduet = sy.duet('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')\n```\n\nThis will create a direct connection from my notebook to the remote Duet server. Once the connection is established all traffic is sent directly between the two nodes.\n\nPaste the code or Server ID that the Data Owner gives you and run it in the cell below. It will return your Client ID which you must send to the Data Owner to enter into Duet so it can pair your notebooks.", "_____no_output_____" ] ], [ [ "import syft as sy\nduet = sy.join_duet(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\nsy.logger.add(sink=\"./syft_ds.log\")", "_____no_output_____" ] ], [ [ "## PART 2: Setting up a Model and our Data\nThe majority of the code below has been adapted closely from the original PyTorch MNIST example which is available in the `original` directory with these notebooks.", "_____no_output_____" ], [ "The `duet` variable is now your reference to a whole world of remote operations including supported libraries like torch.\n\nLets take a look at the duet.torch attribute.\n```\nduet.torch\n```", "_____no_output_____" ] ], [ [ "duet.torch", "_____no_output_____" ] ], [ [ "Lets create a model just like the one in the MNIST example. We do this in almost the exact same way as in PyTorch. 
The main difference is we inherit from sy.Module instead of nn.Module and we need to pass in a variable called torch_ref which we will use internally for any calls that would normally be to torch.", "_____no_output_____" ] ], [ [ "class SyNet(sy.Module):\n def __init__(self, torch_ref):\n super(SyNet, self).__init__(torch_ref=torch_ref)\n self.conv1 = self.torch_ref.nn.Conv2d(1, 32, 3, 1)\n self.conv2 = self.torch_ref.nn.Conv2d(32, 64, 3, 1) \n self.dropout1 = self.torch_ref.nn.Dropout2d(0.25)\n self.dropout2 = self.torch_ref.nn.Dropout2d(0.5)\n self.fc1 = self.torch_ref.nn.Linear(9216, 128)\n self.fc2 = self.torch_ref.nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.torch_ref.nn.functional.relu(x)\n x = self.conv2(x)\n x = self.torch_ref.nn.functional.relu(x)\n x = self.torch_ref.nn.functional.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = self.torch_ref.flatten(x, 1)\n x = self.fc1(x)\n x = self.torch_ref.nn.functional.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = self.torch_ref.nn.functional.log_softmax(x, dim=1)\n return output", "_____no_output_____" ], [ "# lets import torch and torchvision just as we normally would\nimport torch\nimport torchvision", "_____no_output_____" ], [ "# now we can create the model and pass in our local copy of torch\nlocal_model = SyNet(torch)", "_____no_output_____" ] ], [ [ "Next we can get our MNIST Test Set ready using our local copy of torch.", "_____no_output_____" ] ], [ [ "# we need some transforms for the MNIST data set\nlocal_transform_1 = torchvision.transforms.ToTensor() # this converts PIL images to Tensors\nlocal_transform_2 = torchvision.transforms.Normalize(0.1307, 0.3081) # this normalizes the dataset\n\n# compose our transforms\nlocal_transforms = torchvision.transforms.Compose([local_transform_1, local_transform_2])", "_____no_output_____" ], [ "# Lets define a few settings which are from the original MNIST example command-line args\nargs = {\n \"batch_size\": 64,\n \"test_batch_size\": 1000,\n \"epochs\": 14,\n \"lr\": 1.0,\n \"gamma\": 0.7,\n \"no_cuda\": False,\n \"dry_run\": False,\n \"seed\": 42, # the meaning of life\n \"log_interval\": 10,\n \"save_model\": True,\n}", "_____no_output_____" ], [ "# we will configure the test set here locally since we want to know if our Data Owner's\n# private training dataset will help us reach new SOTA results for our benchmark test set\ntest_kwargs = {\n \"batch_size\": args[\"test_batch_size\"],\n}\n\ntest_data = torchvision.datasets.MNIST('../data', train=False, download=True, transform=local_transforms)\ntest_loader = torch.utils.data.DataLoader(test_data,**test_kwargs)\ntest_data_length = len(test_loader.dataset)\nprint(test_data_length)", "_____no_output_____" ] ], [ [ "Now its time to send the model to our partner’s Duet Server.", "_____no_output_____" ], [ "Note: You can load normal torch model weights before sending your model.\nTry training the model and saving it at the end of the notebook and then coming back and\nreloading the weights here, or you can train the same model once using the original script\nin `original` dir and load it here as well.", "_____no_output_____" ] ], [ [ "# local_model.load(\"./duet_mnist.pt\")", "_____no_output_____" ], [ "model = local_model.send(duet)", "_____no_output_____" ] ], [ [ "Lets create an alias for our partner’s torch called `remote_torch` so we can refer to the local torch as `torch` and any operation we want to do remotely as `remote_torch`. 
Remember, the return values from `remote_torch` are `Pointers`, not the real objects. They mostly act the same when using them with other `Pointers` but you can't mix them with local torch objects.", "_____no_output_____" ] ], [ [ "remote_torch = duet.torch", "_____no_output_____" ], [ "# lets ask to see if our Data Owner has CUDA\nhas_cuda = False\nhas_cuda_ptr = remote_torch.cuda.is_available()\nhas_cuda = bool(has_cuda_ptr.get(\n request_block=True,\n name=\"cuda_is_available\",\n reason=\"To run test and inference locally\",\n timeout_secs=5, # change to something slower\n))\nprint(has_cuda)", "_____no_output_____" ], [ "use_cuda = not args[\"no_cuda\"] and has_cuda\n# now we can set the seed\nremote_torch.manual_seed(args[\"seed\"])\n\ndevice = remote_torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint(f\"Data Owner device is {device.type.get()}\")", "_____no_output_____" ], [ "# if we have CUDA lets send our model to the GPU\nif has_cuda:\n model.cuda(device)\nelse:\n model.cpu()", "_____no_output_____" ] ], [ [ "Lets get our params, setup an optimizer and a scheduler just the same as the PyTorch MNIST example", "_____no_output_____" ] ], [ [ "params = model.parameters()", "_____no_output_____" ], [ "optimizer = remote_torch.optim.Adadelta(params, lr=args[\"lr\"])", "_____no_output_____" ], [ "scheduler = remote_torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=args[\"gamma\"])", "_____no_output_____" ] ], [ [ "Next we need a training loop so we can improve our remote model. Since we want to train on remote data we should first check if the model is remote since we will be using remote_torch in this function. To check if a model is local or remote simply use the `.is_local` attribute.", "_____no_output_____" ] ], [ [ "def train(model, torch_ref, train_loader, optimizer, epoch, args, train_data_length):\n # + 0.5 lets us math.ceil without the import\n train_batches = round((train_data_length / args[\"batch_size\"]) + 0.5)\n print(f\"> Running train in {train_batches} batches\")\n if model.is_local:\n print(\"Training requires remote model\")\n return\n\n model.train()\n\n for batch_idx, data in enumerate(train_loader):\n data_ptr, target_ptr = data[0], data[1]\n optimizer.zero_grad()\n output = model(data_ptr)\n loss = torch_ref.nn.functional.nll_loss(output, target_ptr)\n loss.backward()\n optimizer.step()\n loss_item = loss.item()\n train_loss = duet.python.Float(0) # create a remote Float we can use for summation\n train_loss += loss_item\n if batch_idx % args[\"log_interval\"] == 0:\n local_loss = None\n local_loss = loss_item.get(\n name=\"loss\",\n reason=\"To evaluate training progress\",\n request_block=True,\n timeout_secs=5\n )\n if local_loss is not None:\n print(\"Train Epoch: {} {} {:.4}\".format(epoch, batch_idx, local_loss))\n else:\n print(\"Train Epoch: {} {} ?\".format(epoch, batch_idx))\n if args[\"dry_run\"]:\n break\n if batch_idx >= train_batches - 1:\n print(\"batch_idx >= train_batches, breaking\")\n break", "_____no_output_____" ] ], [ [ "Now we can define a simple test loop very similar to the original PyTorch MNIST example.\nThis function should expect a remote model from our outer epoch loop, so internally we can call `get` to download the weights to do an evaluation on our machine with our local test set. Remember, if we have trained on private data, our model will require permission to download, so we should use request_block=True and make sure the Data Owner approves our requests. 
For the rest of this function, we will use local `torch` as we normally would.", "_____no_output_____" ] ], [ [ "def test_local(model, torch_ref, test_loader, test_data_length):\n # download remote model\n if not model.is_local:\n local_model = model.get(\n request_block=True,\n name=\"model_download\",\n reason=\"test evaluation\",\n timeout_secs=5\n )\n else:\n local_model = model\n # + 0.5 lets us math.ceil without the import\n test_batches = round((test_data_length / args[\"test_batch_size\"]) + 0.5)\n print(f\"> Running test_local in {test_batches} batches\")\n local_model.eval()\n test_loss = 0.0\n correct = 0.0\n\n with torch_ref.no_grad():\n for batch_idx, (data, target) in enumerate(test_loader):\n output = local_model(data)\n iter_loss = torch_ref.nn.functional.nll_loss(output, target, reduction=\"sum\").item()\n test_loss = test_loss + iter_loss\n pred = output.argmax(dim=1)\n total = pred.eq(target).sum().item()\n correct += total\n if args[\"dry_run\"]:\n break\n \n if batch_idx >= test_batches - 1:\n print(\"batch_idx >= test_batches, breaking\")\n break\n\n accuracy = correct / test_data_length\n print(f\"Test Set Accuracy: {100 * accuracy}%\")", "_____no_output_____" ] ], [ [ "Finally just for demonstration purposes, we will get the built-in MNIST dataset but on the Data Owners side from `remote_torchvision`.", "_____no_output_____" ] ], [ [ "# we need some transforms for the MNIST data set\nremote_torchvision = duet.torchvision\n\ntransform_1 = remote_torchvision.transforms.ToTensor() # this converts PIL images to Tensors\ntransform_2 = remote_torchvision.transforms.Normalize(0.1307, 0.3081) # this normalizes the dataset\n\nremote_list = duet.python.List() # create a remote list to add the transforms to\nremote_list.append(transform_1)\nremote_list.append(transform_2)\n\n# compose our transforms\ntransforms = remote_torchvision.transforms.Compose(remote_list)\n\n# The DO has kindly let us initialise a DataLoader for their training set\ntrain_kwargs = {\n \"batch_size\": args[\"batch_size\"],\n}\ntrain_data_ptr = remote_torchvision.datasets.MNIST('../data', train=True, download=True, transform=transforms)\ntrain_loader_ptr = remote_torch.utils.data.DataLoader(train_data_ptr,**train_kwargs)", "_____no_output_____" ], [ "# normally we would not necessarily know the length of a remote dataset so lets ask for it\n# so we can pass that to our training loop and know when to stop\ndef get_train_length(train_data_ptr):\n train_length_ptr = train_data_ptr.__len__()\n train_data_length = train_length_ptr.get(\n request_block=True,\n name=\"train_size\",\n reason=\"To write the training loop\",\n timeout_secs=5,\n )\n return train_data_length\n\ntry:\n if train_data_length is None:\n train_data_length = get_train_length(train_data_ptr)\nexcept NameError:\n train_data_length = get_train_length(train_data_ptr)\n\nprint(f\"Training Dataset size is: {train_data_length}\")", "_____no_output_____" ] ], [ [ "## PART 3: Training", "_____no_output_____" ] ], [ [ "%%time\nimport time\n\nargs[\"dry_run\"] = True # comment to do a full train\nprint(\"Starting Training\")\nfor epoch in range(1, args[\"epochs\"] + 1):\n epoch_start = time.time()\n print(f\"Epoch: {epoch}\")\n # remote training on model with remote_torch\n train(model, remote_torch, train_loader_ptr, optimizer, epoch, args, train_data_length)\n # local testing on model with local torch\n test_local(model, torch, test_loader, test_data_length)\n scheduler.step()\n epoch_end = time.time()\n print(f\"Epoch time: {int(epoch_end - 
epoch_start)} seconds\")\n break\nprint(\"Finished Training\")", "_____no_output_____" ], [ "if args[\"save_model\"]:\n model.get(\n request_block=True,\n name=\"model_download\",\n reason=\"test evaluation\",\n timeout_secs=5\n ).save(\"./duet_mnist.pt\")", "_____no_output_____" ] ], [ [ "## PART 4: Inference", "_____no_output_____" ], [ "A model would be no fun without the ability to do inference. The following code shows some examples on how we can do this either remotely or locally.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\ndef draw_image_and_label(image, label):\n fig = plt.figure()\n plt.tight_layout()\n plt.imshow(image, cmap=\"gray\", interpolation=\"none\")\n plt.title(\"Ground Truth: {}\".format(label))\n \ndef prep_for_inference(image):\n image_batch = image.unsqueeze(0).unsqueeze(0)\n image_batch = image_batch * 1.0\n return image_batch", "_____no_output_____" ], [ "def classify_local(image, model):\n if not model.is_local:\n print(\"model is remote try .get()\")\n return -1, torch.Tensor([-1])\n image_tensor = torch.Tensor(prep_for_inference(image))\n output = model(image_tensor)\n preds = torch.exp(output)\n local_y = preds\n local_y = local_y.squeeze()\n pos = local_y == max(local_y)\n index = torch.nonzero(pos, as_tuple=False)\n class_num = index.squeeze()\n return class_num, local_y", "_____no_output_____" ], [ "def classify_remote(image, model):\n if model.is_local:\n print(\"model is local try .send()\")\n return -1, remote_torch.Tensor([-1])\n image_tensor_ptr = remote_torch.Tensor(prep_for_inference(image))\n output = model(image_tensor_ptr)\n preds = remote_torch.exp(output)\n preds_result = preds.get(\n request_block=True,\n name=\"inference\",\n reason=\"To see a real world example of inference\",\n timeout_secs=10\n )\n if preds_result is None:\n print(\"No permission to do inference, request again\")\n return -1, torch.Tensor([-1])\n else:\n # now we have the local tensor we can use local torch\n local_y = torch.Tensor(preds_result)\n local_y = local_y.squeeze()\n pos = local_y == max(local_y)\n index = torch.nonzero(pos, as_tuple=False)\n class_num = index.squeeze()\n return class_num, local_y", "_____no_output_____" ], [ "# lets grab something from the test set\nimport random\ntotal_images = test_data_length # 10000\nindex = random.randint(0, total_images)\nprint(\"Random Test Image:\", index)\ncount = 0\nbatch = index // test_kwargs[\"batch_size\"]\nbatch_index = index % int(total_images / len(test_loader))\nfor tensor_ptr in test_loader:\n data, target = tensor_ptr[0], tensor_ptr[1]\n if batch == count:\n break\n count += 1\n\nprint(f\"Displaying {index} == {batch_index} in Batch: {batch}/{len(test_loader)}\")\nimage_1 = data[batch_index].reshape((28, 28))\nlabel_1 = target[batch_index]\ndraw_image_and_label(image_1, label_1)", "_____no_output_____" ], [ "# classify remote\nclass_num, preds = classify_remote(image_1, model)\nprint(f\"Prediction: {class_num} Ground Truth: {label_1}\")\nprint(preds)", "_____no_output_____" ], [ "local_model = model.get(\n request_block=True,\n name=\"model_download\",\n reason=\"To run test and inference locally\",\n timeout_secs=5,\n)", "_____no_output_____" ], [ "# classify local\nclass_num, preds = classify_local(image_1, local_model)\nprint(f\"Prediction: {class_num} Ground Truth: {label_1}\")\nprint(preds)", "_____no_output_____" ], [ "# We can also download an image from the web and run inference on that\nfrom PIL import Image, ImageEnhance\nimport PIL.ImageOps \n\nimport os\ndef 
classify_url_image(image_url):\n filename = os.path.basename(image_url)\n !curl -O $image_url\n im = Image.open(filename)\n im = PIL.ImageOps.invert(im)\n# im = im.resize((28,28), Image.ANTIALIAS)\n im = im.convert('LA')\n enhancer = ImageEnhance.Brightness(im)\n im = enhancer.enhance(3)\n\n\n print(im.size)\n fig = plt.figure()\n plt.tight_layout()\n plt.imshow(im, cmap=\"gray\", interpolation=\"none\")\n \n # classify local\n class_num, preds = classify_local(image_1, local_model)\n print(f\"Prediction: {class_num}\")\n print(preds)", "_____no_output_____" ], [ "image_url = \"https://raw.githubusercontent.com/kensanata/numbers/master/0018_CHXX/0/number-100.png\"\nclassify_url_image(image_url)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb41fb29709e9cf8ede19d5af06bc08b43463396
46,157
ipynb
Jupyter Notebook
notebooks/ddqn_atari.ipynb
tomault/pegushi
2d90d18ac908ce135a4222b86dc2129a0d23851e
[ "Apache-2.0" ]
null
null
null
notebooks/ddqn_atari.ipynb
tomault/pegushi
2d90d18ac908ce135a4222b86dc2129a0d23851e
[ "Apache-2.0" ]
null
null
null
notebooks/ddqn_atari.ipynb
tomault/pegushi
2d90d18ac908ce135a4222b86dc2129a0d23851e
[ "Apache-2.0" ]
null
null
null
50.555312
303
0.523864
[ [ [ "Implementation of double deep-Q learning initially taken from https://github.com/fg91/Deep-Q-Learning/blob/master/DQN.ipynb", "_____no_output_____" ] ], [ [ "import os\nimport random\nimport gym\nimport tensorflow as tf\nimport numpy as np\n", "_____no_output_____" ], [ "class FrameProcessor:\n \"\"\"Resizes and converts RGB Atari frames to grayscale\"\"\"\n def __init__(self, frame_height=84, frame_width=84):\n \"\"\"\n Args:\n frame_height: Integer, Height of a frame of an Atari game\n frame_width: Integer, Width of a frame of an Atari game\n \"\"\"\n self.frame_height = frame_height\n self.frame_width = frame_width\n self.frame = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8)\n self.processed = tf.image.rgb_to_grayscale(self.frame)\n self.processed = tf.image.crop_to_bounding_box(self.processed, 34, 0, 160, 160)\n self.processed = tf.image.resize_images(self.processed, \n [self.frame_height, self.frame_width], \n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n \n def process(self, session, frame):\n \"\"\"\n Args:\n session: A Tensorflow session object\n frame: A (210, 160, 3) frame of an Atari game in RGB\n Returns:\n A processed (84, 84, 1) frame in grayscale\n \"\"\"\n return session.run(self.processed, feed_dict={self.frame:frame})", "_____no_output_____" ], [ "class DQN:\n \"\"\"Implements a Deep Q Network\"\"\"\n \n # pylint: disable=too-many-instance-attributes\n \n def __init__(self, n_actions, hidden=1024, learning_rate=0.00001, \n frame_height=84, frame_width=84, agent_history_length=4):\n \"\"\"\n Args:\n n_actions: Integer, number of possible actions\n hidden: Integer, Number of filters in the final convolutional layer. \n This is different from the DeepMind implementation\n learning_rate: Float, Learning rate for the Adam optimizer\n frame_height: Integer, Height of a frame of an Atari game\n frame_width: Integer, Width of a frame of an Atari game\n agent_history_length: Integer, Number of frames stacked together to create a state\n \"\"\"\n self.n_actions = n_actions\n self.hidden = hidden\n self.learning_rate = learning_rate\n self.frame_height = frame_height\n self.frame_width = frame_width\n self.agent_history_length = agent_history_length\n \n self.input = tf.placeholder(shape=[None, self.frame_height, \n self.frame_width, self.agent_history_length], \n dtype=tf.float32)\n # Normalizing the input\n self.inputscaled = self.input/255\n \n # Convolutional layers\n self.conv1 = tf.layers.conv2d(\n inputs=self.inputscaled, filters=32, kernel_size=[8, 8], strides=4,\n kernel_initializer=tf.variance_scaling_initializer(scale=2),\n padding=\"valid\", activation=tf.nn.relu, use_bias=False, name='conv1')\n self.conv2 = tf.layers.conv2d(\n inputs=self.conv1, filters=64, kernel_size=[4, 4], strides=2, \n kernel_initializer=tf.variance_scaling_initializer(scale=2),\n padding=\"valid\", activation=tf.nn.relu, use_bias=False, name='conv2')\n self.conv3 = tf.layers.conv2d(\n inputs=self.conv2, filters=64, kernel_size=[3, 3], strides=1, \n kernel_initializer=tf.variance_scaling_initializer(scale=2),\n padding=\"valid\", activation=tf.nn.relu, use_bias=False, name='conv3')\n self.conv4 = tf.layers.conv2d(\n inputs=self.conv3, filters=hidden, kernel_size=[7, 7], strides=1, \n kernel_initializer=tf.variance_scaling_initializer(scale=2),\n padding=\"valid\", activation=tf.nn.relu, use_bias=False, name='conv4')\n \n # Splitting into value and advantage stream\n self.valuestream, self.advantagestream = tf.split(self.conv4, 2, 3)\n self.valuestream = 
tf.layers.flatten(self.valuestream)\n self.advantagestream = tf.layers.flatten(self.advantagestream)\n self.advantage = tf.layers.dense(\n inputs=self.advantagestream, units=self.n_actions,\n kernel_initializer=tf.variance_scaling_initializer(scale=2), name=\"advantage\")\n self.value = tf.layers.dense(\n inputs=self.valuestream, units=1, \n kernel_initializer=tf.variance_scaling_initializer(scale=2), name='value')\n \n # Combining value and advantage into Q-values as described above\n self.q_values = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))\n self.best_action = tf.argmax(self.q_values, 1)\n \n # The next lines perform the parameter update. This will be explained in detail later.\n \n # targetQ according to Bellman equation: \n # Q = r + gamma*max Q', calculated in the function learn()\n self.target_q = tf.placeholder(shape=[None], dtype=tf.float32)\n # Action that was performed\n self.action = tf.placeholder(shape=[None], dtype=tf.int32)\n # Q value of the action that was performed\n self.Q = tf.reduce_sum(tf.multiply(self.q_values, tf.one_hot(self.action, self.n_actions, dtype=tf.float32)), axis=1)\n \n # Parameter updates\n self.loss = tf.reduce_mean(tf.losses.huber_loss(labels=self.target_q, predictions=self.Q))\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n self.update = self.optimizer.minimize(self.loss)", "_____no_output_____" ], [ "class ActionGetter:\n \"\"\"Determines an action according to an epsilon greedy strategy with annealing epsilon\"\"\"\n def __init__(self, n_actions, eps_initial=1, eps_final=0.1, eps_final_frame=0.01, \n eps_evaluation=0.0, eps_annealing_frames=1000000, \n replay_memory_start_size=50000, max_frames=25000000):\n \"\"\"\n Args:\n n_actions: Integer, number of possible actions\n eps_initial: Float, Exploration probability for the first \n replay_memory_start_size frames\n eps_final: Float, Exploration probability after \n replay_memory_start_size + eps_annealing_frames frames\n eps_final_frame: Float, Exploration probability after max_frames frames\n eps_evaluation: Float, Exploration probability during evaluation\n eps_annealing_frames: Int, Number of frames over which the \n exploration probabilty is annealed from eps_initial to eps_final\n replay_memory_start_size: Integer, Number of frames during \n which the agent only explores\n max_frames: Integer, Total number of frames shown to the agent\n \"\"\"\n self.n_actions = n_actions\n self.eps_initial = eps_initial\n self.eps_final = eps_final\n self.eps_final_frame = eps_final_frame\n self.eps_evaluation = eps_evaluation\n self.eps_annealing_frames = eps_annealing_frames\n self.replay_memory_start_size = replay_memory_start_size\n self.max_frames = max_frames\n \n # Slopes and intercepts for exploration decrease\n self.slope = -(self.eps_initial - self.eps_final)/self.eps_annealing_frames\n self.intercept = self.eps_initial - self.slope*self.replay_memory_start_size\n self.slope_2 = -(self.eps_final - self.eps_final_frame)/(self.max_frames - self.eps_annealing_frames - self.replay_memory_start_size)\n self.intercept_2 = self.eps_final_frame - self.slope_2*self.max_frames\n \n def get_action(self, session, frame_number, state, main_dqn, evaluation=False):\n \"\"\"\n Args:\n session: A tensorflow session object\n frame_number: Integer, number of the current frame\n state: A (84, 84, 4) sequence of frames of an Atari game in grayscale\n main_dqn: A DQN object\n evaluation: A boolean saying whether the agent is being 
evaluated\n Returns:\n An integer between 0 and n_actions - 1 determining the action the agent perfoms next\n \"\"\"\n if evaluation:\n eps = self.eps_evaluation\n elif frame_number < self.replay_memory_start_size:\n eps = self.eps_initial\n elif frame_number >= self.replay_memory_start_size and frame_number < self.replay_memory_start_size + self.eps_annealing_frames:\n eps = self.slope*frame_number + self.intercept\n elif frame_number >= self.replay_memory_start_size + self.eps_annealing_frames:\n eps = self.slope_2*frame_number + self.intercept_2\n \n if np.random.rand(1) < eps:\n return np.random.randint(0, self.n_actions)\n return session.run(main_dqn.best_action, feed_dict={main_dqn.input:[state]})[0] ", "_____no_output_____" ], [ "class ReplayMemory:\n \"\"\"Replay Memory that stores the last size=1,000,000 transitions\"\"\"\n def __init__(self, size=1000000, frame_height=84, frame_width=84, \n agent_history_length=4, batch_size=32):\n \"\"\"\n Args:\n size: Integer, Number of stored transitions\n frame_height: Integer, Height of a frame of an Atari game\n frame_width: Integer, Width of a frame of an Atari game\n agent_history_length: Integer, Number of frames stacked together to create a state\n batch_size: Integer, Number if transitions returned in a minibatch\n \"\"\"\n self.size = size\n self.frame_height = frame_height\n self.frame_width = frame_width\n self.agent_history_length = agent_history_length\n self.batch_size = batch_size\n self.count = 0\n self.current = 0\n \n # Pre-allocate memory\n self.actions = np.empty(self.size, dtype=np.int32)\n self.rewards = np.empty(self.size, dtype=np.float32)\n self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8)\n self.terminal_flags = np.empty(self.size, dtype=np.bool)\n \n # Pre-allocate memory for the states and new_states in a minibatch\n self.states = np.empty((self.batch_size, self.agent_history_length, \n self.frame_height, self.frame_width), dtype=np.uint8)\n self.new_states = np.empty((self.batch_size, self.agent_history_length, \n self.frame_height, self.frame_width), dtype=np.uint8)\n self.indices = np.empty(self.batch_size, dtype=np.int32)\n \n def add_experience(self, action, frame, reward, terminal):\n \"\"\"\n Args:\n action: An integer between 0 and env.action_space.n - 1 \n determining the action the agent perfomed\n frame: A (84, 84, 1) frame of an Atari game in grayscale\n reward: A float determining the reward the agend received for performing an action\n terminal: A bool stating whether the episode terminated\n \"\"\"\n if frame.shape != (self.frame_height, self.frame_width):\n raise ValueError('Dimension of frame is wrong!')\n self.actions[self.current] = action\n self.frames[self.current, ...] 
= frame\n self.rewards[self.current] = reward\n self.terminal_flags[self.current] = terminal\n self.count = max(self.count, self.current+1)\n self.current = (self.current + 1) % self.size\n \n def _get_state(self, index):\n if self.count is 0:\n raise ValueError(\"The replay memory is empty!\")\n if index < self.agent_history_length - 1:\n raise ValueError(\"Index must be min 3\")\n return self.frames[index-self.agent_history_length+1:index+1, ...]\n \n def _get_valid_indices(self):\n for i in range(self.batch_size):\n while True:\n index = random.randint(self.agent_history_length, self.count - 1)\n if index < self.agent_history_length:\n continue\n if index >= self.current and index - self.agent_history_length <= self.current:\n continue\n if self.terminal_flags[index - self.agent_history_length:index].any():\n continue\n break\n self.indices[i] = index\n\n def get_minibatch(self):\n \"\"\"\n Returns a minibatch of self.batch_size = 32 transitions\n \"\"\"\n if self.count < self.agent_history_length:\n raise ValueError('Not enough memories to get a minibatch')\n \n self._get_valid_indices()\n \n for i, idx in enumerate(self.indices):\n self.states[i] = self._get_state(idx - 1)\n self.new_states[i] = self._get_state(idx)\n \n return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices]", "_____no_output_____" ], [ "def learn(session, replay_memory, main_dqn, target_dqn, batch_size, gamma):\n \"\"\"\n Args:\n session: A tensorflow sesson object\n replay_memory: A ReplayMemory object\n main_dqn: A DQN object\n target_dqn: A DQN object\n batch_size: Integer, Batch size\n gamma: Float, discount factor for the Bellman equation\n Returns:\n loss: The loss of the minibatch, for tensorboard\n Draws a minibatch from the replay memory, calculates the \n target Q-value that the prediction Q-value is regressed to. \n Then a parameter update is performed on the main DQN.\n \"\"\"\n # Draw a minibatch from the replay memory\n states, actions, rewards, new_states, terminal_flags = replay_memory.get_minibatch() \n # The main network estimates which action is best (in the next \n # state s', new_states is passed!) \n # for every transition in the minibatch\n arg_q_max = session.run(main_dqn.best_action, feed_dict={main_dqn.input:new_states})\n # The target network estimates the Q-values (in the next state s', new_states is passed!) \n # for every transition in the minibatch\n q_vals = session.run(target_dqn.q_values, feed_dict={target_dqn.input:new_states})\n double_q = q_vals[range(batch_size), arg_q_max]\n # Bellman equation. 
Multiplication with (1-terminal_flags) makes sure that \n # if the game is over, targetQ=rewards\n target_q = rewards + (gamma*double_q * (1-terminal_flags))\n # Gradient descend step to update the parameters of the main network\n loss, _ = session.run([main_dqn.loss, main_dqn.update], \n feed_dict={main_dqn.input:states, \n main_dqn.target_q:target_q, \n main_dqn.action:actions})\n return loss", "_____no_output_____" ], [ "class TargetNetworkUpdater:\n \"\"\"Copies the parameters of the main DQN to the target DQN\"\"\"\n def __init__(self, main_dqn_vars, target_dqn_vars):\n \"\"\"\n Args:\n main_dqn_vars: A list of tensorflow variables belonging to the main DQN network\n target_dqn_vars: A list of tensorflow variables belonging to the target DQN network\n \"\"\"\n self.main_dqn_vars = main_dqn_vars\n self.target_dqn_vars = target_dqn_vars\n\n def _update_target_vars(self):\n update_ops = []\n for i, var in enumerate(self.main_dqn_vars):\n copy_op = self.target_dqn_vars[i].assign(var.value())\n update_ops.append(copy_op)\n return update_ops\n \n def update_networks(self, sess):\n \"\"\"\n Args:\n sess: A Tensorflow session object\n Assigns the values of the parameters of the main network to the \n parameters of the target network\n \"\"\"\n update_ops = self._update_target_vars()\n for copy_op in update_ops:\n sess.run(copy_op)", "_____no_output_____" ], [ "class Atari:\n \"\"\"Wrapper for the environment provided by gym\"\"\"\n def __init__(self, envName, no_op_steps=10, agent_history_length=4):\n self.env = gym.make(envName)\n self.frame_processor = FrameProcessor()\n self.state = None\n self.last_lives = 0\n self.no_op_steps = no_op_steps\n self.agent_history_length = agent_history_length\n\n def reset(self, sess, evaluation=False):\n \"\"\"\n Args:\n sess: A Tensorflow session object\n evaluation: A boolean saying whether the agent is evaluating or training\n Resets the environment and stacks four frames ontop of each other to \n create the first state\n \"\"\"\n frame = self.env.reset()\n self.last_lives = 0\n terminal_life_lost = True # Set to true so that the agent starts \n # with a 'FIRE' action when evaluating\n if evaluation:\n for _ in range(random.randint(1, self.no_op_steps)):\n frame, _, _, _ = self.env.step(1) # Action 'Fire'\n processed_frame = self.frame_processor.process(sess, frame) # (★★★)\n self.state = np.repeat(processed_frame, self.agent_history_length, axis=2)\n \n return terminal_life_lost\n\n def step(self, sess, action):\n \"\"\"\n Args:\n sess: A Tensorflow session object\n action: Integer, action the agent performs\n Performs an action and observes the reward and terminal state from the environment\n \"\"\"\n new_frame, reward, terminal, info = self.env.step(action) # (5★)\n \n if info['ale.lives'] < self.last_lives:\n terminal_life_lost = True\n else:\n terminal_life_lost = terminal\n self.last_lives = info['ale.lives']\n \n processed_new_frame = self.frame_processor.process(sess, new_frame) # (6★)\n new_state = np.append(self.state[:, :, 1:], processed_new_frame, axis=2) # (6★) \n self.state = new_state\n \n return processed_new_frame, reward, terminal, terminal_life_lost, new_frame\n", "_____no_output_____" ], [ "ENV_NAME = 'BreakoutDeterministic-v4'\n\ntf.reset_default_graph()\n\n# Control parameters\nMAX_EPISODE_LENGTH = 18000 # Equivalent of 5 minutes of gameplay at 60 frames per second\nEVAL_FREQUENCY = 200000 # Number of frames the agent sees between evaluations\nEVAL_STEPS = 10000 # Number of frames for one evaluation\nNETW_UPDATE_FREQ = 10000 # 
Number of chosen actions between updating the target network. \n # According to Mnih et al. 2015 this is measured in the number of \n # parameter updates (every four actions), however, in the \n # DeepMind code, it is clearly measured in the number\n # of actions the agent choses\nDISCOUNT_FACTOR = 0.99 # gamma in the Bellman equation\nREPLAY_MEMORY_START_SIZE = 50000 # Number of completely random actions, \n # before the agent starts learning\nMAX_FRAMES = 30000000 # Total number of frames the agent sees \nMEMORY_SIZE = 1000000 # Number of transitions stored in the replay memory\nNO_OP_STEPS = 10 # Number of 'NOOP' or 'FIRE' actions at the beginning of an \n # evaluation episode\nUPDATE_FREQ = 4 # Every four actions a gradient descend step is performed\nHIDDEN = 1024 # Number of filters in the final convolutional layer. The output \n # has the shape (1,1,1024) which is split into two streams. Both \n # the advantage stream and value stream have the shape \n # (1,1,512). This is slightly different from the original \n # implementation but tests I did with the environment Pong \n # have shown that this way the score increases more quickly\nLEARNING_RATE = 0.00001 # Set to 0.00025 in Pong for quicker results\nBS = 32 # Batch size\n\nPATH = \"output/\" # Gifs and checkpoints will be saved here\nSUMMARIES = \"summaries\" # logdir for tensorboard\nRUNID = 'run_1'\n#os.makedirs(PATH, exist_ok=True)\n#os.makedirs(os.path.join(SUMMARIES, RUNID))\nSUMM_WRITER = tf.summary.FileWriter(os.path.join(SUMMARIES, RUNID))\n\natari = Atari(ENV_NAME, NO_OP_STEPS)\n\nprint(\"The environment has the following {} actions: {}\".format(atari.env.action_space.n, \n atari.env.unwrapped.get_action_meanings()))", "The environment has the following 4 actions: ['NOOP', 'FIRE', 'RIGHT', 'LEFT']\n" ], [ "# main DQN and target DQN networks:\nwith tf.device(\"/gpu:0\"):\n with tf.variable_scope('mainDQN'):\n MAIN_DQN = DQN(atari.env.action_space.n, HIDDEN, LEARNING_RATE) # (★★)\n with tf.variable_scope('targetDQN'):\n TARGET_DQN = DQN(atari.env.action_space.n, HIDDEN) # (★★)\n\n init = tf.global_variables_initializer()\n\n MAIN_DQN_VARS = tf.trainable_variables(scope='mainDQN')\n TARGET_DQN_VARS = tf.trainable_variables(scope='targetDQN')\n \nsaver = tf.train.Saver() \n", "_____no_output_____" ], [ "LAYER_IDS = [\"conv1\", \"conv2\", \"conv3\", \"conv4\", \"denseAdvantage\", \n \"denseAdvantageBias\", \"denseValue\", \"denseValueBias\"]\n\n# Scalar summariess for tensorboard: loss, average reward and evaluation score\nwith tf.name_scope('Performance'):\n LOSS_PH = tf.placeholder(tf.float32, shape=None, name='loss_summary')\n LOSS_SUMMARY = tf.summary.scalar('loss', LOSS_PH)\n REWARD_PH = tf.placeholder(tf.float32, shape=None, name='reward_summary')\n REWARD_SUMMARY = tf.summary.scalar('reward', REWARD_PH)\n EVAL_SCORE_PH = tf.placeholder(tf.float32, shape=None, name='evaluation_summary')\n EVAL_SCORE_SUMMARY = tf.summary.scalar('evaluation_score', EVAL_SCORE_PH)\n\nPERFORMANCE_SUMMARIES = tf.summary.merge([LOSS_SUMMARY, REWARD_SUMMARY])\n\nwith tf.device(\"/gpu:0\"):\n # Histogramm summaries for tensorboard: parameters\n with tf.name_scope('Parameters'):\n ALL_PARAM_SUMMARIES = []\n for i, Id in enumerate(LAYER_IDS):\n with tf.name_scope('mainDQN/'):\n MAIN_DQN_KERNEL = tf.summary.histogram(Id, tf.reshape(MAIN_DQN_VARS[i], shape=[-1]))\n ALL_PARAM_SUMMARIES.extend([MAIN_DQN_KERNEL])\n PARAM_SUMMARIES = tf.summary.merge(ALL_PARAM_SUMMARIES)", "_____no_output_____" ], [ "import datetime", "_____no_output_____" ], [ "def 
train():\n \"\"\"Contains the training and evaluation loops\"\"\"\n with tf.device(\"/gpu:0\"):\n my_replay_memory = ReplayMemory(size=MEMORY_SIZE, batch_size=BS) # (★)\n network_updater = TargetNetworkUpdater(MAIN_DQN_VARS, TARGET_DQN_VARS)\n action_getter = ActionGetter(atari.env.action_space.n, \n replay_memory_start_size=REPLAY_MEMORY_START_SIZE, \n max_frames=MAX_FRAMES)\n \n config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = True)\n with tf.Session(config = config) as sess:\n sess.run(init)\n \n frame_number = 0\n rewards = []\n loss_list = []\n \n while frame_number < MAX_FRAMES:\n \n ########################\n ####### Training #######\n ########################\n epoch_frame = 0\n while epoch_frame < EVAL_FREQUENCY:\n terminal_life_lost = atari.reset(sess)\n episode_reward_sum = 0\n for _ in range(MAX_EPISODE_LENGTH):\n # (4★)\n action = action_getter.get_action(sess, frame_number, atari.state, MAIN_DQN) \n # (5★)\n processed_new_frame, reward, terminal, terminal_life_lost, _ = atari.step(sess, action) \n frame_number += 1\n epoch_frame += 1\n episode_reward_sum += reward\n \n # (7★) Store transition in the replay memory\n my_replay_memory.add_experience(action=action, \n frame=processed_new_frame[:, :, 0],\n reward=reward, \n terminal=terminal_life_lost) \n \n if frame_number % UPDATE_FREQ == 0 and frame_number > REPLAY_MEMORY_START_SIZE:\n loss = learn(sess, my_replay_memory, MAIN_DQN, TARGET_DQN,\n BS, gamma = DISCOUNT_FACTOR) # (8★)\n loss_list.append(loss)\n if frame_number % NETW_UPDATE_FREQ == 0 and frame_number > REPLAY_MEMORY_START_SIZE:\n network_updater.update_networks(sess) # (9★)\n \n if terminal:\n terminal = False\n break\n\n rewards.append(episode_reward_sum)\n \n # Output the progress:\n if len(rewards) % 10 == 0:\n # Scalar summaries for tensorboard\n if frame_number > REPLAY_MEMORY_START_SIZE:\n summ = sess.run(PERFORMANCE_SUMMARIES, \n feed_dict={LOSS_PH:np.mean(loss_list), \n REWARD_PH:np.mean(rewards[-100:])})\n \n SUMM_WRITER.add_summary(summ, frame_number)\n loss_list = []\n # Histogramm summaries for tensorboard\n summ_param = sess.run(PARAM_SUMMARIES)\n SUMM_WRITER.add_summary(summ_param, frame_number)\n \n dt_now = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n print(dt_now, len(rewards), frame_number, np.mean(rewards[-100:]))\n with open('rewards.dat', 'a') as reward_file:\n reward_file.write('%s: %d %s %s\\n' % (dt_now, len(rewards), frame_number, np.mean(rewards[-100:])))\n \n ########################\n ###### Evaluation ######\n ########################\n terminal = True\n gif = False\n frames_for_gif = []\n eval_rewards = []\n evaluate_frame_number = 0\n\n for _ in range(EVAL_STEPS):\n if terminal:\n terminal_life_lost = atari.reset(sess, evaluation=True)\n episode_reward_sum = 0\n terminal = False\n \n # Fire (action 1), when a life was lost or the game just started, \n # so that the agent does not stand around doing nothing. 
When playing \n # with other environments, you might want to change this...\n action = 1 if terminal_life_lost else action_getter.get_action(sess, frame_number,\n atari.state, \n MAIN_DQN,\n evaluation=True)\n processed_new_frame, reward, terminal, terminal_life_lost, new_frame = atari.step(sess, action)\n evaluate_frame_number += 1\n episode_reward_sum += reward\n\n if gif: \n frames_for_gif.append(new_frame)\n if terminal:\n eval_rewards.append(episode_reward_sum)\n gif = False # Save only the first game of the evaluation as a gif\n \n print(\"Evaluation score:\\n\", np.mean(eval_rewards)) \n# try:\n# generate_gif(frame_number, frames_for_gif, eval_rewards[0], PATH)\n# except IndexError:\n# print(\"No evaluation game finished\")\n \n #Save the network parameters\n saver.save(sess, PATH+'/my_model', global_step=frame_number)\n frames_for_gif = []\n \n # Show the evaluation score in tensorboard\n summ = sess.run(EVAL_SCORE_SUMMARY, feed_dict={EVAL_SCORE_PH:np.mean(eval_rewards)})\n SUMM_WRITER.add_summary(summ, frame_number)\n with open('rewardsEval.dat', 'a') as eval_reward_file:\n eval_reward_file.write('%s %s\\n' % (frame_number, np.mean(eval_rewards)))", "_____no_output_____" ], [ "train()", "('2018-09-02T08:25:15', 10, 1725, 1.1)\n('2018-09-02T08:25:24', 20, 3679, 1.35)\n('2018-09-02T08:25:33', 30, 5409, 1.2)\n('2018-09-02T08:25:42', 40, 7315, 1.25)\n('2018-09-02T08:25:50', 50, 9081, 1.24)\n('2018-09-02T08:25:58', 60, 10695, 1.15)\n('2018-09-02T08:26:08', 70, 12344, 1.1)\n('2018-09-02T08:26:15', 80, 13911, 1.025)\n('2018-09-02T08:26:26', 90, 15908, 1.0888888888888888)\n('2018-09-02T08:26:34', 100, 17538, 1.03)\n('2018-09-02T08:26:44', 110, 19194, 0.99)\n('2018-09-02T08:26:56', 120, 21276, 1.01)\n('2018-09-02T08:27:05', 130, 23059, 1.04)\n('2018-09-02T08:27:14', 140, 25076, 1.08)\n('2018-09-02T08:27:22', 150, 26957, 1.1)\n('2018-09-02T08:27:31', 160, 28615, 1.1)\n('2018-09-02T08:27:39', 170, 30298, 1.1)\n('2018-09-02T08:27:48', 180, 32358, 1.23)\n('2018-09-02T08:27:58', 190, 34071, 1.18)\n('2018-09-02T08:28:08', 200, 35782, 1.23)\n('2018-09-02T08:28:16', 210, 37470, 1.25)\n('2018-09-02T08:28:25', 220, 39558, 1.25)\n('2018-09-02T08:28:35', 230, 41658, 1.31)\n('2018-09-02T08:28:43', 240, 43423, 1.25)\n('2018-09-02T08:28:51', 250, 45203, 1.22)\n('2018-09-02T08:28:59', 260, 47025, 1.28)\n('2018-09-02T08:29:10', 270, 48829, 1.33)\n('2018-09-02T08:29:23', 280, 50410, 1.22)\n('2018-09-02T08:29:49', 290, 52295, 1.24)\n('2018-09-02T08:30:16', 300, 54137, 1.27)\n('2018-09-02T08:30:40', 310, 55839, 1.27)\n('2018-09-02T08:31:06', 320, 57605, 1.21)\n('2018-09-02T08:31:30', 330, 59315, 1.11)\n('2018-09-02T08:31:55', 340, 60950, 1.07)\n('2018-09-02T08:32:19', 350, 62491, 1.01)\n('2018-09-02T08:32:44', 360, 64283, 0.99)\n('2018-09-02T08:33:12', 370, 66194, 0.99)\n('2018-09-02T08:33:37', 380, 67954, 1.02)\n('2018-09-02T08:34:01', 390, 69697, 0.98)\n('2018-09-02T08:34:28', 400, 71586, 0.99)\n('2018-09-02T08:34:50', 410, 73134, 0.95)\n('2018-09-02T08:35:14', 420, 74837, 0.92)\n('2018-09-02T08:35:39', 430, 76638, 0.96)\n('2018-09-02T08:36:01', 440, 78262, 0.94)\n('2018-09-02T08:36:27', 450, 80030, 0.99)\n('2018-09-02T08:36:56', 460, 82019, 1.05)\n('2018-09-02T08:37:20', 470, 83707, 1.02)\n('2018-09-02T08:37:44', 480, 85423, 1.02)\n('2018-09-02T08:38:11', 490, 87256, 1.04)\n('2018-09-02T08:38:37', 500, 89048, 1.01)\n('2018-09-02T08:39:01', 510, 90715, 1.04)\n('2018-09-02T08:39:26', 520, 92493, 1.07)\n('2018-09-02T08:39:51', 530, 94300, 1.09)\n('2018-09-02T08:40:17', 540, 96089, 
1.13)\n('2018-09-02T08:40:42', 550, 97889, 1.17)\n('2018-09-02T08:41:09', 560, 99773, 1.15)\n('2018-09-02T08:41:32', 570, 101373, 1.12)\n('2018-09-02T08:41:58', 580, 103272, 1.15)\n('2018-09-02T08:42:22', 590, 105030, 1.16)\n('2018-09-02T08:42:49', 600, 106811, 1.16)\n('2018-09-02T08:43:13', 610, 108449, 1.16)\n('2018-09-02T08:43:42', 620, 110459, 1.21)\n('2018-09-02T08:44:10', 630, 112356, 1.21)\n('2018-09-02T08:44:32', 640, 113868, 1.16)\n('2018-09-02T08:44:56', 650, 115576, 1.12)\n('2018-09-02T08:45:23', 660, 117500, 1.13)\n('2018-09-02T08:45:50', 670, 119404, 1.2)\n('2018-09-02T08:46:13', 680, 121047, 1.15)\n('2018-09-02T08:46:37', 690, 122756, 1.13)\n('2018-09-02T08:47:00', 700, 124328, 1.07)\n('2018-09-02T08:47:24', 710, 126024, 1.08)\n('2018-09-02T08:47:49', 720, 127757, 1.01)\n('2018-09-02T08:48:13', 730, 129519, 0.99)\n('2018-09-02T08:48:37', 740, 131205, 1.03)\n('2018-09-02T08:49:01', 750, 132857, 1.01)\n('2018-09-02T08:49:25', 760, 134500, 0.93)\n('2018-09-02T08:49:54', 770, 136416, 0.94)\n('2018-09-02T08:50:17', 780, 138078, 0.94)\n('2018-09-02T08:50:43', 790, 139922, 0.95)\n('2018-09-02T08:51:12', 800, 142018, 1.08)\n('2018-09-02T08:51:38', 810, 143873, 1.12)\n('2018-09-02T08:52:04', 820, 145645, 1.15)\n('2018-09-02T08:52:28', 830, 147355, 1.14)\n('2018-09-02T08:52:52', 840, 148959, 1.12)\n('2018-09-02T08:53:14', 850, 150539, 1.1)\n('2018-09-02T08:53:40', 860, 152372, 1.15)\n('2018-09-02T08:54:01', 870, 153865, 1.05)\n('2018-09-02T08:54:29', 880, 155844, 1.13)\n('2018-09-02T08:54:58', 890, 157865, 1.19)\n('2018-09-02T08:55:22', 900, 159535, 1.11)\n('2018-09-02T08:55:47', 910, 161310, 1.1)\n('2018-09-02T08:56:11', 920, 163061, 1.08)\n('2018-09-02T08:56:39', 930, 165149, 1.16)\n('2018-09-02T08:57:05', 940, 167027, 1.24)\n('2018-09-02T08:57:30', 950, 168820, 1.31)\n('2018-09-02T08:57:56', 960, 170653, 1.31)\n('2018-09-02T08:58:17', 970, 172119, 1.29)\n('2018-09-02T08:58:39', 980, 173683, 1.19)\n('2018-09-02T08:59:07', 990, 175649, 1.17)\n('2018-09-02T08:59:31', 1000, 177345, 1.18)\n('2018-09-02T08:59:54', 1010, 179047, 1.16)\n('2018-09-02T09:00:20', 1020, 180849, 1.17)\n('2018-09-02T09:00:47', 1030, 182690, 1.12)\n('2018-09-02T09:01:16', 1040, 184680, 1.16)\n('2018-09-02T09:01:39', 1050, 186320, 1.12)\n('2018-09-02T09:02:07', 1060, 188215, 1.12)\n('2018-09-02T09:02:38', 1070, 190407, 1.3)\n('2018-09-02T09:03:05', 1080, 192287, 1.4)\n('2018-09-02T09:03:33', 1090, 194277, 1.41)\n('2018-09-02T09:04:04', 1100, 196344, 1.5)\n('2018-09-02T09:04:28', 1110, 197989, 1.49)\n('2018-09-02T09:04:54', 1120, 199859, 1.5)\n('Evaluation score:\\n', 3.0)\n('2018-09-02T09:06:32', 1130, 201688, 1.5)\n('2018-09-02T09:07:00', 1140, 203619, 1.48)\n('2018-09-02T09:07:29', 1150, 205569, 1.54)\n('2018-09-02T09:07:56', 1160, 207497, 1.57)\n('2018-09-02T09:08:22', 1170, 209220, 1.47)\n('2018-09-02T09:08:51', 1180, 211258, 1.49)\n('2018-09-02T09:09:13', 1190, 212798, 1.4)\n('2018-09-02T09:09:36', 1200, 214421, 1.28)\n('2018-09-02T09:10:01', 1210, 216133, 1.28)\n('2018-09-02T09:10:29', 1220, 218110, 1.32)\n('2018-09-02T09:10:55', 1230, 219907, 1.32)\n('2018-09-02T09:11:26', 1240, 222044, 1.35)\n('2018-09-02T09:11:53', 1250, 223967, 1.34)\n('2018-09-02T09:12:24', 1260, 226153, 1.4)\n('2018-09-02T09:12:49', 1270, 227866, 1.4)\n('2018-09-02T09:13:15', 1280, 229709, 1.39)\n('2018-09-02T09:13:41', 1290, 231536, 1.44)\n('2018-09-02T09:14:06', 1300, 233205, 1.46)\n('2018-09-02T09:14:36', 1310, 235314, 1.55)\n('2018-09-02T09:15:06', 1320, 237385, 1.56)\n('2018-09-02T09:15:35', 1330, 239412, 
1.61)\n('2018-09-02T09:16:06', 1340, 241486, 1.62)\n('2018-09-02T09:16:36', 1350, 243591, 1.71)\n('2018-09-02T09:17:05', 1360, 245601, 1.66)\n('2018-09-02T09:17:32', 1370, 247537, 1.72)\n('2018-09-02T09:18:06', 1380, 249788, 1.81)\n('2018-09-02T09:18:35', 1390, 251752, 1.87)\n('2018-09-02T09:19:09', 1400, 254074, 2.02)\n('2018-09-02T09:19:38', 1410, 256131, 2.04)\n('2018-09-02T09:20:04', 1420, 257952, 2.0)\n('2018-09-02T09:20:34', 1430, 259990, 1.99)\n('2018-09-02T09:21:03', 1440, 261976, 1.95)\n('2018-09-02T09:21:35', 1450, 264138, 1.96)\n('2018-09-02T09:22:02', 1460, 266045, 1.94)\n('2018-09-02T09:22:30', 1470, 267995, 1.95)\n('2018-09-02T09:23:02', 1480, 270150, 1.92)\n('2018-09-02T09:23:35', 1490, 272307, 1.96)\n('2018-09-02T09:24:12', 1500, 274774, 2.01)\n('2018-09-02T09:24:39', 1510, 276653, 1.97)\n('2018-09-02T09:25:09', 1520, 278722, 2.05)\n('2018-09-02T09:25:40', 1530, 280797, 2.07)\n('2018-09-02T09:26:13', 1540, 283131, 2.16)\n('2018-09-02T09:26:42', 1550, 285065, 2.08)\n('2018-09-02T09:27:18', 1560, 287577, 2.23)\n('2018-09-02T09:27:51', 1570, 289869, 2.29)\n('2018-09-02T09:28:27', 1580, 292134, 2.29)\n('2018-09-02T09:29:01', 1590, 294336, 2.33)\n('2018-09-02T09:29:40', 1600, 296948, 2.36)\n('2018-09-02T09:30:12', 1610, 299146, 2.45)\n('2018-09-02T09:30:48', 1620, 301504, 2.51)\n('2018-09-02T09:31:24', 1630, 303887, 2.61)\n('2018-09-02T09:32:00', 1640, 306358, 2.66)\n('2018-09-02T09:32:34', 1650, 308686, 2.79)\n('2018-09-02T09:33:08', 1660, 310893, 2.75)\n('2018-09-02T09:33:40', 1670, 312975, 2.73)\n('2018-09-02T09:34:19', 1680, 315476, 2.8)\n('2018-09-02T09:34:57', 1690, 318011, 2.85)\n('2018-09-02T09:35:30', 1700, 320333, 2.83)\n('2018-09-02T09:36:04', 1710, 322646, 2.86)\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb4233905b497efa42d69a72474d64ce7e6424f3
221,838
ipynb
Jupyter Notebook
notebooks/Obj vs Dupliverts Rendering.ipynb
ExcaliburZero/blendplot-data
118bbd8471f57994e4e1fba67e22b1697015552e
[ "MIT" ]
null
null
null
notebooks/Obj vs Dupliverts Rendering.ipynb
ExcaliburZero/blendplot-data
118bbd8471f57994e4e1fba67e22b1697015552e
[ "MIT" ]
null
null
null
notebooks/Obj vs Dupliverts Rendering.ipynb
ExcaliburZero/blendplot-data
118bbd8471f57994e4e1fba67e22b1697015552e
[ "MIT" ]
null
null
null
972.973684
54,684
0.945348
[ [ [ "import pandas as pd\nfrom plotnine import *\n\ndata_file = \"../data/render_info.csv\"\ndata = pd.read_csv(data_file)\n\ndata.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 42 entries, 0 to 41\nData columns (total 7 columns):\ntype 42 non-null object\npoints 42 non-null int64\nt_all 42 non-null float64\nmax_memory 42 non-null float64\navg_cpu 42 non-null float64\nt_plot 42 non-null float64\nt_render 42 non-null float64\ndtypes: float64(5), int64(1), object(1)\nmemory usage: 2.4+ KB\n" ], [ "y_cols = list(data.columns)\n\ndel y_cols[0]\ndel y_cols[0]\n\ny_cols", "_____no_output_____" ], [ "x_label = \"# of Points Plotted\"\ny_labels = [\n \"Total Time (seconds)\",\n \"Max Memory Usage (GB)\",\n \"Average System CPU Usage (%)\",\n \"Time to Create Plot (seconds)\",\n \"Render Time (seconds)\"\n]\n\ntitles = [\n \"Total Time v Points to Plot and Render\",\n \"Max Memory Usage v Points to Plot and Render\",\n \"Average CPU Usage v Points to Plot and Render\",\n \"Time to Plot v Points\",\n \"Time to Render v Points\"\n]", "_____no_output_____" ], [ "for (col, y_lab, t) in zip(y_cols, y_labels, titles):\n print(\n ggplot(data, aes(x = \"points\", y = col)) +\\\n facet_wrap(\"~type\") +\\\n geom_line() +\\\n xlab(x_label) +\\\n ylab(y_lab) +\\\n ggtitle(t)\n )", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb425d1805c6bbd907ae487a6153bd209063c6be
477,597
ipynb
Jupyter Notebook
GHCND Processor Tutorial.ipynb
jwarndt/GHCND
dd998b14d854da6effdbf6de850885005f61b522
[ "MIT" ]
null
null
null
GHCND Processor Tutorial.ipynb
jwarndt/GHCND
dd998b14d854da6effdbf6de850885005f61b522
[ "MIT" ]
null
null
null
GHCND Processor Tutorial.ipynb
jwarndt/GHCND
dd998b14d854da6effdbf6de850885005f61b522
[ "MIT" ]
null
null
null
776.580488
268,532
0.940956
[ [ [ "# Overview", "_____no_output_____" ], [ "This is a python package that helps process, filter, and \nanalyze the Global Historical Climatology Network Daily dataset. ", "_____no_output_____" ], [ "# I. Import\nThe first step is to import all the modules. Do this by using the following import statement. This will give you access to all available modules and classes that the package has to offer.\n \nThere are currently four modules included.\n1. preprocessor\n2. stats\n3. plotter\n4. conversion\n \nThe preprocessor has three classes which serve as the core building blocks for managing and manipulating the GLobal Historical Climatology Network Daily dataset.\n1. StationPreprocessor\n2. Station\n3. ClimateVar ", "_____no_output_____" ] ], [ [ "from GHCND import *", "_____no_output_____" ] ], [ [ "# II. The StationPreprocessor\nAfter you import, you will then make a StationPreprocessor object that points to two files and one directory. \nThe two files reference the necessary station metadata to create the data objects while the directory point to the location of the .dly files. The .dly files are fixed width text files that contain the station data.", "_____no_output_____" ] ], [ [ "sp = preprocessor.StationPreprocessor(\"D:/GHCND_data/ghcnd-stations.txt\",\n \"D:/GHCND_data/ghcnd-inventory.txt\",\n \"D:/GHCND_data/ghcnd_all.tar/ghcnd_all/ghcnd_all\")", "_____no_output_____" ] ], [ [ "# III. Define which stations to fetch", "_____no_output_____" ], [ "The next step is to define which countries you would like to process and summarize data for. In the case of the United States and Canada, you can also specify which state, province, or territory you would like to fetch data for. \n\nTo set the countries and states for processing, use the addCountries and addStates methods.\n```python\nStationPreprocessor.addCountries(country_names)\nStationPreprocessor.addStates(state_names)\n```\nboth country_names and state_names should be a list of strings. In the following code snippet, the United States is added as a country and the states of Wisconsin and Minnesota are added.", "_____no_output_____" ] ], [ [ "sp.addCountries([\"united states\"])\nsp.addStates([\"wisconsin\",\"minnesota\"])", "_____no_output_____" ] ], [ [ "You can print or return the states and countries defined in the StationPreprocessor as follows. Notice that the internal representation of the states and countries are abreviations and not the actual name.", "_____no_output_____" ] ], [ [ "print(sp.states)\nprint(sp.countries)", "['WI', 'MN']\n['US']\n" ], [ "print(sp.stations)", "[]\n" ] ], [ [ "# IV. Build the Station objects", "_____no_output_____" ], [ "After you have defined which stations you want to process data for, you can build the Station objects. Station objects have metadata about the station such as station ID, elevation, latitude, longitude, etc.", "_____no_output_____" ] ], [ [ "sp.addStations()", "_____no_output_____" ] ], [ [ "You can see that there are now station objects in the StationPreprocessor.", "_____no_output_____" ] ], [ [ "len(sp.stations)", "_____no_output_____" ], [ "sp.stations[0] # returns the station object", "_____no_output_____" ], [ "print(sp.stations[0]) # a human readable string", "US1MNAA0002,US,MN,45.1947,-93.3257,263.0,False,False,False,None\n" ] ], [ [ "A station object has many attributes. 
At this point, each station object doesn't have any variables such as precipitation or temperature attached to it because it has not yet read the data files.", "_____no_output_____" ] ], [ [ "print(\"station ID: \", sp.stations[0].stationId)\nprint(\"counry: \", sp.stations[0].country)\nprint(\"state: \", sp.stations[0].state)\nprint(\"latitude: \", sp.stations[0].lat) \nprint(\"longitude: \", sp.stations[0].lon)\nprint(\"elevation: \", sp.stations[0].elev)\nprint(\"is the station part of the u.s. historical climatology network: \", sp.stations[0].hcn)\nprint(\"is the station part of the u.s. climate reference network: \", sp.stations[0].crn)\nprint(\"world meteorological station number: \", sp.stations[0].wmoId)\nprint(\"station variables: \", sp.stations[0].variables)", "('station ID: ', 'US1MNAA0002')\n('counry: ', 'US')\n('state: ', 'MN')\n('latitude: ', 45.1947)\n('longitude: ', -93.3257)\n('elevation: ', 263.0)\n('is the station part of the u.s. historical climatology network: ', False)\n('is the station part of the u.s. climate reference network: ', False)\n('world meteorological station number: ', None)\n('station variables: ', {})\n" ] ], [ [ "# V. Parse the daily data files", "_____no_output_____" ], [ "It is finally time to parse the daily data files and add data to the station objects. After this step, the Station objects will have a dictionary where the keys are variable names such as \"TMIN\", \"TMAX\", \"PRCP\", etc. and the values associated with them are the ClimateVar objects. \n\nUse the \n```python\nstationPreprocessor.processDlyFiles(variable_names)\n```\nmethod to begin reading the data files. variable_names should be a list of strings representing the names of the variables you would like to include.", "_____no_output_____" ] ], [ [ "sp.processDlyFiles([\"TMAX\",\"TMIN\",\"PRCP\"])", "reading 1939 stations\ndone with 200 stations. 10% complete.\ndone with 400 stations. 20% complete.\ndone with 600 stations. 30% complete.\ndone with 800 stations. 41% complete.\ndone with 1000 stations. 51% complete.\ndone with 1200 stations. 61% complete.\ndone with 1400 stations. 72% complete.\ndone with 1600 stations. 82% complete.\ndone with 1800 stations. 92% complete.\ndone reading stations. 852 stations left after filtering\ntotal data read time: 161.262000084\n" ] ], [ [ "The daily data is now loaded into the StationPreprocessor. The following example tells us that there are two variables (\"TMAX\" and \"TMIN\") in the Station object at index 0 of the StationPreprocessor's stations attribute. The keys in the dictionary are associated with the ClimateVar objects. By printing the ClimateVar object we see that it holds maximum temperature data, at a daily temporal resolution, and its record begins on Jan 1, 1893 and ends on Oct 31, 2017.", "_____no_output_____" ] ], [ [ "sp.stations[0].variables", "_____no_output_____" ], [ "print(sp.stations[0].variables[\"TMAX\"])", "TMAX,daily,1893-01-01,2017-10-31\n" ] ], [ [ "The temperature data from the GHCND is in tenths of degrees Celsius. You could convert this data using the conversion.TenthsCelsiusToCelsius() and passing in the StationPreprocessor as an argument. 
", "_____no_output_____" ] ], [ [ "print(sp.stations[0].variables[\"TMAX\"].data[:31])", "[-33.0, -133.0, -244.0, -133.0, -156.0, -189.0, -200.0, -122.0, -67.0, -222.0, -133.0, -189.0, -233.0, -244.0, -244.0, -200.0, -133.0, -189.0, -122.0, -61.0, -44.0, -78.0, -133.0, -100.0, -178.0, -206.0, -244.0, -189.0, -228.0, -322.0, -133.0]\n" ], [ "print(sp.stations[0].variables[\"TMAX\"].timelist[:31])", "[datetime.date(1893, 1, 1), datetime.date(1893, 1, 2), datetime.date(1893, 1, 3), datetime.date(1893, 1, 4), datetime.date(1893, 1, 5), datetime.date(1893, 1, 6), datetime.date(1893, 1, 7), datetime.date(1893, 1, 8), datetime.date(1893, 1, 9), datetime.date(1893, 1, 10), datetime.date(1893, 1, 11), datetime.date(1893, 1, 12), datetime.date(1893, 1, 13), datetime.date(1893, 1, 14), datetime.date(1893, 1, 15), datetime.date(1893, 1, 16), datetime.date(1893, 1, 17), datetime.date(1893, 1, 18), datetime.date(1893, 1, 19), datetime.date(1893, 1, 20), datetime.date(1893, 1, 21), datetime.date(1893, 1, 22), datetime.date(1893, 1, 23), datetime.date(1893, 1, 24), datetime.date(1893, 1, 25), datetime.date(1893, 1, 26), datetime.date(1893, 1, 27), datetime.date(1893, 1, 28), datetime.date(1893, 1, 29), datetime.date(1893, 1, 30), datetime.date(1893, 1, 31)]\n" ] ], [ [ "# VI. Aggregate to monthly mean", "_____no_output_____" ] ], [ [ "stats.calculateMean(sp,\"month\") # this does additional data filtering. See reference materials for details.", "_____no_output_____" ], [ "print(sp.stations[0].variables[\"TMAX\"])", "TMAX,monthly_mean,1893-01-01 00:00:00,2017-10-01 00:00:00\n" ], [ "print(sp.stations[0].variables[\"TMAX\"].data[:12])", "[-16.45806452 -16.092 -8.44230769 nan 12.66774194\n nan 28.2 nan 22.84 14.18709677\n 2.00689655 -9.80333333]\n" ], [ "print(sp.stations[0].variables[\"TMAX\"].timelist[:12])", "[datetime.datetime(1893, 1, 1, 0, 0) datetime.datetime(1893, 2, 1, 0, 0)\n datetime.datetime(1893, 3, 1, 0, 0) datetime.datetime(1893, 4, 1, 0, 0)\n datetime.datetime(1893, 5, 1, 0, 0) datetime.datetime(1893, 6, 1, 0, 0)\n datetime.datetime(1893, 7, 1, 0, 0) datetime.datetime(1893, 8, 1, 0, 0)\n datetime.datetime(1893, 9, 1, 0, 0) datetime.datetime(1893, 10, 1, 0, 0)\n datetime.datetime(1893, 11, 1, 0, 0) datetime.datetime(1893, 12, 1, 0, 0)]\n" ], [ "print(len(sp.stations))", "852\n" ] ], [ [ "# VII. Utilities", "_____no_output_____" ], [ "Up to this point, the data has gone through a filtering process. You can use the plotter module to plot timeseries of the ClimateVars.", "_____no_output_____" ] ], [ [ "plotter.plotStationSeries(sp.stations[0], \"TMAX\")", "_____no_output_____" ], [ "plotter.plotStationSeries(sp.stations[0], \"PRCP\")", "_____no_output_____" ] ], [ [ "There are also export options \n\nexportToShapefile - export the stations to a shapefile. This does not include the data values with it. \nexportToJSON - export the station's climate variable data to json. \nexportToDat - export the data to single column text files. One file for each station and variable in the station preprocessor.", "_____no_output_____" ] ], [ [ "sp.exportToShapefile(\"D:/GHCND_data/minnesota_wisconsin_ghcnd.shp\")\nsp.exportToJSON(\"D:/GHCND_data/minnesota_wisconsin_monthlydata.json\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb42647885b2b485ceaf128aefaaf3f831aa2723
16,905
ipynb
Jupyter Notebook
Homework/PyTorch Demo/Testing PyTorch.ipynb
schultzp2020/MSCS-335
2aabd5d4500aa5771967cf51eed2720fa670d9f5
[ "MIT" ]
null
null
null
Homework/PyTorch Demo/Testing PyTorch.ipynb
schultzp2020/MSCS-335
2aabd5d4500aa5771967cf51eed2720fa670d9f5
[ "MIT" ]
null
null
null
Homework/PyTorch Demo/Testing PyTorch.ipynb
schultzp2020/MSCS-335
2aabd5d4500aa5771967cf51eed2720fa670d9f5
[ "MIT" ]
null
null
null
22.937585
95
0.458563
[ [ [ "import torch\nimport numpy as np", "_____no_output_____" ], [ "data = [[1, 2], [3, 4]]\nx_data = torch.tensor(data)", "_____no_output_____" ], [ "np_array = np.array(data)\nx_np = torch.from_numpy(np_array)", "_____no_output_____" ], [ "x_ones = torch.ones_like(x_data) # retains the properties of x_data\nprint(f\"Ones Tensor: \\n {x_ones} \\n\")\n\nx_rand = torch.rand_like(x_data, dtype=torch.float) # overrides the datatype of x_data\nprint(f\"Random Tensor: \\n {x_rand} \\n\")", "Ones Tensor: \n tensor([[1, 1],\n [1, 1]]) \n\nRandom Tensor: \n tensor([[0.1317, 0.7023],\n [0.3061, 0.7332]]) \n\n" ], [ "shape = (2, 3,)\nrand_tensor = torch.rand(shape)\nones_tensor = torch.ones(shape)\nzeros_tensor = torch.zeros(shape)\n\nprint(f\"Random Tensor: \\n {rand_tensor} \\n\")\nprint(f\"Ones Tensor: \\n {ones_tensor} \\n\")\nprint(f\"Zeros Tensor: \\n {zeros_tensor}\")", "Random Tensor: \n tensor([[0.4485, 0.7829, 0.5109],\n [0.1946, 0.4556, 0.1215]]) \n\nOnes Tensor: \n tensor([[1., 1., 1.],\n [1., 1., 1.]]) \n\nZeros Tensor: \n tensor([[0., 0., 0.],\n [0., 0., 0.]])\n" ], [ "tensor = torch.rand(3, 4)\n\nprint(f\"Shape of tensor: {tensor.shape}\")\nprint(f\"Datatype of tensor: {tensor.dtype}\")\nprint(f\"Device tensor is stored on: {tensor.device}\")", "Shape of tensor: torch.Size([3, 4])\nDatatype of tensor: torch.float32\nDevice tensor is stored on: cpu\n" ], [ "# We move our tensor to the GPU if available\nif torch.cuda.is_available():\n tensor = tensor.to('cuda')\n print(f\"Device tensor is stored on: {tensor.device}\")", "Device tensor is stored on: cuda:0\n" ], [ "tensor = torch.ones(4, 4)\ntensor[:,1] = 0\nprint(tensor)", "tensor([[1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.]])\n" ], [ "t1 = torch.cat([tensor, tensor, tensor], dim=1)\nprint(t1)", "tensor([[1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],\n [1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],\n [1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],\n [1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.]])\n" ], [ "# This computes the element-wise product\nprint(f\"tensor.mul(tensor) \\n {tensor.mul(tensor)} \\n\")\n# Alternative syntax:\nprint(f\"tensor * tensor \\n {tensor * tensor}\")", "tensor.mul(tensor) \n tensor([[1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.]]) \n\ntensor * tensor \n tensor([[1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.]])\n" ], [ "print(f\"tensor.matmul(tensor.T) \\n {tensor.matmul(tensor.T)} \\n\")\n# Alternative syntax:\nprint(f\"tensor @ tensor.T \\n {tensor @ tensor.T}\")", "tensor.matmul(tensor.T) \n tensor([[3., 3., 3., 3.],\n [3., 3., 3., 3.],\n [3., 3., 3., 3.],\n [3., 3., 3., 3.]]) \n\ntensor @ tensor.T \n tensor([[3., 3., 3., 3.],\n [3., 3., 3., 3.],\n [3., 3., 3., 3.],\n [3., 3., 3., 3.]])\n" ], [ "print(tensor, \"\\n\")\ntensor.add_(5)\nprint(tensor)", "tensor([[1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.]]) \n\ntensor([[6., 5., 6., 6.],\n [6., 5., 6., 6.],\n [6., 5., 6., 6.],\n [6., 5., 6., 6.]])\n" ], [ "t = torch.ones(5)\nprint(f\"t: {t}\")\nn = t.numpy()\nprint(f\"n: {n}\")", "t: tensor([1., 1., 1., 1., 1.])\nn: [1. 1. 1. 1. 1.]\n" ], [ "t.add_(1)\nprint(f\"t: {t}\")\nprint(f\"n: {n}\")", "t: tensor([2., 2., 2., 2., 2.])\nn: [2. 2. 2. 2. 2.]\n" ], [ "n = np.ones(5)\nt = torch.from_numpy(n)", "_____no_output_____" ], [ "np.add(n, 1, out=n)\nprint(f\"t: {t}\")\nprint(f\"n: {n}\")", "t: tensor([2., 2., 2., 2., 2.], dtype=torch.float64)\nn: [2. 2. 2. 2. 
2.]\n" ], [ "import torch, torchvision\nmodel = torchvision.models.resnet18(pretrained=True)\ndata = torch.rand(1, 3, 64, 64)\nlabels = torch.rand(1, 1000)", "_____no_output_____" ], [ "prediction = model(data) # forward pass", "_____no_output_____" ], [ "loss = (prediction - labels).sum()\nloss.backward() # backward pass", "_____no_output_____" ], [ "optim = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)", "_____no_output_____" ], [ "optim.step() #gradient descent", "_____no_output_____" ], [ "import torch\n\na = torch.tensor([2., 3.], requires_grad=True)\nb = torch.tensor([6., 4.], requires_grad=True)", "_____no_output_____" ], [ "Q = 3*a**3 - b**2", "_____no_output_____" ], [ "external_grad = torch.tensor([1., 1.])\nQ.backward(gradient=external_grad)", "_____no_output_____" ], [ "# check if collected gradients are correct\nprint(9*a**2 == a.grad)\nprint(-2*b == b.grad)", "tensor([True, True])\ntensor([True, True])\n" ], [ "x = torch.rand(5, 5)\ny = torch.rand(5, 5)\nz = torch.rand((5, 5), requires_grad=True)\n\na = x + y\nprint(f\"Does `a` require gradients? : {a.requires_grad}\")\nb = x + z\nprint(f\"Does `b` require gradients?: {b.requires_grad}\")", "Does `a` require gradients? : False\nDoes `b` require gradients?: True\n" ], [ "from torch import nn, optim\n\nmodel = torchvision.models.resnet18(pretrained=True)\n\n# Freeze all the parameters in the network\nfor param in model.parameters():\n param.requires_grad = False", "_____no_output_____" ], [ "model.fc = nn.Linear(512, 10)", "_____no_output_____" ], [ "# Optimize only the classifier\noptimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)", "_____no_output_____" ], [ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n # 1 input image channel, 6 output channels, 5x5 square convolution\n # kernel\n self.conv1 = nn.Conv2d(1, 6, 5)\n self.conv2 = nn.Conv2d(6, 16, 5)\n # an affine operation: y = Wx + b\n self.fc1 = nn.Linear(16 * 5 * 5, 120) # 5*5 from image dimension\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n # Max pooling over a (2, 2) window\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n # If the size is a square, you can specify with a single number\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = torch.flatten(x, 1) # flatten all dimensions except the batch dimension\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nnet = Net()\nprint(net)", "Net(\n (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))\n (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))\n (fc1): Linear(in_features=400, out_features=120, bias=True)\n (fc2): Linear(in_features=120, out_features=84, bias=True)\n (fc3): Linear(in_features=84, out_features=10, bias=True)\n)\n" ], [ "params = list(net.parameters())\nprint(len(params))\nprint(params[0].size()) # conv1's .weight", "10\ntorch.Size([6, 1, 5, 5])\n" ], [ "input = torch.randn(1, 1, 32, 32)\nout = net(input)\nprint(out)", "tensor([[-0.0117, -0.1003, 0.0350, 0.0039, -0.0858, 0.0638, 0.1207, -0.0792,\n 0.0893, 0.0428]], grad_fn=<AddmmBackward0>)\n" ], [ "net.zero_grad()\nout.backward(torch.randn(1, 10))", "_____no_output_____" ], [ "output = net(input)\ntarget = torch.randn(10) # a dummy target, for example\ntarget = target.view(1, -1) # make it the same shape as output\ncriterion = nn.MSELoss()\n\nloss = criterion(output, target)\nprint(loss)", "tensor(0.8787, 
grad_fn=<MseLossBackward0>)\n" ], [ "print(loss.grad_fn) # MSELoss\nprint(loss.grad_fn.next_functions[0][0]) # Linear\nprint(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU", "<MseLossBackward0 object at 0x0000024D3BAA5EE0>\n<AddmmBackward0 object at 0x0000024D3552BCD0>\n<AccumulateGrad object at 0x0000024D3552BC40>\n" ], [ "net.zero_grad() # zeroes the gradient buffers of all parameters\n\nprint('conv1.bias.grad before backward')\nprint(net.conv1.bias.grad)\n\nloss.backward()\n\nprint('conv1.bias.grad after backward')\nprint(net.conv1.bias.grad)", "conv1.bias.grad before backward\ntensor([0., 0., 0., 0., 0., 0.])\nconv1.bias.grad after backward\ntensor([-0.0067, -0.0129, -0.0153, 0.0059, 0.0114, 0.0131])\n" ], [ "learning_rate = 0.01\nfor f in net.parameters():\n f.data.sub_(f.grad.data * learning_rate)", "_____no_output_____" ], [ "import torch.optim as optim\n\n# create your optimizer\noptimizer = optim.SGD(net.parameters(), lr=0.01)\n\n# in your training loop:\noptimizer.zero_grad() # zero the gradient buffers\noutput = net(input)\nloss = criterion(output, target)\nloss.backward()\noptimizer.step() # Does the update", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb4268b38cd2e29410951717be767f7f2f29c047
375,702
ipynb
Jupyter Notebook
Speech_emotion_classification.ipynb
VikriAulia/Tensorflow-Deep-Learning-Speech-Recognition
2abc0404d84fa3b683b50b2211613f60e87cbf2b
[ "MIT" ]
null
null
null
Speech_emotion_classification.ipynb
VikriAulia/Tensorflow-Deep-Learning-Speech-Recognition
2abc0404d84fa3b683b50b2211613f60e87cbf2b
[ "MIT" ]
null
null
null
Speech_emotion_classification.ipynb
VikriAulia/Tensorflow-Deep-Learning-Speech-Recognition
2abc0404d84fa3b683b50b2211613f60e87cbf2b
[ "MIT" ]
null
null
null
81.709874
153,648
0.673976
[ [ [ "<a href=\"https://colab.research.google.com/github/VikriAulia/Tensorflow-Deep-Learning-Speech-Recognition/blob/master/Speech_emotion_classification.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "## Importing the required libraries", "_____no_output_____" ] ], [ [ "import librosa\nimport librosa.display\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom matplotlib.pyplot import specgram\nimport keras\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding\nfrom keras.layers import LSTM\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical, get_file\nfrom keras.layers import Input, Flatten, Dropout, Activation\nfrom keras.layers import Conv1D, MaxPooling1D, AveragePooling1D\nfrom keras.models import Model\nfrom keras.callbacks import *\nfrom sklearn.metrics import confusion_matrix\nfrom fnmatch import fnmatch\nimport pandas as pd\nimport os\n%load_ext tensorboard\nRUN = 1", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive', force_remount=True)", "Mounted at /content/drive\n" ], [ "from keras import regularizers\nimport os\nRawData = '/content/RawData/SpeechEmotion/'", "_____no_output_____" ], [ "!ls /content/drive/My\\ Drive/Dataset/Speech/SpeechEmotion/", "SpeechEmotion.zip\n" ], [ "#!unzip -q /content/drive/My\\ Drive/Dataset/Speech/SpeechEmotion/SpeechEmotion.zip -d RawData ", "_____no_output_____" ], [ "def getlistOfFiles(dirName):\n allFiles = list()\n for path, subdirs, files in os.walk(dirName):\n for name in files:\n if fnmatch(name, '*.wav'):\n allFiles.append(name)\n return allFiles", "_____no_output_____" ], [ "mylist= getlistOfFiles(RawData)", "_____no_output_____" ], [ "print(type(mylist))\nprint(len(mylist))", "<class 'list'>\n2932\n" ], [ "print(mylist[1400])", "03-01-06-02-02-01-18.wav\n" ], [ "print(mylist[400][6:-16])", "\n" ] ], [ [ "## Plotting the audio file's waveform and its spectrogram", "_____no_output_____" ] ], [ [ "data, sampling_rate = librosa.load(RawData+'a01.wav')", "_____no_output_____" ], [ "% pylab inline\nimport os\nimport pandas as pd\nimport librosa\nimport glob \n\nplt.figure(figsize=(15, 5))\nlibrosa.display.waveplot(data, sr=sampling_rate)", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "import matplotlib.pyplot as plt\nimport scipy.io.wavfile\nimport numpy as np\nimport sys\n\n\nsr,x = scipy.io.wavfile.read(RawData+'a01.wav')\n\n## Parameters: 10ms step, 30ms window\nnstep = int(sr * 0.01)\nnwin = int(sr * 0.03)\nnfft = nwin\n\nwindow = np.hamming(nwin)\n\n## will take windows x[n1:n2]. 
generate\n## and loop over n2 such that all frames\n## fit within the waveform\nnn = range(nwin, len(x), nstep)\n\nX = np.zeros( (len(nn), nfft//2) )\n\nfor i,n in enumerate(nn):\n xseg = x[n-nwin:n]\n z = np.fft.fft(window * xseg, nfft)\n X[i,:] = np.log(np.abs(z[:nfft//2]))\n\nplt.imshow(X.T, interpolation='nearest',\n origin='lower',\n aspect='auto')\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Setting the labels", "_____no_output_____" ] ], [ [ "feeling_list=[]\nfor item in mylist:\n if item[6:-16]=='02' and int(item[18:-4])%2==0:\n feeling_list.append('female_calm')\n elif item[6:-16]=='01' and int(item[18:-4])%2==1:\n feeling_list.append('male_neutral')\n elif item[6:-16]=='02' and int(item[18:-4])%2==1:\n feeling_list.append('male_calm')\n elif item[6:-16]=='01' and int(item[18:-4])%2==0:\n feeling_list.append('female_neutral')\n elif item[6:-16]=='03' and int(item[18:-4])%2==0:\n feeling_list.append('female_happy')\n elif item[6:-16]=='03' and int(item[18:-4])%2==1:\n feeling_list.append('male_happy')\n elif item[6:-16]=='04' and int(item[18:-4])%2==0:\n feeling_list.append('female_sad')\n elif item[6:-16]=='04' and int(item[18:-4])%2==1:\n feeling_list.append('male_sad')\n elif item[6:-16]=='05' and int(item[18:-4])%2==0:\n feeling_list.append('female_angry')\n elif item[6:-16]=='05' and int(item[18:-4])%2==1:\n feeling_list.append('male_angry')\n elif item[6:-16]=='06' and int(item[18:-4])%2==0:\n feeling_list.append('female_fearful')\n elif item[6:-16]=='06' and int(item[18:-4])%2==1:\n feeling_list.append('male_fearful')\n elif item[6:-16]=='07' and int(item[18:-4])%2==0:\n feeling_list.append('female_disgust')\n elif item[6:-16]=='07' and int(item[18:-4])%2==1:\n feeling_list.append('male_disgust')\n elif item[6:-16]=='08' and int(item[18:-4])%2==0:\n feeling_list.append('female_surprise')\n elif item[6:-16]=='08' and int(item[18:-4])%2==1:\n feeling_list.append('male_surprise')\n elif item[:1]=='a':\n feeling_list.append('male_angry')\n elif item[:1]=='d':\n feeling_list.append('male_disgust')\n elif item[:1]=='f':\n feeling_list.append('male_fearful')\n elif item[:1]=='h':\n feeling_list.append('male_happy')\n elif item[:1]=='n':\n feeling_list.append('male_neutral')\n elif item[:2]=='sa':\n feeling_list.append('male_sad')\n elif item[:2]=='su':\n feeling_list.append('male_surprise')\n else:\n print(item)", "_____no_output_____" ], [ "labels = pd.DataFrame(feeling_list)", "_____no_output_____" ], [ "len(labels)", "_____no_output_____" ], [ "len(mylist)", "_____no_output_____" ] ], [ [ "## Getting the features of audio files using librosa", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(columns=['feature'])\nfor index,y in enumerate(mylist):\n X, sample_rate = librosa.load(RawData+y, res_type='kaiser_fast',duration=3, sr=16000, offset=0.5)\n sample_rate = np.array(sample_rate)\n mfccs = np.mean(librosa.feature.mfcc(y=X, \n sr=sample_rate, \n n_mfcc=80),\n axis=0)\n feature = mfccs\n #[float(i) for i in feature]\n #feature1=feature[:135]\n df.loc[index] = [-(feature/100)] ", "_____no_output_____" ], [ "df[:5]", "_____no_output_____" ], [ "df3 = pd.DataFrame(df['feature'].values.tolist())", "_____no_output_____" ] ], [ [ "df3[:5]", "_____no_output_____" ] ], [ [ "newdf = pd.concat([df3,labels], axis=1)", "_____no_output_____" ], [ "rnewdf = newdf.rename(index=str, columns={\"0\": \"label\"})", "_____no_output_____" ], [ "rnewdf[:5]", "_____no_output_____" ], [ "from sklearn.utils import shuffle\nrnewdf = shuffle(newdf)\nrnewdf[:10]", "_____no_output_____" ], [ 
"rnewdf=rnewdf.fillna(0)\n#rnewdf=rnewdf.dropna()", "_____no_output_____" ], [ "len(rnewdf)", "_____no_output_____" ] ], [ [ "## Dividing the data into test and train", "_____no_output_____" ] ], [ [ "newdf1 = np.random.rand(len(rnewdf)) < 0.8\ntrain = rnewdf[newdf1]\ntest = rnewdf[~newdf1]", "_____no_output_____" ], [ "train = shuffle(train)\ntrain[260:270]", "_____no_output_____" ], [ "trainfeatures = train.iloc[:, :-1]", "_____no_output_____" ], [ "trainlabel = train.iloc[:, -1:]", "_____no_output_____" ], [ "testfeatures = test.iloc[:, :-1]", "_____no_output_____" ], [ "testlabel = test.iloc[:, -1:]", "_____no_output_____" ], [ "from keras.utils import np_utils\nfrom sklearn.preprocessing import LabelEncoder\n\nX_train = np.array(trainfeatures)\ny_train = np.array(trainlabel)\nX_test = np.array(testfeatures)\ny_test = np.array(testlabel)\n\nlb = LabelEncoder()\n\ny_train = np_utils.to_categorical(lb.fit_transform(y_train))\ny_test = np_utils.to_categorical(lb.fit_transform(y_test))\n", "/usr/local/lib/python3.6/dist-packages/sklearn/preprocessing/label.py:235: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n" ], [ "X_train.shape", "_____no_output_____" ] ], [ [ "## Changing dimension for CNN model", "_____no_output_____" ] ], [ [ "x_traincnn =np.expand_dims(X_train, axis=2)\nx_testcnn= np.expand_dims(X_test, axis=2)", "_____no_output_____" ], [ "x_traincnn.shape", "_____no_output_____" ], [ "model = Sequential()\n\nmodel.add(Conv1D(128, 5,padding='same',\n input_shape=(94,1)))\nmodel.add(Activation('relu'))\nmodel.add(Conv1D(128, 5,padding='same'))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.1))\nmodel.add(MaxPooling1D(pool_size=(8)))\nmodel.add(Conv1D(128, 5,padding='same',))\nmodel.add(Activation('relu'))\nmodel.add(Conv1D(128, 5,padding='same',))\nmodel.add(Activation('relu'))\nmodel.add(Conv1D(128, 5,padding='same',))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Conv1D(128, 5,padding='same',))\nmodel.add(Activation('relu'))\nmodel.add(Flatten())\n#model.add(Dense(128))\n#model.add(Activation('relu'))\n#model.add(Dropout(0.2))\n#model.add(Dense(64))\n#model.add(Activation('relu'))\n#model.add(Dropout(0.2))\nmodel.add(Dense(16))\nmodel.add(Activation('softmax'))", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:66: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:541: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4432: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:148: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3733: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4267: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n\n" ], [ "model.summary()", "Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv1d_1 (Conv1D) (None, 94, 128) 768 \n_________________________________________________________________\nactivation_1 (Activation) (None, 94, 128) 0 \n_________________________________________________________________\nconv1d_2 (Conv1D) (None, 94, 128) 82048 \n_________________________________________________________________\nactivation_2 (Activation) (None, 94, 128) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 94, 128) 0 \n_________________________________________________________________\nmax_pooling1d_1 (MaxPooling1 (None, 11, 128) 0 \n_________________________________________________________________\nconv1d_3 (Conv1D) (None, 11, 128) 82048 \n_________________________________________________________________\nactivation_3 (Activation) (None, 11, 128) 0 \n_________________________________________________________________\nconv1d_4 (Conv1D) (None, 11, 128) 82048 \n_________________________________________________________________\nactivation_4 (Activation) (None, 11, 128) 0 \n_________________________________________________________________\nconv1d_5 (Conv1D) (None, 11, 128) 82048 \n_________________________________________________________________\nactivation_5 (Activation) (None, 11, 128) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 11, 128) 0 \n_________________________________________________________________\nconv1d_6 (Conv1D) (None, 11, 128) 82048 \n_________________________________________________________________\nactivation_6 (Activation) (None, 11, 128) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 1408) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 16) 22544 \n_________________________________________________________________\nactivation_7 (Activation) (None, 16) 0 \n=================================================================\nTotal params: 433,552\nTrainable params: 433,552\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/optimizers.py:793: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3576: The name tf.log is deprecated. 
Please use tf.math.log instead.\n\n" ] ], [ [ "### Removed the whole training part for avoiding unnecessary long epochs list", "_____no_output_____" ] ], [ [ "import math\ndef step_decay(epoch):\n initial_lrate = 0.001\n drop = 0.2\n epochs_drop = 10.0\n lrate = initial_lrate * math.pow(drop, \n math.floor((1+epoch)/epochs_drop))\n \n if (lrate < 4e-5):\n lrate = 4e-5\n \n print('Changing learning rate to {}'.format(lrate))\n return lrate", "_____no_output_____" ], [ "def dotrain():\n global RUN\n RUN+=1\n print(\"RUN {}\".format(RUN))\n LOG_DIR = '/content/drive/My Drive/Model/speechEmotion/output/training_logs/run-{}'.format(RUN)\n LOG_FILE_PATH = LOG_DIR + '/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'\n\n tensorboard = TensorBoard(log_dir=LOG_DIR, histogram_freq=1, write_grads=False, write_graph=False)\n #tensorboard = TensorBoard(log_dir=LOG_DIR, write_graph=True)\n checkpoint = ModelCheckpoint(filepath=LOG_FILE_PATH, monitor='val_loss', verbose=1, save_best_only=True)\n early_stopping = EarlyStopping(monitor='val_loss', patience=50, verbose=1)\n lrate = LearningRateScheduler(step_decay)\n history=model.fit(x_traincnn, \n y_train, \n batch_size=32, \n epochs=1000, \n validation_data=(x_testcnn, y_test),\n callbacks=[tensorboard, checkpoint, early_stopping, lrate])\n return history", "_____no_output_____" ], [ "print(len(x_traincnn))", "2281\n" ], [ "cnnhistory = dotrain()", "RUN 2\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1033: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1020: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3005: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n\nTrain on 2281 samples, validate on 651 samples\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:190: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:197: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:207: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:216: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:223: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/callbacks.py:1068: The name tf.summary.histogram is deprecated. 
Please use tf.compat.v1.summary.histogram instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/callbacks.py:1122: The name tf.summary.merge_all is deprecated. Please use tf.compat.v1.summary.merge_all instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/callbacks.py:1128: The name tf.summary.FileWriter is deprecated. Please use tf.compat.v1.summary.FileWriter instead.\n\nEpoch 1/1000\nChanging learning rate to 0.001\n2281/2281 [==============================] - 3s 1ms/step - loss: 2.7204 - acc: 0.0886 - val_loss: 2.6425 - val_acc: 0.0983\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/callbacks.py:1265: The name tf.Summary is deprecated. Please use tf.compat.v1.Summary instead.\n\n\nEpoch 00001: val_loss improved from inf to 2.64249, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-01-2.6425.hdf5\nEpoch 2/1000\nChanging learning rate to 0.001\n2281/2281 [==============================] - 1s 409us/step - loss: 2.5772 - acc: 0.1271 - val_loss: 2.5050 - val_acc: 0.1551\n\nEpoch 00002: val_loss improved from 2.64249 to 2.50496, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-02-2.5050.hdf5\nEpoch 3/1000\nChanging learning rate to 0.001\n2281/2281 [==============================] - 1s 441us/step - loss: 2.5019 - acc: 0.1495 - val_loss: 2.4560 - val_acc: 0.1613\n\nEpoch 00003: val_loss improved from 2.50496 to 2.45605, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-03-2.4560.hdf5\nEpoch 4/1000\nChanging learning rate to 0.001\n2281/2281 [==============================] - 1s 472us/step - loss: 2.4280 - acc: 0.1697 - val_loss: 2.4236 - val_acc: 0.1935\n\nEpoch 00004: val_loss improved from 2.45605 to 2.42362, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-04-2.4236.hdf5\nEpoch 5/1000\nChanging learning rate to 0.001\n2281/2281 [==============================] - 1s 478us/step - loss: 2.3783 - acc: 0.1833 - val_loss: 2.3744 - val_acc: 0.1905\n\nEpoch 00005: val_loss improved from 2.42362 to 2.37437, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-05-2.3744.hdf5\nEpoch 6/1000\nChanging learning rate to 0.001\n2281/2281 [==============================] - 1s 486us/step - loss: 2.3323 - acc: 0.1929 - val_loss: 2.3277 - val_acc: 0.1997\n\nEpoch 00006: val_loss improved from 2.37437 to 2.32772, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-06-2.3277.hdf5\nEpoch 7/1000\nChanging learning rate to 0.001\n2281/2281 [==============================] - 1s 484us/step - loss: 2.2905 - acc: 0.2166 - val_loss: 2.3233 - val_acc: 0.2212\n\nEpoch 00007: val_loss improved from 2.32772 to 2.32331, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-07-2.3233.hdf5\nEpoch 8/1000\nChanging learning rate to 0.001\n2281/2281 [==============================] - 1s 483us/step - loss: 2.2418 - acc: 0.2245 - val_loss: 2.3636 - val_acc: 0.1951\n\nEpoch 00008: val_loss did not improve from 2.32331\nEpoch 9/1000\nChanging learning rate to 0.001\n2281/2281 [==============================] - 1s 479us/step - loss: 2.1912 - acc: 0.2420 - val_loss: 2.2319 - val_acc: 0.2212\n\nEpoch 00009: val_loss improved from 2.32331 to 2.23193, saving model to /content/drive/My 
Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-09-2.2319.hdf5\nEpoch 10/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 472us/step - loss: 2.0813 - acc: 0.2775 - val_loss: 2.1872 - val_acc: 0.2473\n\nEpoch 00010: val_loss improved from 2.23193 to 2.18724, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-10-2.1872.hdf5\nEpoch 11/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 479us/step - loss: 2.0255 - acc: 0.2950 - val_loss: 2.2129 - val_acc: 0.2504\n\nEpoch 00011: val_loss did not improve from 2.18724\nEpoch 12/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 484us/step - loss: 2.0087 - acc: 0.3051 - val_loss: 2.1637 - val_acc: 0.2627\n\nEpoch 00012: val_loss improved from 2.18724 to 2.16374, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-12-2.1637.hdf5\nEpoch 13/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 482us/step - loss: 1.9728 - acc: 0.3043 - val_loss: 2.1509 - val_acc: 0.2519\n\nEpoch 00013: val_loss improved from 2.16374 to 2.15087, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-13-2.1509.hdf5\nEpoch 14/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 476us/step - loss: 1.9372 - acc: 0.3253 - val_loss: 2.1290 - val_acc: 0.2550\n\nEpoch 00014: val_loss improved from 2.15087 to 2.12900, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-14-2.1290.hdf5\nEpoch 15/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 475us/step - loss: 1.9211 - acc: 0.3314 - val_loss: 2.1152 - val_acc: 0.2596\n\nEpoch 00015: val_loss improved from 2.12900 to 2.11520, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-15-2.1152.hdf5\nEpoch 16/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 476us/step - loss: 1.9042 - acc: 0.3301 - val_loss: 2.0843 - val_acc: 0.2704\n\nEpoch 00016: val_loss improved from 2.11520 to 2.08425, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-16-2.0843.hdf5\nEpoch 17/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 483us/step - loss: 1.8898 - acc: 0.3363 - val_loss: 2.0998 - val_acc: 0.2611\n\nEpoch 00017: val_loss did not improve from 2.08425\nEpoch 18/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 475us/step - loss: 1.8515 - acc: 0.3455 - val_loss: 2.1604 - val_acc: 0.2442\n\nEpoch 00018: val_loss did not improve from 2.08425\nEpoch 19/1000\nChanging learning rate to 0.0002\n2281/2281 [==============================] - 1s 489us/step - loss: 1.8370 - acc: 0.3573 - val_loss: 2.0993 - val_acc: 0.2657\n\nEpoch 00019: val_loss did not improve from 2.08425\nEpoch 20/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 467us/step - loss: 1.7836 - acc: 0.3770 - val_loss: 2.0749 - val_acc: 0.2734\n\nEpoch 00020: val_loss improved from 2.08425 to 2.07488, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-20-2.0749.hdf5\nEpoch 21/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 
480us/step - loss: 1.7810 - acc: 0.3783 - val_loss: 2.0627 - val_acc: 0.2704\n\nEpoch 00021: val_loss improved from 2.07488 to 2.06271, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-21-2.0627.hdf5\nEpoch 22/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 482us/step - loss: 1.7665 - acc: 0.3832 - val_loss: 2.0656 - val_acc: 0.2657\n\nEpoch 00022: val_loss did not improve from 2.06271\nEpoch 23/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 476us/step - loss: 1.7561 - acc: 0.3819 - val_loss: 2.0648 - val_acc: 0.2704\n\nEpoch 00023: val_loss did not improve from 2.06271\nEpoch 24/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 488us/step - loss: 1.7514 - acc: 0.3871 - val_loss: 2.0540 - val_acc: 0.2750\n\nEpoch 00024: val_loss improved from 2.06271 to 2.05403, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-24-2.0540.hdf5\nEpoch 25/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 490us/step - loss: 1.7495 - acc: 0.3906 - val_loss: 2.0512 - val_acc: 0.2719\n\nEpoch 00025: val_loss improved from 2.05403 to 2.05121, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-25-2.0512.hdf5\nEpoch 26/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 476us/step - loss: 1.7525 - acc: 0.3893 - val_loss: 2.0648 - val_acc: 0.2673\n\nEpoch 00026: val_loss did not improve from 2.05121\nEpoch 27/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 471us/step - loss: 1.7371 - acc: 0.3959 - val_loss: 2.0622 - val_acc: 0.2688\n\nEpoch 00027: val_loss did not improve from 2.05121\nEpoch 28/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 471us/step - loss: 1.7295 - acc: 0.3954 - val_loss: 2.0569 - val_acc: 0.2765\n\nEpoch 00028: val_loss did not improve from 2.05121\nEpoch 29/1000\nChanging learning rate to 4.000000000000001e-05\n2281/2281 [==============================] - 1s 468us/step - loss: 1.7263 - acc: 0.3989 - val_loss: 2.0573 - val_acc: 0.2734\n\nEpoch 00029: val_loss did not improve from 2.05121\nEpoch 30/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 483us/step - loss: 1.7264 - acc: 0.4003 - val_loss: 2.0608 - val_acc: 0.2704\n\nEpoch 00030: val_loss did not improve from 2.05121\nEpoch 31/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 478us/step - loss: 1.7159 - acc: 0.4020 - val_loss: 2.0463 - val_acc: 0.2765\n\nEpoch 00031: val_loss improved from 2.05121 to 2.04629, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-31-2.0463.hdf5\nEpoch 32/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 473us/step - loss: 1.7198 - acc: 0.4046 - val_loss: 2.0536 - val_acc: 0.2750\n\nEpoch 00032: val_loss did not improve from 2.04629\nEpoch 33/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 476us/step - loss: 1.7120 - acc: 0.3998 - val_loss: 2.0648 - val_acc: 0.2596\n\nEpoch 00033: val_loss did not improve from 2.04629\nEpoch 34/1000\nChanging learning rate to 4e-05\n2281/2281 
[==============================] - 1s 473us/step - loss: 1.7065 - acc: 0.4046 - val_loss: 2.0503 - val_acc: 0.2704\n\nEpoch 00034: val_loss did not improve from 2.04629\nEpoch 35/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 476us/step - loss: 1.7104 - acc: 0.4055 - val_loss: 2.0597 - val_acc: 0.2688\n\nEpoch 00035: val_loss did not improve from 2.04629\nEpoch 36/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 475us/step - loss: 1.7018 - acc: 0.4095 - val_loss: 2.0496 - val_acc: 0.2765\n\nEpoch 00036: val_loss did not improve from 2.04629\nEpoch 37/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 473us/step - loss: 1.6946 - acc: 0.4143 - val_loss: 2.0366 - val_acc: 0.2811\n\nEpoch 00037: val_loss improved from 2.04629 to 2.03664, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-37-2.0366.hdf5\nEpoch 38/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 475us/step - loss: 1.6911 - acc: 0.4196 - val_loss: 2.0507 - val_acc: 0.2750\n\nEpoch 00038: val_loss did not improve from 2.03664\nEpoch 39/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 483us/step - loss: 1.6818 - acc: 0.4169 - val_loss: 2.0483 - val_acc: 0.2719\n\nEpoch 00039: val_loss did not improve from 2.03664\nEpoch 40/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 475us/step - loss: 1.6853 - acc: 0.4152 - val_loss: 2.0488 - val_acc: 0.2811\n\nEpoch 00040: val_loss did not improve from 2.03664\nEpoch 41/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 474us/step - loss: 1.6808 - acc: 0.4143 - val_loss: 2.0447 - val_acc: 0.2780\n\nEpoch 00041: val_loss did not improve from 2.03664\nEpoch 42/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 469us/step - loss: 1.6753 - acc: 0.4253 - val_loss: 2.0326 - val_acc: 0.2811\n\nEpoch 00042: val_loss improved from 2.03664 to 2.03263, saving model to /content/drive/My Drive/Model/speechEmotion/output/training_logs/run-2/checkpoint-42-2.0326.hdf5\nEpoch 43/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 471us/step - loss: 1.6705 - acc: 0.4160 - val_loss: 2.0471 - val_acc: 0.2796\n\nEpoch 00043: val_loss did not improve from 2.03263\nEpoch 44/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 479us/step - loss: 1.6632 - acc: 0.4143 - val_loss: 2.0414 - val_acc: 0.2811\n\nEpoch 00044: val_loss did not improve from 2.03263\nEpoch 45/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 466us/step - loss: 1.6594 - acc: 0.4160 - val_loss: 2.0427 - val_acc: 0.2826\n\nEpoch 00045: val_loss did not improve from 2.03263\nEpoch 46/1000\nChanging learning rate to 4e-05\n2281/2281 [==============================] - 1s 480us/step - loss: 1.6571 - acc: 0.4314 - val_loss: 2.0436 - val_acc: 0.2842\n" ], [ "plt.plot(cnnhistory.history['loss'])\nplt.plot(cnnhistory.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ] ], [ [ "## Saving the model", "_____no_output_____" ] ], [ [ "model_name = 'Emotion_Voice_Detection_Model.h5'\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\n# Save model and weights\nif not os.path.isdir(save_dir):\n 
os.makedirs(save_dir)\nmodel_path = os.path.join(save_dir, model_name)\nmodel.save(model_path)\nprint('Saved trained model at %s ' % model_path)", "_____no_output_____" ], [ "import json\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)", "_____no_output_____" ] ], [ [ "## Loading the model", "_____no_output_____" ] ], [ [ "# loading json and creating model\nfrom keras.models import model_from_json\njson_file = open('model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# load weights into new model\nloaded_model.load_weights(\"saved_models/Emotion_Voice_Detection_Model.h5\")\nprint(\"Loaded model from disk\")\n \n# evaluate loaded model on test data\nloaded_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\nscore = loaded_model.evaluate(x_testcnn, y_test, verbose=0)\nprint(\"%s: %.2f%%\" % (loaded_model.metrics_names[1], score[1]*100))", "_____no_output_____" ] ], [ [ "## Predicting emotions on the test data", "_____no_output_____" ] ], [ [ "preds = loaded_model.predict(x_testcnn, \n batch_size=32, \n verbose=1)", "_____no_output_____" ], [ "preds", "_____no_output_____" ], [ "preds1=preds.argmax(axis=1)", "_____no_output_____" ], [ "preds1", "_____no_output_____" ], [ "abc = preds1.astype(int).flatten()", "_____no_output_____" ], [ "predictions = (lb.inverse_transform((abc)))", "_____no_output_____" ], [ "preddf = pd.DataFrame({'predictedvalues': predictions})\npreddf[:10]", "_____no_output_____" ], [ "actual=y_test.argmax(axis=1)\nabc123 = actual.astype(int).flatten()\nactualvalues = (lb.inverse_transform((abc123)))", "_____no_output_____" ], [ "actualdf = pd.DataFrame({'actualvalues': actualvalues})\nactualdf[:10]", "_____no_output_____" ], [ "finaldf = actualdf.join(preddf)", "_____no_output_____" ] ], [ [ "## Actual v/s Predicted emotions", "_____no_output_____" ] ], [ [ "finaldf[170:180]", "_____no_output_____" ], [ "finaldf.groupby('actualvalues').count()", "_____no_output_____" ], [ "finaldf.groupby('predictedvalues').count()", "_____no_output_____" ], [ "finaldf.to_csv('Predictions.csv', index=False)", "_____no_output_____" ] ], [ [ "## Live Demo", "_____no_output_____" ], [ "#### The file 'output10.wav' in the next cell is the file that was recorded live using the code in AudioRecoreder notebook found in the repository", "_____no_output_____" ] ], [ [ "data, sampling_rate = librosa.load('output10.wav')", "_____no_output_____" ], [ "% pylab inline\nimport os\nimport pandas as pd\nimport librosa\nimport glob \n\nplt.figure(figsize=(15, 5))\nlibrosa.display.waveplot(data, sr=sampling_rate)", "_____no_output_____" ], [ "#livedf= pd.DataFrame(columns=['feature'])\nX, sample_rate = librosa.load('output10.wav', res_type='kaiser_fast',duration=2.5,sr=22050*2,offset=0.5)\nsample_rate = np.array(sample_rate)\nmfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13),axis=0)\nfeaturelive = mfccs\nlivedf2 = featurelive", "_____no_output_____" ], [ "livedf2= pd.DataFrame(data=livedf2)", "_____no_output_____" ], [ "livedf2 = livedf2.stack().to_frame().T", "_____no_output_____" ], [ "livedf2", "_____no_output_____" ], [ "twodim= np.expand_dims(livedf2, axis=2)", "_____no_output_____" ], [ "livepreds = loaded_model.predict(twodim, \n batch_size=32, \n verbose=1)", "_____no_output_____" ], [ "livepreds", "_____no_output_____" ], [ "livepreds1=livepreds.argmax(axis=1)", "_____no_output_____" ], [ "liveabc = 
livepreds1.astype(int).flatten()", "_____no_output_____" ], [ "livepredictions = (lb.inverse_transform((liveabc)))\nlivepredictions", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb427503286794829cf49f83c0ea11f7dc0e3e88
187,929
ipynb
Jupyter Notebook
project.ipynb
IoannisGeorgousis/CharityML-Project
1687d985bbf63f7c3f8e0099557e32d33b2d1486
[ "MIT" ]
2
2020-07-10T15:34:57.000Z
2020-07-10T15:35:02.000Z
project.ipynb
IoannisGeorgousis/CharityML-Udacity-Project
1687d985bbf63f7c3f8e0099557e32d33b2d1486
[ "MIT" ]
null
null
null
project.ipynb
IoannisGeorgousis/CharityML-Udacity-Project
1687d985bbf63f7c3f8e0099557e32d33b2d1486
[ "MIT" ]
null
null
null
139.828125
52,432
0.845383
[ [ [ "# Introduction to Machine Learning Nanodegree\n## Project: Finding Donors for *CharityML*", "_____no_output_____" ], [ "In this project, we employ several supervised algorithms to accurately model individuals' income using data collected from the 1994 U.S. Census. The best candidate algorithm is then chosen from preliminary results and is further optimized to best model the data. The goal with this implementation is to construct a model that accurately predicts whether an individual makes more than \\$50,000. This sort of task can arise in a non-profit setting, where organizations survive on donations. Understanding an individual's income can help a non-profit better understand how large of a donation to request, or whether or not they should reach out to begin with. While it can be difficult to determine an individual's general income bracket directly from public sources, we can (as we will see) infer this value from other publically available features.", "_____no_output_____" ], [ "\n\nThe dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Census+Income). The datset was donated by Ron Kohavi and Barry Becker, after being published in the article _\"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid\"_. You can find the article by Ron Kohavi [online](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf). The data we investigate here consists of small changes to the original dataset, such as removing the `'fnlwgt'` feature and records with missing or ill-formatted entries.", "_____no_output_____" ], [ "----\n## Exploring the Data\nRun the code cell below to load necessary Python libraries and load the census data. Note that the last column from this dataset, `'income'`, will be our target label (whether an individual makes more than, or at most, $50,000 annually). All other columns are features about each individual in the census database.", "_____no_output_____" ] ], [ [ "# Import libraries necessary for this project\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom IPython.display import display # Allows the use of display() for DataFrames\n\n# Import supplementary visualization code visuals.py\nimport visuals as vs\n\n# Pretty display for notebooks\n%matplotlib inline\n\n# Load the Census dataset\ndata = pd.read_csv(\"census.csv\")\n\n# Display the first record\ndisplay(data.head(5))", "_____no_output_____" ] ], [ [ "### Implementation: Data Exploration\nA cursory investigation of the dataset will determine how many individuals fit into either group, and will tell us about the percentage of these individuals making more than \\$50,000. 
In the code cell below, the following information is computed:\n- The total number of records, `'n_records'`\n- The number of individuals making more than \\$50,000 annually, `'n_greater_50k'`.\n- The number of individuals making at most \\$50,000 annually, `'n_at_most_50k'`.\n- The percentage of individuals making more than \\$50,000 annually, `'greater_percent'`.", "_____no_output_____" ] ], [ [ "# Total number of records\nn_records = data.shape[0]\n\n# Number of records where individual's income is more than $50,000\nn_greater_50k = data['income'].value_counts()[1]\n\n# Number of records where individual's income is at most $50,000\nn_at_most_50k = data['income'].value_counts()[0]\n\n# Percentage of individuals whose income is more than $50,000\ngreater_percent = 100 * (n_greater_50k / (n_greater_50k + n_at_most_50k))\n\n# Print the results\nprint(\"Total number of records: {}\".format(n_records))\nprint(\"Individuals making more than $50,000: {}\".format(n_greater_50k))\nprint(\"Individuals making at most $50,000: {}\".format(n_at_most_50k))\nprint(\"Percentage of individuals making more than $50,000: {}%\".format(greater_percent))\n\n# Check whether records are consistent\nif n_records == (n_greater_50k + n_at_most_50k):\n print('Records are consistent!')\n", "Total number of records: 45222\nIndividuals making more than $50,000: 11208\nIndividuals making at most $50,000: 34014\nPercentage of individuals making more than $50,000: 24.78439697492371%\nRecords are consistent!\n" ] ], [ [ "**Featureset Exploration**\n\n* **age**: continuous. \n* **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked. \n* **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool. \n* **education-num**: continuous. \n* **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse. \n* **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces. \n* **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried. \n* **race**: Black, White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other. \n* **sex**: Female, Male. \n* **capital-gain**: continuous. \n* **capital-loss**: continuous. \n* **hours-per-week**: continuous. \n* **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.", "_____no_output_____" ], [ "----\n## Preparing the Data\nBefore data can be used as input for machine learning algorithms, it often must be cleaned, formatted, and restructured — this is typically known as **preprocessing**. Fortunately, for this dataset, there are no invalid or missing entries we must deal with, however, there are some qualities about certain features that must be adjusted. 
This preprocessing can help tremendously with the outcome and predictive power of nearly all learning algorithms.", "_____no_output_____" ], [ "### Transforming Skewed Continuous Features\nA dataset may sometimes contain at least one feature whose values tend to lie near a single number, but will also have a non-trivial number of vastly larger or smaller values than that single number. Algorithms can be sensitive to such distributions of values and can underperform if the range is not properly normalized. With the census dataset two features fit this description: '`capital-gain'` and `'capital-loss'`. \n\nRun the code cell below to plot a histogram of these two features. Note the range of the values present and how they are distributed.", "_____no_output_____" ] ], [ [ "# Split the data into features and target label\nincome_raw = data['income']\nfeatures_raw = data.drop('income', axis = 1)\n\n# Visualize skewed continuous features of original data\nvs.distribution(data)", "C:\\Users\\johng\\python-projects\\Udacity Project Supervised ML\\intro-to-ml-tensorflow\\projects\\p1_charityml\\visuals.py:48: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.\n fig.show()\n" ] ], [ [ "For highly-skewed feature distributions such as `'capital-gain'` and `'capital-loss'`, it is common practice to apply a <a href=\"https://en.wikipedia.org/wiki/Data_transformation_(statistics)\">logarithmic transformation</a> on the data so that the very large and very small values do not negatively affect the performance of a learning algorithm. Using a logarithmic transformation significantly reduces the range of values caused by outliers. Care must be taken when applying this transformation however: The logarithm of `0` is undefined, so we must translate the values by a small amount above `0` to apply the the logarithm successfully.\n\nRun the code cell below to perform a transformation on the data and visualize the results. Again, note the range of values and how they are distributed. ", "_____no_output_____" ] ], [ [ "# Log-transform the skewed features\nskewed = ['capital-gain', 'capital-loss']\nfeatures_log_transformed = pd.DataFrame(data = features_raw)\nfeatures_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1))\n\n# Visualize the new log distributions\nvs.distribution(features_log_transformed, transformed = True)", "_____no_output_____" ] ], [ [ "### Normalizing Numerical Features\nIn addition to performing transformations on features that are highly skewed, it is often good practice to perform some type of scaling on numerical features. Applying a scaling to the data does not change the shape of each feature's distribution (such as `'capital-gain'` or `'capital-loss'` above); however, normalization ensures that each feature is treated equally when applying supervised learners. Note that once scaling is applied, observing the data in its raw form will no longer have the same original meaning, as exampled below.\n\nRun the code cell below to normalize each numerical feature. 
We will use [`sklearn.preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) for this.", "_____no_output_____" ] ], [ [ "# Import sklearn.preprocessing.StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\n\n# Initialize a scaler, then apply it to the features\nscaler = MinMaxScaler() # default=(0, 1)\nnumerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']\n\nfeatures_log_minmax_transform = pd.DataFrame(data = features_log_transformed)\nfeatures_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])\n\n# Show an example of a record with scaling applied\ndisplay(features_log_minmax_transform.head(n = 5))", "_____no_output_____" ] ], [ [ "### Data Preprocessing\n\nFrom the table in **Exploring the Data** above, we can see there are several features for each record that are non-numeric. Typically, learning algorithms expect input to be numeric, which requires that non-numeric features (called *categorical variables*) be converted. One popular way to convert categorical variables is by using the **one-hot encoding** scheme. One-hot encoding creates a _\"dummy\"_ variable for each possible category of each non-numeric feature. For example, assume `someFeature` has three possible entries: `A`, `B`, or `C`. We then encode this feature into `someFeature_A`, `someFeature_B` and `someFeature_C`.\n\n| | someFeature | | someFeature_A | someFeature_B | someFeature_C |\n| :-: | :-: | | :-: | :-: | :-: |\n| 0 | B | | 0 | 1 | 0 |\n| 1 | C | ----> one-hot encode ----> | 0 | 0 | 1 |\n| 2 | A | | 1 | 0 | 0 |\n\nAdditionally, as with the non-numeric features, we need to convert the non-numeric target label, `'income'` to numerical values for the learning algorithm to work. Since there are only two possible categories for this label (\"<=50K\" and \">50K\"), we can avoid using one-hot encoding and simply encode these two categories as `0` and `1`, respectively. 
In code cell below, you will need to implement the following:\n - Use [`pandas.get_dummies()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html?highlight=get_dummies#pandas.get_dummies) to perform one-hot encoding on the `'features_log_minmax_transform'` data.\n - Convert the target label `'income_raw'` to numerical entries.\n - Set records with \"<=50K\" to `0` and records with \">50K\" to `1`.", "_____no_output_____" ] ], [ [ "# One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()\nfeatures_final = pd.get_dummies(features_log_minmax_transform)\n\n# Encode the 'income_raw' data to numerical values\nincome = income_raw.replace(to_replace = {'<=50K': 0, '>50K': 1})\n\n# Print the number of features after one-hot encoding\nencoded = list(features_final.columns)\nprint(\"{} total features after one-hot encoding.\".format(len(encoded)))\n\n# Uncomment the following line to see the encoded feature names\n#print(encoded)", "103 total features after one-hot encoding.\n['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week', 'workclass_ Federal-gov', 'workclass_ Local-gov', 'workclass_ Private', 'workclass_ Self-emp-inc', 'workclass_ Self-emp-not-inc', 'workclass_ State-gov', 'workclass_ Without-pay', 'education_level_ 10th', 'education_level_ 11th', 'education_level_ 12th', 'education_level_ 1st-4th', 'education_level_ 5th-6th', 'education_level_ 7th-8th', 'education_level_ 9th', 'education_level_ Assoc-acdm', 'education_level_ Assoc-voc', 'education_level_ Bachelors', 'education_level_ Doctorate', 'education_level_ HS-grad', 'education_level_ Masters', 'education_level_ Preschool', 'education_level_ Prof-school', 'education_level_ Some-college', 'marital-status_ Divorced', 'marital-status_ Married-AF-spouse', 'marital-status_ Married-civ-spouse', 'marital-status_ Married-spouse-absent', 'marital-status_ Never-married', 'marital-status_ Separated', 'marital-status_ Widowed', 'occupation_ Adm-clerical', 'occupation_ Armed-Forces', 'occupation_ Craft-repair', 'occupation_ Exec-managerial', 'occupation_ Farming-fishing', 'occupation_ Handlers-cleaners', 'occupation_ Machine-op-inspct', 'occupation_ Other-service', 'occupation_ Priv-house-serv', 'occupation_ Prof-specialty', 'occupation_ Protective-serv', 'occupation_ Sales', 'occupation_ Tech-support', 'occupation_ Transport-moving', 'relationship_ Husband', 'relationship_ Not-in-family', 'relationship_ Other-relative', 'relationship_ Own-child', 'relationship_ Unmarried', 'relationship_ Wife', 'race_ Amer-Indian-Eskimo', 'race_ Asian-Pac-Islander', 'race_ Black', 'race_ Other', 'race_ White', 'sex_ Female', 'sex_ Male', 'native-country_ Cambodia', 'native-country_ Canada', 'native-country_ China', 'native-country_ Columbia', 'native-country_ Cuba', 'native-country_ Dominican-Republic', 'native-country_ Ecuador', 'native-country_ El-Salvador', 'native-country_ England', 'native-country_ France', 'native-country_ Germany', 'native-country_ Greece', 'native-country_ Guatemala', 'native-country_ Haiti', 'native-country_ Holand-Netherlands', 'native-country_ Honduras', 'native-country_ Hong', 'native-country_ Hungary', 'native-country_ India', 'native-country_ Iran', 'native-country_ Ireland', 'native-country_ Italy', 'native-country_ Jamaica', 'native-country_ Japan', 'native-country_ Laos', 'native-country_ Mexico', 'native-country_ Nicaragua', 'native-country_ Outlying-US(Guam-USVI-etc)', 'native-country_ Peru', 'native-country_ Philippines', 'native-country_ Poland', 
'native-country_ Portugal', 'native-country_ Puerto-Rico', 'native-country_ Scotland', 'native-country_ South', 'native-country_ Taiwan', 'native-country_ Thailand', 'native-country_ Trinadad&Tobago', 'native-country_ United-States', 'native-country_ Vietnam', 'native-country_ Yugoslavia']\n" ] ], [ [ "### Shuffle and Split Data\nNow all _categorical variables_ have been converted into numerical features, and all numerical features have been normalized. As always, we will now split the data (both features and their labels) into training and test sets. 80% of the data will be used for training and 20% for testing.\n\nRun the code cell below to perform this split.", "_____no_output_____" ] ], [ [ "# Import train_test_split\nfrom sklearn.model_selection import train_test_split\n\n# Split the 'features' and 'income' data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(features_final, \n income, \n test_size = 0.2, \n random_state = 0)\n\n# Show the results of the split\nprint(\"Training set has {} samples.\".format(X_train.shape[0]))\nprint(\"Testing set has {} samples.\".format(X_test.shape[0]))", "Training set has 36177 samples.\nTesting set has 9045 samples.\n" ] ], [ [ "----\n## Evaluating Model Performance\nIn this section, we will investigate four different algorithms, and determine which is best at modeling the data. Three of these algorithms will be supervised learners of our choice, and the fourth algorithm is known as a *naive predictor*.", "_____no_output_____" ], [ "### Metrics and the Naive Predictor\n*CharityML*, equipped with their research, knows individuals that make more than \\$50,000 are most likely to donate to their charity. Because of this, *CharityML* is particularly interested in predicting who makes more than \\$50,000 accurately. It would seem that using **accuracy** as a metric for evaluating a particular model's performace would be appropriate. Additionally, identifying someone that *does not* make more than \\$50,000 as someone who does would be detrimental to *CharityML*, since they are looking to find individuals willing to donate. Therefore, a model's ability to precisely predict those that make more than \\$50,000 is *more important* than the model's ability to **recall** those individuals. We can use **F-beta score** as a metric that considers both precision and recall:\n\n$$ F_{\\beta} = (1 + \\beta^2) \\cdot \\frac{precision \\cdot recall}{\\left( \\beta^2 \\cdot precision \\right) + recall} $$\n\nIn particular, when $\\beta = 0.5$, more emphasis is placed on precision. This is called the **F$_{0.5}$ score** (or F-score for simplicity).\n\nLooking at the distribution of classes (those who make at most 50,000, and those who make more), it's clear most individuals do not make more than 50,000. This can greatly affect accuracy, since we could simply say \\\"this person does not make more than 50,000\\\" and generally be right, without ever looking at the data! Making such a statement would be called naive, since we have not considered any information to substantiate the claim. It is always important to consider the naive prediction for your data, to help establish a benchmark for whether a model is performing well. That been said, using that prediction would be pointless: If we predicted all people made less than 50,000, CharityML would identify no one as donors.\n\n\n#### Note: Recap of accuracy, precision, recall\n\n**Accuracy** measures how often the classifier makes the correct prediction. 
It’s the ratio of the number of correct predictions to the total number of predictions (the number of test data points).\n\n**Precision** tells us what proportion of messages we classified as spam, actually were spam.\nIt is a ratio of true positives(words classified as spam, and which are actually spam) to all positives(all words classified as spam, irrespective of whether that was the correct classificatio), in other words it is the ratio of\n\n`[True Positives/(True Positives + False Positives)]`\n\n**Recall(sensitivity)** tells us what proportion of messages that actually were spam were classified by us as spam.\nIt is a ratio of true positives(words classified as spam, and which are actually spam) to all the words that were actually spam, in other words it is the ratio of\n\n`[True Positives/(True Positives + False Negatives)]`\n\nFor classification problems that are skewed in their classification distributions like in our case, for example if we had a 100 text messages and only 2 were spam and the rest 98 weren't, accuracy by itself is not a very good metric. We could classify 90 messages as not spam(including the 2 that were spam but we classify them as not spam, hence they would be false negatives) and 10 as spam(all 10 false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the F1 score, which is weighted average(harmonic mean) of the precision and recall scores. This score can range from 0 to 1, with 1 being the best possible F1 score(we take the harmonic mean as we are dealing with ratios).", "_____no_output_____" ], [ "### Naive Predictor Performace\nIf we chose a model that always predicted an individual made more than $50,000, what would that model's accuracy and F-score be on this dataset? You must use the code cell below and assign your results to `'accuracy'` and `'fscore'` to be used later.\n\n**Please note** that the the purpose of generating a naive predictor is simply to show what a base model without any intelligence would look like. In the real world, ideally your base model would be either the results of a previous model or could be based on a research paper upon which you are looking to improve. When there is no benchmark model set, getting a result better than random choice is a place you could start from.\n\n**Notes:** \n\n* When we have a model that always predicts '1' (i.e. the individual makes more than 50k) then our model will have no True Negatives(TN) or False Negatives(FN) as we are not making any negative('0' value) predictions. Therefore our Accuracy in this case becomes the same as our Precision(True Positives/(True Positives + False Positives)) as every prediction that we have made with value '1' that should have '0' becomes a False Positive; therefore our denominator in this case is the total number of records we have in total. \n* Our Recall score(True Positives/(True Positives + False Negatives)) in this setting becomes 1 as we have no False Negatives.", "_____no_output_____" ] ], [ [ "'''\nTP = np.sum(income) # Counting the ones as this is the naive case. 
Note that 'income' is the 'income_raw' data \nencoded to numerical values done in the data preprocessing step.\nFP = income.count() - TP # Specific to the naive case\n\nTN = 0 # No predicted negatives in the naive case\nFN = 0 # No predicted negatives in the naive case\n'''\n# Calculate accuracy, precision and recall\n\nTP = np.sum(income)\nFP = income.count() - TP\nTN, FN = 0, 0\n\n\naccuracy = (TP + TN) / (TP + TN + FP + FN)\nrecall = TP / (TP + FN)\nprecision = TP / (TP + FP)\n\n\n# Calculate F-score using the formula above for beta = 0.5 and correct values for precision and recall.\nbeta = 0.5 # Define beta\nfscore = (1 + beta**2) * (precision * recall) / (beta**2 * precision + recall)\n\n# Print the results \nprint(\"Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]\".format(accuracy, fscore))", "Naive Predictor: [Accuracy score: 0.2478, F-score: 0.2917]\n" ] ], [ [ "### Supervised Learning Models\n**The following are some of the supervised learning models that are currently available in** [`scikit-learn`](http://scikit-learn.org/stable/supervised_learning.html) **that you may choose from:**\n- Gaussian Naive Bayes (GaussianNB)\n- Decision Trees\n- Ensemble Methods (Bagging, AdaBoost, Random Forest, Gradient Boosting)\n- K-Nearest Neighbors (KNeighbors)\n- Stochastic Gradient Descent Classifier (SGDC)\n- Support Vector Machines (SVM)\n- Logistic Regression", "_____no_output_____" ], [ "### Model Application\nList three of the supervised learning models above that are appropriate for this problem that you will test on the census data. For each model chosen\n\n- Describe one real-world application in industry where the model can be applied. \n- What are the strengths of the model; when does it perform well?\n- What are the weaknesses of the model; when does it perform poorly?\n- What makes this model a good candidate for the problem, given what you know about the data?\n", "_____no_output_____" ], [ "### Decision Trees\n**Describe one real-world application in industry where the model can be applied.**\n\nDecision trees can be used for \"Identifying Defective Products in the\nManufacturing Process\". [1] \n\nIn this regard, decision trees are used as a classification algorithm that is trained on data with features of products that the company manufactures, as well as labels \"Defective\" and \"Non-defective\". \nAfter training process, the model should be able to group products into \"Defective\" and \"Non-defective\" categories and predict whether a manufactured product is defective or not. \n\n\n\n**What are the strengths of the model; when does it perform well?** \n\n1. The data pre-processing step for decision trees requires less effort compared to other algorithms (e.g. no need to normalize/scale data or impute missing values). [2]\n\n2. The way the algorithm works is very intuitive, and thus easier to understand and explain. In addition, they can be used as a white box model. [3]\n\n\n\n**What are the weaknesses of the model; when does it perform poorly?**\n\n1. Because decision trees are so simple there is often a need for more complex algorithms (e.g. Random Forest) to achieve a higher accuracy. [3]\n\n2. Decision trees have the tendency to overfit the training set. [3]\n\n3. Decision trees are unstable. The reproducibility of a decision tree model is unreliable since the structure is sensitive to even to small changes in the data. [3]\n\n4. Decision trees can get complex and computationally expensive. 
[3]\n\n\n**What makes this model a good candidate for the problem, given what you know about the data?**\n\nI think this model is a good candidate in this situation because, as a white box, and because the features are well-defined, it might provide further insights which CharityML can rely on. \n\nFor example, CharityML identified that the most relevant parameter when it comes to determining donation likelihood is individual income.\n\nA decision tree model may find highly accurate predictors of income that can simplify the current process and help draw more valuable conclusions such as this one.\n\nMoreover, due to the algorithm's simplicity, the charity members will have the capacity to intuitively understand its basic internal processes. \n\n**References**\n\n[[1]](http://www.kpubs.org/article/articleDownload.kpubs?downType=pdf&articleANo=E1CTBR_2017_v13n2_57)\n[[2]](https://medium.com/@dhiraj8899/top-5-advantages-and-disadvantages-of-decision-tree-algorithm-428ebd199d9a)\n[[3]](https://botbark.com/2019/12/19/top-6-advantages-and-disadvantages-of-decision-tree-algorithm/)\n\n\n\n\n\n\n", "_____no_output_____" ], [ "### Ensemble Methods (AdaBoost)\n**Describe one real-world application in industry where the model can be applied.**\n\nThe AdaBoost algorithm can be applied for \"Telecommunication Fraud Detection\". [1]\n\nThe model is trained using features of past telecommunication messages (features) along with whether they ended up being fraudulent or not (labels). \n\nThen, the AdaBoost model should be able to predict whether future telecommunication material is fraudulent or not. \n\n**What are the strengths of the model; when does it perform well?**\n\n1. High flexibility. Different classification algorithms (decision trees, SVMs, etc.) can be used as weak learners to finally constitute a strong learner (final model). [2]\n\n2. High precision. Experiments have shown AdaBoost models to achieve relatively high precision when making predictions. [3]\n\n3. Simple preprocessing. AdaBoost algorithms are not too demanding when it comes to preprocessed data, thus more time is saved during the pre-processing step. [4]\n\n\n\n**What are the weaknesses of the model; when does it perform poorly?**\n\n1. Sensitive to noisy data and outliers. [4]\n\n2. Requires quality data because the boosting technique learns progressively and is prone to error. [4]\n\n3. Low Accuracy when Data is Imbalanced. [3]\n\n4. Training is mildly computationally expensive, and thus it can be time-consuming. [3]\n\n**What makes this model a good candidate for the problem, given what you know about the data?**\n\nAdaBoost will be tried as an alternative to decision trees with stronger predictive capacity. \n\nAn AdaBoost model is a good candidate because it can provide improvements over decision trees to valuable metrics such as accuracy and precision. \nSince it has been shown that this algorithm can achieve relatively high precision (which is what we are looking for in this problem), this aspect of it will also benefit us. 
\n\n**References**\n\n[[1]](https://download.atlantis-press.com/article/25896505.pdf)\n[[2]](https://www.educba.com/adaboost-algorithm/)\n[[3]](https://easyai.tech/en/ai-definition/adaboost/#:~:text=AdaBoost%20is%20adaptive%20in%20a,problems%20than%20other%20learning%20algorithms.)\n[[4]](https://blog.paperspace.com/adaboost-optimizer/)", "_____no_output_____" ], [ "### Support Vector Machines\n**Describe one real-world application in industry where the model can be applied.**\n\nSVM's can be applied in bioinformatics. [1]\n\nFor example, an SVM model can be trained on data involving features of cancer tumours and then be able to identify whether a tumour is benign or malignant (labels). \n\n**What are the strengths of the model; when does it perform well?**\n\n1. Effective in high dimensional spaces (i.e. when there numerous features). [2]\n\n2. Generally good algorithm. SVM’s are good when we have almost no information about the data. [3]\n\n3. Relatively low risk of overfitting. This is due to its L2 Regularisation feature. [4]\n\n4. High flexibility. Can handle linear & non-linear data due to variety added by different kernel functions. [3]\n\n5. Stability. Since a small change to the data does not greatly affect the hyperplane. [4]\n\n6. SVM is defined by a convex optimisation problem (i.e. no local minima) [4]\n\n**What are the weaknesses of the model; when does it perform poorly?**\n\n1. Training is very computationally expensive (high memory requirement) and thus it can be time-consuming, especially for large datasets [3]\n\n2. Sensitive to noisy data, i.e. when the target classes are overlapping [2]\n\n3. Hyperparameters can be difficult to tune. (Kernel, C parameter, gamma)\ne.g. when choosing a Kernel, if you always go with high-dimensional ones you might generate too many support vectors and reduce training speed drastically. [4]\n\n4. Difficult to understand and interpret, particularly with high dimensional data. Also, the final model is not easy to see, so we cannot do small calibrations based on business intuition. [3]\n\n5. Requires feature scaling. [4]\n\n**What makes this model a good candidate for the problem, given what you know about the data?**\n\nGiven what we know about the data, SVM would be a good choice since it can handle its multiple dimensions. \n\nIt will also add variety when compared to decision trees and AdaBoost, potentially yielding better results due to its vastly different mechanism. \n\n\n**References**\n\n[[1]](https://data-flair.training/blogs/applications-of-svm/)\n[[2]](https://medium.com/@dhiraj8899/top-4-advantages-and-disadvantages-of-support-vector-machine-or-svm-a3c06a2b107)\n[[3]](https://statinfer.com/204-6-8-svm-advantages-disadvantages-applications/)\n[[4]](http://theprofessionalspoint.blogspot.com/2019/03/advantages-and-disadvantages-of-svm.html)", "_____no_output_____" ], [ "### Creating a Training and Predicting Pipeline\nTo properly evaluate the performance of each model you've chosen, it's important that you create a training and predicting pipeline that allows you to quickly and effectively train models using various sizes of training data and perform predictions on the testing data. 
Your implementation here will be used in the following section.\nIn the code block below, you will need to implement the following:\n - Import `fbeta_score` and `accuracy_score` from [`sklearn.metrics`](http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics).\n - Fit the learner to the sampled training data and record the training time.\n - Perform predictions on the test data `X_test`, and also on the first 300 training points `X_train[:300]`.\n - Record the total prediction time.\n - Calculate the accuracy score for both the training subset and testing set.\n - Calculate the F-score for both the training subset and testing set.\n - Make sure that you set the `beta` parameter!", "_____no_output_____" ] ], [ [ "# Import two metrics from sklearn - fbeta_score and accuracy_score\nfrom sklearn.metrics import fbeta_score, accuracy_score\n\n\ndef train_predict(learner, sample_size, X_train, y_train, X_test, y_test): \n '''\n inputs:\n - learner: the learning algorithm to be trained and predicted on\n - sample_size: the size of samples (number) to be drawn from training set\n - X_train: features training set\n - y_train: income training set\n - X_test: features testing set\n - y_test: income testing set\n '''\n \n results = {}\n \n # Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:])\n start = time() # Get start time\n learner = learner.fit(X_train[:sample_size], y_train[:sample_size])\n end = time() # Get end time\n \n # Calculate the training time\n results['train_time'] = end - start\n \n # Get the predictions on the test set(X_test),\n # then get predictions on the first 300 training samples(X_train) using .predict()\n start = time() # Get start time\n predictions_test = learner.predict(X_test)\n predictions_train = learner.predict(X_train[:300])\n end = time() # Get end time\n \n # Calculate the total prediction time\n results['pred_time'] = end - start\n \n # Compute accuracy on the first 300 training samples\n results['acc_train'] = accuracy_score(y_train[:300], predictions_train)\n \n # Compute accuracy on test set using accuracy_score()\n results['acc_test'] = accuracy_score(y_test, predictions_test)\n \n # Compute F-score on the the first 300 training samples using fbeta_score()\n results['f_train'] = fbeta_score(y_train[:300], predictions_train, beta=beta)\n \n # Compute F-score on the test set which is y_test\n results['f_test'] = fbeta_score(y_test, predictions_test, beta=beta)\n \n # Success\n print(\"{} trained on {} samples.\".format(learner.__class__.__name__, sample_size))\n \n # Return the results\n return results\n", "_____no_output_____" ] ], [ [ "### Initial Model Evaluation\nIn the code cell, you will need to implement the following:\n- Import the three supervised learning models you've discussed in the previous section.\n- Initialize the three models and store them in `'clf_A'`, `'clf_B'`, and `'clf_C'`.\n - Use a `'random_state'` for each model you use, if provided.\n - **Note:** Use the default settings for each model — you will tune one specific model in a later section.\n- Calculate the number of records equal to 1%, 10%, and 100% of the training data.\n - Store those values in `'samples_1'`, `'samples_10'`, and `'samples_100'` respectively.\n\n**Note:** Depending on which algorithms you chose, the following implementation may take some time to run!", "_____no_output_____" ] ], [ [ "# Import the three supervised learning models from sklearn\n# Import Algorithms\nfrom 
sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.svm import SVC\n\n# Initialize the three models\nclf_A = DecisionTreeClassifier(random_state=42)\nclf_B = AdaBoostClassifier(random_state=42)\nclf_C = SVC(random_state=42)\n\n# Calculate the number of samples for 1%, 10%, and 100% of the training data\nsamples_100 = len(y_train)\nsamples_10 = int(0.1*len(y_train))\nsamples_1 = int(0.01*len(y_train))\n\n# Collect results on the learners\nresults = {}\nfor clf in [clf_A, clf_B, clf_C]:\n clf_name = clf.__class__.__name__\n results[clf_name] = {}\n for i, samples in enumerate([samples_1, samples_10, samples_100]):\n results[clf_name][i] = \\\n train_predict(clf, samples, X_train, y_train, X_test, y_test)\n\n# Run metrics visualization for the three supervised learning models chosen\nvs.evaluate(results, accuracy, fscore)", "DecisionTreeClassifier trained on 361 samples.\nDecisionTreeClassifier trained on 3617 samples.\nDecisionTreeClassifier trained on 36177 samples.\nAdaBoostClassifier trained on 361 samples.\nAdaBoostClassifier trained on 3617 samples.\nAdaBoostClassifier trained on 36177 samples.\nSVC trained on 361 samples.\nSVC trained on 3617 samples.\nSVC trained on 36177 samples.\n" ] ], [ [ "----\n## Improving Results\nIn this final section, you will choose from the three supervised learning models the *best* model to use on the student data. You will then perform a grid search optimization for the model over the entire training set (`X_train` and `y_train`) by tuning at least one parameter to improve upon the untuned model's F-score. ", "_____no_output_____" ], [ "### Choosing the Best Model\n\nBased on the evaluation you performed earlier, in one to two paragraphs, explain to *CharityML* which of the three models you believe to be most appropriate for the task of identifying individuals that make more than \\$50,000. ", "_____no_output_____" ], [ "\n##### AdaBoost\nAccording to the analysis, the most appropriate model for identifying individuals who make more than \\$50,000 is the AdaBoost model. This is because of the following reasons: \n\n- AdaBoost yields the best accuracy and F-score on the testing data, meaning that to maximise the number of true potential donors, it is the ideal model to choose.\n- The 2nd best competitor (namely, SVM) has a slightly higher tendency to overfit, and is significantly more time-consuming to train. \n- AdaBoost is suitable for the given dataset because it yields high precision (i.e. few false positives, which is what we want), and will allow us to interpret the result for potential callibrations more so than an SVM model would. ", "_____no_output_____" ], [ "### Describing the Model in Layman's Terms\n\nIn one to two paragraphs, explain to *CharityML*, in layman's terms, how the final model chosen is supposed to work. Be sure that you are describing the major qualities of the model, such as how the model is trained and how the model makes a prediction. Avoid using advanced mathematical jargon, such as describing equations.", "_____no_output_____" ], [ "##### Introduction\nAdaBoost is a model that belongs to a group of models called \"Ensemble Methods\". \nAs the name suggests, the model trains weaker models on the data (also known as \"weak learners\"), and then combines them into a single, more powerful model (which we call a \"strong learner\"). 
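To make the weak-learner idea concrete, here is a minimal, illustrative sketch of a discrete-AdaBoost-style loop built from decision stumps on a synthetic dataset. It is only a toy version of the boosting idea described above — not the SAMME.R algorithm that scikit-learn's `AdaBoostClassifier` actually implements in this notebook — and the dataset, number of rounds, and random seeds are arbitrary choices made for the example.

```python
# Illustrative only: a bare-bones discrete-AdaBoost-style loop from decision stumps.
import numpy as np
from sklearn.datasets import make_moons
from sklearn.tree import DecisionTreeClassifier

X, y = make_moons(n_samples=400, noise=0.25, random_state=42)
y_signed = np.where(y == 1, 1, -1)        # the boosting math is easiest with labels in {-1, +1}

n_rounds = 25
weights = np.full(len(X), 1.0 / len(X))   # every example starts equally important
stumps, alphas = [], []

for _ in range(n_rounds):
    stump = DecisionTreeClassifier(max_depth=1, random_state=42)
    stump.fit(X, y_signed, sample_weight=weights)
    pred = stump.predict(X)

    # weighted error of this weak learner (clipped to avoid division by zero)
    err = np.clip(np.sum(weights[pred != y_signed]), 1e-10, 1 - 1e-10)
    alpha = 0.5 * np.log((1 - err) / err)  # better stumps get a bigger "say"

    # increase the weight of misclassified points so the next stump focuses on them
    weights *= np.exp(-alpha * y_signed * pred)
    weights /= weights.sum()

    stumps.append(stump)
    alphas.append(alpha)

# The "strong learner" is a weighted vote over all the weak learners.
scores = sum(a * s.predict(X) for a, s in zip(alphas, stumps))
ensemble_pred = np.sign(scores)
print("Toy ensemble training accuracy:", np.mean(ensemble_pred == y_signed))
```

The key points of the description above are visible in the loop: misclassified points get larger weights, so the next weak learner concentrates on them, and more accurate weak learners receive a larger weight (`alpha`) in the final combined vote.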
\n\n##### Training the AdaBoost Model\nIn our case, we feed the model the training data from our dataset, and it fits a simple \"weak learner\" to the data. Then, it augments the errors made by the first learner, and it fits a second learner to correct its mistakes. Then, a 3rd weak learner does the same for the 2nd one, and this process repeats until enough learners have been trained. \nThen, the algorithm assigns a weight to each weak learner based on its performance, and combines all the weak learners into a single **Strong Learner**. \nWhen combining the weak learners, the ones with the stronger weights (i.e. the more successful ones) will get more of a say on how the final model is structured. \n\n##### AdaBoost Predictions\nAfter training the model, we will be able to feed to it unseen examples (i.e. new individuals), and the model will use its knowledge on the previous individuals to predict whether or not they make more than /$50,000 per year. ", "_____no_output_____" ], [ "### Model Tuning\nFine tune the chosen model. Use grid search (`GridSearchCV`) with at least one important parameter tuned with at least 3 different values. You will need to use the entire training set for this. In the code cell below, you will need to implement the following:\n- Import [`sklearn.grid_search.GridSearchCV`](http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html) and [`sklearn.metrics.make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html).\n- Initialize the classifier you've chosen and store it in `clf`.\n - Set a `random_state` if one is available to the same state you set before.\n- Create a dictionary of parameters you wish to tune for the chosen model.\n - Example: `parameters = {'parameter' : [list of values]}`.\n - **Note:** Avoid tuning the `max_features` parameter of your learner if that parameter is available!\n- Use `make_scorer` to create an `fbeta_score` scoring object (with $\\beta = 0.5$).\n- Perform grid search on the classifier `clf` using the `'scorer'`, and store it in `grid_obj`.\n- Fit the grid search object to the training data (`X_train`, `y_train`), and store it in `grid_fit`.\n\n**Note:** Depending on the algorithm chosen and the parameter list, the following implementation may take some time to run!", "_____no_output_____" ] ], [ [ "# Import 'GridSearchCV', 'make_scorer', and any other necessary libraries\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import make_scorer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\n\n# Initialize the classifier\nclf = AdaBoostClassifier(random_state=42)\n\n# Create the parameters list you wish to tune, using a dictionary if needed.\nparameters = {'n_estimators': [500, 1000, 1500, 2000], 'learning_rate': np.linspace(0.001, 1, 10)}\n\n# Make an fbeta_score scoring object using make_scorer()\nscorer = make_scorer(fbeta_score, beta=beta)\n\n# Perform grid search on the classifier using 'scorer' as the scoring method using GridSearchCV()\ngrid_obj = GridSearchCV(clf, parameters, scoring=scorer, n_jobs = -1)\n\n# Fit the grid search object to the training data and find the optimal parameters using fit()\nstart = time()\ngrid_fit = grid_obj.fit(X_train, y_train)\nend = time()\nprint('Time to tune: ', end - start)\n\n# Get the estimator\nbest_clf = grid_fit.best_estimator_\n\n# Make predictions using the unoptimized and model\npredictions = (clf.fit(X_train, 
y_train)).predict(X_test)\nbest_predictions = best_clf.predict(X_test)\n\n# Check hyperparameters\nprint(clf)\nprint(best_clf)\n\n# Report the before-and-after scores\nprint(\"Unoptimized model\\n------\")\nprint(\"Accuracy score on testing data: {:.4f}\".format(accuracy_score(y_test, predictions)))\nprint(\"F-score on testing data: {:.4f}\".format(fbeta_score(y_test, predictions, beta = 0.5)))\nprint(\"\\nOptimized Model\\n------\")\nprint(\"Final accuracy score on the testing data: {:.4f}\".format(accuracy_score(y_test, best_predictions)))\nprint(\"Final F-score on the testing data: {:.4f}\".format(fbeta_score(y_test, best_predictions, beta = 0.5)))", "Time to tune: 2075.489259004593\nAdaBoostClassifier(algorithm='SAMME.R', base_estimator=None, learning_rate=1.0,\n                   n_estimators=50, random_state=42)\nAdaBoostClassifier(algorithm='SAMME.R', base_estimator=None,\n                   learning_rate=0.667, n_estimators=1500, random_state=42)\nUnoptimized model\n------\nAccuracy score on testing data: 0.8576\nF-score on testing data: 0.7246\n\nOptimized Model\n------\nFinal accuracy score on the testing data: 0.8676\nFinal F-score on the testing data: 0.7456\n" ] ], [ [ "### Final Model Evaluation\n\n* What is your optimized model's accuracy and F-score on the testing data? \n* Are these scores better or worse than the unoptimized model? \n* How do the results from your optimized model compare to the naive predictor benchmarks you found earlier in **Question 1**? ", "_____no_output_____" ], [ "#### Results:\n\n| Metric | Unoptimized Model | Optimized Model |\n| :------------: | :---------------: | :-------------: | \n| Accuracy Score | 0.8576 | 0.8676 |\n| F-score | 0.7246 | 0.7456 |\n", "_____no_output_____" ], [ "**Discussion**\n\nMy optimised model's accuracy is 86.76% while the F-score (beta = 0.5) is 0.7456.\n\nThese scores are slightly better than the unoptimised model's. Accuracy improved by ~1.2% and F-score by ~2.9%. \n\nThe scores are significantly better than the naive predictor's: accuracy is roughly 3.5 times higher and the F-score roughly 2.6 times higher. \n\n", "_____no_output_____" ], [ "----\n## Feature Importance\n\nAn important task when performing supervised learning on a dataset like the census data we study here is determining which features provide the most predictive power. By focusing on the relationship between only a few crucial features and the target label we simplify our understanding of the phenomenon, which is almost always a useful thing to do. In the case of this project, that means we wish to identify a small number of features that most strongly predict whether an individual makes at most or more than \\$50,000.\n\nHere, we choose a scikit-learn classifier (e.g., adaboost, random forests) that has a `feature_importances_` attribute, which ranks the importance of features according to the chosen classifier. In the next python cell, we fit this classifier to the training set and use this attribute to determine the top 5 most important features for the census dataset.", "_____no_output_____" ], [ "### Feature Relevance Observation\nWhen **Exploring the Data**, it was shown there are thirteen available features for each individual on record in the census data. Of these thirteen features, which five features do you believe to be most important for prediction, and in what order would you rank them and why?", "_____no_output_____" ], [ "**Answer:**\n\n1. **Occupation**. I would expect the job that a person has to be a good predictor of income. \n2. 
**Hours per week**. The more hours you work, the more you earn. \n3. **Education Number** Because of the positive correlation between education level and income. \n4. **Age** Usually older people who've had longer careers have a higher income. \n5. **Native Country** Because a US worker earns significantly more than, say, an Argentina one. ", "_____no_output_____" ], [ "### Feature Importance\nChoose a `scikit-learn` supervised learning algorithm that has a `feature_importance_` attribute availble for it. This attribute is a function that ranks the importance of each feature when making predictions based on the chosen algorithm.\n\nIn the code cell below, you will need to implement the following:\n - Import a supervised learning model from sklearn if it is different from the three used earlier.\n - Train the supervised model on the entire training set.\n - Extract the feature importances using `'.feature_importances_'`.", "_____no_output_____" ] ], [ [ "# Import a supervised learning model that has 'feature_importances_'\nfrom sklearn.ensemble import AdaBoostClassifier\n\n# Train the supervised model on the training set using .fit(X_train, y_train)\nmodel = AdaBoostClassifier().fit(X_train, y_train)\n\n# Extract the feature importances using .feature_importances_ \nimportances = model.feature_importances_\n\n# Plot\nvs.feature_plot(importances, X_train, y_train)", "_____no_output_____" ] ], [ [ "### Extracting Feature Importance\n\nObserve the visualization created above which displays the five most relevant features for predicting if an individual makes at most or above \\$50,000. \n* How do these five features compare to the five features you discussed in **Question 6**?\n* If you were close to the same answer, how does this visualization confirm your thoughts? \n* If you were not close, why do you think these features are more relevant?", "_____no_output_____" ], [ "**Answer:**\n\n* *How do these five features compare to the five features you discussed in **Question 6**?*\n\nThese five features are significantly different to what I predicted in question 6. While I did mention age, hours-per-week and education-num, I failed to mention two of the most significant features: capital-loss and capital-gain, which together amount to about 37% cumulative feature weight. \n\n* *If you were close to the same answer, how does this visualization confirm your thoughts?* \n\nThis visualisation confirms that age plays a large role and that hours-per-week and education-num are among the most relevant features. \nThis is because of the direct and strong correlation between these variables and individual income. \n\n* *If you were not close, why do you think these features are more relevant?*\n\nI was genuinely surprised that occupation did not make it in the top 5. I suppose it was because the mentioned occupations just do not have a large discrepancy in income. Whereas capital-loss and capital-gain varies more among those individuals and more directly affects their income. Similarly, regarding native-country, I suppose most people were from the US or a similarly developed country and hence the feature didn't have great predictive power. ", "_____no_output_____" ], [ "### Feature Selection\nHow does a model perform if we only use a subset of all the available features in the data? With less features required to train, the expectation is that training and prediction time is much lower — at the cost of performance metrics. 
From the visualization above, we see that the top five most important features contribute more than half of the importance of **all** features present in the data. This hints that we can attempt to *reduce the feature space* and simplify the information required for the model to learn. The code cell below will use the same optimized model you found earlier, and train it on the same training set *with only the top five important features*. ", "_____no_output_____" ] ], [ [ "# Import functionality for cloning a model\nfrom sklearn.base import clone\n\n# Reduce the feature space\nX_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]\nX_test_reduced = X_test[X_test.columns.values[(np.argsort(importances)[::-1])[:5]]]\n\n# Train on the \"best\" model found from grid search earlier\nclf = (clone(best_clf)).fit(X_train_reduced, y_train)\n\n# Make new predictions\nreduced_predictions = clf.predict(X_test_reduced)\n\n# Report scores from the final model using both versions of data\nprint(\"Final Model trained on full data\\n------\")\nprint(\"Accuracy on testing data: {:.4f}\".format(accuracy_score(y_test, best_predictions)))\nprint(\"F-score on testing data: {:.4f}\".format(fbeta_score(y_test, best_predictions, beta = 0.5)))\nprint(\"\\nFinal Model trained on reduced data\\n------\")\nprint(\"Accuracy on testing data: {:.4f}\".format(accuracy_score(y_test, reduced_predictions)))\nprint(\"F-score on testing data: {:.4f}\".format(fbeta_score(y_test, reduced_predictions, beta = 0.5)))", "Final Model trained on full data\n------\nAccuracy on testing data: 0.8676\nF-score on testing data: 0.7456\n\nFinal Model trained on reduced data\n------\nAccuracy on testing data: 0.8422\nF-score on testing data: 0.7021\n" ] ], [ [ "### Effects of Feature Selection\n\n* How does the final model's F-score and accuracy score on the reduced data using only five features compare to those same scores when all features are used?\n* If training time was a factor, would you consider using the reduced data as your training set?", "_____no_output_____" ], [ "**Answer:**\n\nThe model trained on reduced data gets an extra of ~2% of testing examples wrong, and its F-score is ~0.04 less. \n\nIf training time was a factor, I would probably still not use the reduced data as my training set. \n\nHowever, if more training examples yielded a significant improvement, I would recommend using lower-dimension data so that we could accommodate more training examples.", "_____no_output_____" ] ] ]
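As an optional side note, the manual reduction above (slicing the columns returned by `np.argsort(importances)`) can also be expressed with scikit-learn's `SelectFromModel`. The sketch below is only an alternative formulation of the same top-five reduction, and it assumes the notebook's `X_train`, `X_test` and `y_train` are still in scope.

```python
# Alternative sketch: keep only the most informative features with SelectFromModel
# instead of slicing by np.argsort. max_features=5 mirrors the manual reduction above.
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import AdaBoostClassifier

selector = SelectFromModel(AdaBoostClassifier(random_state=42),
                           threshold=-np.inf,   # rank purely by importance...
                           max_features=5)      # ...and keep the top five features
selector.fit(X_train, y_train)

X_train_top5 = selector.transform(X_train)
X_test_top5 = selector.transform(X_test)
print("Reduced training shape:", X_train_top5.shape)
```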
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb42869fcd85cafe2fa23aa07ae2ae30423c8756
73,132
ipynb
Jupyter Notebook
source/.ipynb_checkpoints/Async_Advection_with_AT2_trial-checkpoint.ipynb
aditya5252/Asynccode
c52e36fc05e5eb2d8720e1f1674b956b69a94958
[ "MIT" ]
null
null
null
source/.ipynb_checkpoints/Async_Advection_with_AT2_trial-checkpoint.ipynb
aditya5252/Asynccode
c52e36fc05e5eb2d8720e1f1674b956b69a94958
[ "MIT" ]
null
null
null
source/.ipynb_checkpoints/Async_Advection_with_AT2_trial-checkpoint.ipynb
aditya5252/Asynccode
c52e36fc05e5eb2d8720e1f1674b956b69a94958
[ "MIT" ]
null
null
null
169.287037
58,932
0.885878
[ [ [ "'''Try Euler time-stepping for a simple array\nPrint pe,u's as said by Shubham G.\nInitial conditions also same \n'''", "_____no_output_____" ], [ "def Async_sim(num_grid):\n import delay_file\n import probability_initial\n import error_file\n import analytical_file\n import ic_file\n import numpy as np\n import matplotlib.pyplot as plt\n import timestep_file\n import FLAG_file\n import grid_file\n import input_file\n import step\n\n ## This cell remains same for Asynchronous compute ##\n Length=2*np.pi\n nx_=num_grid\n dx=Length/(nx_-1)\n x_=grid_file.grid_(dx,nx_)\n C=input_file.c\n init_c=ic_file.ic_(x_,amp=input_file.amp_ls,kappa=input_file.k_ls,phi=input_file.phi_ls,\n num_k=input_file.numk,num_phi=input_file.numphi,Nx=nx_)\n dt=timestep_file.timestep_(dx,cfl=input_file.cfl,EqFLAG=FLAG_file.EqnFLAG,cx=input_file.c)\n Nt=input_file.N_t_\n arr_2d=[]\n # Nt=input_file.N_t_\n L=3\n u=init_c\n arr_2d.append(u)\n for k in range(L-1):\n rhs=step.cd2u1(u,C,dx,nx_,Eqflag=FLAG_file.EqnFLAG,Syncflag='DSync')\n u=step.euler(u,rhs,dt,nx_)\n arr_2d.append(u)\n arr_2d=np.stack(arr_2d)\n\n num_PEs=input_file.numPE\n per_PEs=int((nx_)/(num_PEs))\n ps_i,pe_i=probability_initial.prob_2D_from_arr_2D(arr_2d[:-1],num_PEs,per_PEs,L-1)\n u=arr_2d[L-1]\n ls=[]\n ls.append(u)\n ps=ps_i\n pe=pe_i\n for j in range(Nt):\n ys,ye=probability_initial.prob_1D_from_u_1D(u,num_PEs,per_PEs)\n ps=np.vstack((ps,ys))\n pe=np.vstack((pe,ye))\n# print(f'\\n{j}th iteration pe is {pe} \\n u is {u} ')\n rhs,ps,pe=step.cd2u1(u,C,dx,nx_,Eqflag=FLAG_file.EqnFLAG,Syncflag=FLAG_file.SyncFLAG,L=3,PE=num_PEs,perPE=per_PEs,\n pstart=ps,pend=pe,ATolFLAG=FLAG_file.ATFLAG)\n \n u=step.euler(u,rhs,dt,nx_)\n \n ls.append(u)\n \n Nt_total=Nt+L-1\n# for i in range(40):\n# plt.plot(x_,ls[i*7])\n ana_soln=analytical_file.analytical_(x_,input_file.amp_ls,input_file.k_ls,input_file.phi_ls,input_file.numk,\n input_file.numphi,nx_,dt*Nt,C,0)\n error_Nx=error_file.error_MSE_(ana_soln,ls[-1])\n return error_Nx,np.vstack((arr_2d,ls[1:]))\n ", "_____no_output_____" ], [ "def at2u0(pe,l,L, p_arr): \n a = l+1 \n b = -l \n temp = a*p_arr[L-1-l][pe]+b*p_arr[L-2-l][pe]\n return temp", "_____no_output_____" ], [ "er.shape", "_____no_output_____" ], [ "for l in range(4):\n at2u0(0,l,4,er)", "_____no_output_____" ], [ "for pe in range(4):\n for l in range(4):\n at2u0(pe,l,4,er)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\nerr=[]\nn_list=64*np.arange(7,15)\nprint(n_list)\nfor n in n_list:\n err.append(Async_sim(n)[0])\n# plt.plot(n_list,err)", "[448 512 576 640 704 768 832 896]\n" ], [ "import numpy as np\ndef plot_error(n_list,err,comptype='Synchronous',order=2):\n # plt.plot(n_list,err)\n plt.rcParams.update({'font.size': 22})\n plt.figure(figsize=(10,8))\n plt.plot(np.log(n_list),np.log(err),label=f'{comptype}Order')\n plt.plot(np.log(n_list),-order*np.log(n_list)+6,label=f'{order}-Order')\n plt.title(f\"{comptype} Computations Accuracy order\")\n plt.xlabel(\"log-N\")\n plt.ylabel(\"log-Avg.Error\")\n plt.legend()\n plt.grid()\nplot_error(n_list,err,'Asynchronous',2)", "_____no_output_____" ], [ "import input_file\nnum_X=input_file.Nx\nAsync_FD_data=Async_sim(num_X)[1]", "_____no_output_____" ], [ "x=np.arange(12).reshape(3,4)\ny=np.arange(4,8)", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "x=np.vstack((x,y))", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "plt.plot(np.log(n_list),np.log(err))\nplt.plot(np.log(n_list),-1*np.log(n_list))", "_____no_output_____" ], [ "import delay_file", 
"_____no_output_____" ], [ "x=0\nfor i in range(10000):\n x+=delay_file.delay_()", "_____no_output_____" ], [ "x/10000", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb428939bdc1da9543ed6a9397528139f67cab7b
706,740
ipynb
Jupyter Notebook
assignment1/features.ipynb
AnCoSONG/cs231n-assignments
f77f4ba1532be946370ca5fe6b31c187901e90e8
[ "MIT" ]
null
null
null
assignment1/features.ipynb
AnCoSONG/cs231n-assignments
f77f4ba1532be946370ca5fe6b31c187901e90e8
[ "MIT" ]
null
null
null
assignment1/features.ipynb
AnCoSONG/cs231n-assignments
f77f4ba1532be946370ca5fe6b31c187901e90e8
[ "MIT" ]
null
null
null
1,034.758419
315,924
0.953847
[ [ [ "# Image features exercise\n*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*\n\nWe have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels.\n\nAll of your work for this exercise will be done in this notebook.", "_____no_output_____" ] ], [ [ "import random\nimport numpy as np\nfrom cs231n.data_utils import load_CIFAR10\nimport matplotlib.pyplot as plt\n\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading extenrnal modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "## Load data\nSimilar to previous exercises, we will load CIFAR-10 data from disk.", "_____no_output_____" ] ], [ [ "from cs231n.features import color_histogram_hsv, hog_feature\n\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n\n # Cleaning up variables to prevent loading data multiple times (which may cause memory issue)\n try:\n del X_train, y_train\n del X_test, y_test\n print('Clear previously loaded data.')\n except:\n pass\n\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n \n # Subsample the data\n mask = list(range(num_training, num_training + num_validation))\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = list(range(num_training))\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = list(range(num_test))\n X_test = X_test[mask]\n y_test = y_test[mask]\n \n return X_train, y_train, X_val, y_val, X_test, y_test\n\nX_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()\nprint(f'{X_train.shape}')", "(49000, 32, 32, 3)\n" ] ], [ [ "## Extract Features\nFor each image we will compute a Histogram of Oriented\nGradients (HOG) as well as a color histogram using the hue channel in HSV\ncolor space. We form our final feature vector for each image by concatenating\nthe HOG and color histogram feature vectors.\n\nRoughly speaking, HOG should capture the texture of the image while ignoring\ncolor information, and the color histogram represents the color of the input\nimage while ignoring texture. As a result, we expect that using both together\nought to work better than using either alone. Verifying this assumption would\nbe a good thing to try for your own interest.\n\nThe `hog_feature` and `color_histogram_hsv` functions both operate on a single\nimage and return a feature vector for that image. 
The extract_features\nfunction takes a set of images and a list of feature functions and evaluates\neach feature function on each image, storing the results in a matrix where\neach column is the concatenation of all feature vectors for a single image.", "_____no_output_____" ] ], [ [ "from cs231n.features import *\n\nnum_color_bins = 10 # Number of bins in the color histogram\nfeature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]\nX_train_feats = extract_features(X_train, feature_fns, verbose=True)\nX_val_feats = extract_features(X_val, feature_fns)\nX_test_feats = extract_features(X_test, feature_fns)\n\n# Preprocessing: Subtract the mean feature\nmean_feat = np.mean(X_train_feats, axis=0, keepdims=True)\nX_train_feats -= mean_feat\nX_val_feats -= mean_feat\nX_test_feats -= mean_feat\n\n# Preprocessing: Divide by standard deviation. This ensures that each feature\n# has roughly the same scale.\nstd_feat = np.std(X_train_feats, axis=0, keepdims=True)\nX_train_feats /= std_feat\nX_val_feats /= std_feat\nX_test_feats /= std_feat\n\n# Preprocessing: Add a bias dimension\nX_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])\nX_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])\nX_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])", "Done extracting features for 1000 / 49000 images\nDone extracting features for 2000 / 49000 images\nDone extracting features for 3000 / 49000 images\nDone extracting features for 4000 / 49000 images\nDone extracting features for 5000 / 49000 images\nDone extracting features for 6000 / 49000 images\nDone extracting features for 7000 / 49000 images\nDone extracting features for 8000 / 49000 images\nDone extracting features for 9000 / 49000 images\nDone extracting features for 10000 / 49000 images\nDone extracting features for 11000 / 49000 images\nDone extracting features for 12000 / 49000 images\nDone extracting features for 13000 / 49000 images\nDone extracting features for 14000 / 49000 images\nDone extracting features for 15000 / 49000 images\nDone extracting features for 16000 / 49000 images\nDone extracting features for 17000 / 49000 images\nDone extracting features for 18000 / 49000 images\nDone extracting features for 19000 / 49000 images\nDone extracting features for 20000 / 49000 images\nDone extracting features for 21000 / 49000 images\nDone extracting features for 22000 / 49000 images\nDone extracting features for 23000 / 49000 images\nDone extracting features for 24000 / 49000 images\nDone extracting features for 25000 / 49000 images\nDone extracting features for 26000 / 49000 images\nDone extracting features for 27000 / 49000 images\nDone extracting features for 28000 / 49000 images\nDone extracting features for 29000 / 49000 images\nDone extracting features for 30000 / 49000 images\nDone extracting features for 31000 / 49000 images\nDone extracting features for 32000 / 49000 images\nDone extracting features for 33000 / 49000 images\nDone extracting features for 34000 / 49000 images\nDone extracting features for 35000 / 49000 images\nDone extracting features for 36000 / 49000 images\nDone extracting features for 37000 / 49000 images\nDone extracting features for 38000 / 49000 images\nDone extracting features for 39000 / 49000 images\nDone extracting features for 40000 / 49000 images\nDone extracting features for 41000 / 49000 images\nDone extracting features for 42000 / 49000 images\nDone extracting features for 43000 / 49000 images\nDone 
extracting features for 44000 / 49000 images\nDone extracting features for 45000 / 49000 images\nDone extracting features for 46000 / 49000 images\nDone extracting features for 47000 / 49000 images\nDone extracting features for 48000 / 49000 images\nDone extracting features for 49000 / 49000 images\n" ], [ "print(X_train_feats.shape)", "(49000, 155)\n" ] ], [ [ "## Train SVM on features\nUsing the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels.", "_____no_output_____" ] ], [ [ "# Use the validation set to tune the learning rate and regularization strength\n\nfrom cs231n.classifiers.linear_classifier import LinearSVM\n\nlearning_rates = [1e-9, 1e-8, 1e-7]\nregularization_strengths = [5e4, 5e5, 5e6]\n\nresults = {}\nbest_val = -1\nbest_svm = None\n\n################################################################################\n# TODO: #\n# Use the validation set to set the learning rate and regularization strength. #\n# This should be identical to the validation that you did for the SVM; save #\n# the best trained classifer in best_svm. You might also want to play #\n# with different numbers of bins in the color histogram. If you are careful #\n# you should be able to get accuracy of near 0.44 on the validation set. #\n################################################################################\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\nfor lr in learning_rates:\n for reg in regularization_strengths:\n it = max(X_train_feats.shape[0] // 100, 1)\n num_epoch = 5\n it *= num_epoch\n svm = LinearSVM()\n svm.train(X_train_feats, y_train, learning_rate=lr, reg=reg,\n num_iters=it, batch_size=500, verbose=False)\n train_acc = np.mean(svm.predict(X_train_feats) == y_train)\n val_acc = np.mean(svm.predict(X_val_feats) == y_val)\n results[(lr, reg)] = (train_acc, val_acc)\n if val_acc > best_val:\n best_val = val_acc\n best_svm = svm\n\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n# Print out results.\nfor lr, reg in sorted(results):\n train_accuracy, val_accuracy = results[(lr, reg)]\n print('lr %e reg %e train accuracy: %f val accuracy: %f' % (\n lr, reg, train_accuracy, val_accuracy))\n \nprint('best validation accuracy achieved during cross-validation: %f' % best_val)", "lr 1.000000e-09 reg 5.000000e+04 train accuracy: 0.115184 val accuracy: 0.110000\nlr 1.000000e-09 reg 5.000000e+05 train accuracy: 0.107020 val accuracy: 0.104000\nlr 1.000000e-09 reg 5.000000e+06 train accuracy: 0.416469 val accuracy: 0.412000\nlr 1.000000e-08 reg 5.000000e+04 train accuracy: 0.119714 val accuracy: 0.095000\nlr 1.000000e-08 reg 5.000000e+05 train accuracy: 0.415694 val accuracy: 0.420000\nlr 1.000000e-08 reg 5.000000e+06 train accuracy: 0.405408 val accuracy: 0.399000\nlr 1.000000e-07 reg 5.000000e+04 train accuracy: 0.413714 val accuracy: 0.417000\nlr 1.000000e-07 reg 5.000000e+05 train accuracy: 0.416735 val accuracy: 0.421000\nlr 1.000000e-07 reg 5.000000e+06 train accuracy: 0.377347 val accuracy: 0.373000\nbest validation accuracy achieved during cross-validation: 0.421000\n" ], [ "# Evaluate your trained SVM on the test set: you should be able to get at least 0.40\ny_test_pred = best_svm.predict(X_test_feats)\ntest_accuracy = np.mean(y_test == y_test_pred)\nprint(test_accuracy)", "0.425\n" ], [ "# An important way to gain intuition about how an algorithm works is to\n# visualize the mistakes that it makes. 
In this visualization, we show examples\n# of images that are misclassified by our current system. The first column\n# shows images that our system labeled as \"plane\" but whose true label is\n# something other than \"plane\".\n\nexamples_per_class = 8\nprint('some pictures that classified incorrectly.')\nclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\nfor cls, cls_name in enumerate(classes):\n idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]\n idxs = np.random.choice(idxs, examples_per_class, replace=False)\n for i, idx in enumerate(idxs):\n plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)\n plt.imshow(X_test[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls_name)\nplt.show()\n# show some pictures that classified correctly\nprint('some that correctly classified.')\nfor cls, cls_name in enumerate(classes):\n idxs = np.where((y_test == cls) & (y_test_pred == cls))[0]\n idxs = np.random.choice(idxs, examples_per_class, replace=False)\n for i, idx in enumerate(idxs):\n plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)\n plt.imshow(X_test[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls_name)\nplt.show()", "some pictures that classified incorrectly.\n" ] ], [ [ "### Inline question 1:\nDescribe the misclassification results that you see. Do they make sense?\n\n\n$\\color{blue}{\\textit Your Answer:}$\n\nFor example, row 5 col 1 picture is a bird, but the plane also has two wings, thus they might have some similar HoG features because they both has some diagonal gradients.\n\n", "_____no_output_____" ], [ "## Neural Network on image features\nEarlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels. \n\nFor completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.", "_____no_output_____" ] ], [ [ "# Preprocessing: Remove the bias dimension\n# Make sure to run this cell only ONCE\nprint(X_train_feats.shape)\nX_train_feats = X_train_feats[:, :-1]\nX_val_feats = X_val_feats[:, :-1]\nX_test_feats = X_test_feats[:, :-1]\n\nprint(X_train_feats.shape)", "(49000, 155)\n(49000, 154)\n" ], [ "from cs231n.classifiers.neural_net import TwoLayerNet\n\ninput_dim = X_train_feats.shape[1]\nhidden_dim = 500\nnum_classes = 10\n\nbest_val_acc = 0.0\nbest_net = None\nbest_lr = 1e-5\nbest_hs = 200\nbest_stats = None\n\n################################################################################\n# TODO: Train a two-layer neural network on image features. You may want to #\n# cross-validate various parameters as in previous sections. Store your best #\n# model in the best_net variable. 
#\n################################################################################\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\ndef plot_history(stats):\n plt.subplot(2, 1, 1)\n plt.plot(stats['loss_history'])\n plt.title('Loss history')\n plt.xlabel('Iteration')\n plt.ylabel('Loss')\n\n plt.subplot(2, 1, 2)\n plt.plot(stats['train_acc_history'], label='train')\n plt.plot(stats['val_acc_history'], label='val')\n plt.title('Classification accuracy history')\n plt.xlabel('Epoch')\n plt.ylabel('Classification accuracy')\n plt.legend()\n plt.show()\n \n \ndef tune(lr, hs, reg, verbose=False):\n net = TwoLayerNet(input_dim, hs, num_classes)\n batch_size = 200\n num_epoch = 10\n num_iters = max(X_train_feats.shape[0] // batch_size, 1) * num_epoch\n stats = net.train(X_train_feats, y_train, X_val_feats, y_val, learning_rate=lr, reg=reg,\n learning_rate_decay=0.95, batch_size=batch_size, num_iters=num_iters, verbose=verbose)\n val_acc = np.mean(net.predict(X_val_feats) == y_val)\n train_acc = np.mean(net.predict(X_train_feats) == y_train)\n print(f'lr:{lr} hidden_size:{hs} lr_reg: {reg} train_acc:{train_acc} val_acc:{val_acc}')\n return val_acc, net, stats\n\n# tuning process is in the code cell below \n\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n", "_____no_output_____" ], [ "best_val_acc = 0.0\nbest_net = None\nbest_lr = 5e-2\nbest_hs = 500\nbest_stats = None\nbest_reg = 5e-6", "_____no_output_____" ], [ "learning_rates = [1e-2, 5e-2, 1e-1, 5e-1]\nhidden_dim = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]\nlr_reg = [1e-4, 2e-4, 3e-4, 7e-4, 2e-3]", "_____no_output_____" ], [ "# for lr in learning_rates:\n# val_acc, net, stats = tune(lr, best_hs, best_reg, False)\n# if val_acc > best_val_acc:\n# best_lr = lr\n# best_net = net\n# best_stats = stats\n# best_val_acc = val_acc\n \n# for hs in hidden_dim:\n# val_acc, net, stats = tune(best_lr, hs, best_reg, False)\n# if val_acc > best_val_acc:\n# best_hs = hs\n# best_net = net\n# best_stats = stats\n# best_val_acc = val_acc\n \nfor reg in lr_reg:\n val_acc, net, stats = tune(best_lr, best_hs, reg, False)\n if val_acc > best_val_acc:\n best_reg = reg\n best_net = net\n best_stats = stats\n best_val_acc = val_acc\n\nprint(f'best val: {best_val_acc}, best_lr:{best_lr} best_hidden_size:{best_hs} best_lr_reg:{best_reg}')\nplot_history(best_stats)", "lr:0.5 hidden_size:500 lr_reg: 0.0001 train_acc:0.7471836734693877 val_acc:0.575\nlr:0.5 hidden_size:500 lr_reg: 0.0002 train_acc:0.7416122448979592 val_acc:0.576\nlr:0.5 hidden_size:500 lr_reg: 0.0003 train_acc:0.7335306122448979 val_acc:0.578\nlr:0.5 hidden_size:500 lr_reg: 0.0007 train_acc:0.7100816326530612 val_acc:0.584\nlr:0.5 hidden_size:500 lr_reg: 0.002 train_acc:0.6478979591836734 val_acc:0.58\nbest val: 0.586, best_lr:0.5 best_hidden_size:500 best_lr_reg:0.001\n" ], [ "# Run your best neural net classifier on the test set. You should be able\n# to get more than 55% accuracy.\n\ntest_acc = (best_net.predict(X_test_feats) == y_test).mean()\nprint(test_acc)", "0.57\n" ] ] ]
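For readers without the assignment's `cs231n.features` helpers, a roughly equivalent HOG-plus-hue-histogram extractor can be sketched with common libraries, as below. This is an approximation for illustration only — `skimage` and `matplotlib.colors` are assumptions here rather than part of the original code, the HOG parameters differ from the course implementation, and the random array merely stands in for a CIFAR-10 image.

```python
# Sketch of the HOG (texture) + hue histogram (color) feature idea with common libraries.
import numpy as np
from skimage.feature import hog
from skimage.color import rgb2gray
import matplotlib.colors as mcolors

def hog_plus_hue_hist(img_rgb, nbin=10):
    """Concatenate a HOG descriptor with a hue histogram for one RGB image."""
    gray = rgb2gray(img_rgb)
    hog_vec = hog(gray, orientations=9, pixels_per_cell=(8, 8),
                  cells_per_block=(1, 1), feature_vector=True)
    hsv = mcolors.rgb_to_hsv(img_rgb / 255.0)
    hue_hist, _ = np.histogram(hsv[..., 0], bins=nbin, range=(0, 1), density=True)
    return np.concatenate([hog_vec, hue_hist])

# Example on a random 32x32 "image"; on CIFAR-10 you would loop over X_train instead.
dummy = np.random.randint(0, 256, size=(32, 32, 3)).astype(float)
print(hog_plus_hue_hist(dummy).shape)
```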
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb42894e2acc0694c436fee05d320dc6c5259b2f
65,857
ipynb
Jupyter Notebook
Chapter03/Exercise3.06/Exercise_3_06.ipynb
nijinjose/The-Supervised-Learning-Workshop
33a2fec1e202dc1394116ed7a194bd8cabb61d49
[ "MIT" ]
19
2020-03-24T20:35:22.000Z
2022-01-03T19:19:48.000Z
Chapter03/Exercise3.06/Exercise_3_06.ipynb
thisabhijit/The-Supervised-Learning-Workshop
33a2fec1e202dc1394116ed7a194bd8cabb61d49
[ "MIT" ]
null
null
null
Chapter03/Exercise3.06/Exercise_3_06.ipynb
thisabhijit/The-Supervised-Learning-Workshop
33a2fec1e202dc1394116ed7a194bd8cabb61d49
[ "MIT" ]
50
2020-01-03T10:22:30.000Z
2022-01-15T07:54:26.000Z
182.429363
57,544
0.905553
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\nfrom sklearn.linear_model import SGDRegressor", "_____no_output_____" ], [ "#\n# load the data\n#\ndf = pd.read_csv('../Datasets/synth_temp.csv')", "_____no_output_____" ], [ "#\n# slice 1902 and forward\n#\ndf = df.loc[df.Year > 1901]", "_____no_output_____" ], [ "#\n# roll up by year\n#\ndf_group_year = df.groupby(['Year']).agg({'RgnAvTemp' : 'mean'})", "_____no_output_____" ], [ "#\n# add the Year column so we can use that in a model\n#\ndf_group_year['Year'] = df_group_year.index\ndf_group_year = df_group_year.rename(columns = {'RgnAvTemp' : 'AvTemp'})", "_____no_output_____" ], [ "#\n# scale the data\n#\nX_min = df_group_year.Year.min()\nX_range = df_group_year.Year.max() - df_group_year.Year.min()\nY_min = df_group_year.AvTemp.min()\nY_range = df_group_year.AvTemp.max() - df_group_year.AvTemp.min()\nscale_X = (df_group_year.Year - X_min) / X_range\n#\ntrain_X = scale_X.ravel()\ntrain_Y = ((df_group_year.AvTemp - Y_min) / Y_range).ravel()", "_____no_output_____" ], [ "#\n# create the model object\n#\nnp.random.seed(42)\nmodel = SGDRegressor(\n loss = 'squared_loss',\n max_iter = 100,\n learning_rate = 'constant',\n eta0 = 0.0005,\n tol = 0.00009,\n penalty = 'none')", "_____no_output_____" ], [ "#\n# fit the model\n#\nmodel.fit(train_X.reshape((-1, 1)), train_Y)", "_____no_output_____" ], [ "Beta0 = (Y_min + Y_range * model.intercept_[0] - \n Y_range * model.coef_[0] * X_min / X_range)\nBeta1 = Y_range * model.coef_[0] / X_range\nprint(Beta0)\nprint(Beta1)", "-0.5798539884018439\n0.009587734834970016\n" ], [ "#\n# generate predictions\n#\npred_X = df_group_year['Year']\npred_Y = model.predict(train_X.reshape((-1, 1)))", "_____no_output_____" ], [ "#\n# calcualte the r squared value\n#\nr2 = r2_score(train_Y, pred_Y)\nprint('r squared = ', r2)", "r squared = 0.5436475116024911\n" ], [ "#\n# scale predictions back to real values\n#\npred_Y = (pred_Y * Y_range) + Y_min", "_____no_output_____" ], [ "fig = plt.figure(figsize=(10, 7))\nax = fig.add_axes([1, 1, 1, 1])\n#\n# Raw data\n#\nraw_plot_data = df\nax.scatter(raw_plot_data.Year, \n raw_plot_data.RgnAvTemp, \n label = 'Raw Data', \n c = 'red',\n s = 1.5)\n#\n# Annual averages\n#\nax.scatter(df_group_year.Year, \n df_group_year.AvTemp, \n label = 'Annual average', \n c = 'k',\n s = 10)\n#\n# linear fit\n#\nax.plot(pred_X, pred_Y, \n c = \"blue\",\n linestyle = '-.',\n linewidth = 4,\n label = 'linear fit')\n#\n# put the model on the plot\n#\nax.text(1902, 20,\n 'Temp = ' + \n str(round(Beta0, 2)) +\n ' + ' +\n str(round(Beta1, 4)) +\n ' * Year',\n fontsize = 16)\n#\nax.set_title('Mean Air Temperature Measurements',\n fontsize = 16)\n#\n# make the ticks include the first and last years\n#\ntick_years = [1902] + list(range(1910, 2011, 10))\nax.set_xlabel('Year', \n fontsize = 14)\nax.set_ylabel('Temperature ($^\\circ$C)', \n fontsize = 14)\nax.set_ylim(15, 21)\nax.set_xticks(tick_years)\nax.tick_params(labelsize = 12)\nax.legend(fontsize = 12)\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb428bc027f0f9244938d7aacadd61d367384fb3
35,849
ipynb
Jupyter Notebook
validation.ipynb
ramuoneplus2/Machine_learning_practice
4e0aa1ac304701e2f26e79ab115be424364116f5
[ "Apache-2.0" ]
5
2018-10-17T01:44:26.000Z
2021-06-20T13:50:27.000Z
validation.ipynb
ramuoneplus2/Machine_learning_practice
4e0aa1ac304701e2f26e79ab115be424364116f5
[ "Apache-2.0" ]
null
null
null
validation.ipynb
ramuoneplus2/Machine_learning_practice
4e0aa1ac304701e2f26e79ab115be424364116f5
[ "Apache-2.0" ]
2
2020-02-18T06:10:08.000Z
2022-01-22T19:17:05.000Z
38.382227
305
0.550141
[ [ [ "[View in Colaboratory](https://colab.research.google.com/github/ArunkumarRamanan/Exercises-Machine-Learning-Crash-Course-Google-Developers/blob/master/validation.ipynb)", "_____no_output_____" ], [ "#### Copyright 2017 Google LLC.", "_____no_output_____" ] ], [ [ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Validation", "_____no_output_____" ], [ "**Learning Objectives:**\n * Use multiple features, instead of a single feature, to further improve the effectiveness of a model\n * Debug issues in model input data\n * Use a test data set to check if a model is overfitting the validation data", "_____no_output_____" ], [ "As in the prior exercises, we're working with the [California housing data set](https://developers.google.com/machine-learning/crash-course/california-housing-data-description), to try and predict `median_house_value` at the city block level from 1990 census data.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ], [ "First off, let's load up and prepare our data. This time, we're going to work with multiple features, so we'll modularize the logic for preprocessing the features a bit:", "_____no_output_____" ] ], [ [ "from __future__ import print_function\n\nimport math\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format\n\ncalifornia_housing_dataframe = pd.read_csv(\"https://dl.google.com/mlcc/mledu-datasets/california_housing_train.csv\", sep=\",\")\n\n# california_housing_dataframe = california_housing_dataframe.reindex(\n# np.random.permutation(california_housing_dataframe.index))", "_____no_output_____" ], [ "def preprocess_features(california_housing_dataframe):\n \"\"\"Prepares input features from California housing data set.\n\n Args:\n california_housing_dataframe: A Pandas DataFrame expected to contain data\n from the California housing data set.\n Returns:\n A DataFrame that contains the features to be used for the model, including\n synthetic features.\n \"\"\"\n selected_features = california_housing_dataframe[\n [\"latitude\",\n \"longitude\",\n \"housing_median_age\",\n \"total_rooms\",\n \"total_bedrooms\",\n \"population\",\n \"households\",\n \"median_income\"]]\n processed_features = selected_features.copy()\n # Create a synthetic feature.\n processed_features[\"rooms_per_person\"] = (\n california_housing_dataframe[\"total_rooms\"] /\n california_housing_dataframe[\"population\"])\n return processed_features\n\ndef preprocess_targets(california_housing_dataframe):\n \"\"\"Prepares target features (i.e., labels) from California housing data set.\n\n Args:\n california_housing_dataframe: A Pandas DataFrame expected to contain data\n from the California housing 
data set.\n Returns:\n A DataFrame that contains the target feature.\n \"\"\"\n output_targets = pd.DataFrame()\n # Scale the target to be in units of thousands of dollars.\n output_targets[\"median_house_value\"] = (\n california_housing_dataframe[\"median_house_value\"] / 1000.0)\n return output_targets", "_____no_output_____" ] ], [ [ "For the **training set**, we'll choose the first 12000 examples, out of the total of 17000.", "_____no_output_____" ] ], [ [ "training_examples = preprocess_features(california_housing_dataframe.head(12000))\ntraining_examples.describe()", "_____no_output_____" ], [ "training_targets = preprocess_targets(california_housing_dataframe.head(12000))\ntraining_targets.describe()", "_____no_output_____" ] ], [ [ "For the **validation set**, we'll choose the last 5000 examples, out of the total of 17000.", "_____no_output_____" ] ], [ [ "validation_examples = preprocess_features(california_housing_dataframe.tail(5000))\nvalidation_examples.describe()", "_____no_output_____" ], [ "validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))\nvalidation_targets.describe()", "_____no_output_____" ] ], [ [ "## Task 1: Examine the Data\nOkay, let's look at the data above. We have `9` input features that we can use.\n\nTake a quick skim over the table of values. Everything look okay? See how many issues you can spot. Don't worry if you don't have a background in statistics; common sense will get you far.\n\nAfter you've had a chance to look over the data yourself, check the solution for some additional thoughts on how to verify data.", "_____no_output_____" ], [ "### Solution\n\nClick below for the solution.", "_____no_output_____" ], [ "Let's check our data against some baseline expectations:\n\n* For some values, like `median_house_value`, we can check to see if these values fall within reasonable ranges (keeping in mind this was 1990 data — not today!).\n\n* For other values, like `latitude` and `longitude`, we can do a quick check to see if these line up with expected values from a quick Google search.\n\nIf you look closely, you may see some oddities:\n\n* `median_income` is on a scale from about 3 to 15. It's not at all clear what this scale refers to—looks like maybe some log scale? It's not documented anywhere; all we can assume is that higher values correspond to higher income.\n\n* The maximum `median_house_value` is 500,001. This looks like an artificial cap of some kind.\n\n* Our `rooms_per_person` feature is generally on a sane scale, with a 75th percentile value of about 2. But there are some very large values, like 18 or 55, which may show some amount of corruption in the data.\n\nWe'll use these features as given for now. But hopefully these kinds of examples can help to build a little intuition about how to check data that comes to you from an unknown source.", "_____no_output_____" ], [ "## Task 2: Plot Latitude/Longitude vs. Median House Value", "_____no_output_____" ], [ "Let's take a close look at two features in particular: **`latitude`** and **`longitude`**. 
These are geographical coordinates of the city block in question.\n\nThis might make a nice visualization — let's plot `latitude` and `longitude`, and use color to show the `median_house_value`.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(13, 8))\n\nax = plt.subplot(1, 2, 1)\nax.set_title(\"Validation Data\")\n\nax.set_autoscaley_on(False)\nax.set_ylim([32, 43])\nax.set_autoscalex_on(False)\nax.set_xlim([-126, -112])\nplt.scatter(validation_examples[\"longitude\"],\n validation_examples[\"latitude\"],\n cmap=\"coolwarm\",\n c=validation_targets[\"median_house_value\"] / validation_targets[\"median_house_value\"].max())\n\nax = plt.subplot(1,2,2)\nax.set_title(\"Training Data\")\n\nax.set_autoscaley_on(False)\nax.set_ylim([32, 43])\nax.set_autoscalex_on(False)\nax.set_xlim([-126, -112])\nplt.scatter(training_examples[\"longitude\"],\n training_examples[\"latitude\"],\n cmap=\"coolwarm\",\n c=training_targets[\"median_house_value\"] / training_targets[\"median_house_value\"].max())\n_ = plt.plot()", "_____no_output_____" ] ], [ [ "Wait a second...this should have given us a nice map of the state of California, with red showing up in expensive areas like the San Francisco and Los Angeles.\n\nThe training set sort of does, compared to a [real map](https://www.google.com/maps/place/California/@37.1870174,-123.7642688,6z/data=!3m1!4b1!4m2!3m1!1s0x808fb9fe5f285e3d:0x8b5109a227086f55), but the validation set clearly doesn't.\n\n**Go back up and look at the data from Task 1 again.**\n\nDo you see any other differences in the distributions of features or targets between the training and validation data?", "_____no_output_____" ], [ "### Solution\n\nClick below for the solution.", "_____no_output_____" ], [ "Looking at the tables of summary stats above, it's easy to wonder how anyone would do a useful data check. What's the right 75<sup>th</sup> percentile value for total_rooms per city block?\n\nThe key thing to notice is that for any given feature or column, the distribution of values between the train and validation splits should be roughly equal.\n\nThe fact that this is not the case is a real worry, and shows that we likely have a fault in the way that our train and validation split was created.", "_____no_output_____" ], [ "## Task 3: Return to the Data Importing and Pre-Processing Code, and See if You Spot Any Bugs\nIf you do, go ahead and fix the bug. Don't spend more than a minute or two looking. If you can't find the bug, check the solution.", "_____no_output_____" ], [ "When you've found and fixed the issue, re-run `latitude` / `longitude` plotting cell above and confirm that our sanity checks look better.\n\nBy the way, there's an important lesson here.\n\n**Debugging in ML is often *data debugging* rather than code debugging.**\n\nIf the data is wrong, even the most advanced ML code can't save things.", "_____no_output_____" ], [ "### Solution\n\nClick below for the solution.", "_____no_output_____" ], [ "Take a look at how the data is randomized when it's read in.\n\nIf we don't randomize the data properly before creating training and validation splits, then we may be in trouble if the data is given to us in some sorted order, which appears to be the case here.", "_____no_output_____" ], [ "## Task 4: Train and Evaluate a Model\n\n**Spend 5 minutes or so trying different hyperparameter settings. 
Try to get the best validation performance you can.**\n\nNext, we'll train a linear regressor using all the features in the data set, and see how well we do.\n\nLet's define the same input function we've used previously for loading the data into a TensorFlow model.\n", "_____no_output_____" ] ], [ [ "def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):\n \"\"\"Trains a linear regression model of multiple features.\n \n Args:\n features: pandas DataFrame of features\n targets: pandas DataFrame of targets\n batch_size: Size of batches to be passed to the model\n shuffle: True or False. Whether to shuffle the data.\n num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely\n Returns:\n Tuple of (features, labels) for next data batch\n \"\"\"\n \n # Convert pandas data into a dict of np arrays.\n features = {key:np.array(value) for key,value in dict(features).items()} \n \n # Construct a dataset, and configure batching/repeating.\n ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit\n ds = ds.batch(batch_size).repeat(num_epochs)\n \n # Shuffle the data, if specified.\n if shuffle:\n ds = ds.shuffle(10000)\n \n # Return the next batch of data.\n features, labels = ds.make_one_shot_iterator().get_next()\n return features, labels", "_____no_output_____" ] ], [ [ "Because we're now working with multiple input features, let's modularize our code for configuring feature columns into a separate function. (For now, this code is fairly simple, as all our features are numeric, but we'll build on this code as we use other types of features in future exercises.)", "_____no_output_____" ] ], [ [ "def construct_feature_columns(input_features):\n \"\"\"Construct the TensorFlow Feature Columns.\n\n Args:\n input_features: The names of the numerical input features to use.\n Returns:\n A set of feature columns\n \"\"\" \n return set([tf.feature_column.numeric_column(my_feature)\n for my_feature in input_features])", "_____no_output_____" ] ], [ [ "Next, go ahead and complete the `train_model()` code below to set up the input functions and calculate predictions.\n\n**NOTE:** It's okay to reference the code from the previous exercises, but make sure to call `predict()` on the appropriate data sets.\n\nCompare the losses on training data and validation data. With a single raw feature, our best root mean squared error (RMSE) was of about 180.\n\nSee how much better you can do now that we can use multiple features.\n\nCheck the data using some of the methods we've looked at before. These might include:\n\n * Comparing distributions of predictions and actual target values\n\n * Creating a scatter plot of predictions vs. target values\n\n * Creating two scatter plots of validation data using `latitude` and `longitude`:\n * One plot mapping color to actual target `median_house_value`\n * A second plot mapping color to predicted `median_house_value` for side-by-side comparison.", "_____no_output_____" ] ], [ [ "def train_model(\n learning_rate,\n steps,\n batch_size,\n training_examples,\n training_targets,\n validation_examples,\n validation_targets):\n \"\"\"Trains a linear regression model of multiple features.\n \n In addition to training, this function also prints training progress information,\n as well as a plot of the training and validation loss over time.\n \n Args:\n learning_rate: A `float`, the learning rate.\n steps: A non-zero `int`, the total number of training steps. 
A training step\n consists of a forward and backward pass using a single batch.\n batch_size: A non-zero `int`, the batch size.\n training_examples: A `DataFrame` containing one or more columns from\n `california_housing_dataframe` to use as input features for training.\n training_targets: A `DataFrame` containing exactly one column from\n `california_housing_dataframe` to use as target for training.\n validation_examples: A `DataFrame` containing one or more columns from\n `california_housing_dataframe` to use as input features for validation.\n validation_targets: A `DataFrame` containing exactly one column from\n `california_housing_dataframe` to use as target for validation.\n \n Returns:\n A `LinearRegressor` object trained on the training data.\n \"\"\"\n\n periods = 10\n steps_per_period = steps / periods\n \n # Create a linear regressor object.\n my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n linear_regressor = tf.estimator.LinearRegressor(\n feature_columns=construct_feature_columns(training_examples),\n optimizer=my_optimizer\n )\n \n # 1. Create input functions.\n training_input_fn = # YOUR CODE HERE\n predict_training_input_fn = # YOUR CODE HERE\n predict_validation_input_fn = # YOUR CODE HERE\n \n # Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics.\n print(\"Training model...\")\n print(\"RMSE (on training data):\")\n training_rmse = []\n validation_rmse = []\n for period in range (0, periods):\n # Train the model, starting from the prior state.\n linear_regressor.train(\n input_fn=training_input_fn,\n steps=steps_per_period,\n )\n # 2. Take a break and compute predictions.\n training_predictions = # YOUR CODE HERE\n validation_predictions = # YOUR CODE HERE\n \n # Compute training and validation loss.\n training_root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(training_predictions, training_targets))\n validation_root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(validation_predictions, validation_targets))\n # Occasionally print the current loss.\n print(\" period %02d : %0.2f\" % (period, training_root_mean_squared_error))\n # Add the loss metrics from this period to our list.\n training_rmse.append(training_root_mean_squared_error)\n validation_rmse.append(validation_root_mean_squared_error)\n print(\"Model training finished.\")\n\n # Output a graph of loss metrics over periods.\n plt.ylabel(\"RMSE\")\n plt.xlabel(\"Periods\")\n plt.title(\"Root Mean Squared Error vs. 
Periods\")\n plt.tight_layout()\n plt.plot(training_rmse, label=\"training\")\n plt.plot(validation_rmse, label=\"validation\")\n plt.legend()\n\n return linear_regressor", "_____no_output_____" ], [ "linear_regressor = train_model(\n # TWEAK THESE VALUES TO SEE HOW MUCH YOU CAN IMPROVE THE RMSE\n learning_rate=0.00001,\n steps=100,\n batch_size=1,\n training_examples=training_examples,\n training_targets=training_targets,\n validation_examples=validation_examples,\n validation_targets=validation_targets)", "_____no_output_____" ] ], [ [ "### Solution\n\nClick below for a solution.", "_____no_output_____" ] ], [ [ "def train_model(\n learning_rate,\n steps,\n batch_size,\n training_examples,\n training_targets,\n validation_examples,\n validation_targets):\n \"\"\"Trains a linear regression model of multiple features.\n \n In addition to training, this function also prints training progress information,\n as well as a plot of the training and validation loss over time.\n \n Args:\n learning_rate: A `float`, the learning rate.\n steps: A non-zero `int`, the total number of training steps. A training step\n consists of a forward and backward pass using a single batch.\n batch_size: A non-zero `int`, the batch size.\n training_examples: A `DataFrame` containing one or more columns from\n `california_housing_dataframe` to use as input features for training.\n training_targets: A `DataFrame` containing exactly one column from\n `california_housing_dataframe` to use as target for training.\n validation_examples: A `DataFrame` containing one or more columns from\n `california_housing_dataframe` to use as input features for validation.\n validation_targets: A `DataFrame` containing exactly one column from\n `california_housing_dataframe` to use as target for validation.\n \n Returns:\n A `LinearRegressor` object trained on the training data.\n \"\"\"\n\n periods = 10\n steps_per_period = steps / periods\n \n # Create a linear regressor object.\n my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n linear_regressor = tf.estimator.LinearRegressor(\n feature_columns=construct_feature_columns(training_examples),\n optimizer=my_optimizer\n )\n \n # Create input functions.\n training_input_fn = lambda: my_input_fn(\n training_examples, \n training_targets[\"median_house_value\"], \n batch_size=batch_size)\n predict_training_input_fn = lambda: my_input_fn(\n training_examples, \n training_targets[\"median_house_value\"], \n num_epochs=1, \n shuffle=False)\n predict_validation_input_fn = lambda: my_input_fn(\n validation_examples, validation_targets[\"median_house_value\"], \n num_epochs=1, \n shuffle=False)\n\n # Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics.\n print(\"Training model...\")\n print(\"RMSE (on training data):\")\n training_rmse = []\n validation_rmse = []\n for period in range (0, periods):\n # Train the model, starting from the prior state.\n linear_regressor.train(\n input_fn=training_input_fn,\n steps=steps_per_period,\n )\n # Take a break and compute predictions.\n training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)\n training_predictions = np.array([item['predictions'][0] for item in training_predictions])\n \n validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)\n validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])\n \n \n # 
Compute training and validation loss.\n training_root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(training_predictions, training_targets))\n validation_root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(validation_predictions, validation_targets))\n # Occasionally print the current loss.\n print(\" period %02d : %0.2f\" % (period, training_root_mean_squared_error))\n # Add the loss metrics from this period to our list.\n training_rmse.append(training_root_mean_squared_error)\n validation_rmse.append(validation_root_mean_squared_error)\n print(\"Model training finished.\")\n\n # Output a graph of loss metrics over periods.\n plt.ylabel(\"RMSE\")\n plt.xlabel(\"Periods\")\n plt.title(\"Root Mean Squared Error vs. Periods\")\n plt.tight_layout()\n plt.plot(training_rmse, label=\"training\")\n plt.plot(validation_rmse, label=\"validation\")\n plt.legend()\n\n return linear_regressor", "_____no_output_____" ], [ "linear_regressor = train_model(\n learning_rate=0.00003,\n steps=500,\n batch_size=5,\n training_examples=training_examples,\n training_targets=training_targets,\n validation_examples=validation_examples,\n validation_targets=validation_targets)", "_____no_output_____" ] ], [ [ "## Task 5: Evaluate on Test Data\n\n**In the cell below, load in the test data set and evaluate your model on it.**\n\nWe've done a lot of iteration on our validation data. Let's make sure we haven't overfit to the pecularities of that particular sample.\n\nTest data set is located [here](https://dl.google.com/mlcc/mledu-datasets/california_housing_test.csv).\n\nHow does your test performance compare to the validation performance? What does this say about the generalization performance of your model?", "_____no_output_____" ] ], [ [ "california_housing_test_data = pd.read_csv(\"https://dl.google.com/mlcc/mledu-datasets/california_housing_test.csv\", sep=\",\")\n#\n# YOUR CODE HERE\n#", "_____no_output_____" ] ], [ [ "### Solution\n\nClick below for the solution.", "_____no_output_____" ] ], [ [ "california_housing_test_data = pd.read_csv(\"https://dl.google.com/mlcc/mledu-datasets/california_housing_test.csv\", sep=\",\")\n\ntest_examples = preprocess_features(california_housing_test_data)\ntest_targets = preprocess_targets(california_housing_test_data)\n\npredict_test_input_fn = lambda: my_input_fn(\n test_examples, \n test_targets[\"median_house_value\"], \n num_epochs=1, \n shuffle=False)\n\ntest_predictions = linear_regressor.predict(input_fn=predict_test_input_fn)\ntest_predictions = np.array([item['predictions'][0] for item in test_predictions])\n\nroot_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(test_predictions, test_targets))\n\nprint(\"Final RMSE (on test data): %0.2f\" % root_mean_squared_error)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb428e79e35005a29e490183cdb41c6af71f98d3
282,317
ipynb
Jupyter Notebook
src/hawkes/hawkes_example.ipynb
douxiaotian/counterfactual-tpp
605f250d888b39ef100dfba14d4522c088a80c3a
[ "MIT" ]
12
2021-11-16T08:50:32.000Z
2022-03-16T14:37:49.000Z
src/hawkes/hawkes_example.ipynb
douxiaotian/counterfactual-tpp
605f250d888b39ef100dfba14d4522c088a80c3a
[ "MIT" ]
null
null
null
src/hawkes/hawkes_example.ipynb
douxiaotian/counterfactual-tpp
605f250d888b39ef100dfba14d4522c088a80c3a
[ "MIT" ]
3
2021-11-17T08:28:58.000Z
2022-01-02T05:46:35.000Z
598.129237
84,130
0.948338
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nfrom hawkes import hawkes, sampleHawkes, plotHawkes, iterative_sampling, extract_samples, sample_counterfactual_superposition, check_monotonicity_hawkes\nsys.path.append(os.path.abspath('../'))\nfrom sampling_utils import thinning_T", "_____no_output_____" ] ], [ [ "This notebook contains an example of running algorithm 3 (in the paper) for both cases where we have (1) both observed and un-observed events, and (2) the case that we have only the observed events.", "_____no_output_____" ], [ "# 1. Sampling From Lambda_max", "_____no_output_____" ] ], [ [ "# required parameters\nmu0 = 1\nalpha = 1\nw = 1\nlambda_max = 3\nT = 5\ndef constant1(x): return mu0\n# sampling from hawkes using the superposition property\ninitial_sample, indicators = thinning_T(0, constant1, lambda_max, T)\nevents = {initial_sample[i]: indicators[i] for i in range(len(initial_sample))}\nall_events = {}\nall_events[mu0] = events\niterative_sampling(all_events, events, mu0, alpha, w, lambda_max, T)\n# plotting hawkes\nsampled_events = list(all_events.keys())[1:]\nsampled_events.sort()\nsampled_events = np.array(sampled_events)\nsampled_lambdas = hawkes(sampled_events, mu0, alpha, w)\nplt.figure(figsize=(10, 8))\ntvec, l_t = plotHawkes(sampled_events, constant1, alpha, w, T, 10000.0, label= 'intensity', color = 'r+', legend= 'accepted')\nplt.plot(sampled_events, sampled_lambdas, 'r^')\nplt.legend()\nplt.show() ", "_____no_output_____" ], [ "# extract all sampled events from all_events dictionary.\nall_samples, all_lambdas = extract_samples(all_events, sampled_events, mu0, alpha, w)", "_____no_output_____" ], [ "# plots all events, both accepted and rejected with their intensities.\nplt.figure(figsize=(10, 8))\nplt.plot(tvec, l_t, label = 'Original Intensity')\nplt.plot(all_samples, all_lambdas, 'oy', label = 'events')\nplt.plot(sampled_events,sampled_lambdas, 'r+', label = 'accepted')\nplt.xlabel('time')\nplt.ylabel('intensity')\nplt.legend()", "_____no_output_____" ], [ "# sampling from the counterfactual intensity.\nnew_mu0 = 3\nnew_alpha = 0.1\nreal_counterfactuals = sample_counterfactual_superposition(mu0, alpha, new_mu0, new_alpha, all_events, lambda_max, w, T)", "_____no_output_____" ] ], [ [ "**The red +s are the counterfactuals.**", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(15, 6))\nplotHawkes(np.array(real_counterfactuals), lambda t: new_mu0, new_alpha, w, T, 10000.0, label= 'counterfactul intensity', color = 'g+', legend= 'accepted in counterfactual')\nplt.plot(tvec, l_t, label = 'Original Intensity')\nplt.plot(all_samples, all_lambdas, 'oy', label = 'events')\nplt.plot(sampled_events,sampled_lambdas, 'r^')\nplt.plot(sampled_events,np.full(len(sampled_events), -0.1), 'r+', label = 'originally accepted')\nfor xc in real_counterfactuals:\n plt.axvline(x=xc, color = 'k', ls = '--', alpha = 0.2)\nplt.xlabel('time')\nplt.ylabel('intensity')\nplt.legend() ", "_____no_output_____" ] ], [ [ "In the following cell, we will check monotonicity property. Note that this property should hold in **each exponential created by superposition** (please have a look at `check_monotonicity_hawkes` in `hawkes.py` for more details.). ", "_____no_output_____" ] ], [ [ "check_monotonicity_hawkes(mu0, alpha, new_mu0, new_alpha, all_events, sampled_events, real_counterfactuals, w)", "_____no_output_____" ] ], [ [ "# 2. 
Real-World Scenario", "_____no_output_____" ] ], [ [ "# First, we sample from the hawkes process using the Ogata's algorithm (or any other sampling method), but only store the accepted events.\nplt.figure(figsize=(10, 8))\nmu0 = 1\nalpha = 1\nw = 1\nlambda_max = 3\nT = 5\ntev, tend, lambdas_original = sampleHawkes(mu0, alpha, w, T, Nev= 100)\ntvec, l_t = plotHawkes(tev, lambda t: mu0, alpha, w, T, 10000.0, label = 'Original Intensity', color= 'r+', legend= 'samples')\nplt.plot(tev, lambdas_original, 'r^')\nplt.legend()", "_____no_output_____" ], [ "# this list stores functions corresponding to each exponential.\nexponentials = []\nall_events = {}\nexponentials.append(lambda t: mu0)\nall_events[mu0] = {}\nfor i in range(len(tev)):\n exponentials.append(lambda t: alpha * np.exp(-w * (t - tev[i])))\n all_events[tev[i]] = {}\n\n# we should assign each accepted event to some exponential. (IMPORTANT)\nfor i in range(len(tev)):\n if i == 0:\n all_events[mu0][tev[i]] = True\n else:\n probabilities = [exponentials[j](tev[i]) for j in range(0, i + 1)]\n probabilities = [float(i)/sum(probabilities) for i in probabilities]\n a = np.random.choice(i + 1, 1, p = probabilities)\n if a == 0:\n all_events[mu0][tev[i]] = True\n else:\n all_events[tev[a[0] - 1]][tev[i]] = True", "_____no_output_____" ], [ "# using the superposition to calculate the difference between lambda_max and the exponentials, and sample from it.\ndifferences = []\ndifferences.append(lambda t: lambda_max - mu0)\nfor k in range(len(tev)):\n f = lambda t: lambda_max - alpha * np.exp(-w * (t - tev[k]))\n differences.append(f)\n\nfor i in range(len(differences)):\n if i == 0:\n rejceted, indicators = thinning_T(0, differences[i], lambda_max, T)\n else:\n rejceted, indicators = thinning_T(tev[i - 1], differences[i], lambda_max, T)\n \n rejceted = {rejceted[j]: False for j in range(len(rejceted)) if indicators[j] == True}\n if i == 0:\n all_events[mu0].update(rejceted)\n all_events[mu0] = {k:v for k,v in sorted(all_events[mu0].items())}\n else:\n all_events[tev[i - 1]].update(rejceted)\n all_events[tev[i - 1]] = {k:v for k,v in sorted(all_events[tev[i - 1]].items())}\n", "_____no_output_____" ], [ "all_samples, all_lambdas = extract_samples(all_events, tev, mu0, alpha, w)", "_____no_output_____" ], [ "plt.figure(figsize=(10, 8))\nplt.plot(tvec, l_t, label = 'Original Intensity')\nplt.plot(all_samples, all_lambdas, 'oy', label = 'events')\nplt.plot(tev,lambdas_original, 'r+', label = 'accepted')\nplt.xlabel('time')\nplt.ylabel('intensity')\nplt.legend()", "_____no_output_____" ], [ "new_mu0 = 0.1\nnew_alpha = 1.7\nreal_counterfactuals = sample_counterfactual_superposition(mu0, alpha, new_mu0, new_alpha, all_events, lambda_max, w, T)", "_____no_output_____" ] ], [ [ "**The red +s are the counterfactuals.**", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(15, 8))\nplotHawkes(np.array(real_counterfactuals), lambda t: new_mu0, new_alpha, w, T, 10000.0, label= 'counterfactual intensity', color= 'g+', legend= 'accepted in counterfactual')\nplt.plot(tvec, l_t, label = 'Original Intensity')\nplt.plot(all_samples, all_lambdas, 'oy', label = 'events')\nplt.plot(tev,lambdas_original, 'r^')\nplt.plot(tev,np.full(len(tev), -0.1), 'r+', label = 'originally accepted')\nfor xc in real_counterfactuals:\n plt.axvline(x=xc, color = 'k', ls = '--', alpha = 0.2)\nplt.xlabel('time')\nplt.ylabel('intensity')\nplt.legend() ", "_____no_output_____" ], [ "check_monotonicity_hawkes(mu0, alpha, new_mu0, new_alpha, all_events, tev, real_counterfactuals, w) ", 
"_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb429923197ffe79155a32ca7556af7192f5cd4d
1,917
ipynb
Jupyter Notebook
enrich_stats_gnd.ipynb
reading-in-the-alps/rita-invs
21dd5a08555ea75daf39b9aeaf6b9f1446a62533
[ "MIT" ]
null
null
null
enrich_stats_gnd.ipynb
reading-in-the-alps/rita-invs
21dd5a08555ea75daf39b9aeaf6b9f1446a62533
[ "MIT" ]
5
2019-05-20T11:47:25.000Z
2021-06-10T21:12:15.000Z
enrich_stats_gnd.ipynb
reading-in-the-alps/rita-invs
21dd5a08555ea75daf39b9aeaf6b9f1446a62533
[ "MIT" ]
null
null
null
23.378049
81
0.526865
[ [ [ "[x.save_stats() for x in InventoryEntry.objects.all()]", "_____no_output_____" ], [ "for x in Creator.objects.filter(gnd_data__isnull=True):\n x.get_lobid_rdf()", "_____no_output_____" ], [ "for x in Creator.objects.filter(gnd_data__isnull=False):\n death = x.gnd_data.get('dateOfDeath', None)\n if death:\n death = \"{}-01-01\".format(death[0][:4])\n x.gnd_date_of_death = death\n try:\n x.save()\n except Exception as e:\n print(\"Error:{}, entity: {}\".format(e, x.id))", "_____no_output_____" ], [ "for x in Creator.objects.filter(gnd_data__isnull=False):\n places = x.gnd_data.get('geographicAreaCode', [])\n places = [x['label'] for x in places]\n place_obj = [Place.objects.get_or_create(name=x)[0] for x in places]\n x.gnd_geographic_area.set(place_obj)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb42a171b04e417322d5bad833a0ba67221f579e
165,575
ipynb
Jupyter Notebook
docs/examples/two-layer.ipynb
asross/pyqg
6770765c79a3f1da043e2ceb3a60085aa0dd002f
[ "MIT" ]
100
2015-08-31T19:05:22.000Z
2022-02-06T23:36:33.000Z
docs/examples/two-layer.ipynb
asross/pyqg
6770765c79a3f1da043e2ceb3a60085aa0dd002f
[ "MIT" ]
212
2015-08-30T04:07:37.000Z
2022-03-29T22:42:16.000Z
docs/examples/two-layer.ipynb
asross/pyqg
6770765c79a3f1da043e2ceb3a60085aa0dd002f
[ "MIT" ]
75
2015-08-31T16:16:23.000Z
2022-03-25T17:33:30.000Z
147.571301
34,916
0.768093
[ [ [ "# Two Layer QG Model Example #", "_____no_output_____" ], [ "Here is a quick overview of how to use the two-layer model. See the\n:py:class:`pyqg.QGModel` api documentation for further details.\n\nFirst import numpy, matplotlib, and pyqg:", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom matplotlib import pyplot as plt\n%matplotlib inline\nimport pyqg", "_____no_output_____" ] ], [ [ "## Initialize and Run the Model ##\n\nHere we set up a model which will run for 10 years and start averaging\nafter 5 years. There are lots of parameters that can be specified as\nkeyword arguments but we are just using the defaults.", "_____no_output_____" ] ], [ [ "year = 24*60*60*360.\nm = pyqg.QGModel(tmax=10*year, twrite=10000, tavestart=5*year)\nm.run()", "INFO: Logger initialized\nINFO: Step: 10000, Time: 7.20e+07, KE: 6.08e-04, CFL: 0.089\nINFO: Step: 20000, Time: 1.44e+08, KE: 4.92e-04, CFL: 0.085\nINFO: Step: 30000, Time: 2.16e+08, KE: 4.83e-04, CFL: 0.089\nINFO: Step: 40000, Time: 2.88e+08, KE: 5.01e-04, CFL: 0.085\n" ] ], [ [ "## Convert Model Outpt to an xarray Dataset ##\n\nModel variables, coordinates, attributes, and metadata can be stored conveniently as an xarray Dataset. (Notice that this feature requires xarray to be installed on your machine. See here for installation instructions: http://xarray.pydata.org/en/stable/getting-started-guide/installing.html#instructions)", "_____no_output_____" ] ], [ [ "m_ds = m.to_dataset()\nm_ds", "_____no_output_____" ] ], [ [ "## Visualize Output ##\n\nLet's assign a new data variable, ``q_upper``, as the **upper layer PV anomaly**. We access the PV values in the Dataset as ``m_ds.q``, which has two levels and a corresponding background PV gradient, ``m_ds.Qy``.", "_____no_output_____" ] ], [ [ "m_ds['q_upper'] = m_ds.q.isel(lev=0, time=0) + m_ds.Qy.isel(lev=0)*m_ds.y\nm_ds['q_upper'].attrs = {'long_name': 'upper layer PV anomaly'}\nm_ds.q_upper.plot.contourf(levels=18, cmap='RdBu_r');", "_____no_output_____" ] ], [ [ "## Plot Diagnostics ##\n\nThe model automatically accumulates averages of certain diagnostics. 
We can \nfind out what diagnostics are available by calling", "_____no_output_____" ] ], [ [ "m.describe_diagnostics()", "NAME | DESCRIPTION\n--------------------------------------------------------------------------------\nAPEflux | spectral flux of available potential energy \nAPEgen | total available potential energy generation \nAPEgenspec | spectrum of available potential energy generation \nEKE | mean eddy kinetic energy \nEKEdiss | total energy dissipation by bottom drag \nEnsspec | enstrophy spectrum \nKEflux | spectral flux of kinetic energy \nKEspec | kinetic energy spectrum \nentspec | barotropic enstrophy spectrum \n" ] ], [ [ "To look at the wavenumber energy spectrum, we plot the `KEspec` diagnostic.\n(Note that summing along the l-axis, as in this example, does not give us\na true *isotropic* wavenumber spectrum.)", "_____no_output_____" ] ], [ [ "kespec_upper = m_ds.KEspec.isel(lev=0).sum('l')\nkespec_lower = m_ds.KEspec.isel(lev=1).sum('l')\n\nkespec_upper.plot.line( 'b.-', x='k', xscale='log', yscale='log', label='upper layer')\nkespec_lower.plot.line( 'g.-', x='k', xscale='log', yscale='log', label='lower layer')\nplt.legend(loc='lower left')\nplt.ylim([1e-9, 1e-3]); \nplt.xlabel(r'k (m$^{-1}$)'); plt.grid()\nplt.title('Kinetic Energy Spectrum');", "_____no_output_____" ] ], [ [ "We can also plot the spectral fluxes of energy.", "_____no_output_____" ] ], [ [ "ebud = [ m_ds.APEgenspec.sum('l'),\n m_ds.APEflux.sum('l'),\n m_ds.KEflux.sum('l'),\n -m_ds.attrs['pyqg:rek']*m.del2*m_ds.KEspec.isel(lev=1).sum('l')*m.M**2 ]\nebud.append(-np.vstack(ebud).sum(axis=0))\nebud_labels = ['APE gen','APE flux','KE flux','Diss.','Resid.']\n[plt.semilogx(m_ds.k, term) for term in ebud]\nplt.legend(ebud_labels, loc='upper right')\nplt.xlim([m_ds.k.min(), m_ds.k.max()])\nplt.xlabel(r'k (m$^{-1}$)'); plt.grid()\nplt.title('Spectral Energy Transfers');\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb42a5acc035de46a8ba37afaf924f7a49b08de2
102,489
ipynb
Jupyter Notebook
.ipynb_checkpoints/dataset-checkpoint.ipynb
f-grimaldi/Linear-Interactive-Peptides-LIPs-Predictor
17225093fd90ab9e1a8094d035ff1702dd378a07
[ "MIT" ]
null
null
null
.ipynb_checkpoints/dataset-checkpoint.ipynb
f-grimaldi/Linear-Interactive-Peptides-LIPs-Predictor
17225093fd90ab9e1a8094d035ff1702dd378a07
[ "MIT" ]
null
null
null
.ipynb_checkpoints/dataset-checkpoint.ipynb
f-grimaldi/Linear-Interactive-Peptides-LIPs-Predictor
17225093fd90ab9e1a8094d035ff1702dd378a07
[ "MIT" ]
null
null
null
38.500751
7,896
0.436525
[ [ [ "### Importing libraries", "_____no_output_____" ] ], [ [ "# Import default libraries\nimport pandas as pd\nimport numpy as np\nimport math\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport time\nimport random\nimport warnings\nimport os\nimport requests\nimport json\nimport time\nimport zipfile\nimport logging\n# Import Biopython utils\nfrom Bio.PDB import PDBList, calc_angle, calc_dihedral, PPBuilder, is_aa, PDBIO, NeighborSearch, DSSP, HSExposureCB\nfrom Bio.PDB.PDBParser import PDBParser\nfrom Bio.SeqUtils import IUPACData\nfrom Bio.PDB.PDBIO import Select\n# Import custom libraries\nfrom modules.feature_extraction import *", "_____no_output_____" ], [ "# Set debug info\nlogging.basicConfig(level=logging.DEBUG)", "_____no_output_____" ] ], [ [ "### Helping functions", "_____no_output_____" ], [ "### Importing original dataset (LIP tagged sequences)", "_____no_output_____" ] ], [ [ "def down_sampling(df, number_of_samples, seed = 42):\n noLIP_index = set(df[df['LIP'] == 0].index)\n indexes = set(np.arange(0, np.shape(df)[0]))\n sample = random.sample(noLIP_index, len(noLIP_index) - number_of_samples)\n new_index = indexes.difference(sample)\n df1 = df.iloc[list(new_index), :]\n return df1\n\n# Turns an angle from radiants to degrees \ndef rad_to_deg(rad_angle):\n # If the input is None, then it returns None.\n # For numerical input, the output is mapped to [-180,180]\n if rad_angle is None :\n return None\n # Computes angle in degrees\n angle = rad_angle * 180 / math.pi\n # Handles radiants conversion\n while angle > 180 :\n angle = angle - 360\n while angle < -180 :\n angle = angle + 360\n return angle", "_____no_output_____" ], [ "# Read original dataset (lips_dataset)\nds_original = pd.read_csv('./datasets/lips_dataset_02.txt', sep='\\t')\n# Define new dataset\nds_original.head()", "_____no_output_____" ] ], [ [ "### Downloading proteins (automatically skips a protein if it has already been downloaded)", "_____no_output_____" ] ], [ [ "# Select all proteins (pdb column)\npdb_ids = ds_original.pdb.unique()\n# Define pdb files dir\npdb_dir = './pdb_files'\n# Define pdb file fetching class\npdbl = PDBList()\n# Fetch every protein\nfor pdb_id in pdb_ids:\n # Execute fetching of the protein (pdb file)\n pdbl.retrieve_pdb_file(pdb_id, pdir=pdb_dir, file_format='pdb')", "Structure exists: './pdb_files\\pdb1cee.ent' \nStructure exists: './pdb_files\\pdb1dev.ent' \nStructure exists: './pdb_files\\pdb1dow.ent' \nStructure exists: './pdb_files\\pdb1fqj.ent' \nStructure exists: './pdb_files\\pdb1g3j.ent' \nStructure exists: './pdb_files\\pdb1hrt.ent' \nStructure exists: './pdb_files\\pdb1i7w.ent' \nStructure exists: './pdb_files\\pdb1j2j.ent' \nStructure exists: './pdb_files\\pdb1jsu.ent' \nStructure exists: './pdb_files\\pdb1kil.ent' \nStructure exists: './pdb_files\\pdb1l8c.ent' \nStructure exists: './pdb_files\\pdb1p4q.ent' \nStructure exists: './pdb_files\\pdb1pq1.ent' \nStructure exists: './pdb_files\\pdb1q68.ent' \nStructure exists: './pdb_files\\pdb1rf8.ent' \nStructure exists: './pdb_files\\pdb1sc5.ent' \nStructure exists: './pdb_files\\pdb1sqq.ent' \nStructure exists: './pdb_files\\pdb1tba.ent' \nStructure exists: './pdb_files\\pdb1th1.ent' \nStructure exists: './pdb_files\\pdb1xtg.ent' \nStructure exists: './pdb_files\\pdb1ymh.ent' \nStructure exists: './pdb_files\\pdb1zoq.ent' \nStructure exists: './pdb_files\\pdb2a6q.ent' \nStructure exists: './pdb_files\\pdb2auh.ent' \nStructure exists: './pdb_files\\pdb2c1t.ent' \nStructure exists: 
'./pdb_files\\pdb2o8a.ent' \nStructure exists: './pdb_files\\pdb3b71.ent' \nStructure exists: './pdb_files\\pdb1a3b.ent' \nStructure exists: './pdb_files\\pdb1k2d.ent' \nStructure exists: './pdb_files\\pdb1ej4.ent' \nStructure exists: './pdb_files\\pdb1mv0.ent' \nStructure exists: './pdb_files\\pdb1t08.ent' \nStructure exists: './pdb_files\\pdb1hv2.ent' \nStructure exists: './pdb_files\\pdb1p16.ent' \nStructure exists: './pdb_files\\pdb1ee5.ent' \nStructure exists: './pdb_files\\pdb1ozs.ent' \nStructure exists: './pdb_files\\pdb2phe.ent' \nStructure exists: './pdb_files\\pdb1sb0.ent' \nStructure exists: './pdb_files\\pdb1j2x.ent' \nStructure exists: './pdb_files\\pdb1axc.ent' \nStructure exists: './pdb_files\\pdb2gl7.ent' \nStructure exists: './pdb_files\\pdb1h2k.ent' \nStructure exists: './pdb_files\\pdb1ycq.ent' \nStructure exists: './pdb_files\\pdb1p22.ent' \nStructure exists: './pdb_files\\pdb2iv8.ent' \nStructure exists: './pdb_files\\pdb1tce.ent' \nStructure exists: './pdb_files\\pdb1r1r.ent' \nStructure exists: './pdb_files\\pdb1mxl.ent' \nStructure exists: './pdb_files\\pdb2fym.ent' \nStructure exists: './pdb_files\\pdb1iwq.ent' \nStructure exists: './pdb_files\\pdb1fv1.ent' \nStructure exists: './pdb_files\\pdb1dpj.ent' \nStructure exists: './pdb_files\\pdb2b3g.ent' \nStructure exists: './pdb_files\\pdb2nl9.ent' \nStructure exists: './pdb_files\\pdb1o9a.ent' \nStructure exists: './pdb_files\\pdb1sqk.ent' \nStructure exists: './pdb_files\\pdb1nx1.ent' \nStructure exists: './pdb_files\\pdb2gsi.ent' \nStructure exists: './pdb_files\\pdb1i8h.ent' \nStructure exists: './pdb_files\\pdb1p4b.ent' \nStructure exists: './pdb_files\\pdb2ivz.ent' \nStructure exists: './pdb_files\\pdb1lm8.ent' \nStructure exists: './pdb_files\\pdb1emu.ent' \nStructure exists: './pdb_files\\pdb1un0.ent' \nStructure exists: './pdb_files\\pdb1a81.ent' \nStructure exists: './pdb_files\\pdb2oq1.ent' \nStructure exists: './pdb_files\\pdb1kdx.ent' \nStructure exists: './pdb_files\\pdb1h8b.ent' \nStructure exists: './pdb_files\\pdb1dt7.ent' \nStructure exists: './pdb_files\\pdb2pg1.ent' \nStructure exists: './pdb_files\\pdb1apm.ent' \nStructure exists: './pdb_files\\pdb1cqt.ent' \n" ] ], [ [ "### Creating redidues dataset", "_____no_output_____" ] ], [ [ "# Select all proteins (pdb column)\npdb_ids = ds_original.pdb.unique()\n# Define pdb files dir\npdb_dir = './pdb_files'\n# Define pdb file fetching class\npdbl = PDBList()", "_____no_output_____" ], [ "# Define a set containing (pdb_id, chain_id)\nvalid_chains = set([(row['pdb'], row['chain']) for idx, row in ds_original.iterrows()])", "_____no_output_____" ], [ "# New list for residues\nds_residues = list()\n# Loop thorugh every protein\nfor pdb_id in ds_original.pdb.unique():\n # Get structure of the protein\n structure = PDBParser(QUIET=True).get_structure(pdb_id, pdb_dir + '/pdb{}.ent'.format(pdb_id))\n # We select only the 0-th model\n model = structure[0]\n # Loop through every model's chain\n for chain in model:\n # Skip if the chain is not valid\n if (pdb_id, chain.id) not in valid_chains:\n continue\n for residue in chain:\n # Do not take into account non-aminoacidic residues (e.g. 
water molecules)\n if(not is_aa(residue)): \n continue\n # Add an entry to the residues list\n ds_residues.append((pdb_id, model.id, chain.id, residue.id[1], residue.get_resname(), 0, 0))\n\n \n# Turn list into dataframe\nds_residues = pd.DataFrame(ds_residues)\n# Define dataset column names\nds_residues.columns = ['PDB_ID', 'MODEL_ID', 'CHAIN_ID', 'RES_ID', 'RES_NAME', 'LIP_SCORE', 'LIP']\n# Show some info about the dataset\nprint(\"Numbers of proteins: {}\".format(np.shape(ds_original)[0]))\nprint(\"Numbers of res: {}\".format(np.shape(ds_residues)[0]))\n# Show first rows\nds_residues.head()", "Numbers of proteins: 143\nNumbers of res: 17911\n" ] ], [ [ "### Tagging LIP residues", "_____no_output_____" ] ], [ [ "# Launch tagging algorithm: we have 0 positively tagged residues\nLIP_tag(ds_original, ds_residues)\n# Check that the number of residues positively LIP-tagged is higher than 0\nassert True, any(ds_residues['LIP'] == 1)\n# Show first positively tagged LIP residues\nds_residues[ds_residues.LIP == 1].head()", "_____no_output_____" ] ], [ [ "### Check dataset balancement\n\nWe check if we have the same numerosity of LIP and npn-LIP tagged residues.", "_____no_output_____" ] ], [ [ "# Compute numerosity of LIP tagged residues\nprint('Numerosity of LIP tagged residues: {}'.format(ds_residues[ds_residues.LIP == 1].shape[0]))\n# Compute numerosity of non-LIP tagged residues\nprint('Numerosity of non-LIP tagged residues: {}'.format(ds_residues[ds_residues.LIP == 0].shape[0]))", "Numerosity of LIP tagged residues: 1883\nNumerosity of non-LIP tagged residues: 16028\n" ], [ "# Add plot\nfig, ax = plt.subplots(1, 1)\n# Add frequency plot\nax = plt.hist(ds_residues['LIP'], bins=2)", "DEBUG:matplotlib.font_manager:findfont: Matching :family=sans-serif:style=normal:variant=normal:weight=normal:stretch=normal:size=10.0 to DejaVu Sans ('C:\\\\Users\\\\fgrim\\\\AppData\\\\Local\\\\Continuum\\\\anaconda3\\\\lib\\\\site-packages\\\\matplotlib\\\\mpl-data\\\\fonts\\\\ttf\\\\DejaVuSans.ttf') with score of 0.050000\n" ] ], [ [ "## Feature extraction", "_____no_output_____" ], [ "### DSSP features (angles, etc.)", "_____no_output_____" ] ], [ [ "# Get DSSP dataframe\nds_dssp = get_DSSP(ds_original.pdb.unique(), pdb_dir)\n# Show dataframe\nds_dssp.head()", "DEBUG:root:PDB ids:\nDEBUG:root:['1cee' '1dev' '1dow' '1fqj' '1g3j' '1hrt' '1i7w' '1j2j' '1jsu' '1kil'\n '1l8c' '1p4q' '1pq1' '1q68' '1rf8' '1sc5' '1sqq' '1tba' '1th1' '1xtg'\n '1ymh' '1zoq' '2a6q' '2auh' '2c1t' '2o8a' '3b71' '1a3b' '1k2d' '1ej4'\n '1mv0' '1t08' '1hv2' '1p16' '1ee5' '1ozs' '2phe' '1sb0' '1j2x' '1axc'\n '2gl7' '1h2k' '1ycq' '1p22' '2iv8' '1tce' '1r1r' '1mxl' '2fym' '1iwq'\n '1fv1' '1dpj' '2b3g' '2nl9' '1o9a' '1sqk' '1nx1' '2gsi' '1i8h' '1p4b'\n '2ivz' '1lm8' '1emu' '1un0' '1a81' '2oq1' '1kdx' '1h8b' '1dt7' '2pg1'\n '1apm' '1cqt']\nDEBUG:root:PDB directory: './pdb_files'\n" ], [ "# Check NULL values in PHI and PSI columns\nassert False == bool(ds_dssp.PHI.isnull().any())", "_____no_output_____" ], [ "# Drop useless features\nds_dssp.drop(['DSSP_ID', 'AA'], axis=1, inplace=True)\nds_dssp.head()", "_____no_output_____" ], [ "# Drop useless columns from residues dataset\nif 'PHI' in ds_residues.columns:\n ds_residues.drop(['PHI', 'PSI'], axis=1, inplace=True)\n# Merge DSSP features in ds_residues dataset\nds_residues = ds_residues.merge(ds_dssp, on=['PDB_ID', 'CHAIN_ID', 'RES_ID'], how='left')\n# Check new datset\nds_residues.head()", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 2)\nsns.boxplot(x='LIP', y='PHI',data=ds_residues, 
ax=ax[0])\nsns.boxplot(x='LIP', y='PSI',data=ds_residues, ax=ax[1])", "_____no_output_____" ] ], [ [ "### RING features", "_____no_output_____" ] ], [ [ "# Define folder for ring files\nring_dir = './ring_files'\n# Define PDB files for which RING feature extraction is required\npdb_ids = ds_original.pdb.unique()\n# Define contact treshold to consider\ncontact_threshold = 3.5\n# Flag for actually extract RING files\nenable_ring = False", "_____no_output_____" ], [ "if enable_ring:\n # Download chunk of 5 files per time\n for i in range(0, len(pdb_ids), 5):\n # Download required RING files\n download_RING(pdb_ids[i:i+5], ring_dir)", "_____no_output_____" ], [ "# Get edges info from RING\nds_ring = get_RING(pdb_ids, pdb_dir, ring_dir, contact_threshold)\nds_ring.head()", "_____no_output_____" ], [ "# Get the number of intra chains contacts for every residue\nintra_contacts = (ds_ring[ds_ring.CHAIN_ID_A == ds_ring.CHAIN_ID_B]\n .groupby(['PDB_ID', 'CHAIN_ID_A', 'RES_ID_A'], as_index=False)\n .size()\n .reset_index(name='COUNTS'))\nintra_contacts.columns = ['PDB_ID', 'CHAIN_ID', 'RES_ID', 'INTRA_CONTACTS']\nintra_contacts.RES_ID = intra_contacts.RES_ID.astype(int)\nintra_contacts.head()", "_____no_output_____" ], [ "# Get the number of inter chains contacts for every residue\ninter_contacts = (ds_ring[ds_ring.CHAIN_ID_A != ds_ring.CHAIN_ID_B]\n .groupby(['PDB_ID', 'CHAIN_ID_A', 'RES_ID_A'], as_index=False)\n .size()\n .reset_index(name='COUNTS'))\ninter_contacts.columns = ['PDB_ID', 'CHAIN_ID', 'RES_ID', 'INTER_CONTACTS']\ninter_contacts.RES_ID = inter_contacts.RES_ID.astype(int)\ninter_contacts.head()", "_____no_output_____" ], [ "# Merge intra chain contacts into the main dataset\nds_residues = pd.merge(ds_residues, intra_contacts, how=\"left\", on=['PDB_ID', 'CHAIN_ID', 'RES_ID'])\nds_residues.head()", "_____no_output_____" ], [ "# Merge inter chain contacts into the main dataset\nds_residues = pd.merge(ds_residues, inter_contacts, how=\"left\", on=['PDB_ID', 'CHAIN_ID', 'RES_ID'])\nds_residues.head()", "_____no_output_____" ], [ "# Fill Nan with zeroes\nds_residues.fillna(0, inplace=True)\nds_residues.head()", "_____no_output_____" ], [ "# Group every contact by residue\ngroupby = ds_ring.groupby(['PDB_ID', 'CHAIN_ID_A', 'RES_ID_A'], as_index=False)\n# Get edge locations\nedges_loc = groupby['EDGE_LOC'].apply(lambda x: ' '.join(x)).reset_index(name='EDGE_LOC')\n# Get edge types\nedges_type = groupby['EDGE_TYPE'].apply(lambda x: ' '.join(x)).reset_index(name='EDGE_TYPE')\n\n# Merge loc and type\nedges = pd.merge(edges_loc, edges_type, on=['PDB_ID', 'CHAIN_ID_A', 'RES_ID_A'])\nedges.columns = ['PDB_ID', 'CHAIN_ID', 'RES_ID', 'EDGE_LOC', 'EDGE_TYPE']\nedges.RES_ID = edges.RES_ID.astype(int)\nedges.head()", "_____no_output_____" ], [ "# Merge edges locations and types into the main dataframe\nds_residues = ds_residues.merge(edges, how='left', on=['PDB_ID', 'CHAIN_ID', 'RES_ID'])\n# Handle NaNs\nds_residues.EDGE_LOC = ds_residues.EDGE_LOC.fillna('')\nds_residues.EDGE_TYPE = ds_residues.EDGE_TYPE.fillna('')\n# Show new dataset\nds_residues.head()", "_____no_output_____" ], [ "# Save residues dataset to disk\nds_residues.to_csv('./datasets/residues.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb42b0a9d6da28260f037934b3f92603ff2e39e0
1,991
ipynb
Jupyter Notebook
Sympy_QUIZ.ipynb
UWashington-Astro300/Astro300-W20
cc786bc8491193a5d20e1cd47c93927405f694ff
[ "MIT" ]
null
null
null
Sympy_QUIZ.ipynb
UWashington-Astro300/Astro300-W20
cc786bc8491193a5d20e1cd47c93927405f694ff
[ "MIT" ]
null
null
null
Sympy_QUIZ.ipynb
UWashington-Astro300/Astro300-W20
cc786bc8491193a5d20e1cd47c93927405f694ff
[ "MIT" ]
null
null
null
16.454545
94
0.489201
[ [ [ "# SymPy QUIZ", "_____no_output_____" ] ], [ [ "import sympy as sp", "_____no_output_____" ], [ "sp.init_printing()\nx = sp.symbols('x')", "_____no_output_____" ] ], [ [ "## What is the denominator of the third term of the Taylor expansion for the equation\n\n$$ \\Large \\frac{\\sin{\\left (x \\right )}}{x^{4} + 27}$$", "_____no_output_____" ], [ "## Answer the Canvas quiz `SymPyQuiz`", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
cb42b7ea8ef3e5d54e4b61bb905015dca6e9ecab
100,513
ipynb
Jupyter Notebook
Phase_3/classimbalance/Class_imbalance.ipynb
clareadunne/ds-east-042621-lectures
a95be9ebb87023a417232494ecd0e65d20d8e4a8
[ "MIT" ]
null
null
null
Phase_3/classimbalance/Class_imbalance.ipynb
clareadunne/ds-east-042621-lectures
a95be9ebb87023a417232494ecd0e65d20d8e4a8
[ "MIT" ]
null
null
null
Phase_3/classimbalance/Class_imbalance.ipynb
clareadunne/ds-east-042621-lectures
a95be9ebb87023a417232494ecd0e65d20d8e4a8
[ "MIT" ]
null
null
null
54.805344
33,004
0.702387
[ [ [ "## Starting Off\n\nLooking at the confusion matrix below, and answer the following questions. \n\n1. How many oranges are there in the dataset? 12\n2. How many fruits were predicted by the model to be an orange? 6\n3. Of the fruits that were predicted to be an orange, how many were actually mangoes? 3\n4. Of the fruits that are actually mangoes, how many were predicted to be apples? 9", "_____no_output_____" ], [ "![alt text](images/confusion_matrix.png)", "_____no_output_____" ], [ "# Classification Practicum with Class Imbalance\n\nAgenda:\n- Review class imbalance\n- Review code for different ways to handle class imbalance\n", "_____no_output_____" ] ], [ [ "!pip install imblearn", "Collecting imblearn\n Downloading imblearn-0.0-py2.py3-none-any.whl (1.9 kB)\nRequirement already satisfied: imbalanced-learn in c:\\users\\clare\\anaconda3\\envs\\learn-env\\lib\\site-packages (from imblearn) (0.7.0)\nRequirement already satisfied: scikit-learn>=0.23 in c:\\users\\clare\\anaconda3\\envs\\learn-env\\lib\\site-packages (from imbalanced-learn->imblearn) (0.23.2)\nRequirement already satisfied: scipy>=0.19.1 in c:\\users\\clare\\anaconda3\\envs\\learn-env\\lib\\site-packages (from imbalanced-learn->imblearn) (1.5.0)\nRequirement already satisfied: joblib>=0.11 in c:\\users\\clare\\anaconda3\\envs\\learn-env\\lib\\site-packages (from imbalanced-learn->imblearn) (0.17.0)\nRequirement already satisfied: numpy>=1.13.3 in c:\\users\\clare\\anaconda3\\envs\\learn-env\\lib\\site-packages (from imbalanced-learn->imblearn) (1.18.5)\nRequirement already satisfied: threadpoolctl>=2.0.0 in c:\\users\\clare\\anaconda3\\envs\\learn-env\\lib\\site-packages (from scikit-learn>=0.23->imbalanced-learn->imblearn) (2.1.0)\nInstalling collected packages: imblearn\nSuccessfully installed imblearn-0.0\n" ], [ "import pandas as pd\nimport numpy as np\nfrom sklearn import metrics", "_____no_output_____" ], [ "# Read in data and split data to be used in the models\ntitanic = pd.read_csv('https://raw.githubusercontent.com/learn-co-students/nyc-mhtn-ds-042219-lectures/master/Module_4/cleaned_titanic.csv', index_col='PassengerId')\n\n", "_____no_output_____" ], [ "titanic.head()", "_____no_output_____" ], [ "# Create matrix of features\nX = titanic.drop('Survived', axis = 1) # grabs everything else but 'Survived'\n\n# Create target variable\ny = titanic['Survived'] # y is the column we're trying to predict\n\n# Create a list of the features being used in the \nfeature_cols = X.columns", "_____no_output_____" ] ], [ [ "# Handling Class Imbalance", "_____no_output_____" ], [ "## Visualizing Class Imbalance", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline \n\nsns.set_style('darkgrid')\nplt.figure(figsize = (10,5))\nsns.countplot(y, alpha =.80, palette= ['grey','lightgreen'])\nplt.title('Survivors vs Non-Survivors')\nplt.ylabel('# Passengers')\nplt.show()", "C:\\Users\\clare\\anaconda3\\envs\\learn-env\\lib\\site-packages\\seaborn\\_decorators.py:36: FutureWarning: Pass the following variable as a keyword arg: x. 
From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n warnings.warn(\n" ] ], [ [ "## Run a Dummy Classifier for Baseline Assessment", "_____no_output_____" ] ], [ [ "1-y.mean()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.metrics import accuracy_score, f1_score, recall_score\n\n# setting up testing and training sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=23, )\n\n# DummyClassifier to predict only target 0\ndummy = DummyClassifier(strategy='most_frequent').fit(X_train, y_train)\ndummy_pred = dummy.predict(X_test)\n\n\n", "_____no_output_____" ], [ "dummy_pred", "_____no_output_____" ] ], [ [ "**Questions:**\n\n- What do you think the accuracy score will be for this model?\n- What do you think the recall score will be for this model?", "_____no_output_____" ] ], [ [ "\n# checking accuracy\nprint('Test Accuracy score: ', accuracy_score(y_test, dummy_pred))", "Test Accuracy score: 0.6547085201793722\n" ], [ "# checking recall\nprint('Test Recall score: ', recall_score(y_test, dummy_pred))", "Test Recall score: 0.0\n" ] ], [ [ "# Handling Class Imbalance\n\nIn this guide, we will cover 5 tactics for handling imbalanced classes in machine learning:\n\n1. Up-sample the minority class\n2. Down-sample the majority class\n3. Change your performance metric\n4. Penalize algorithms (cost-sensitive training)\n5. Use tree-based algorithms", "_____no_output_____" ], [ "## Run a classification Model with class imbalance\n\nBefore we start to implement different ways to handle class imbalance, let's fit a basic model to have a better point of comparison. ", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\n\nlr_clf = LogisticRegression(solver='liblinear')\n\nlr_clf.fit(X_train, y_train)\n\ny_pred_test = lr_clf.predict(X_test)\n\n\n# checking accuracy\nprint('Test Accuracy score: ', accuracy_score(y_test, y_pred_test))\n\n\n# checking accuracy\nprint('Test F1 score: ', f1_score(y_test, y_pred_test))", "Test Accuracy score: 0.7847533632286996\nTest F1 score: 0.6619718309859155\n" ], [ "results = {}\n\nresults['imbalanced'] = (accuracy_score(y_test, y_pred_test), f1_score(y_test, y_pred_test))", "_____no_output_____" ] ], [ [ "## Prepping data for handling class imbalances\n\nWe are goign to change the training dataset to which we fit our model, so we want to bring our training data back together before we make those changes. 
", "_____no_output_____" ] ], [ [ "# concatenate our training data back together\ntraining = pd.concat([X_train, y_train], axis=1)", "_____no_output_____" ], [ "training", "_____no_output_____" ], [ "# separate minority and majority classes\ndeceased = training[training.Survived==0]\nsurvived = training[training.Survived==1]", "_____no_output_____" ], [ "# Get a class count to understand the class imbalance.\nprint('deceased count: '+ str(len(deceased)))\nprint('survived count: '+ str(len(survived)))", "deceased count: 403\nsurvived count: 263\n" ] ], [ [ "## Resampling \nYou can change the dataset that you use to build your predictive model to have more balanced data.\n\nThis change is called sampling your dataset and there are two main methods that you can use to even-up the classes:\n\nYou can add copies of instances from the under-represented class called over-sampling (or more formally sampling with replacement), or\nYou can delete instances from the over-represented class, called under-sampling.\nThese approaches are often very easy to implement and fast to run. They are an excellent starting point.\n\n\n**Some Rules of Thumb:**\n- Consider testing under-sampling when you have an a lot data (tens- or hundreds of thousands of instances or more)\n- Consider testing over-sampling when you don’t have a lot of data (tens of thousands of records or less)\n- Consider testing random and non-random (e.g. stratified) sampling schemes.\n- Consider testing different resampled ratios (e.g. you don’t have to target a 1:1 ratio in a binary classification problem, try other ratios)", "_____no_output_____" ], [ "![alt text](images/resampling.png)", "_____no_output_____" ] ], [ [ "from sklearn.utils import resample\n", "_____no_output_____" ] ], [ [ "### Upsampling\n\n", "_____no_output_____" ] ], [ [ "# upsample minority\nsurvived_upsampled = resample(survived,\n replace=True, # sample with replacement\n n_samples=len(deceased), # match number in majority class\n random_state=23) # reproducible results", "_____no_output_____" ], [ "survived_upsampled.shape", "_____no_output_____" ], [ "# combine majority and upsampled minority\nupsampled = pd.concat([deceased, survived_upsampled])\n\n# check new class counts\nupsampled.Survived.value_counts()", "_____no_output_____" ], [ "len(upsampled)", "_____no_output_____" ] ], [ [ "Now that we have balanced classes, lets see how this can affect the performance of the model. 
", "_____no_output_____" ] ], [ [ "# trying logistic regression again with the balanced dataset\ny_train = upsampled.Survived\nX_train = upsampled.drop('Survived', axis=1)\n\n\n# upsampled_dt = DecisionTreeClassifier(max_depth=5)\nupsampled_lr = LogisticRegression(solver='liblinear')\n\n\n# upsampled_dt.fit(X_train, y_train)\nupsampled_lr.fit(X_train, y_train)\n\n\n# upsampled_pred = upsampled_dt.predict(X_test)\nupsampled_pred = upsampled_lr.predict(X_test)\n\n\n\n# checking accuracy\nprint('Test Accuracy score: ', accuracy_score(y_test, upsampled_pred))\n\n\n# checking accuracy\nprint('Test F1 score: ', f1_score(y_test, upsampled_pred))\n\n", "Test Accuracy score: 0.7713004484304933\nTest F1 score: 0.6577181208053691\n" ], [ "results['upsampled'] = (accuracy_score(y_test, upsampled_pred), f1_score(y_test, upsampled_pred))", "_____no_output_____" ], [ "results", "_____no_output_____" ] ], [ [ "## Downsampling", "_____no_output_____" ] ], [ [ "# downsample majority\nsurvived_downsampled = resample(deceased,\n replace = False, # sample without replacement\n n_samples = len(survived), # match minority n\n random_state = 23) # reproducible results", "_____no_output_____" ], [ "# combine minority and downsampled majority\ndownsampled = pd.concat([survived_downsampled, survived])\n\n# checking counts\ndownsampled.Survived.value_counts()", "_____no_output_____" ], [ "# trying logistic regression again with the balanced dataset\ny_train = downsampled.Survived\nX_train = downsampled.drop('Survived', axis=1)\n\n\ndownsampled_lr = LogisticRegression(solver='liblinear')\n\n\ndownsampled_lr.fit(X_train, y_train)\n\ndownsampled_pred = downsampled_lr.predict(X_test)\n\n# checking accuracy\nprint('Test Accuracy score: ', accuracy_score(y_test, downsampled_pred))\n\n# checking accuracy\nprint('Test F1 score: ', f1_score(y_test, downsampled_pred))", "Test Accuracy score: 0.7802690582959642\nTest F1 score: 0.6711409395973155\n" ], [ "results['downsampled'] = (accuracy_score(y_test, downsampled_pred), f1_score(y_test, downsampled_pred))", "_____no_output_____" ], [ "results", "_____no_output_____" ] ], [ [ "## Over-sampling: SMOTE\n\nSMOTE (Synthetic Minority Oversampling Technique) consists of synthesizing elements for the minority class, based on those that already exist. It works randomly picking a point from the minority class and computing the k-nearest neighbors for this point. 
The synthetic points are added between the chosen point and its neighbors.\n\n![alt text](images/smote.png)", "_____no_output_____" ] ], [ [ "from imblearn.over_sampling import SMOTE", "Using TensorFlow backend.\n/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/anaconda3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 
1)])\n/anaconda3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" ], [ "# setting up testing and training sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=23, )", "_____no_output_____" ], [ "\nsm = SMOTE(random_state=23)\nX_train, y_train = sm.fit_sample(X_train, y_train)", "_____no_output_____" ], [ "y_train.value_counts()", "_____no_output_____" ], [ "smote_lr = LogisticRegression(solver='liblinear')", "_____no_output_____" ], [ "# smote_dt.fit(X_train, y_train)\nsmote_lr.fit(X_train, y_train)\n", "_____no_output_____" ], [ "smote_pred = smote_lr.predict(X_test)\n", "_____no_output_____" ], [ "\n# checking accuracy\nprint('Test Accuracy score: ', accuracy_score(y_test, smote_pred))\n\n# checking accuracy\nprint('Test F1 score: ', f1_score(y_test, smote_pred))", "Test Accuracy score: 0.7847533632286996\nTest F1 score: 0.68\n" ], [ "results['smote'] = (accuracy_score(y_test, smote_pred), f1_score(y_test, smote_pred))", "_____no_output_____" ], [ "results", "_____no_output_____" ] ], [ [ "## Under-sampling: Tomek links\n\nTomek links are pairs of very close instances, but of opposite classes. Removing the instances of the majority class of each pair increases the space between the two classes, facilitating the classification process.", "_____no_output_____" ], [ "![alt text](images/tomek.png)", "_____no_output_____" ] ], [ [ "from collections import Counter\nfrom imblearn.under_sampling import TomekLinks \n\n", "_____no_output_____" ], [ "# setting up testing and training sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=23, )", "_____no_output_____" ], [ "tl = TomekLinks()\nX_res, y_res = tl.fit_resample(X_train, y_train)\nprint('Resampled dataset shape %s' % Counter(y_res))\n", "Resampled dataset shape Counter({0: 356, 1: 263})\n" ], [ "feature_cols", "_____no_output_____" ], [ "tl.sample_indices_", "_____no_output_____" ], [ "# remove Tomek links\ntl = TomekLinks()\nX_resampled, y_resampled = tl.fit_sample(X_train, y_train)\n\n", "_____no_output_____" ] ], [ [ "## Show removed observations", "_____no_output_____" ] ], [ [ "fig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n\nidx_samples_removed = np.setdiff1d(np.arange(X_train.shape[0]),\n tl.sample_indices_)\nidx_class_0 = y_resampled == 0\nplt.scatter(X_resampled[idx_class_0]['Age'], X_resampled[idx_class_0]['Fare'],\n alpha=.8, label='Perished')\nplt.scatter(X_resampled[~idx_class_0]['Age'], X_resampled[~idx_class_0]['Fare'],\n alpha=.8, label='Survived')\nplt.scatter(X_train.iloc[idx_samples_removed]['Age'], X_train.iloc[idx_samples_removed]['Fare'],\n alpha=.8, label='Removed samples')\nplt.legend()", "_____no_output_____" ], [ "len(idx_samples_removed)", "_____no_output_____" ], [ "len(X_train)", "_____no_output_____" ], [ "len(X_resampled)", "_____no_output_____" ], [ "tomek_lr = LogisticRegression(solver='liblinear')\n\ntomek_lr.fit(X_resampled, y_resampled)\n\ntomek_pred = tomek_lr.predict(X_test)\n\n# checking accuracy\nprint('Test Accuracy score: ', accuracy_score(y_test, tomek_pred))\n\n\n# checking accuracy\nprint('Test F1 score: ', f1_score(y_test, tomek_pred))", "Test Accuracy score: 0.7982062780269058\nTest F1 score: 0.6938775510204082\n" ], [ "results['tomek'] = (accuracy_score(y_test, 
tomek_pred), f1_score(y_test, tomek_pred))", "_____no_output_____" ], [ "results", "_____no_output_____" ] ], [ [ "### Penalize Algorithms (Cost-Sensitive Training)\nThe next tactic is to use penalized learning algorithms that increase the cost of classification mistakes on the minority class.\n\nDuring training, we can use the argument `class_weight='balanced'` to penalize mistakes on the minority class by an amount proportional to how under-represented it is.", "_____no_output_____" ] ], [ [ "# setting up testing and training sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=23, )", "_____no_output_____" ], [ "lr_clf_weighted = LogisticRegression(solver='liblinear', class_weight='balanced')\n\nlr_clf_weighted.fit(X_train, y_train)\n\ny_weighted_test = lr_clf_weighted.predict(X_test)\n\n\n# checking accuracy\nprint('Test Accuracy score: ', accuracy_score(y_test, y_weighted_test))\n\n\n# checking accuracy\nprint('Test F1 score: ', f1_score(y_test, y_weighted_test))", "Test Accuracy score: 0.7802690582959642\nTest F1 score: 0.6711409395973155\n" ], [ "results['weighted'] = (accuracy_score(y_test, y_weighted_test), f1_score(y_test, y_weighted_test))", "_____no_output_____" ], [ "results", "_____no_output_____" ] ], [ [ "## Tree-Based Algorithms\n\nDecision trees often perform well on imbalanced datasets because their hierarchical structure allows them to learn signals from both classes.", "_____no_output_____" ] ], [ [ "# Instantiate the classifier using 200 trees\nfrom sklearn.ensemble import RandomForestClassifier\nrfc = RandomForestClassifier(random_state = 23, n_estimators=200, class_weight='balanced')", "_____no_output_____" ], [ "#fit the model to the training data\nrfc.fit(X_train, y_train)\n\n#use the fitted model to predict on the test data\nrfc_pred = rfc.predict(X_test)\n\n\n# checking accuracy on the test data\nprint('Test Accuracy score: ', accuracy_score(y_test, rfc_pred))\n\n\n# checking accuracy on the test data\nprint('Test F1 score: ', f1_score(y_test, rfc_pred))", "Test Accuracy score: 0.7757847533632287\nTest F1 score: 0.6666666666666666\n" ], [ "results['rfc'] = (accuracy_score(y_test, rfc_pred), f1_score(y_test, rfc_pred))", "_____no_output_____" ], [ "results", "_____no_output_____" ] ], [ [ "## Change Your Performance Metric\n\nAccuracy is not the metric to use when working with an imbalanced dataset. We have seen that it is misleading.\n\nThere are metrics that have been designed to tell you a more truthful story when working with imbalanced classes.\n\n- Precision: A measure of a classifier's exactness.\n- Recall: A measure of a classifier's completeness.\n- F1 Score (or F-score): A weighted average of precision and recall.\n\n- Kappa (or Cohen’s kappa): Classification accuracy normalized by the imbalance of the classes in the data.\n- ROC Curves: Like precision and recall, accuracy is divided into sensitivity and specificity and models can be chosen based on the balance thresholds of these values.\n\nWhen using a cross-validation method, you can utilize one of these as the scoring metric when comparing across multiple methods. \n\nThis will not change the way a model is fitted, it will just choose a different model as the **best_estimator** based on the scoring metric. ", "_____no_output_____", "_____no_output_____" ], [ "## Reframe as Anomaly Detection\n\nIf your class imbalance is very extreme (less than 0.1%), it might be better to treat this as an anomaly detection problem than as a classification problem. \n**Anomaly detection**, a.k.a. 
outlier detection, is for detecting outliers and rare events. Instead of building a classification model, you'd have a \"profile\" of a normal observation. If a new observation strays too far from that \"normal profile,\" it would be flagged as an anomaly.\n\nhttps://towardsdatascience.com/anomaly-detection-for-dummies-15f148e559c1", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
cb42dc09faba307b4685efaff1d60fd16ffb5e86
332,741
ipynb
Jupyter Notebook
example.ipynb
nowacklab/blg_strain
df109a8a76f6f5d311d24b47f1dcfb374287b378
[ "MIT" ]
3
2020-12-20T04:51:11.000Z
2022-01-26T04:52:43.000Z
example.ipynb
nowacklab/blg_strain
df109a8a76f6f5d311d24b47f1dcfb374287b378
[ "MIT" ]
null
null
null
example.ipynb
nowacklab/blg_strain
df109a8a76f6f5d311d24b47f1dcfb374287b378
[ "MIT" ]
1
2020-12-15T18:18:25.000Z
2020-12-15T18:18:25.000Z
970.090379
288,784
0.955794
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "This notebook provides a basic example of using the `blg_strain` package to calculate the magnetoelectric susceptibility for strained bilayer graphene.", "_____no_output_____" ], [ "# Strained Lattice", "_____no_output_____" ] ], [ [ "from blg_strain.lattice import StrainedLattice\n\nsl = StrainedLattice(eps=0.01, theta=0)\nsl.calculate()", "_____no_output_____" ] ], [ [ "Below is a plot of the Brillouin zone (black hexagon) and location of the K/K' points (red markers), which do not coincide with the high-symmetry points of the Brillouin zone.", "_____no_output_____" ] ], [ [ "fig = plt.figure()\naxes = [fig.add_subplot(x) for x in (121, 222, 224)]\nfor ax in axes:\n sl.plot_bz(ax)\n ax.set_aspect(1)\n\nw = 0.02\naxes[1].set_xlim(sl.K[0] - w, sl.K[0] + w)\naxes[1].set_ylim(sl.K[1] - w, sl.K[1] + w)\naxes[2].set_xlim(sl.Kp[0] - w, sl.Kp[0] + w)\naxes[2].set_ylim(sl.Kp[1] - w, sl.Kp[1] + w)", "_____no_output_____" ] ], [ [ "# Band Structure", "_____no_output_____" ] ], [ [ "from blg_strain.bands import BandStructure\n\nbs = BandStructure(sl=sl, window=0.1, Delta=0.01)\nbs.calculate(Nkx=200, Nky=200)", "_____no_output_____" ] ], [ [ "Below are plots of the energy, one component of the wavefunction, Berry curvature, and orbital magnetic moment in regions of momentum space surrounding the K and K' valleys.", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(2, 4, figsize=(14, 7))\npcolormesh_kwargs = dict(cmap='cividis', shading='gouraud')\ncontour_kwargs = dict(colors='k', linewidths=0.5, linestyles='solid')\n\nn = 2 # Band index\nm = 1 # component of wavefunction\nfor i, (axK, axKp, A) in enumerate(zip(axes[0,:], \n axes[1,:], \n [bs.E[n], bs.Psi[n,m,:,:].real, bs.Omega[n], bs.Mu[n]])):\n # K\n axK.pcolormesh(bs.Kxa, bs.Kya, A, **pcolormesh_kwargs)\n axK.contour(bs.Kxa, bs.Kya, A, **contour_kwargs)\n\n # K'\n if i >= 2: # Omega and Mu\n A = -A\n axKp.pcolormesh(-bs.Kxa, -bs.Kya, A, **pcolormesh_kwargs)\n axKp.contour(-bs.Kxa, -bs.Kya, A, **contour_kwargs)\n\nfor ax in axes.flatten():\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_aspect(1)\n\naxes[0,0].set_title('Conduction band energy')\naxes[0,1].set_title(f'Component {m} of wavefunction')\naxes[0,2].set_title('Berry curvature')\naxes[0,3].set_title('Orbital magnetic moment')\naxes[0,0].set_ylabel('$K$', rotation=0, labelpad=30, fontsize=16, va='center')\naxes[1,0].set_ylabel('$K\\'$', rotation=0, labelpad=30, fontsize=16, va='center')", "_____no_output_____" ] ], [ [ "# Filled bands", "_____no_output_____" ] ], [ [ "from blg_strain.bands import FilledBands\n\nfb = FilledBands(bs=bs, EF=0.01)\nfb.calculate(Nkx=500, Nky=500)", "_____no_output_____" ] ], [ [ "Below is a plot of the $x$ component of magnetoelectric susceptibility as a function of doping (carrier density) for the band structure illustrated above.", "_____no_output_____" ] ], [ [ "EFs = np.linspace(0, 0.015, 100)\nns = np.empty_like(EFs)\nalphas = np.empty_like(EFs)\nfor i, EF in enumerate(EFs):\n fb = FilledBands(bs=bs, EF=EF)\n fb.calculate(500, 500)\n ns[i] = fb.n\n alphas[i] = fb.alpha[0]\n \nfig, ax = plt.subplots()\nax.plot(ns/1e16, alphas)\nax.set_xlabel('Carrier density ($10^{12}$ cm$^{-2}$)')\nax.set_ylabel('Magnetoelectric coefficient (a.u.)')", "_____no_output_____" ] ], [ [ "# Saving and Loading", "_____no_output_____" ] ], [ [ "base_path = 'example'\nsl.save(base_path)\nbs.save()\nfb.save()", "_____no_output_____" ], [ "sl_path = '/'.join((base_path, 
'StrainedLattice_eps0.010_theta0.000_Run0'))\nsl = StrainedLattice.load(sl_path + '.h5')\n\nbs_path = '/'.join((sl_path, 'BandStructure_Nkx200_Nky200_Delta10.000'))\nbs = BandStructure.load(bs_path + '.h5')\n\nfb_path = '/'.join((bs_path, 'FilledBands_Nkx500_Nky500_EF15.000'))\nfb = FilledBands.load(fb_path + '.h5')", "_____no_output_____" ] ], [ [ "## Create and load \"summary\" file", "_____no_output_____" ] ], [ [ "from blg_strain.utils.saver import load\n\nDeltas, EFs, ns, Ds, alphas = load(sl_path)", "_____no_output_____" ], [ "Deltas, EFs, ns, Ds, alphas", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb42ff9ff8c8d2f57af87ed3a584d88d66aed27a
561,014
ipynb
Jupyter Notebook
transfer-learning/Transfer_Learning.ipynb
sksq96/deep-learning
ab7171f05372ed5cc30c6425ee980a89d674071a
[ "MIT" ]
2
2020-09-03T13:15:29.000Z
2020-12-01T10:28:45.000Z
transfer-learning/Transfer_Learning.ipynb
sksq96/deep-learning
ab7171f05372ed5cc30c6425ee980a89d674071a
[ "MIT" ]
null
null
null
transfer-learning/Transfer_Learning.ipynb
sksq96/deep-learning
ab7171f05372ed5cc30c6425ee980a89d674071a
[ "MIT" ]
2
2019-12-25T12:38:45.000Z
2020-04-15T23:29:55.000Z
430.555641
254,140
0.935912
[ [ [ "# Transfer Learning\n\nMost of the time you won't want to train a whole convolutional network yourself. Modern ConvNets training on huge datasets like ImageNet take weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using [VGGNet](https://arxiv.org/pdf/1409.1556.pdf) trained on the [ImageNet dataset](http://www.image-net.org/) as a feature extractor. Below is a diagram of the VGGNet architecture.\n\n<img src=\"assets/cnnarchitecture.jpg\" width=700px>\n\nVGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes.\n\nYou can read more about transfer learning from [the CS231n course notes](http://cs231n.github.io/transfer-learning/#tf).\n\n## Pretrained VGGNet\n\nWe'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. This code is already included in 'tensorflow_vgg' directory, sdo you don't have to clone it.\n\nThis is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link. **You'll need to clone the repo into the folder containing this notebook.** Then download the parameter file using the next cell.", "_____no_output_____" ] ], [ [ "from urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\n\nvgg_dir = 'tensorflow_vgg/'\n# Make sure vgg exists\nif not isdir(vgg_dir):\n raise Exception(\"VGG directory doesn't exist!\")\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(vgg_dir + \"vgg16.npy\"):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:\n urlretrieve(\n 'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',\n vgg_dir + 'vgg16.npy',\n pbar.hook)\nelse:\n print(\"Parameter file already exists!\")", "VGG16 Parameters: 553MB [03:19, 2.77MB/s] \n" ] ], [ [ "## Flower power\n\nHere we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. 
This dataset comes from the [TensorFlow inception tutorial](https://www.tensorflow.org/tutorials/image_retraining).", "_____no_output_____" ] ], [ [ "import tarfile\n\ndataset_folder_path = 'flower_photos'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile('flower_photos.tar.gz'):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:\n urlretrieve(\n 'http://download.tensorflow.org/example_images/flower_photos.tgz',\n 'flower_photos.tar.gz',\n pbar.hook)\n\nif not isdir(dataset_folder_path):\n with tarfile.open('flower_photos.tar.gz') as tar:\n tar.extractall()\n tar.close()", "Flowers Dataset: 229MB [01:10, 3.27MB/s] \n" ] ], [ [ "## ConvNet Codes\n\nBelow, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier.\n\nHere we're using the `vgg16` module from `tensorflow_vgg`. The network takes images of size $224 \\times 224 \\times 3$ as input. Then it has 5 sets of convolutional layers. The network implemented here has this structure (copied from [the source code](https://github.com/machrisaa/tensorflow-vgg/blob/master/vgg16.py)):\n\n```\nself.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\nself.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\nself.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\nself.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\nself.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\nself.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\nself.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\nself.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\nself.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\nself.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\nself.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\nself.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\nself.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\nself.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\nself.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\nself.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\nself.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\nself.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\nself.fc6 = self.fc_layer(self.pool5, \"fc6\")\nself.relu6 = tf.nn.relu(self.fc6)\n```\n\nSo what we want are the values of the first fully connected layer, after being ReLUd (`self.relu6`). To build the network, we use\n\n```\nwith tf.Session() as sess:\n vgg = vgg16.Vgg16()\n input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])\n with tf.name_scope(\"content_vgg\"):\n vgg.build(input_)\n```\n\nThis creates the `vgg` object, then builds the graph with `vgg.build(input_)`. 
Then to get the values from the layer,\n\n```\nfeed_dict = {input_: images}\ncodes = sess.run(vgg.relu6, feed_dict=feed_dict)\n```", "_____no_output_____" ] ], [ [ "import os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_vgg import vgg16\nfrom tensorflow_vgg import utils", "/Users/ericness/anaconda3/envs/tensorflow/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: compiletime version 3.6 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.5\n return f(*args, **kwds)\n" ], [ "data_dir = 'flower_photos/'\ncontents = os.listdir(data_dir)\nclasses = [each for each in contents if os.path.isdir(data_dir + each)]", "_____no_output_____" ] ], [ [ "Below I'm running images through the VGG network in batches.\n\n> **Exercise:** Below, build the VGG network. Also get the codes from the first fully connected layer (make sure you get the ReLUd values).", "_____no_output_____" ] ], [ [ "# Set the batch size higher if you can fit in in your GPU memory\nbatch_size = 10\ncodes_list = []\nlabels = []\nbatch = []\n\ncodes = None\n\nwith tf.Session() as sess:\n \n # TODO: Build the vgg network here\n vgg = vgg16.Vgg16()\n input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])\n with tf.name_scope(\"content_vgg\"):\n vgg.build(input_)\n \n for each in classes:\n print(\"Starting {} images\".format(each))\n class_path = data_dir + each\n files = os.listdir(class_path)\n for ii, file in enumerate(files, 1):\n # Add images to the current batch\n # utils.load_image crops the input images for us, from the center\n img = utils.load_image(os.path.join(class_path, file))\n batch.append(img.reshape((1, 224, 224, 3)))\n labels.append(each)\n \n # Running the batch through the network to get the codes\n if ii % batch_size == 0 or ii == len(files):\n \n # Image batch to pass to VGG network\n images = np.concatenate(batch)\n \n # TODO: Get the values from the relu6 layer of the VGG network\n feed_dict = {input_: images}\n codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)\n \n # Here I'm building an array of the codes\n if codes is None:\n codes = codes_batch\n else:\n codes = np.concatenate((codes, codes_batch))\n \n # Reset to start building the next batch\n batch = []\n print('{} images processed'.format(ii))", "/Users/ericness/Dropbox/Education/Udacity/dl_foundations/deep-learning/transfer-learning/tensorflow_vgg/vgg16.npy\nnpy file loaded\nbuild model started\nbuild model finished: 0s\nStarting daisy images\n10 images processed\n20 images processed\n30 images processed\n40 images processed\n50 images processed\n60 images processed\n70 images processed\n80 images processed\n90 images processed\n100 images processed\n110 images processed\n120 images processed\n130 images processed\n140 images processed\n150 images processed\n160 images processed\n170 images processed\n180 images processed\n190 images processed\n200 images processed\n210 images processed\n220 images processed\n230 images processed\n240 images processed\n250 images processed\n260 images processed\n270 images processed\n280 images processed\n290 images processed\n300 images processed\n310 images processed\n320 images processed\n330 images processed\n340 images processed\n350 images processed\n360 images processed\n370 images processed\n380 images processed\n390 images processed\n400 images processed\n410 images processed\n420 images processed\n430 images processed\n440 images processed\n450 images processed\n460 images processed\n470 images processed\n480 images processed\n490 images 
processed\n500 images processed\n510 images processed\n520 images processed\n530 images processed\n540 images processed\n550 images processed\n560 images processed\n570 images processed\n580 images processed\n590 images processed\n600 images processed\n610 images processed\n620 images processed\n630 images processed\n633 images processed\nStarting dandelion images\n10 images processed\n20 images processed\n30 images processed\n40 images processed\n50 images processed\n60 images processed\n70 images processed\n80 images processed\n90 images processed\n100 images processed\n110 images processed\n120 images processed\n130 images processed\n140 images processed\n150 images processed\n160 images processed\n170 images processed\n180 images processed\n190 images processed\n200 images processed\n210 images processed\n220 images processed\n230 images processed\n240 images processed\n250 images processed\n260 images processed\n270 images processed\n280 images processed\n290 images processed\n300 images processed\n310 images processed\n320 images processed\n330 images processed\n340 images processed\n350 images processed\n360 images processed\n370 images processed\n380 images processed\n390 images processed\n400 images processed\n410 images processed\n420 images processed\n430 images processed\n440 images processed\n450 images processed\n460 images processed\n470 images processed\n480 images processed\n490 images processed\n500 images processed\n510 images processed\n520 images processed\n530 images processed\n540 images processed\n550 images processed\n560 images processed\n570 images processed\n580 images processed\n590 images processed\n600 images processed\n610 images processed\n620 images processed\n630 images processed\n640 images processed\n650 images processed\n660 images processed\n670 images processed\n680 images processed\n690 images processed\n700 images processed\n710 images processed\n720 images processed\n730 images processed\n740 images processed\n750 images processed\n760 images processed\n770 images processed\n780 images processed\n790 images processed\n800 images processed\n810 images processed\n820 images processed\n830 images processed\n840 images processed\n850 images processed\n860 images processed\n870 images processed\n880 images processed\n890 images processed\n898 images processed\nStarting roses images\n10 images processed\n20 images processed\n30 images processed\n40 images processed\n50 images processed\n60 images processed\n70 images processed\n80 images processed\n90 images processed\n100 images processed\n110 images processed\n120 images processed\n130 images processed\n140 images processed\n150 images processed\n160 images processed\n170 images processed\n180 images processed\n190 images processed\n200 images processed\n210 images processed\n220 images processed\n230 images processed\n240 images processed\n250 images processed\n260 images processed\n270 images processed\n280 images processed\n290 images processed\n300 images processed\n310 images processed\n320 images processed\n330 images processed\n340 images processed\n350 images processed\n360 images processed\n370 images processed\n380 images processed\n390 images processed\n400 images processed\n410 images processed\n420 images processed\n430 images processed\n440 images processed\n450 images processed\n460 images processed\n470 images processed\n480 images processed\n490 images processed\n500 images processed\n510 images processed\n520 images processed\n530 images processed\n540 images processed\n550 images 
processed\n560 images processed\n570 images processed\n580 images processed\n590 images processed\n600 images processed\n610 images processed\n620 images processed\n630 images processed\n640 images processed\n641 images processed\nStarting sunflowers images\n10 images processed\n20 images processed\n30 images processed\n40 images processed\n50 images processed\n60 images processed\n70 images processed\n80 images processed\n90 images processed\n100 images processed\n110 images processed\n120 images processed\n130 images processed\n140 images processed\n150 images processed\n160 images processed\n170 images processed\n180 images processed\n190 images processed\n200 images processed\n210 images processed\n220 images processed\n230 images processed\n240 images processed\n250 images processed\n260 images processed\n270 images processed\n280 images processed\n290 images processed\n300 images processed\n310 images processed\n320 images processed\n330 images processed\n340 images processed\n350 images processed\n360 images processed\n370 images processed\n380 images processed\n390 images processed\n400 images processed\n410 images processed\n420 images processed\n430 images processed\n440 images processed\n450 images processed\n460 images processed\n470 images processed\n480 images processed\n490 images processed\n500 images processed\n510 images processed\n520 images processed\n530 images processed\n540 images processed\n550 images processed\n560 images processed\n570 images processed\n580 images processed\n590 images processed\n600 images processed\n610 images processed\n620 images processed\n630 images processed\n640 images processed\n650 images processed\n660 images processed\n670 images processed\n680 images processed\n690 images processed\n699 images processed\nStarting tulips images\n10 images processed\n20 images processed\n30 images processed\n40 images processed\n50 images processed\n60 images processed\n70 images processed\n80 images processed\n90 images processed\n100 images processed\n110 images processed\n120 images processed\n130 images processed\n140 images processed\n150 images processed\n160 images processed\n170 images processed\n180 images processed\n190 images processed\n200 images processed\n210 images processed\n220 images processed\n230 images processed\n240 images processed\n250 images processed\n260 images processed\n270 images processed\n280 images processed\n290 images processed\n300 images processed\n310 images processed\n320 images processed\n330 images processed\n340 images processed\n350 images processed\n360 images processed\n370 images processed\n380 images processed\n390 images processed\n400 images processed\n410 images processed\n420 images processed\n430 images processed\n440 images processed\n450 images processed\n460 images processed\n470 images processed\n480 images processed\n490 images processed\n500 images processed\n510 images processed\n520 images processed\n530 images processed\n540 images processed\n550 images processed\n560 images processed\n570 images processed\n580 images processed\n590 images processed\n600 images processed\n610 images processed\n620 images processed\n630 images processed\n640 images processed\n650 images processed\n660 images processed\n670 images processed\n680 images processed\n690 images processed\n700 images processed\n710 images processed\n720 images processed\n730 images processed\n740 images processed\n750 images processed\n760 images processed\n770 images processed\n780 images processed\n790 images processed\n799 images 
processed\n" ], [ "# write codes to file\nwith open('codes', 'w') as f:\n codes.tofile(f)\n \n# write labels to file\nimport csv\nwith open('labels', 'w') as f:\n writer = csv.writer(f, delimiter='\\n')\n writer.writerow(labels)", "_____no_output_____" ] ], [ [ "## Building the Classifier\n\nNow that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work.", "_____no_output_____" ] ], [ [ "# read codes and labels from file\nimport csv\n\nwith open('labels') as f:\n reader = csv.reader(f, delimiter='\\n')\n labels = np.array([each for each in reader if len(each) > 0]).squeeze()\nwith open('codes') as f:\n codes = np.fromfile(f, dtype=np.float32)\n codes = codes.reshape((len(labels), -1))", "_____no_output_____" ] ], [ [ "### Data prep\n\nAs usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!\n\n> **Exercise:** From scikit-learn, use [LabelBinarizer](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelBinarizer.html) to create one-hot encoded vectors from the labels. ", "_____no_output_____" ] ], [ [ "labels[0]", "_____no_output_____" ], [ "codes.shape", "_____no_output_____" ], [ "from sklearn import preprocessing\n\nunique_labels = list(set(labels))\n\nlb = preprocessing.LabelBinarizer()\nlabels_vecs = lb.fit(unique_labels).transform(labels)\n", "_____no_output_____" ] ], [ [ "Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same the distribution of classes as it is for the whole data set. The easiest way to accomplish both these goals is to use [`StratifiedShuffleSplit`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) from scikit-learn.\n\nYou can create the splitter like so:\n```\nss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)\n```\nThen split the data with \n```\nsplitter = ss.split(x, y)\n```\n\n`ss.split` returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use `next(splitter)` to get the indices. 
Be sure to read the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) and the [user guide](http://scikit-learn.org/stable/modules/cross_validation.html#random-permutations-cross-validation-a-k-a-shuffle-split).\n\n> **Exercise:** Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import StratifiedShuffleSplit\n\nss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)\n\ntrain_idx, val_idx = next(ss.split(codes, labels_vecs))\n\nhalf_val_len = int(len(val_idx)/2)\nval_idx, test_idx = val_idx[:half_val_len], val_idx[half_val_len:]\n\ntrain_x, train_y = codes[train_idx], labels_vecs[train_idx]\nval_x, val_y = codes[val_idx], labels_vecs[val_idx]\ntest_x, test_y = codes[test_idx], labels_vecs[test_idx] ", "_____no_output_____" ], [ "print(\"Train shapes (x, y):\", train_x.shape, train_y.shape)\nprint(\"Validation shapes (x, y):\", val_x.shape, val_y.shape)\nprint(\"Test shapes (x, y):\", test_x.shape, test_y.shape)", "Train shapes (x, y): (2936, 4096) (2936, 5)\nValidation shapes (x, y): (367, 4096) (367, 5)\nTest shapes (x, y): (367, 4096) (367, 5)\n" ] ], [ [ "If you did it right, you should see these sizes for the training sets:\n\n```\nTrain shapes (x, y): (2936, 4096) (2936, 5)\nValidation shapes (x, y): (367, 4096) (367, 5)\nTest shapes (x, y): (367, 4096) (367, 5)\n```", "_____no_output_____" ], [ "### Classifier layers\n\nOnce you have the convolutional codes, you just need to build a classfier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network.\n\n> **Exercise:** With the codes and labels loaded, build the classifier. Consider the codes as your inputs, each of them are 4096D vectors. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost.", "_____no_output_____" ] ], [ [ "inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])\nlabels_ = tf.placeholder(tf.float32, shape=[None, labels_vecs.shape[1]])\n\n# TODO: Classifier layers and operations\ndense1 = tf.layers.dense(inputs_, 256, activation=tf.nn.relu)\n\ndropout1 = tf.layers.dropout(dense1, 0.2)\n\ndense2 = tf.layers.dense(dropout1, 64, activation=tf.nn.relu)\n\ndropout2 = tf.layers.dropout(dense2, 0.2)\n\nlogits = tf.layers.dense(dropout2, len(unique_labels), activation=None)\ncost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_, logits=logits))\n\noptimizer = tf.train.AdamOptimizer(0.005).minimize(cost)\n\n# Operations for validation/test accuracy\npredicted = tf.nn.softmax(logits)\ncorrect_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))", "_____no_output_____" ] ], [ [ "### Batches!\n\nHere is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data.", "_____no_output_____" ] ], [ [ "def get_batches(x, y, n_batches=10):\n \"\"\" Return a generator that yields batches from arrays x and y. 
\"\"\"\n batch_size = len(x)//n_batches\n \n for ii in range(0, n_batches*batch_size, batch_size):\n # If we're not on the last batch, grab data with size batch_size\n if ii != (n_batches-1)*batch_size:\n X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size] \n # On the last batch, grab the rest of the data\n else:\n X, Y = x[ii:], y[ii:]\n # I love generators\n yield X, Y", "_____no_output_____" ] ], [ [ "### Training\n\nHere, we'll train the network.\n\n> **Exercise:** So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. Use the `get_batches` function I wrote before to get your batches like `for x, y in get_batches(train_x, train_y)`. Or write your own!", "_____no_output_____" ] ], [ [ "epochs = 10\niteration = 0\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n \n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n for x, y in get_batches(train_x, train_y):\n feed = {inputs_: x,\n labels_: y}\n loss, _ = sess.run([cost, optimizer], feed_dict=feed)\n print(\"Epoch: {}/{}\".format(e+1, epochs),\n \"Iteration: {}\".format(iteration),\n \"Training loss: {:.5f}\".format(loss))\n iteration += 1\n \n if iteration % 5 == 0:\n feed = {inputs_: val_x,\n labels_: val_y}\n val_acc = sess.run(accuracy, feed_dict=feed)\n print(\"Epoch: {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Validation Acc: {:.4f}\".format(val_acc))\n saver.save(sess, \"checkpoints/flowers.ckpt\")", "Epoch: 1/10 Iteration: 0 Training loss: 2.15550\nEpoch: 1/10 Iteration: 1 Training loss: 13.43394\nEpoch: 1/10 Iteration: 2 Training loss: 11.91960\nEpoch: 1/10 Iteration: 3 Training loss: 6.86606\nEpoch: 1/10 Iteration: 4 Training loss: 3.34315\nEpoch: 0/10 Iteration: 5 Validation Acc: 0.6294\nEpoch: 1/10 Iteration: 5 Training loss: 3.16944\nEpoch: 1/10 Iteration: 6 Training loss: 3.74332\nEpoch: 1/10 Iteration: 7 Training loss: 2.24317\nEpoch: 1/10 Iteration: 8 Training loss: 1.22297\nEpoch: 1/10 Iteration: 9 Training loss: 0.98106\nEpoch: 0/10 Iteration: 10 Validation Acc: 0.7466\nEpoch: 2/10 Iteration: 10 Training loss: 0.71755\nEpoch: 2/10 Iteration: 11 Training loss: 0.79062\nEpoch: 2/10 Iteration: 12 Training loss: 1.14872\nEpoch: 2/10 Iteration: 13 Training loss: 0.83818\nEpoch: 2/10 Iteration: 14 Training loss: 0.94546\nEpoch: 1/10 Iteration: 15 Validation Acc: 0.8174\nEpoch: 2/10 Iteration: 15 Training loss: 0.69332\nEpoch: 2/10 Iteration: 16 Training loss: 0.71916\nEpoch: 2/10 Iteration: 17 Training loss: 0.42164\nEpoch: 2/10 Iteration: 18 Training loss: 0.39719\nEpoch: 2/10 Iteration: 19 Training loss: 0.63498\nEpoch: 1/10 Iteration: 20 Validation Acc: 0.7766\nEpoch: 3/10 Iteration: 20 Training loss: 0.53496\nEpoch: 3/10 Iteration: 21 Training loss: 0.46704\nEpoch: 3/10 Iteration: 22 Training loss: 0.39498\nEpoch: 3/10 Iteration: 23 Training loss: 0.27884\nEpoch: 3/10 Iteration: 24 Training loss: 0.23453\nEpoch: 2/10 Iteration: 25 Validation Acc: 0.8474\nEpoch: 3/10 Iteration: 25 Training loss: 0.26922\nEpoch: 3/10 Iteration: 26 Training loss: 0.41700\nEpoch: 3/10 Iteration: 27 Training loss: 0.29992\nEpoch: 3/10 Iteration: 28 Training loss: 0.24477\nEpoch: 3/10 Iteration: 29 Training loss: 0.22896\nEpoch: 2/10 Iteration: 30 Validation Acc: 0.8256\nEpoch: 4/10 Iteration: 30 Training loss: 0.14196\nEpoch: 4/10 Iteration: 31 Training loss: 0.15383\nEpoch: 4/10 Iteration: 32 Training loss: 0.20010\nEpoch: 4/10 
Iteration: 33 Training loss: 0.19133\nEpoch: 4/10 Iteration: 34 Training loss: 0.16920\nEpoch: 3/10 Iteration: 35 Validation Acc: 0.8311\nEpoch: 4/10 Iteration: 35 Training loss: 0.17282\nEpoch: 4/10 Iteration: 36 Training loss: 0.21023\nEpoch: 4/10 Iteration: 37 Training loss: 0.12780\nEpoch: 4/10 Iteration: 38 Training loss: 0.11128\nEpoch: 4/10 Iteration: 39 Training loss: 0.12850\nEpoch: 3/10 Iteration: 40 Validation Acc: 0.8501\nEpoch: 5/10 Iteration: 40 Training loss: 0.09171\nEpoch: 5/10 Iteration: 41 Training loss: 0.11403\nEpoch: 5/10 Iteration: 42 Training loss: 0.10393\nEpoch: 5/10 Iteration: 43 Training loss: 0.11188\nEpoch: 5/10 Iteration: 44 Training loss: 0.09737\nEpoch: 4/10 Iteration: 45 Validation Acc: 0.8610\nEpoch: 5/10 Iteration: 45 Training loss: 0.10674\nEpoch: 5/10 Iteration: 46 Training loss: 0.12389\nEpoch: 5/10 Iteration: 47 Training loss: 0.08740\nEpoch: 5/10 Iteration: 48 Training loss: 0.08173\nEpoch: 5/10 Iteration: 49 Training loss: 0.07571\nEpoch: 4/10 Iteration: 50 Validation Acc: 0.8474\nEpoch: 6/10 Iteration: 50 Training loss: 0.07307\nEpoch: 6/10 Iteration: 51 Training loss: 0.09155\nEpoch: 6/10 Iteration: 52 Training loss: 0.05998\nEpoch: 6/10 Iteration: 53 Training loss: 0.08215\nEpoch: 6/10 Iteration: 54 Training loss: 0.07238\nEpoch: 5/10 Iteration: 55 Validation Acc: 0.8719\nEpoch: 6/10 Iteration: 55 Training loss: 0.08115\nEpoch: 6/10 Iteration: 56 Training loss: 0.11844\nEpoch: 6/10 Iteration: 57 Training loss: 0.07272\nEpoch: 6/10 Iteration: 58 Training loss: 0.05314\nEpoch: 6/10 Iteration: 59 Training loss: 0.06103\nEpoch: 5/10 Iteration: 60 Validation Acc: 0.8501\nEpoch: 7/10 Iteration: 60 Training loss: 0.06314\nEpoch: 7/10 Iteration: 61 Training loss: 0.06992\nEpoch: 7/10 Iteration: 62 Training loss: 0.05834\nEpoch: 7/10 Iteration: 63 Training loss: 0.06845\nEpoch: 7/10 Iteration: 64 Training loss: 0.04818\nEpoch: 6/10 Iteration: 65 Validation Acc: 0.8583\nEpoch: 7/10 Iteration: 65 Training loss: 0.06607\nEpoch: 7/10 Iteration: 66 Training loss: 0.07573\nEpoch: 7/10 Iteration: 67 Training loss: 0.06438\nEpoch: 7/10 Iteration: 68 Training loss: 0.04777\nEpoch: 7/10 Iteration: 69 Training loss: 0.04485\nEpoch: 6/10 Iteration: 70 Validation Acc: 0.8719\nEpoch: 8/10 Iteration: 70 Training loss: 0.04231\nEpoch: 8/10 Iteration: 71 Training loss: 0.06324\nEpoch: 8/10 Iteration: 72 Training loss: 0.03677\nEpoch: 8/10 Iteration: 73 Training loss: 0.05039\nEpoch: 8/10 Iteration: 74 Training loss: 0.04709\nEpoch: 7/10 Iteration: 75 Validation Acc: 0.8774\nEpoch: 8/10 Iteration: 75 Training loss: 0.04231\nEpoch: 8/10 Iteration: 76 Training loss: 0.05109\nEpoch: 8/10 Iteration: 77 Training loss: 0.05331\nEpoch: 8/10 Iteration: 78 Training loss: 0.04650\nEpoch: 8/10 Iteration: 79 Training loss: 0.03930\nEpoch: 7/10 Iteration: 80 Validation Acc: 0.8828\nEpoch: 9/10 Iteration: 80 Training loss: 0.02749\nEpoch: 9/10 Iteration: 81 Training loss: 0.03800\nEpoch: 9/10 Iteration: 82 Training loss: 0.02947\nEpoch: 9/10 Iteration: 83 Training loss: 0.05074\nEpoch: 9/10 Iteration: 84 Training loss: 0.04849\nEpoch: 8/10 Iteration: 85 Validation Acc: 0.9019\nEpoch: 9/10 Iteration: 85 Training loss: 0.03153\nEpoch: 9/10 Iteration: 86 Training loss: 0.04161\nEpoch: 9/10 Iteration: 87 Training loss: 0.05535\nEpoch: 9/10 Iteration: 88 Training loss: 0.04335\nEpoch: 9/10 Iteration: 89 Training loss: 0.04239\nEpoch: 8/10 Iteration: 90 Validation Acc: 0.8828\nEpoch: 10/10 Iteration: 90 Training loss: 0.02280\nEpoch: 10/10 Iteration: 91 Training loss: 0.03285\nEpoch: 10/10 
Iteration: 92 Training loss: 0.03035\nEpoch: 10/10 Iteration: 93 Training loss: 0.05355\nEpoch: 10/10 Iteration: 94 Training loss: 0.04446\nEpoch: 9/10 Iteration: 95 Validation Acc: 0.8883\nEpoch: 10/10 Iteration: 95 Training loss: 0.02867\nEpoch: 10/10 Iteration: 96 Training loss: 0.03725\nEpoch: 10/10 Iteration: 97 Training loss: 0.04641\nEpoch: 10/10 Iteration: 98 Training loss: 0.04265\nEpoch: 10/10 Iteration: 99 Training loss: 0.03986\nEpoch: 9/10 Iteration: 100 Validation Acc: 0.8910\n" ] ], [ [ "### Testing\n\nBelow you see the test accuracy. You can also see the predictions returned for images.", "_____no_output_____" ] ], [ [ "with tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n \n feed = {inputs_: test_x,\n labels_: test_y}\n test_acc = sess.run(accuracy, feed_dict=feed)\n print(\"Test accuracy: {:.4f}\".format(test_acc))", "INFO:tensorflow:Restoring parameters from checkpoints/flowers.ckpt\nTest accuracy: 0.8910\n" ], [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import imread", "_____no_output_____" ] ], [ [ "Below, feel free to choose images and see how the trained classifier predicts the flowers in them.", "_____no_output_____" ] ], [ [ "test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'\ntest_img = imread(test_img_path)\nplt.imshow(test_img)", "/Users/ericness/anaconda3/envs/tensorflow/lib/python3.5/site-packages/ipykernel_launcher.py:2: DeprecationWarning: `imread` is deprecated!\n`imread` is deprecated in SciPy 1.0.0.\nUse ``matplotlib.pyplot.imread`` instead.\n \n" ], [ "# Run this cell if you don't have a vgg graph built\nif 'vgg' in globals():\n print('\"vgg\" object already exists. Will not create again.')\nelse:\n #create vgg\n with tf.Session() as sess:\n input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])\n vgg = vgg16.Vgg16()\n vgg.build(input_)", "_____no_output_____" ], [ "with tf.Session() as sess:\n img = utils.load_image(test_img_path)\n img = img.reshape((1, 224, 224, 3))\n\n feed_dict = {input_: img}\n code = sess.run(vgg.relu6, feed_dict=feed_dict)\n \nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n \n feed = {inputs_: code}\n prediction = sess.run(predicted, feed_dict=feed).squeeze()", "INFO:tensorflow:Restoring parameters from checkpoints/flowers.ckpt\n" ], [ "plt.imshow(test_img)", "_____no_output_____" ], [ "plt.barh(np.arange(5), prediction)\n_ = plt.yticks(np.arange(5), lb.classes_)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]